def all_printoff():
    from hotspotter import fileio as io
    from hotspotter import HotSpotterAPI as api
    ds.print_off()
    mf.print_off()
    io.print_off()
    api.print_off()
    mc3.print_off()
    vr2.print_off()
def open_database(back, db_dir=None):
    # File -> Open Database
    try:
        # Use the same args in a new (opened) database
        args = back.hs.args
        if db_dir is None:
            msg = 'Select (or create) a database directory.'
            db_dir = guitools.select_directory(msg)
        print('[*back] user selects database: ' + db_dir)
        # Try and load db
        args.dbdir = db_dir
        hs = HotSpotterAPI.HotSpotter(args=args, db_dir=db_dir)
        hs.load(load_all=False)
        # Write to cache and connect if successful
        io.global_cache_write('db_dir', db_dir)
        back.connect_api(hs)
        #back.layout_figures()
    except Exception as ex:
        import traceback
        print(traceback.format_exc())
        back.user_info('Aborting open database')
        print('aborting open database')
        print(ex)
        if back.hs.args.strict:
            raise
    print('')
def main(defaultdb='NAUTS', usedbcache=False, default_load_all=True):
    import matplotlib
    matplotlib.use('Qt4Agg')
    imports()
    from hscom import argparse2
    from hscom import helpers
    from hotspotter import HotSpotterAPI as api
    args = argparse2.parse_arguments(defaultdb=defaultdb)  # Parse arguments
    args = argparse2.fix_args_with_cache(args)
    if usedbcache:
        load_all, cids = preload_args_process(args)
    else:
        args = argparse2.fix_args_shortnames(args)
        load_all = helpers.get_flag('--load-all', default_load_all)
    # Preload process args
    if args.delete_global:
        from hscom import fileio as io
        io.delete_global_cache()
    # --- Build HotSpotter API ---
    hs = api.HotSpotter(args)
    setcfg = args.setcfg
    if setcfg is not None:
        import experiment_harness
        print('[main] setting cfg to %r' % setcfg)
        varried_list = experiment_harness.get_varried_params_list([setcfg])
        cfg_dict = varried_list[0]
        #print(cfg_dict)
        hs.prefs.query_cfg.update_cfg(**cfg_dict)
        hs.prefs.save()
        #hs.prefs.printme()
    # load default preferences
    hs.default_preferences()
    # Load all data if needed now, otherwise be lazy
    try:
        hs.load(load_all=load_all)
        from hscom import fileio as io
        #imported from wrong module
        #from hotspotter import fileio as io
        db_dir = hs.dirs.db_dir
        io.global_cache_write('db_dir', db_dir)
    except ValueError as ex:
        print('[main] ValueError = %r' % (ex,))
        if hs.args.strict:
            raise
    return hs
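# Hypothetical driver sketch for the main() above (a minimal sketch assuming it
# is invoked from the same module; no command-line flags beyond those already
# read by main() are assumed):
def _demo_load_nauts():
    hs = main(defaultdb='NAUTS', default_load_all=False)
    print(hs.dirs.db_dir)
    return hs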
def main(defaultdb='cache', preload=False, app=None):
    from hscom import fileio as io
    from hscom import params
    from hotspotter import HotSpotterAPI as api
    from hsgui import guitools
    from hsgui import guiback
    if app is True:
        app, is_root = guitools.init_qtapp()
    args = parse_arguments(defaultdb, defaultdb == 'cache')
    # --- Build HotSpotter API ---
    if app is None:
        hs = api.HotSpotter(args)
    else:
        back = guiback.make_main_window(app)
        hs = back.open_database(args.dbdir)
    setcfg = args.setcfg
    if setcfg is not None:
        # FIXME move experiment harness to hsdev
        import experiment_harness
        print('[tapi.main] setting cfg to %r' % setcfg)
        varied_list = experiment_harness.get_varied_params_list([setcfg])
        cfg_dict = varied_list[0]
        hs.prefs.query_cfg.update_cfg(**cfg_dict)
        hs.prefs.save()
        hs.prefs.printme()
    # Load all data if needed now, otherwise be lazy
    try:
        load_all = preload
        hs.load(load_all=load_all)
        db_dir = hs.dirs.db_dir
        io.global_cache_write('db_dir', db_dir)
    except ValueError as ex:
        print('[tapi.main] ValueError = %r' % (ex,))
        if params.args.strict:
            raise
    if app is not None:
        return hs, back
    else:
        from hsgui import guitools
        app, is_root = guitools.init_qtapp()
        hs.app = app
        return hs
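# Hypothetical usage sketch for the main() above (a minimal sketch assuming it
# is invoked from the same module; only parameters and attributes that appear
# in the function body are used):
def _demo_open_cached_db():
    # app=True builds the Qt app and the guiback main window; both the
    # HotSpotter API object and the backend are returned. app=None (the
    # default) returns only the API object, with a Qt app attached as hs.app.
    hs, back = main(defaultdb='cache', preload=False, app=True)
    print(hs.dirs.db_dir)
    return hs, back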
def open_database(db_dir=None):
    # File -> Open Database
    try:
        # Use the same args in a new (opened) database
        args = params.args
        # Try and load db
        if args is not None:
            args.dbdir = db_dir
        hs = HotSpotterAPI.HotSpotter(args=args, db_dir=db_dir)
        hs.load(load_all=False)
    except Exception as ex:
        import traceback
        import sys
        print(traceback.format_exc())
        print('aborting open database')
        print(ex)
        if '--strict' in sys.argv:
            raise
        # NOTE: this re-raises unconditionally, so the --strict check above is
        # currently redundant
        raise
    print('')
    return hs
def open_database(db_dir=None):
    # File -> Open Database
    try:
        # Use the same args in a new (opened) database
        args = params.args
        # Try and load db
        if args is not None:
            args.dbdir = db_dir
        hs = HotSpotterAPI.HotSpotter(args=args, db_dir=db_dir)
        hs.load(load_all=False)
        # Write to cache and connect if successful
        io.global_cache_write('db_dir', db_dir)
        #back.layout_figures()
    except Exception as ex:
        import traceback
        import sys
        print(traceback.format_exc())
        print('aborting open database')
        print(ex)
        if '--strict' in sys.argv:
            raise
        # NOTE: as above, this re-raises unconditionally
        raise
    print('')
    return hs
from hscom import helpers
from hscom import helpers as util
from hsviz import viz
import multiprocessing
import numpy as np  # NOQA
# Assumed imports (not present in the original snippet, inferred from usage below):
from hscom import argparse2
from hotspotter import HotSpotterAPI as api
from hotspotter import chip_compute2 as cc2

if __name__ == '__main__':
    multiprocessing.freeze_support()
    # Debugging vars
    chip_cfg = None
    cx_list = None
    kwargs = {}
    # --- LOAD TABLES --- #
    args = argparse2.parse_arguments(defaultdb='NAUTS')
    hs = api.HotSpotter(args)
    hs.load_tables()
    hs.update_samples()
    # --- LOAD CHIPS --- #
    force_compute = helpers.get_flag('--force', default=False)
    cc2.load_chips(hs, force_compute=force_compute)
    cx = helpers.get_arg('--cx', type_=int)
    if cx is not None:
        #tau = np.pi * 2
        #hs.change_theta(cx, tau / 8)
        viz.show_chip(hs, cx, draw_kpts=False, fnum=1)
        viz.show_image(hs, hs.cx2_gx(cx), fnum=2)
    else:
        print('usage: feature_compute.py --cx [cx]')
    exec(viz.df2.present())
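# Example invocations implied by the usage string above (only the --cx and
# --force flags that this block itself reads are assumed):
#   python feature_compute.py --cx 0
#   python feature_compute.py --cx 0 --force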
def __define_method(hs, method_name):
    from hotspotter import HotSpotterAPI as api
    api.rrr()
    method_name = 'cx2_tnx'
    hs.__dict__[method_name] = lambda *args: api.__dict__['_' + method_name](hs, *args)
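# __define_method above monkey-patches hs at runtime: it stores a lambda in the
# instance __dict__ that forwards to a module-level '_'-prefixed implementation
# (api._cx2_tnx). A minimal self-contained sketch of the same pattern follows;
# every name in it is illustrative and not part of HotSpotter:
def _cx2_double(obj, cx):
    # stand-in for a module-level implementation such as api._cx2_tnx(hs, cx)
    return cx * 2

def _define_instance_method(obj, method_name, module_dict):
    # attach the '_'-prefixed implementation to this instance under method_name
    obj.__dict__[method_name] = lambda *args: module_dict['_' + method_name](obj, *args)

def _demo_define_instance_method():
    class _Example(object):
        pass
    ex = _Example()
    _define_instance_method(ex, 'cx2_double', globals())
    assert ex.cx2_double(3) == 6
    return ex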
def test_configurations(hs, qcx_list, test_cfg_name_list, fnum=1):
    if __QUIET__:
        mc3.print_off()
        from hotspotter import HotSpotterAPI as api
        api.print_off()
    # Test Each configuration
    if not __QUIET__:
        print(textwrap.dedent("""
        [harn]================
        [harn] experiment_harness.test_configurations()""").strip())
    hs.update_samples()
    # Grab list of algorithm configurations to test
    cfg_list = get_cfg_list(hs, test_cfg_name_list)
    if not __QUIET__:
        print('[harn] Testing %d different parameters' % len(cfg_list))
        print('[harn] %d different chips' % len(qcx_list))
    # Preallocate test result aggregation structures
    sel_cols = params.args.sel_cols  # FIXME
    sel_rows = params.args.sel_rows  # FIXME
    sel_cols = [] if sel_cols is None else sel_cols
    sel_rows = [] if sel_rows is None else sel_rows
    nCfg = len(cfg_list)
    nQuery = len(qcx_list)
    #rc2_res = np.empty((nQuery, nCfg), dtype=list)  # row/col -> result
    mat_list = []
    qreq = ds.QueryRequest()
    # TODO Add to argparse2
    nocache_testres = util.get_flag('--nocache-testres', False)
    test_results_verbosity = 2 - (2 * __QUIET__)
    test_cfg_verbosity = 2
    dbname = hs.get_db_name()
    testnameid = dbname + ' ' + str(test_cfg_name_list)
    msg = textwrap.dedent('''
    ---------------------
    [harn] TEST_CFG %d/%d: ''' + testnameid + '''
    ---------------------''')
    mark_progress = util.simple_progres_func(test_cfg_verbosity, msg, '+')
    nomemory = params.args.nomemory
    # Run each test configuration
    # Query Config / Col Loop
    dcxs = hs.get_indexed_sample()
    for cfgx, query_cfg in enumerate(cfg_list):
        if not __QUIET__:
            mark_progress(cfgx + 1, nCfg)
        # Set data to the current config
        qreq = mc3.prep_query_request(qreq=qreq, qcxs=qcx_list, dcxs=dcxs,
                                      query_cfg=query_cfg)
        # Run the test / read cache
        with util.Indenter2('[%s cfg %d/%d]' % (dbname, cfgx + 1, nCfg)):
            qx2_bestranks = get_test_results2(hs, qcx_list, qreq, cfgx, nCfg,
                                              nocache_testres,
                                              test_results_verbosity)
        if not nomemory:
            mat_list.append(qx2_bestranks)  # Store the results
    if not __QUIET__:
        print('[harn] Finished testing parameters')
    if nomemory:
        print('ran tests in memory savings mode. exiting')
        return
    #--------------------
    # Print Best Results
    rank_mat = np.hstack(mat_list)  # concatenate each query rank across configs
    # Label the rank matrix:
    _colxs = np.arange(nCfg)
    lbld_mat = util.debug_vstack([_colxs, rank_mat])
    _rowxs = np.arange(nQuery + 1).reshape(nQuery + 1, 1) - 1
    lbld_mat = np.hstack([_rowxs, lbld_mat])
    #------------
    # Build row labels
    qx2_lbl = []
    for qx in xrange(nQuery):
        qcx = qcx_list[qx]
        label = 'qx=%d) q%s ' % (qx, hs.cidstr(qcx, notes=True))
        qx2_lbl.append(label)
    qx2_lbl = np.array(qx2_lbl)
    #------------
    # Build col labels
    cfgx2_lbl = []
    for cfgx in xrange(nCfg):
        test_uid = cfg_list[cfgx].get_uid()
        cfg_label = 'cfgx=(%3d) %s' % (cfgx, test_uid)
        cfgx2_lbl.append(cfg_label)
    cfgx2_lbl = np.array(cfgx2_lbl)
    #------------
    indent = util.indent

    @ArgGaurdFalse
    def print_rowlbl():
        print('=====================')
        print('[harn] Row/Query Labels: %s' % testnameid)
        print('=====================')
        print('[harn] queries:\n%s' % '\n'.join(qx2_lbl))
        print('--- /Row/Query Labels ---')
    print_rowlbl()
    #------------

    @ArgGaurdFalse
    def print_collbl():
        print('')
        print('=====================')
        print('[harn] Col/Config Labels: %s' % testnameid)
        print('=====================')
        print('[harn] configs:\n%s' % '\n'.join(cfgx2_lbl))
        print('--- /Col/Config Labels ---')
    print_collbl()
    #------------
    # Build Colscore
    qx2_min_rank = []
    qx2_argmin_rank = []
    new_hard_qx_list = []
    new_qcid_list = []
    new_hardtup_list = []
    for qx in xrange(nQuery):
        ranks = rank_mat[qx]
        min_rank = ranks.min()
        bestCFG_X = np.where(ranks == min_rank)[0]
        qx2_min_rank.append(min_rank)
        qx2_argmin_rank.append(bestCFG_X)
        # Mark examples as hard
        if ranks.max() > 0:
            new_hard_qx_list += [qx]
    for qx in new_hard_qx_list:
        # New list is in cid format instead of cx format
        # because you should be copying and pasting it
        notes = ' ranks = ' + str(rank_mat[qx])
        qcx = qcx_list[qx]
        qcid = hs.tables.cx2_cid[qcx]
        new_hardtup_list += [(qcid, notes)]
        new_qcid_list += [qcid]

    @ArgGaurdFalse
    def print_rowscore():
        print('')
        print('=======================')
        print('[harn] Scores per Query: %s' % testnameid)
        print('=======================')
        for qx in xrange(nQuery):
            bestCFG_X = qx2_argmin_rank[qx]
            min_rank = qx2_min_rank[qx]
            minimizing_cfg_str = indent('\n'.join(cfgx2_lbl[bestCFG_X]), ' ')
            #minimizing_cfg_str = str(bestCFG_X)
            print('-------')
            print(qx2_lbl[qx])
            print(' best_rank = %d ' % min_rank)
            if len(cfgx2_lbl) != 1:
                print(' minimizing_cfg_x\'s = %s ' % minimizing_cfg_str)
    print_rowscore()
    #------------

    @ArgGaurdFalse
    def print_hardcase():
        print('===')
        print('--- hard new_hardtup_list (w.r.t these configs): %s' % testnameid)
        print('\n'.join(map(repr, new_hardtup_list)))
        print('There are %d hard cases ' % len(new_hardtup_list))
        print(sorted([x[0] for x in new_hardtup_list]))
        print('--- /Print Hardcase ---')
    print_hardcase()

    @ArgGaurdFalse
    def echo_hardcase():
        print('====')
        print('--- hardcase commandline: %s' % testnameid)
        hardcids_str = ' '.join(map(str, [' ', '--qcid'] + new_qcid_list))
        print(hardcids_str)
        print('--- /Echo Hardcase ---')
    echo_hardcase()
    #------------
    # Build Colscore
    X_list = [1, 5]
    # Build a dictionary mapping X (as in #ranks < X) to a list of cfg scores
    nLessX_dict = {int(X): np.zeros(nCfg) for X in iter(X_list)}
    for cfgx in xrange(nCfg):
        ranks = rank_mat[:, cfgx]
        for X in iter(X_list):
            #nLessX_ = sum(np.bitwise_and(ranks < X, ranks >= 0))
            nLessX_ = sum(np.logical_and(ranks < X, ranks >= 0))
            nLessX_dict[int(X)][cfgx] = nLessX_

    @ArgGaurdFalse
    def print_colscore():
        print('')
        print('==================')
        print('[harn] Scores per Config: %s' % testnameid)
        print('==================')
        for cfgx in xrange(nCfg):
            print('[score] %s' % (cfgx2_lbl[cfgx]))
            for X in iter(X_list):
                nLessX_ = nLessX_dict[int(X)][cfgx]
                print(' ' + rankscore_str(X, nLessX_, nQuery))
        print('--- /Scores per Config ---')
    print_colscore()
    #------------

    @ArgGaurdFalse
    def print_latexsum():
        print('')
        print('==========================')
        print('[harn] LaTeX: %s' % testnameid)
        print('==========================')
        # Create configuration latex table
        criteria_lbls = ['#ranks < %d' % X for X in X_list]
        db_name = hs.get_db_name(True)
        cfg_score_title = db_name + ' rank scores'
        cfgscores = np.array([nLessX_dict[int(X)] for X in X_list]).T
        replace_rowlbl = [(' *cfgx *', ' ')]
        tabular_kwargs = dict(title=cfg_score_title, out_of=nQuery,
                              bold_best=True, replace_rowlbl=replace_rowlbl,
                              flip=True)
        tabular_str = latex_formater.make_score_tabular(cfgx2_lbl,
                                                        criteria_lbls,
                                                        cfgscores,
                                                        **tabular_kwargs)
        #latex_formater.render(tabular_str)
        print(tabular_str)
        print('--- /LaTeX ---')
    print_latexsum()
    #------------
    best_rankscore_summary = []
    to_intersect_list = []
    # print each configs scores less than X=thresh
    for X, cfgx2_nLessX in nLessX_dict.iteritems():
        max_LessX = cfgx2_nLessX.max()
        bestCFG_X = np.where(cfgx2_nLessX == max_LessX)[0]
        best_rankscore = '[cfg*] %d cfg(s) scored ' % len(bestCFG_X)
        best_rankscore += rankscore_str(X, max_LessX, nQuery)
        best_rankscore_summary += [best_rankscore]
        to_intersect_list += [cfgx2_lbl[bestCFG_X]]
    intersected = to_intersect_list[0] if len(to_intersect_list) > 0 else []
    for ix in xrange(1, len(to_intersect_list)):
        intersected = np.intersect1d(intersected, to_intersect_list[ix])

    @ArgGaurdFalse
    def print_bestcfg():
        print('')
        print('==========================')
        print('[harn] Best Configurations: %s' % testnameid)
        print('==========================')
        # print each configs scores less than X=thresh
        for X, cfgx2_nLessX in nLessX_dict.iteritems():
            max_LessX = cfgx2_nLessX.max()
            bestCFG_X = np.where(cfgx2_nLessX == max_LessX)[0]
            best_rankscore = '[cfg*] %d cfg(s) scored ' % len(bestCFG_X)
            best_rankscore += rankscore_str(X, max_LessX, nQuery)
            uid_list = cfgx2_lbl[bestCFG_X]
            #best_rankcfg = ''.join(map(wrap_uid, uid_list))
            best_rankcfg = format_uid_list(uid_list)
            #indent('\n'.join(uid_list), ' ')
            print(best_rankscore)
            print(best_rankcfg)
        print('[cfg*] %d cfg(s) are the best of %d total cfgs' % (len(intersected), nCfg))
        print(format_uid_list(intersected))
        print('--- /Best Configurations ---')
    print_bestcfg()
    #------------

    @ArgGaurdFalse
    def print_rankmat():
        print('')
        print('-------------')
        print('RankMat: %s' % testnameid)
        print(' nRows=%r, nCols=%r' % lbld_mat.shape)
        print(' labled rank matrix: rows=queries, cols=cfgs:')
        #np.set_printoptions(threshold=5000, linewidth=5000, precision=5)
        with util.NpPrintOpts(threshold=5000, linewidth=5000, precision=5):
            print(lbld_mat)
        print('[harn]-------------')
    print_rankmat()
    #------------
    sumstrs = []
    sumstrs.append('')
    sumstrs.append('||===========================')
    sumstrs.append('|| [cfg*] SUMMARY: %s' % testnameid)
    sumstrs.append('||---------------------------')
    sumstrs.append(util.joins('\n|| ', best_rankscore_summary))
    sumstrs.append('||===========================')
    print('\n' + '\n'.join(sumstrs) + '\n')
    #print('--- /SUMMARY ---')
    # Draw results
    if not __QUIET__:
        print('remember to inspect with --sel-rows (-r) and --sel-cols (-c) ')
    if len(sel_rows) > 0 and len(sel_cols) == 0:
        sel_cols = range(len(cfg_list))
    if len(sel_cols) > 0 and len(sel_rows) == 0:
        sel_rows = range(len(qcx_list))
    if params.args.view_all:
        sel_rows = range(len(qcx_list))
        sel_cols = range(len(cfg_list))
    sel_cols = list(sel_cols)
    sel_rows = list(sel_rows)
    total = len(sel_cols) * len(sel_rows)
    rciter = itertools.product(sel_rows, sel_cols)
    prev_cfg = None
    skip_to = util.get_arg('--skip-to', default=None)
    dev_mode = util.get_arg('--devmode', default=False)
    skip_list = []
    if dev_mode:
        hs.prefs.display_cfg.N = 3
        df2.FONTS.axtitle = df2.FONTS.smaller
        df2.FONTS.xlabel = df2.FONTS.smaller
        df2.FONTS.figtitle = df2.FONTS.smaller
        df2.SAFE_POS['top'] = .8
        df2.SAFE_POS['bottom'] = .01
    for count, (r, c) in enumerate(rciter):
        if skip_to is not None:
            if count < skip_to:
                continue
        if count in skip_list:
            continue
        # Get row and column index
        qcx = qcx_list[r]
        query_cfg = cfg_list[c]
        print('\n\n___________________________________')
        print(' --- VIEW %d / %d --- ' % (count + 1, total))
        print('--------------------------------------')
        print('viewing (r, c) = (%r, %r)' % (r, c))
        # Load / Execute the query
        qreq = mc3.prep_query_request(qreq=qreq, qcxs=[qcx], dcxs=dcxs,
                                      query_cfg=query_cfg)
        qcx2_res = mc3.process_query_request(hs, qreq, safe=True)
        res = qcx2_res[qcx]
        # Print Query UID
        print(res.uid)
        # Draw Result
        #res.show_top(hs, fnum=fnum)
        if prev_cfg != query_cfg:
            # This is way too aggro. Needs to be a bit lazier
            hs.refresh_features()
        prev_cfg = query_cfg
        fnum = count
        title_uid = res.uid
        title_uid = title_uid.replace('_FEAT', '\n_FEAT')
        res.show_analysis(hs, fnum=fnum, aug='\n' + title_uid, annote=1,
                          show_name=False, show_gname=False,
                          time_appart=False)
        df2.adjust_subplots_safe()
        if params.args.save_figures:
            from hsviz import allres_viz
            allres_viz.dump(hs, 'analysis', quality=True, overwrite=False)
    if not __QUIET__:
        print('[harn] EXIT EXPERIMENT HARNESS')
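# Minimal sketch (pure numpy, no HotSpotter dependencies) of the rank-threshold
# scoring that fills nLessX_dict above: for each config column, count the
# queries whose best rank is below X, ignoring failed queries marked by a
# negative rank. Names here are illustrative.
import numpy as np

def _count_ranks_less_than(rank_mat, X_list=(1, 5)):
    nCfg = rank_mat.shape[1]
    nLessX_dict = {int(X): np.zeros(nCfg) for X in X_list}
    for cfgx in range(nCfg):
        ranks = rank_mat[:, cfgx]
        for X in X_list:
            nLessX_dict[int(X)][cfgx] = np.logical_and(ranks < X, ranks >= 0).sum()
    return nLessX_dict

def _demo_count_ranks():
    # rows = queries, cols = configs; -1 marks a query with no correct match
    rank_mat = np.array([[0, 3], [2, -1], [0, 0]])
    # expected: {1: [2, 1], 5: [3, 2]}
    return _count_ranks_less_than(rank_mat)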