def ensure_text(fname, text, repo_dpath='.', force=None, locals_=None, chmod=None):
    """
    Write ``text`` to ``repo_dpath/fname`` unless the file already exists.

    Args:
        fname (str): file name
        text (str): template text; ``{}``-style fields are filled from locals_
        repo_dpath (str): directory path string (default = '.')
        force (bool): overwrite an existing file; when None the
            ``--force-<fname>`` command line flag is consulted (default = None)
        locals_ (dict): format keywords for the text template (default = None)
        chmod: optional permission spec applied to the written file

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_git import *  # NOQA
        >>> import utool as ut
        >>> result = setup_repo()
        >>> print(result)
    """
    import utool as ut
    # Fix: the original used a mutable default (``locals_={}``), which is
    # shared across calls; use a None sentinel instead.
    if locals_ is None:
        locals_ = {}
    ut.colorprint('Ensuring fname=%r' % (fname), 'yellow')
    if force is None and ut.get_argflag('--force-%s' % (fname,)):
        force = True
    fpath = join(repo_dpath, fname)
    if force or not ut.checkpath(fpath, verbose=2, n=5):
        # Strip codeblock sentinel markers before template substitution
        text_ = ut.remove_codeblock_syntax_sentinals(text)
        fmtkw = locals_.copy()
        fmtkw['fname'] = fname
        text_ = text_.format(**fmtkw) + '\n'
        ut.writeto(fpath, text_)
        try:
            if chmod:
                ut.chmod(fpath, chmod)
        except Exception as ex:
            # A chmod failure should not abort the write; warn and continue
            ut.printex(ex, iswarning=True)
def ensure_text(fname, text, repo_dpath='.', force=None, locals_=None, chmod=None):
    """
    Format ``text`` and dump it to ``repo_dpath/fname`` as autogenerated code.

    Args:
        fname (str): file name
        text (str): template text; ``{}``-style fields are filled from locals_
        repo_dpath (str): directory path string (default = '.')
        force (bool): when None the ``--force-<fname>`` command line flag is
            consulted (default = None)
        locals_ (dict): format keywords for the text template (default = None)
        chmod: unused here; kept for signature compatibility with the
            util_git variant of this function

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_project import *  # NOQA
        >>> import utool as ut
        >>> result = setup_repo()
        >>> print(result)
    """
    import utool as ut
    # Fix: the original used a mutable default (``locals_={}``), which is
    # shared across calls; use a None sentinel instead.
    if locals_ is None:
        locals_ = {}
    ut.colorprint('Ensuring fname=%r' % (fname), 'yellow')
    # if not fname.endswith('__init__.py'):
    #     # HACK
    #     return
    if force is None and ut.get_argflag('--force-%s' % (fname,)):
        force = True
    # Strip codeblock sentinel markers before template substitution
    text_ = ut.remove_codeblock_syntax_sentinals(text)
    fmtkw = locals_.copy()
    fmtkw['fname'] = fname
    text_ = text_.format(**fmtkw) + '\n'
    fpath = join(repo_dpath, fname)
    # NOTE(review): ``force`` is computed but not passed on; presumably
    # dump_autogen_code consults the same command line flags — confirm.
    ut.dump_autogen_code(fpath, text_)
def print_priors(model, ignore_ttypes=None, title='Priors', color='blue'):
    """Pretty-print the prior CPDs of a model, grouped by ttype.

    Args:
        model: object exposing a ``ttype2_cpds`` mapping of ttype -> CPD list
        ignore_ttypes (list): ttypes to skip (default = None)
        title (str): heading printed above the output
        color (str): colorprint color name
    """
    # Fix: the original used a mutable default (``ignore_ttypes=[]``);
    # use a None sentinel instead.
    if ignore_ttypes is None:
        ignore_ttypes = []
    ut.colorprint('\n --- %s ---' % (title, ), color=color)
    for ttype, cpds in model.ttype2_cpds.items():
        if ttype not in ignore_ttypes:
            # print CPD tables side by side, four at a time
            for fs_ in ut.ichunks(cpds, 4):
                ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]), color)
def ensure_dependencies(request):
    r"""
    Compute (and thereby cache) every dependency-cache table that the
    request's target table transitively depends on, in topological order.

    CommandLine:
        python -m dtool.base --exec-BaseRequest.ensure_dependencies

    Example:
        >>> # ENABLE_DOCTEST
        >>> from dtool.base import *  # NOQA
        >>> from dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> request = depc.new_request('vsmany', [1, 2], [2, 3, 4])
        >>> request.ensure_dependencies()
    """
    import networkx as nx
    depc = request.depc
    if False:
        # Disabled experiment: derive the order from the ancestor subgraph
        # with per-node dijkstra paths instead of the level scheme below.
        dependencies = nx.ancestors(depc.graph, request.tablename)
        subgraph = depc.graph.subgraph(set.union(dependencies, {request.tablename}))
        dependency_order = nx.topological_sort(subgraph)
        root = dependency_order[0]
        [nx.algorithms.dijkstra_path(subgraph, root, start)[:-1] +
         nx.algorithms.dijkstra_path(subgraph, start, request.tablename)
         for start in dependency_order]
    graph = depc.graph
    # Any topological source works as the root of the dependency DAG
    root = list(nx.topological_sort(graph))[0]
    edges = graph.edges()
    #parent_to_children = ut.edges_to_adjacency_list(edges)
    # Reverse every edge so we can walk child -> parent toward the root
    child_to_parents = ut.edges_to_adjacency_list([t[::-1] for t in edges])
    to_root = {request.tablename:
               ut.paths_to_root(request.tablename, root, child_to_parents)}
    from_root = ut.reverse_path(to_root, root, child_to_parents)
    # Group tables into levels, then flatten; drop the root (first) and the
    # target table itself (last) — only the intermediates need ensuring here.
    dependency_levels_ = ut.get_levels(from_root)
    dependency_levels = ut.longest_levels(dependency_levels_)
    true_order = ut.flatten(dependency_levels)[1:-1]
    #print('[req] Ensuring %s request dependencies: %r' % (request, true_order,))
    ut.colorprint(
        '[req] Ensuring request %s dependencies: %r' % (request, true_order,),
        'yellow')
    for tablename in true_order:
        table = depc[tablename]
        if table.ismulti:
            # multi-input tables are skipped here (handled elsewhere)
            pass
        else:
            # HACK FOR IBEIS
            all_aids = ut.flat_unique(request.qaids, request.daids)
            depc.get_rowids(tablename, all_aids)
            pass
        pass
    #zip(depc.get_implicit_edges())
    #zip(depc.get_implicit_edges())
    #raise NotImplementedError('todo')
    #depc = request.depc
    #parent_rowids = request.parent_rowids
    #config = request.config
    #rowid_dict = depc.get_all_descendant_rowids(
    #    request.tablename, root_rowids, config=config)
    pass
def print_feature_info(testres):
    """
    draws keypoint statistics for each test configuration

    Args:
        testres (wbia.expt.test_result.TestResult): test result

    Ignore:
        import wbia.plottool as pt
        pt.qt4ensure()
        testres.draw_rank_cmc()

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.other.dbinfo import *  # NOQA
        >>> import wbia
        >>> ibs, testres = wbia.testdata_expts(defaultdb='PZ_MTEST', a='timectrl', t='invar:AI=False')
        >>> (tex_nKpts, tex_kpts_stats, tex_scale_stats) = feature_info(ibs)
        >>> result = ('(tex_nKpts, tex_kpts_stats, tex_scale_stats) = %s' % (ut.repr2((tex_nKpts, tex_kpts_stats, tex_scale_stats)),))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import wbia.plottool as pt
        >>> ut.show_if_requested()
    """
    import vtool as vt

    # ibs = testres.ibs
    def print_feat_stats(kpts, vecs):
        # NOTE(review): reads ``prefix`` from the enclosing loop via closure,
        # so it is only valid while the inner loop below is running.
        assert len(vecs) == len(kpts), 'disagreement'
        logger.info('keypoints and vecs agree')
        flat_kpts = np.vstack(kpts)
        num_kpts = list(map(len, kpts))
        kpt_scale = vt.get_scales(flat_kpts)
        num_kpts_stats = ut.get_stats(num_kpts)
        scale_kpts_stats = ut.get_stats(kpt_scale)
        logger.info('Number of ' + prefix + ' keypoints: ' +
                    ut.repr3(num_kpts_stats, nl=0, precision=2))
        logger.info('Scale of ' + prefix + ' keypoints: ' +
                    ut.repr3(scale_kpts_stats, nl=0, precision=2))

    # One pass per test configuration
    for cfgx in range(testres.nConfig):
        logger.info('------------------')
        ut.colorprint(testres.cfgx2_lbl[cfgx], 'yellow')
        qreq_ = testres.cfgx2_qreq_[cfgx]
        depc = qreq_.ibs.depc_annot
        tablename = 'feat'
        # Report stats separately for the query side and the data side
        prefix_list = ['query', 'data']
        config_pair = [qreq_.query_config2_, qreq_.data_config2_]
        aids_pair = [qreq_.qaids, qreq_.daids]
        for prefix, aids, config in zip(prefix_list, aids_pair, config_pair):
            config_ = depc._ensure_config(tablename, config)
            ut.colorprint(prefix + ' Config: ' + str(config_), 'blue')
            # Get keypoints and SIFT descriptors for this config
            kpts = depc.get(tablename, aids, 'kpts', config=config_)
            vecs = depc.get(tablename, aids, 'vecs', config=config_)
            # Check various stats of these pairs
            print_feat_stats(kpts, vecs)
def print_pipe_configs(cfgdict_list, pipecfg_list):
    """Print each pipeline config with its varied-parameter label."""
    labels = get_varied_pipecfg_lbls(cfgdict_list, pipecfg_list)
    total = len(pipecfg_list)
    for index, (config, label) in enumerate(zip(pipecfg_list, labels)):
        print('+--- %d / %d ===' % (index, total))
        ut.colorprint(label, 'white')
        print(config.get_cfgstr())
        print('L___')
def print_pipe_configs(cfgdict_list, pipecfg_list):
    """Log each pipeline config together with its varied-parameter label."""
    labels = get_varied_pipecfg_lbls(cfgdict_list, pipecfg_list)
    total = len(pipecfg_list)
    for index, (config, label) in enumerate(zip(pipecfg_list, labels)):
        logger.info('+--- %d / %d ===' % (index, total))
        ut.colorprint(label, 'white')
        logger.info(config.get_cfgstr())
        logger.info('L___')
def print_factors(model, factor_list):
    """Group result factors by semantic type and log them in columns."""
    if hasattr(model, 'var2_cpd'):
        semtypes = [model.var2_cpd[factor.variables[0]].ttype
                    for factor in factor_list]
    else:
        # no CPD metadata available; treat everything as one group
        semtypes = [0] * len(factor_list)
    grouped = ut.group_items(factor_list, semtypes)
    for type_, factors in grouped.items():
        logger.info('Result Factors (%r)' % (type_, ))
        # order factors by their first variable for stable output
        sort_keys = [factor.variables[0] for factor in factors]
        factors = ut.sortedby(factors, sort_keys)
        # print factor tables side by side, four at a time
        for chunk in ut.ichunks(factors, 4):
            ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in chunk]),
                          'yellow')
def print_feature_info(testres):
    """
    draws keypoint statistics for each test configuration

    Args:
        testres (ibeis.expt.test_result.TestResult): test result

    Ignore:
        import plottool as pt
        pt.qt4ensure()
        testres.draw_rank_cdf()

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.other.dbinfo import *  # NOQA
        >>> import ibeis
        >>> ibs, testres = ibeis.testdata_expts(defaultdb='PZ_MTEST', a='timectrl', t='invar:AI=False')
        >>> (tex_nKpts, tex_kpts_stats, tex_scale_stats) = feature_info(ibs)
        >>> result = ('(tex_nKpts, tex_kpts_stats, tex_scale_stats) = %s' % (ut.repr2((tex_nKpts, tex_kpts_stats, tex_scale_stats)),))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import vtool as vt

    #ibs = testres.ibs
    def print_feat_stats(kpts, vecs):
        # NOTE(review): reads ``prefix`` from the enclosing loop via closure,
        # so it is only valid while the inner loop below is running.
        assert len(vecs) == len(kpts), 'disagreement'
        print('keypoints and vecs agree')
        flat_kpts = np.vstack(kpts)
        num_kpts = list(map(len, kpts))
        kpt_scale = vt.get_scales(flat_kpts)
        num_kpts_stats = ut.get_stats(num_kpts)
        scale_kpts_stats = ut.get_stats(kpt_scale)
        print('Number of ' + prefix + ' keypoints: ' +
              ut.repr3(num_kpts_stats, nl=0, precision=2))
        print('Scale of ' + prefix + ' keypoints: ' +
              ut.repr3(scale_kpts_stats, nl=0, precision=2))

    # One pass per test configuration
    for cfgx in range(testres.nConfig):
        print('------------------')
        ut.colorprint(testres.cfgx2_lbl[cfgx], 'yellow')
        qreq_ = testres.cfgx2_qreq_[cfgx]
        depc = qreq_.ibs.depc_annot
        tablename = 'feat'
        # Report stats separately for the query side and the data side
        prefix_list = ['query', 'data']
        config_pair = [qreq_.query_config2_, qreq_.data_config2_]
        aids_pair = [qreq_.qaids, qreq_.daids]
        for prefix, aids, config in zip(prefix_list, aids_pair, config_pair):
            config_ = depc._ensure_config(tablename, config)
            ut.colorprint(prefix + ' Config: ' + str(config_), 'blue')
            # Get keypoints and SIFT descriptors for this config
            kpts = depc.get(tablename, aids, 'kpts', config=config_)
            vecs = depc.get(tablename, aids, 'vecs', config=config_)
            # Check various stats of these pairs
            print_feat_stats(kpts, vecs)
def print_factors(model, factor_list):
    """Group result factors by semantic type and print them in columns."""
    if hasattr(model, 'var2_cpd'):
        semtypes = [model.var2_cpd[factor.variables[0]].ttype
                    for factor in factor_list]
    else:
        # no CPD metadata available; treat everything as one group
        semtypes = [0] * len(factor_list)
    grouped = ut.group_items(factor_list, semtypes)
    for type_, factors in grouped.items():
        print('Result Factors (%r)' % (type_,))
        # order factors by their first variable for stable output
        sort_keys = [factor.variables[0] for factor in factors]
        factors = ut.sortedby(factors, sort_keys)
        # print factor tables side by side, four at a time
        for chunk in ut.ichunks(factors, 4):
            ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in chunk]),
                          'yellow')
def execute(request, parent_rowids=None, use_cache=None, postprocess=True):
    """Run the request against its dependency-cache table and return results.

    Args:
        request: the request being executed (also serves as the config)
        parent_rowids: explicit parent rowids; defaults to the request's own
        use_cache (bool): when None, derived from the ``--nocache`` flag
        postprocess (bool): apply ``request.postprocess_execute`` if defined
    """
    ut.colorprint('[req] Executing request %s' % (request,), 'yellow')
    if use_cache is None:
        use_cache = not ut.get_argflag('--nocache')
    if parent_rowids is None:
        parent_rowids = request.parent_rowids
    table = request.depc[request.tablename]
    # Compute and cache any uncomputed results
    rowids = table.get_rowid(parent_rowids, config=request,
                             recompute=not use_cache)
    # Load all results
    result_list = table.get_row_data(rowids)
    if postprocess and hasattr(request, 'postprocess_execute'):
        logger.info('Converting results')
        result_list = request.postprocess_execute(parent_rowids, result_list)
    return result_list
def execute(request, parent_rowids=None, use_cache=None, postprocess=True):
    """Run the request against its dependency-cache table and return results.

    Args:
        request: the request being executed (also serves as the config)
        parent_rowids: explicit parent rowids; defaults to the request's own
        use_cache (bool): when None, derived from the ``--nocache`` flag
        postprocess (bool): apply ``request.postprocess_execute`` if defined
    """
    ut.colorprint('[req] Executing request %s' % (request,), 'yellow')
    if use_cache is None:
        use_cache = not ut.get_argflag('--nocache')
    if parent_rowids is None:
        parent_rowids = request.parent_rowids
    table = request.depc[request.tablename]
    # Compute and cache any uncomputed results
    rowids = table.get_rowid(parent_rowids, config=request,
                             recompute=not use_cache)
    # Load all results
    result_list = table.get_row_data(rowids)
    if postprocess and hasattr(request, 'postprocess_execute'):
        print('Converting results')
        result_list = request.postprocess_execute(parent_rowids, result_list)
    return result_list
def execute(request, parent_rowids=None, use_cache=None, postprocess=True):
    """ HACKY REIMPLEMENTATION

    Like the base ``execute`` but deduplicates symmetric (i, j)/(j, i) pairs
    before hitting the table, then re-expands results to the original order.

    Args:
        request: the request being executed (also serves as the config)
        parent_rowids: explicit parent rowids; defaults to the request's own
        use_cache (bool): when None, derived from the ``--nocache`` flag
        postprocess (bool): apply ``request.postprocess_execute`` if defined
    """
    ut.colorprint('[req] Executing request %s' % (request, ), 'yellow')
    table = request.depc[request.tablename]
    if use_cache is None:
        use_cache = not ut.get_argflag('--nocache')
    if parent_rowids is None:
        parent_rowids = request.parent_rowids
    else:
        # previously defined in execute subset
        # subparent_rowids = request.make_parent_rowids(
        #     qaids, request.daids)
        logger.info('given %d specific parent_rowids' % (len(parent_rowids), ))

    # vsone hack (i,j) same as (j,i)
    if request._symmetric:
        import vtool as vt
        directed_edges = np.array(parent_rowids)
        undirected_edges = vt.to_undirected_edges(directed_edges)
        edge_ids = vt.compute_unique_data_ids(undirected_edges)
        # unique_rowx selects one representative per undirected edge;
        # inverse_idx maps each original pair back to its representative.
        unique_rows, unique_rowx, inverse_idx = np.unique(
            edge_ids, return_index=True, return_inverse=True)
        parent_rowids_ = ut.take(parent_rowids, unique_rowx)
    else:
        parent_rowids_ = parent_rowids

    # Compute and cache any uncomputed results
    rowids = table.get_rowid(parent_rowids_, config=request,
                             recompute=not use_cache)
    # Load all results
    result_list = table.get_row_data(rowids)

    if request._symmetric:
        # Re-expand deduplicated results to match the original pair order
        # (``inverse_idx`` is only defined on the symmetric path above).
        result_list = ut.take(result_list, inverse_idx)

    if postprocess and hasattr(request, 'postprocess_execute'):
        logger.info('Converting results')
        result_list = request.postprocess_execute(parent_rowids, result_list)
        pass
    return result_list
def ensure_text(fname, text, repo_dpath='.', force=None, locals_=None, chmod=None):
    """
    Write ``text`` to ``repo_dpath/fname`` unless the file already exists.

    Args:
        fname (str): file name
        text (str): template text; ``{}``-style fields are filled from locals_
        repo_dpath (str): directory path string (default = '.')
        force (bool): overwrite an existing file; when None the
            ``--force-<fname>`` command line flag is consulted (default = None)
        locals_ (dict): format keywords for the text template (default = None)
        chmod: optional permission spec applied to the written file

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_git import *  # NOQA
        >>> import utool as ut
        >>> result = setup_repo()
        >>> print(result)
    """
    import utool as ut
    # Fix: the original used a mutable default (``locals_={}``), which is
    # shared across calls; use a None sentinel instead.
    if locals_ is None:
        locals_ = {}
    ut.colorprint('Ensuring fname=%r' % (fname), 'yellow')
    if force is None and ut.get_argflag('--force-%s' % (fname, )):
        force = True
    fpath = join(repo_dpath, fname)
    if force or not ut.checkpath(fpath, verbose=2, n=5):
        # Strip codeblock sentinel markers before template substitution
        text_ = ut.remove_codeblock_syntax_sentinals(text)
        fmtkw = locals_.copy()
        fmtkw['fname'] = fname
        text_ = text_.format(**fmtkw) + '\n'
        ut.writeto(fpath, text_)
        try:
            if chmod:
                ut.chmod(fpath, chmod)
        except Exception as ex:
            # A chmod failure should not abort the write; warn and continue
            ut.printex(ex, iswarning=True)
def execute(request, parent_rowids=None, use_cache=None, postprocess=True):
    """ HACKY REIMPLEMENTATION

    Like the base ``execute`` but deduplicates symmetric (i, j)/(j, i) pairs
    before hitting the table, then re-expands results to the original order.

    Args:
        request: the request being executed (also serves as the config)
        parent_rowids: explicit parent rowids; defaults to the request's own
        use_cache (bool): when None, derived from the ``--nocache`` flag
        postprocess (bool): apply ``request.postprocess_execute`` if defined
    """
    ut.colorprint('[req] Executing request %s' % (request,), 'yellow')
    table = request.depc[request.tablename]
    if use_cache is None:
        use_cache = not ut.get_argflag('--nocache')
    if parent_rowids is None:
        parent_rowids = request.parent_rowids
    else:
        print('given %d specific parent_rowids' % (len(parent_rowids),))

    # vsone hack (i,j) same as (j,i)
    if request._symmetric:
        import vtool as vt
        directed_edges = np.array(parent_rowids)
        undirected_edges = vt.to_undirected_edges(directed_edges)
        edge_ids = vt.compute_unique_data_ids(undirected_edges)
        # unique_rowx selects one representative per undirected edge;
        # inverse_idx maps each original pair back to its representative.
        unique_rows, unique_rowx, inverse_idx = np.unique(edge_ids,
                                                          return_index=True,
                                                          return_inverse=True)
        parent_rowids_ = ut.take(parent_rowids, unique_rowx)
    else:
        parent_rowids_ = parent_rowids

    # Compute and cache any uncomputed results
    rowids = table.get_rowid(parent_rowids_, config=request,
                             recompute=not use_cache)
    # Load all results
    result_list = table.get_row_data(rowids)

    if request._symmetric:
        # Re-expand deduplicated results to match the original pair order
        # (``inverse_idx`` is only defined on the symmetric path above).
        result_list = ut.take(result_list, inverse_idx)

    if postprocess and hasattr(request, 'postprocess_execute'):
        print('Converting results')
        result_list = request.postprocess_execute(parent_rowids, result_list)
        pass
    return result_list
def test_body2(count, logmode, backspace):
    """Exercise nested ProgressIter output, optionally while logging.

    Args:
        count: trial index, printed in the banner
        logmode (bool): when True, capture output in ``test.log``
        backspace (bool): passed through to ProgressIter
    """
    ut.colorprint('\n---- count = %r -----' % (count,), 'yellow')
    ut.colorprint('backspace = %r' % (backspace,), 'yellow')
    ut.colorprint('logmode = %r' % (logmode,), 'yellow')
    if logmode:
        ut.delete('test.log')
        ut.start_logging('test.log')
    print('Start main loop')
    import time
    # Fix: the original reused ``count`` for both loop variables, shadowing
    # the function argument; distinct names avoid the shadowing.
    for outer in ut.ProgressIter(range(2), freq=1, backspace=backspace):
        for inner in ut.ProgressIter(range(50), freq=1, backspace=backspace):
            time.sleep(.01)
    print('Done with main loop work')
    print('Exiting main body')
    if logmode:
        ut.stop_logging()
def test_body(count, logmode, backspace):
    """Exercise ProgressIter output, optionally logging and echoing the log.

    Args:
        count: trial index, printed in the banner
        logmode (bool): when True, capture output in ``test.log`` and echo it
        backspace (bool): passed through to ProgressIter
    """
    ut.colorprint('\n---- count = %r -----' % (count,), 'yellow')
    ut.colorprint('backspace = %r' % (backspace,), 'yellow')
    ut.colorprint('logmode = %r' % (logmode,), 'yellow')
    if logmode:
        ut.delete('test.log')
        ut.start_logging('test.log')
    print('Start main loop')
    import time
    # Fix: the original reused ``count`` as the loop variable, shadowing
    # the function argument; a distinct name avoids the shadowing.
    for step in ut.ProgressIter(range(20), freq=3, backspace=backspace):
        time.sleep(.01)
    print('Done with main loop work')
    print('Exiting main body')
    if logmode:
        ut.stop_logging()
        #print('-----DONE LOGGING----')
        # Echo the captured log with carriage returns expanded
        testlog_text = ut.readfrom('test.log')
        print(ut.indent(testlog_text.replace('\r', '\n'), ' '))
def test_body2(count, logmode, backspace):
    """Exercise nested ProgressIter output, optionally while logging.

    Args:
        count: trial index, printed in the banner
        logmode (bool): when True, capture output in ``test.log``
        backspace (bool): passed through to ProgressIter
    """
    ut.colorprint('\n---- count = %r -----' % (count, ), 'yellow')
    ut.colorprint('backspace = %r' % (backspace, ), 'yellow')
    ut.colorprint('logmode = %r' % (logmode, ), 'yellow')
    if logmode:
        ut.delete('test.log')
        ut.start_logging('test.log')
    print('Start main loop')
    import time
    # Fix: the original reused ``count`` for both loop variables, shadowing
    # the function argument; distinct names avoid the shadowing.
    for outer in ut.ProgressIter(range(2), freq=1, backspace=backspace):
        for inner in ut.ProgressIter(range(50), freq=1, backspace=backspace):
            time.sleep(.01)
    print('Done with main loop work')
    print('Exiting main body')
    if logmode:
        ut.stop_logging()
def test_body(count, logmode, backspace):
    """Exercise ProgressIter output, optionally logging and echoing the log.

    Args:
        count: trial index, printed in the banner
        logmode (bool): when True, capture output in ``test.log`` and echo it
        backspace (bool): passed through to ProgressIter
    """
    ut.colorprint('\n---- count = %r -----' % (count, ), 'yellow')
    ut.colorprint('backspace = %r' % (backspace, ), 'yellow')
    ut.colorprint('logmode = %r' % (logmode, ), 'yellow')
    if logmode:
        ut.delete('test.log')
        ut.start_logging('test.log')
    print('Start main loop')
    import time
    # Fix: the original reused ``count`` as the loop variable, shadowing
    # the function argument; a distinct name avoids the shadowing.
    for step in ut.ProgressIter(range(20), freq=3, backspace=backspace):
        time.sleep(.01)
    print('Done with main loop work')
    print('Exiting main body')
    if logmode:
        ut.stop_logging()
        #print('-----DONE LOGGING----')
        # Echo the captured log with carriage returns expanded
        testlog_text = ut.readfrom('test.log')
        print(ut.indent(testlog_text.replace('\r', '\n'), ' '))
def print_results(ibs, testres):
    """
    Prints results from an experiment harness run.  Rows store different qaids
    (query annotation ids) Cols store different configurations (algorithm
    parameters)

    Args:
        ibs (IBEISController):  ibeis controller object
        testres (test_result.TestResult):

    CommandLine:
        python dev.py -e print --db PZ_MTEST -a default:dpername=1,qpername=[1,2] -t default:fg_on=False
        python dev.py -e print -t best --db seals2 --allgt --vz
        python dev.py -e print --db PZ_MTEST --allgt -t custom --print-confusion-stats
        python dev.py -e print --db PZ_MTEST --allgt --noqcache --index 0:10:2 -t custom:rrvsone_on=True --print-confusion-stats
        python dev.py -e print --db PZ_MTEST --allgt --noqcache --qaid4 -t custom:rrvsone_on=True --print-confusion-stats
        python -m ibeis --tf print_results -t default --db PZ_MTEST -a ctrl
        python -m ibeis --tf print_results -t default --db PZ_MTEST -a ctrl
        python -m ibeis --tf print_results --db PZ_MTEST -a default -t default:lnbnn_on=True default:lnbnn_on=False,bar_l2_on=True default:lnbnn_on=False,normonly_on=True

    CommandLine:
        python -m ibeis.expt.experiment_printres --test-print_results
        utprof.py -m ibeis.expt.experiment_printres --test-print_results

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.experiment_printres import *  # NOQA
        >>> from ibeis.init import main_helpers
        >>> ibs, testres = main_helpers.testdata_expts(
        >>>     'PZ_MTEST', a='default:dpername=1,qpername=[1,2]', t='default:fg_on=False')
        >>> result = print_results(ibs, testres)
        >>> print(result)
    """
    (cfg_list, cfgx2_cfgresinfo, testnameid, cfgx2_lbl,
     cfgx2_qreq_) = ut.dict_take(testres.__dict__, [
         'cfg_list', 'cfgx2_cfgresinfo', 'testnameid', 'cfgx2_lbl',
         'cfgx2_qreq_'])

    # cfgx2_cfgresinfo is a list of dicts of lists
    # Parse result info out of the lists
    cfgx2_nextbestranks = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_next_bestranks')
    cfgx2_gt_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gt_raw_score')
    cfgx2_gf_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gf_raw_score')
    #cfgx2_aveprecs = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_avepercision')

    cfgx2_scorediffs = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_scorediff')
    #cfgx2_gt_raw_score = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gt_raw_score')

    # Compact per-config labels for CSV column headers
    column_lbls = [ut.remove_chars(ut.remove_vowels(lbl), [' ', ','])
                   for lbl in cfgx2_lbl]

    scorediffs_mat = np.array(ut.replace_nones(cfgx2_scorediffs, np.nan))

    print(' --- PRINT RESULTS ---')
    print(' use --rank-lt-list=1,5 to specify X_LIST')
    if True:
        # Num of ranks less than to score
        X_LIST = testres.get_X_LIST()
        #X_LIST = [1, 5]

        #nConfig = len(cfg_list)
        #nQuery = len(testres.qaids)
        cfgx2_nQuery = list(map(len, testres.cfgx2_qaids))
        #cfgx2_qx2_ranks = testres.get_infoprop_list('qx2_bestranks')
        #--------------------

        # A positive scorediff indicates the groundtruth was better than the
        # groundfalse scores
        istrue_list  = [scorediff > 0 for scorediff in scorediffs_mat]
        isfalse_list = [~istrue for istrue in istrue_list]

        #------------
        # Build Colscore
        nLessX_dict = testres.get_nLessX_dict()

        #------------
        best_rankscore_summary = []
        #to_intersect_list = []
        # print each configs scores less than X=thresh
        for X, cfgx2_nLessX in six.iteritems(nLessX_dict):
            max_nLessX = cfgx2_nLessX.max()
            bestX_cfgx_list = np.where(cfgx2_nLessX == max_nLessX)[0]
            best_rankscore = '[cfg*] %d cfg(s) scored ' % len(bestX_cfgx_list)
            # FIXME
            best_rankscore += rankscore_str(X, max_nLessX,
                                            cfgx2_nQuery[bestX_cfgx_list[0]])
            best_rankscore_summary += [best_rankscore]
            #to_intersect_list.append(ut.take(cfgx2_lbl, max_nLessX))

        #intersected = to_intersect_list[0] if len(to_intersect_list) > 0 else []
        #for ix in range(1, len(to_intersect_list)):
        #    intersected = np.intersect1d(intersected, to_intersect_list[ix])

    # NOTE(review): a large block of commented-out legacy code was here
    # (disabled row-label construction, per-query min-rank / hard-case
    # bookkeeping, and a memoized get_new_hard_qx_list helper).

    @ut.argv_flag_dec
    def intersect_hack():
        # Compare failures between the first two configs and print
        # annot-config stats for each groundtruth availability split.
        failed = testres.rank_mat > 0
        colx2_failed = [np.nonzero(failed_col)[0] for failed_col in failed.T]
        #failed_col2_only = np.setdiff1d(colx2_failed[1], colx2_failed[0])
        #failed_col2_only_aids = ut.take(testres.qaids, failed_col2_only)
        failed_col1_only = np.setdiff1d(colx2_failed[0], colx2_failed[1])
        failed_col1_only_aids = ut.take(testres.qaids, failed_col1_only)
        gt_aids1 = ibs.get_annot_groundtruth(
            failed_col1_only_aids, daid_list=testres.cfgx2_qreq_[0].daids)
        gt_aids2 = ibs.get_annot_groundtruth(
            failed_col1_only_aids, daid_list=testres.cfgx2_qreq_[1].daids)
        qaids_expt = failed_col1_only_aids
        gt_avl_aids1 = ut.flatten(gt_aids1)
        gt_avl_aids2 = list(set(ut.flatten(gt_aids2)).difference(gt_avl_aids1))
        ibs.print_annotconfig_stats(qaids_expt, gt_avl_aids1)
        ibs.print_annotconfig_stats(qaids_expt, gt_avl_aids2)
        #jsontext = ut.to_json({
        #    'qaids': list(qaids_expt),
        #    'dinclude_aids1': list(gt_aids_expt1),
        #    'dinclude_aids2': list(gt_aids_expt2),
        #})
        #annotation_configs.varysize_pzm
        #from ibeis.expt import annotation_configs

        acfg = testres.acfg_list[0]
        import copy
        acfg1 = copy.deepcopy(acfg)
        acfg2 = copy.deepcopy(acfg)
        acfg1['qcfg']['min_pername'] = None
        acfg2['qcfg']['min_pername'] = None
        acfg1['dcfg']['min_pername'] = None
        acfg2['dcfg']['min_gt_per_name'] = None

        acfg1['qcfg']['default_aids'] = qaids_expt
        acfg1['dcfg']['gt_avl_aids'] = gt_avl_aids1
        acfg2['qcfg']['default_aids'] = qaids_expt
        acfg2['dcfg']['gt_avl_aids'] = gt_avl_aids2

        from ibeis.init import filter_annots
        from ibeis.expt import experiment_helpers

        annots1 = filter_annots.expand_acfgs(ibs, acfg1, verbose=True)
        annots2 = filter_annots.expand_acfgs(ibs, acfg2, verbose=True)

        acfg_name_list = dict(  # NOQA
            acfg_list=[acfg1, acfg2],
            expanded_aids_list=[annots1, annots2],
        )
        test_cfg_name_list = ['candidacy_k']
        cfgdict_list, pipecfg_list = experiment_helpers.get_pipecfg_list(
            test_cfg_name_list, ibs=ibs)

        # NOTE(review): ``testres_list`` is not defined in this scope —
        # this decorated helper presumably only runs under its argv flag.
        t1, t2 = testres_list  # NOQA
    #ut.embed()
    #intersect_hack()

    # NOTE(review): commented-out print_rowlbl helper removed here.
    #------------

    @ut.argv_flag_dec
    def print_collbl():
        print('=====================')
        print('[harn] Col/Config Labels: %s' % testnameid)
        print('=====================')
        enum_cfgx2_lbl = ['%2d) %s' % (count, cfglbl)
                          for count, cfglbl in enumerate(cfgx2_lbl)]
        print('[harn] cfglbl:\n%s' % '\n'.join(enum_cfgx2_lbl))
    print_collbl()
    #------------

    @ut.argv_flag_dec
    def print_cfgstr():
        print('=====================')
        print('[harn] Config Strings: %s' % testnameid)
        print('=====================')
        cfgstr_list = [query_cfg.get_cfgstr() for query_cfg in cfg_list]
        enum_cfgstr_list = ['%2d) %s' % (count, cfgstr)
                            for count, cfgstr in enumerate(cfgstr_list)]
        print('\n[harn] cfgstr:\n%s' % '\n'.join(enum_cfgstr_list))
    print_cfgstr()
    #------------

    # NOTE(review): several commented-out optional printers were here
    # (print_rowscore, print_row_ave_precision, print_hardcase,
    # echo_hardcase, print_bestcfg, print_gtscore, print_best_rankmat,
    # print_diffmat, print_rankhist_time, print_rankhist, print_colmap).
    #------------

    @ut.argv_flag_dec_true
    def print_colscore():
        print('==================')
        print('[harn] Scores per Config: %s' % testnameid)
        print('==================')
        #for cfgx in range(nConfig):
        #    print('[score] %s' % (cfgx2_lbl[cfgx]))
        #    for X in X_LIST:
        #        nLessX_ = nLessX_dict[int(X)][cfgx]
        #        print('        ' + rankscore_str(X, nLessX_, nQuery))
        print('\n[harn] ... sorted scores')
        for X in X_LIST:
            print('\n[harn] Sorted #ranks < %r scores' % (X))
            # sort configs ascending by how many queries beat rank X
            sortx = np.array(nLessX_dict[int(X)]).argsort()
            #frac_list = (nLessX_dict[int(X)] / cfgx2_nQuery)[:, None]
            #print('cfgx2_nQuery = %r' % (cfgx2_nQuery,))
            #print('frac_list = %r' % (frac_list,))
            #print('Pairwise Difference: ' + str(ut.safe_pdist(frac_list, metric=ut.absdiff)))
            for cfgx in sortx:
                nLessX_ = nLessX_dict[int(X)][cfgx]
                rankstr = rankscore_str(X, nLessX_, cfgx2_nQuery[cfgx],
                                        withlbl=False)
                print('[score] %s --- %s' % (rankstr, cfgx2_lbl[cfgx]))
    print_colscore()
    #------------

    ut.argv_flag_dec(print_latexsum)(ibs, testres)

    @ut.argv_flag_dec
    def print_next_rankmat():
        # Prints nextbest ranks
        print('-------------')
        print('NextRankMat: %s' % testnameid)
        header = (' top false rank matrix: rows=queries, cols=cfgs:')
        print('\n'.join(cfgx2_lbl))
        column_list = cfgx2_nextbestranks
        print(ut.make_csv_table(column_list, row_lbls=testres.qaids,
                                column_lbls=column_lbls, header=header,
                                transpose=False,
                                use_lbl_width=len(cfgx2_lbl) < 5))
    print_next_rankmat()
    #------------

    @ut.argv_flag_dec
    def print_scorediff_mat():
        # Prints nextbest ranks
        print('-------------')
        print('ScoreDiffMat: %s' % testnameid)
        header = (' score difference between top true and top false: rows=queries, cols=cfgs:')
        print('\n'.join(cfgx2_lbl))
        column_list = cfgx2_scorediffs
        column_type = [float] * len(column_list)
        print(ut.make_csv_table(column_list, row_lbls=testres.qaids,
                                column_lbls=column_lbls,
                                column_type=column_type,
                                header=header,
                                transpose=False,
                                use_lbl_width=len(cfgx2_lbl) < 5))
    print_scorediff_mat(alias_flags=['--sdm'])
    #------------

    def jagged_stats_info(arr_, lbl, col_lbls):
        # Summarize a jagged per-config array, plus a selection of the
        # "interesting" columns.
        arr = ut.recursive_replace(arr_, np.inf, np.nan)
        # Treat infinite as nan
        stat_dict = ut.get_jagged_stats(arr, use_nan=True, use_sum=True)
        sel_stat_dict, sel_indices = ut.find_interesting_stats(
            stat_dict, col_lbls)
        sel_col_lbls = ut.take(col_lbls, sel_indices)
        statstr_kw = dict(precision=3, newlines=True, lbl=lbl, align=True)
        stat_str = ut.get_stats_str(stat_dict=stat_dict, **statstr_kw)
        sel_stat_str = ut.get_stats_str(stat_dict=sel_stat_dict, **statstr_kw)
        sel_stat_str = 'sel_col_lbls = %s' % (
            ut.list_str(sel_col_lbls), ) + '\n' + sel_stat_str
        return stat_str, sel_stat_str

    @ut.argv_flag_dec
    def print_confusion_stats():
        """
        CommandLine:
            python dev.py --allgt --print-scorediff-mat-stats --print-confusion-stats -t rrvsone_grid
        """
        # Prints nextbest ranks
        print('-------------')
        print('ScoreDiffMatStats: %s' % testnameid)
        print('column_lbls = %r' % (column_lbls, ))
        #cfgx2_gt_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gt_raw_score')
        #cfgx2_gf_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gf_raw_score')

        gt_rawscores_mat = ut.replace_nones(cfgx2_gt_rawscores, np.nan)
        gf_rawscores_mat = ut.replace_nones(cfgx2_gf_rawscores, np.nan)

        # Split raw scores into tp/fp/tn/fn using the scorediff sign masks
        tp_rawscores = vt.zipcompress(gt_rawscores_mat, istrue_list)
        fp_rawscores = vt.zipcompress(gt_rawscores_mat, isfalse_list)
        tn_rawscores = vt.zipcompress(gf_rawscores_mat, istrue_list)
        fn_rawscores = vt.zipcompress(gf_rawscores_mat, isfalse_list)

        tp_rawscores_str, tp_rawscore_statstr = jagged_stats_info(
            tp_rawscores, 'tp_rawscores', cfgx2_lbl)
        fp_rawscores_str, fp_rawscore_statstr = jagged_stats_info(
            fp_rawscores, 'fp_rawscores', cfgx2_lbl)
        tn_rawscores_str, tn_rawscore_statstr = jagged_stats_info(
            tn_rawscores, 'tn_rawscores', cfgx2_lbl)
        fn_rawscores_str, fn_rawscore_statstr = jagged_stats_info(
            fn_rawscores, 'fn_rawscores', cfgx2_lbl)

        #print(tp_rawscores_str)
        #print(fp_rawscores_str)
        #print(tn_rawscores_str)
        #print(fn_rawscores_str)

        print(tp_rawscore_statstr)
        print(fp_rawscore_statstr)
        print(tn_rawscore_statstr)
        print(fn_rawscore_statstr)

    print_confusion_stats(alias_flags=['--cs'])

    ut.argv_flag_dec_true(testres.print_percent_identification_success)()

    # Final banner summarizing the best configs per rank threshold
    sumstrs = []
    sumstrs.append('')
    sumstrs.append('||===========================')
    sumstrs.append('|| [cfg*] SUMMARY: %s' % testnameid)
    sumstrs.append('||---------------------------')
    sumstrs.append(ut.joins('\n|| ', best_rankscore_summary))
    sumstrs.append('||===========================')
    summary_str = '\n' + '\n'.join(sumstrs) + '\n'
    #print(summary_str)
    ut.colorprint(summary_str, 'blue')

    print('To enable all printouts add --print-all to the commandline')
def get_annotcfg_list(
    ibs,
    acfg_name_list,
    filter_dups=True,
    qaid_override=None,
    daid_override=None,
    initial_aids=None,
    use_cache=None,
    verbose=None,
):
    r"""
    Expands named annotation-config strings into concrete annot-config dicts
    and their expanded (qaids, daids) pairs.

    For now can only specify one acfg name list

    TODO: move to filter_annots

    Args:
        acfg_name_list (list): named annot-config strings to parse/expand
        filter_dups (bool): drop configs that expand to duplicate aid pairs
        qaid_override (list): forced query aids (also settable via --qaid)
        daid_override (list): forced database aids (also via --daids-override)
        initial_aids: seed aids passed through to expand_acfgs_consistently
        use_cache (bool): passed through to the expansion cache
        verbose (bool): passed through to expansion

    Returns:
        tuple: (acfg_list, expanded_aids_list) where expanded_aids_list is a
            list of (qaids, daids) tuples aligned with acfg_list

    CommandLine:
        python -m wbia get_annotcfg_list:0
        python -m wbia get_annotcfg_list:1
        python -m wbia get_annotcfg_list:2

        wbia get_annotcfg_list:0 --ainfo
        wbia get_annotcfg_list:0 --db NNP_Master3 -a viewpoint_compare --nocache-aid --verbtd
        wbia get_annotcfg_list:0 --db PZ_ViewPoints -a viewpoint_compare --nocache-aid --verbtd
        wbia get_annotcfg_list:0 --db PZ_MTEST -a unctrl ctrl::unctrl --ainfo --nocache-aid
        wbia get_annotcfg_list:0 --db testdb1 -a : --ainfo --nocache-aid
        wbia get_annotcfg_list:0 --db Oxford -a :qhas_any=query --ainfo --nocache-aid
        wbia get_annotcfg_list:0 --db Oxford -a :qhas_any=query,dhas_any=distractor --ainfo --nocache-aid

    Example0:
        >>> # DISABLE_DOCTEST
        >>> from wbia.expt.experiment_helpers import *  # NOQA
        >>> import wbia
        >>> from wbia.expt import annotation_configs
        >>> ibs = wbia.opendb(defaultdb='PZ_MTEST')
        >>> filter_dups = not ut.get_argflag('--nofilter-dups')
        >>> acfg_name_list = testdata_acfg_names()
        >>> _tup = get_annotcfg_list(ibs, acfg_name_list, filter_dups)
        >>> acfg_list, expanded_aids_list = _tup
        >>> print('\n PRINTING TEST RESULTS')
        >>> result = ut.repr2(acfg_list, nl=3)
        >>> print('\n')
        >>> #statskw = ut.parse_func_kwarg_keys(ibs.get_annot_stats_dict, with_vals=False)
        >>> printkw = dict(combined=True, per_name_vpedge=None,
        >>>                per_qual=False, per_vp=False, case_tag_hist=False)
        >>> annotation_configs.print_acfg_list(
        >>>     acfg_list, expanded_aids_list, ibs, **printkw)

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.expt.experiment_helpers import *  # NOQA
        >>> import wbia
        >>> from wbia.init import main_helpers
        >>> from wbia.expt import annotation_configs
        >>> ibs = wbia.opendb(defaultdb='PZ_MTEST')
        >>> aids = ibs.get_valid_aids()
        >>> main_helpers.monkeypatch_encounters(ibs, aids, days=50)
        >>> a = ['default:crossval_enc=True,require_timestamp=True']
        >>> acfg_name_list = testdata_acfg_names(a)
        >>> acfg_list, expanded_aids_list = get_annotcfg_list(ibs, acfg_name_list)
        >>> annotation_configs.print_acfg_list(acfg_list, expanded_aids_list)
        >>> # Restore state
        >>> main_helpers.unmonkeypatch_encounters(ibs)
    """
    if ut.VERBOSE:
        logger.info('[harn.help] building acfg_list using %r' % (acfg_name_list,))
    from wbia.expt import annotation_configs

    # Parse the named config strings into combo lists (one combo per name)
    acfg_combo_list = parse_acfg_combo_list(acfg_name_list)

    # acfg_slice = ut.get_argval('--acfg_slice', type_=slice, default=None)
    # HACK: Sliceing happens before expansion (dependenceis get)
    combo_slice = ut.get_argval(
        '--combo_slice', type_='fuzzy_subset', default=slice(None)
    )
    acfg_combo_list = [
        ut.take(acfg_combo_, combo_slice) for acfg_combo_ in acfg_combo_list
    ]

    if ut.get_argflag('--consistent'):
        # Expand everything as one consistent annot list
        acfg_combo_list = [ut.flatten(acfg_combo_list)]

    # + --- Do Parsing ---
    # Each combo expands to a list of (acfg, (qaids, daids)) pairs;
    # `base` disambiguates caches between combos.
    expanded_aids_combo_list = [
        filter_annots.expand_acfgs_consistently(
            ibs,
            acfg_combo_,
            initial_aids=initial_aids,
            use_cache=use_cache,
            verbose=verbose,
            base=base,
        )
        for base, acfg_combo_ in enumerate(acfg_combo_list)
    ]
    expanded_aids_combo_flag_list = ut.flatten(expanded_aids_combo_list)
    acfg_list = ut.get_list_column(expanded_aids_combo_flag_list, 0)
    expanded_aids_list = ut.get_list_column(expanded_aids_combo_flag_list, 1)
    # L___

    # Slicing happens after expansion (but the labels get screwed up)
    acfg_slice = ut.get_argval('--acfg_slice', type_='fuzzy_subset', default=None)
    if acfg_slice is not None:
        acfg_list = ut.take(acfg_list, acfg_slice)
        expanded_aids_list = ut.take(expanded_aids_list, acfg_slice)

    # + --- Hack: Override qaids ---
    # Command-line --qaid/--qaid-override replaces ALL expanded qaid lists.
    _qaids = ut.get_argval(
        ('--qaid', '--qaid-override'), type_=list, default=qaid_override
    )
    if _qaids is not None:
        expanded_aids_list = [(_qaids, daids) for qaids, daids in expanded_aids_list]
    # more hack for daids
    _daids = ut.get_argval(
        ('--daids-override', '--daid-override'), type_=list, default=daid_override
    )
    if _daids is not None:
        expanded_aids_list = [(qaids, _daids) for qaids, daids in expanded_aids_list]
    # L___

    if filter_dups:
        # Keep acfg_list aligned with the de-duplicated aid pairs
        expanded_aids_list, acfg_list = filter_duplicate_acfgs(
            expanded_aids_list, acfg_list, acfg_name_list
        )

    if ut.get_argflag(
        ('--acfginfo', '--ainfo', '--aidcfginfo', '--print-acfg', '--printacfg')
    ):
        # Info-only mode: print config details then terminate the process
        ut.colorprint('[experiment_helpers] Requested AcfgInfo ... ', 'red')
        logger.info('combo_slice = %r' % (combo_slice,))
        logger.info('acfg_slice = %r' % (acfg_slice,))
        annotation_configs.print_acfg_list(acfg_list, expanded_aids_list, ibs)
        ut.colorprint('[experiment_helpers] exiting due to AcfgInfo info request', 'red')
        sys.exit(0)

    return acfg_list, expanded_aids_list
def get_pipecfg_list(test_cfg_name_list, ibs=None, verbose=None):
    r"""
    Builds a list of varied query configurations. Only custom configs
    depend on an ibs object. The order of the output is not gaurenteed
    to aggree with input order.

    FIXME:
        This breaks if you proot=BC_DTW and ibs is None

    Args:
        test_cfg_name_list (list): list of strs
        ibs (wbia.IBEISController): wbia controller object (optional)

    Returns:
        tuple: (cfg_list, cfgx2_lbl) -
            cfg_list (list): list of config objects
            cfgx2_lbl (list): denotes which parameters are being varied.
                If there is just one config then nothing is varied

    CommandLine:
        python -m wbia get_pipecfg_list:0
        python -m wbia get_pipecfg_list:1 --db humpbacks
        python -m wbia get_pipecfg_list:2

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.expt.experiment_helpers import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> #test_cfg_name_list = ['best', 'custom', 'custom:sv_on=False']
        >>> #test_cfg_name_list = ['default', 'default:sv_on=False', 'best']
        >>> test_cfg_name_list = ['default', 'default:sv_on=False', 'best']
        >>> # execute function
        >>> (pcfgdict_list, pipecfg_list) = get_pipecfg_list(test_cfg_name_list, ibs)
        >>> # verify results
        >>> assert pipecfg_list[0].sv_cfg.sv_on is True
        >>> assert pipecfg_list[1].sv_cfg.sv_on is False
        >>> pipecfg_lbls = get_varied_pipecfg_lbls(pcfgdict_list)
        >>> result = ('pipecfg_lbls = '+ ut.repr2(pipecfg_lbls))
        >>> print(result)
        pipecfg_lbls = ['default:', 'default:sv_on=False']

    Example1:
        >>> # DISABLE_DOCTEST
        >>> import wbia_flukematch.plugin
        >>> from wbia.expt.experiment_helpers import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='humpbacks')
        >>> test_cfg_name_list = ['default:pipeline_root=BC_DTW,decision=average,crop_dim_size=[960,500]', 'default:K=[1,4]']
        >>> (pcfgdict_list, pipecfg_list) = get_pipecfg_list(test_cfg_name_list, ibs)
        >>> pipecfg_lbls = get_varied_pipecfg_lbls(pcfgdict_list)
        >>> result = ('pipecfg_lbls = '+ ut.repr2(pipecfg_lbls))
        >>> print(result)
        >>> print_pipe_configs(pcfgdict_list, pipecfg_list)
    """
    if verbose is None:
        verbose = ut.VERBOSE
    if ut.VERBOSE:
        logger.info(
            '[expt_help.get_pipecfg_list] building pipecfg_list using: %s'
            % test_cfg_name_list
        )
    # Accept a single cfgstr as well as a list of them
    if isinstance(test_cfg_name_list, six.string_types):
        test_cfg_name_list = [test_cfg_name_list]
    _standard_cfg_names = []
    _pcfgdict_list = []
    # HACK: Parse out custom configs first
    for test_cfg_name in test_cfg_name_list:
        if test_cfg_name.startswith('custom:') or test_cfg_name == 'custom':
            logger.info('[expthelpers] Parsing nonstandard custom config')
            # 'custom' configs were removed; fail loudly instead of silently
            assert False, 'custom is no longer supported'
            # if test_cfg_name.startswith('custom:'):
            #     # parse out modifications to custom
            #     cfgstr_list = ':'.join(test_cfg_name.split(':')[1:]).split(',')
            #     augcfgdict = ut.parse_cfgstr_list(cfgstr_list, smartcast=True)
            # else:
            #     augcfgdict = {}
            # # Take the configuration from the wbia object
            # pipe_cfg = ibs.--cfg.query_cfg.deepcopy()
            # # Update with augmented params
            # pipe_cfg.update_query_cfg(**augcfgdict)
            # # Parse out a standard cfgdict
            # cfgdict = dict(pipe_cfg.parse_items())
            # cfgdict['_cfgname'] = 'custom'
            # cfgdict['_cfgstr'] = test_cfg_name
            # _pcfgdict_list.append(cfgdict)
        else:
            _standard_cfg_names.append(test_cfg_name)
    # Handle stanndard configs next
    if len(_standard_cfg_names) > 0:
        # Get parsing information
        # cfg_default_dict = dict(Config.QueryConfig().parse_items())
        # valid_keys = list(cfg_default_dict.keys())
        cfgstr_list = _standard_cfg_names
        named_defaults_dict = ut.dict_subset(
            experiment_configs.__dict__, experiment_configs.TEST_NAMES
        )
        alias_keys = experiment_configs.ALIAS_KEYS
        # Parse standard pipeline cfgstrings
        metadata = {'ibs': ibs}
        dict_comb_list = cfghelpers.parse_cfgstr_list2(
            cfgstr_list,
            named_defaults_dict,
            cfgtype=None,
            alias_keys=alias_keys,
            # Hack out valid keys for humpbacks
            # valid_keys=valid_keys,
            strict=False,
            metadata=metadata,
        )
        # Get varied params (there may be duplicates)
        _pcfgdict_list.extend(ut.flatten(dict_comb_list))
    # Expand cfgdicts into PipelineConfig config objects
    # TODO: respsect different algorithm parameters like flukes
    if ibs is None:
        configclass_list = [Config.QueryConfig] * len(_pcfgdict_list)
    else:
        # Pick a config class per-cfgdict based on its pipeline root
        root_to_config = ibs.depc_annot.configclass_dict.copy()
        from wbia.algo.smk import smk_pipeline

        root_to_config['smk'] = smk_pipeline.SMKRequestConfig
        configclass_list = [
            root_to_config.get(
                _cfgdict.get('pipeline_root', _cfgdict.get('proot', 'vsmany')),
                Config.QueryConfig,
            )
            for _cfgdict in _pcfgdict_list
        ]
    _pipecfg_list = [
        cls(**_cfgdict) for cls, _cfgdict in zip(configclass_list, _pcfgdict_list)
    ]
    # Enforce rule that removes duplicate configs
    # by using feasiblity from wbia.algo.Config
    # TODO: Move this unique finding code to its own function
    # and then move it up one function level so even the custom
    # configs can be uniquified
    _flag_list = ut.flag_unique_items(_pipecfg_list)
    cfgdict_list = ut.compress(_pcfgdict_list, _flag_list)
    pipecfg_list = ut.compress(_pipecfg_list, _flag_list)
    if verbose:
        # for cfg in _pipecfg_list:
        #     logger.info(cfg.get_cfgstr())
        #     logger.info(cfg)
        logger.info(
            '[harn.help] return %d / %d unique pipeline configs from: %r'
            % (len(cfgdict_list), len(_pcfgdict_list), test_cfg_name_list)
        )
    if ut.get_argflag(('--pcfginfo', '--pinfo', '--pipecfginfo')):
        # Info-only mode: report configs then terminate the process
        ut.colorprint('Requested PcfgInfo for tests... ', 'red')
        print_pipe_configs(cfgdict_list, pipecfg_list)
        ut.colorprint('Finished Reporting PcfgInfo. Exiting', 'red')
        sys.exit(0)
    return (cfgdict_list, pipecfg_list)
def make_single_testres(ibs, qaids, daids, pipecfg_list, cfgx2_lbl,
                        cfgdict_list, lbl, testnameid, use_cache=None,
                        subindexer_partial=ut.ProgressIter):
    """
    Runs every pipeline configuration against a single (qaids, daids) pair
    and collects the per-config result info into a TestResult object.

    Args:
        ibs: controller object providing new_query_request / table_cache
        qaids (list): query annotation ids
        daids (list): database annotation ids
        pipecfg_list (list): pipeline config objects, one query per config
        cfgx2_lbl (list): label per config
        cfgdict_list (list): raw config dicts (stored on the result)
        lbl (str): label for the whole run
        testnameid (str): identifier printed per config
        use_cache (bool): big-cache toggle; defaults to USE_BIG_TEST_CACHE
        subindexer_partial: progress-iterator factory for the config loop

    Returns:
        test_result.TestResult: aggregated results, or None when DRY_RUN or
            NOMEMORY short-circuits the run.

    CommandLine:
        python -m ibeis.expt.harness --exec-run_test_configurations2
    """
    # Placeholder hook for slicing the configs during debugging
    cfgslice = None
    if cfgslice is not None:
        pipecfg_list = pipecfg_list[cfgslice]
    dbname = ibs.get_dbname()
    if ut.NOT_QUIET:
        print('[harn] Make single testres')
    cfgx2_qreq_ = [
        ibs.new_query_request(qaids, daids, verbose=False, query_cfg=pipe_cfg)
        for pipe_cfg in ut.ProgressIter(
            pipecfg_list, lbl='Building qreq_', enabled=False)
    ]
    if use_cache is None:
        use_cache = USE_BIG_TEST_CACHE
    if use_cache:
        # NOTE(review): this first call's result is discarded; kept because it
        # may have side effects (e.g. ensuring cache dirs) -- confirm.
        get_big_test_cache_info(ibs, cfgx2_qreq_)
        try:
            cachetup = get_big_test_cache_info(ibs, cfgx2_qreq_)
            testres = ut.load_cache(*cachetup)
            testres.cfgdict_list = cfgdict_list
            testres.cfgx2_lbl = cfgx2_lbl  # hack override
        except IOError:
            # Cache miss: fall through and compute
            pass
        else:
            if ut.NOT_QUIET:
                ut.colorprint('[harn] single testres cache hit... returning',
                              'turquoise')
            return testres
    if ibs.table_cache:
        # HACK: track the previous feature cfgstr so the table cache can be
        # cleared when the feature configuration changes between configs.
        prev_feat_cfgstr = None
    cfgx2_cfgresinfo = []
    #nPipeCfg = len(pipecfg_list)
    cfgiter = subindexer_partial(range(len(cfgx2_qreq_)), lbl='query config',
                                 freq=1, adjust=False, separate=True)
    # Run each pipeline configuration
    for cfgx in cfgiter:
        qreq_ = cfgx2_qreq_[cfgx]
        ut.colorprint('testnameid=%r' % (testnameid,), 'green')
        ut.colorprint(
            'annot_cfgstr = %s' %
            (qreq_.get_cfgstr(with_input=True, with_pipe=False),), 'yellow')
        ut.colorprint(
            'pipe_cfgstr= %s' % (qreq_.get_cfgstr(with_data=False),),
            'turquoise')
        ut.colorprint('pipe_hashstr = %s' % (qreq_.get_pipe_hashid(),), 'teal')
        if DRY_RUN:
            continue
        indent_prefix = '[%s cfg %d/%d]' % (
            dbname,
            # cfgiter.count (doesnt work when quiet)
            (cfgiter.parent_index * cfgiter.nTotal) + cfgx,
            cfgiter.nTotal * cfgiter.parent_nTotal)
        with ut.Indenter(indent_prefix):
            # Run the test / read cache
            _need_compute = True
            if use_cache:
                # smaller cache for individual configuration runs
                st_cfgstr = qreq_.get_cfgstr(with_input=True)
                bt_cachedir = cachetup[0]
                st_cachedir = ut.unixjoin(bt_cachedir, 'small_tests')
                st_cachename = 'smalltest'
                ut.ensuredir(st_cachedir)
                try:
                    cfgres_info = ut.load_cache(st_cachedir, st_cachename,
                                                st_cfgstr)
                except IOError:
                    _need_compute = True
                else:
                    _need_compute = False
            if _need_compute:
                assert not ibs.table_cache
                if ibs.table_cache:
                    # BUGFIX: was `len(prev_feat_cfgstr is not None and ...)`,
                    # which calls len() on a bool and raises TypeError.
                    if (prev_feat_cfgstr is not None and
                            prev_feat_cfgstr != qreq_.qparams.feat_cfgstr):
                        # Clear features to preserve memory
                        ibs.clear_table_cache()
                        #qreq_.ibs.print_cachestats_str()
                cfgres_info = get_query_result_info(qreq_)
                # record previous feature configuration
                if ibs.table_cache:
                    prev_feat_cfgstr = qreq_.qparams.feat_cfgstr
                if use_cache:
                    ut.save_cache(st_cachedir, st_cachename, st_cfgstr,
                                  cfgres_info)
            if not NOMEMORY:
                # Store the results
                cfgx2_cfgresinfo.append(cfgres_info)
            else:
                # Memory-saving mode: drop the request to free it
                cfgx2_qreq_[cfgx] = None
    if ut.NOT_QUIET:
        ut.colorprint('[harn] Completed running test configurations', 'white')
    if DRY_RUN:
        print('ran tests dryrun mode.')
        return
    if NOMEMORY:
        print('ran tests in memory savings mode. Cannot Print. exiting')
        return
    # Store all pipeline config results in a test result object
    testres = test_result.TestResult(pipecfg_list, cfgx2_lbl,
                                     cfgx2_cfgresinfo, cfgx2_qreq_)
    testres.testnameid = testnameid
    testres.lbl = lbl
    testres.cfgdict_list = cfgdict_list
    testres.aidcfg = None
    if use_cache:
        try:
            ut.save_cache(*tuple(list(cachetup) + [testres]))
        except Exception as ex:
            # Best-effort cache write; only escalate in strict mode
            ut.printex(ex, 'error saving testres cache', iswarning=True)
            if ut.SUPER_STRICT:
                raise
    return testres
def update_bindings():
    r"""
    Splices autogenerated FLANN binding code into the flann source tree.

    For each binding name, generates per-file code blocks (via autogen_parts)
    and either replaces the existing block in each target file (found via
    difflib matching) or appends the block near the file's EOF sentinel.
    Modified files are written back in place.

    Returns:
        dict: matchtups

    CommandLine:
        python ~/local/build_scripts/flannscripts/autogen_bindings.py --exec-update_bindings
        utprof.py ~/local/build_scripts/flannscripts/autogen_bindings.py --exec-update_bindings

    Example:
        >>> # DISABLE_DOCTEST
        >>> from autogen_bindings import *  # NOQA
        >>> import sys
        >>> import utool as ut
        >>> sys.path.append(ut.truepath('~/local/build_scripts/flannscripts'))
        >>> matchtups = update_bindings()
        >>> result = ('matchtups = %s' % (ut.repr2(matchtups),))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    from os.path import basename
    import difflib
    import numpy as np
    import re
    # C/python entry points to (re)generate bindings for
    binding_names = [
        'build_index',
        'used_memory',
        'add_points',
        'remove_point',
        'compute_cluster_centers',
        'load_index',
        'save_index',
        'find_nearest_neighbors',
        'radius_search',
        'remove_points',
        'free_index',
        'find_nearest_neighbors_index',
        # 'size',
        # 'veclen',
        # 'get_point',
        # 'flann_get_distance_order',
        # 'flann_get_distance_type',
        # 'flann_log_verbosity',
        # 'clean_removed_points',
    ]
    # Target files in the flann source tree that receive generated code
    _places = [
        '~/code/flann/src/cpp/flann/flann.cpp',
        '~/code/flann/src/cpp/flann/flann.h',
        '~/code/flann/src/python/pyflann/flann_ctypes.py',
        '~/code/flann/src/python/pyflann/index.py',
    ]
    # Per-file line prefix after which nothing should be inserted
    # (None = append at end of file)
    eof_sentinals = {
        # 'flann_ctypes.py': '# END DEFINE BINDINGS',
        'flann_ctypes.py': 'def ensure_2d_array(arr',
        # 'flann.h': '// END DEFINE BINDINGS',
        'flann.h': '#ifdef __cplusplus',
        'flann.cpp': None,
        'index.py': None,
    }
    # Per-file regex marking the start of a code block (used to find the
    # extent of an existing block to overwrite)
    block_sentinals = {
        'flann.h': re.escape('/**'),
        'flann.cpp': 'template *<typename Distance>',
        # 'flann_ctypes.py': '\n',
        'flann_ctypes.py': 'flann\.[a-z_.]* =',
        # 'index.py': '    def .*',
        'index.py': '    [^ ].*',
    }
    places = {
        basename(fpath): fpath for fpath in ut.lmap(ut.truepath, _places)
    }
    text_dict = ut.map_dict_vals(ut.readfrom, places)
    lines_dict = {key: val.split('\n') for key, val in text_dict.items()}
    orig_texts = text_dict.copy()  # NOQA
    binding_defs = {}
    named_blocks = {}
    print('binding_names = %r' % (binding_names,))
    # Pre-generate every binding's per-file code blocks and definitions
    for binding_name in binding_names:
        blocks, defs = autogen_parts(binding_name)
        binding_defs[binding_name] = defs
        named_blocks[binding_name] = blocks

    for binding_name in ut.ProgIter(binding_names):
        ut.colorprint('+--- GENERATE BINDING %s -----' % (binding_name,),
                      'yellow')
        blocks_dict = named_blocks[binding_name]
        for key in places.keys():
            ut.colorprint(
                '---- generating %s for %s -----' % (binding_name, key,),
                'yellow')
            # key = 'flann_ctypes.py'
            # print(text_dict[key])
            old_text = text_dict[key]
            line_list = lines_dict[key]
            #text = old_text
            block = blocks_dict[key]
            debug = ut.get_argflag('--debug')
            # debug = True
            # if debug:
            #     print(ut.highlight_code(block, splitext(key)[1]))
            # Find a place in the code that already exists
            searchblock = block
            if key.endswith('.cpp') or key.endswith('.h'):
                # Strip C comments before matching so comment churn does not
                # defeat the search
                searchblock = re.sub(ut.REGEX_C_COMMENT, '', searchblock,
                                     flags=re.MULTILINE | re.DOTALL)
            # Only match against the first few lines of the generated block
            searchblock = '\n'.join(searchblock.splitlines()[0:3])
            # @ut.cached_func(verbose=False)
            def cached_match(old_text, searchblock):
                # NOTE(review): the second return below is unreachable; the
                # comment further down suggests this was a deliberate hack.
                def isjunk(x):
                    return False
                    return x in ' \t,*()'
                def isjunk2(x):
                    return x in ' \t,*()'
                # Not sure why the first one just doesnt find it
                # isjunk = None
                # Try several matcher variants and pool all their matches
                sm = difflib.SequenceMatcher(isjunk, old_text, searchblock,
                                             autojunk=False)
                sm0 = difflib.SequenceMatcher(isjunk, old_text, searchblock,
                                              autojunk=True)
                sm1 = difflib.SequenceMatcher(isjunk2, old_text, searchblock,
                                              autojunk=False)
                sm2 = difflib.SequenceMatcher(isjunk2, old_text, searchblock,
                                              autojunk=True)
                matchtups = (sm.get_matching_blocks() +
                             sm0.get_matching_blocks() +
                             sm1.get_matching_blocks() +
                             sm2.get_matching_blocks())
                return matchtups
            matchtups = cached_match(old_text, searchblock)
            # Find a reasonable match in matchtups
            found = False
            if debug:
                # print('searchblock =\n%s' % (searchblock,))
                print('searchblock = %r' % (searchblock,))
            for (a, b, size) in matchtups:
                matchtext = old_text[a:a + size]
                pybind = binding_defs[binding_name]['py_binding_name']
                # Accept a match only if it actually mentions this binding
                if re.search(binding_name + '\\b', matchtext) or re.search(
                        pybind + '\\b', matchtext):
                    found = True
                    pos = a + size
                    if debug:
                        print('MATCHING TEXT')
                        print(matchtext)
                    break
                else:
                    if debug and 0:
                        print('Not matching')
                        print('matchtext = %r' % (matchtext,))
                        matchtext2 = old_text[a - 10:a + size + 20]
                        print('matchtext2 = %r' % (matchtext2,))
            if found:
                # Convert the character offset `pos` into a line index `row`
                linelens = np.array(ut.lmap(len, line_list)) + 1
                sumlen = np.cumsum(linelens)
                row = np.where(sumlen < pos)[0][-1] + 1
                #print(line_list[row])
                # Search for extents of the block to overwrite
                block_sentinal = block_sentinals[key]
                row1 = ut.find_block_end(row, line_list, block_sentinal, -1) - 1
                row2 = ut.find_block_end(row + 1, line_list, block_sentinal, +1)
                eof_sentinal = eof_sentinals[key]
                if eof_sentinal is not None:
                    print('eof_sentinal = %r' % (eof_sentinal,))
                    # Never overwrite past the EOF sentinel
                    row2 = min([
                        count for count, line in enumerate(line_list)
                        if line.startswith(eof_sentinal)
                    ][-1], row2)
                nr = len((block + '\n\n').splitlines())
                new_line_list = ut.insert_block_between_lines(
                    block + '\n', row1, row2, line_list)
                rtext1 = '\n'.join(line_list[row1:row2])
                rtext2 = '\n'.join(new_line_list[row1:row1 + nr])
                if debug:
                    print('-----')
                    ut.colorprint('FOUND AND REPLACING %s' % (binding_name,),
                                  'yellow')
                    print(ut.highlight_code(rtext1))
                if debug:
                    print('-----')
                    ut.colorprint(
                        'FOUND AND REPLACED WITH %s' % (binding_name,),
                        'yellow')
                    print(ut.highlight_code(rtext2))
                # NOTE(review): diff prints when --diff is ABSENT; looks
                # inverted -- confirm intent.
                if not ut.get_argflag('--diff') and not debug:
                    print(
                        ut.color_diff_text(
                            ut.difftext(rtext1, rtext2, num_context_lines=7,
                                        ignore_whitespace=True)))
            else:
                # Append to end of the file
                eof_sentinal = eof_sentinals[key]
                if eof_sentinal is None:
                    row2 = len(line_list) - 1
                else:
                    row2_choice = [
                        count for count, line in enumerate(line_list)
                        if line.startswith(eof_sentinal)
                    ]
                    if len(row2_choice) == 0:
                        row2 = len(line_list) - 1
                        assert False
                    else:
                        row2 = row2_choice[-1] - 1
                # row1 = row2 - 1
                # row2 = row2 - 1
                row1 = row2
                new_line_list = ut.insert_block_between_lines(
                    block + '\n', row1, row2, line_list)
                # block + '\n\n\n', row1, row2, line_list)
                rtext1 = '\n'.join(line_list[row1:row2])
                nr = len((block + '\n\n').splitlines())
                rtext2 = '\n'.join(new_line_list[row1:row1 + nr])
                if debug:
                    print('-----')
                    ut.colorprint(
                        'NOT FOUND AND REPLACING %s' % (binding_name,),
                        'yellow')
                    print(ut.highlight_code(rtext1))
                if debug:
                    print('-----')
                    ut.colorprint(
                        'NOT FOUND AND REPLACED WITH %s' % (binding_name,),
                        'yellow')
                    print(ut.highlight_code(rtext2))
                if not ut.get_argflag('--diff') and not debug:
                    print(
                        ut.color_diff_text(
                            ut.difftext(rtext1, rtext2, num_context_lines=7,
                                        ignore_whitespace=True)))
            # Carry the modified text forward for the next binding
            text_dict[key] = '\n'.join(new_line_list)
            lines_dict[key] = new_line_list
        ut.colorprint('L___ GENERATED BINDING %s ___' % (binding_name,),
                      'yellow')

    # Write every modified file back in place
    for key in places:
        new_text = '\n'.join(lines_dict[key])
        #ut.writeto(ut.augpath(places[key], '.new'), new_text)
        ut.writeto(ut.augpath(places[key]), new_text)

    for key in places:
        if ut.get_argflag('--diff'):
            # NOTE(review): `new_text` here is left over from the previous
            # loop (always the last file's text); per-key diffs other than
            # the last compare against the wrong text -- confirm.
            difftext = ut.get_textdiff(orig_texts[key], new_text,
                                       num_context_lines=7,
                                       ignore_whitespace=True)
            difftext = ut.color_diff_text(difftext)
            print(difftext)
def make_single_testres(ibs, qaids, daids, pipecfg_list, cfgx2_lbl,
                        cfgdict_list, lbl, testnameid, use_cache=None,
                        subindexer_partial=ut.ProgressIter):
    """
    Runs every pipeline configuration against a single (qaids, daids) pair
    and collects the per-config result info into a TestResult object.

    NOTE(review): this function is defined twice in this file with identical
    behavior; this second definition shadows the first at import time.
    Consider removing one copy.

    Args:
        ibs: controller object providing new_query_request / table_cache
        qaids (list): query annotation ids
        daids (list): database annotation ids
        pipecfg_list (list): pipeline config objects, one query per config
        cfgx2_lbl (list): label per config
        cfgdict_list (list): raw config dicts (stored on the result)
        lbl (str): label for the whole run
        testnameid (str): identifier printed per config
        use_cache (bool): big-cache toggle; defaults to USE_BIG_TEST_CACHE
        subindexer_partial: progress-iterator factory for the config loop

    Returns:
        test_result.TestResult: aggregated results, or None when DRY_RUN or
            NOMEMORY short-circuits the run.

    CommandLine:
        python -m ibeis.expt.harness --exec-run_test_configurations2
    """
    # Placeholder hook for slicing the configs during debugging
    cfgslice = None
    if cfgslice is not None:
        pipecfg_list = pipecfg_list[cfgslice]
    dbname = ibs.get_dbname()
    if ut.NOT_QUIET:
        print('[harn] Make single testres')
    cfgx2_qreq_ = [
        ibs.new_query_request(qaids, daids, verbose=False, query_cfg=pipe_cfg)
        for pipe_cfg in ut.ProgressIter(pipecfg_list, lbl='Building qreq_',
                                        enabled=False)
    ]
    if use_cache is None:
        use_cache = USE_BIG_TEST_CACHE
    if use_cache:
        # NOTE(review): this first call's result is discarded; kept because it
        # may have side effects (e.g. ensuring cache dirs) -- confirm.
        get_big_test_cache_info(ibs, cfgx2_qreq_)
        try:
            cachetup = get_big_test_cache_info(ibs, cfgx2_qreq_)
            testres = ut.load_cache(*cachetup)
            testres.cfgdict_list = cfgdict_list
            testres.cfgx2_lbl = cfgx2_lbl  # hack override
        except IOError:
            # Cache miss: fall through and compute
            pass
        else:
            if ut.NOT_QUIET:
                ut.colorprint('[harn] single testres cache hit... returning',
                              'turquoise')
            return testres
    if ibs.table_cache:
        # HACK: track the previous feature cfgstr so the table cache can be
        # cleared when the feature configuration changes between configs.
        prev_feat_cfgstr = None
    cfgx2_cfgresinfo = []
    #nPipeCfg = len(pipecfg_list)
    cfgiter = subindexer_partial(range(len(cfgx2_qreq_)), lbl='query config',
                                 freq=1, adjust=False, separate=True)
    # Run each pipeline configuration
    for cfgx in cfgiter:
        qreq_ = cfgx2_qreq_[cfgx]
        ut.colorprint('testnameid=%r' % (testnameid,), 'green')
        ut.colorprint('annot_cfgstr = %s' % (
            qreq_.get_cfgstr(with_input=True, with_pipe=False),), 'yellow')
        ut.colorprint('pipe_cfgstr= %s' % (
            qreq_.get_cfgstr(with_data=False),), 'turquoise')
        ut.colorprint('pipe_hashstr = %s' % (qreq_.get_pipe_hashid(),), 'teal')
        if DRY_RUN:
            continue
        indent_prefix = '[%s cfg %d/%d]' % (
            dbname,
            # cfgiter.count (doesnt work when quiet)
            (cfgiter.parent_index * cfgiter.nTotal) + cfgx,
            cfgiter.nTotal * cfgiter.parent_nTotal
        )
        with ut.Indenter(indent_prefix):
            # Run the test / read cache
            _need_compute = True
            if use_cache:
                # smaller cache for individual configuration runs
                st_cfgstr = qreq_.get_cfgstr(with_input=True)
                bt_cachedir = cachetup[0]
                st_cachedir = ut.unixjoin(bt_cachedir, 'small_tests')
                st_cachename = 'smalltest'
                ut.ensuredir(st_cachedir)
                try:
                    cfgres_info = ut.load_cache(st_cachedir, st_cachename,
                                                st_cfgstr)
                except IOError:
                    _need_compute = True
                else:
                    _need_compute = False
            if _need_compute:
                assert not ibs.table_cache
                if ibs.table_cache:
                    # BUGFIX: was `len(prev_feat_cfgstr is not None and ...)`,
                    # which calls len() on a bool and raises TypeError.
                    if (prev_feat_cfgstr is not None and
                            prev_feat_cfgstr != qreq_.qparams.feat_cfgstr):
                        # Clear features to preserve memory
                        ibs.clear_table_cache()
                        #qreq_.ibs.print_cachestats_str()
                cfgres_info = get_query_result_info(qreq_)
                # record previous feature configuration
                if ibs.table_cache:
                    prev_feat_cfgstr = qreq_.qparams.feat_cfgstr
                if use_cache:
                    ut.save_cache(st_cachedir, st_cachename, st_cfgstr,
                                  cfgres_info)
            if not NOMEMORY:
                # Store the results
                cfgx2_cfgresinfo.append(cfgres_info)
            else:
                # Memory-saving mode: drop the request to free it
                cfgx2_qreq_[cfgx] = None
    if ut.NOT_QUIET:
        ut.colorprint('[harn] Completed running test configurations', 'white')
    if DRY_RUN:
        print('ran tests dryrun mode.')
        return
    if NOMEMORY:
        print('ran tests in memory savings mode. Cannot Print. exiting')
        return
    # Store all pipeline config results in a test result object
    testres = test_result.TestResult(pipecfg_list, cfgx2_lbl,
                                     cfgx2_cfgresinfo, cfgx2_qreq_)
    testres.testnameid = testnameid
    testres.lbl = lbl
    testres.cfgdict_list = cfgdict_list
    testres.aidcfg = None
    if use_cache:
        try:
            ut.save_cache(*tuple(list(cachetup) + [testres]))
        except Exception as ex:
            # Best-effort cache write; only escalate in strict mode
            ut.printex(ex, 'error saving testres cache', iswarning=True)
            if ut.SUPER_STRICT:
                raise
    return testres
def print_results(ibs, testres): """ Prints results from an experiment harness run. Rows store different qaids (query annotation ids) Cols store different configurations (algorithm parameters) Args: ibs (IBEISController): ibeis controller object testres (test_result.TestResult): CommandLine: python dev.py -e print --db PZ_MTEST -a default:dpername=1,qpername=[1,2] -t default:fg_on=False python dev.py -e print -t best --db seals2 --allgt --vz python dev.py -e print --db PZ_MTEST --allgt -t custom --print-confusion-stats python dev.py -e print --db PZ_MTEST --allgt --noqcache --index 0:10:2 -t custom:rrvsone_on=True --print-confusion-stats python dev.py -e print --db PZ_MTEST --allgt --noqcache --qaid4 -t custom:rrvsone_on=True --print-confusion-stats python -m ibeis --tf print_results -t default --db PZ_MTEST -a ctrl python -m ibeis --tf print_results -t default --db PZ_MTEST -a ctrl python -m ibeis --tf print_results --db PZ_MTEST -a default -t default:lnbnn_on=True default:lnbnn_on=False,bar_l2_on=True default:lnbnn_on=False,normonly_on=True CommandLine: python -m ibeis.expt.experiment_printres --test-print_results utprof.py -m ibeis.expt.experiment_printres --test-print_results Example: >>> # DISABLE_DOCTEST >>> from ibeis.expt.experiment_printres import * # NOQA >>> from ibeis.init import main_helpers >>> ibs, testres = main_helpers.testdata_expts( >>> 'PZ_MTEST', a='default:dpername=1,qpername=[1,2]', t='default:fg_on=False') >>> result = print_results(ibs, testres) >>> print(result) """ (cfg_list, cfgx2_cfgresinfo, testnameid, cfgx2_lbl, cfgx2_qreq_) = ut.dict_take( testres.__dict__, ['cfg_list', 'cfgx2_cfgresinfo', 'testnameid', 'cfgx2_lbl', 'cfgx2_qreq_']) # cfgx2_cfgresinfo is a list of dicts of lists # Parse result info out of the lists cfgx2_nextbestranks = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_next_bestranks') cfgx2_gt_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gt_raw_score') cfgx2_gf_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 
'qx2_gf_raw_score') #cfgx2_aveprecs = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_avepercision') cfgx2_scorediffs = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_scorediff') #cfgx2_gt_raw_score = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gt_raw_score') column_lbls = [ut.remove_chars(ut.remove_vowels(lbl), [' ', ',']) for lbl in cfgx2_lbl] scorediffs_mat = np.array(ut.replace_nones(cfgx2_scorediffs, np.nan)) print(' --- PRINT RESULTS ---') print(' use --rank-lt-list=1,5 to specify X_LIST') if True: # Num of ranks less than to score X_LIST = testres.get_X_LIST() #X_LIST = [1, 5] #nConfig = len(cfg_list) #nQuery = len(testres.qaids) cfgx2_nQuery = list(map(len, testres.cfgx2_qaids)) #cfgx2_qx2_ranks = testres.get_infoprop_list('qx2_bestranks') #-------------------- # A positive scorediff indicates the groundtruth was better than the # groundfalse scores istrue_list = [scorediff > 0 for scorediff in scorediffs_mat] isfalse_list = [~istrue for istrue in istrue_list] #------------ # Build Colscore nLessX_dict = testres.get_nLessX_dict() #------------ best_rankscore_summary = [] #to_intersect_list = [] # print each configs scores less than X=thresh for X, cfgx2_nLessX in six.iteritems(nLessX_dict): max_nLessX = cfgx2_nLessX.max() bestX_cfgx_list = np.where(cfgx2_nLessX == max_nLessX)[0] best_rankscore = '[cfg*] %d cfg(s) scored ' % len(bestX_cfgx_list) # FIXME best_rankscore += rankscore_str(X, max_nLessX, cfgx2_nQuery[bestX_cfgx_list[0]]) best_rankscore_summary += [best_rankscore] #to_intersect_list.append(ut.take(cfgx2_lbl, max_nLessX)) #intersected = to_intersect_list[0] if len(to_intersect_list) > 0 else [] #for ix in range(1, len(to_intersect_list)): # intersected = np.intersect1d(intersected, to_intersect_list[ix]) #if False: # #gt_raw_score_mat = np.vstack(cfgx2_gt_raw_score).T # #rank_mat = testres.get_rank_mat() # #------------ # # Build row lbls # if False: # qx2_lbl = np.array([ # 'qx=%d) q%s ' % (qx, ibsfuncs.aidstr(testres.qaids[qx], ibs=ibs, notes=True)) # for qx 
in range(nQuery)]) # #------------ # # Build Colscore and hard cases # if False: # qx2_min_rank = [] # qx2_argmin_rank = [] # new_hard_qaids = [] # new_hardtup_list = [] # for qx in range(nQuery): # ranks = rank_mat[qx] # valid_ranks = ranks[ranks >= 0] # min_rank = ranks.min() if len(valid_ranks) > 0 else -3 # bestCFG_X = np.where(ranks == min_rank)[0] # qx2_min_rank.append(min_rank) # # Find the best rank over all configurations # qx2_argmin_rank.append(bestCFG_X) #@ut.memoize #def get_new_hard_qx_list(testres): # """ Mark any query as hard if it didnt get everything correct """ # rank_mat = testres.get_rank_mat() # is_new_hard_list = rank_mat.max(axis=1) > 0 # new_hard_qx_list = np.where(is_new_hard_list)[0] # return new_hard_qx_list # new_hard_qx_list = testres.get_new_hard_qx_list() # for qx in new_hard_qx_list: # # New list is in aid format instead of cx format # # because you should be copying and pasting it # notes = ' ranks = ' + str(rank_mat[qx]) # qaid = testres.qaids[qx] # name = ibs.get_annot_names(qaid) # new_hardtup_list += [(qaid, name + " - " + notes)] # new_hard_qaids += [qaid] @ut.argv_flag_dec def intersect_hack(): failed = testres.rank_mat > 0 colx2_failed = [np.nonzero(failed_col)[0] for failed_col in failed.T] #failed_col2_only = np.setdiff1d(colx2_failed[1], colx2_failed[0]) #failed_col2_only_aids = ut.take(testres.qaids, failed_col2_only) failed_col1_only = np.setdiff1d(colx2_failed[0], colx2_failed[1]) failed_col1_only_aids = ut.take(testres.qaids, failed_col1_only) gt_aids1 = ibs.get_annot_groundtruth(failed_col1_only_aids, daid_list=testres.cfgx2_qreq_[0].daids) gt_aids2 = ibs.get_annot_groundtruth(failed_col1_only_aids, daid_list=testres.cfgx2_qreq_[1].daids) qaids_expt = failed_col1_only_aids gt_avl_aids1 = ut.flatten(gt_aids1) gt_avl_aids2 = list(set(ut.flatten(gt_aids2)).difference(gt_avl_aids1)) ibs.print_annotconfig_stats(qaids_expt, gt_avl_aids1) ibs.print_annotconfig_stats(qaids_expt, gt_avl_aids2) #jsontext = ut.to_json({ # 
'qaids': list(qaids_expt), # 'dinclude_aids1': list(gt_aids_expt1), # 'dinclude_aids2': list(gt_aids_expt2), #}) #annotation_configs.varysize_pzm #from ibeis.expt import annotation_configs acfg = testres.acfg_list[0] import copy acfg1 = copy.deepcopy(acfg) acfg2 = copy.deepcopy(acfg) acfg1['qcfg']['min_pername'] = None acfg2['qcfg']['min_pername'] = None acfg1['dcfg']['min_pername'] = None acfg2['dcfg']['min_gt_per_name'] = None acfg1['qcfg']['default_aids'] = qaids_expt acfg1['dcfg']['gt_avl_aids'] = gt_avl_aids1 acfg2['qcfg']['default_aids'] = qaids_expt acfg2['dcfg']['gt_avl_aids'] = gt_avl_aids2 from ibeis.init import filter_annots from ibeis.expt import experiment_helpers annots1 = filter_annots.expand_acfgs(ibs, acfg1, verbose=True) annots2 = filter_annots.expand_acfgs(ibs, acfg2, verbose=True) acfg_name_list = dict( # NOQA acfg_list=[acfg1, acfg2], expanded_aids_list=[annots1, annots2], ) test_cfg_name_list = ['candidacy_k'] cfgdict_list, pipecfg_list = experiment_helpers.get_pipecfg_list(test_cfg_name_list, ibs=ibs) t1, t2 = testres_list # NOQA #ut.embed() #intersect_hack() #@ut.argv_flag_dec #def print_rowlbl(): # print('=====================') # print('[harn] Row/Query Labels: %s' % testnameid) # print('=====================') # print('[harn] queries:\n%s' % '\n'.join(qx2_lbl)) #print_rowlbl() #------------ @ut.argv_flag_dec def print_collbl(): print('=====================') print('[harn] Col/Config Labels: %s' % testnameid) print('=====================') enum_cfgx2_lbl = ['%2d) %s' % (count, cfglbl) for count, cfglbl in enumerate(cfgx2_lbl)] print('[harn] cfglbl:\n%s' % '\n'.join(enum_cfgx2_lbl)) print_collbl() #------------ @ut.argv_flag_dec def print_cfgstr(): print('=====================') print('[harn] Config Strings: %s' % testnameid) print('=====================') cfgstr_list = [query_cfg.get_cfgstr() for query_cfg in cfg_list] enum_cfgstr_list = ['%2d) %s' % (count, cfgstr) for count, cfgstr in enumerate(cfgstr_list)] print('\n[harn] cfgstr:\n%s' 
% '\n'.join(enum_cfgstr_list)) print_cfgstr() #------------ #@ut.argv_flag_dec #def print_rowscore(): # print('=======================') # print('[harn] Scores per Query: %s' % testnameid) # print('=======================') # for qx in range(nQuery): # bestCFG_X = qx2_argmin_rank[qx] # min_rank = qx2_min_rank[qx] # minimizing_cfg_str = ut.indentjoin(cfgx2_lbl[bestCFG_X], '\n * ') # #minimizing_cfg_str = str(bestCFG_X) # print('-------') # print(qx2_lbl[qx]) # print(' best_rank = %d ' % min_rank) # if len(cfgx2_lbl) != 1: # print(' minimizing_cfg_x\'s = %s ' % minimizing_cfg_str) #print_rowscore() #------------ #@ut.argv_flag_dec #def print_row_ave_precision(): # print('=======================') # print('[harn] Scores per Query: %s' % testnameid) # print('=======================') # for qx in range(nQuery): # aveprecs = ', '.join(['%.2f' % (aveprecs[qx],) for aveprecs in cfgx2_aveprecs]) # print('-------') # print(qx2_lbl[qx]) # print(' aveprecs = %s ' % aveprecs) #print_row_ave_precision() ##------------ #@ut.argv_flag_dec #def print_hardcase(): # print('--- hard new_hardtup_list (w.r.t these configs): %s' % testnameid) # print('\n'.join(map(repr, new_hardtup_list))) # print('There are %d hard cases ' % len(new_hardtup_list)) # aid_list = [aid_notes[0] for aid_notes in new_hardtup_list] # name_list = ibs.get_annot_names(aid_list) # name_set = set(name_list) # print(sorted(aid_list)) # print('Names: %r' % (name_set,)) #print_hardcase() #default=not ut.get_argflag('--allhard')) #------------ #@ut.argv_flag_dec #def echo_hardcase(): # print('--- hardcase commandline: %s' % testnameid) # # Show index for current query where hardids reside # #print('--index ' + (' '.join(map(str, new_hard_qx_list)))) # #print('--take new_hard_qx_list') # #hardaids_str = ' '.join(map(str, [' ', '--qaid'] + new_hard_qaids)) # hardaids_str = ' '.join(map(str, [' ', '--set-aids-as-hard'] + new_hard_qaids)) # print(hardaids_str) ##echo_hardcase(default=not ut.get_argflag('--allhard')) 
#echo_hardcase() #@ut.argv_flag_dec #def print_bestcfg(): # print('==========================') # print('[harn] Best Configurations: %s' % testnameid) # print('==========================') # # print each configs scores less than X=thresh # for X, cfgx2_nLessX in six.iteritems(nLessX_dict): # max_LessX = cfgx2_nLessX.max() # bestCFG_X = np.where(cfgx2_nLessX == max_LessX)[0] # best_rankscore = '[cfg*] %d cfg(s) scored ' % len(bestCFG_X) # best_rankscore += rankscore_str(X, max_LessX, nQuery) # cfglbl_list = cfgx2_lbl[bestCFG_X] # best_rankcfg = format_cfgstr_list(cfglbl_list) # #indent('\n'.join(cfgstr_list), ' ') # print(best_rankscore) # print(best_rankcfg) # print('[cfg*] %d cfg(s) are the best of %d total cfgs' % (len(intersected), nConfig)) # print(format_cfgstr_list(intersected)) #print_bestcfg() #------------ #@ut.argv_flag_dec #def print_gtscore(): # # Prints best ranks # print('gtscore_mat: %s' % testnameid) # print(' nRows=%r, nCols=%r' % (nQuery, nConfig)) # header = (' labled rank matrix: rows=queries, cols=cfgs:') # print('\n'.join(cfgx2_lbl)) # column_list = gt_raw_score_mat.T # print(ut.make_csv_table(column_list, row_lbls=testres.qaids, # column_lbls=column_lbls, header=header, # transpose=False, # use_lbl_width=len(cfgx2_lbl) < 5)) #print_gtscore() #------------ #@ut.argv_flag_dec #def print_best_rankmat(): # # Prints best ranks # print('-------------') # print('RankMat: %s' % testnameid) # print(' nRows=%r, nCols=%r' % (nQuery, nConfig)) # header = (' labled rank matrix: rows=queries, cols=cfgs:') # print('\n'.join(cfgx2_lbl)) # column_list = rank_mat.T # print(ut.make_csv_table(column_list, row_lbls=testres.qaids, # column_lbls=column_lbls, header=header, # transpose=False, # use_lbl_width=len(cfgx2_lbl) < 5)) #print_best_rankmat() #@ut.argv_flag_dec #def print_diffmat(): # # score differences over configs # print('-------------') # print('Diffmat: %s' % testnameid) # diff_matstr = get_diffmat_str(rank_mat, testres.qaids, nConfig) # 
print(diff_matstr) #print_diffmat() #@ut.argv_flag_dec #def print_rankhist_time(): # print('A rank histogram is a dictionary. ' # 'The keys denote the range of the ranks that the values fall in') # # TODO: rectify this code with other hist code # config_gt_aids = ut.get_list_column(testres.cfgx2_cfgresinfo, 'qx2_gt_aid') # config_rand_bin_qxs = testres.get_rank_histogram_qx_binxs() # _iter = enumerate(zip(rank_mat.T, agg_hist_dict, config_gt_aids, config_rand_bin_qxs)) # for cfgx, (ranks, agg_hist_dict, qx2_gt_aid, config_binxs) in _iter: # #full_cfgstr = testres.cfgx2_qreq_[cfgx].get_full_cfgstr() # #ut.print_dict(ut.dict_hist(ranks), 'rank histogram', sorted_=True) # # find the qxs that belong to each bin # aid_list1 = testres.qaids # aid_list2 = qx2_gt_aid # ibs.assert_valid_aids(aid_list1) # ibs.assert_valid_aids(aid_list2) # timedelta_list = ibs.get_annot_pair_timdelta(aid_list1, aid_list2) # #timedelta_str_list = [ut.get_posix_timedelta_str2(delta) # # for delta in timedelta_list] # bin_edges = testres.get_rank_histogram_bin_edges() # timedelta_groups = ut.dict_take(ut.group_items(timedelta_list, config_binxs), np.arange(len(bin_edges)), []) # timedelta_stats = [ut.get_stats(deltas, use_nan=True, datacast=ut.get_posix_timedelta_str2) for deltas in timedelta_groups] # print('Time statistics for each rank range:') # print(ut.dict_str(dict(zip(bin_edges, timedelta_stats)), sorted_=True)) #print_rankhist_time() #@ut.argv_flag_dec #def print_rankhist(): # print('A rank histogram is a dictionary. 
' # 'The keys denote the range of the ranks that the values fall in') # # TODO: rectify this code with other hist code # config_gt_aids = ut.get_list_column(testres.cfgx2_cfgresinfo, 'qx2_gt_aid') # config_rand_bin_qxs = testres.get_rank_histogram_qx_binxs() # _iter = enumerate(zip(rank_mat.T, agg_hist_dict, config_gt_aids, config_rand_bin_qxs)) # for cfgx, (ranks, agg_hist_dict, qx2_gt_aid, config_binxs) in _iter: # print('Frequency of rank ranges:') # ut.print_dict(agg_hist_dict, 'agg rank histogram', sorted_=True) #print_rankhist() #------------ # Print summary #print(' --- SUMMARY ---') #------------ #@ut.argv_flag_dec #def print_colmap(): # print('==================') # print('[harn] mAP per Config: %s (sorted by mAP)' % testnameid) # print('==================') # cfgx2_mAP = np.array([aveprec_list.mean() for aveprec_list in cfgx2_aveprecs]) # sortx = cfgx2_mAP.argsort() # for cfgx in sortx: # print('[mAP] cfgx=%r) mAP=%.3f -- %s' % (cfgx, cfgx2_mAP[cfgx], cfgx2_lbl[cfgx])) # #print('L___ Scores per Config ___') #print_colmap() #------------ @ut.argv_flag_dec_true def print_colscore(): print('==================') print('[harn] Scores per Config: %s' % testnameid) print('==================') #for cfgx in range(nConfig): # print('[score] %s' % (cfgx2_lbl[cfgx])) # for X in X_LIST: # nLessX_ = nLessX_dict[int(X)][cfgx] # print(' ' + rankscore_str(X, nLessX_, nQuery)) print('\n[harn] ... 
sorted scores') for X in X_LIST: print('\n[harn] Sorted #ranks < %r scores' % (X)) sortx = np.array(nLessX_dict[int(X)]).argsort() #frac_list = (nLessX_dict[int(X)] / cfgx2_nQuery)[:, None] #print('cfgx2_nQuery = %r' % (cfgx2_nQuery,)) #print('frac_list = %r' % (frac_list,)) #print('Pairwise Difference: ' + str(ut.safe_pdist(frac_list, metric=ut.absdiff))) for cfgx in sortx: nLessX_ = nLessX_dict[int(X)][cfgx] rankstr = rankscore_str(X, nLessX_, cfgx2_nQuery[cfgx], withlbl=False) print('[score] %s --- %s' % (rankstr, cfgx2_lbl[cfgx])) print_colscore() #------------ ut.argv_flag_dec(print_latexsum)(ibs, testres) @ut.argv_flag_dec def print_next_rankmat(): # Prints nextbest ranks print('-------------') print('NextRankMat: %s' % testnameid) header = (' top false rank matrix: rows=queries, cols=cfgs:') print('\n'.join(cfgx2_lbl)) column_list = cfgx2_nextbestranks print(ut.make_csv_table(column_list, row_lbls=testres.qaids, column_lbls=column_lbls, header=header, transpose=False, use_lbl_width=len(cfgx2_lbl) < 5)) print_next_rankmat() #------------ @ut.argv_flag_dec def print_scorediff_mat(): # Prints nextbest ranks print('-------------') print('ScoreDiffMat: %s' % testnameid) header = (' score difference between top true and top false: rows=queries, cols=cfgs:') print('\n'.join(cfgx2_lbl)) column_list = cfgx2_scorediffs column_type = [float] * len(column_list) print(ut.make_csv_table(column_list, row_lbls=testres.qaids, column_lbls=column_lbls, column_type=column_type, header=header, transpose=False, use_lbl_width=len(cfgx2_lbl) < 5)) print_scorediff_mat(alias_flags=['--sdm']) #------------ def jagged_stats_info(arr_, lbl, col_lbls): arr = ut.recursive_replace(arr_, np.inf, np.nan) # Treat infinite as nan stat_dict = ut.get_jagged_stats(arr, use_nan=True, use_sum=True) sel_stat_dict, sel_indices = ut.find_interesting_stats(stat_dict, col_lbls) sel_col_lbls = ut.take(col_lbls, sel_indices) statstr_kw = dict(precision=3, newlines=True, lbl=lbl, align=True) stat_str = 
ut.get_stats_str(stat_dict=stat_dict, **statstr_kw) sel_stat_str = ut.get_stats_str(stat_dict=sel_stat_dict, **statstr_kw) sel_stat_str = 'sel_col_lbls = %s' % (ut.list_str(sel_col_lbls),) + '\n' + sel_stat_str return stat_str, sel_stat_str @ut.argv_flag_dec def print_confusion_stats(): """ CommandLine: python dev.py --allgt --print-scorediff-mat-stats --print-confusion-stats -t rrvsone_grid """ # Prints nextbest ranks print('-------------') print('ScoreDiffMatStats: %s' % testnameid) print('column_lbls = %r' % (column_lbls,)) #cfgx2_gt_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gt_raw_score') #cfgx2_gf_rawscores = ut.get_list_column(cfgx2_cfgresinfo, 'qx2_gf_raw_score') gt_rawscores_mat = ut.replace_nones(cfgx2_gt_rawscores, np.nan) gf_rawscores_mat = ut.replace_nones(cfgx2_gf_rawscores, np.nan) tp_rawscores = vt.zipcompress(gt_rawscores_mat, istrue_list) fp_rawscores = vt.zipcompress(gt_rawscores_mat, isfalse_list) tn_rawscores = vt.zipcompress(gf_rawscores_mat, istrue_list) fn_rawscores = vt.zipcompress(gf_rawscores_mat, isfalse_list) tp_rawscores_str, tp_rawscore_statstr = jagged_stats_info(tp_rawscores, 'tp_rawscores', cfgx2_lbl) fp_rawscores_str, fp_rawscore_statstr = jagged_stats_info(fp_rawscores, 'fp_rawscores', cfgx2_lbl) tn_rawscores_str, tn_rawscore_statstr = jagged_stats_info(tn_rawscores, 'tn_rawscores', cfgx2_lbl) fn_rawscores_str, fn_rawscore_statstr = jagged_stats_info(fn_rawscores, 'fn_rawscores', cfgx2_lbl) #print(tp_rawscores_str) #print(fp_rawscores_str) #print(tn_rawscores_str) #print(fn_rawscores_str) print(tp_rawscore_statstr) print(fp_rawscore_statstr) print(tn_rawscore_statstr) print(fn_rawscore_statstr) print_confusion_stats(alias_flags=['--cs']) ut.argv_flag_dec_true(testres.print_percent_identification_success)() sumstrs = [] sumstrs.append('') sumstrs.append('||===========================') sumstrs.append('|| [cfg*] SUMMARY: %s' % testnameid) sumstrs.append('||---------------------------') sumstrs.append(ut.joins('\n|| ', 
best_rankscore_summary)) sumstrs.append('||===========================') summary_str = '\n' + '\n'.join(sumstrs) + '\n' #print(summary_str) ut.colorprint(summary_str, 'blue') print('To enable all printouts add --print-all to the commandline')
def submit_query_request(qreq_, use_cache=None, use_bigcache=None,
                         verbose=None, save_qcache=None,
                         use_supercache=None, invalidate_supercache=None):
    """
    Called from qreq_.execute

    Checks a big cache for qaid2_cm.  If cache miss, tries to load each cm
    individually.  On an individual cache miss, it preforms the query.

    Args:
        qreq_ (QueryRequest): prebuilt query request object (must not be None)
        use_cache (bool): per-query cache flag; defaults to module USE_CACHE
        use_bigcache (bool): bundle cache flag; defaults to module USE_BIGCACHE
        verbose (bool): defaults to pipeline.VERB_PIPELINE
        save_qcache (bool): defaults to module SAVE_CACHE
        use_supercache (bool): defaults to module USE_SUPERCACHE
        invalidate_supercache (bool): passed through to execute_query_and_save_L1

    Returns:
        list: cm_list - one chip-match result per qaid in qreq_.qaids
        (entries are None when the request was deemed impossible and
        ut.SUPER_STRICT is off)

    CommandLine:
        python -m ibeis.algo.hots.match_chips4 --test-submit_query_request

    Examples:
        >>> # SLOW_DOCTEST
        >>> # xdoctest: +SKIP
        >>> from ibeis.algo.hots.match_chips4 import *  # NOQA
        >>> import ibeis
        >>> qaid_list = [1]
        >>> daid_list = [1, 2, 3, 4, 5]
        >>> use_bigcache = True
        >>> use_cache = True
        >>> ibs = ibeis.opendb(db='testdb1')
        >>> qreq_ = ibs.new_query_request(qaid_list, daid_list, verbose=True)
        >>> cm_list = submit_query_request(qreq_=qreq_)
    """
    # Get flag defaults if necessary (None means "use the module-level default")
    if verbose is None:
        verbose = pipeline.VERB_PIPELINE
    if use_cache is None:
        use_cache = USE_CACHE
    if save_qcache is None:
        save_qcache = SAVE_CACHE
    if use_bigcache is None:
        use_bigcache = USE_BIGCACHE
    if use_supercache is None:
        use_supercache = USE_SUPERCACHE
    # Create new query request object to store temporary state
    if verbose:
        #print('[mc4] --- Submit QueryRequest_ --- ')
        ut.colorprint('[mc4] --- Submit QueryRequest_ --- ', 'darkyellow')
    assert qreq_ is not None, 'query request must be prebuilt'
    # Check for empty queries; an impossible request downgrades to a warning
    # (and a list of Nones) unless SUPER_STRICT is set, in which case it raises.
    try:
        assert len(qreq_.daids) > 0, 'there are no database chips'
        assert len(qreq_.qaids) > 0, 'there are no query chips'
    except AssertionError as ex:
        ut.printex(ex, 'Impossible query request', iswarning=True,
                   keys=['qreq_.qaids', 'qreq_.daids'])
        if ut.SUPER_STRICT:
            raise
        cm_list = [None for qaid in qreq_.qaids]
    else:
        # --- BIG CACHE ---
        # Do not use bigcache single queries: bundling only pays off for
        # requests larger than MIN_BIGCACHE_BUNDLE.
        is_big = len(qreq_.qaids) > MIN_BIGCACHE_BUNDLE
        use_bigcache_ = (use_bigcache and use_cache and is_big)
        if (use_bigcache_ or save_qcache):
            cacher = qreq_.get_big_cacher()
            if use_bigcache_:
                try:
                    qaid2_cm = cacher.load()
                    cm_list = [qaid2_cm[qaid] for qaid in qreq_.qaids]
                except (IOError, AttributeError):
                    # cache miss or stale/incompatible cache object:
                    # fall through to executing the query below
                    pass
                else:
                    # big-cache hit: short-circuit and return immediately
                    return cm_list
        # ------------
        # Execute query request (loads/creates per-query results)
        qaid2_cm = execute_query_and_save_L1(
            qreq_, use_cache, save_qcache, verbose=verbose,
            use_supercache=use_supercache,
            invalidate_supercache=invalidate_supercache)
        # ------------
        # Only persist the bundle when the request was big enough to qualify
        if save_qcache and is_big:
            cacher.save(qaid2_cm)
        cm_list = [qaid2_cm[qaid] for qaid in qreq_.qaids]
    return cm_list
def print_templates(model, ignore_ttypes=None):
    """Print the model's CPD templates in psql table form.

    Args:
        model: object exposing a ``_templates`` sequence of template CPDs,
            each with a ``ttype`` attribute and a ``_cpdstr('psql')`` method
        ignore_ttypes (list): ttypes to skip (default = None, meaning none)
    """
    # NOTE: the default used to be a mutable `[]`; `None` avoids the shared
    # mutable-default pitfall while remaining backward compatible.
    if ignore_ttypes is None:
        ignore_ttypes = []
    templates = model._templates
    ut.colorprint('\n --- CPD Templates ---', 'blue')
    for temp_cpd in templates:
        if temp_cpd.ttype not in ignore_ttypes:
            ut.colorprint(temp_cpd._cpdstr('psql'), 'brightcyan')
def print_acfg_list(acfg_list, expanded_aids_list=None, ibs=None,
                    combined=False, **kwargs):
    r"""
    Print a compressed summary of a list of annotation configs, optionally
    with per-config annotation statistics (when ``ibs`` and
    ``expanded_aids_list`` are provided).  Duplicate aid-sets (detected by
    hashing qaids/daids) are reported as duplicates instead of re-printing
    their stats.

    Args:
        acfg_list (list):
        expanded_aids_list (list): list of (qaids, daids) tuples, parallel to
            acfg_list (default = None)
        ibs (IBEISController):  ibeis controller object(default = None)
        combined (bool): forwarded to ibs.get_annotconfig_stats (default = False)
        **kwargs: extra annot-stats options; filtered against the kwargs
            accepted by ibs.get_annot_stats_dict

    CommandLine:
        python -m ibeis.expt.annotation_configs --exec-print_acfg_list --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.annotation_configs import *  # NOQA
        >>> import ibeis
        >>> acfg_list = '?'
        >>> expanded_aids_list = None
        >>> ibs = None
        >>> combined = False
        >>> result = print_acfg_list(acfg_list, expanded_aids_list, ibs, combined)
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    # Split configs into the part shared by all of them and the varied parts
    _tup = compress_acfg_list_for_printing(acfg_list)
    nonvaried_compressed_dict, varied_compressed_dict_list = _tup

    ut.colorprint('+=== <Info acfg_list> ===', 'white')
    #print('Printing acfg_list info. len(acfg_list) = %r' % (len(acfg_list),))
    print('non-varied aidcfg = ' + ut.dict_str(nonvaried_compressed_dict))
    # maps (qaids-hash, daids-hash) -> list of acfg indexes already printed
    seen_ = ut.ddict(list)

    # get default kwkeys for annot info
    if ibs is not None:
        annotstats_kw = kwargs.copy()
        # only forward kwargs that get_annot_stats_dict actually accepts,
        # allowing command-line overrides via argparse_dict
        kwkeys = ut.parse_func_kwarg_keys(ibs.get_annot_stats_dict)
        annotstats_kw.update(ut.argparse_dict(
            dict(zip(kwkeys, [None] * len(kwkeys))), only_specified=True))

    hashid_list = []
    for acfgx in range(len(acfg_list)):
        acfg = acfg_list[acfgx]
        title = ('q_cfgname=' + acfg['qcfg']['_cfgname'] +
                 ' d_cfgname=' + acfg['dcfg']['_cfgname'])
        ut.colorprint('+--- acfg %d / %d -- %s ---- ' %
                      (acfgx + 1, len(acfg_list), title), 'lightgray')
        print('acfg = ' + ut.dict_str(varied_compressed_dict_list[acfgx],
                                      strvals=True))
        if expanded_aids_list is not None:
            qaids, daids = expanded_aids_list[acfgx]
            # hash the aid sets so identical expansions are only printed once
            key = (ut.hashstr_arr27(qaids, 'qaids'),
                   ut.hashstr_arr27(daids, 'daids'))
            if key not in seen_:
                if ibs is not None:
                    # NOTE: seen_ is only populated when ibs is available, so
                    # without an ibs object no duplicate detection happens
                    seen_[key].append(acfgx)
                    stats_, locals_ = ibs.get_annotconfig_stats(
                        qaids, daids, verbose=False, combined=combined,
                        **annotstats_kw)
                    hashids = (stats_['qaid_stats']['qhashid'],
                               stats_['daid_stats']['dhashid'])
                    hashid_list.append(hashids)
                    stats_str2 = ut.dict_str(stats_, strvals=True,
                                             newlines=True, explicit=False,
                                             nobraces=False)
                    print('annot_config_stats = ' + stats_str2)
            else:
                # same (qaids, daids) already summarized: point at the original
                dupindex = seen_[key]
                print('DUPLICATE of index %r' % (dupindex, ))
                dupdict = varied_compressed_dict_list[dupindex[0]]
                print('DUP OF acfg = ' + ut.dict_str(dupdict, strvals=True))
    print('hashid summary = ' + ut.list_str(hashid_list, nl=1))
    ut.colorprint('L___ </Info acfg_list> ___', 'white')
def print_templates(model, ignore_ttypes=[]):
    """Show each of the model's CPD templates as a psql-style table.

    Templates whose ``ttype`` appears in ``ignore_ttypes`` are not shown.
    """
    ut.colorprint('\n --- CPD Templates ---', 'blue')
    visible = (tmpl for tmpl in model._templates
               if tmpl.ttype not in ignore_ttypes)
    for tmpl in visible:
        ut.colorprint(tmpl._cpdstr('psql'), 'turquoise')
def netrun():
    r"""
    Main CLI driver for ibeis_cnn: builds/loads a dataset, constructs a model
    architecture, resolves initial weights, then runs the requested action
    (train / test / publish) based on parsed command-line flags.

    CommandLine:
        # --- UTILITY
        python -m ibeis_cnn --tf get_juction_dpath --show

        # --- DATASET BUILDING ---
        # Build Dataset Aliases
        python -m ibeis_cnn --tf netrun --db PZ_MTEST --acfg ctrl --ensuredata --show
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg timectrl --ensuredata
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg timectrl:pername=None --ensuredata
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg timectrl:pername=None --ensuredata
        python -m ibeis_cnn --tf netrun --db mnist --ensuredata --show
        python -m ibeis_cnn --tf netrun --db mnist --ensuredata --show --datatype=category
        python -m ibeis_cnn --tf netrun --db mnist --ensuredata --show --datatype=siam-patch

        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg ctrl:pername=None,excluderef=False,contributor_contains=FlankHack --ensuredata --show --datatype=siam-part

        # Parts based datasets
        python -m ibeis_cnn --tf netrun --db PZ_MTEST --acfg ctrl --datatype=siam-part --ensuredata --show

        % Patch based dataset (big one)
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok --ensuredata --show --vtd
        python -m ibeis_cnn --tf netrun --ds pzm4 --weights=new --arch=siaml2_128 --train --monitor
        python -m ibeis_cnn --tf netrun --ds pzm4 --arch=siaml2_128 --test
        python -m ibeis_cnn --tf netrun --ds pzm4 --arch=siaml2_128 --veryverbose --no-flask

        # --- TRAINING ---
        python -m ibeis_cnn --tf netrun --db PZ_Master1 --acfg default:is_known=True,qmin_pername=2,view=primary,species=primary,minqual=ok --weights=new --arch=siaml2_128 --train --monitor

        python -m ibeis_cnn --tf netrun --ds timectrl_pzmaster1 --acfg ctrl:pername=None,excluderef=False,contributor_contains=FlankHack --train --weights=new --arch=siaml2_128  --monitor  # NOQA
        python -m ibeis_cnn --tf netrun --ds timectrl_pzmaster1 --acfg ctrl:pername=None,excluderef=False --train --weights=new --arch=siaml2_128  --monitor  # NOQA
        python -m ibeis_cnn --tf netrun --ds pzmtest --weights=new --arch=siaml2_128 --train --monitor --DEBUG_AUGMENTATION
        python -m ibeis_cnn --tf netrun --ds pzmtest --weights=new --arch=siaml2_128 --train --monitor

        python -m ibeis_cnn --tf netrun --ds flankhack --weights=new --arch=siaml2_partmatch --train --monitor --learning_rate=.00001
        python -m ibeis_cnn --tf netrun --ds flankhack --weights=new --arch=siam_deepfaceish --train --monitor --learning_rate=.00001

        # Different ways to train mnist
        python -m ibeis_cnn --tf netrun --db mnist --weights=new --arch=mnist_siaml2 --train --monitor --datatype=siam-patch
        python -m ibeis_cnn --tf netrun --db mnist --weights=new --arch=mnist-category --train --monitor --datatype=category

        # --- INITIALIZED-TRAINING ---
        python -m ibeis_cnn --tf netrun --ds pzmtest --arch=siaml2_128 --weights=gz-gray:current --train --monitor

        # --- TESTING ---
        python -m ibeis_cnn --tf netrun --db liberty --weights=liberty:current --arch=siaml2_128 --test
        python -m ibeis_cnn --tf netrun --db PZ_Master0 --weights=combo:current --arch=siaml2_128 --testall

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_cnn.netrun import *  # NOQA
        >>> netrun()
        >>> ut.show_if_requested()
    """
    ut.colorprint('[netrun] NET RUN', 'red')

    # requests: action flags (train/test/testall/publish/ensuredata)
    # hyperparams: model hyperparameters; tags: dataset/arch/weights selectors
    requests, hyperparams, tags = parse_args()
    ds_tag         = tags['ds_tag']
    datatype       = tags['datatype']
    extern_ds_tag  = tags['extern_ds_tag']
    arch_tag       = tags['arch_tag']
    checkpoint_tag = tags['checkpoint_tag']

    # ----------------------------
    # Choose the main dataset
    ut.colorprint('[netrun] Ensuring Dataset', 'yellow')
    dataset = ingest_data.grab_dataset(ds_tag, datatype)
    if extern_ds_tag is not None:
        # weights may be loaded from a different (external) training dir
        extern_dpath = ingest_data.get_extern_training_dpath(extern_ds_tag)
    else:
        extern_dpath = None

    print('dataset.training_dpath = %r' % (dataset.training_dpath,))
    print('Dataset Alias Key: %r' % (dataset.alias_key,))
    print('Current Dataset Tag: %r' % (
        ut.invert_dict(DS_TAG_ALIAS2).get(dataset.alias_key, None),))

    if requests['ensuredata']:
        # Print alias key that maps to this particular dataset
        if ut.show_was_requested():
            interact_ = dataset.interact()  # NOQA
            return
        print('...exiting')
        sys.exit(1)

    # ----------------------------
    # Choose model architecture
    # TODO: data will need to return info about number of labels in viewpoint models
    # Specify model archichitecture
    ut.colorprint('[netrun] Architecture Specification', 'yellow')
    if arch_tag == 'siam2stream':
        model = models.SiameseCenterSurroundModel(
            data_shape=dataset.data_shape,
            training_dpath=dataset.training_dpath, **hyperparams)
    elif arch_tag.startswith('siam'):
        model = models.SiameseL2(
            data_shape=dataset.data_shape,
            arch_tag=arch_tag,
            training_dpath=dataset.training_dpath, **hyperparams)
    elif arch_tag == 'mnist-category':
        model = models.MNISTModel(
            data_shape=dataset.data_shape,
            output_dims=dataset.output_dims,
            arch_tag=arch_tag,
            training_dpath=dataset.training_dpath, **hyperparams)
        pass
    else:
        raise ValueError('Unknown arch_tag=%r' % (arch_tag,))

    ut.colorprint('[netrun] Initialize archchitecture', 'yellow')
    model.init_arch()

    # ----------------------------
    # Choose weight initialization
    ut.colorprint('[netrun] Setting weights', 'yellow')
    if checkpoint_tag == 'new':
        ut.colorprint('[netrun] * Initializing new weights', 'lightgray')
        model.reinit_weights()
    else:
        # resolve a possibly-fuzzy checkpoint pattern to an exact tag first
        checkpoint_tag = model.resolve_fuzzy_checkpoint_pattern(
            checkpoint_tag, extern_dpath)
        ut.colorprint('[netrun] * Resolving weights checkpoint_tag=%r' %
                      (checkpoint_tag,), 'lightgray')
        if extern_dpath is not None:
            model.load_extern_weights(dpath=extern_dpath,
                                      checkpoint_tag=checkpoint_tag)
        elif model.has_saved_state(checkpoint_tag=checkpoint_tag):
            model.load_model_state(checkpoint_tag=checkpoint_tag)
        else:
            # no resolvable weights: report what exists, then fail loudly
            model_state_fpath = model.get_model_state_fpath(
                checkpoint_tag=checkpoint_tag)
            print('model_state_fpath = %r' % (model_state_fpath,))
            ut.checkpath(model_state_fpath, verbose=True)
            print('Known checkpoints are: ' + ut.repr3(model.list_saved_checkpoints()))
            raise ValueError(('Unresolved weight init: '
                              'checkpoint_tag=%r, extern_ds_tag=%r') % (
                                  checkpoint_tag, extern_ds_tag,))

    #print('Model State:')
    #print(model.get_state_str())
    # ----------------------------
    if not model.is_train_state_initialized():
        ut.colorprint('[netrun] Need to initialize training state', 'yellow')
        X_train, y_train = dataset.subset('train')
        model.ensure_data_params(X_train, y_train)

    # Run Actions
    if requests['train']:
        ut.colorprint('[netrun] Training Requested', 'yellow')
        # parse training arguments (command line may override these defaults)
        config = ut.argparse_dict(dict(
            era_size=15,
            max_epochs=1200,
            rate_decay=.8,
        ))
        model.monitor_config.update(**config)
        X_train, y_train = dataset.subset('train')
        X_valid, y_valid = dataset.subset('valid')
        model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid)

    elif requests['test']:
        #assert model.best_results['epoch'] is not None
        ut.colorprint('[netrun] Test Requested', 'yellow')
        if requests['testall']:
            ut.colorprint('[netrun] * Testing on all data', 'lightgray')
            X_test, y_test = dataset.subset('all')
            flat_metadata = dataset.subset_metadata('all')
        else:
            ut.colorprint('[netrun] * Testing on test subset', 'lightgray')
            X_test, y_test = dataset.subset('test')
            flat_metadata = dataset.subset_metadata('test')
        data, labels = X_test, y_test
        dataname = dataset.alias_key
        experiments.test_siamese_performance(model, data, labels,
                                             flat_metadata, dataname)
    else:
        # neither --train nor --test: only legal together with --cmd (embed)
        if not ut.get_argflag('--cmd'):
            raise ValueError('nothing here. need to train or test')

    if requests['publish']:
        ut.colorprint('[netrun] Publish Requested', 'yellow')
        publish_dpath = ut.truepath('~/Dropbox/IBEIS')
        published_model_state = ut.unixjoin(
            publish_dpath, model.arch_tag + '_model_state.pkl')
        ut.copy(model.get_model_state_fpath(), published_model_state)
        ut.view_directory(publish_dpath)
        print('You need to get the dropbox link and '
              'register it into the appropriate file')
        # pip install dropbox
        # https://www.dropbox.com/developers/core/start/python
        # import dropbox  # need oauth
        #client.share('/myfile.txt', short_url=False)
        # https://cthulhu.dyn.wildme.io/public/models/siaml2_128_model_state.pkl

    if ut.get_argflag('--cmd'):
        ut.embed()
def run_test_configurations2(ibs, acfg_name_list, test_cfg_name_list,
                             use_cache=None, qaid_override=None,
                             daid_override=None, initial_aids=None):
    """
    Loops over annot configs.

    Try and use this function as a starting point to clean up this module.
    The code is getting too untenable.

    Args:
        ibs (IBEISController): ibeis controller object
        acfg_name_list (list): annot config names; must be non-empty
        test_cfg_name_list (list): pipeline config names
        use_cache (bool): forwarded to config expansion / testres building
        qaid_override (list): override the expanded query aids (default = None)
        daid_override (list): override the expanded database aids (default = None)
        initial_aids: seed aids for annot config expansion (default = None)

    Returns:
        list: testres_list - one test result per annot config
        (empty when DRY_RUN skips result collection; DRY_RUN exits the
        process at the end instead of returning)

    CommandLine:
        python -m ibeis.expt.harness --exec-run_test_configurations2

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.expt.harness import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> default_acfgstrs = ['controlled:qsize=20,dpername=1,dsize=10',
        >>>                     'controlled:qsize=20,dpername=10,dsize=100']
        >>> acfg_name_list = ut.get_argval(('--aidcfg', '--acfg', '-a'),
        >>>                                type_=list, default=default_acfgstrs)
        >>> test_cfg_name_list = ut.get_argval(('-t', '-p'), type_=list,
        >>>                                    default=['custom', 'custom:fg_on=False'])
        >>> use_cache = False
        >>> testres_list = run_test_configurations2(ibs, acfg_name_list,
        >>>                                         test_cfg_name_list, use_cache)
    """
    print('[harn] run_test_configurations2')
    # Generate list of database annotation configurations
    if len(acfg_name_list) == 0:
        raise ValueError('must give acfg name list')

    acfg_list, expanded_aids_list = experiment_helpers.get_annotcfg_list(
        ibs, acfg_name_list, qaid_override=qaid_override,
        daid_override=daid_override, initial_aids=initial_aids,
        use_cache=use_cache)

    # Generate list of query pipeline param configs
    cfgdict_list, pipecfg_list = experiment_helpers.get_pipecfg_list(
        test_cfg_name_list, ibs=ibs)

    cfgx2_lbl = experiment_helpers.get_varied_pipecfg_lbls(cfgdict_list)
    # NOTE: Can specify --pcfginfo or --acfginfo

    if ut.NOT_QUIET:
        ut.colorprint(textwrap.dedent("""

        [harn]================
        [harn] harness.test_configurations2()""").strip(), 'white')
        msg = '[harn] Running %s using %s and %s' % (
            ut.quantstr('test', len(acfg_list) * len(pipecfg_list)),
            ut.quantstr('pipeline config', len(pipecfg_list)),
            ut.quantstr('annot config', len(acfg_list)),
        )
        ut.colorprint(msg, 'white')

    testres_list = []

    nAcfg = len(acfg_list)

    # shared label identifying this whole test run in printouts
    testnameid = ibs.get_dbname() + ' ' + str(test_cfg_name_list) + str(
        acfg_name_list)
    lbl = '[harn] TEST_CFG ' + str(test_cfg_name_list) + str(acfg_name_list)
    expanded_aids_iter = ut.ProgressIter(expanded_aids_list,
                                         lbl='annot config',
                                         freq=1, autoadjust=False,
                                         enabled=ut.NOT_QUIET)

    for acfgx, (qaids, daids) in enumerate(expanded_aids_iter):
        assert len(qaids) != 0, ('[harness] No query annotations specified')
        assert len(daids) != 0, ('[harness] No database annotations specified')
        acfg = acfg_list[acfgx]
        if ut.NOT_QUIET:
            ut.colorprint('\n---Annot config testnameid=%r' % (
                testnameid, ), 'turquoise')
        # nested progress bar: each annot config drives a sub-iteration
        subindexer_partial = partial(ut.ProgressIter, parent_index=acfgx,
                                     parent_nTotal=nAcfg,
                                     enabled=ut.NOT_QUIET)
        testres = make_single_testres(ibs, qaids, daids, pipecfg_list,
                                      cfgx2_lbl, cfgdict_list, lbl,
                                      testnameid, use_cache=use_cache,
                                      subindexer_partial=subindexer_partial)
        if DRY_RUN:
            # dry runs exercise the machinery but keep no results
            continue
        testres.acfg = acfg
        testres.test_cfg_name_list = test_cfg_name_list
        testres_list.append(testres)
    if DRY_RUN:
        print('DRYRUN: Cannot continue past run_test_configurations2')
        sys.exit(0)

    return testres_list
def get_annotcfg_list(ibs, acfg_name_list, filter_dups=True,
                      qaid_override=None, daid_override=None,
                      initial_aids=None, use_cache=None, verbose=None):
    r"""
    For now can only specify one acfg name list

    TODO: move to filter_annots

    Expands named annot configs into concrete (qaids, daids) pairs, applying
    optional command-line slicing, aid overrides, and duplicate filtering.

    Args:
        annot_cfg_name_list (list):

    Returns:
        tuple: (acfg_list, expanded_aids_list) - parallel lists of expanded
        annot configs and their (qaids, daids) pairs

    CommandLine:
        python -m ibeis.expt.experiment_helpers --exec-get_annotcfg_list:0
        python -m ibeis.expt.experiment_helpers --exec-get_annotcfg_list:1
        python -m ibeis.expt.experiment_helpers --exec-get_annotcfg_list:2

        ibeis -e print_acfg --ainfo
        ibeis -e print_acfg --db NNP_Master3 -a viewpoint_compare --nocache-aid --verbtd
        ibeis -e print_acfg --db PZ_ViewPoints -a viewpoint_compare --nocache-aid --verbtd
        ibeis -e print_acfg --db PZ_MTEST -a unctrl ctrl::unctrl --ainfo --nocache-aid
        ibeis -e print_acfg --db testdb1 -a default --ainfo --nocache-aid
        ibeis -e print_acfg --db Oxford -a default:qhas_any=query --ainfo --nocache-aid
        ibeis -e print_acfg --db Oxford -a default:qhas_any=query,dhas_any=distractor --ainfo --nocache-aid

    Example0:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.experiment_helpers import *  # NOQA
        >>> import ibeis
        >>> from ibeis.expt import annotation_configs
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> filter_dups = not ut.get_argflag('--nofilter-dups')
        >>> acfg_name_list = testdata_acfg_names()
        >>> _tup = get_annotcfg_list(ibs, acfg_name_list, filter_dups)
        >>> acfg_list, expanded_aids_list = _tup
        >>> print('\n PRINTING TEST RESULTS')
        >>> result = ut.list_str(acfg_list, nl=3)
        >>> print('\n')
        >>> printkw = dict(combined=True, per_name_vpedge=None,
        >>>                per_qual=False, per_vp=False)
        >>> annotation_configs.print_acfg_list(
        >>>     acfg_list, expanded_aids_list, ibs, **printkw)
    """
    if ut.VERBOSE:
        print('[harn.help] building acfg_list using %r' % (acfg_name_list,))
    from ibeis.expt import annotation_configs
    acfg_combo_list = parse_acfg_combo_list(acfg_name_list)

    #acfg_slice = ut.get_argval('--acfg_slice', type_=slice, default=None)
    # HACK: Sliceing happens before expansion (dependenceis get)
    # --combo_slice subsets each combo BEFORE aids are expanded
    combo_slice = ut.get_argval('--combo_slice', type_='fuzzy_subset',
                                default=slice(None))
    acfg_combo_list = [ut.take(acfg_combo_, combo_slice)
                       for acfg_combo_ in acfg_combo_list]

    if ut.get_argflag('--consistent'):
        # Expand everything as one consistent annot list
        acfg_combo_list = [ut.flatten(acfg_combo_list)]

    # + --- Do Parsing ---
    # each combo is expanded consistently into (acfg, (qaids, daids)) pairs
    expanded_aids_combo_list = [
        filter_annots.expand_acfgs_consistently(ibs, acfg_combo_,
                                                initial_aids=initial_aids,
                                                use_cache=use_cache,
                                                verbose=verbose)
        for acfg_combo_ in acfg_combo_list
    ]
    expanded_aids_combo_flag_list = ut.flatten(expanded_aids_combo_list)
    acfg_list = ut.get_list_column(expanded_aids_combo_flag_list, 0)
    expanded_aids_list = ut.get_list_column(expanded_aids_combo_flag_list, 1)
    # L___

    # Slicing happens after expansion (but the labels get screwed up)
    acfg_slice = ut.get_argval('--acfg_slice', type_='fuzzy_subset',
                               default=None)
    if acfg_slice is not None:
        acfg_list = ut.take(acfg_list, acfg_slice)
        expanded_aids_list = ut.take(expanded_aids_list, acfg_slice)

    # + --- Hack: Override qaids ---
    # command line --qaid takes precedence over the qaid_override argument
    _qaids = ut.get_argval(('--qaid', '--qaid-override'), type_=list,
                           default=qaid_override)
    if _qaids is not None:
        expanded_aids_list = [(_qaids, daids)
                              for qaids, daids in expanded_aids_list]
    # more hack for daids
    _daids = ut.get_argval('--daids-override', type_=list,
                           default=daid_override)
    if _daids is not None:
        expanded_aids_list = [(qaids, _daids)
                              for qaids, daids in expanded_aids_list]
    # L___

    if filter_dups:
        # drop acfgs whose expansion duplicates an earlier one
        expanded_aids_list, acfg_list = filter_duplicate_acfgs(
            expanded_aids_list, acfg_list, acfg_name_list)

    if ut.get_argflag(('--acfginfo', '--ainfo', '--aidcfginfo',
                       '--print-acfg', '--printacfg')):
        # info-only mode: print the expansion summary and exit the process
        import sys
        ut.colorprint('[experiment_helpers] Requested AcfgInfo ... ', 'red')
        print('combo_slice = %r' % (combo_slice,))
        print('acfg_slice = %r' % (acfg_slice,))
        annotation_configs.print_acfg_list(acfg_list, expanded_aids_list, ibs)
        ut.colorprint('[experiment_helpers] exiting due to AcfgInfo info request', 'red')
        sys.exit(1)

    return acfg_list, expanded_aids_list
def run_test_configurations2(ibs, acfg_name_list, test_cfg_name_list,
                             use_cache=None, qaid_override=None,
                             daid_override=None, initial_aids=None):
    """
    Loops over annot configs.

    Try and use this function as a starting point to clean up this module.
    The code is getting too untenable.

    CommandLine:
        python -m ibeis.expt.harness --exec-run_test_configurations2

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.expt.harness import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> default_acfgstrs = ['controlled:qsize=20,dpername=1,dsize=10',
        >>>                     'controlled:qsize=20,dpername=10,dsize=100']
        >>> acfg_name_list = ut.get_argval(('--aidcfg', '--acfg', '-a'),
        >>>                                type_=list, default=default_acfgstrs)
        >>> test_cfg_name_list = ut.get_argval(('-t', '-p'), type_=list,
        >>>                                    default=['custom', 'custom:fg_on=False'])
        >>> use_cache = False
        >>> testres_list = run_test_configurations2(ibs, acfg_name_list,
        >>>                                         test_cfg_name_list, use_cache)
    """
    print('[harn] run_test_configurations2')
    # Generate list of database annotation configurations
    if len(acfg_name_list) == 0:
        raise ValueError('must give acfg name list')
    acfg_list, expanded_aids_list = experiment_helpers.get_annotcfg_list(
        ibs, acfg_name_list, qaid_override=qaid_override,
        daid_override=daid_override, initial_aids=initial_aids,
        use_cache=use_cache)
    # Generate list of query pipeline param configs
    cfgdict_list, pipecfg_list = experiment_helpers.get_pipecfg_list(
        test_cfg_name_list, ibs=ibs)
    cfgx2_lbl = experiment_helpers.get_varied_pipecfg_lbls(cfgdict_list)
    # NOTE: Can specify --pcfginfo or --acfginfo
    if ut.NOT_QUIET:
        ut.colorprint(textwrap.dedent("""

        [harn]================
        [harn] harness.test_configurations2()""").strip(), 'white')
        msg = '[harn] Running %s using %s and %s' % (
            ut.quantstr('test', len(acfg_list) * len(pipecfg_list)),
            ut.quantstr('pipeline config', len(pipecfg_list)),
            ut.quantstr('annot config', len(acfg_list)),
        )
        ut.colorprint(msg, 'white')
    testres_list = []
    nAcfg = len(acfg_list)
    # Human-readable identifier used in progress/log output below
    testnameid = ibs.get_dbname() + ' ' + str(test_cfg_name_list) + str(acfg_name_list)
    lbl = '[harn] TEST_CFG ' + str(test_cfg_name_list) + str(acfg_name_list)
    expanded_aids_iter = ut.ProgressIter(expanded_aids_list,
                                         lbl='annot config',
                                         freq=1, autoadjust=False,
                                         enabled=ut.NOT_QUIET)
    # One test-result bundle per expanded annotation configuration
    for acfgx, (qaids, daids) in enumerate(expanded_aids_iter):
        assert len(qaids) != 0, (
            '[harness] No query annotations specified')
        assert len(daids) != 0, (
            '[harness] No database annotations specified')
        acfg = acfg_list[acfgx]
        if ut.NOT_QUIET:
            ut.colorprint('\n---Annot config testnameid=%r' % (
                testnameid,), 'turquoise')
        # Nested progress bar anchored at this annot-config's position
        subindexer_partial = partial(ut.ProgressIter, parent_index=acfgx,
                                     parent_nTotal=nAcfg,
                                     enabled=ut.NOT_QUIET)
        testres = make_single_testres(ibs, qaids, daids, pipecfg_list,
                                      cfgx2_lbl, cfgdict_list, lbl,
                                      testnameid, use_cache=use_cache,
                                      subindexer_partial=subindexer_partial)
        # NOTE: make_single_testres still runs under DRY_RUN; only the
        # result bookkeeping is skipped.
        if DRY_RUN:
            continue
        testres.acfg = acfg
        testres.test_cfg_name_list = test_cfg_name_list
        testres_list.append(testres)
    if DRY_RUN:
        print('DRYRUN: Cannot continue past run_test_configurations2')
        sys.exit(0)
    return testres_list
def get_pipecfg_list(test_cfg_name_list, ibs=None):
    r"""
    Builds a list of varied query configurations. Only custom configs depend on
    an ibs object. The order of the output is not gaurenteed to aggree with
    input order.

    Args:
        test_cfg_name_list (list): list of strs
        ibs (IBEISController): ibeis controller object (optional)

    Returns:
        tuple: (cfg_list, cfgx2_lbl) -
            cfg_list (list): list of config objects
            cfgx2_lbl (list): denotes which parameters are being varied.
                If there is just one config then nothing is varied

    CommandLine:
        python -m ibeis.expt.experiment_helpers --exec-get_pipecfg_list:0
        python -m ibeis.expt.experiment_helpers --exec-get_pipecfg_list:1 --db humpbacks

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.expt.experiment_helpers import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> #test_cfg_name_list = ['best', 'custom', 'custom:sv_on=False']
        >>> #test_cfg_name_list = ['default', 'default:sv_on=False', 'best']
        >>> test_cfg_name_list = ['default', 'default:sv_on=False', 'best']
        >>> # execute function
        >>> (pcfgdict_list, pipecfg_list) = get_pipecfg_list(test_cfg_name_list, ibs)
        >>> # verify results
        >>> assert pipecfg_list[0].sv_cfg.sv_on is True
        >>> assert pipecfg_list[1].sv_cfg.sv_on is False
        >>> pipecfg_lbls = get_varied_pipecfg_lbls(pcfgdict_list)
        >>> result = ('pipecfg_lbls = '+ ut.list_str(pipecfg_lbls))
        >>> print(result)
        pipecfg_lbls = [
            'default:',
            'default:sv_on=False',
        ]

    Example1:
        >>> # DISABLE_DOCTEST
        >>> import ibeis_flukematch.plugin
        >>> from ibeis.expt.experiment_helpers import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='humpbacks')
        >>> test_cfg_name_list = ['default:pipeline_root=BC_DTW,decision=average', 'default:K=[1,4]']
        >>> (pcfgdict_list, pipecfg_list) = get_pipecfg_list(test_cfg_name_list, ibs)
        >>> pipecfg_lbls = get_varied_pipecfg_lbls(pcfgdict_list)
        >>> result = ('pipecfg_lbls = '+ ut.list_str(pipecfg_lbls))
        >>> print(result)
        >>> print_pipe_configs(pcfgdict_list, pipecfg_list)
    """
    if ut.VERBOSE:
        print('[expt_help.get_pipecfg_list] building pipecfg_list using: %s' %
              test_cfg_name_list)
    # Accept a single cfg name as a convenience
    if isinstance(test_cfg_name_list, six.string_types):
        test_cfg_name_list = [test_cfg_name_list]
    _standard_cfg_names = []
    _pcfgdict_list = []
    # HACK: Parse out custom configs first.  'custom' configs need a live
    # ibs object because they start from ibs.cfg.query_cfg.
    # NOTE(review): if a 'custom' name is given while ibs is None this
    # raises AttributeError on ibs.cfg — confirm callers always pass ibs.
    for test_cfg_name in test_cfg_name_list:
        if test_cfg_name.startswith('custom:') or test_cfg_name == 'custom':
            print('[expthelpers] Parsing nonstandard custom config')
            if test_cfg_name.startswith('custom:'):
                # parse out modifications to custom
                cfgstr_list = ':'.join(test_cfg_name.split(':')[1:]).split(',')
                augcfgdict = ut.parse_cfgstr_list(cfgstr_list, smartcast=True)
            else:
                augcfgdict = {}
            # Take the configuration from the ibeis object
            pipe_cfg = ibs.cfg.query_cfg.deepcopy()
            # Update with augmented params
            pipe_cfg.update_query_cfg(**augcfgdict)
            # Parse out a standard cfgdict
            cfgdict = dict(pipe_cfg.parse_items())
            cfgdict['_cfgname'] = 'custom'
            cfgdict['_cfgstr'] = test_cfg_name
            _pcfgdict_list.append(cfgdict)
        else:
            _standard_cfg_names.append(test_cfg_name)
    # Handle stanndard configs next
    if len(_standard_cfg_names) > 0:
        # Get parsing information
        #cfg_default_dict = dict(Config.QueryConfig().parse_items())
        #valid_keys = list(cfg_default_dict.keys())
        cfgstr_list = _standard_cfg_names
        named_defaults_dict = ut.dict_subset(
            experiment_configs.__dict__, experiment_configs.TEST_NAMES)
        alias_keys = experiment_configs.ALIAS_KEYS
        # Parse standard pipeline cfgstrings
        metadata = {'ibs': ibs}
        dict_comb_list = cfghelpers.parse_cfgstr_list2(
            cfgstr_list, named_defaults_dict, cfgtype=None,
            alias_keys=alias_keys,
            # Hack out valid keys for humpbacks
            #valid_keys=valid_keys,
            strict=False, metadata=metadata
        )
        # Get varied params (there may be duplicates)
        _pcfgdict_list.extend(ut.flatten(dict_comb_list))
    # TODO: respsect different algorithm parameters
    # like flukes
    # Expand cfgdicts into PipelineConfig config objects.  Without an ibs
    # object every cfgdict gets the plain QueryConfig class.
    if ibs is None:
        configclass_list = [Config.QueryConfig] * len(_pcfgdict_list)
    else:
        root_to_config = ibs.depc.configclass_dict
        configclass_list = [
            root_to_config.get(_cfgdict.get('pipeline_root', 'vsmany'),
                               Config.QueryConfig)
            for _cfgdict in _pcfgdict_list]
    _pipecfg_list = [cls(**_cfgdict)
                     for cls, _cfgdict in zip(configclass_list, _pcfgdict_list)]
    # Enforce rule that removes duplicate configs
    # by using feasiblity from ibeis.algo.Config
    # TODO: Move this unique finding code to its own function
    # and then move it up one function level so even the custom
    # configs can be uniquified
    _flag_list = ut.flag_unique_items(_pipecfg_list)
    cfgdict_list = ut.compress(_pcfgdict_list, _flag_list)
    pipecfg_list = ut.compress(_pipecfg_list, _flag_list)
    if ut.NOT_QUIET:
        print('[harn.help] return %d / %d unique pipeline configs from: %r' %
              (len(cfgdict_list), len(_pcfgdict_list), test_cfg_name_list))
    # Info-only mode: report the parsed configs and exit the process
    if ut.get_argflag(('--pcfginfo', '--pinfo', '--pipecfginfo')):
        import sys
        ut.colorprint('Requested PcfgInfo for tests... ', 'red')
        print_pipe_configs(cfgdict_list, pipecfg_list)
        ut.colorprint('Finished Reporting PcfgInfo. Exiting', 'red')
        sys.exit(1)
    return (cfgdict_list, pipecfg_list)
def parse_args():
    """
    Parse netrun command-line arguments.

    Returns:
        tuple: (requests, hyperparams, tags)
            requests (dict): boolean action flags (train/test/testall/...)
            hyperparams (dict): training hyperparameters from the CLI
            tags (dict): dataset / architecture / checkpoint identifiers
    """
    ds_default = None
    arch_default = 'siaml2_128'
    weights_tag_default = None
    # Test values (developer-only shortcut; permanently disabled)
    if False:
        ds_default = 'liberty'
        weights_tag_default = 'current'
        assert ut.inIPython()
    # Parse commandline args
    ds_tag = ut.get_argval(('--dataset', '--ds'), type_=str,
                           default=ds_default)
    datatype = ut.get_argval(('--datatype', '--dt'), type_=str,
                             default='siam-patch')
    arch_tag = ut.get_argval(('--arch', '-a'), default=arch_default)
    # NOTE(review): the short flag is '+w', not '-w' — looks intentional
    # (utool-style flag) but confirm it is not a typo.
    weights_tag = ut.get_argval(('--weights', '+w'), type_=str,
                                default=weights_tag_default)
    # Incorporate new config stuff?
    #NEW = False
    #if NEW:
    #    default_dstag_cfg = {
    #        'ds': 'PZ_MTEST',
    #        'mode': 'patches',
    #        'arch': arch_default
    #    }
    #    named_defaults_dict = {
    #        '': default_dstag_cfg
    #    }
    #    ut.parse_argv_cfg('dstag', named_defaults_dict=named_defaults_dict)
    hyperparams = ut.argparse_dict(
        {
            #'batch_size': 128,
            'batch_size': 256,
            #'learning_rate': .0005,
            'learning_rate': .1,
            'momentum': .9,
            #'weight_decay': 0.0005,
            'weight_decay': 0.0001,
        },
        alias_dict={
            'weight_decay': ['decay'],
            'learning_rate': ['learn_rate'],
        }
    )
    requests = ut.argparse_dict(
        {
            'train': False,
            'test': False,
            'testall': False,
            'publish': False,
            'ensuredata': False,
        }
    )
    # --testall implies --test
    requests['test'] = requests['test'] or requests['testall']
    # breakup weights tag into extern_ds and checkpoint
    # NOTE(review): split(':') raises ValueError for tags with more than one
    # colon — split(':', 1) may be intended; confirm the tag grammar.
    if weights_tag is not None and ':' in weights_tag:
        extern_ds_tag, checkpoint_tag = weights_tag.split(':')
    else:
        extern_ds_tag = None
        checkpoint_tag = weights_tag
    # resolve aliases
    ds_tag = DS_TAG_ALIAS2.get(ds_tag, ds_tag)
    extern_ds_tag = DS_TAG_ALIAS2.get(extern_ds_tag, extern_ds_tag)
    checkpoint_tag = CHECKPOINT_TAG_ALIAS.get(checkpoint_tag, checkpoint_tag)
    tags = {
        'ds_tag': ds_tag,
        'extern_ds_tag': extern_ds_tag,
        'checkpoint_tag': checkpoint_tag,
        'arch_tag': arch_tag,
        'datatype': datatype,
    }
    ut.colorprint('[netrun] * ds_tag=%r' % (ds_tag,), 'lightgray')
    ut.colorprint('[netrun] * arch_tag=%r' % (arch_tag,), 'lightgray')
    ut.colorprint('[netrun] * extern_ds_tag=%r' % (extern_ds_tag,), 'lightgray')
    ut.colorprint('[netrun] * checkpoint_tag=%r' % (checkpoint_tag,), 'lightgray')
    return requests, hyperparams, tags
def temp_model(num_annots, num_names, score_evidence=None, name_evidence=None,
               other_evidence=None, noquery=False, verbose=None,
               **kwargs):
    """
    Build a name model, apply evidence, optionally run the cluster query,
    and display the result.

    Args:
        num_annots (int): number of annotation nodes in the model
        num_names (int): number of possible name labels
        score_evidence (list): observed score evidence (default: empty)
        name_evidence (list): observed name evidence (default: empty)
        other_evidence (dict): extra variable -> state evidence (default: empty)
        noquery (bool): if True, skip inference and only apply evidence
        verbose (bool): defaults to ut.VERBOSE
        **kwargs: forwarded to make_name_model; 'method' is popped and
            passed to cluster_query.

    Returns:
        tuple: (model, evidence, query_results)
    """
    # BUGFIX: mutable default arguments ([], {}) were shared across calls;
    # use None sentinels and create fresh containers per call.
    score_evidence = [] if score_evidence is None else score_evidence
    name_evidence = [] if name_evidence is None else name_evidence
    other_evidence = {} if other_evidence is None else other_evidence
    if verbose is None:
        verbose = ut.VERBOSE

    method = kwargs.pop('method', None)
    model = make_name_model(num_annots, num_names, verbose=verbose, **kwargs)

    if verbose:
        model.print_priors(ignore_ttypes=[MATCH_TTYPE, SCORE_TTYPE])

    model, evidence, soft_evidence = update_model_evidence(
        model, name_evidence, score_evidence, other_evidence)

    if verbose and len(soft_evidence) != 0:
        model.print_priors(ignore_ttypes=[MATCH_TTYPE, SCORE_TTYPE],
                           title='Soft Evidence', color='green')

    if verbose:
        ut.colorprint('\n --- Inference ---', 'red')

    if (len(evidence) > 0 or len(soft_evidence) > 0) and not noquery:
        evidence = model._ensure_internal_evidence(evidence)
        # Query all name variables that are not already fixed by evidence
        query_vars = []
        query_vars += ut.list_getattr(model.ttype2_cpds[NAME_TTYPE], 'variable')
        # query_vars += ut.list_getattr(model.ttype2_cpds[MATCH_TTYPE], 'variable')
        query_vars = ut.setdiff(query_vars, evidence.keys())
        # query_vars = ut.setdiff(query_vars, soft_evidence.keys())
        query_results = cluster_query(model, query_vars, evidence,
                                      soft_evidence, method)
    else:
        query_results = {}

    # BUGFIX: the original indexed query_results['factor_list']
    # unconditionally, raising KeyError whenever the no-query branch left
    # query_results empty.
    factor_list = query_results.get('factor_list', [])

    # (BUGFIX: a redundant nested `if verbose:` was collapsed here.)
    if verbose:
        logger.info('+--------')
        semtypes = [model.var2_cpd[f.variables[0]].ttype
                    for f in factor_list]
        for type_, factors in ut.group_items(factor_list, semtypes).items():
            logger.info('Result Factors (%r)' % (type_,))
            factors = ut.sortedby(factors, [f.variables[0] for f in factors])
            for fs_ in ut.ichunks(factors, 4):
                ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
                              'yellow')
        logger.info('MAP assignments')
        top_assignments = query_results.get('top_assignments', [])
        tmp = []
        for lbl, val in top_assignments:
            tmp.append('%s : %.4f' % (ut.repr2(lbl), val))
        logger.info(ut.align('\n'.join(tmp), ' :'))
        logger.info('L_____\n')

    showkw = dict(evidence=evidence, soft_evidence=soft_evidence,
                  **query_results)

    from wbia.algo.hots import pgm_viz
    pgm_viz.show_model(model, **showkw)
    return (model, evidence, query_results)
def precfg(ibs, acfg_name_list, test_cfg_name_list):
    r"""
    Helper to precompute information

    Args:
        ibs (IBEISController):  ibeis controller object
        qaids (list):  query annotation ids
        daids (list):  database annotation ids
        test_cfg_name_list (list):

    Raises:
        AssertionError: if no --preload*/--preindex/--delete-nn-cache flag
            was given (nothing would have been precomputed).

    CommandLine:
        python -m ibeis.expt.precomputer --exec-precfg -t custom --expt-preload
        python -m ibeis.expt.precomputer --exec-precfg -t candidacy -a default:qaids=allgt --preload
        python -m ibeis.expt.precomputer --exec-precfg -t candidacy_invariance -a default:qaids=allgt --preload
        python -m ibeis.expt.precomputer --exec-precfg --delete-nn-cache

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.precomputer import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> default_acfgstrs = ['default:qaids=allgt']
        >>> acfg_name_list = ut.get_argval(('--aidcfg', '--acfg', '-a'), type_=list, default=default_acfgstrs)
        >>> test_cfg_name_list = ut.get_argval('-t', type_=list, default=['custom'])
        >>> result = precfg(ibs, acfg_name_list, test_cfg_name_list)
        >>> print(result)
    """
    # Generate list of database annotation configurations
    acfg_list, expanded_aids_list = experiment_helpers.get_annotcfg_list(
        ibs, acfg_name_list)
    # Generate list of query pipeline param configs
    cfgdict_list, pipecfg_list = experiment_helpers.get_pipecfg_list(
        test_cfg_name_list, ibs=ibs)
    #cfgx2_lbl = experiment_helpers.get_varied_cfg_lbls(cfgdict_list)
    expanded_aids_iter = ut.ProgressIter(expanded_aids_list,
                                         lbl='annot config', freq=1,
                                         autoadjust=False)
    nAcfg = len(acfg_list)
    for acfgx, (qaids, daids) in enumerate(expanded_aids_iter):
        # Skip degenerate annot configs instead of crashing
        if len(qaids) == 0:
            print('[harness] WARNING No query annotations specified')
            continue
        if len(daids) == 0:
            print('[harness] WARNING No database annotations specified')
            continue
        ut.colorprint('\n---Annot config', 'turquoise')
        nCfg = len(pipecfg_list)  # number of configurations (cols)
        dbname = ibs.get_dbname()
        cfgiter = ut.ProgressIter(pipecfg_list, lbl='query config', freq=1,
                                  autoadjust=False, parent_index=acfgx,
                                  parent_nTotal=nAcfg)
        # `flag` records that at least one precompute action was requested
        flag = False
        if ut.get_argflag('--delete-nn-cache'):
            ibs.delete_neighbor_cache()
            flag = True
        for cfgx, query_cfg in enumerate(cfgiter):
            print('')
            ut.colorprint(query_cfg.get_cfgstr(), 'turquoise')
            verbose = True
            # BUGFIX: original wrote `cfgx * + 1` (== cfgx), showing a
            # 0-based config position; `cfgx + 1` gives the intended
            # 1-based "cfg d/d" display.
            with ut.Indenter('[%s cfg %d/%d]' %
                             (dbname, (acfgx * nCfg) + cfgx + 1,
                              nCfg * nAcfg)):
                qreq_ = ibs.new_query_request(qaids, daids, verbose=True,
                                              query_cfg=query_cfg)
                if ut.get_argflag('--preload'):
                    qreq_.lazy_preload(verbose=verbose)
                    flag = True
                if ut.get_argflag('--preload-chip'):
                    qreq_.ensure_chips(verbose=verbose, extra_tries=1)
                    flag = True
                if ut.get_argflag('--preload-feat'):
                    qreq_.ensure_features(verbose=verbose)
                    flag = True
                if ut.get_argflag('--preload-featweight'):
                    qreq_.ensure_featweights(verbose=verbose)
                    flag = True
                if ut.get_argflag('--preindex'):
                    flag = True
                    if qreq_.qparams.pipeline_root in ['vsone', 'vsmany']:
                        qreq_.load_indexer(verbose=verbose)
        # BUGFIX: the assert was duplicated verbatim in the original.
        assert flag is True, 'no flag specified'
def test_siamese_performance(model, data, labels, flat_metadata,
                             dataname=''):
    r"""
    Evaluate a trained siamese model: score the data, learn score
    normalizers, and save diagnostic figures under the model's arch dpath.

    CommandLine:
        utprof.py -m ibeis_cnn --tf pz_patchmatch --db liberty --test --weights=liberty:current --arch=siaml2_128 --test
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --test --ensure
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --test --ensure --weights=new
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --train --weights=new
        python -m ibeis_cnn --tf netrun --db pzmtest --weights=liberty:current --arch=siaml2_128 --test  # NOQA
        python -m ibeis_cnn --tf netrun --db pzmtest --weights=liberty:current --arch=siaml2_128
    """
    import vtool as vt
    import plottool as pt

    # TODO: save in model.trainind_dpath/diagnostics/figures
    ut.colorprint('\n[siam_perf] Testing Siamese Performance', 'white')
    #epoch_dpath = model.get_epoch_diagnostic_dpath()
    epoch_dpath = model.arch_dpath
    ut.vd(epoch_dpath)

    dataname += ' ' + model.get_history_hashid() + '\n'

    history_text = ut.list_str(model.era_history, newlines=True)

    ut.write_to(ut.unixjoin(epoch_dpath, 'era_history.txt'), history_text)

    #if True:
    #    import matplotlib as mpl
    #    mpl.rcParams['agg.path.chunksize'] = 100000

    #data   = data[::50]
    #labels = labels[::50]
    #from ibeis_cnn import utils
    #data, labels = utils.random_xy_sample(data, labels, 10000, model.data_per_label_input)

    # FULL gates every expensive section; --quick limits the run to the
    # CNN-score/encoder part of the diagnostics.
    FULL = not ut.get_argflag('--quick')

    fnum_gen = pt.make_fnum_nextgen()

    ut.colorprint('[siam_perf] Show era history', 'white')
    fig = model.show_era_loss(fnum=fnum_gen())
    pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)

    # hack
    ut.colorprint('[siam_perf] Show weights image', 'white')
    fig = model.show_weights_image(fnum=fnum_gen())
    pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)

    #model.draw_all_conv_layer_weights(fnum=fnum_gen())
    #model.imwrite_weights(1)
    #model.imwrite_weights(2)

    # Compute each type of score
    ut.colorprint('[siam_perf] Building Scores', 'white')
    test_outputs = model.predict2(model, data)
    network_output = test_outputs['network_output_determ']
    # hack converting network output to distances for non-descriptor networks
    if len(network_output.shape) == 2 and network_output.shape[1] == 1:
        cnn_scores = network_output.T[0]
    elif len(network_output.shape) == 1:
        cnn_scores = network_output
    elif len(network_output.shape) == 2 and network_output.shape[1] > 1:
        assert model.data_per_label_output == 2
        vecs1 = network_output[0::2]
        vecs2 = network_output[1::2]
        cnn_scores = vt.L2(vecs1, vecs2)
    else:
        assert False
    cnn_scores = cnn_scores.astype(np.float64)

    # Segfaults with the data passed in is large (AND MEMMAPPED apparently)
    # Fixed in hesaff implementation
    SIFT = FULL
    if SIFT:
        sift_scores, sift_list = test_sift_patchmatch_scores(data, labels)
        sift_scores = sift_scores.astype(np.float64)

    ut.colorprint('[siam_perf] Learning Encoders', 'white')
    # Learn encoders
    encoder_kw = {
        #'monotonize': False,
        'monotonize': True,
    }
    cnn_encoder = vt.ScoreNormalizer(**encoder_kw)
    cnn_encoder.fit(cnn_scores, labels)

    if SIFT:
        sift_encoder = vt.ScoreNormalizer(**encoder_kw)
        sift_encoder.fit(sift_scores, labels)

    # Visualize
    ut.colorprint('[siam_perf] Visualize Encoders', 'white')
    viz_kw = dict(
        with_scores=False,
        with_postbayes=False,
        with_prebayes=False,
        target_tpr=.95,
    )
    inter_cnn = cnn_encoder.visualize(
        figtitle=dataname + ' CNN scores. #data=' + str(len(data)),
        fnum=fnum_gen(), **viz_kw)
    if SIFT:
        inter_sift = sift_encoder.visualize(
            figtitle=dataname + ' SIFT scores. #data=' + str(len(data)),
            fnum=fnum_gen(), **viz_kw)

    # Save
    pt.save_figure(fig=inter_cnn.fig, dpath=epoch_dpath)
    if SIFT:
        pt.save_figure(fig=inter_sift.fig, dpath=epoch_dpath)

    # Save out examples of hard errors
    #cnn_fp_label_indicies, cnn_fn_label_indicies =
    #cnn_encoder.get_error_indicies(cnn_scores, labels)
    #sift_fp_label_indicies, sift_fn_label_indicies =
    #sift_encoder.get_error_indicies(sift_scores, labels)

    with_patch_examples = FULL
    if with_patch_examples:
        ut.colorprint('[siam_perf] Visualize Confusion Examples', 'white')
        cnn_indicies = cnn_encoder.get_confusion_indicies(cnn_scores, labels)
        if SIFT:
            sift_indicies = sift_encoder.get_confusion_indicies(sift_scores, labels)

        warped_patch1_list, warped_patch2_list = list(zip(*ut.ichunks(data, 2)))
        samp_args = (warped_patch1_list, warped_patch2_list, labels)
        _sample = functools.partial(draw_results.get_patch_sample_img, *samp_args)

        cnn_fp_img = _sample({'fs': cnn_scores}, cnn_indicies.fp)[0]
        cnn_fn_img = _sample({'fs': cnn_scores}, cnn_indicies.fn)[0]
        cnn_tp_img = _sample({'fs': cnn_scores}, cnn_indicies.tp)[0]
        cnn_tn_img = _sample({'fs': cnn_scores}, cnn_indicies.tn)[0]

        if SIFT:
            sift_fp_img = _sample({'fs': sift_scores}, sift_indicies.fp)[0]
            sift_fn_img = _sample({'fs': sift_scores}, sift_indicies.fn)[0]
            sift_tp_img = _sample({'fs': sift_scores}, sift_indicies.tp)[0]
            sift_tn_img = _sample({'fs': sift_scores}, sift_indicies.tn)[0]

        #if ut.show_was_requested():
        #def rectify(arr):
        #    return np.flipud(arr)
        # NOTE(review): both branches below use sift_*_img / sift_list
        # without an `if SIFT:` guard.  This only works because SIFT,
        # with_patch_examples and with_patch_desc are all equal to FULL;
        # decoupling those flags would raise NameError.
        SINGLE_FIG = False
        if SINGLE_FIG:
            def dump_img(img_, lbl, fnum):
                fig, ax = pt.imshow(img_, figtitle=dataname + ' ' + lbl, fnum=fnum)
                pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)
            dump_img(cnn_fp_img, 'cnn_fp_img', fnum_gen())
            dump_img(cnn_fn_img, 'cnn_fn_img', fnum_gen())
            dump_img(cnn_tp_img, 'cnn_tp_img', fnum_gen())
            dump_img(cnn_tn_img, 'cnn_tn_img', fnum_gen())

            dump_img(sift_fp_img, 'sift_fp_img', fnum_gen())
            dump_img(sift_fn_img, 'sift_fn_img', fnum_gen())
            dump_img(sift_tp_img, 'sift_tp_img', fnum_gen())
            dump_img(sift_tn_img, 'sift_tn_img', fnum_gen())

            #vt.imwrite(dataname + '_' + 'cnn_fp_img.png', (cnn_fp_img))
            #vt.imwrite(dataname + '_' + 'cnn_fn_img.png', (cnn_fn_img))
            #vt.imwrite(dataname + '_' + 'sift_fp_img.png', (sift_fp_img))
            #vt.imwrite(dataname + '_' + 'sift_fn_img.png', (sift_fn_img))
        else:
            print('Drawing TP FP TN FN')
            fnum = fnum_gen()
            pnum_gen = pt.make_pnum_nextgen(4, 2)
            fig = pt.figure(fnum)
            pt.imshow(cnn_fp_img,  title='CNN FP',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_fp_img, title='SIFT FP', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_fn_img,  title='CNN FN',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_fn_img, title='SIFT FN', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_tp_img,  title='CNN TP',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_tp_img, title='SIFT TP', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_tn_img,  title='CNN TN',  fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_tn_img, title='SIFT TN', fnum=fnum, pnum=pnum_gen())
            pt.set_figtitle(dataname + ' confusions')
            pt.adjust_subplots(left=0, right=1.0, bottom=0., wspace=.01, hspace=.05)
            pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180, figsize=(9, 18))

    with_patch_desc = FULL
    if with_patch_desc:
        ut.colorprint('[siam_perf] Visualize Patch Descriptors', 'white')
        fnum = fnum_gen()
        fig = pt.figure(fnum=fnum, pnum=(1, 1, 1))
        num_rows = 7
        pnum_gen = pt.make_pnum_nextgen(num_rows, 3)
        # Compare actual output descriptors
        for index in ut.random_indexes(len(sift_list), num_rows):
            vec_sift = sift_list[index]
            vec_cnn = network_output[index]
            patch = data[index]
            pt.imshow(patch, fnum=fnum, pnum=pnum_gen())
            pt.plot_descriptor_signature(vec_cnn, 'cnn vec',
                                         fnum=fnum, pnum=pnum_gen())
            pt.plot_sift_signature(vec_sift, 'sift vec',
                                   fnum=fnum, pnum=pnum_gen())
        pt.set_figtitle('Patch Descriptors')
        pt.adjust_subplots(left=0, right=0.95, bottom=0.,
                           wspace=.1, hspace=.15)
        pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180, figsize=(9, 18))
def update_bindings():
    r"""
    Regenerate the FLANN C/C++/ctypes/python binding stubs in-place:
    for every binding name, find (fuzzy-match) its existing definition in
    each target file and replace it, or append it near the file's EOF
    sentinel if not found.

    Returns:
        dict: matchtups

    CommandLine:
        python ~/local/build_scripts/flannscripts/autogen_bindings.py --exec-update_bindings
        utprof.py ~/local/build_scripts/flannscripts/autogen_bindings.py --exec-update_bindings

    Example:
        >>> # DISABLE_DOCTEST
        >>> from autogen_bindings import *  # NOQA
        >>> import sys
        >>> import utool as ut
        >>> sys.path.append(ut.truepath('~/local/build_scripts/flannscripts'))
        >>> matchtups = update_bindings()
        >>> result = ('matchtups = %s' % (ut.repr2(matchtups),))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    from os.path import basename
    import difflib
    import numpy as np
    import re
    # Names of the C bindings to (re)generate
    binding_names = [
        'build_index',
        'used_memory',
        'add_points',
        'remove_point',

        'compute_cluster_centers',
        'load_index',
        'save_index',
        'find_nearest_neighbors',

        'radius_search',
        'remove_points',
        'free_index',
        'find_nearest_neighbors_index',

        # 'size',
        # 'veclen',
        # 'get_point',
        # 'flann_get_distance_order',
        # 'flann_get_distance_type',
        # 'flann_log_verbosity',

        # 'clean_removed_points',
    ]
    # Target files that receive generated code
    _places = [
        '~/code/flann/src/cpp/flann/flann.cpp',
        '~/code/flann/src/cpp/flann/flann.h',
        '~/code/flann/src/python/pyflann/flann_ctypes.py',
        '~/code/flann/src/python/pyflann/index.py',
    ]

    # Per-file marker after which nothing may be inserted (None = no limit)
    eof_sentinals = {
        # 'flann_ctypes.py': '# END DEFINE BINDINGS',
        'flann_ctypes.py': 'def ensure_2d_array(arr',
        # 'flann.h': '// END DEFINE BINDINGS',
        'flann.h': '#ifdef __cplusplus',
        'flann.cpp': None,
        'index.py': None,
    }
    # Per-file regex that marks the start of a definition block
    block_sentinals = {
        'flann.h': re.escape('/**'),
        'flann.cpp': 'template *<typename Distance>',
        # 'flann_ctypes.py': '\n',
        'flann_ctypes.py': 'flann\.[a-z_.]* =',
        # 'index.py': '    def .*',
        'index.py': '    [^ ].*',
    }
    places = {basename(fpath): fpath for fpath in ut.lmap(ut.truepath, _places)}
    text_dict = ut.map_dict_vals(ut.readfrom, places)
    lines_dict = {key: val.split('\n') for key, val in text_dict.items()}
    orig_texts = text_dict.copy()  # NOQA
    binding_defs = {}
    named_blocks  = {}
    print('binding_names = %r' % (binding_names,))
    # First pass: autogenerate the code blocks for every binding
    for binding_name in binding_names:
        blocks, defs = autogen_parts(binding_name)
        binding_defs[binding_name] = defs
        named_blocks[binding_name] = blocks

    # Second pass: splice each generated block into each target file
    for binding_name in ut.ProgIter(binding_names):
        ut.colorprint('+--- GENERATE BINDING %s -----' % (binding_name,), 'yellow')
        blocks_dict = named_blocks[binding_name]
        for key in places.keys():
            ut.colorprint('---- generating %s for %s -----' % (
                binding_name, key,), 'yellow')
            # key = 'flann_ctypes.py'
            # print(text_dict[key])
            old_text = text_dict[key]
            line_list = lines_dict[key]
            #text = old_text
            block = blocks_dict[key]

            debug = ut.get_argflag('--debug')
            # debug = True
            # if debug:
            #     print(ut.highlight_code(block, splitext(key)[1]))

            # Find a place in the code that already exists.  For C files,
            # strip comments and match on only the first 3 lines.
            searchblock = block
            if key.endswith('.cpp') or key.endswith('.h'):
                searchblock = re.sub(ut.REGEX_C_COMMENT, '', searchblock,
                                     flags=re.MULTILINE | re.DOTALL)
            searchblock = '\n'.join(searchblock.splitlines()[0:3])

            # @ut.cached_func(verbose=False)
            def cached_match(old_text, searchblock):
                def isjunk(x):
                    # NOTE(review): the second return is unreachable; the
                    # comment below suggests this was a deliberate
                    # experiment ("Not sure why the first one just doesnt
                    # find it") left in place.
                    return False
                    return x in ' \t,*()'
                def isjunk2(x):
                    return x in ' \t,*()'
                # Not sure why the first one just doesnt find it
                # isjunk = None
                sm = difflib.SequenceMatcher(isjunk, old_text, searchblock,
                                             autojunk=False)
                sm0 = difflib.SequenceMatcher(isjunk, old_text, searchblock,
                                              autojunk=True)
                sm1 = difflib.SequenceMatcher(isjunk2, old_text, searchblock,
                                              autojunk=False)
                sm2 = difflib.SequenceMatcher(isjunk2, old_text, searchblock,
                                              autojunk=True)
                matchtups = (sm.get_matching_blocks() +
                             sm0.get_matching_blocks() +
                             sm1.get_matching_blocks() +
                             sm2.get_matching_blocks())
                return matchtups
            matchtups = cached_match(old_text, searchblock)
            # Find a reasonable match in matchtups
            found = False
            if debug:
                # print('searchblock =\n%s' % (searchblock,))
                print('searchblock = %r' % (searchblock,))
            for (a, b, size) in matchtups:
                matchtext = old_text[a: a + size]
                pybind = binding_defs[binding_name]['py_binding_name']
                if re.search(binding_name + '\\b', matchtext) or re.search(pybind + '\\b', matchtext):
                    found = True
                    pos = a + size
                    if debug:
                        print('MATCHING TEXT')
                        print(matchtext)
                    break
                else:
                    if debug and 0:
                        print('Not matching')
                        print('matchtext = %r' % (matchtext,))
                        matchtext2 = old_text[a - 10: a + size + 20]
                        print('matchtext2 = %r' % (matchtext2,))
            if found:
                # Convert the character offset `pos` into a line index
                linelens = np.array(ut.lmap(len, line_list)) + 1
                sumlen = np.cumsum(linelens)
                row = np.where(sumlen < pos)[0][-1] + 1
                #print(line_list[row])
                # Search for extents of the block to overwrite
                block_sentinal = block_sentinals[key]
                row1 = ut.find_block_end(row, line_list, block_sentinal, -1) - 1
                row2 = ut.find_block_end(row + 1, line_list, block_sentinal, +1)
                eof_sentinal = eof_sentinals[key]
                if eof_sentinal is not None:
                    print('eof_sentinal = %r' % (eof_sentinal,))
                    row2 = min([count for count, line in enumerate(line_list)
                                if line.startswith(eof_sentinal)][-1], row2)
                nr = len((block + '\n\n').splitlines())
                new_line_list = ut.insert_block_between_lines(
                    block + '\n', row1, row2, line_list)
                rtext1 = '\n'.join(line_list[row1:row2])
                rtext2 = '\n'.join(new_line_list[row1:row1 + nr])
                if debug:
                    print('-----')
                    ut.colorprint('FOUND AND REPLACING %s' % (binding_name,), 'yellow')
                    print(ut.highlight_code(rtext1))
                if debug:
                    print('-----')
                    ut.colorprint('FOUND AND REPLACED WITH %s' % (binding_name,), 'yellow')
                    print(ut.highlight_code(rtext2))
                # NOTE(review): the diff prints when --diff is NOT given;
                # the inverted condition looks suspicious — confirm intent.
                if not ut.get_argflag('--diff') and not debug:
                    print(ut.color_diff_text(ut.difftext(rtext1, rtext2,
                                                         num_context_lines=7,
                                                         ignore_whitespace=True)))
            else:
                # Append to end of the file
                eof_sentinal = eof_sentinals[key]
                if eof_sentinal is None:
                    row2 = len(line_list) - 1
                else:
                    row2_choice = [count for count, line in enumerate(line_list)
                                   if line.startswith(eof_sentinal)]
                    if len(row2_choice) == 0:
                        row2 = len(line_list) - 1
                        assert False
                    else:
                        row2 = row2_choice[-1] - 1

                # row1 = row2 - 1
                # row2 = row2 - 1
                row1 = row2

                new_line_list = ut.insert_block_between_lines(
                    block + '\n', row1, row2, line_list)
                # block + '\n\n\n', row1, row2, line_list)

                rtext1 = '\n'.join(line_list[row1:row2])
                nr = len((block + '\n\n').splitlines())
                rtext2 = '\n'.join(new_line_list[row1:row1 + nr])

                if debug:
                    print('-----')
                    ut.colorprint('NOT FOUND AND REPLACING %s' % (binding_name,), 'yellow')
                    print(ut.highlight_code(rtext1))
                if debug:
                    print('-----')
                    ut.colorprint('NOT FOUND AND REPLACED WITH %s' % (binding_name,), 'yellow')
                    print(ut.highlight_code(rtext2))

                # NOTE(review): same inverted --diff condition as above.
                if not ut.get_argflag('--diff') and not debug:
                    print(ut.color_diff_text(ut.difftext(rtext1, rtext2,
                                                         num_context_lines=7,
                                                         ignore_whitespace=True)))
            # Commit the spliced text so the next binding edits the result
            text_dict[key] = '\n'.join(new_line_list)
            lines_dict[key] = new_line_list
        ut.colorprint('L___  GENERATED BINDING %s ___' % (binding_name,), 'yellow')

    for key in places:
        new_text = '\n'.join(lines_dict[key])
        #ut.writeto(ut.augpath(places[key], '.new'), new_text)
        ut.writeto(ut.augpath(places[key]), new_text)

    for key in places:
        if ut.get_argflag('--diff'):
            # NOTE(review): `new_text` here is stale — it still holds the
            # value from the LAST iteration of the previous loop, so every
            # key is diffed against the same text.  Likely should be
            # '\n'.join(lines_dict[key]).
            difftext = ut.get_textdiff(orig_texts[key], new_text,
                                       num_context_lines=7,
                                       ignore_whitespace=True)
            difftext = ut.color_diff_text(difftext)
            print(difftext)
def submit_query_request(ibs, qaid_list, daid_list, use_cache=None, use_bigcache=None, cfgdict=None, qreq_=None, verbose=None, save_qcache=None, prog_hook=None): """ The standard query interface. TODO: rename use_cache to use_qcache Checks a big cache for qaid2_cm. If cache miss, tries to load each cm individually. On an individual cache miss, it preforms the query. Args: ibs (ibeis.IBEISController) : ibeis control object qaid_list (list): query annotation ids daid_list (list): database annotation ids use_cache (bool): use_bigcache (bool): Returns: qaid2_cm (dict): dict of QueryResult objects CommandLine: python -m ibeis.algo.hots.match_chips4 --test-submit_query_request Examples: >>> # SLOW_DOCTEST >>> from ibeis.algo.hots.match_chips4 import * # NOQA >>> import ibeis >>> qaid_list = [1] >>> daid_list = [1, 2, 3, 4, 5] >>> use_bigcache = True >>> use_cache = True >>> ibs = ibeis.opendb(db='testdb1') >>> qreq_ = ibs.new_query_request(qaid_list, daid_list, cfgdict={}, verbose=True) >>> qaid2_cm = submit_query_request(ibs, qaid_list, daid_list, use_cache, use_bigcache, qreq_=qreq_) """ # Get flag defaults if necessary if verbose is None: verbose = pipeline.VERB_PIPELINE if use_cache is None: use_cache = USE_CACHE if save_qcache is None: save_qcache = SAVE_CACHE if use_bigcache is None: use_bigcache = USE_BIGCACHE # Create new query request object to store temporary state if verbose: #print('[mc4] --- Submit QueryRequest_ --- ') ut.colorprint('[mc4] --- Submit QueryRequest_ --- ', 'darkyellow') assert qreq_ is not None, 'query request must be prebuilt' qreq_.prog_hook = prog_hook # --- BIG CACHE --- # Do not use bigcache single queries use_bigcache_ = (use_bigcache and use_cache and len(qaid_list) > MIN_BIGCACHE_BUNDLE) if (use_bigcache_ or save_qcache) and len(qaid_list) > MIN_BIGCACHE_BUNDLE: bc_dpath = ibs.get_big_cachedir() # TODO: SYSTEM : semantic should only be used if name scoring is on #qhashid = qreq_.get_data_hashid() #dhashid = qreq_.get_query_hashid() 
#pipe_hashstr = qreq_.get_pipe_hashid() #bc_fname = ''.join((ibs.get_dbname(), '_QRESMAP', qhashid, dhashid, pipe_hashstr)) #bc_fname = ''.join((ibs.get_dbname(), '_BIG_MC4_CM', qhashid, dhashid, pipe_hashstr)) bc_fname = 'BIG_MC4_' + qreq_.get_shortinfo_cfgstr() #bc_cfgstr = ibs.cfg.query_cfg.get_cfgstr() # FIXME, rectify w/ qparams bc_cfgstr = qreq_.get_full_cfgstr() if use_bigcache_: # Try and load directly from a big cache try: qaid2_cm = ut.load_cache(bc_dpath, bc_fname, bc_cfgstr) cm_list = [qaid2_cm[qaid] for qaid in qaid_list] except (IOError, AttributeError): pass else: return cm_list # ------------ # Execute query request qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose=verbose) # ------------ if save_qcache and len(qaid_list) > MIN_BIGCACHE_BUNDLE: ut.save_cache(bc_dpath, bc_fname, bc_cfgstr, qaid2_cm) cm_list = [qaid2_cm[qaid] for qaid in qaid_list] return cm_list
def train_aoi(output_path, data_fpath, labels_fpath):
    r"""
    Train the annotation-of-interest (AoI) network on a numpy dataset.

    CommandLine:
        python -m ibeis_cnn.train --test-train_aoi

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_cnn.train import *  # NOQA
        >>> result = train_aoi()
        >>> print(result)
    """
    era_len = 256
    minibatch_size = 16
    epoch_cap = era_len * 16
    # Hyperparameters may be overridden from the command line via argparse_dict
    hyperparams = ut.argparse_dict({
        'era_size': era_len,
        'learning_rate': .01,
        'rate_schedule': 0.75,
        'momentum': .9,
        'weight_decay': 0.0001,
        'augment_on': True,
        'augment_on_validate': True,
        'whiten_on': False,
        'max_epochs': epoch_cap,
        'stopping_patience': epoch_cap,
        'class_weight': None,
    })

    ut.colorprint('[netrun] Ensuring Dataset', 'yellow')
    dataset = ingest_data.get_numpy_dataset2(
        'aoi', data_fpath, labels_fpath, output_path)
    X_train, y_train = dataset.subset('train')
    X_valid, y_valid = dataset.subset('valid')
    print('dataset.training_dpath = %r' % (dataset.training_dpath,))

    ut.colorprint('[netrun] Architecture Specification', 'yellow')
    # Each input row is the data vector plus four extra columns
    # (presumably bbox info — TODO confirm against AoIModel)
    model = AoIModel(
        input_shape=(minibatch_size, dataset.data_shape[0] + 4,),
        training_dpath=dataset.training_dpath,
        **hyperparams)

    ut.colorprint('[netrun] Initialize architecture', 'yellow')
    model.output_dims = 1
    model.input_shape = (None, dataset.data_shape[0] + 4,)
    model.batch_size = minibatch_size
    model.init_arch()

    ut.colorprint('[netrun] * Initializing new weights', 'lightgray')
    if model.has_saved_state():
        model.load_model_state()

    ut.colorprint('[netrun] Training Requested', 'yellow')
    # parse training arguments
    config = ut.argparse_dict(dict(
        era_size=era_len,
        max_epochs=epoch_cap,
        show_confusion=False,
    ))
    model.monitor_config.update(**config)

    print('\n[netrun] Model Info')
    model.print_layer_info()

    ut.colorprint('[netrun] Begin training', 'yellow')
    model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid)

    model_path = model.save_model_state()
    return model_path
def print_priors(model, ignore_ttypes=None, title='Priors', color='darkblue'):
    """
    Print the prior CPD tables of a graphical model, four tables per row.

    Args:
        model: pgm-style model exposing ``ttype2_cpds``, a mapping of
            ttype -> list of CPDs (each CPD must support ``_cpdstr('psql')``)
        ignore_ttypes (list): ttypes whose CPDs are skipped (default = [])
        title (str): heading printed above the tables (default = 'Priors')
        color (str): utool colorprint color (default = 'darkblue')
    """
    # FIX: avoid a shared mutable default argument (was ignore_ttypes=[])
    if ignore_ttypes is None:
        ignore_ttypes = []
    ut.colorprint('\n --- %s ---' % (title,), color=color)
    for ttype, cpds in model.ttype2_cpds.items():
        if ttype not in ignore_ttypes:
            # Render tables side-by-side in chunks of 4 to keep rows readable
            for fs_ in ut.ichunks(cpds, 4):
                ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]), color)
def train_classifier(output_path, data_fpath, labels_fpath):
    r"""
    Train a classifier network on a numpy dataset.

    Args:
        output_path (str): directory for dataset / training state
        data_fpath (str): path to the data array
        labels_fpath (str): path to the label array

    Returns:
        str: model_path - path of the saved model state

    CommandLine:
        python -m ibeis_cnn.train --test-train_classifier

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_cnn.train import *  # NOQA
        >>> result = train_classifier()
        >>> print(result)
    """
    era_size = 16
    max_epochs = 256
    # Hyperparameters may be overridden from the command line via argparse_dict
    hyperparams = ut.argparse_dict(
        {
            'era_size'      : era_size,
            'batch_size'    : 128,
            'learning_rate' : .01,
            'rate_schedule' : 0.75,
            'momentum'      : .9,
            'weight_decay'  : 0.0001,
            'augment_on'    : True,
            'whiten_on'     : True,
            'max_epochs'    : max_epochs,
        }
    )

    ut.colorprint('[netrun] Ensuring Dataset', 'yellow')
    dataset = ingest_data.get_numpy_dataset2('classifier', data_fpath,
                                             labels_fpath, output_path)
    X_train, y_train = dataset.subset('train')
    X_valid, y_valid = dataset.subset('valid')
    print('dataset.training_dpath = %r' % (dataset.training_dpath,))

    ut.colorprint('[netrun] Architecture Specification', 'yellow')
    model = ClassifierModel(
        data_shape=dataset.data_shape,
        training_dpath=dataset.training_dpath,
        **hyperparams)

    ut.colorprint('[netrun] Init encoder and convert labels', 'yellow')
    if hasattr(model, 'init_encoder'):
        model.init_encoder(y_train)

    # FIX: typo in log message ('archchitecture' -> 'architecture')
    ut.colorprint('[netrun] Initialize architecture', 'yellow')
    model.init_arch()

    ut.colorprint('[netrun] * Initializing new weights', 'lightgray')
    if model.has_saved_state():
        model.load_model_state()
    # else:
    #     model.reinit_weights()

    # ut.colorprint('[netrun] Need to initialize training state', 'yellow')
    # X_train, y_train = dataset.subset('train')
    # model.ensure_data_params(X_train, y_train)

    ut.colorprint('[netrun] Training Requested', 'yellow')
    # parse training arguments
    config = ut.argparse_dict(dict(
        monitor=True,
        monitor_updates=True,
        show_confusion=True,
        era_size=era_size,
        max_epochs=max_epochs,
    ))
    model.monitor_config.update(**config)

    if getattr(model, 'encoder', None) is not None:
        # Map labels through the encoder's class order.
        # FIX: use a dict for O(1) lookups instead of repeated list.index,
        # which was O(n) per label (quadratic overall).
        class_to_idx = {c: i for i, c in enumerate(model.encoder.classes_)}
        y_train = np.array([class_to_idx[_] for _ in y_train])
        y_valid = np.array([class_to_idx[_] for _ in y_valid])

    print('\n[netrun] Model Info')
    model.print_layer_info()

    ut.colorprint('[netrun] Begin training', 'yellow')
    model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid)

    model_path = model.save_model_state()
    return model_path
def make_single_testres(
    ibs,
    qaids,
    daids,
    pipecfg_list,
    cfgx2_lbl,
    cfgdict_list,
    lbl,
    testnameid,
    use_cache=None,
    subindexer_partial=ut.ProgIter,
):
    """
    Run each pipeline configuration against a single (qaids, daids) annot
    configuration and bundle the per-config results into a TestResult.

    Args:
        ibs: controller object
        qaids (list): query annotation rowids
        daids (list): database annotation rowids
        pipecfg_list (list): pipeline configuration objects
        cfgx2_lbl (list): display label per pipeline config
        cfgdict_list (list): raw config dicts, stored on the testres
        lbl (str): label stored on the testres
        testnameid (str): identifier printed alongside progress output
        use_cache (bool): read/write the bulk test cache
            (default: USE_BIG_TEST_CACHE)
        subindexer_partial: progress-iterator factory for the config loop

    Returns:
        TestResult (or None when DRY_RUN / NOMEMORY is set)

    CommandLine:
        python -m wbia run_expt
    """
    cfgslice = None
    if cfgslice is not None:
        pipecfg_list = pipecfg_list[cfgslice]

    dbname = ibs.get_dbname()

    # if ut.NOT_QUIET:
    #     logger.info('[harn] Make single testres')

    cfgx2_qreq_ = [
        ibs.new_query_request(qaids, daids, verbose=False, query_cfg=pipe_cfg)
        for pipe_cfg in ut.ProgIter(pipecfg_list, lbl='Building qreq_',
                                    enabled=False)
    ]

    if use_cache is None:
        use_cache = USE_BIG_TEST_CACHE

    if use_cache:
        # Try the bulk cache first; a hit short-circuits all computation.
        try:
            bt_cachedir = ut.ensuredir((ibs.get_cachedir(), 'BULK_TEST_CACHE2'))
            cfgstr_list = [
                qreq_.get_cfgstr(with_input=True) for qreq_ in cfgx2_qreq_
            ]
            bt_cachestr = ut.hashstr_arr27(cfgstr_list,
                                           ibs.get_dbname() + '_cfgs')
            bt_cachename = 'BULKTESTCACHE2_v2'
            testres = ut.load_cache(bt_cachedir, bt_cachename, bt_cachestr)
            testres.cfgdict_list = cfgdict_list
            testres.cfgx2_lbl = cfgx2_lbl  # hack override
        except IOError:
            pass
        else:
            if ut.NOT_QUIET:
                ut.colorprint('[harn] single testres cache hit... returning',
                              'brightcyan')
            return testres

    if ibs.table_cache:
        # HACK
        prev_feat_cfgstr = None

    cfgx2_cmsinfo = []
    cfgiter = subindexer_partial(range(len(cfgx2_qreq_)), lbl='pipe config',
                                 freq=1, adjust=False)
    # Run each pipeline configuration
    for cfgx in cfgiter:
        qreq_ = cfgx2_qreq_[cfgx]

        cprint = ut.colorprint
        cprint('testnameid=%r' % (testnameid,), 'green')
        cprint(
            'annot_cfgstr = %s'
            % (qreq_.get_cfgstr(with_input=True, with_pipe=False),),
            'yellow',
        )
        cprint('pipe_cfgstr= %s' % (qreq_.get_cfgstr(with_data=False),),
               'brightcyan')
        cprint('pipe_hashstr = %s' % (qreq_.get_pipe_hashid(),), 'cyan')
        if DRY_RUN:
            continue

        indent_prefix = '[%s cfg %d/%d]' % (
            dbname,
            # cfgiter.count (doesnt work when quiet)
            (cfgiter.parent_index * cfgiter.length) + cfgx,
            cfgiter.length * cfgiter.parent_length,
        )

        with ut.Indenter(indent_prefix):
            # Run the test / read cache
            _need_compute = True
            if use_cache:
                # smaller cache for individual configuration runs
                st_cfgstr = qreq_.get_cfgstr(with_input=True)
                st_cachedir = ut.unixjoin(bt_cachedir, 'small_tests')
                st_cachename = 'smalltest'
                ut.ensuredir(st_cachedir)
                try:
                    cmsinfo = ut.load_cache(st_cachedir, st_cachename,
                                            st_cfgstr)
                except IOError:
                    _need_compute = True
                else:
                    _need_compute = False
            if _need_compute:
                assert not ibs.table_cache
                if ibs.table_cache:
                    # FIX: the condition used to be wrapped in len(...),
                    # which raised TypeError (len of a bool) whenever this
                    # branch executed.
                    if (prev_feat_cfgstr is not None and
                            prev_feat_cfgstr != qreq_.qparams.feat_cfgstr):
                        # Clear features to preserve memory
                        ibs.clear_table_cache()
                        # qreq_.ibs.print_cachestats_str()
                cm_list = qreq_.execute()
                cmsinfo = test_result.build_cmsinfo(cm_list, qreq_)
                # record previous feature configuration
                if ibs.table_cache:
                    prev_feat_cfgstr = qreq_.qparams.feat_cfgstr
                if use_cache:
                    ut.save_cache(st_cachedir, st_cachename, st_cfgstr,
                                  cmsinfo)
        if not NOMEMORY:
            # Store the results
            cfgx2_cmsinfo.append(cmsinfo)
        else:
            cfgx2_qreq_[cfgx] = None
    if ut.NOT_QUIET:
        ut.colorprint('[harn] Completed running test configurations', 'white')
    if DRY_RUN:
        logger.info('ran tests dryrun mode.')
        return
    if NOMEMORY:
        logger.info('ran tests in memory savings mode. Cannot Print. exiting')
        return
    # Store all pipeline config results in a test result object
    testres = test_result.TestResult(pipecfg_list, cfgx2_lbl, cfgx2_cmsinfo,
                                     cfgx2_qreq_)
    testres.testnameid = testnameid
    testres.lbl = lbl
    testres.cfgdict_list = cfgdict_list
    testres.aidcfg = None
    if use_cache:
        try:
            ut.save_cache(bt_cachedir, bt_cachename, bt_cachestr, testres)
        except Exception as ex:
            ut.printex(ex, 'error saving testres cache', iswarning=True)
            if ut.SUPER_STRICT:
                raise
    return testres
def test_model(num_annots, num_names, score_evidence=None, name_evidence=None,
               other_evidence=None, noquery=False, verbose=None,
               **kwargs):
    """
    Build a name-inference model, apply evidence, optionally run a cluster
    query, then print and visualize the results.

    Args:
        num_annots (int): number of annotation nodes
        num_names (int): number of name values
        score_evidence (list): score evidence values (default = [])
        name_evidence (list): name evidence values (default = [])
        other_evidence (dict): additional var -> value evidence (default = {})
        noquery (bool): if True, skip inference entirely
        verbose (bool): (default = ut.VERBOSE)
        **kwargs: ``method`` is popped for cluster_query; the rest is
            forwarded to make_name_model

    Returns:
        tuple: (model, evidence, query_results)
    """
    # FIX: None-sentinels instead of mutable default arguments
    if score_evidence is None:
        score_evidence = []
    if name_evidence is None:
        name_evidence = []
    if other_evidence is None:
        other_evidence = {}
    if verbose is None:
        verbose = ut.VERBOSE

    method = kwargs.pop('method', None)
    model = make_name_model(num_annots, num_names, verbose=verbose, **kwargs)

    if verbose:
        model.print_priors(ignore_ttypes=['match', 'score'])

    model, evidence, soft_evidence = update_model_evidence(
        model, name_evidence, score_evidence, other_evidence)

    if verbose and len(soft_evidence) != 0:
        model.print_priors(ignore_ttypes=['match', 'score'],
                           title='Soft Evidence', color='green')

    if verbose:
        ut.colorprint('\n --- Inference ---', 'red')

    if (len(evidence) > 0 or len(soft_evidence) > 0) and not noquery:
        evidence = model._ensure_internal_evidence(evidence)
        query_vars = []
        query_vars += ut.list_getattr(model.ttype2_cpds['name'], 'variable')
        #query_vars += ut.list_getattr(model.ttype2_cpds['match'], 'variable')
        query_vars = ut.setdiff(query_vars, evidence.keys())
        #query_vars = ut.setdiff(query_vars, soft_evidence.keys())
        query_results = cluster_query(model, query_vars, evidence,
                                      soft_evidence, method)
    else:
        query_results = {}

    # FIX: use .get so the noquery / no-evidence path (query_results == {})
    # no longer raises KeyError on 'factor_list'.
    factor_list = query_results.get('factor_list', [])

    if verbose:
        print('+--------')
        semtypes = [model.var2_cpd[f.variables[0]].ttype
                    for f in factor_list]
        for type_, factors in ut.group_items(factor_list, semtypes).items():
            print('Result Factors (%r)' % (type_,))
            factors = ut.sortedby(factors, [f.variables[0] for f in factors])
            for fs_ in ut.ichunks(factors, 4):
                ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
                              'yellow')
        print('MAP assignments')
        top_assignments = query_results.get('top_assignments', [])
        tmp = []
        for lbl, val in top_assignments:
            tmp.append('%s : %.4f' % (ut.repr2(lbl), val))
        print(ut.align('\n'.join(tmp), ' :'))
        print('L_____\n')

    showkw = dict(evidence=evidence, soft_evidence=soft_evidence,
                  **query_results)
    pgm_viz.show_model(model, **showkw)
    return (model, evidence, query_results)
def run_expt(
    ibs,
    acfg_name_list,
    test_cfg_name_list,
    use_cache=None,
    qaid_override=None,
    daid_override=None,
    initial_aids=None,
):
    r"""
    Loops over annot configs.

    Try and use this function as a starting point to clean up this module.
    The code is getting too untenable.

    Args:
        ibs: controller object
        acfg_name_list (list): annot config names to expand and run
        test_cfg_name_list (list): pipeline config names
        use_cache (bool): forwarded to acfg expansion and testres caching
        qaid_override (list): explicit query aids (default = None)
        daid_override (list): explicit database aids (default = None)
        initial_aids (list): seed aids for expansion (default = None)

    Returns:
        TestResult: combined result over all annot configs

    CommandLine:
        python -m wbia.expt.harness run_expt --acfginfo
        python -m wbia.expt.harness run_expt --pcfginfo
        python -m wbia.expt.harness run_expt

    Ignore:
        test_cfg_name_list = [p]

    Example:
        >>> # SLOW_DOCTEST
        >>> from wbia.expt.harness import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='PZ_MTEST')
        >>> default_acfgstrs = ['ctrl:qsize=20,dpername=1,dsize=10',
        >>>                     'ctrl:qsize=20,dpername=10,dsize=20']
        >>> acfg_name_list = default_acfgstrs
        >>> test_cfg_name_list = ['default:proot=smk', 'default']
        >>> #test_cfg_name_list = ['custom', 'custom:fg_on=False']
        >>> use_cache = False
        >>> testres_list = run_expt(ibs, acfg_name_list, test_cfg_name_list, use_cache)
    """
    logger.info('[harn] run_expt')
    # Generate list of database annotation configurations
    if len(acfg_name_list) == 0:
        raise ValueError('must give acfg name list')

    acfg_list, expanded_aids_list = experiment_helpers.get_annotcfg_list(
        ibs,
        acfg_name_list,
        qaid_override=qaid_override,
        daid_override=daid_override,
        initial_aids=initial_aids,
        use_cache=use_cache,
    )

    # Generate list of query pipeline param configs
    cfgdict_list, pipecfg_list = experiment_helpers.get_pipecfg_list(
        test_cfg_name_list, ibs=ibs)

    cfgx2_lbl = experiment_helpers.get_varied_pipecfg_lbls(cfgdict_list)
    # NOTE: Can specify --pcfginfo or --acfginfo

    if ut.NOT_QUIET:
        # dedent + strip leaves exactly the two banner lines
        ut.colorprint(
            textwrap.dedent("""
                [harn]================
                [harn] harness.test_configurations2()""").strip(),
            'white',
        )
        msg = '[harn] Running %s using %s and %s' % (
            ut.quantstr('test', len(acfg_list) * len(cfgdict_list)),
            ut.quantstr('pipeline config', len(cfgdict_list)),
            ut.quantstr('annot config', len(acfg_list)),
        )
        ut.colorprint(msg, 'white')

    testres_list = []

    nAcfg = len(acfg_list)

    testnameid = ibs.get_dbname() + ' ' + str(test_cfg_name_list) + str(
        acfg_name_list)
    lbl = '[harn] TEST_CFG ' + str(test_cfg_name_list) + str(acfg_name_list)
    expanded_aids_iter = ut.ProgIter(
        expanded_aids_list,
        lbl='annot config',
        freq=1,
        autoadjust=False,
        enabled=ut.NOT_QUIET,
    )

    for acfgx, (qaids, daids) in enumerate(expanded_aids_iter):
        assert len(qaids) != 0, '[harness] No query annots specified'
        # FIX: typo in assert message ('annotas' -> 'annots')
        assert len(daids) != 0, '[harness] No database annots specified'
        acfg = acfg_list[acfgx]
        if ut.NOT_QUIET:
            ut.colorprint('\n---Annot config testnameid=%r' % (testnameid,),
                          'brightcyan')
        subindexer_partial = ut.ProgPartial(parent_index=acfgx,
                                            parent_length=nAcfg,
                                            enabled=ut.NOT_QUIET)
        testres_ = make_single_testres(
            ibs,
            qaids,
            daids,
            pipecfg_list,
            cfgx2_lbl,
            cfgdict_list,
            lbl,
            testnameid,
            use_cache=use_cache,
            subindexer_partial=subindexer_partial,
        )
        if DRY_RUN:
            continue
        testres_.acfg = acfg
        testres_.test_cfg_name_list = test_cfg_name_list
        testres_list.append(testres_)

    if DRY_RUN:
        logger.info('DRYRUN: Cannot continue past run_expt')
        sys.exit(0)

    testres = test_result.combine_testres_list(ibs, testres_list)
    # testres.print_results()
    logger.info('Returning Test Result')
    return testres
def print_acfg_list(acfg_list, expanded_aids_list=None, ibs=None,
                    combined=False, **kwargs):
    r"""
    Prints a summary of a list of annotation configurations: the settings
    common to all configs, the varied settings of each config, and
    (optionally) annotation statistics for the expanded aids.

    Args:
        acfg_list (list):
        expanded_aids_list (list): (default = None)
        ibs (IBEISController):  ibeis controller object(default = None)
        combined (bool): (default = False)
        **kwargs: forwarded as annot-stat options when ibs is given

    CommandLine:
        python -m ibeis.expt.annotation_configs --exec-print_acfg_list --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.annotation_configs import *  # NOQA
        >>> import ibeis
        >>> acfg_list = '?'
        >>> expanded_aids_list = None
        >>> ibs = None
        >>> combined = False
        >>> result = print_acfg_list(acfg_list, expanded_aids_list, ibs, combined)
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    # Split configs into the shared (non-varied) part and the per-config
    # varied parts for compact printing.
    _tup = compress_acfg_list_for_printing(acfg_list)
    nonvaried_compressed_dict, varied_compressed_dict_list = _tup

    ut.colorprint('+=== <Info acfg_list> ===', 'white')
    #print('Printing acfg_list info. len(acfg_list) = %r' % (len(acfg_list),))
    print('non-varied aidcfg = ' + ut.dict_str(nonvaried_compressed_dict))
    # Maps (qaid-hash, daid-hash) -> list of acfg indexes already printed,
    # so duplicate expansions are reported rather than re-printed.
    seen_ = ut.ddict(list)

    # get default kwkeys for annot info
    if ibs is not None:
        annotstats_kw = kwargs.copy()
        kwkeys = ut.parse_func_kwarg_keys(ibs.get_annot_stats_dict)
        # Pick up any of those keys that were explicitly given on the CLI
        annotstats_kw.update(ut.argparse_dict(
            dict(zip(kwkeys, [None] * len(kwkeys))), only_specified=True))

    for acfgx in range(len(acfg_list)):
        acfg = acfg_list[acfgx]
        title = ('q_cfgname=' + acfg['qcfg']['_cfgname'] +
                 ' d_cfgname=' + acfg['dcfg']['_cfgname'])

        ut.colorprint('+--- acfg %d / %d -- %s ---- ' %
                      (acfgx + 1, len(acfg_list), title), 'lightgray')
        print('acfg = ' + ut.dict_str(varied_compressed_dict_list[acfgx],
                                      strvals=True))

        if expanded_aids_list is not None:
            qaids, daids = expanded_aids_list[acfgx]
            # Hash the aid lists so identical expansions are detected cheaply
            key = (ut.hashstr_arr27(qaids, 'qaids'),
                   ut.hashstr_arr27(daids, 'daids'))
            if key not in seen_:
                if ibs is not None:
                    seen_[key].append(acfgx)
                    # Printing happens inside get_annotconfig_stats
                    # (verbose=True); the returned strings are unused here.
                    annotconfig_stats_strs, _ = ibs.get_annotconfig_stats(
                        qaids, daids, verbose=True, combined=combined,
                        **annotstats_kw)
            else:
                dupindex = seen_[key]
                print('DUPLICATE of index %r' % (dupindex,))
                dupdict = varied_compressed_dict_list[dupindex[0]]
                print('DUP OF acfg = ' + ut.dict_str(dupdict, strvals=True))
    ut.colorprint('L___ </Info acfg_list> ___', 'white')
def print_results(ibs, testres, **kwargs):
    """
    Prints results from an experiment harness run.
    Rows store different qaids (query annotation ids)
    Cols store different configurations (algorithm parameters)

    The individual report sections below are wrapped in ``ut.argv_flag_dec``,
    which gates them behind command-line flags (see the final printout about
    ``--print-all``).

    TODO: join acfgs

    Args:
        ibs (IBEISController):  ibeis controller object
        testres (test_result.TestResult):
        **kwargs: forwarded to the flag-gated printer functions

    CommandLine:
        python dev.py -e print --db PZ_MTEST \
            -a default:dpername=1,qpername=[1,2] -t default:fg_on=False

        python dev.py -e print -t best --db seals2 --allgt --vz
        python dev.py -e print --db PZ_MTEST --allgt -t custom \
            --print-confusion-stats
        python dev.py -e print --db PZ_MTEST --allgt --noqcache \
            --index 0:10:2 -t custom:rrvsone_on=True --print-confusion-stats
        python dev.py -e print --db PZ_MTEST --allgt --noqcache --qaid4 \
            -t custom:rrvsone_on=True --print-confusion-stats
        python -m ibeis print_results -t default --db PZ_MTEST -a ctrl
        python -m ibeis print_results -t default --db PZ_MTEST -a ctrl
        python -m ibeis print_results --db PZ_MTEST -a default \
            -t default:lnbnn_on=True default:lnbnn_on=False,bar_l2_on=True \
            default:lnbnn_on=False,normonly_on=True

    CommandLine:
        python -m ibeis.expt.experiment_printres --test-print_results
        utprof.py -m ibeis.expt.experiment_printres --test-print_results

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.experiment_printres import *  # NOQA
        >>> from ibeis.init import main_helpers
        >>> ibs, testres = main_helpers.testdata_expts(
        >>>     'pz_mtest', a='default:dpername=1,qpername=[1,2]',
        >>>     t='default:fg_on=false')
        >>> result = print_results(ibs, testres)
        >>> print(result)
    """
    # Pull the relevant attributes off the testres in one shot
    tup = ut.dict_take(testres.__dict__, [
        'cfg_list', 'cfgx2_cmsinfo', 'testnameid', 'cfgx2_lbl', 'cfgx2_qreq_'
    ])
    (cfg_list, cfgx2_cmsinfo, testnameid, cfgx2_lbl, cfgx2_qreq_) = tup
    # join_acfgs = kwargs.get('join_acfgs', False)

    print(' --- PRINT RESULTS ---')
    # print(' use --rank-lt-list=1,5 to specify X_LIST')
    if True:
        # Num of ranks less than to score
        X_LIST = testres.get_X_LIST()
        #X_LIST = [1, 5]

        #nConfig = len(cfg_list)
        #nQuery = len(testres.qaids)
        # number of queries per configuration
        cfgx2_nQuery = list(map(len, testres.cfgx2_qaids))
        #cfgx2_qx2_ranks = testres.get_infoprop_list('qx2_gt_rank')
        #--------------------

        # A positive scorediff indicates the groundtruth was better than the
        # groundfalse scores
        #istrue_list = [scorediff > 0 for scorediff in scorediffs_mat]
        #isfalse_list = [~istrue for istrue in istrue_list]

        #------------
        # Build Colscore
        nLessX_dict = testres.get_nLessX_dict()
        # cfgx2_hist, edges = testres.get_rank_histograms(bins=X_LIST + [np.inf],
        #                                                 join_acfgs=join_acfgs)
        # cfgx2_cumsum = cfgx2_hist.cumsum(axis=1)
        #------------

        best_rankscore_summary = []
        #to_intersect_list = []
        # print each configs scores less than X=thresh
        for X, cfgx2_nLessX in six.iteritems(nLessX_dict):
            max_nLessX = cfgx2_nLessX.max()
            # every config that achieved the best "#ranks < X" count
            bestX_cfgx_list = np.where(cfgx2_nLessX == max_nLessX)[0]
            best_rankscore = '[cfg*] %d cfg(s) scored ' % len(bestX_cfgx_list)
            # FIXME
            best_rankscore += rankscore_str(X, max_nLessX,
                                            cfgx2_nQuery[bestX_cfgx_list[0]])
            best_rankscore_summary += [best_rankscore]

        @ut.argv_flag_dec
        def intersect_hack():
            # NOTE(review): exploratory/dead code — the call below is
            # commented out, and `testres_list` at the end is undefined in
            # this scope.
            failed = testres.rank_mat > 0
            colx2_failed = [np.nonzero(failed_col)[0]
                            for failed_col in failed.T]
            #failed_col2_only = np.setdiff1d(colx2_failed[1], colx2_failed[0])
            #failed_col2_only_aids = ut.take(testres.qaids, failed_col2_only)
            failed_col1_only = np.setdiff1d(colx2_failed[0], colx2_failed[1])
            failed_col1_only_aids = ut.take(testres.qaids, failed_col1_only)
            gt_aids1 = ibs.get_annot_groundtruth(
                failed_col1_only_aids, daid_list=testres.cfgx2_qreq_[0].daids)
            gt_aids2 = ibs.get_annot_groundtruth(
                failed_col1_only_aids, daid_list=testres.cfgx2_qreq_[1].daids)
            qaids_expt = failed_col1_only_aids
            gt_avl_aids1 = ut.flatten(gt_aids1)
            gt_avl_aids2 = list(
                set(ut.flatten(gt_aids2)).difference(gt_avl_aids1))
            ibs.print_annotconfig_stats(qaids_expt, gt_avl_aids1)
            ibs.print_annotconfig_stats(qaids_expt, gt_avl_aids2)
            #jsontext = ut.to_json({
            #    'qaids': list(qaids_expt),
            #    'dinclude_aids1': list(gt_aids_expt1),
            #    'dinclude_aids2': list(gt_aids_expt2),
            #})
            #annotation_configs.varysize_pzm
            #from ibeis.expt import annotation_configs

            acfg = testres.acfg_list[0]
            import copy
            acfg1 = copy.deepcopy(acfg)
            acfg2 = copy.deepcopy(acfg)
            acfg1['qcfg']['min_pername'] = None
            acfg2['qcfg']['min_pername'] = None
            acfg1['dcfg']['min_pername'] = None
            acfg2['dcfg']['min_gt_per_name'] = None

            acfg1['qcfg']['default_aids'] = qaids_expt
            acfg1['dcfg']['gt_avl_aids'] = gt_avl_aids1
            acfg2['qcfg']['default_aids'] = qaids_expt
            acfg2['dcfg']['gt_avl_aids'] = gt_avl_aids2

            from ibeis.init import filter_annots
            from ibeis.expt import experiment_helpers

            annots1 = filter_annots.expand_acfgs(ibs, acfg1, verbose=True)
            annots2 = filter_annots.expand_acfgs(ibs, acfg2, verbose=True)

            acfg_name_list = dict(  # NOQA
                acfg_list=[acfg1, acfg2],
                expanded_aids_list=[annots1, annots2],
            )
            test_cfg_name_list = ['candidacy_k']
            cfgdict_list, pipecfg_list = experiment_helpers.get_pipecfg_list(
                test_cfg_name_list, ibs=ibs)

            t1, t2 = testres_list  # NOQA
        #intersect_hack()

        #@ut.argv_flag_dec
        #def print_rowlbl():
        #    print('=====================')
        #    print('[harn] Row/Query Labels: %s' % testnameid)
        #    print('=====================')
        #    print('[harn] queries:\n%s' % '\n'.join(qx2_lbl))
        #print_rowlbl()
        #------------

        @ut.argv_flag_dec
        def print_collbl():
            # Enumerated label per pipeline configuration
            print('=====================')
            print('[harn] Col/Config Labels: %s' % testnameid)
            print('=====================')
            enum_cfgx2_lbl = [
                '%2d) %s' % (count, cfglbl)
                for count, cfglbl in enumerate(cfgx2_lbl)
            ]
            print('[harn] cfglbl:\n%s' % '\n'.join(enum_cfgx2_lbl))
        print_collbl()
        #------------

        @ut.argv_flag_dec
        def print_cfgstr():
            # Full config string per pipeline configuration
            print('=====================')
            print('[harn] Config Strings: %s' % testnameid)
            print('=====================')
            cfgstr_list = [query_cfg.get_cfgstr() for query_cfg in cfg_list]
            enum_cfgstr_list = [
                '%2d) %s' % (count, cfgstr)
                for count, cfgstr in enumerate(cfgstr_list)
            ]
            print('\n[harn] cfgstr:\n%s' % '\n'.join(enum_cfgstr_list))
        print_cfgstr(**kwargs)

        @ut.argv_flag_dec()
        def print_colscore():
            # Per-config "#ranks < X" scores, sorted best-last per X
            print('==================')
            print('[harn] Scores per Config: %s' % testnameid)
            print('==================')
            #for cfgx in range(nConfig):
            #    print('[score] %s' % (cfgx2_lbl[cfgx]))
            #    for X in X_LIST:
            #        nLessX_ = nLessX_dict[int(X)][cfgx]
            #        print('    ' + rankscore_str(X, nLessX_, nQuery))
            print('\n[harn] ... sorted scores')
            for X in X_LIST:
                print('\n[harn] Sorted #ranks < %r scores' % (X))
                sortx = np.array(nLessX_dict[int(X)]).argsort()
                #frac_list = (nLessX_dict[int(X)] / cfgx2_nQuery)[:, None]
                #print('cfgx2_nQuery = %r' % (cfgx2_nQuery,))
                #print('frac_list = %r' % (frac_list,))
                #print('Pairwise Difference: ' + str(ut.safe_pdist(frac_list, metric=ut.absdiff)))
                for cfgx in sortx:
                    nLessX_ = nLessX_dict[int(X)][cfgx]
                    rankstr = rankscore_str(X, nLessX_, cfgx2_nQuery[cfgx],
                                            withlbl=False)
                    print('[score] %s --- %s' % (rankstr, cfgx2_lbl[cfgx]))
        print_colscore(**kwargs)

        # Also flag-gated; runs testres's own success-percentage report
        ut.argv_flag_dec(testres.print_percent_identification_success)(**kwargs)

        # Final boxed summary of the best rank scores
        sumstrs = []
        sumstrs.append('++===========================')
        sumstrs.append('|| [cfg*] TestName: %s' % testnameid)
        sumstrs.append('||---------------------------')
        sumstrs.append(ut.joins('\n|| ', best_rankscore_summary))
        sumstrs.append('LL===========================')
        summary_str = '\n'.join(sumstrs)
        #print(summary_str)
        ut.colorprint(summary_str, 'blue')

        print('To enable all printouts add --print-all to the commandline')