def vary_two_cfg(hs, qcx, cx, query_cfg, vary_cfg, fnum=1):
    if len(vary_cfg) > 2:
        raise Exception('can only vary at most two parameters')
    print('[dev] vary_two_cfg: q' + hs.vs_str(qcx, cx))
    # list() keeps the key/value indexing below valid under Python 3 as well
    cfg_keys = list(vary_cfg.keys())
    cfg_vals = list(vary_cfg.values())
    cfg1_name = cfg_keys[0]
    cfg2_name = cfg_keys[1]
    cfg1_steps = cfg_vals[0]
    cfg2_steps = cfg_vals[1]
    nRows = len(cfg1_steps)
    nCols = len(cfg2_steps)

    print('[dev] Varying configs: nRows=%r, nCols=%r' % (nRows, nCols))
    print('[dev] %r = %r' % (cfg1_name, cfg1_steps))
    print('[dev] %r = %r' % (cfg2_name, cfg2_steps))
    ylabel_args = dict(rotation='horizontal',
                       verticalalignment='bottom',
                       horizontalalignment='right',
                       fontproperties=df2.FONTS.medbold)
    xlabel_args = dict(fontproperties=df2.FONTS.medbold)
    # Silence module printing while the grid of queries runs
    print_lock_ = helpers.ModulePrintLock(mc3, df2)
    assign_alg = query_cfg.agg_cfg.query_type
    vert = not hs.args.horiz
    plt_match_args = dict(fnum=fnum, show_gname=False, showTF=False, vert=vert)
    # Vary cfg1 over the rows of the subplot grid
    for rowx, cfg1_value in enumerate(cfg1_steps):
        query_cfg.update_cfg(**{cfg1_name: cfg1_value})
        y_title = cfg1_name + '=' + helpers.format(cfg1_value, 3)
        # Vary cfg2 over the columns
        for colx, cfg2_value in enumerate(cfg2_steps):
            query_cfg.update_cfg(**{cfg2_name: cfg2_value})
            pnum = (nRows, nCols, rowx * nCols + colx + 1)  # HACK
            # query only the chips of interest (groundtruth) when doing vsone
            if assign_alg == 'vsone':
                res = hs.query_groundtruth(qcx, query_cfg)
            # query the entire database in vsmany (just as fast as groundtruth)
            elif assign_alg == 'vsmany':
                res = hs.query(qcx, query_cfg)
            res.plot_single_match(hs, cx, pnum=pnum, **plt_match_args)
            x_title = cfg2_name + '=' + helpers.format(cfg2_value, 3)
            ax = df2.gca()
            if rowx == len(cfg1_steps) - 1:
                ax.set_xlabel(x_title, **xlabel_args)
            if colx == 0:
                ax.set_ylabel(y_title, **ylabel_args)
    del print_lock_
    vary_title = '%s vary %s and %s' % (assign_alg, cfg1_name, cfg2_name)
    figtitle = '%s %s %s' % (vary_title, hs.vs_str(qcx, cx),
                             str(hs.cx2_property(qcx, 'Notes')))
    subtitle = mc3.simplify_test_uid(query_cfg.get_uid())
    df2.set_figtitle(figtitle, subtitle)
    df2.adjust_subplots_xylabels()
    fnum += 1
    viz.save_if_requested(hs, vary_title)
    return fnum
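# A minimal usage sketch (hypothetical; nothing in this module calls it).
# The parameter names and step values are illustrative assumptions. An
# OrderedDict makes the row/column assignment explicit, since plain dicts do
# not guarantee key order before Python 3.7 and vary_two_cfg takes the first
# key as the subplot rows and the second as the columns.
def _demo_vary_two_cfg(hs, qcx, cx, query_cfg):
    from collections import OrderedDict
    vary_cfg = OrderedDict([
        ('K',            [2, 5, 10]),       # first key  -> rows
        ('ratio_thresh', [1.2, 1.6, 2.0]),  # second key -> columns
    ])
    return vary_two_cfg(hs, qcx, cx, query_cfg, vary_cfg, fnum=1)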
def dump_orgres_matches(allres, orgres_type):
    orgres = allres.__dict__[orgres_type]
    hs = allres.hs
    qcx2_res = allres.qcx2_res
    # loop over each query / result of interest
    for qcx, cx, score, rank in orgres.iter():
        query_gname, _ = os.path.splitext(hs.tables.gx2_gname[hs.tables.cx2_gx[qcx]])
        result_gname, _ = os.path.splitext(hs.tables.gx2_gname[hs.tables.cx2_gx[cx]])
        res = qcx2_res[qcx]
        df2.figure(fnum=1, plotnum=121)
        df2.show_matches_annote_res(res, hs, cx, SV=False, fnum=1, plotnum=121)
        df2.show_matches_annote_res(res, hs, cx, SV=True, fnum=1, plotnum=122)
        big_title = 'score=%.2f_rank=%d_q=%s_r=%s' % (score, rank,
                                                      query_gname, result_gname)
        df2.set_figtitle(big_title)
        viz.__dump_or_browse(allres, orgres_type + '_matches' + allres.title_suffix)
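# A likely invocation sketch (hypothetical): the orgres keys mirror the
# 'top_true' / 'top_false' organized-result types used by
# dump_feature_pair_analysis below.
def _demo_dump_orgres_matches(allres):
    dump_orgres_matches(allres, 'top_true')
    dump_orgres_matches(allres, 'top_false')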
def dump_feature_pair_analysis(allres):
    print('[rr2] Doing: feature pair analysis')
    # TODO: Measure score consistency over a spatial area.
    # Measures entropy of matching vs nonmatching descriptors
    # Measures scale of matching vs nonmatching descriptors
    hs = allres.hs
    qcx2_res = allres.qcx2_res
    import scipy.stats  # scipy alone does not guarantee scipy.stats is loaded

    def _hist_prob_x(desc, bw_factor):
        # Choose number of bins based on the bandwidth
        bin_range = (0, 256)  # assuming input is uint8
        bins = bin_range[1] // bw_factor
        bw_factor = bin_range[1] / bins
        # Compute the probability mass function, each w.r.t a single descriptor
        hist_params = dict(bins=bins, range=bin_range, density=True)
        hist_func = np.histogram
        desc_pmf = [hist_func(d, **hist_params)[0] for d in desc]
        # Compute the probability that you saw what you saw
        # TODO: could use linear interpolation for a bit more robustness here
        bin_vals = [np.array(np.floor(d / bw_factor), dtype=np.uint8) for d in desc]
        hist_prob_x = [pmf[vals] for pmf, vals in zip(desc_pmf, bin_vals)]
        return hist_prob_x

    def _gkde_prob_x(desc, bw_factor):
        # Estimate the probability density function, each w.r.t a single descriptor
        gkde_func = scipy.stats.gaussian_kde
        desc_pdf = [gkde_func(d, bw_factor) for d in desc]
        gkde_prob_x = [pdf(d) for pdf, d in zip(desc_pdf, desc)]
        return gkde_prob_x

    def descriptor_entropy(desc, bw_factor=4):
        'Computes the Shannon entropy of each descriptor in desc'
        # Shannon entropy = -sum(p(x) * log2(p(x)))
        prob_x = _hist_prob_x(desc, bw_factor)
        entropy = [-(px * np.log2(px)).sum() for px in prob_x]
        return entropy

    # Load features if we need to
    if hs.feats.cx2_desc.size == 0:
        print(' * forcing load of descriptors')
        hs.load_features()
    cx2_desc = hs.feats.cx2_desc
    cx2_kpts = hs.feats.cx2_kpts

    def measure_feat_pairs(allres, orgtype='top_true'):
        print('Measure ' + orgtype + ' pairs')
        orgres = allres.__dict__[orgtype]
        entropy_list = []
        scale_list = []
        score_list = []
        lbl = 'Measuring ' + orgtype + ' pair '
        fmt_str = helpers.make_progress_fmt_str(len(orgres), lbl)
        rank_skips = []
        gt_skips = []
        for ix, (qcx, cx, score, rank) in enumerate(orgres.iter()):
            helpers.print_(fmt_str % (ix + 1,))
            # Skip low ranks
            if rank > 5:
                rank_skips.append(qcx)
                continue
            other_cxs = hs.get_other_indexed_cxs(qcx)
            # Skip queries with no groundtruth
            if len(other_cxs) == 0:
                gt_skips.append(qcx)
                continue
            res = qcx2_res[qcx]
            # Get matching feature indexes
            fm = res.cx2_fm[cx]
            # Get their scores
            fs = res.cx2_fs[cx]
            # Get matching descriptors
            printDBG('\nfm.shape=%r' % (fm.shape,))
            desc1 = cx2_desc[qcx][fm[:, 0]]
            desc2 = cx2_desc[cx][fm[:, 1]]
            # Get matching keypoints
            kpts1 = cx2_kpts[qcx][fm[:, 0]]
            kpts2 = cx2_kpts[cx][fm[:, 1]]
            # Get their scale
            scale1_m = sv2.keypoint_scale(kpts1)
            scale2_m = sv2.keypoint_scale(kpts2)
            # Get their entropy
            entropy1 = descriptor_entropy(desc1, bw_factor=1)
            entropy2 = descriptor_entropy(desc2, bw_factor=1)
            # Append to results (list() keeps this correct under Python 3,
            # where zip returns an iterator)
            entropy_tup = np.array(list(zip(entropy1, entropy2)))
            scale_tup = np.array(list(zip(scale1_m, scale2_m)))
            entropy_tup = entropy_tup.reshape(len(entropy_tup), 2)
            scale_tup = scale_tup.reshape(len(scale_tup), 2)
            entropy_list.append(entropy_tup)
            scale_list.append(scale_tup)
            score_list.append(fs)
        print('Skipped %d total.' % (len(rank_skips) + len(gt_skips),))
        print('Skipped %d for rank > 5, %d for no gt' % (len(rank_skips),
                                                         len(gt_skips)))
        print(np.unique(list(map(len, entropy_list))))

        def evstack(tup):
            return np.vstack(tup) if len(tup) > 0 else np.empty((0, 2))

        def ehstack(tup):
            # scores are 1-D arrays, so the empty fallback must be 1-D too
            return np.hstack(tup) if len(tup) > 0 else np.empty(0)

        entropy_pairs = evstack(entropy_list)
        scale_pairs = evstack(scale_list)
        scores = ehstack(score_list)
        print('\n * Measured %d pairs' % len(entropy_pairs))
        return entropy_pairs, scale_pairs, scores

    tt_entropy, tt_scale, tt_scores = measure_feat_pairs(allres, 'top_true')
    tf_entropy, tf_scale, tf_scores = measure_feat_pairs(allres, 'top_false')

    # Measure ratios
    def measure_ratio(arr):
        return arr[:, 0] / arr[:, 1] if len(arr) > 0 else np.array([])

    tt_entropy_ratio = measure_ratio(tt_entropy)
    tf_entropy_ratio = measure_ratio(tf_entropy)
    tt_scale_ratio = measure_ratio(tt_scale)
    tf_scale_ratio = measure_ratio(tf_scale)

    title_suffix = allres.title_suffix

    # Entropy vs Score
    df2.figure(fnum=1, docla=True)
    df2.figure(fnum=1, plotnum=(2, 2, 1))
    df2.plot2(tt_entropy[:, 0], tt_scores, 'gx', 'entropy1', 'score', 'Top True')
    df2.figure(fnum=1, plotnum=(2, 2, 2))
    df2.plot2(tf_entropy[:, 0], tf_scores, 'rx', 'entropy1', 'score', 'Top False')
    df2.figure(fnum=1, plotnum=(2, 2, 3))
    df2.plot2(tt_entropy[:, 1], tt_scores, 'gx', 'entropy2', 'score', 'Top True')
    df2.figure(fnum=1, plotnum=(2, 2, 4))
    df2.plot2(tf_entropy[:, 1], tf_scores, 'rx', 'entropy2', 'score', 'Top False')
    df2.set_figtitle('Entropy vs Score -- ' + title_suffix)
    viz.__dump_or_browse(allres, 'pair_analysis')

    # Scale vs Score
    df2.figure(fnum=2, plotnum=(2, 2, 1), docla=True)
    df2.plot2(tt_scale[:, 0], tt_scores, 'gx', 'scale1', 'score', 'Top True')
    df2.figure(fnum=2, plotnum=(2, 2, 2))
    df2.plot2(tf_scale[:, 0], tf_scores, 'rx', 'scale1', 'score', 'Top False')
    df2.figure(fnum=2, plotnum=(2, 2, 3))
    df2.plot2(tt_scale[:, 1], tt_scores, 'gx', 'scale2', 'score', 'Top True')
    df2.figure(fnum=2, plotnum=(2, 2, 4))
    df2.plot2(tf_scale[:, 1], tf_scores, 'rx', 'scale2', 'score', 'Top False')
    df2.set_figtitle('Scale vs Score -- ' + title_suffix)
    viz.__dump_or_browse(allres, 'pair_analysis')

    # Entropy Ratio vs Score
    df2.figure(fnum=3, plotnum=(1, 2, 1), docla=True)
    df2.plot2(tt_entropy_ratio, tt_scores, 'gx', 'entropy-ratio', 'score', 'Top True')
    df2.figure(fnum=3, plotnum=(1, 2, 2))
    df2.plot2(tf_entropy_ratio, tf_scores, 'rx', 'entropy-ratio', 'score', 'Top False')
    df2.set_figtitle('Entropy Ratio vs Score -- ' + title_suffix)
    viz.__dump_or_browse(allres, 'pair_analysis')

    # Scale Ratio vs Score
    df2.figure(fnum=4, plotnum=(1, 2, 1), docla=True)
    df2.plot2(tt_scale_ratio, tt_scores, 'gx', 'scale-ratio', 'score', 'Top True')
    df2.figure(fnum=4, plotnum=(1, 2, 2))
    df2.plot2(tf_scale_ratio, tf_scores, 'rx', 'scale-ratio', 'score', 'Top False')
    df2.set_figtitle('Scale Ratio vs Score -- ' + title_suffix)
    viz.__dump_or_browse(allres, 'pair_analysis')
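# Self-contained sketch of the histogram-based entropy measure above, run on
# synthetic stand-in data (5 SIFT-like 128-dim uint8 descriptors). It mirrors
# the _hist_prob_x/descriptor_entropy computation: build a per-descriptor pmf
# with np.histogram(density=True), look up each observed value's bin, then
# take entropy = -sum(p(x) * log2(p(x))) over the observed values.
def _demo_descriptor_entropy():
    import numpy as np
    rng = np.random.RandomState(0)
    desc = rng.randint(0, 256, size=(5, 128)).astype(np.uint8)
    bw_factor = 4
    bins = 256 // bw_factor
    # Per-descriptor probability mass function over its 128 values
    pmfs = [np.histogram(d, bins=bins, range=(0, 256), density=True)[0]
            for d in desc]
    # Probability of each observed value under its own pmf
    bin_vals = [(d // bw_factor).astype(np.intp) for d in desc]
    prob_x = [pmf[vals] for pmf, vals in zip(pmfs, bin_vals)]
    entropy = [-(px * np.log2(px)).sum() for px in prob_x]
    print(entropy)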
def show_splash(back, fnum, view='Nice', **kwargs):
    if df2.plt.fignum_exists(fnum):
        df2.figure(fnum=fnum, docla=True, doclf=True)
        viz.show_splash(fnum=fnum)
        df2.set_figtitle('%s View' % view)
                                              scale_factor=scale_factor)
    dstimg_thresh = dstimg.copy()
    dstimg_thresh[dstimg_thresh > 0] = 1
    # Get matching coverage
    hs.prefs.query_cfg.agg_cfg.score_method = 'coverage'
    print(hs.get_cache_uid())
    res = hs.query(cx)
    nTop = 2
    for tx in xrange(nTop):
        cx2 = res.topN_cxs(hs)[tx]
        dstimg1, dstimg2, args_, kwargs_ = test_result_coverage(hs, res, cx2,
                                                                scale_factor)
        test_find_coverage_score(hs, res)
        res.show_chipres(hs, cx2, fnum=fnum)
        df2.set_figtitle('matching viz' + str(tx), incanvas=False)
        fnum += 1
        df2.show_chipmatch2(dstimg1, dstimg2, *args_, fnum=fnum, **kwargs_)
        df2.set_figtitle('matching coverage' + str(tx))
        fnum += 1
    df2.imshow(srcimg, fnum=fnum, heatmap=True)
    df2.set_figtitle('gaussian weights')
    fnum += 1
    df2.imshow(dstimg, fnum=fnum, heatmap=True)
    df2.set_figtitle('chip coverage map')
    fnum += 1
    df2.imshow(dstimg_thresh, fnum=fnum, heatmap=True)
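# Illustrative sketch (an assumption, not this codebase's API) of how a chip
# coverage map like dstimg above can be built and binarized: splat a
# peak-normalized Gaussian weight patch at each keypoint location, take the
# pixelwise maximum, then threshold.
def _demo_coverage_map():
    import numpy as np

    def gaussian_patch(size=19, sigma=5.0):
        # 2-D Gaussian weights, peak-normalized to 1
        r = np.arange(size) - size // 2
        xx, yy = np.meshgrid(r, r)
        gauss = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
        return gauss / gauss.max()

    def coverage_map(shape, xys, patch):
        dstimg = np.zeros(shape)
        ph, pw = patch.shape
        for (x, y) in xys:
            y0, x0 = int(y) - ph // 2, int(x) - pw // 2
            # clip the patch to the image bounds
            ys = slice(max(y0, 0), min(y0 + ph, shape[0]))
            xs = slice(max(x0, 0), min(x0 + pw, shape[1]))
            pys = slice(ys.start - y0, ys.stop - y0)
            pxs = slice(xs.start - x0, xs.stop - x0)
            dstimg[ys, xs] = np.maximum(dstimg[ys, xs], patch[pys, pxs])
        return dstimg

    patch = gaussian_patch()
    dstimg = coverage_map((100, 120), [(30, 40), (80, 60)], patch)
    dstimg_thresh = (dstimg > 0).astype(np.uint8)  # binarize, as above
    print('%d covered pixels' % dstimg_thresh.sum())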
    did_exist = df2.plt.fignum_exists(fnum)
    df2.figure(fnum=fnum, docla=True, doclf=True)
    #------- function: interact.interact_chipres(back.hs, res, cx=cx, fnum=fnum)
    annote_ptr = [0]
    pnum = (1, 1, 1)
    xywh2_ptr = [None]
    #------- function: def _chipmatch_view(pnum=(1, 1, 1), **kwargs):
    mode = annote_ptr[0]
    draw_ell = mode >= 1
    draw_lines = mode == 2
    annote_ptr[0] = (annote_ptr[0] + 1) % 3
    df2.figure(fnum=fnum, docla=True, doclf=True)
    # TODO: rename this to remove res and rectify with show_chipres
    tup = res_show_chipres(query_res, hs, db_cx, fnum=fnum, pnum=pnum,
                           draw_lines=draw_lines, draw_ell=draw_ell,
                           colorbar_=True)
    figtitle = 'Inspect Query Result'
    df2.set_figtitle(figtitle + hs.vs_str(query_cx, db_cx))
    # if Flag_save & 1:
    fpath = save_dir
    usetitle = 'querycid_' + str(query_cid) + '_db' + str(db_cid)
    df2.save_figure(fnum=fnum, fpath=fpath, usetitle=usetitle, overwrite=True)
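# Sketch of the one-element-list state idiom used by _chipmatch_view above:
# mutating annote_ptr[0] lets a nested redraw callback cycle the annotation
# mode across calls (0: plain, 1: ellipses, 2: ellipses + match lines)
# without `nonlocal`, which Python 2 lacks.
def _demo_annote_mode_cycling():
    annote_ptr = [0]

    def _view():
        mode = annote_ptr[0]
        draw_ell = mode >= 1
        draw_lines = mode == 2
        annote_ptr[0] = (annote_ptr[0] + 1) % 3  # advance for the next redraw
        print('mode=%d draw_ell=%r draw_lines=%r' % (mode, draw_ell, draw_lines))

    for _ in range(4):
        _view()  # cycles: plain -> ellipses -> ellipses+lines -> plain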