def dump_orgres_matches(allres, orgres_type):
    orgres = allres.__dict__[orgres_type]
    hs = allres.hs
    qcx2_res = allres.qcx2_res
    # Loop over each query / result of interest
    for qcx, cx, score, rank in orgres.iter():
        query_gname, _ = os.path.splitext(
            hs.tables.gx2_gname[hs.tables.cx2_gx[qcx]])
        result_gname, _ = os.path.splitext(
            hs.tables.gx2_gname[hs.tables.cx2_gx[cx]])
        res = qcx2_res[qcx]
        # Show the match without and with spatial verification side by side
        df2.figure(fnum=1, plotnum=121)
        df2.show_matches_annote_res(res, hs, cx, SV=False, fnum=1, plotnum=121)
        df2.show_matches_annote_res(res, hs, cx, SV=True, fnum=1, plotnum=122)
        big_title = 'score=%.2f_rank=%d_q=%s_r=%s' % (score, rank,
                                                      query_gname, result_gname)
        df2.set_figtitle(big_title)
        viz.__dump_or_browse(allres, orgres_type + '_matches' + allres.title_suffix)
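# Usage sketch (hypothetical invocation, not part of the module): the loop
# below assumes `allres` carries organized-result attributes such as
# `top_true` and `top_false`, the orgres types used elsewhere in this file.
#
#   for orgres_type in ('top_true', 'top_false'):
#       dump_orgres_matches(allres, orgres_type)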
def dump_feature_pair_analysis(allres):
    print('[rr2] Doing: feature pair analysis')
    # TODO: Measure score consistency over a spatial area.
    # Measures entropy of matching vs nonmatching descriptors
    # Measures scale of matching vs nonmatching descriptors
    hs = allres.hs
    qcx2_res = allres.qcx2_res
    import scipy.stats

    def _hist_prob_x(desc, bw_factor):
        # Choose the number of bins based on the bandwidth
        bin_range = (0, 256)  # assuming the input is uint8
        bins = bin_range[1] // bw_factor
        bw_factor = bin_range[1] / bins
        # Compute the probability mass function, each w.r.t. a single descriptor
        hist_params = dict(bins=bins, range=bin_range, density=True)
        hist_func = np.histogram
        desc_pmf = [hist_func(d, **hist_params)[0] for d in desc]
        # Compute the probability that you saw what you saw
        # TODO: could use linear interpolation for a bit more robustness here
        bin_vals = [np.array(np.floor(d / bw_factor), dtype=np.uint8)
                    for d in desc]
        hist_prob_x = [pmf[vals] for pmf, vals in zip(desc_pmf, bin_vals)]
        return hist_prob_x

    def _gkde_prob_x(desc, bw_factor):
        # Unused alternative to _hist_prob_x: estimate the probability density
        # function with a Gaussian KDE, each w.r.t. a single descriptor
        gkde_func = scipy.stats.gaussian_kde
        desc_pdf = [gkde_func(d, bw_factor) for d in desc]
        gkde_prob_x = [pdf(d) for pdf, d in zip(desc_pdf, desc)]
        return gkde_prob_x

    def descriptor_entropy(desc, bw_factor=4):
        'Computes the Shannon entropy of each descriptor in desc'
        # Shannon entropy = -sum(p(x) * log(p(x)))
        prob_x = _hist_prob_x(desc, bw_factor)
        entropy = [-(px * np.log2(px)).sum() for px in prob_x]
        return entropy

    # Load features if we need to
    if hs.feats.cx2_desc.size == 0:
        print(' * forcing load of descriptors')
        hs.load_features()
    cx2_desc = hs.feats.cx2_desc
    cx2_kpts = hs.feats.cx2_kpts

    def measure_feat_pairs(allres, orgtype='top_true'):
        print('Measure ' + orgtype + ' pairs')
        orgres = allres.__dict__[orgtype]
        entropy_list = []
        scale_list = []
        score_list = []
        lbl = 'Measuring ' + orgtype + ' pair '
        fmt_str = helpers.make_progress_fmt_str(len(orgres), lbl)
        rank_skips = []
        gt_skips = []
        for ix, (qcx, cx, score, rank) in enumerate(orgres.iter()):
            helpers.print_(fmt_str % (ix + 1,))
            # Skip low ranks
            if rank > 5:
                rank_skips.append(qcx)
                continue
            other_cxs = hs.get_other_indexed_cxs(qcx)
            # Skip queries with no groundtruth
            if len(other_cxs) == 0:
                gt_skips.append(qcx)
                continue
            res = qcx2_res[qcx]
            # Get matching feature indexes
            fm = res.cx2_fm[cx]
            # Get their scores
            fs = res.cx2_fs[cx]
            # Get matching descriptors
            printDBG('\nfm.shape=%r' % (fm.shape,))
            desc1 = cx2_desc[qcx][fm[:, 0]]
            desc2 = cx2_desc[cx][fm[:, 1]]
            # Get matching keypoints
            kpts1 = cx2_kpts[qcx][fm[:, 0]]
            kpts2 = cx2_kpts[cx][fm[:, 1]]
            # Get their scale
            scale1_m = sv2.keypoint_scale(kpts1)
            scale2_m = sv2.keypoint_scale(kpts2)
            # Get their entropy
            entropy1 = descriptor_entropy(desc1, bw_factor=1)
            entropy2 = descriptor_entropy(desc2, bw_factor=1)
            # Append to results (list(zip(...)) keeps this Python 3 safe)
            entropy_tup = np.array(list(zip(entropy1, entropy2)))
            scale_tup = np.array(list(zip(scale1_m, scale2_m)))
            entropy_tup = entropy_tup.reshape(len(entropy_tup), 2)
            scale_tup = scale_tup.reshape(len(scale_tup), 2)
            entropy_list.append(entropy_tup)
            scale_list.append(scale_tup)
            score_list.append(fs)
        print('Skipped %d total.' % (len(rank_skips) + len(gt_skips),))
        print('Skipped %d for rank > 5, %d for no gt'
              % (len(rank_skips), len(gt_skips)))
        print(np.unique(list(map(len, entropy_list))))

        def evstack(tup):
            return np.vstack(tup) if len(tup) > 0 else np.empty((0, 2))

        def ehstack(tup):
            return np.hstack(tup) if len(tup) > 0 else np.empty((0, 2))

        entropy_pairs = evstack(entropy_list)
        scale_pairs = evstack(scale_list)
        scores = ehstack(score_list)
        print('\n * Measured %d pairs' % len(entropy_pairs))
        return entropy_pairs, scale_pairs, scores

    tt_entropy, tt_scale, tt_scores = measure_feat_pairs(allres, 'top_true')
    tf_entropy, tf_scale, tf_scores = measure_feat_pairs(allres, 'top_false')

    # Measure ratios
    def measure_ratio(arr):
        return arr[:, 0] / arr[:, 1] if len(arr) > 0 else np.array([])

    tt_entropy_ratio = measure_ratio(tt_entropy)
    tf_entropy_ratio = measure_ratio(tf_entropy)
    tt_scale_ratio = measure_ratio(tt_scale)
    tf_scale_ratio = measure_ratio(tf_scale)

    title_suffix = allres.title_suffix

    # Entropy vs Score
    df2.figure(fnum=1, docla=True)
    df2.figure(fnum=1, plotnum=(2, 2, 1))
    df2.plot2(tt_entropy[:, 0], tt_scores, 'gx', 'entropy1', 'score', 'Top True')
    df2.figure(fnum=1, plotnum=(2, 2, 2))
    df2.plot2(tf_entropy[:, 0], tf_scores, 'rx', 'entropy1', 'score', 'Top False')
    df2.figure(fnum=1, plotnum=(2, 2, 3))
    df2.plot2(tt_entropy[:, 1], tt_scores, 'gx', 'entropy2', 'score', 'Top True')
    df2.figure(fnum=1, plotnum=(2, 2, 4))
    df2.plot2(tf_entropy[:, 1], tf_scores, 'rx', 'entropy2', 'score', 'Top False')
    df2.set_figtitle('Entropy vs Score -- ' + title_suffix)
    viz.__dump_or_browse(allres, 'pair_analysis')

    # Scale vs Score
    df2.figure(fnum=2, plotnum=(2, 2, 1), docla=True)
    df2.plot2(tt_scale[:, 0], tt_scores, 'gx', 'scale1', 'score', 'Top True')
    df2.figure(fnum=2, plotnum=(2, 2, 2))
    df2.plot2(tf_scale[:, 0], tf_scores, 'rx', 'scale1', 'score', 'Top False')
    df2.figure(fnum=2, plotnum=(2, 2, 3))
    df2.plot2(tt_scale[:, 1], tt_scores, 'gx', 'scale2', 'score', 'Top True')
    df2.figure(fnum=2, plotnum=(2, 2, 4))
    df2.plot2(tf_scale[:, 1], tf_scores, 'rx', 'scale2', 'score', 'Top False')
    df2.set_figtitle('Scale vs Score -- ' + title_suffix)
    viz.__dump_or_browse(allres, 'pair_analysis')

    # Entropy Ratio vs Score
    df2.figure(fnum=3, plotnum=(1, 2, 1), docla=True)
    df2.plot2(tt_entropy_ratio, tt_scores, 'gx', 'entropy-ratio', 'score', 'Top True')
    df2.figure(fnum=3, plotnum=(1, 2, 2))
    df2.plot2(tf_entropy_ratio, tf_scores, 'rx', 'entropy-ratio', 'score', 'Top False')
    df2.set_figtitle('Entropy Ratio vs Score -- ' + title_suffix)
    viz.__dump_or_browse(allres, 'pair_analysis')

    # Scale Ratio vs Score
    df2.figure(fnum=4, plotnum=(1, 2, 1), docla=True)
    df2.plot2(tt_scale_ratio, tt_scores, 'gx', 'scale-ratio', 'score', 'Top True')
    df2.figure(fnum=4, plotnum=(1, 2, 2))
    df2.plot2(tf_scale_ratio, tf_scores, 'rx', 'scale-ratio', 'score', 'Top False')
    df2.set_figtitle('Scale Ratio vs Score -- ' + title_suffix)
    viz.__dump_or_browse(allres, 'pair_analysis')
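# Minimal self-contained sketch of the histogram-based entropy computation
# used by descriptor_entropy above, run on synthetic data. The demo name and
# the random descriptors are illustrative assumptions, not part of the
# pipeline; it assumes numpy is imported as np, as elsewhere in this module.
def _demo_descriptor_entropy():
    rng = np.random.RandomState(0)
    # Fake uint8 descriptors: 5 descriptors of 128 elements each
    desc = rng.randint(0, 256, size=(5, 128)).astype(np.uint8)
    bw_factor = 4
    bins = 256 // bw_factor
    for d in desc:
        # One normalized histogram (pmf) per descriptor
        pmf = np.histogram(d, bins=bins, range=(0, 256), density=True)[0]
        # p(x) at each observed element; an observed value's bin is nonempty
        # by construction, so log2 never sees a zero here
        px = pmf[d // bw_factor]
        print('entropy = %.3f' % -(px * np.log2(px)).sum())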