def show_single_coverage_mask(qreq_, cm, weight_mask_m, weight_mask, daids,
                              fnum=None):
    import plottool as pt
    from ibeis import viz
    fnum = pt.ensure_fnum(fnum)
    idx_list = ut.dict_take(cm.daid2_idx, daids)
    nPlots = len(idx_list) + 1
    nRows, nCols = pt.get_square_row_cols(nPlots)
    pnum_ = pt.make_pnum_nextgen(nRows, nCols)
    pt.figure(fnum=fnum, pnum=(1, 2, 1))
    # Draw coverage masks with bbox
    # <FlipHack>
    #weight_mask_m = np.fliplr(np.flipud(weight_mask_m))
    #weight_mask = np.fliplr(np.flipud(weight_mask))
    # </FlipHack>
    stacked_weights, offset_tup, sf_tup = vt.stack_images(weight_mask_m,
                                                          weight_mask,
                                                          return_sf=True)
    (woff, hoff) = offset_tup[1]
    wh1 = weight_mask_m.shape[0:2][::-1]
    wh2 = weight_mask.shape[0:2][::-1]
    pt.imshow(255 * (stacked_weights), fnum=fnum, pnum=pnum_(0),
              title='(query image) What did match vs what should match')
    pt.draw_bbox((0, 0) + wh1, bbox_color=(0, 0, 1))
    pt.draw_bbox((woff, hoff) + wh2, bbox_color=(0, 0, 1))
    # Get contributing matches
    qaid = cm.qaid
    daid_list = daids
    fm_list = ut.take(cm.fm_list, idx_list)
    fs_list = ut.take(cm.fs_list, idx_list)
    # Draw matches
    for px, (daid, fm, fs) in enumerate(zip(daid_list, fm_list, fs_list),
                                        start=1):
        viz.viz_matches.show_matches2(qreq_.ibs, qaid, daid, fm, fs,
                                      draw_pts=False, draw_lines=True,
                                      draw_ell=False, fnum=fnum,
                                      pnum=pnum_(px), darken=.5)
    coverage_score = score_matching_mask(weight_mask_m, weight_mask)
    pt.set_figtitle('score=%.4f' % (coverage_score,))
def show_page(self):
    if self.fig is None:
        raise AssertionError('fig is None, did you run interaction.start()?')
    import plottool as pt
    fig = ih.begin_interaction('expandable', self.fnum)
    if not any(self.pnum_list) and self.nRows is None and self.nCols is None:
        # Hack if no pnum was given
        self.nRows, self.nCols = pt.get_num_rc(len(self.pnum_list),
                                               nRows=self.nRows,
                                               nCols=self.nCols)
        nSubplots = len(self.func_list)
        pnum_ = pt.make_pnum_nextgen(self.nRows, self.nCols,
                                     nSubplots=nSubplots)
        self.pnum_list = [pnum_() for _ in self.pnum_list]
    for index, (pnum, func) in enumerate(zip(self.pnum_list, self.func_list)):
        if check_if_subinteract(func):
            # Hack
            interclass = func
            interclass.static_plot(fnum=self.fnum, pnum=pnum)
        elif hasattr(func, 'plot'):
            inter = func
            inter.plot(fnum=self.fnum, pnum=pnum)
        else:
            func(fnum=self.fnum, pnum=pnum)
        ax = pt.gca()
        pt.set_plotdat(ax, 'plot_func', func)
        pt.set_plotdat(ax, 'expandable_index', index)
    #if self.interactive is None or self.interactive:
    #    ih.connect_callback(fig, 'button_press_event', self.onclick)
    self.connect_callbacks()
    self.fig = fig
    return fig
def show_power_law_plots():
    """
    CommandLine:
        python -m ibeis.algo.hots.devcases --test-show_power_law_plots --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> #%pylab qt4
        >>> from ibeis.all_imports import *  # NOQA
        >>> from ibeis.algo.hots.devcases import *  # NOQA
        >>> show_power_law_plots()
        >>> pt.show_if_requested()
    """
    import numpy as np
    import plottool as pt
    xdata = np.linspace(0, 1, 1000)
    ydata = xdata
    fnum = 1
    powers = [.01, .1, .5, 1, 2, 30, 70, 100, 1000]
    nRows, nCols = pt.get_square_row_cols(len(powers), fix=True)
    pnum_next = pt.make_pnum_nextgen(nRows, nCols)
    for p in powers:
        plotkw = dict(fnum=fnum, marker='g-', linewidth=2, pnum=pnum_next(),
                      title='p=%r' % (p,))
        ydata_ = ydata ** p
        pt.plot2(xdata, ydata_, **plotkw)
    pt.set_figtitle('power laws y = x ** p')
def show_internals(self, fnum=None):
    import plottool as pt
    pt.qtensure()
    pnum_ = pt.make_pnum_nextgen(nRows=1, nCols=len(self.forests))
    for level, forest in enumerate(self.forests):
        pt.show_nx(forest.to_networkx(), title='level=%r' % (level,),
                   fnum=fnum, pnum=pnum_())
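Every snippet in this collection follows the same subplot-allocation pattern: pick a grid with pt.get_square_row_cols, ask pt.make_pnum_nextgen for a generator, and call it once per subplot. Below is a minimal self-contained sketch of that pattern, assuming only what the snippets above already demonstrate (make_pnum_nextgen(nRows, nCols) returns a callable that yields the next (nRows, nCols, index) pnum tuple); the demo function name and its defaults are illustrative, not part of plottool.

def demo_pnum_nextgen_grid(num_plots=6, fnum=None):
    # Minimal sketch (assumes pt.make_pnum_nextgen yields successive pnum tuples).
    import numpy as np
    import plottool as pt
    fnum = pt.ensure_fnum(fnum)
    # Choose a roughly square grid and build a pnum generator for it.
    nRows, nCols = pt.get_square_row_cols(num_plots)
    pnum_ = pt.make_pnum_nextgen(nRows, nCols)
    pt.figure(fnum=fnum, doclf=True)
    xdata = np.linspace(0, 1, 100)
    for k in range(num_plots):
        # Each call to pnum_() advances to the next cell in the grid.
        pt.plot2(xdata, xdata ** (k + 1), fnum=fnum, pnum=pnum_(),
                 title='x ** %d' % (k + 1,))
    pt.set_figtitle('make_pnum_nextgen demo')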
def sanity_checks(offset_list, Y_list, query_annots, ibs):
    nfeat_list = np.diff(offset_list)
    for Y, nfeat in ut.ProgIter(zip(Y_list, nfeat_list), 'checking'):
        assert nfeat == sum(ut.lmap(len, Y.fxs_list))

    if False:
        # Visualize queries
        # Look at the standard query images here
        # http://www.robots.ox.ac.uk:5000/~vgg/publications/2007/Philbin07/philbin07.pdf
        from ibeis.viz import viz_chip
        import plottool as pt
        pt.qt4ensure()
        fnum = 1
        pnum_ = pt.make_pnum_nextgen(len(query_annots.aids) // 5, 5)
        for aid in ut.ProgIter(query_annots.aids):
            pnum = pnum_()
            viz_chip.show_chip(ibs, aid, in_image=True, annote=False,
                               notitle=True, draw_lbls=False,
                               fnum=fnum, pnum=pnum)
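The assertion above is offset bookkeeping: np.diff of the cumulative feature offsets gives per-annotation feature counts, which must agree with the lengths of Y.fxs_list. A tiny illustration with made-up numbers:

import numpy as np
# Made-up cumulative offsets for three annotations with 3, 4, and 5 features.
offset_list = np.array([0, 3, 7, 12])
nfeat_list = np.diff(offset_list)          # -> array([3, 4, 5])
assert nfeat_list.sum() == offset_list[-1]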
def test_siamese_performance(model, data, labels, flat_metadata, dataname=''):
    r"""
    CommandLine:
        utprof.py -m ibeis_cnn --tf pz_patchmatch --db liberty --test --weights=liberty:current --arch=siaml2_128 --test
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --test --ensure
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --test --ensure --weights=new
        python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --train --weights=new
        python -m ibeis_cnn --tf netrun --db pzmtest --weights=liberty:current --arch=siaml2_128 --test  # NOQA
        python -m ibeis_cnn --tf netrun --db pzmtest --weights=liberty:current --arch=siaml2_128
    """
    import vtool as vt
    import plottool as pt

    # TODO: save in model.training_dpath/diagnostics/figures
    ut.colorprint('\n[siam_perf] Testing Siamese Performance', 'white')
    #epoch_dpath = model.get_epoch_diagnostic_dpath()
    epoch_dpath = model.arch_dpath
    ut.vd(epoch_dpath)

    dataname += ' ' + model.get_history_hashid() + '\n'

    history_text = ut.list_str(model.era_history, newlines=True)
    ut.write_to(ut.unixjoin(epoch_dpath, 'era_history.txt'), history_text)

    #if True:
    #    import matplotlib as mpl
    #    mpl.rcParams['agg.path.chunksize'] = 100000

    #data = data[::50]
    #labels = labels[::50]
    #from ibeis_cnn import utils
    #data, labels = utils.random_xy_sample(data, labels, 10000, model.data_per_label_input)

    FULL = not ut.get_argflag('--quick')

    fnum_gen = pt.make_fnum_nextgen()

    ut.colorprint('[siam_perf] Show era history', 'white')
    fig = model.show_era_loss(fnum=fnum_gen())
    pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)

    # hack
    ut.colorprint('[siam_perf] Show weights image', 'white')
    fig = model.show_weights_image(fnum=fnum_gen())
    pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)
    #model.draw_all_conv_layer_weights(fnum=fnum_gen())
    #model.imwrite_weights(1)
    #model.imwrite_weights(2)

    # Compute each type of score
    ut.colorprint('[siam_perf] Building Scores', 'white')
    test_outputs = model.predict2(model, data)
    network_output = test_outputs['network_output_determ']
    # hack converting network output to distances for non-descriptor networks
    if len(network_output.shape) == 2 and network_output.shape[1] == 1:
        cnn_scores = network_output.T[0]
    elif len(network_output.shape) == 1:
        cnn_scores = network_output
    elif len(network_output.shape) == 2 and network_output.shape[1] > 1:
        assert model.data_per_label_output == 2
        vecs1 = network_output[0::2]
        vecs2 = network_output[1::2]
        cnn_scores = vt.L2(vecs1, vecs2)
    else:
        assert False
    cnn_scores = cnn_scores.astype(np.float64)

    # Segfaults when the data passed in is large (AND MEMMAPPED apparently)
    # Fixed in hesaff implementation
    SIFT = FULL
    if SIFT:
        sift_scores, sift_list = test_sift_patchmatch_scores(data, labels)
        sift_scores = sift_scores.astype(np.float64)

    ut.colorprint('[siam_perf] Learning Encoders', 'white')
    # Learn encoders
    encoder_kw = {
        #'monotonize': False,
        'monotonize': True,
    }
    cnn_encoder = vt.ScoreNormalizer(**encoder_kw)
    cnn_encoder.fit(cnn_scores, labels)

    if SIFT:
        sift_encoder = vt.ScoreNormalizer(**encoder_kw)
        sift_encoder.fit(sift_scores, labels)

    # Visualize
    ut.colorprint('[siam_perf] Visualize Encoders', 'white')
    viz_kw = dict(
        with_scores=False,
        with_postbayes=False,
        with_prebayes=False,
        target_tpr=.95,
    )
    inter_cnn = cnn_encoder.visualize(
        figtitle=dataname + ' CNN scores. #data=' + str(len(data)),
        fnum=fnum_gen(), **viz_kw)
    if SIFT:
        inter_sift = sift_encoder.visualize(
            figtitle=dataname + ' SIFT scores. #data=' + str(len(data)),
            fnum=fnum_gen(), **viz_kw)

    # Save
    pt.save_figure(fig=inter_cnn.fig, dpath=epoch_dpath)
    if SIFT:
        pt.save_figure(fig=inter_sift.fig, dpath=epoch_dpath)

    # Save out examples of hard errors
    #cnn_fp_label_indicies, cnn_fn_label_indicies = cnn_encoder.get_error_indicies(cnn_scores, labels)
    #sift_fp_label_indicies, sift_fn_label_indicies = sift_encoder.get_error_indicies(sift_scores, labels)

    with_patch_examples = FULL
    if with_patch_examples:
        ut.colorprint('[siam_perf] Visualize Confusion Examples', 'white')
        cnn_indicies = cnn_encoder.get_confusion_indicies(cnn_scores, labels)
        if SIFT:
            sift_indicies = sift_encoder.get_confusion_indicies(sift_scores, labels)

        warped_patch1_list, warped_patch2_list = list(zip(*ut.ichunks(data, 2)))
        samp_args = (warped_patch1_list, warped_patch2_list, labels)
        _sample = functools.partial(draw_results.get_patch_sample_img, *samp_args)

        cnn_fp_img = _sample({'fs': cnn_scores}, cnn_indicies.fp)[0]
        cnn_fn_img = _sample({'fs': cnn_scores}, cnn_indicies.fn)[0]
        cnn_tp_img = _sample({'fs': cnn_scores}, cnn_indicies.tp)[0]
        cnn_tn_img = _sample({'fs': cnn_scores}, cnn_indicies.tn)[0]

        if SIFT:
            sift_fp_img = _sample({'fs': sift_scores}, sift_indicies.fp)[0]
            sift_fn_img = _sample({'fs': sift_scores}, sift_indicies.fn)[0]
            sift_tp_img = _sample({'fs': sift_scores}, sift_indicies.tp)[0]
            sift_tn_img = _sample({'fs': sift_scores}, sift_indicies.tn)[0]

        #if ut.show_was_requested():
        #def rectify(arr):
        #    return np.flipud(arr)
        SINGLE_FIG = False
        if SINGLE_FIG:
            def dump_img(img_, lbl, fnum):
                fig, ax = pt.imshow(img_, figtitle=dataname + ' ' + lbl, fnum=fnum)
                pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180)
            dump_img(cnn_fp_img, 'cnn_fp_img', fnum_gen())
            dump_img(cnn_fn_img, 'cnn_fn_img', fnum_gen())
            dump_img(cnn_tp_img, 'cnn_tp_img', fnum_gen())
            dump_img(cnn_tn_img, 'cnn_tn_img', fnum_gen())

            dump_img(sift_fp_img, 'sift_fp_img', fnum_gen())
            dump_img(sift_fn_img, 'sift_fn_img', fnum_gen())
            dump_img(sift_tp_img, 'sift_tp_img', fnum_gen())
            dump_img(sift_tn_img, 'sift_tn_img', fnum_gen())

            #vt.imwrite(dataname + '_' + 'cnn_fp_img.png', (cnn_fp_img))
            #vt.imwrite(dataname + '_' + 'cnn_fn_img.png', (cnn_fn_img))
            #vt.imwrite(dataname + '_' + 'sift_fp_img.png', (sift_fp_img))
            #vt.imwrite(dataname + '_' + 'sift_fn_img.png', (sift_fn_img))
        else:
            print('Drawing TP FP TN FN')
            fnum = fnum_gen()
            pnum_gen = pt.make_pnum_nextgen(4, 2)
            fig = pt.figure(fnum)
            pt.imshow(cnn_fp_img, title='CNN FP', fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_fp_img, title='SIFT FP', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_fn_img, title='CNN FN', fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_fn_img, title='SIFT FN', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_tp_img, title='CNN TP', fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_tp_img, title='SIFT TP', fnum=fnum, pnum=pnum_gen())
            pt.imshow(cnn_tn_img, title='CNN TN', fnum=fnum, pnum=pnum_gen())
            pt.imshow(sift_tn_img, title='SIFT TN', fnum=fnum, pnum=pnum_gen())
            pt.set_figtitle(dataname + ' confusions')
            pt.adjust_subplots(left=0, right=1.0, bottom=0., wspace=.01, hspace=.05)
            pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180, figsize=(9, 18))

    with_patch_desc = FULL
    if with_patch_desc:
        ut.colorprint('[siam_perf] Visualize Patch Descriptors', 'white')
        fnum = fnum_gen()
        fig = pt.figure(fnum=fnum, pnum=(1, 1, 1))
        num_rows = 7
        pnum_gen = pt.make_pnum_nextgen(num_rows, 3)
        # Compare actual output descriptors
        for index in ut.random_indexes(len(sift_list), num_rows):
            vec_sift = sift_list[index]
            vec_cnn = network_output[index]
            patch = data[index]
            pt.imshow(patch, fnum=fnum, pnum=pnum_gen())
            pt.plot_descriptor_signature(vec_cnn, 'cnn vec', fnum=fnum, pnum=pnum_gen())
            pt.plot_sift_signature(vec_sift, 'sift vec', fnum=fnum, pnum=pnum_gen())
        pt.set_figtitle('Patch Descriptors')
        pt.adjust_subplots(left=0, right=0.95, bottom=0., wspace=.1, hspace=.15)
        pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180, figsize=(9, 18))
def sift_test():
    """
    Play with SIFT equations in Python so I can see and compare results.
    """
    import numpy as np
    # Sample measurement from lena
    sift_raw = np.array([
        48.0168, 130.017, 159.065, 54.5727, 63.7103, 14.3629, 27.0228,
        15.3527, 40.5067, 165.721, 511.036, 196.888, 4.72748, 8.85093,
        15.9457, 14.4198, 49.7571, 209.104, 452.047, 223.972, 2.66391,
        16.8975, 21.7488, 13.6855, 0.700244, 10.2518, 312.483, 282.647,
        1.82898, 3.01759, 0.448028, 0, 144.834, 300.438, 131.837, 40.3284,
        11.1998, 9.68647, 7.68484, 29.166, 425.953, 386.903, 352.388,
        267.883, 12.9652, 18.833, 8.55462, 71.7924, 112.282, 295.512,
        678.599, 419.405, 21.3151, 91.9408, 22.8681, 9.83749, 3.06347,
        97.6562, 458.799, 221.873, 68.1473, 410.764, 48.9493, 2.01682,
        194.794, 43.7171, 16.2078, 17.5604, 48.8504, 48.3823, 45.7636,
        299.432, 901.565, 188.732, 32.6512, 23.6874, 55.379, 272.264,
        68.2334, 221.37, 159.631, 44.1475, 126.636, 95.1978, 74.1097,
        1353.24, 239.319, 33.5368, 5.62254, 69.0013, 51.7629, 9.55458,
        26.4599, 699.623, 208.78, 2.09156, 135.278, 19.5378, 52.0265,
        51.8445, 49.1938, 9.04161, 11.6605, 87.4498, 604.012, 85.6801,
        42.9738, 75.8549, 183.65, 206.912, 34.2781, 95.0146, 13.4201,
        83.7426, 440.322, 83.0038, 125.663, 457.333, 52.6424, 4.93713,
        0.38947, 244.762, 291.113, 7.50165, 8.16208, 73.2169, 21.9674,
        0.00429259,
    ])
    import vtool as vt

    # CONFIRMED: One normalization followed by another does not do anything
    #sift_root1 = vt.normalize(sift_root1, ord=2)
    #sift_root1 = vt.normalize(sift_root1, ord=1)

    sift_clip = sift_raw.copy()
    sift_clip = vt.normalize(sift_clip, ord=2)
    sift_clip[sift_clip > .2] = .2
    sift_clip = vt.normalize(sift_clip, ord=2)

    siff_ell2 = vt.normalize(sift_raw, ord=2)
    siff_ell1 = vt.normalize(sift_raw, ord=1)

    # Two versions of root SIFT
    # They are equivalent
    # taken from https://hal.inria.fr/hal-00840721/PDF/RR-8325.pdf
    normalize1 = lambda x: vt.normalize(x, ord=1)  # NOQA
    normalize2 = lambda x: vt.normalize(x, ord=2)  # NOQA
    assert np.all(np.isclose(np.sqrt(normalize1(sift_raw)),
                             normalize2(np.sqrt(sift_raw))))

    # How do we generalize this for alpha != .5?
    # Just always L2 normalize afterwards?
    alpha = .2
    powerlaw = lambda x: np.power(x, alpha)  # NOQA
    sift_root1 = normalize2(powerlaw(normalize1(sift_raw)))
    sift_root2 = normalize2(powerlaw(sift_raw))
    flags = np.isclose(sift_root1, sift_root2)
    print(flags)
    assert np.all(flags)

    #sift_root_quant = np.clip((sift_root1 * 512), 0, 255).astype(np.uint8)
    #p = (np.bincount(sift_root_quant) / 128)
    #entropy = -np.nansum(p * np.log2(p))

    s = sift_raw[0:10]
    np.sqrt(s) / (np.sqrt(s).sum() ** 2)
    np.power(normalize1(s), 2)
    #b = powerlaw(normalize1(s))
    #print(np.isclose(a, b))

    np.isclose(normalize1(s), normalize1(normalize2(s)))

    # Another root SIFT version from
    # https://hal.inria.fr/hal-00688169/document
    # but this doesnt seem to work with uint8 representations
    sift_root3 = np.sqrt(sift_raw)
    sift_root3 = sift_root3 / np.sqrt(np.linalg.norm(sift_root3))

    import plottool as pt
    import utool as ut
    ut.qtensure()

    fig = pt.figure(fnum=1, pnum=None)

    def draw_sift(sift, pnum, title, **kwargs):
        ax = fig.add_subplot(*pnum)
        pt.draw_sifts(ax, sift[None, :], **kwargs)
        ax.set_xlim(-1, 1)
        ax.set_ylim(-1, 1)
        ax.grid(False)
        ax.set_aspect('equal')
        ax.set_xticks([])
        ax.set_yticks([])
        if title:
            ax.set_title(title)

    fig.clf()
    pnum_ = pt.make_pnum_nextgen(2, 4)
    draw_sift(sift_raw, pnum_(), 'raw/max(raw)', fidelity=sift_raw.max())
    draw_sift(sift_clip, pnum_(), 'clip', fidelity=1.0)
    draw_sift(siff_ell2, pnum_(), 'l2', fidelity=1.0)
    draw_sift(siff_ell1, pnum_(), 'l1', fidelity=1.0)
    draw_sift(sift_root1, pnum_(), 'root1', fidelity=1.0)
    draw_sift(sift_root2, pnum_(), 'root2', fidelity=1.0)
    draw_sift(sift_root3, pnum_(), 'root3', fidelity=2.0)
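The assertion in sift_test shows the usual rootSIFT identity: the square root of an L1-normalized descriptor equals the L2-normalized square root of the raw descriptor. A minimal numpy-only sketch of that mapping follows; the helper name is illustrative and not part of vtool.

import numpy as np

def rootsift(desc, eps=1e-12):
    # L1-normalize, then take the elementwise square root.
    desc = np.asarray(desc, dtype=np.float64)
    desc = desc / (np.abs(desc).sum() + eps)
    desc = np.sqrt(desc)
    # Already (approximately) L2-normalized, since
    # sum(sqrt(x) ** 2) == sum(x) == 1 after L1 normalization.
    return desc

# e.g. rootsift(sift_raw) agrees with normalize2(np.sqrt(sift_raw)) above.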
def test_rot_invar():
    r"""
    CommandLine:
        python -m pyhesaff test_rot_invar --show --rebuild-hesaff --no-rmbuild
        python -m pyhesaff test_rot_invar --show --nocpp

        python -m vtool.tests.dummy testdata_ratio_matches --show --ratio_thresh=1.0 --rotation_invariance --rebuild-hesaff
        python -m vtool.tests.dummy testdata_ratio_matches --show --ratio_thresh=1.1 --rotation_invariance --rebuild-hesaff

    Example:
        >>> # DISABLE_DOCTEST
        >>> from pyhesaff._pyhesaff import *  # NOQA
        >>> test_rot_invar()
    """
    import cv2
    import vtool as vt
    import plottool as pt
    TAU = 2 * np.pi
    fnum = pt.next_fnum()
    NUM_PTS = 5  # 9
    theta_list = np.linspace(0, TAU, NUM_PTS, endpoint=False)
    nRows, nCols = pt.get_square_row_cols(len(theta_list), fix=True)
    next_pnum = pt.make_pnum_nextgen(nRows, nCols)
    # Expand the border a bit around star.png
    pad_ = 100
    img_fpath = grab_test_imgpath('star.png')
    img_fpath2 = vt.pad_image_ondisk(img_fpath, pad_, value=26)
    for theta in theta_list:
        print('-----------------')
        print('theta = %r' % (theta,))
        img_fpath = vt.rotate_image_ondisk(img_fpath2, theta,
                                           border_mode=cv2.BORDER_REPLICATE)
        if not ub.argflag('--nocpp'):
            (kpts_list_ri, vecs_list2) = detect_feats(img_fpath,
                                                      rotation_invariance=True)
            kpts_ri = kpts_list_ri[0:2]
        (kpts_list_gv, vecs_list1) = detect_feats(img_fpath,
                                                  rotation_invariance=False)
        kpts_gv = kpts_list_gv[0:2]
        # find_kpts_direction
        imgBGR = vt.imread(img_fpath)
        kpts_ripy = vt.find_kpts_direction(imgBGR, kpts_gv, DEBUG_ROTINVAR=False)
        # Verify results stdout
        #print('nkpts = %r' % (len(kpts_gv)))
        #print(vt.kpts_repr(kpts_gv))
        #print(vt.kpts_repr(kpts_ri))
        #print(vt.kpts_repr(kpts_ripy))
        # Verify results plot
        pt.figure(fnum=fnum, pnum=next_pnum())
        pt.imshow(imgBGR)
        #if len(kpts_gv) > 0:
        #    pt.draw_kpts2(kpts_gv, ori=True, ell_color=pt.BLUE, ell_linewidth=10.5)
        ell = False
        rect = True
        if not ub.argflag('--nocpp'):
            if len(kpts_ri) > 0:
                pt.draw_kpts2(kpts_ri, rect=rect, ell=ell, ori=True,
                              ell_color=pt.RED, ell_linewidth=5.5)
        if len(kpts_ripy) > 0:
            pt.draw_kpts2(kpts_ripy, rect=rect, ell=ell, ori=True,
                          ell_color=pt.GREEN, ell_linewidth=3.5)
    pt.set_figtitle('green=python, red=C++')
    pt.show_if_requested()
def fourier_devtest(img):
    r"""
    Args:
        img (ndarray[uint8_t, ndim=2]): image data

    CommandLine:
        python -m vtool.quality_classifier --test-fourier_devtest --show

    References:
        http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.html
        http://cns-alumni.bu.edu/~slehar/fourier/fourier.html

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.quality_classifier import *  # NOQA
        >>> import vtool as vt
        >>> # build test data
        >>> img_fpath = ut.grab_test_imgpath('lena.png')
        >>> img = vt.imread(img_fpath, grayscale=True)
        >>> # execute function
        >>> magnitude_spectrum = fourier_devtest(img)
    """
    import plottool as pt

    def pad_img(img):
        rows, cols = img.shape
        nrows = cv2.getOptimalDFTSize(rows)
        ncols = cv2.getOptimalDFTSize(cols)
        right = ncols - cols
        bottom = nrows - rows
        bordertype = cv2.BORDER_CONSTANT
        nimg = cv2.copyMakeBorder(img, 0, bottom, 0, right, bordertype, value=0)
        return nimg

    def convert_to_fdomain(img):
        dft = cv2.dft(img.astype(np.float32), flags=cv2.DFT_COMPLEX_OUTPUT)
        #dft_shift = np.fft.fftshift(dft)
        return dft

    def convert_from_fdomain(dft):
        img = cv2.idft(dft)
        img = cv2.magnitude(img[:, :, 0], img[:, :, 1])
        img /= img.max()
        return img * 255.0

    def get_fdomain_mag(dft_shift):
        magnitude_spectrum = np.log(cv2.magnitude(dft_shift[:, :, 0],
                                                  dft_shift[:, :, 1]))
        return magnitude_spectrum

    def imgstats(img):
        print('stats:')
        print(' dtype = %r ' % (img.dtype,))
        print(' ' + ut.get_stats_str(img, axis=None))

    nimg = pad_img(img)
    dft = convert_to_fdomain(nimg)
    #freq_domain = np.fft.fft2(img)
    #freq_domain_shift = np.fft.fftshift(freq_domain)

    rows, cols = nimg.shape
    crow, ccol = rows // 2, cols // 2
    # create a mask first, center square is 1, remaining all zeros
    mask = np.zeros((rows, cols, 2), np.uint8)
    mask[crow - 30:crow + 30, ccol - 30:ccol + 30] = 1
    dft_mask = np.fft.ifftshift(np.fft.fftshift(dft) * mask)
    img_back = convert_from_fdomain(dft_mask)

    imgstats(dft)
    imgstats(mask)
    imgstats(nimg)
    imgstats(nimg)

    print('nimg.shape = %r' % (nimg.shape,))
    print('dft_shift.shape = %r' % (dft.shape,))

    if ut.show_was_requested():
        #import plottool as pt
        next_pnum = pt.make_pnum_nextgen(nRows=3, nCols=2)
        pt.imshow(nimg, pnum=next_pnum(), title='nimg')
        pt.imshow(20 * get_fdomain_mag(dft), pnum=next_pnum(), title='mag(f)')
        pt.imshow(20 * get_fdomain_mag(dft_mask), pnum=next_pnum(), title='dft_mask')
        pt.imshow(img_back, pnum=next_pnum(), title='img_back')
        pt.show_if_requested()
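fourier_devtest keeps only the frequencies inside a centered box of the shifted spectrum before inverting, which amounts to a crude low-pass filter. Below is a numpy-only sketch of that masking step, without the cv2 padding and two-channel DFT bookkeeping; the function name and half_width default are illustrative.

import numpy as np

def lowpass_via_fft(img, half_width=30):
    # Forward FFT with the DC component shifted to the center.
    F = np.fft.fftshift(np.fft.fft2(img.astype(np.float64)))
    crow, ccol = img.shape[0] // 2, img.shape[1] // 2
    # Box mask: ones around the center (low frequencies), zeros elsewhere.
    mask = np.zeros(img.shape, dtype=np.float64)
    mask[crow - half_width:crow + half_width,
         ccol - half_width:ccol + half_width] = 1
    # Zero the high frequencies, undo the shift, and invert.
    return np.real(np.fft.ifft2(np.fft.ifftshift(F * mask)))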
def start_new_viz(simp, nRows, nCols, fnum=None):
    import plottool as pt
    rchip1, rchip2, kpts1, vecs1, kpts2, vecs2, dlen_sqrd2 = simp.testtup
    fm_ORIG, fs_ORIG, fm_RAT, fs_RAT, fm_SV, fs_SV, H_RAT = simp.basetup
    fm_SC, fs_SC, fm_SCR, fs_SCR, fm_SCRSV, fs_SCRSV, H_SCR = simp.nexttup
    fm_norm_RAT, fm_norm_SV = simp.base_meta
    fm_norm_SC, fm_norm_SCR, fm_norm_SVSCR = simp.next_meta

    locals_ = ut.delete_dict_keys(locals(), ['title'])

    keytitle_tups = [
        ('ORIG', 'initial neighbors'),
        ('RAT', 'ratio filtered'),
        ('SV', 'ratio filtered + SV'),
        ('SC', 'spatially constrained'),
        ('SCR', 'spatially constrained + ratio'),
        ('SCRSV', 'spatially constrained + SV'),
    ]
    keytitle_dict = dict(keytitle_tups)
    key_list = ut.get_list_column(keytitle_tups, 0)
    matchtup_dict = {
        key: (locals_['fm_' + key], locals_['fs_' + key])
        for key in key_list
    }
    normtup_dict = {
        key: locals_.get('fm_norm_' + key, None)
        for key in key_list
    }

    next_pnum = pt.make_pnum_nextgen(nRows=nRows, nCols=nCols)
    if fnum is None:
        fnum = pt.next_fnum()
    INTERACTIVE = True
    if INTERACTIVE:
        from plottool import interact_helpers as ih
        fig = ih.begin_interaction('qres', fnum)
        ih.connect_callback(fig, 'button_press_event', on_single_match_clicked)
    else:
        pt.figure(fnum=fnum, doclf=True, docla=True)

    def show_matches_(key, **kwargs):
        assert key in key_list, 'unknown key=%r' % (key,)
        showkw = locals_.copy()
        pnum = next_pnum()
        showkw['pnum'] = pnum
        showkw['fnum'] = fnum
        showkw.update(kwargs)
        _fm, _fs = matchtup_dict[key]
        title = keytitle_dict[key]
        if kwargs.get('coverage'):
            from vtool import coverage_kpts
            kpts2, rchip2 = ut.dict_get(locals_, ('kpts2', 'rchip2'))
            kpts2_m = kpts2.take(_fm.T[1], axis=0)
            chipshape2 = rchip2.shape
            chipsize2 = chipshape2[0:2][::-1]
            coverage_mask = coverage_kpts.make_kpts_coverage_mask(
                kpts2_m, chipsize2, fx2_score=_fs, resize=True,
                return_patch=False)
            pt.imshow(coverage_mask * 255, pnum=pnum, fnum=fnum)
        else:
            if kwargs.get('norm', False):
                _fm = normtup_dict[key]
                assert _fm is not None, key
                showkw['cmap'] = 'cool'
                title += ' normalizers'
            show_matches(_fm, _fs, title=title, key=key, **showkw)
    # state hack
    #show_matches_.next_pnum = next_pnum
    return show_matches_
def compare_featscores():
    """
    CommandLine:
        ibeis --tf compare_featscores --db PZ_MTEST \
            --nfscfg :disttype=[L2_sift,lnbnn],top_percent=[None,.5,.1] -a timectrl \
            -p default:K=[1,2],normalizer_rule=name \
            --save featscore{db}.png --figsize=13,20 --diskshow

        ibeis --tf compare_featscores --db PZ_MTEST \
            --nfscfg :disttype=[L2_sift,normdist,lnbnn],top_percent=[None,.5] -a timectrl \
            -p default:K=[1],normalizer_rule=name,sv_on=[True,False] \
            --save featscore{db}.png --figsize=13,10 --diskshow

        ibeis --tf compare_featscores --nfscfg :disttype=[L2_sift,normdist,lnbnn] \
            -a timectrl -p default:K=1,normalizer_rule=name --db PZ_Master1 \
            --save featscore{db}.png --figsize=13,13 --diskshow

        ibeis --tf compare_featscores --nfscfg :disttype=[L2_sift,normdist,lnbnn] \
            -a timectrl -p default:K=1,normalizer_rule=name --db GZ_ALL \
            --save featscore{db}.png --figsize=13,13 --diskshow

        ibeis --tf compare_featscores --db GIRM_Master1 \
            --nfscfg ':disttype=fg,L2_sift,normdist,lnbnn' \
            -a timectrl -p default:K=1,normalizer_rule=name \
            --save featscore{db}.png --figsize=13,13

        ibeis --tf compare_featscores --nfscfg :disttype=[L2_sift,normdist,lnbnn] \
            -a timectrl -p default:K=[1,2,3],normalizer_rule=name,sv_on=False \
            --db PZ_Master1 --save featscore{db}.png \
            --dpi=128 --figsize=15,20 --diskshow

        ibeis --tf compare_featscores --show --nfscfg :disttype=[L2_sift,normdist] -a timectrl -p :K=1 --db PZ_MTEST
        ibeis --tf compare_featscores --show --nfscfg :disttype=[L2_sift,normdist] -a timectrl -p :K=1 --db GZ_ALL
        ibeis --tf compare_featscores --show --nfscfg :disttype=[L2_sift,normdist] -a timectrl -p :K=1 --db PZ_Master1
        ibeis --tf compare_featscores --show --nfscfg :disttype=[L2_sift,normdist] -a timectrl -p :K=1 --db GIRM_Master1

        ibeis --tf compare_featscores --db PZ_MTEST \
            --nfscfg :disttype=[L2_sift,normdist,lnbnn],top_percent=[None,.5,.2] -a timectrl \
            -p default:K=[1],normalizer_rule=name \
            --save featscore{db}.png --figsize=13,20 --diskshow

        ibeis --tf compare_featscores --db PZ_MTEST \
            --nfscfg :disttype=[L2_sift,normdist,lnbnn],top_percent=[None,.5,.2] -a timectrl \
            -p default:K=[1],normalizer_rule=name \
            --save featscore{db}.png --figsize=13,20 --diskshow

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.scorenorm import *  # NOQA
        >>> result = compare_featscores()
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import plottool as pt
    import ibeis
    nfs_cfg_list = NormFeatScoreConfig.from_argv_cfgs()
    learnkw = {}
    ibs, testres = ibeis.testdata_expts(
        defaultdb='PZ_MTEST', a=['default'], p=['default:K=1'])
    print('nfs_cfg_list = ' + ut.repr3(nfs_cfg_list))

    encoder_list = []
    lbl_list = []

    varied_nfs_lbls = ut.get_varied_cfg_lbls(nfs_cfg_list)
    varied_qreq_lbls = ut.get_varied_cfg_lbls(testres.cfgdict_list)
    #varies_qreq_lbls

    #func = ut.cached_func(cache_dir='.')(learn_featscore_normalizer)
    for datakw, nlbl in zip(nfs_cfg_list, varied_nfs_lbls):
        for qreq_, qlbl in zip(testres.cfgx2_qreq_, varied_qreq_lbls):
            lbl = qlbl + ' ' + nlbl
            cfgstr = '_'.join([datakw.get_cfgstr(), qreq_.get_full_cfgstr()])
            try:
                encoder = vt.ScoreNormalizer()
                encoder.load(cfgstr=cfgstr)
            except IOError:
                print('datakw = %r' % (datakw,))
                encoder = learn_featscore_normalizer(qreq_, datakw, learnkw)
                encoder.save(cfgstr=cfgstr)
            encoder_list.append(encoder)
            lbl_list.append(lbl)

    fnum = 1
    # next_pnum = pt.make_pnum_nextgen(nRows=len(encoder_list), nCols=3)
    next_pnum = pt.make_pnum_nextgen(nRows=len(encoder_list) + 1, nCols=3, start=3)
    iconsize = 94
    if len(encoder_list) > 3:
        iconsize = 64
    icon = qreq_.ibs.get_database_icon(max_dsize=(None, iconsize),
                                       aid=qreq_.qaids[0])
    score_range = (0, .6)
    for encoder, lbl in zip(encoder_list, lbl_list):
        #encoder.visualize(figtitle=encoder.get_cfgstr(), with_prebayes=False, with_postbayes=False)
        encoder._plot_score_support_hist(fnum, pnum=next_pnum(),
                                         titlesuf='\n' + lbl,
                                         score_range=score_range)
        encoder._plot_prebayes(fnum, pnum=next_pnum())
        encoder._plot_roc(fnum, pnum=next_pnum())
        if icon is not None:
            pt.overlay_icon(icon, coords=(1, 0), bbox_alignment=(1, 0))

    nonvaried_lbl = ut.get_nonvaried_cfg_lbls(nfs_cfg_list)[0]
    figtitle = qreq_.__str__() + '\n' + nonvaried_lbl

    pt.set_figtitle(figtitle)
    pt.adjust_subplots(hspace=.5, top=.92, bottom=.08, left=.1, right=.9)
    pt.update_figsize()
    pt.plt.tight_layout()
def test_featweight_worker():
    """
    test function

    python -m ibeis.algo.preproc.preproc_featweight --test-gen_featweight_worker --show --cnn
    """
    import ibeis
    qreq_ = ibeis.main_helpers.testdata_qreq_(defaultdb='PZ_MTEST',
                                              p=['default:fw_detector=cnn'],
                                              qaid_override=[1])
    ibs = qreq_.ibs
    config2_ = qreq_.qparams
    lazy = True
    aid_list = qreq_.get_external_qaids()
    #aid_list = ibs.get_valid_aids()[0:30]
    kpts_list = ibs.get_annot_kpts(aid_list)
    chipsize_list = ibs.get_annot_chip_sizes(aid_list, config2_=config2_)
    probchip_fpath_list = preproc_probchip.compute_and_write_probchip(
        ibs, aid_list, lazy=lazy, config2_=config2_)
    print('probchip_fpath_list = %r' % (probchip_fpath_list,))
    probchip_list = [vt.imread(fpath, grayscale=True) if exists(fpath) else None
                     for fpath in probchip_fpath_list]

    _iter = list(zip(aid_list, kpts_list, probchip_list, chipsize_list))
    _iter = ut.InteractiveIter(_iter, enabled=ut.get_argflag('--show'))
    for aid, kpts, probchip, chipsize in _iter:
        #kpts = kpts_list[0]
        #aid = aid_list[0]
        #probchip = probchip_list[0]
        #chipsize = chipsize_list[0]
        tup = (aid, kpts, probchip, chipsize)
        (aid, weights) = gen_featweight_worker(tup)
        if aid == 3 and ibs.get_dbname() == 'testdb1':
            # Run Asserts if not interactive
            weights_03_test = weights[0:3]
            print('weights[0:3] = %r' % (weights_03_test,))
            #weights_03_target = [ 0.098, 0.155, 0.422]
            #weights_03_target = [ 0.324, 0.407, 0.688]
            #weights_thresh = [ 0.09, 0.09, 0.09]
            #ut.assert_almost_eq(weights_03_test, weights_03_target, weights_thresh)
            ut.assert_inbounds(weights_03_test, 0, 1)
            if not ut.show_was_requested():
                break
        if ut.show_was_requested():
            import plottool as pt
            #sfx, sfy = (probchip.shape[1] / chipsize[0], probchip.shape[0] / chipsize[1])
            #kpts_ = vt.offset_kpts(kpts, (0, 0), (sfx, sfy))
            pnum_ = pt.make_pnum_nextgen(1, 3)  # *pt.get_square_row_cols(4))
            fnum = 1
            pt.figure(fnum=fnum, doclf=True)
            ###
            pt.imshow(ibs.get_annot_chips(aid, config2_=config2_),
                      pnum=pnum_(0), fnum=fnum)
            if ut.get_argflag('--numlbl'):
                pt.gca().set_xlabel('(1)')
            ###
            pt.imshow(probchip, pnum=pnum_(2), fnum=fnum)
            if ut.get_argflag('--numlbl'):
                pt.gca().set_xlabel('(2)')
            #pt.draw_kpts2(kpts_, ell_alpha=.4, color_list=pt.ORANGE)
            ###
            #pt.imshow(probchip, pnum=pnum_(3), fnum=fnum)
            #color_list = pt.draw_kpts2(kpts_, weights=weights, ell_alpha=.7, cmap_='jet')
            #cb = pt.colorbar(weights, color_list)
            #cb.set_label('featweights')
            ###
            pt.imshow(ibs.get_annot_chips(aid, config2_=qreq_.qparams),
                      pnum=pnum_(1), fnum=fnum)
            #color_list = pt.draw_kpts2(kpts, weights=weights, ell_alpha=.3, cmap_='jet')
            color_list = pt.draw_kpts2(kpts, weights=weights, ell_alpha=.3)
            cb = pt.colorbar(weights, color_list)
            cb.set_label('featweights')
            if ut.get_argflag('--numlbl'):
                pt.gca().set_xlabel('(3)')
            #pt.draw_kpts2(kpts, ell_alpha=.4)
            pt.draw()
            pt.show_if_requested()
def test_rot_invar():
    r"""
    CommandLine:
        python -m pyhesaff test_rot_invar --show --rebuild-hesaff --no-rmbuild
        python -m pyhesaff test_rot_invar --show --nocpp

        python -m vtool.tests.dummy testdata_ratio_matches --show --ratio_thresh=1.0 --rotation_invariance --rebuild-hesaff
        python -m vtool.tests.dummy testdata_ratio_matches --show --ratio_thresh=1.1 --rotation_invariance --rebuild-hesaff

    Example:
        >>> # DISABLE_DOCTEST
        >>> from pyhesaff._pyhesaff import *  # NOQA
        >>> test_rot_invar()
    """
    import cv2
    import utool as ut
    import vtool as vt
    import plottool as pt
    TAU = 2 * np.pi
    fnum = pt.next_fnum()
    NUM_PTS = 5  # 9
    theta_list = np.linspace(0, TAU, NUM_PTS, endpoint=False)
    nRows, nCols = pt.get_square_row_cols(len(theta_list), fix=True)
    next_pnum = pt.make_pnum_nextgen(nRows, nCols)
    # Expand the border a bit around star.png
    pad_ = 100
    img_fpath = ut.grab_test_imgpath('star.png')
    img_fpath2 = vt.pad_image_ondisk(img_fpath, pad_, value=26)
    for theta in theta_list:
        print('-----------------')
        print('theta = %r' % (theta,))
        #theta = ut.get_argval('--theta', type_=float, default=TAU * 3 / 8)
        img_fpath = vt.rotate_image_ondisk(img_fpath2, theta,
                                           borderMode=cv2.BORDER_REPLICATE)
        if not ut.get_argflag('--nocpp'):
            (kpts_list_ri, vecs_list2) = detect_feats(img_fpath,
                                                      rotation_invariance=True)
            kpts_ri = ut.strided_sample(kpts_list_ri, 2)
        (kpts_list_gv, vecs_list1) = detect_feats(img_fpath,
                                                  rotation_invariance=False)
        kpts_gv = ut.strided_sample(kpts_list_gv, 2)
        # find_kpts_direction
        imgBGR = vt.imread(img_fpath)
        kpts_ripy = vt.find_kpts_direction(imgBGR, kpts_gv, DEBUG_ROTINVAR=False)
        # Verify results stdout
        #print('nkpts = %r' % (len(kpts_gv)))
        #print(vt.kpts_repr(kpts_gv))
        #print(vt.kpts_repr(kpts_ri))
        #print(vt.kpts_repr(kpts_ripy))
        # Verify results plot
        pt.figure(fnum=fnum, pnum=next_pnum())
        pt.imshow(imgBGR)
        #if len(kpts_gv) > 0:
        #    pt.draw_kpts2(kpts_gv, ori=True, ell_color=pt.BLUE, ell_linewidth=10.5)
        ell = False
        rect = True
        if not ut.get_argflag('--nocpp'):
            if len(kpts_ri) > 0:
                pt.draw_kpts2(kpts_ri, rect=rect, ell=ell, ori=True,
                              ell_color=pt.RED, ell_linewidth=5.5)
        if len(kpts_ripy) > 0:
            pt.draw_kpts2(kpts_ripy, rect=rect, ell=ell, ori=True,
                          ell_color=pt.GREEN, ell_linewidth=3.5)
        #print('\n'.join(vt.get_ori_strs(np.vstack([kpts_gv, kpts_ri, kpts_ripy]))))
        #ut.embed(exec_lines=['pt.update()'])
    pt.set_figtitle('green=python, red=C++')
    pt.show_if_requested()
def show_augmented_patches(Xb, Xb_, yb, yb_, data_per_label=1, shadows=None):
    """
    from ibeis_cnn.augment import *  # NOQA
    std_ = center_std
    mean_ = center_mean
    """
    import plottool as pt
    import vtool as vt
    Xb_old = vt.rectify_to_float01(Xb)
    Xb_new = vt.rectify_to_float01(Xb_)

    # only look at ones that were actually augmented
    sample1 = Xb_old[0::data_per_label]
    sample2 = Xb_new[0::data_per_label]
    diff = np.abs((sample1 - sample2))
    diff_batches = diff.sum(-1).sum(-1).sum(-1) > 0
    modified_indexes = np.where(diff_batches > 0)[0]
    print('modified_indexes = %r' % (modified_indexes,))
    #modified_indexes = np.arange(num_examples)

    Xb_old = vt.rectify_to_uint8(Xb_old)
    Xb_new = vt.rectify_to_uint8(Xb_new)

    # Group data into n-tuples
    grouped_idxs = [np.arange(n, len(Xb_), data_per_label)
                    for n in range(data_per_label)]
    data_lists_old = vt.apply_grouping(Xb_old, grouped_idxs, axis=0)
    data_lists_new = vt.apply_grouping(Xb_new, grouped_idxs, axis=0)

    import six
    #chunck_sizes = (4, 10)
    import utool
    with utool.embed_on_exception_context:
        chunk_sizes = pt.get_square_row_cols(len(modified_indexes),
                                             max_cols=10, fix=False,
                                             inclusive=False)
        _iter = ut.iter_multichunks(modified_indexes, chunk_sizes)
        multiindices = six.next(_iter)

        from ibeis_cnn import draw_results
        tup = draw_results.get_patch_multichunks(data_lists_old, yb, {},
                                                 multiindices)
        orig_stack = tup[0]
        #stacked_img, stacked_offsets, stacked_sfs = tup

        tup = draw_results.get_patch_multichunks(data_lists_new, yb_, {},
                                                 multiindices)
        warp_stack = tup[0]
        #stacked_img, stacked_offsets, stacked_sfs = tup

    #orig_stack = stacked_img_pairs(Xb_old, modified_indexes, yb)
    #warp_stack = stacked_img_pairs(Xb_new, modified_indexes, yb_)
    if shadows is not None:
        # hack
        shadow_stack = stacked_img_pairs(shadows, modified_indexes, yb_)

    fnum = None
    fnum = pt.ensure_fnum(fnum)
    pt.figure(fnum)
    #next_pnum = pt.make_pnum_nextgen(nRows=2 + (shadows is not None), nCols=1)
    next_pnum = pt.make_pnum_nextgen(nCols=2 + (shadows is not None), nRows=1)
    pt.imshow(orig_stack, pnum=next_pnum(), title='before')
    pt.imshow(warp_stack, pnum=next_pnum(), title='after')
    if shadows is not None:
        pt.imshow(shadow_stack, pnum=next_pnum(), title='shadow_stack')
def demo_classes(pblm):
    r"""
    CommandLine:
        python -m ibeis.algo.verif.vsone demo_classes --saveparts --save=classes.png --clipwhite

        python -m ibeis.algo.verif.vsone demo_classes --saveparts --save=figures/classes.png --clipwhite --dpath=~/latex/crall-iccv-2017

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.verif.vsone import *  # NOQA
        >>> pblm = OneVsOneProblem.from_empty(defaultdb='PZ_PB_RF_TRAIN')
        >>> pblm.load_features()
        >>> pblm.load_samples()
        >>> pblm.build_feature_subsets()
        >>> pblm.demo_classes()
        >>> ut.show_if_requested()
    """
    task_key = 'match_state'
    labels = pblm.samples.subtasks[task_key]
    pb_labels = pblm.samples.subtasks['photobomb_state']
    classname_offset = {
        POSTV: 0,
        NEGTV: 0,
        INCMP: 0,
    }
    class_name = POSTV
    class_name = NEGTV
    class_name = INCMP

    feats = pblm.samples.X_dict['learn(sum,glob)']

    offset = 0
    class_to_edge = {}
    for class_name in labels.class_names:
        print('Find example of %r' % (class_name,))
        # Find an example of each class (that is not a photobomb)
        pbflags = pb_labels.indicator_df['notpb']
        flags = labels.indicator_df[class_name]
        assert np.all(pbflags.index == flags.index)
        flags = flags & pbflags
        ratio = feats['sum(ratio)']
        if class_name == INCMP:
            # flags &= feats['global(delta_yaw)'] > 3
            flags &= feats['global(delta_view)'] > 2
            # flags &= feats['sum(ratio)'] > 0
        if class_name == NEGTV:
            low = ratio[flags].max()
            flags &= feats['sum(ratio)'] >= low
        if class_name == POSTV:
            low = ratio[flags].median() / 2
            high = ratio[flags].median()
            flags &= feats['sum(ratio)'] < high
            flags &= feats['sum(ratio)'] > low
        # flags &= pblm.samples.simple_scores[flags]['score_lnbnn_1vM'] > 0

        idxs = np.where(flags)[0]
        print('Found %d candidates' % (len(idxs)))
        offset = classname_offset[class_name]
        idx = idxs[offset]
        series = labels.indicator_df.iloc[idx]
        assert series[class_name]
        edge = series.name
        class_to_edge[class_name] = edge

    import plottool as pt
    import guitool as gt
    gt.ensure_qapp()
    pt.qtensure()
    fnum = 1
    pt.figure(fnum=fnum, pnum=(1, 3, 1))
    pnum_ = pt.make_pnum_nextgen(1, 3)

    # classname_alias = {
    #     POSTV: 'positive',
    #     NEGTV: 'negative',
    #     INCMP: 'incomparable',
    # }

    ibs = pblm.infr.ibs
    for class_name in class_to_edge.keys():
        edge = class_to_edge[class_name]
        aid1, aid2 = edge
        # alias = classname_alias[class_name]
        print('class_name = %r' % (class_name,))
        annot1 = ibs.annots([aid1])[0]._make_lazy_dict()
        annot2 = ibs.annots([aid2])[0]._make_lazy_dict()
        vt.matching.ensure_metadata_normxy(annot1)
        vt.matching.ensure_metadata_normxy(annot2)
        match = vt.PairwiseMatch(annot1, annot2)
        cfgdict = pblm.hyper_params.vsone_match.asdict()
        match.apply_all(cfgdict)
        pt.figure(fnum=fnum, pnum=pnum_())
        match.show(show_ell=False, show_ori=False)