def update_image(self):
    import plottool as pt
    #print('update_image')
    self.ax.images.pop()
    #self.ax.imshow(self.mask, interpolation='nearest', alpha=0.6)
    pt.imshow(self.mask, ax=self.ax, interpolation='nearest', alpha=0.6)
    self.draw()

def chip_montage(ibs, qaids, config=None):
    r"""
    CommandLine:
        python -m ibeis.viz.viz_other chip_montage --show --db PZ_MTEST
        python -m ibeis.viz.viz_other chip_montage --show --db GZ_ALL

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.viz.viz_other import *  # NOQA
        >>> defaultdb = 'seaturtles'
        >>> import ibeis
        >>> a = ['default']
        >>> ibs = ibeis.opendb(defaultdb=defaultdb)
        >>> ibs, qaids, daids = ibeis.testdata_expanded_aids(ibs=ibs, a=a)
        >>> config = None
        >>> chip_montage(ibs, qaids, config)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import vtool as vt
    import plottool as pt
    chip_list = ibs.get_annot_chips(qaids, config2_=config)
    height = 2000
    dsize = (int(height * ut.PHI), height)
    dst = vt.montage(chip_list, dsize)
    pt.imshow(dst)

def show_single_coverage_mask(qreq_, cm, weight_mask_m, weight_mask, daids, fnum=None):
    import plottool as pt
    import vtool as vt
    from ibeis import viz
    fnum = pt.ensure_fnum(fnum)
    idx_list = ut.dict_take(cm.daid2_idx, daids)
    nPlots = len(idx_list) + 1
    nRows, nCols = pt.get_square_row_cols(nPlots)
    pnum_ = pt.make_pnum_nextgen(nRows, nCols)
    pt.figure(fnum=fnum, pnum=(1, 2, 1))
    # Draw coverage masks with bbox
    # <FlipHack>
    #weight_mask_m = np.fliplr(np.flipud(weight_mask_m))
    #weight_mask = np.fliplr(np.flipud(weight_mask))
    # </FlipHack>
    stacked_weights, offset_tup, sf_tup = vt.stack_images(weight_mask_m, weight_mask,
                                                          return_sf=True)
    (woff, hoff) = offset_tup[1]
    wh1 = weight_mask_m.shape[0:2][::-1]
    wh2 = weight_mask.shape[0:2][::-1]
    pt.imshow(255 * (stacked_weights), fnum=fnum, pnum=pnum_(0),
              title='(query image) What did match vs what should match')
    pt.draw_bbox((0, 0) + wh1, bbox_color=(0, 0, 1))
    pt.draw_bbox((woff, hoff) + wh2, bbox_color=(0, 0, 1))
    # Get contributing matches
    qaid = cm.qaid
    daid_list = daids
    fm_list = ut.take(cm.fm_list, idx_list)
    fs_list = ut.take(cm.fs_list, idx_list)
    # Draw matches
    for px, (daid, fm, fs) in enumerate(zip(daid_list, fm_list, fs_list), start=1):
        viz.viz_matches.show_matches2(qreq_.ibs, qaid, daid, fm, fs, draw_pts=False,
                                      draw_lines=True, draw_ell=False, fnum=fnum,
                                      pnum=pnum_(px), darken=.5)
    coverage_score = score_matching_mask(weight_mask_m, weight_mask)
    pt.set_figtitle('score=%.4f' % (coverage_score,))

def testshow_extramargin_info(ibs, aid_list, arg_list, newsize_list, halfoffset_cs_list):
    #cfpath, gfpath, bbox, theta, new_size, filter_list = tup  # TEMP TESTING
    from vtool import chip as ctool
    import plottool as pt
    import vtool as vt
    from ibeis.viz import viz_chip
    index = 0
    cfpath, gfpath, bbox, theta, new_size, filter_list = arg_list[index]
    chipBGR = ctool.compute_chip(gfpath, bbox, theta, new_size, filter_list)
    bbox_cs_list = [
        (xo_pcs, yo_pcs, w_pcs, h_pcs)
        for (w_pcs, h_pcs), (xo_pcs, yo_pcs) in zip(newsize_list, halfoffset_cs_list)
    ]
    bbox_pcs = bbox_cs_list[index]
    aid = aid_list[0]
    print('new_size = %r' % (new_size,))
    print('newsize_list[index] = %r' % (newsize_list[index],))
    fnum = 1
    viz_chip.show_chip(ibs, aid, pnum=(1, 3, 1), fnum=fnum, annote=False, in_image=True,
                       title_suffix='\noriginal image')
    viz_chip.show_chip(ibs, aid, pnum=(1, 3, 2), fnum=fnum, annote=False,
                       title_suffix='\noriginal chip')
    bboxed_chip = vt.draw_verts(chipBGR,
                                vt.scaled_verts_from_bbox(bbox_pcs, theta, 1, 1))
    pt.imshow(bboxed_chip, pnum=(1, 3, 3), fnum=fnum,
              title='scaled chip with expanded margin.\n(orig margin drawn in orange)')
    pt.show_if_requested()

def show_chip_distinctiveness_plot(chip, kpts, dstncvs, fnum=1, pnum=None):
    import plottool as pt
    pt.figure(fnum, pnum=pnum)
    ax = pt.gca()
    divider = pt.ensure_divider(ax)
    #ax1 = divider.append_axes("left", size="50%", pad=0)
    ax1 = ax
    ax2 = divider.append_axes("bottom", size="100%", pad=0.05)
    #f, (ax1, ax2) = pt.plt.subplots(1, 2, sharex=True)
    cmapstr = 'rainbow'  # 'hot'
    color_list = pt.df2.plt.get_cmap(cmapstr)(ut.norm_zero_one(dstncvs))
    sortx = dstncvs.argsort()
    #pt.df2.plt.plot(qfx2_dstncvs[sortx], c=color_list[sortx])
    pt.plt.sca(ax1)
    pt.colorline(np.arange(len(sortx)), dstncvs[sortx],
                 cmap=pt.plt.get_cmap(cmapstr))
    pt.gca().set_xlim(0, len(sortx))
    pt.dark_background()
    pt.plt.sca(ax2)
    pt.imshow(chip, darken=.2)
    # MATPLOTLIB BUG CANNOT SHOW DIFFERENT ALPHA FOR POINTS AND KEYPOINTS AT ONCE
    #pt.draw_kpts2(kpts, pts_color=color_list, ell_color=color_list, ell_alpha=.1,
    #              ell=True, pts=True)
    #pt.draw_kpts2(kpts, color_list=color_list, pts_alpha=1.0, pts_size=1.5,
    #              ell=True, ell_alpha=.1, pts=False)
    ell = ut.get_argflag('--ell')
    pt.draw_kpts2(kpts, color_list=color_list, pts_alpha=1.0, pts_size=1.5,
                  ell=ell, ell_alpha=.3, pts=not ell)
    pt.plt.sca(ax)

def show_notch_tips(depc, aid, config={}, fnum=None, pnum=None):
    import plottool as pt
    pt.figure(fnum=fnum, pnum=pnum)
    notch = depc.get('Notch_Tips', aid, config=config)
    chip = depc.get('chips', aid, 'img', config=config)
    pt.imshow(chip)
    pt.draw_kpts2(np.array(notch), pts=True, ell=False, pts_size=20)

def test_cv2_flann():
    """
    Ignore:
        [name for name in dir(cv2) if 'create' in name.lower()]
        [name for name in dir(cv2) if 'stereo' in name.lower()]

        ut.grab_zipped_url('https://priithon.googlecode.com/archive/a6117f5e81ec00abcfb037f0f9da2937bb2ea47f.tar.gz', download_dir='.')
    """
    import cv2
    from vtool.tests import dummy
    import plottool as pt
    import vtool as vt
    img1 = vt.imread(ut.grab_test_imgpath('easy1.png'))
    img2 = vt.imread(ut.grab_test_imgpath('easy2.png'))

    stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
    disparity = stereo.compute(img1, img2)
    pt.imshow(disparity)
    pt.show()

    #cv2.estima
    flow = cv2.createOptFlow_DualTVL1()
    img1, img2 = vt.convert_image_list_colorspace([img1, img2], 'gray',
                                                  src_colorspace='bgr')
    img2 = vt.resize(img2, img1.shape[0:2][::-1])
    out = img1.copy()
    flow.calc(img1, img2, out)

    orb = cv2.ORB_create()
    kp1, vecs1 = orb.detectAndCompute(img1, None)
    kp2, vecs2 = orb.detectAndCompute(img2, None)

    detector = cv2.FeatureDetector_create("SIFT")
    descriptor = cv2.DescriptorExtractor_create("SIFT")
    skp = detector.detect(img1)
    skp, sd = descriptor.compute(img1, skp)
    tkp = detector.detect(img2)
    tkp, td = descriptor.compute(img2, tkp)

    out = img1.copy()
    cv2.drawKeypoints(img1, kp1, outImage=out)
    pt.imshow(out)

    vecs1 = dummy.testdata_dummy_sift(10)
    vecs2 = dummy.testdata_dummy_sift(10)  # NOQA

    FLANN_INDEX_KDTREE = 0  # bug: flann enums are missing
    #flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=4)
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)   # or pass empty dictionary
    flann = cv2.FlannBasedMatcher(index_params, search_params)  # NOQA
    cv2.flann.Index(vecs1, index_params)
    #cv2.FlannBasedMatcher(flann_params)
    cv2.flann.Index(vecs1, index_params)  # NOQA

def draw_sift_on_patch(patch, sift, **kwargs):
    import plottool as pt
    pt.imshow(patch)
    ax = pt.gca()
    half_size = patch.shape[0] / 2
    invVR = np.array([[half_size, 0, half_size],
                      [0, half_size, half_size],
                      [0, 0, 1]])
    invVR_aff2Ds = np.array([invVR])
    sifts = np.array([sift])
    return draw_sifts(ax, sifts, invVR_aff2Ds)

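# --- Hedged usage sketch (illustrative addition, not in the original source) ---
# Exercises draw_sift_on_patch with synthetic inputs; the random grayscale
# patch, the random 128-dim uint8 descriptor, and the helper name below are
# assumptions chosen only so the call is self-contained.
def _demo_draw_sift_on_patch():
    import numpy as np
    import plottool as pt
    patch = (np.random.rand(64, 64) * 255).astype(np.uint8)  # fake patch
    sift = (np.random.rand(128) * 255).astype(np.uint8)      # fake descriptor
    draw_sift_on_patch(patch, sift)
    pt.show_if_requested()
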
def static_plot(self, fnum=None, pnum=(1, 1, 1)):
    import plottool as pt
    self.ax = pt.gca()
    #self.ax.imshow(img, interpolation='nearest', alpha=1)
    #self.ax.imshow(mask, interpolation='nearest', alpha=0.6)
    pt.imshow(self.img, ax=self.ax, interpolation='nearest', alpha=1)
    pt.imshow(self.mask, ax=self.ax, interpolation='nearest', alpha=0.6)
    self.update_title()
    self.ax.grid(False)

def visualize_vocab_word(ibs, invassign, wx, fnum=None):
    """
    Example:
        >>> from ibeis.new_annots import *  # NOQA
        >>> import plottool as pt
        >>> pt.qt4ensure()
        >>> ibs, aid_list, vocab = testdata_vocab()
        >>> #aid_list = aid_list[0:1]
        >>> fstack = StackedFeatures(ibs, aid_list)
        >>> nAssign = 2
        >>> invassign = fstack.inverted_assignment(vocab, nAssign)
        >>> sortx = ut.argsort(invassign.num_list)[::-1]
        >>> wx_list = ut.take(invassign.wx_list, sortx)
        >>> wx = wx_list[0]
    """
    import plottool as pt
    import vtool as vt
    pt.qt4ensure()
    vecs = invassign.get_vecs(wx)
    word = invassign.vocab.wx2_word[wx]
    word_patches = invassign.get_patches(wx)
    average_patch = np.mean(word_patches, axis=0)
    average_vec = vecs.mean(axis=0)
    average_vec = word
    with_sift = True
    fnum = 2
    fnum = pt.ensure_fnum(fnum)
    if with_sift:
        patch_img = pt.render_sift_on_patch(average_patch, average_vec)
        #sift_word_patches = [pt.render_sift_on_patch(patch, vec)
        #                     for patch, vec in ut.ProgIter(list(zip(word_patches, vecs)))]
        #stacked_patches = vt.stack_square_images(word_patches)
        #stacked_patches = vt.stack_square_images(sift_word_patches)
    else:
        patch_img = average_patch
    stacked_patches = vt.stack_square_images(word_patches)
    solidbar = np.zeros((patch_img.shape[0], int(patch_img.shape[1] * .1), 3),
                        dtype=patch_img.dtype)
    border_color = (100, 10, 10)  # bgr, darkblue
    if ut.is_float(solidbar):
        solidbar[:, :, :] = (np.array(border_color) / 255)[None, None]
    else:
        solidbar[:, :, :] = np.array(border_color)[None, None]
    word_img = vt.stack_image_list([patch_img, solidbar, stacked_patches],
                                   vert=False, modifysize=True)
    pt.imshow(word_img, fnum=fnum)
    #pt.imshow(patch_img, pnum=(1, 2, 1), fnum=fnum)
    #patch_size = 64
    #half_size = patch_size / 2
    #pt.imshow(stacked_patches, pnum=(1, 2, 2), fnum=fnum)
    pt.iup()

def intra_encounter_matching():
    import numpy as np
    from scipy.sparse import coo_matrix, csgraph
    qreq_, cm_list = testdata_workflow()
    # qaids = [cm.qaid for cm in cm_list]
    # top_aids = [cm.get_top_aids(5) for cm in cm_list]
    aid_pairs = np.array([(cm.qaid, daid)
                          for cm in cm_list for daid in cm.get_top_aids(5)])
    top_scores = ut.flatten([cm.get_top_scores(5) for cm in cm_list])
    N = aid_pairs.max() + 1
    mat = coo_matrix((top_scores, aid_pairs.T), shape=(N, N))
    csgraph.connected_components(mat)
    tree = csgraph.minimum_spanning_tree(mat)  # NOQA
    import plottool as pt
    dense = mat.todense()
    pt.imshow(dense / dense.max() * 255)
    pt.show_if_requested()
    """
    # baseline jobid
    import opengm
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/OpenGM%20tutorial.ipynb
    numVar = 10
    unaries = np.ones([numVar, 3], dtype=opengm.value_type)
    gm = opengm.gm(np.ones(numVar, dtype=opengm.label_type) * 3)
    unary_fids = gm.addFunctions(unaries)
    gm.addFactors(unary_fids, np.arange(numVar))
    infParam = opengm.InfParam(
        workflow=ut.ensure_ascii('(IC)(TTC-I,CC-I)'),
    )
    inf = opengm.inference.Multicut(gm, parameter=infParam)
    visitor = inf.verboseVisitor(printNth=1, multiline=False)
    inf.infer(visitor)
    arg = inf.arg()

    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)

    # regularizer = opengm.pottsFunction([3, 3], 0.0, beta)
    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)

    unaries = np.random.rand(10, 10, 2)
    potts = opengm.PottsFunction([2, 2], 0.0, 0.4)
    gm = opengm.grid2d2Order(unaries=unaries, regularizer=potts)
    inf = opengm.inference.GraphCut(gm)
    inf.infer()
    arg = inf.arg()  # NOQA
    """

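# --- Hedged sketch (illustrative addition, not in the original source) ---
# Isolates the sparse-graph step of intra_encounter_matching on toy data:
# pack pairwise match scores into a scipy coo_matrix and read off connected
# components. The aid pairs, scores, and helper name are assumptions made for
# illustration only.
def _demo_score_graph_components():
    import numpy as np
    from scipy.sparse import coo_matrix, csgraph
    aid_pairs = np.array([(0, 1), (1, 2), (3, 4)])   # fake (query, database) pairs
    top_scores = np.array([0.9, 0.7, 0.8])           # fake match scores
    N = aid_pairs.max() + 1
    mat = coo_matrix((top_scores, aid_pairs.T), shape=(N, N))
    n_components, labels = csgraph.connected_components(mat)
    # Two components are expected here: {0, 1, 2} and {3, 4}
    return n_components, labels
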
def multidb_montage():
    r"""
    CommandLine:
        python -m ibeis.scripts.specialdraw multidb_montage --save montage.jpg --dpath ~/slides --diskshow --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.scripts.specialdraw import *  # NOQA
        >>> multidb_montage()
    """
    import ibeis
    import plottool as pt
    import vtool as vt
    import numpy as np
    pt.ensure_pylab_qt4()
    ibs1 = ibeis.opendb('PZ_MTEST')
    ibs2 = ibeis.opendb('GZ_ALL')
    ibs3 = ibeis.opendb('GIRM_Master1')

    chip_lists = []
    aids_list = []

    for ibs in [ibs1, ibs2, ibs3]:
        aids = ibs.sample_annots_general(minqual='good', sample_size=400)
        aids_list.append(aids)

    print(ut.depth_profile(aids_list))

    for ibs, aids in zip([ibs1, ibs2, ibs3], aids_list):
        chips = ibs.get_annot_chips(aids)
        chip_lists.append(chips)

    chip_list = ut.flatten(chip_lists)
    np.random.shuffle(chip_list)

    widescreen_ratio = 16 / 9
    ratio = ut.PHI
    ratio = widescreen_ratio

    fpath = pt.get_save_directions()

    #height = 6000
    width = 6000
    #width = int(height * ratio)
    height = int(width / ratio)
    dsize = (width, height)
    dst = vt.montage(chip_list, dsize)
    vt.imwrite(fpath, dst)
    if ut.get_argflag('--show'):
        pt.imshow(dst)

def testshow_colors(rgb_list, gray=ut.get_argflag('--gray')):
    import plottool as pt
    import vtool as vt
    block = np.zeros((5, 5, 3))
    block_list = [block + color[0:3] for color in rgb_list]
    #print(ut.list_str(block_list))
    #print(ut.list_str(rgb_list))
    stacked_block = vt.stack_image_list(block_list, vert=False)
    # convert to bgr
    stacked_block = stacked_block[:, :, ::-1]
    uint8_img = (255 * stacked_block).astype(np.uint8)
    if gray:
        import cv2
        uint8_img = cv2.cvtColor(uint8_img, cv2.COLOR_RGB2GRAY)
    pt.imshow(uint8_img)
    pt.show_if_requested()

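# --- Hedged usage sketch (illustrative addition, not in the original source) ---
# testshow_colors expects float RGB tuples in [0, 1]; the three colors and the
# helper name below are arbitrary values chosen for illustration.
def _demo_testshow_colors():
    rgb_list = [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.2, 0.4, 0.8)]
    testshow_colors(rgb_list)
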
def update_ui(co_wgt):
    if not co_wgt.hack:
        if co_wgt.current_gindex == 0:
            co_wgt.button_list[0].setEnabled(False)
        else:
            co_wgt.button_list[0].setEnabled(True)
        if co_wgt.current_gindex == len(co_wgt.gid_list) - 1:
            co_wgt.button_list[1].setEnabled(False)
        else:
            co_wgt.button_list[1].setEnabled(True)

    # TODO: Either integrate this into utool or check if it's already there
    def extract_tuple(li, idx):
        # works on both Python 2 and 3 (zip is not subscriptable in py3)
        return [tup[idx] for tup in li]

    # Update option setting, assume datetime has been updated
    co_wgt.combo_list[1].setCurrentIndex(extract_tuple(co_wgt.opt_list['year'], 1).index(co_wgt.dtime.year))
    co_wgt.combo_list[3].setCurrentIndex(extract_tuple(co_wgt.opt_list['month'], 1).index(co_wgt.dtime.month))
    co_wgt.combo_list[5].setCurrentIndex(extract_tuple(co_wgt.opt_list['day'], 1).index(co_wgt.dtime.day))
    co_wgt.combo_list[7].setCurrentIndex(extract_tuple(co_wgt.opt_list['hour'], 1).index(co_wgt.dtime.hour))
    co_wgt.combo_list[9].setCurrentIndex(extract_tuple(co_wgt.opt_list['minute'], 1).index(co_wgt.dtime.minute))
    co_wgt.combo_list[11].setCurrentIndex(extract_tuple(co_wgt.opt_list['second'], 1).index(co_wgt.dtime.second))

    # Redraw image
    if not co_wgt.hack:
        if co_wgt.imfig is not None:
            close_figure(co_wgt.imfig)
        image = co_wgt.ibs.get_images(co_wgt.gid_list[co_wgt.current_gindex])
        figtitle = "Time Synchronization Picture"
        co_wgt.imfig, co_wgt.imax = imshow(image, fnum=co_wgt.fnum, title=figtitle)
        co_wgt.imfig.show()

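# --- Hedged sketch (illustrative addition, not in the original source) ---
# Shows what the extract_tuple helper in update_ui returns: the idx-th column
# of a list of (label, value) options, which is then searched with .index().
# The option list and helper name are made up for illustration.
def _demo_extract_tuple():
    opt_list = [('2014', 2014), ('2015', 2015), ('2016', 2016)]  # fake options
    values = [tup[1] for tup in opt_list]
    assert values.index(2015) == 1
    return values
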
def show_coverage_map(chip, mask, patch, kpts, fnum=None, ell_alpha=.6,
                      show_mask_kpts=False):
    """ testing function """
    import plottool as pt
    if fnum is None:
        fnum = pt.next_fnum()
    pnum_ = pt.get_pnum_func(nRows=2, nCols=2)
    if patch is not None:
        pt.imshow((patch * 255).astype(np.uint8), fnum=fnum, pnum=pnum_(0), title='patch')
        #ut.embed()
        pt.imshow((mask * 255).astype(np.uint8), fnum=fnum, pnum=pnum_(1), title='mask')
    else:
        pt.imshow((mask * 255).astype(np.uint8), fnum=fnum, pnum=(2, 1, 1), title='mask')
    if show_mask_kpts:
        pt.draw_kpts2(kpts, rect=True, ell_alpha=ell_alpha)
    pt.imshow(chip, fnum=fnum, pnum=pnum_(2), title='chip')
    pt.draw_kpts2(kpts, rect=True, ell_alpha=ell_alpha)
    masked_chip = (chip * mask[:, :, None]).astype(np.uint8)
    pt.imshow(masked_chip, fnum=fnum, pnum=pnum_(3), title='masked chip')

def test_average_contrast():
    import vtool as vt
    ut.get_valid_test_imgkeys()
    img_fpath_list = [ut.grab_test_imgpath(key) for key in ut.get_valid_test_imgkeys()]
    img_list = [vt.imread(img, grayscale=True) for img in img_fpath_list]
    avecontrast_list = np.array([compute_average_contrast(img) for img in img_list])
    import plottool as pt
    nCols = len(img_list)
    fnum = None
    if fnum is None:
        fnum = pt.next_fnum()
    pt.figure(fnum=fnum, pnum=(2, 1, 1))
    sortx = avecontrast_list.argsort()
    y_list = avecontrast_list[sortx]
    x_list = np.arange(0, nCols) + .5
    pt.plot(x_list, y_list, 'bo-')
    sorted_imgs = ut.take(img_list, sortx)
    for px, img in ut.ProgressIter(enumerate(sorted_imgs, start=1)):
        pt.imshow(img, fnum=fnum, pnum=(2, nCols, nCols + px))

def show_ori_image(gori, weights, patch, gradx=None, grady=None, gauss=None, fnum=None):
    """
    python -m pyhesaff._pyhesaff --test-test_rot_invar --show --nocpp
    """
    import plottool as pt
    if fnum is None:
        fnum = pt.next_fnum()
    print('gori.max = %r' % gori.max())
    assert gori.max() <= TAU
    assert gori.min() >= 0
    bgr_ori = pt.color_orimag(gori, weights, False, encoding='bgr')
    print('bgr_ori.max = %r' % bgr_ori.max())
    #ut.embed()
    bgr_ori = (255 * bgr_ori).astype(np.uint8)
    print('bgr_ori.max = %r' % bgr_ori.max())
    #bgr_ori = np.array(bgr_ori, dtype=np.uint8)
    legend = pt.make_ori_legend_img()
    #gorimag_, woff, hoff = vt.stack_images(bgr_ori, legend, vert=False, modifysize=True)
    import vtool as vt
    gorimag_, offsets, sftup = vt.stack_images(bgr_ori, legend, vert=False,
                                               modifysize=True, return_offset=True,
                                               return_sf=True)
    (woff, hoff) = offsets[1]
    if patch is None:
        pt.imshow(gorimag_, fnum=fnum)
    else:
        pt.imshow(gorimag_, fnum=fnum, pnum=(3, 1, 1), title='colored by orientation')
        #pt.imshow(patch, fnum=fnum, pnum=(2, 2, 1))
        #gradx, grady = np.cos(gori + TAU / 4.0), np.sin(gori + TAU / 4.0)
        if gradx is not None and grady is not None:
            if weights is not None:
                gradx *= weights
                grady *= weights
            pt.imshow(np.array(gradx * 255, dtype=np.uint8), fnum=fnum, pnum=(3, 3, 4))
            pt.imshow(np.array(grady * 255, dtype=np.uint8), fnum=fnum, pnum=(3, 3, 5))
            #pt.imshow(bgr_ori, pnum=(2, 2, 4))
            pt.draw_vector_field(gradx, grady, pnum=(3, 3, 6), invert=True)
        pt.imshow(patch, fnum=fnum, pnum=(3, 1, 3))

def testshow_extramargin_info(gfpath, bbox_gs, theta, new_size, halfoffset_ms,
                              mbbox_gs, margin_size):
    import plottool as pt
    import vtool as vt
    imgBGR = vt.imread(gfpath)
    chipBGR = compute_chip(gfpath, bbox_gs, theta, new_size, [])
    mchipBGR = compute_chip(gfpath, mbbox_gs, theta, margin_size, [])
    #index = 0
    w_cs, h_cs = new_size
    xo_ms, yo_ms = halfoffset_ms
    bbox_ms = [xo_ms, yo_ms, w_cs, h_cs]
    verts_gs = vt.scaled_verts_from_bbox(bbox_gs, theta, 1, 1)
    expanded_verts_gs = vt.scaled_verts_from_bbox(mbbox_gs, theta, 1, 1)
    expanded_verts_ms = vt.scaled_verts_from_bbox(bbox_ms, 0, 1, 1)
    # topheavy
    imgBGR = vt.draw_verts(imgBGR, verts_gs)
    imgBGR = vt.draw_verts(imgBGR, expanded_verts_gs)
    mchipBGR = vt.draw_verts(mchipBGR, expanded_verts_ms)
    fnum = 1
    pt.imshow(imgBGR, pnum=(1, 3, 1), fnum=fnum, title='original image')
    pt.gca().set_xlabel(str(imgBGR.shape))
    pt.imshow(chipBGR, pnum=(1, 3, 2), fnum=fnum, title='original chip')
    pt.gca().set_xlabel(str(chipBGR.shape))
    pt.imshow(mchipBGR, pnum=(1, 3, 3), fnum=fnum,
              title='scaled chip with expanded margin.\n(orig margin drawn in orange)')
    pt.gca().set_xlabel(str(mchipBGR.shape))
    pt.show_if_requested()

def show_many_chips(ibs, aid_list, config2_=None):
    r"""
    CommandLine:
        python -m ibeis.viz.viz_chip --test-show_many_chips
        python -m ibeis.viz.viz_chip --test-show_many_chips --show --db NNP_Master3 --aids=13276,14047,14489,14906,10194,10201,12656,10150,11002,15315,7191,13127,15591,12838,13970,14123,14167 --no-annote --dpath figures --save ~/latex/crall-candidacy-2015/figures/challengechips.jpg '--caption=challenging images'

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.viz.viz_chip import *  # NOQA
        >>> import numpy as np
        >>> in_image = False
        >>> ibs, aid_list, kwargs, config2_ = testdata_showchip()
        >>> # execute function
        >>> show_many_chips(ibs, aid_list, config2_)
        >>> ut.show_if_requested()
    """
    if ut.VERBOSE:
        print('[viz] show_many_chips')
    in_image = False
    chip_list = vh.get_chips(ibs, aid_list, in_image=in_image, config2_=config2_)
    import vtool as vt
    stacked_chips = vt.stack_image_recurse(chip_list, modifysize=True)
    pt.imshow(stacked_chips)

def show_matches_(key, **kwargs):
    assert key in key_list, 'unknown key=%r' % (key,)
    showkw = locals_.copy()
    pnum = next_pnum()
    showkw['pnum'] = pnum
    showkw['fnum'] = fnum
    showkw.update(kwargs)
    _fm, _fs = matchtup_dict[key]
    title = keytitle_dict[key]
    if kwargs.get('coverage'):
        from vtool import coverage_kpts
        kpts2, rchip2 = ut.dict_get(locals_, ('kpts2', 'rchip2'))
        kpts2_m = kpts2.take(_fm.T[1], axis=0)
        chipshape2 = rchip2.shape
        chipsize2 = chipshape2[0:2][::-1]
        coverage_mask = coverage_kpts.make_kpts_coverage_mask(
            kpts2_m, chipsize2, fx2_score=_fs, resize=True, return_patch=False)
        pt.imshow(coverage_mask * 255, pnum=pnum, fnum=fnum)
    else:
        if kwargs.get('norm', False):
            _fm = normtup_dict[key]
            assert _fm is not None, key
            showkw['cmap'] = 'cool'
            title += ' normalizers'
        show_matches(_fm, _fs, title=title, key=key, **showkw)

def show_covimg_result(img, fnum=None, pnum=None):
    pt.imshow(255 * img, fnum=fnum, pnum=pnum)

def old_test_single_annot_distinctiveness_params(ibs, aid):
    r"""
    CommandLine:
        python -m ibeis.model.hots.distinctiveness_normalizer --test-old_test_single_annot_distinctiveness_params --show
        python -m ibeis.model.hots.distinctiveness_normalizer --test-old_test_single_annot_distinctiveness_params --show --db GZ_ALL

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.model.hots.distinctiveness_normalizer import *  # NOQA
        >>> import plottool as pt
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb(ut.get_argval('--db', type_=str, default='PZ_MTEST'))
        >>> aid = ut.get_argval('--aid', type_=int, default=1)
        >>> # execute function
        >>> old_test_single_annot_distinctiveness_params(ibs, aid)
        >>> pt.show_if_requested()
    """
    ####
    # TODO: Also paramatarize the downweighting based on the keypoint size
    ####
    # HACK IN ABILITY TO SET CONFIG
    from ibeis.dev.main_commands import postload_commands
    postload_commands(ibs, None)

    from vtool import coverage_image
    import plottool as pt
    from plottool import interact_impaint

    #cfglbl_list = cfgdict_list
    #ut.all_dict_combinations_lbls(varied_dict)

    # Get info to find distinctivness of
    species_text = ibs.get_annot_species(aid)
    vecs = ibs.get_annot_vecs(aid)
    kpts = ibs.get_annot_kpts(aid)
    print(kpts)
    chip = ibs.get_annot_chips(aid)
    chipsize = ibs.get_annot_chipsizes(aid)

    # Paramater space to search
    # TODO: use slicing to control the params being varied
    # Use GridSearch class to modify paramaters as you go.

    gauss_patch_varydict = {
        'gauss_shape': [(7, 7), (19, 19), (41, 41), (5, 5), (3, 3)],
        'gauss_sigma_frac': [.2, .5, .7, .95],
    }
    cov_blur_varydict = {
        'cov_blur_on': [True, False],
        'cov_blur_ksize': [(5, 5,), (7, 7), (17, 17)],
        'cov_blur_sigma': [5.0, 1.2],
    }
    dstncvs_varydict = {
        'dcvs_power': [.01, .1, .5, 1.0],
        'dcvs_clip_max': [.05, .1, .2, .5],
        'dcvs_K': [2, 3, 5],
    }
    size_penalty_varydict = {
        'remove_affine_information': [False, True],
        'constant_scaling': [False, True],
        'size_penalty_on': [True, False],
        'size_penalty_power': [.5, .1, 1.0],
        'size_penalty_scale': [.1, 1.0],
    }
    keyval_iter = ut.iflatten([
        dstncvs_varydict.items(),
        gauss_patch_varydict.items(),
        cov_blur_varydict.items(),
        size_penalty_varydict.items(),
    ])

    # Dont vary most paramaters, specify how much of their list can be used
    param_slice_dict = {
        'dcvs_power'                : slice(0, 2),
        'dcvs_K'                    : slice(0, 2),
        'dcvs_clip_max'             : slice(0, 2),
        #'gauss_shape'               : slice(0, 3),
        'gauss_sigma_frac'          : slice(0, 2),
        'remove_affine_information' : slice(0, 2),
        'constant_scaling'          : slice(0, 2),
        'size_penalty_on'           : slice(0, 2),
        #'cov_blur_on'               : slice(0, 2),
        #'cov_blur_ksize'            : slice(0, 2),
        #'cov_blur_sigma'            : slice(0, 1),
        #'size_penalty_power'        : slice(0, 2),
        #'size_penalty_scale'        : slice(0, 2),
    }
    varied_dict = {
        key: val[param_slice_dict.get(key, slice(0, 1))]
        for key, val in keyval_iter
    }

    def constrain_config(cfg):
        """ encode what makes a configuration feasible """
        if cfg['cov_blur_on'] is False:
            cfg['cov_blur_ksize'] = None
            cfg['cov_blur_sigma'] = None
        if cfg['constant_scaling'] is True:
            cfg['remove_affine_information'] = True
            cfg['size_penalty_on'] = False
        if cfg['remove_affine_information'] is True:
            cfg['gauss_shape'] = (41, 41)
        if cfg['size_penalty_on'] is False:
            cfg['size_penalty_power'] = None
            cfg['size_penalty_scale'] = None

    print('Varied Dict: ')
    print(ut.dict_str(varied_dict))

    cfgdict_list, cfglbl_list = ut.make_constrained_cfg_and_lbl_list(varied_dict, constrain_config)

    # Get groundtruthish distinctivness map
    # for objective function
    GT_IS_DSTNCVS = 255
    GT_NOT_DSTNCVS = 100
    GT_UNKNOWN = 0
    label_colors = [GT_IS_DSTNCVS, GT_NOT_DSTNCVS, GT_UNKNOWN]
    gtmask = interact_impaint.cached_impaint(chip, 'dstncvnss',
                                             label_colors=label_colors,
                                             aug=True,
                                             refine=ut.get_argflag('--refine'))
    true_dstncvs_mask = gtmask == GT_IS_DSTNCVS
    false_dstncvs_mask = gtmask == GT_NOT_DSTNCVS

    true_dstncvs_mask_sum = true_dstncvs_mask.sum()
    false_dstncvs_mask_sum = false_dstncvs_mask.sum()

    def distinctiveness_objective_function(dstncvs_mask):
        true_mask = true_dstncvs_mask * dstncvs_mask
        false_mask = false_dstncvs_mask * dstncvs_mask
        true_score = true_mask.sum() / true_dstncvs_mask_sum
        false_score = false_mask.sum() / false_dstncvs_mask_sum
        score = true_score * (1 - false_score)
        return score

    # Load distinctivness normalizer
    with ut.Timer('Loading Distinctivness Normalizer for %s' % (species_text)):
        dstcvnss_normer = request_species_distinctiveness_normalizer(species_text)

    # Get distinctivness over all params
    dstncvs_list = [dstcvnss_normer.get_distinctiveness(vecs, **cfgdict)
                    for cfgdict in ut.ProgressIter(cfgdict_list, lbl='get dstcvns')]

    # Then compute the distinctinvess coverage map
    #gauss_shape = kwargs.get('gauss_shape', (19, 19))
    #sigma_frac = kwargs.get('sigma_frac', .3)
    dstncvs_mask_list = [
        coverage_image.make_coverage_mask(
            kpts, chipsize, fx2_score=dstncvs, mode='max', return_patch=False, **cfg)
        for cfg, dstncvs in ut.ProgressIter(zip(cfgdict_list, dstncvs_list),
                                            lbl='Warping Image')
    ]
    score_list = [distinctiveness_objective_function(dstncvs_mask)
                  for dstncvs_mask in dstncvs_mask_list]

    fnum = 1

    def show_covimg_result(img, fnum=None, pnum=None):
        pt.imshow(255 * img, fnum=fnum, pnum=pnum)

    ut.interact_gridsearch_result_images(
        show_covimg_result, cfgdict_list, cfglbl_list, dstncvs_mask_list,
        score_list=score_list, fnum=fnum, figtitle='dstncvs gridsearch')

    # Show subcomponents of grid search
    gauss_patch_cfgdict_list, gauss_patch_cfglbl_list = ut.get_cfgdict_lbl_list_subset(cfgdict_list, gauss_patch_varydict)
    patch_list = [coverage_image.get_gaussian_weight_patch(**cfgdict)
                  for cfgdict in ut.ProgressIter(gauss_patch_cfgdict_list, lbl='patch cfg')]
    ut.interact_gridsearch_result_images(
        show_covimg_result, gauss_patch_cfgdict_list, gauss_patch_cfglbl_list,
        patch_list, fnum=fnum + 1, figtitle='gaussian patches')

    patch = patch_list[0]

    # Show the first mask in more depth
    dstncvs = dstncvs_list[0]
    dstncvs_mask = dstncvs_mask_list[0]
    coverage_image.show_coverage_map(chip, dstncvs_mask, patch, kpts, fnum=fnum + 2,
                                     ell_alpha=.2, show_mask_kpts=False)

    pt.imshow(gtmask, fnum=fnum + 3, pnum=(1, 2, 1), title='ground truth distinctiveness')
    pt.imshow(chip, fnum=fnum + 3, pnum=(1, 2, 2))
    pt.present()

def show_fgweight_mask(annot, title="fg", update=True, **kwargs):
    import plottool as pt
    pt.imshow(annot.fgweight_mask * 255.0, update=update, title=title, **kwargs)

def show_dstncvs_mask(annot, title="wd", update=True, **kwargs):
    import plottool as pt
    pt.imshow(annot.dstncvs_mask * 255.0, update=update, title=title, **kwargs)

def show(annot):
    import plottool as pt
    pt.imshow(annot.rchip)
    pt.draw_kpts2(annot.kpts)

def gridsearch_chipextract():
    r"""
    CommandLine:
        python -m vtool.chip --test-gridsearch_chipextract --show

    Example:
        >>> # GRIDSEARCH
        >>> from vtool.chip import *  # NOQA
        >>> gridsearch_chipextract()
        >>> ut.show_if_requested()
    """
    import cv2
    test_func = extract_chip_from_img
    if False:
        gpath = ut.grab_test_imgpath('carl.jpg')
        bbox = (100, 3, 100, 100)
        theta = 0.0
        new_size = (58, 34)
    else:
        gpath = '/media/raid/work/GZ_Master1/_ibsdb/images/1524525d-2131-8770-d27c-3a5f9922e9e9.jpg'
        bbox = (450, 373, 2062, 1124)
        theta = 0.0
        old_size = bbox[2:4]
        #target_area = 700 ** 2
        target_area = 1200 ** 2
        new_size = get_scaled_sizes_with_area(target_area, [old_size])[0]
        print('old_size = %r' % (old_size,))
        print('new_size = %r' % (new_size,))
        #new_size = (677, 369)
    imgBGR = gtool.imread(gpath)
    args = (imgBGR, bbox, theta, new_size)
    param_info = ut.ParamInfoList('extract_params', [
        ut.ParamInfo('interpolation', cv2.INTER_LANCZOS4,
                     varyvals=[
                         cv2.INTER_LANCZOS4,
                         cv2.INTER_CUBIC,
                         cv2.INTER_LINEAR,
                         cv2.INTER_NEAREST,
                         #cv2.INTER_AREA
                     ],)
    ])
    show_func = None
    # Generalize
    import plottool as pt
    pt.imshow(imgBGR)  # HACK
    cfgdict_list, cfglbl_list = param_info.get_gridsearch_input(defaultslice=slice(0, 10))
    fnum = pt.ensure_fnum(None)
    if show_func is None:
        show_func = pt.imshow
    lbl = ut.get_funcname(test_func)
    cfgresult_list = [
        test_func(*args, **cfgdict)
        for cfgdict in ut.ProgressIter(cfgdict_list, lbl=lbl)
    ]
    onclick_func = None
    ut.interact_gridsearch_result_images(
        show_func, cfgdict_list, cfglbl_list, cfgresult_list, fnum=fnum,
        figtitle=lbl, unpack=False, max_plots=25, onclick_func=onclick_func)
    pt.iup()

def fourier_devtest(img):
    r"""
    Args:
        img (ndarray[uint8_t, ndim=2]): image data

    CommandLine:
        python -m vtool.quality_classifier --test-fourier_devtest --show

    References:
        http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.html
        http://cns-alumni.bu.edu/~slehar/fourier/fourier.html

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.quality_classifier import *  # NOQA
        >>> import vtool as vt
        >>> # build test data
        >>> img_fpath = ut.grab_test_imgpath('lena.png')
        >>> img = vt.imread(img_fpath, grayscale=True)
        >>> # execute function
        >>> magnitude_spectrum = fourier_devtest(img)
    """
    import plottool as pt

    def pad_img(img):
        rows, cols = img.shape
        nrows = cv2.getOptimalDFTSize(rows)
        ncols = cv2.getOptimalDFTSize(cols)
        right = ncols - cols
        bottom = nrows - rows
        bordertype = cv2.BORDER_CONSTANT
        nimg = cv2.copyMakeBorder(img, 0, bottom, 0, right, bordertype, value=0)
        return nimg

    def convert_to_fdomain(img):
        dft = cv2.dft(img.astype(np.float32), flags=cv2.DFT_COMPLEX_OUTPUT)
        #dft_shift = np.fft.fftshift(dft)
        return dft

    def convert_from_fdomain(dft):
        img = cv2.idft(dft)
        img = cv2.magnitude(img[:, :, 0], img[:, :, 1])
        img /= img.max()
        return img * 255.0

    def get_fdomain_mag(dft_shift):
        magnitude_spectrum = np.log(cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]))
        return magnitude_spectrum

    def imgstats(img):
        print('stats:')
        print(' dtype = %r ' % (img.dtype,))
        print(' ' + ut.get_stats_str(img, axis=None))

    nimg = pad_img(img)
    dft = convert_to_fdomain(nimg)
    #freq_domain = np.fft.fft2(img)
    #freq_domain_shift = np.fft.fftshift(freq_domain)

    rows, cols = nimg.shape
    crow, ccol = rows // 2, cols // 2
    # create a mask first, center square is 1, remaining all zeros
    mask = np.zeros((rows, cols, 2), np.uint8)
    mask[crow - 30:crow + 30, ccol - 30:ccol + 30] = 1
    dft_mask = np.fft.ifftshift(np.fft.fftshift(dft) * mask)
    img_back = convert_from_fdomain(dft_mask)

    imgstats(dft)
    imgstats(mask)
    imgstats(nimg)
    imgstats(nimg)

    print('nimg.shape = %r' % (nimg.shape,))
    print('dft_shift.shape = %r' % (dft.shape,))

    if ut.show_was_requested():
        #import plottool as pt
        next_pnum = pt.make_pnum_nextgen(nRows=3, nCols=2)
        pt.imshow(nimg, pnum=next_pnum(), title='nimg')
        pt.imshow(20 * get_fdomain_mag(dft), pnum=next_pnum(), title='mag(f)')
        pt.imshow(20 * get_fdomain_mag(dft_mask), pnum=next_pnum(), title='dft_mask')
        pt.imshow(img_back, pnum=next_pnum(), title='img_back')
        pt.show_if_requested()

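# --- Hedged sketch (illustrative addition, not in the original source) ---
# A numpy-only counterpart to the cv2.dft path above: np.fft.fft2 yields the
# same kind of log-magnitude spectrum that fourier_devtest visualizes, which
# can serve as a quick cross-check when OpenCV is unavailable. The helper name
# is hypothetical.
def _numpy_magnitude_spectrum(img):
    import numpy as np
    freq = np.fft.fftshift(np.fft.fft2(img.astype(np.float32)))
    return np.log(np.abs(freq) + 1)
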
def test_rot_invar():
    r"""
    CommandLine:
        python -m pyhesaff test_rot_invar --show --rebuild-hesaff --no-rmbuild
        python -m pyhesaff test_rot_invar --show --nocpp

        python -m vtool.tests.dummy testdata_ratio_matches --show --ratio_thresh=1.0 --rotation_invariance --rebuild-hesaff
        python -m vtool.tests.dummy testdata_ratio_matches --show --ratio_thresh=1.1 --rotation_invariance --rebuild-hesaff

    Example:
        >>> # DISABLE_DOCTEST
        >>> from pyhesaff._pyhesaff import *  # NOQA
        >>> test_rot_invar()
    """
    import cv2
    import utool as ut
    import vtool as vt
    import plottool as pt
    TAU = 2 * np.pi
    fnum = pt.next_fnum()
    NUM_PTS = 5  # 9
    theta_list = np.linspace(0, TAU, NUM_PTS, endpoint=False)
    nRows, nCols = pt.get_square_row_cols(len(theta_list), fix=True)
    next_pnum = pt.make_pnum_nextgen(nRows, nCols)
    # Expand the border a bit around star.png
    pad_ = 100
    img_fpath = ut.grab_test_imgpath('star.png')
    img_fpath2 = vt.pad_image_ondisk(img_fpath, pad_, value=26)
    for theta in theta_list:
        print('-----------------')
        print('theta = %r' % (theta,))
        #theta = ut.get_argval('--theta', type_=float, default=TAU * 3 / 8)
        img_fpath = vt.rotate_image_ondisk(img_fpath2, theta,
                                           borderMode=cv2.BORDER_REPLICATE)
        if not ut.get_argflag('--nocpp'):
            (kpts_list_ri, vecs_list2) = detect_feats(img_fpath,
                                                      rotation_invariance=True)
            kpts_ri = ut.strided_sample(kpts_list_ri, 2)
        (kpts_list_gv, vecs_list1) = detect_feats(img_fpath,
                                                  rotation_invariance=False)
        kpts_gv = ut.strided_sample(kpts_list_gv, 2)
        # find_kpts_direction
        imgBGR = vt.imread(img_fpath)
        kpts_ripy = vt.find_kpts_direction(imgBGR, kpts_gv, DEBUG_ROTINVAR=False)
        # Verify results stdout
        #print('nkpts = %r' % (len(kpts_gv)))
        #print(vt.kpts_repr(kpts_gv))
        #print(vt.kpts_repr(kpts_ri))
        #print(vt.kpts_repr(kpts_ripy))
        # Verify results plot
        pt.figure(fnum=fnum, pnum=next_pnum())
        pt.imshow(imgBGR)
        #if len(kpts_gv) > 0:
        #    pt.draw_kpts2(kpts_gv, ori=True, ell_color=pt.BLUE, ell_linewidth=10.5)
        ell = False
        rect = True
        if not ut.get_argflag('--nocpp'):
            if len(kpts_ri) > 0:
                pt.draw_kpts2(kpts_ri, rect=rect, ell=ell, ori=True,
                              ell_color=pt.RED, ell_linewidth=5.5)
        if len(kpts_ripy) > 0:
            pt.draw_kpts2(kpts_ripy, rect=rect, ell=ell, ori=True,
                          ell_color=pt.GREEN, ell_linewidth=3.5)
        #print('\n'.join(vt.get_ori_strs(np.vstack([kpts_gv, kpts_ri, kpts_ripy]))))
        #ut.embed(exec_lines=['pt.update()'])
    pt.set_figtitle('green=python, red=C++')
    pt.show_if_requested()

def show_chip(ibs, aid, in_image=False, annote=True, title_suffix='',
              weight_label=None, weights=None, config2_=None, **kwargs):
    r"""
    Driver function to show chips

    Args:
        ibs (ibeis.IBEISController):
        aid (int): annotation rowid
        in_image (bool): displays annotation with the context of its source image
        annote (bool): enables overlay annoations
        title_suffix (str):
        weight_label (None): (default = None)
        weights (None): (default = None)
        config2_ (dict): (default = None)

    Kwargs:
        enable_chip_title_prefix, nokpts, kpts_subset, kpts, text_color,
        notitle, draw_lbls, show_aidstr, show_gname, show_name, show_nid,
        show_exemplar, show_num_gt, show_quality_text, show_viewcode, fnum,
        title, figtitle, pnum, interpolation, cmap, heatmap, data_colorbar,
        darken, update, xlabel, redraw_image, ax, alpha, docla, doclf,
        projection, pts, ell
        color (3/4-tuple, ndarray, or str): colors for keypoints

    CommandLine:
        python -m ibeis.viz.viz_chip show_chip --show --ecc
        python -c "import utool as ut; ut.print_auto_docstr('ibeis.viz.viz_chip', 'show_chip')"
        python -m ibeis.viz.viz_chip show_chip --show --db NNP_Master3 --aids 14047 --no-annote
        python -m ibeis.viz.viz_chip show_chip --show --db NNP_Master3 --aids 14047 --no-annote
        python -m ibeis.viz.viz_chip show_chip --show --db PZ_MTEST --aid 1 --bgmethod=cnn
        python -m ibeis.viz.viz_chip show_chip --show --db PZ_MTEST --aid 1 --bgmethod=cnn --scale_max=30
        python -m ibeis.viz.viz_chip show_chip --show --db PZ_MTEST --aid 1 --ecc --draw_lbls=False --notitle --save=~/slides/lnbnn_query.jpg --dpi=300

    Example:
        >>> # VIZ_TEST
        >>> from ibeis.viz.viz_chip import *  # NOQA
        >>> import numpy as np
        >>> import vtool as vt
        >>> in_image = False
        >>> ibs, aid_list, kwargs, config2_ = testdata_showchip()
        >>> aid = aid_list[0]
        >>> if True:
        >>>     import matplotlib as mpl
        >>>     from ibeis.scripts.thesis import TMP_RC
        >>>     mpl.rcParams.update(TMP_RC)
        >>> if ut.get_argflag('--ecc'):
        >>>     kpts = ibs.get_annot_kpts(aid, config2_=config2_)
        >>>     weights = ibs.get_annot_fgweights([aid], ensure=True, config2_=config2_)[0]
        >>>     kpts = ut.random_sample(kpts[weights > .9], 200, seed=0)
        >>>     ecc = vt.get_kpts_eccentricity(kpts)
        >>>     scale = 1 / vt.get_scales(kpts)
        >>>     #s = ecc if config2_.affine_invariance else scale
        >>>     s = scale
        >>>     colors = pt.scores_to_color(s, cmap_='jet')
        >>>     kwargs['color'] = colors
        >>>     kwargs['kpts'] = kpts
        >>>     kwargs['ell_linewidth'] = 3
        >>>     kwargs['ell_alpha'] = .7
        >>> show_chip(ibs, aid, in_image=in_image, config2_=config2_, **kwargs)
        >>> pt.show_if_requested()
    """
    if ut.VERBOSE:
        print('[viz] show_chip(aid=%r)' % (aid,))
    #ibs.assert_valid_aids((aid,))
    # Get chip
    #print('in_image = %r' % (in_image,))
    chip = vh.get_chips(ibs, aid, in_image=in_image, config2_=config2_)
    # Create chip title
    chip_text = vh.get_annot_texts(ibs, [aid], **kwargs)[0]
    if kwargs.get('enable_chip_title_prefix', True):
        chip_title_text = chip_text + title_suffix
    else:
        chip_title_text = title_suffix
    chip_title_text = chip_title_text.strip('\n')
    # Draw chip
    fig, ax = pt.imshow(chip, **kwargs)
    # Populate axis user data
    vh.set_ibsdat(ax, 'viztype', 'chip')
    vh.set_ibsdat(ax, 'aid', aid)
    if annote and not kwargs.get('nokpts', False):
        # Get and draw keypoints
        if 'color' not in kwargs:
            if weight_label == 'fg_weights':
                if weights is None and ibs.has_species_detector(ibs.get_annot_species_texts(aid)):
                    weight_label = 'fg_weights'
                    weights = ibs.get_annot_fgweights([aid], ensure=True,
                                                      config2_=config2_)[0]
            if weights is not None:
                cmap_ = 'hot'
                #if weight_label == 'dstncvs':
                #    cmap_ = 'rainbow'
                color = pt.scores_to_color(weights, cmap_=cmap_, reverse_cmap=False)
                kwargs['color'] = color
                kwargs['ell_color'] = color
                kwargs['pts_color'] = color
        kpts_ = vh.get_kpts(ibs, aid, in_image, config2_=config2_,
                            kpts_subset=kwargs.get('kpts_subset', None),
                            kpts=kwargs.pop('kpts', None))
        pt.viz_keypoints._annotate_kpts(kpts_, **kwargs)
        if kwargs.get('draw_lbls', True):
            pt.upperleft_text(chip_text, color=kwargs.get('text_color', None))
    use_title = not kwargs.get('notitle', False)
    if use_title:
        pt.set_title(chip_title_text)
    if in_image:
        gid = ibs.get_annot_gids(aid)
        aid_list = ibs.get_image_aids(gid)
        annotekw = viz_image.get_annot_annotations(
            ibs, aid_list, sel_aids=[aid], draw_lbls=kwargs.get('draw_lbls', True))
        # Put annotation centers in the axis
        ph.set_plotdat(ax, 'annotation_bbox_list', annotekw['bbox_list'])
        ph.set_plotdat(ax, 'aid_list', aid_list)
        pt.viz_image2.draw_image_overlay(ax, **annotekw)

        zoom_ = ut.get_argval('--zoom', type_=float, default=None)
        if zoom_ is not None:
            import vtool as vt
            # Zoom into the chip for some image context
            rotated_verts = ibs.get_annot_rotated_verts(aid)
            bbox = ibs.get_annot_bboxes(aid)
            #print(bbox)
            #print(rotated_verts)
            rotated_bbox = vt.bbox_from_verts(rotated_verts)
            imgw, imgh = ibs.get_image_sizes(gid)

            pad_factor = zoom_
            pad_length = min(bbox[2], bbox[3]) * pad_factor
            minx = max(rotated_bbox[0] - pad_length, 0)
            miny = max(rotated_bbox[1] - pad_length, 0)
            maxx = min((rotated_bbox[0] + rotated_bbox[2]) + pad_length, imgw)
            maxy = min((rotated_bbox[1] + rotated_bbox[3]) + pad_length, imgh)

            #maxy = imgh - maxy
            #miny = imgh - miny

            ax = pt.gca()
            ax.set_xlim(minx, maxx)
            ax.set_ylim(miny, maxy)
            ax.invert_yaxis()
    else:
        ph.set_plotdat(ax, 'chipshape', chip.shape)

    #if 'featweights' in vars() and 'color' in kwargs:
    if weights is not None and weight_label is not None:
        ## HACK HACK HACK
        if len(weights) > 0:
            cb = pt.colorbar(weights, kwargs['color'])
            cb.set_label(weight_label)
    return fig, ax

def intra_encounter_matching():
    qreq_, cm_list = testdata_workflow()
    # qaids = [cm.qaid for cm in cm_list]
    # top_aids = [cm.get_top_aids(5) for cm in cm_list]
    import numpy as np
    from scipy.sparse import coo_matrix, csgraph
    aid_pairs = np.array([(cm.qaid, daid)
                          for cm in cm_list for daid in cm.get_top_aids(5)])
    top_scores = ut.flatten([cm.get_top_scores(5) for cm in cm_list])
    N = aid_pairs.max() + 1
    mat = coo_matrix((top_scores, aid_pairs.T), shape=(N, N))
    csgraph.connected_components(mat)
    tree = csgraph.minimum_spanning_tree(mat)  # NOQA
    import plottool as pt
    dense = mat.todense()
    pt.imshow(dense / dense.max() * 255)
    pt.show_if_requested()
    """
    # load image and convert to LAB
    img_fpath = str(ut.grab_test_imgpath(str('lena.png')))
    img = vigra.impex.readImage(img_fpath)
    imgLab = vigra.colors.transform_RGB2Lab(img)

    superpixelDiameter = 15   # super-pixel size
    slicWeight = 15.0         # SLIC color - spatial weight
    labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight,
                                                  superpixelDiameter)
    labels = vigra.analysis.labelImage(labels) - 1

    # get 2D grid graph and RAG
    gridGraph = graphs.gridGraph(img.shape[0:2])
    rag = graphs.regionAdjacencyGraph(gridGraph, labels)

    nodeFeatures = rag.accumulateNodeFeatures(imgLab)
    nodeFeaturesImg = rag.projectNodeFeaturesToGridGraph(nodeFeatures)
    nodeFeaturesImg = vigra.taggedView(nodeFeaturesImg, "xyc")
    nodeFeaturesImgRgb = vigra.colors.transform_Lab2RGB(nodeFeaturesImg)

    #from sklearn.cluster import MiniBatchKMeans, KMeans
    from sklearn import mixture
    nCluster = 3
    g = mixture.GMM(n_components=nCluster)
    g.fit(nodeFeatures[:, :])
    clusterProb = g.predict_proba(nodeFeatures)
    import numpy
    #https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Irregular%20Factor%20Graphs.ipynb
    #https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Hard%20and%20Soft%20Constraints.ipynb
    clusterProbImg = rag.projectNodeFeaturesToGridGraph(clusterProb.astype(numpy.float32))
    clusterProbImg = vigra.taggedView(clusterProbImg, "xyc")

    # strength of potts regularizer
    beta = 40.0
    # graphical model with as many variables
    # as superpixels, each has 3 states
    gm = opengm.gm(numpy.ones(rag.nodeNum, dtype=opengm.label_type) * nCluster)
    # convert probabilites to energies
    probs = numpy.clip(clusterProb, 0.00001, 0.99999)
    costs = -1.0 * numpy.log(probs)
    # add ALL unaries AT ONCE
    fids = gm.addFunctions(costs)
    gm.addFactors(fids, numpy.arange(rag.nodeNum))
    # add a potts function
    regularizer = opengm.pottsFunction([nCluster] * 2, 0.0, beta)
    fid = gm.addFunction(regularizer)
    # get variable indices of adjacent superpixels
    # - or "u" and "v" node id's for edges
    uvIds = rag.uvIds()
    uvIds = numpy.sort(uvIds, axis=1)
    # add all second order factors at once
    gm.addFactors(fid, uvIds)

    # get super-pixels with slic on LAB image

    import opengm
    # Matching Graph
    cost_matrix = np.array([
        [0.5, 0.6, 0.2, 0.4, 0.1],
        [0.0, 0.5, 0.2, 0.9, 0.2],
        [0.0, 0.0, 0.5, 0.1, 0.1],
        [0.0, 0.0, 0.0, 0.5, 0.1],
        [0.0, 0.0, 0.0, 0.0, 0.5],
    ])
    cost_matrix += cost_matrix.T
    number_of_labels = 5
    num_annots = 5
    cost_matrix = (cost_matrix * 2) - 1
    #gm = opengm.gm(number_of_labels)
    gm = opengm.gm(np.ones(num_annots) * number_of_labels)
    aids = np.arange(num_annots)
    aid_pairs = np.array([(a1, a2) for a1, a2 in ut.iprod(aids, aids)
                          if a1 != a2], dtype=np.uint32)
    aid_pairs.sort(axis=1)
    # 2nd order function
    fid = gm.addFunction(cost_matrix)
    gm.addFactors(fid, aid_pairs)
    Inf = opengm.inference.BeliefPropagation
    #Inf = opengm.inference.Multicut
    parameter = opengm.InfParam(steps=10, damping=0.5, convergenceBound=0.001)
    parameter = opengm.InfParam()
    inf = Inf(gm, parameter=parameter)

    class PyCallback(object):
        def __init__(self,):
            self.labels = []
            pass
        def begin(self, inference):
            print("begin of inference")
            pass
        def end(self, inference):
            self.labels.append(inference.arg())
            pass
        def visit(self, inference):
            gm = inference.gm()
            labelVector = inference.arg()
            print("energy %r" % (gm.evaluate(labelVector),))
            self.labels.append(labelVector)
            pass

    callback = PyCallback()
    visitor = inf.pythonVisitor(callback, visitNth=1)
    inf.infer(visitor)
    print(callback.labels)

    # baseline jobid
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/OpenGM%20tutorial.ipynb
    numVar = 10
    unaries = np.ones([numVar, 3], dtype=opengm.value_type)
    gm = opengm.gm(np.ones(numVar, dtype=opengm.label_type) * 3)
    unary_fids = gm.addFunctions(unaries)
    gm.addFactors(unary_fids, np.arange(numVar))
    infParam = opengm.InfParam(
        workflow=ut.ensure_ascii('(IC)(TTC-I,CC-I)'),
    )
    inf = opengm.inference.Multicut(gm, parameter=infParam)
    visitor = inf.verboseVisitor(printNth=1, multiline=False)
    inf.infer(visitor)
    arg = inf.arg()

    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)

    # regularizer = opengm.pottsFunction([3, 3], 0.0, beta)
    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)

    unaries = np.random.rand(10, 10, 2)
    potts = opengm.PottsFunction([2, 2], 0.0, 0.4)
    gm = opengm.grid2d2Order(unaries=unaries, regularizer=potts)
    inf = opengm.inference.GraphCut(gm)
    inf.infer()
    arg = inf.arg()  # NOQA
    """

def dummy_cut_example():
    r"""
    CommandLine:
        python -m ibeis.workflow --exec-dummy_cut_example --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.workflow import *  # NOQA
        >>> result = dummy_cut_example()
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import opengm
    import numpy as np
    import plottool as pt
    pt.ensure_pylab_qt4()
    # Matching Graph
    cost_matrix = np.array([
        [0.5, 0.6, 0.2, 0.4],
        [0.0, 0.5, 0.2, 0.9],
        [0.0, 0.0, 0.5, 0.1],
        [0.0, 0.0, 0.0, 0.5],
    ])
    cost_matrix += cost_matrix.T
    number_of_labels = 4
    num_annots = 4
    #cost_matrix = (cost_matrix * 2) - 1
    #gm = opengm.gm(number_of_labels)
    gm = opengm.gm(np.ones(num_annots) * number_of_labels)
    aids = np.arange(num_annots)
    aid_pairs = np.array([(a1, a2) for a1, a2 in ut.iprod(aids, aids)
                          if a1 != a2], dtype=np.uint32)
    aid_pairs.sort(axis=1)

    # add a potts function
    # penalizes neighbors for having different labels
    # beta = 0   # 0.1  # strength of potts regularizer
    #beta = 0.1  # 0.1  # strength of potts regularizer

    # Places to look for the definition of this stupid class
    # ~/code/opengm/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
    # /src/interfaces/python/opengm/opengmcore/function_injector.py
    #shape = [number_of_labels] * 2
    #regularizer = opengm.PottsGFunction(shape, 0.0, beta)
    # __init__( (object)arg1, (object)shape [, (object)values=()]) -> object :
    # values = np.arange(1, ut.num_partitions(num_annots) + 1)
    #regularizer = opengm.PottsGFunction(shape)
    #reg_fid = gm.addFunction(regularizer)

    # A Comparative Study of Modern Inference Techniques for Structured Discrete Energy Minimization Problems
    # http://arxiv.org/pdf/1404.0533.pdf
    # regularizer1 = opengm.pottsFunction([number_of_labels] * 2,
    #                                     valueEqual=0.0, valueNotEqual=beta)

    # gm.addFactors(reg_fid, aid_pairs)

    # 2nd order function
    pair_fid = gm.addFunction(cost_matrix)
    gm.addFactors(pair_fid, aid_pairs)

    if False:
        Inf = opengm.inference.BeliefPropagation
        parameter = opengm.InfParam(steps=10, damping=0.5, convergenceBound=0.001)
    else:
        Inf = opengm.inference.Multicut
        parameter = opengm.InfParam()

    inf = Inf(gm, parameter=parameter)

    class PyCallback(object):
        def __init__(self,):
            self.labels = []

        def begin(self, inference):
            print("begin of inference")

        def end(self, inference):
            self.labels.append(inference.arg())

        def visit(self, inference):
            gm = inference.gm()
            labelVector = inference.arg()
            print("energy %r" % (gm.evaluate(labelVector),))
            self.labels.append(labelVector)

    callback = PyCallback()
    visitor = inf.pythonVisitor(callback, visitNth=1)
    inf.infer(visitor)
    print(callback.labels)

    print(cost_matrix)
    pt.imshow(cost_matrix, cmap='magma')
    opengm.visualizeGm(gm=gm)
    pass

def dummy_multicut():
    """
    """
    # Places to look for the definition of PottsGFunction class
    # ~/code/opengm/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
    # /src/interfaces/python/opengm/opengmcore/function_injector.py

    # A Comparative Study of Modern Inference Techniques for Structured Discrete Energy Minimization Problems
    # http://arxiv.org/pdf/1404.0533.pdf

    # __init__( (object)arg1, (object)shape [, (object)values=()]) -> object :
    # values = np.arange(1, ut.num_partitions(num_annots) + 1)
    # http://hci.iwr.uni-heidelberg.de/opengm2/doxygen/opengm-2.1.1/classopengm_1_1PottsGFunction.html
    import opengm
    import numpy as np
    from itertools import product
    cost_matrix = np.array([
        [ 1. ,  0.2, -0.6, -0.2],
        [ 0.2,  1. , -0.6,  0.8],
        [-0.6, -0.6,  1. , -0.8],
        [-0.2,  0.8, -0.8,  1. ]])
    num_vars = len(cost_matrix)

    # Enumerate undirected edges (node index pairs)
    var_indices = np.arange(num_vars)
    varindex_pairs = np.array(
        [(a1, a2) for a1, a2 in product(var_indices, var_indices)
         if a1 != a2 and a1 > a2], dtype=np.uint32)
    varindex_pairs.sort(axis=1)

    # Create nodes in the graphical model.  In this case there are <num_vars>
    # nodes and each node can be assigned to one of <num_vars> possible labels
    num_nodes = num_vars
    space = np.full((num_nodes,), fill_value=num_vars, dtype=np.int)
    gm = opengm.gm(space)

    # Use one potts function for each edge
    for varx1, varx2 in varindex_pairs:
        cost = cost_matrix[varx1, varx2]
        potts_func = opengm.PottsFunction((num_vars, num_vars),
                                          valueEqual=0, valueNotEqual=cost)
        potts_func_id = gm.addFunction(potts_func)
        var_indicies = np.array([varx1, varx2])
        gm.addFactor(potts_func_id, var_indicies)

    #opengm.visualizeGm(gm=gm)

    InfAlgo = opengm.inference.Multicut
    parameter = opengm.InfParam()
    inf = InfAlgo(gm, parameter=parameter)
    inf.infer()
    labels = inf.arg()
    print(labels)

    import plottool as pt

    #varindex_pairs = np.vstack(np.triu_indices_from(cost_matrix)).T

    # Dummy unaries
    #for varx in var_indices:
    #    unary_func = np.ones(num_vars)
    #    unary_func_id = gm.addFunction(unary_func)
    #    gm.addFactor(unary_func_id, varx1)

    #pt.ensure_pylab_qt4()

    # add a potts function
    #shape = [num_vars] * 2
    # num_parts = 5  # possible number paritions with 4 variables
    # num_parts = ut.get_nth_bell_number(num_vars - 1)
    # Causes a segfault if values is passed in
    # values = np.arange(1, num_parts + 1).astype(np.float64)
    # gpotts_func = opengm.PottsGFunction(shape, values)
    #gpotts_func = opengm.PottsGFunction(shape)
    #gpotts_fid = gm.addFunction(gpotts_func)
    # Commenting out the next line results in a segfault
    #gm.addFactors(gpotts_fid, varindex_pairs)

    # 2nd order function
    # Seems to cause OpenGM error: Invalid Model for Multicut-Solver! Solver requires a generalized potts model!
    # pair_fid = gm.addFunction(cost_matrix)
    # gm.addFactors(pair_fid, varindex_pairs)

    InfAlgo = opengm.inference.Multicut
    # Not sure what parameters are allowed to be passed here.
    parameter = opengm.InfParam()
    inf = InfAlgo(gm, parameter=parameter)
    inf.infer()

    class PyCallback(object):
        def __init__(self,):
            self.labels = []

        def begin(self, inference):
            print("begin of inference")

        def end(self, inference):
            self.labels.append(inference.arg())

        def visit(self, inference):
            gm = inference.gm()
            labelVector = inference.arg()
            print("energy %r" % (gm.evaluate(labelVector),))
            self.labels.append(labelVector)

    callback = PyCallback()
    visitor = inf.pythonVisitor(callback, visitNth=1)
    inf.infer(visitor)
    print(callback.labels)

    print(cost_matrix)
    pt.imshow(cost_matrix, cmap='magma')
    opengm.visualizeGm(gm=gm)

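# --- Hedged sketch (illustrative addition, not in the original source) ---
# For a single edge with cost c, the PottsFunction built in dummy_multicut is
# zero on the diagonal and c everywhere else. The helper below reproduces that
# value table with plain numpy so it can be inspected without opengm; the
# function name is hypothetical.
def _potts_value_table(num_labels, value_equal, value_not_equal):
    import numpy as np
    table = np.full((num_labels, num_labels), value_not_equal, dtype=float)
    np.fill_diagonal(table, value_equal)
    return table
# e.g. _potts_value_table(4, 0.0, 0.2) mirrors the per-edge function that
# dummy_multicut hands to gm.addFunction for cost_matrix[0, 1].
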
def segmentation_example():
    import vigra
    import opengm
    import sklearn
    import sklearn.mixture
    import sklearn.decomposition
    import numpy as np
    from vigra import graphs
    import matplotlib as mpl
    import plottool as pt

    pt.ensure_pylab_qt4()

    # load image and convert to LAB
    img_fpath = str(ut.grab_test_imgpath(str('lena.png')))
    img = vigra.impex.readImage(img_fpath)
    imgLab = vigra.colors.transform_RGB2Lab(img)

    superpixelDiameter = 15   # super-pixel size
    slicWeight = 15.0         # SLIC color - spatial weight
    labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight,
                                                  superpixelDiameter)
    labels = vigra.analysis.labelImage(labels) - 1

    # get 2D grid graph and RAG
    gridGraph = graphs.gridGraph(img.shape[0:2])
    rag = graphs.regionAdjacencyGraph(gridGraph, labels)

    # Node Features
    nodeFeatures = rag.accumulateNodeFeatures(imgLab)
    nodeFeaturesImg = rag.projectNodeFeaturesToGridGraph(nodeFeatures)
    nodeFeaturesImg = vigra.taggedView(nodeFeaturesImg, "xyc")
    nodeFeaturesImgRgb = vigra.colors.transform_Lab2RGB(nodeFeaturesImg)

    nCluster = 5
    g = sklearn.mixture.GMM(n_components=nCluster)
    g.fit(nodeFeatures[:, :])
    clusterProb = g.predict_proba(nodeFeatures)
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Irregular%20Factor%20Graphs.ipynb
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Hard%20and%20Soft%20Constraints.ipynb
    clusterProbImg = rag.projectNodeFeaturesToGridGraph(clusterProb.astype(np.float32))
    clusterProbImg = vigra.taggedView(clusterProbImg, "xyc")

    ndim_data = clusterProbImg.reshape((-1, nCluster))
    pca = sklearn.decomposition.PCA(n_components=3)
    print(ndim_data.shape)
    pca.fit(ndim_data)
    print(ut.repr2(pca.explained_variance_ratio_, precision=2))
    oldshape = (clusterProbImg.shape[0:2] + (-1,))
    clusterProgImg3 = pca.transform(ndim_data).reshape(oldshape)
    print(clusterProgImg3.shape)

    # graphical model with as many variables
    # as superpixels, each has 3 states
    gm = opengm.gm(np.ones(rag.nodeNum, dtype=opengm.label_type) * nCluster)
    # convert probabilites to energies
    probs = np.clip(clusterProb, 0.00001, 0.99999)
    costs = -1.0 * np.log(probs)
    # add ALL unaries AT ONCE
    fids = gm.addFunctions(costs)
    gm.addFactors(fids, np.arange(rag.nodeNum))
    # add a potts function
    beta = 40.0  # strength of potts regularizer
    regularizer = opengm.pottsFunction([nCluster] * 2, 0.0, beta)
    fid = gm.addFunction(regularizer)
    # get variable indices of adjacent superpixels
    # - or "u" and "v" node id's for edges
    uvIds = rag.uvIds()
    uvIds = np.sort(uvIds, axis=1)
    # add all second order factors at once
    gm.addFactors(fid, uvIds)

    # get super-pixels with slic on LAB image

    Inf = opengm.inference.BeliefPropagation
    parameter = opengm.InfParam(steps=10, damping=0.5, convergenceBound=0.001)
    inf = Inf(gm, parameter=parameter)

    class PyCallback(object):
        def __init__(self,):
            self.labels = []

        def begin(self, inference):
            print("begin of inference")

        def end(self, inference):
            self.labels.append(inference.arg())

        def visit(self, inference):
            gm = inference.gm()
            labelVector = inference.arg()
            print("energy %r" % (gm.evaluate(labelVector),))
            self.labels.append(labelVector)

    callback = PyCallback()
    visitor = inf.pythonVisitor(callback, visitNth=1)
    inf.infer(visitor)

    pt.imshow(clusterProgImg3.swapaxes(0, 1))
    # plot superpixels
    cmap = mpl.colors.ListedColormap(np.random.rand(nseg, 3))
    pt.imshow(labels.swapaxes(0, 1).squeeze(), cmap=cmap)
    pt.imshow(nodeFeaturesImgRgb)

    cmap = mpl.colors.ListedColormap(np.random.rand(nCluster, 3))
    for arg in callback.labels:
        arg = vigra.taggedView(arg, "n")
        argImg = rag.projectNodeFeaturesToGridGraph(arg.astype(np.uint32))
        argImg = vigra.taggedView(argImg, "xy")
        # plot superpixels
        pt.imshow(argImg.swapaxes(0, 1).squeeze(), cmap=cmap)

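# --- Hedged sketch (illustrative addition, not in the original source) ---
# Isolates the probability-to-energy step used in segmentation_example: clip
# the GMM posteriors away from 0 and 1, then take negative logs so that likely
# labels receive low energies. The toy posterior matrix and helper name are
# assumptions for illustration.
def _demo_probs_to_energies():
    import numpy as np
    probs = np.array([[0.7, 0.2, 0.1],
                      [0.1, 0.1, 0.8]])  # fake per-node class posteriors
    costs = -1.0 * np.log(np.clip(probs, 0.00001, 0.99999))
    return costs
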
def show(self):
    # self.augment = False
    # self.augment = True
    loader = torch.utils.data.DataLoader(self, batch_size=6)
    iter_ = iter(loader)
    im_tensor, gt_tensor = next(iter_)
    # im_tensor = next(iter_)
    im_list, gt_list = self.from_tensor(im_tensor, gt_tensor)
    stacked_img = np.hstack([im[:, :, 0:3] for im in im_list])
    stacked_gt = np.hstack(gt_list)
    # stacked_gtblend = self.task.colorize(stacked_gt, stacked_img)

    import plottool as pt
    n_rows = 2
    if self.aux_keys:
        aux_imgs = [im[:, :, 3] for im in im_list]
        stacked_aux = np.hstack(aux_imgs)
        aux_imgs2 = [im[:, :, 4] for im in im_list]
        stacked_aux2 = np.hstack(aux_imgs2)
        n_rows += 2

    n_rows = 6
    pt.imshow(stacked_img[:, :, 0], pnum=(n_rows, 1, 1), cmap='viridis', norm=True)
    pt.imshow(stacked_img[:, :, 1], pnum=(n_rows, 1, 2), cmap='viridis', norm=True)
    pt.imshow(stacked_img[:, :, 2], pnum=(n_rows, 1, 3), cmap='viridis', norm=True)
    pt.imshow(stacked_gt[:, :], pnum=(n_rows, 1, 4), cmap='viridis', norm=True)
    # pt.imshow(stacked_img, pnum=(n_rows, 1, 1))
    # pt.imshow(stacked_gtblend, pnum=(n_rows, 1, 2))
    if self.aux_keys:
        pt.imshow(stacked_aux, pnum=(n_rows, 1, 5), cmap='viridis', norm=True)
        pt.imshow(stacked_aux2, pnum=(n_rows, 1, 6), cmap='viridis', norm=True)