def show_normalizers(match, fnum=None, pnum=None, update=True):
    import plottool as pt
    from plottool import plot_helpers as ph
    # hack keys out of namespace
    keys = ['rchip', 'kpts']
    rchip1, kpts1 = ut.dict_take(match.annot1.__dict__, keys)
    rchip2, kpts2 = ut.dict_take(match.annot2.__dict__, keys)
    fs, fm = match.fs, match.fm_norm
    cmap = 'cool'
    draw_lines = True
    if fnum is None:
        fnum = pt.next_fnum()
    pt.figure(fnum=fnum, pnum=pnum)  # doclf=True, docla=True
    ax, xywh1, xywh2 = pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2,
                                          fm=fm, fs=fs, fnum=fnum, cmap=cmap,
                                          draw_lines=draw_lines)
    ph.set_plotdat(ax, 'viztype', 'matches')
    ph.set_plotdat(ax, 'key', match.key)
    title = match.key + '\n num=%d, sum=%.2f' % (len(fm), sum(fs))
    pt.set_title(title)
    if update:
        pt.update()
    return ax, xywh1, xywh2

def visualize_probs(normalizer, update=True, fnum=None, pnum=(1, 1, 1)):
    plot_postbayes_pdf(normalizer.score_domain,
                       1 - normalizer.p_tp_given_score,
                       normalizer.p_tp_given_score,
                       cfgstr=normalizer.get_cfgstr(), fnum=fnum, pnum=pnum)
    if update:
        import plottool as pt
        pt.update()

def visualize(normalizer, update=True, verbose=True, fnum=None):
    """
    CommandLine:
        python -m ibeis.algo.hots.score_normalization --test-visualize --index 0 --cmd

    Example:
        >>> # DISABLE_DOCTEST
        >>> import plottool as pt
        >>> from ibeis.algo.hots.score_normalization import *  # NOQA
        >>> #import ibeis
        >>> index = ut.get_argval('--index', type_=int, default=0)
        >>> normalizer = load_precomputed_normalizer(index, with_global=False)
        >>> normalizer.visualize()
        >>> six.exec_(pt.present(), globals(), locals())
    """
    import plottool as pt
    if verbose:
        print(normalizer.get_infostr())
    if normalizer.score_domain is None:
        return
    if fnum is None:
        fnum = pt.next_fnum()
    pt.figure(fnum=fnum, pnum=(2, 1, 1), doclf=True, docla=True)
    normalizer.visualize_probs(fnum=fnum, pnum=(2, 1, 1), update=False)
    normalizer.visualize_support(fnum=fnum, pnum=(2, 1, 2), update=False)
    if update:
        pt.update()

def show_selected(self, event):
    import plottool as pt
    print('show_selected')
    from ibeis.viz import viz_chip
    fnum = pt.ensure_fnum(None)
    print('fnum = %r' % (fnum,))
    pt.figure(fnum=fnum)
    viz_chip.show_many_chips(self.ibs, self.selected_aids)
    pt.update()

def plot_weights(self, event=None):
    scalars = self.infr.get_scalars()
    import plottool as pt
    inter = pt.ExpandableInteraction(fnum=1)
    for px, (key, vals) in enumerate(scalars.items()):
        print(key + ' = ' + ut.get_stats_str(vals, use_nan=True))
        args = (np.arange(len(vals)), sorted(vals))
        kw = dict(title=key, y_label=key, marker='-o', equal_aspect=False)
        inter.append_partial(pt.plot2, *args, **kw)
    inter.start()
    inter.show_page()
    pt.update()

def __init__(self, ibs, gid, next_callback=None, prev_callback=None,
             rows_updated_callback=None, reset_window=True, dodraw=True,
             fnum=None):
    """
    TODO: rename to interact image annotations?
    """
    self.ibs = ibs
    self.gid = gid
    self.rows_updated_callback = rows_updated_callback
    img = ibs.get_images(self.gid)
    self.aid_list = ibs.get_image_aids(self.gid)
    bbox_list = ibs.get_annot_bboxes(self.aid_list)
    #verts_list = ibs.get_annot_verts(self.aid_list)  # TODO
    theta_list = ibs.get_annot_thetas(self.aid_list)
    species_list = ibs.get_annot_species_texts(self.aid_list)
    #valid_species = ibs.get_all_species_texts()
    valid_species = [tup[1] for tup in ibs.get_working_species()]
    metadata_list = [ibs.get_annot_lazy_dict(aid) for aid in self.aid_list]
    for metadata in metadata_list:
        # eager eval on name
        metadata['name']
    if True:
        interact_annotations.rrr()
    self.interact_ANNOTATIONS = interact_annotations.ANNOTATIONInteraction(
        img,
        bbox_list=bbox_list,
        theta_list=theta_list,
        species_list=species_list,
        metadata_list=metadata_list,
        commit_callback=self.commit_callback,
        # TODO: get default species in a better way
        default_species=self.ibs.cfg.detect_cfg.species_text,
        next_callback=next_callback,
        prev_callback=prev_callback,
        fnum=fnum,
        valid_species=valid_species,
        #figure_to_use=None if reset_window else self.interact_ANNOTATIONS.fig,
    )
    if dodraw:
        pt.update()

def test_sharpness():
    import ibeis
    defaultdb = 'seaturtles'
    a = ['default']
    ibs = ibeis.opendb(defaultdb=defaultdb)
    ibs, qaids, daids = ibeis.testdata_expanded_aids(ibs=ibs, a=a)

    from vtool import quality_classifier
    contrast_list = [quality_classifier.compute_average_contrast(chip)
                     for chip in ibs.get_annot_chips(qaids)]
    sortx = ut.argsort(contrast_list)[::-1]
    sharpest_qaids = ut.take(qaids, sortx)

    aid = sharpest_qaids[0]
    ut.ensure_pylab_qt4()
    from ibeis import viz
    import plottool as pt
    for aid in ut.InteractiveIter(sharpest_qaids):
        viz.show_chip(ibs, aid, annot=False, nokpts=True)
        pt.update()

def view_training_data(ibs, **kwargs):
    """
    CommandLine:
        python -m ibeis_cnn.ibsplugin --test-view_training_data --db NNP_Master3

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_cnn.ibsplugin import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> view_training_data(ibs)
    """
    data_fpath, labels_fpath, training_dpath = get_identify_training_fpaths(
        ibs, **kwargs)
    data = np.load(data_fpath, mmap_mode='r')
    #width = height = base_size * 2  # HACK FIXME
    #channels = 3
    #img_list = utils.convert_data_to_imglist(data, width, height, channels)
    import plottool as pt
    for img in ut.InteractiveIter(data, display_item=False):
        pt.imshow(img)
        pt.update()

def show_single_namematch():
    # note: cm, qreq_, and aid2 are expected to come from the enclosing scope
    import plottool as pt
    ax = cm.show_single_namematch(qreq_, aid2, mode=0)
    ax = pt.gca()
    ax.figure.canvas.draw()
    pt.update()

def export_testset_for_chuck(ibs, min_num_annots):
    """
    Exports a set with some number of annotations that has good demo examples:
    multiple annotations per name and large time variation within names.

    Args:
        ibs (IBEISController): ibeis controller object

    CommandLine:
        python -m ibeis.other.ibsfuncs --test-export_testset_for_chuck --dbdir /raid/work2/Turk/PZ_Master --min-num-annots 100
        python -m ibeis.other.ibsfuncs --test-export_testset_for_chuck --dbdir /raid/work2/Turk/PZ_Master --min-num-annots 500
        python -m ibeis.other.ibsfuncs --test-export_testset_for_chuck --dbdir /raid/work2/Turk/GZ_Master --min-num-annots 100
        python -m ibeis.other.ibsfuncs --test-export_testset_for_chuck --dbdir /raid/work2/Turk/GZ_Master --min-num-annots 500_DOCTEST
        python -m ibeis.other.ibsfuncs --test-export_testset_for_chuck --db GIR_Tanya --min-num-annots 100

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.other.ibsfuncs import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> #dbdir = ut.get_argval(('--dbdir',), type_=str, default='testdb1')
        >>> min_num_annots = ut.get_argval(('--min-num-annots',), type_=int, default=500)
        >>> #ibs = ibeis.opendb('testdb1')
        >>> #ibs = ibeis.opendb(dbdir='/raid/work2/Turk/PZ_Master')
        >>> ibs = ibeis.opendb()  # dbdir=dbdir)
        >>> #ibs = ibeis.opendb(dbdir='/raid/work2/Turk/GZ_Master')
        >>> print(ibs.get_dbinfo_str())
        >>> #ibs = ibeis.opendb('testdb1')
        >>> # execute function
        >>> result = export_testset_for_chuck(ibs, min_num_annots)
        >>> # verify results
        >>> print(result)
        min_num_annots = 500
    """
    import numpy as np
    min_num_annots_per_name = 3
    max_annot_per_image = 5  #3
    #min_num_annots_per_name = 1
    #max_annot_per_image = 3000

    nid_list = ibs.get_valid_nids()
    aids_list = ibs.get_name_aids(nid_list)
    nAids_list = list(map(len, aids_list))
    nOther_aids_list = ibs.unflat_map(ibs.get_annot_num_contact_aids, aids_list)
    invalid_by_num_annots = [num < min_num_annots_per_name for num in nAids_list]
    invalid_by_num_others = [any([num > max_annot_per_image for num in nums])
                             for nums in nOther_aids_list]
    invalid_list = ut.or_lists(invalid_by_num_annots, invalid_by_num_others)
    valid_nids = ut.filterfalse_items(nid_list, invalid_list)

    def get_name_time_variation(ibs, nid_list):
        aids_list = ibs.get_name_aids(nid_list)
        unixtimes_list = ibs.unflat_map(ibs.get_annot_image_unixtimes, aids_list)
        unixtimes_arrs = list(map(np.array, unixtimes_list))
        fixtimes_list = [arr[arr > 0] for arr in unixtimes_arrs]
        std_list = [np.std(arr) if len(arr) > 1 else 0 for arr in fixtimes_list]
        return std_list

    std_list = get_name_time_variation(ibs, valid_nids)
    sorted_nids = ut.sortedby(valid_nids, std_list, reverse=True)

    # Find which names to include
    num_annot_cumsum = np.cumsum(ibs.get_name_num_annotations(sorted_nids))
    pos_list = np.where(num_annot_cumsum >= min_num_annots)[0]
    assert len(pos_list) > 0

    nid_list_chosen = sorted_nids[:pos_list[0] + 1]
    print('using names:')
    print(ibs.get_name_texts(nid_list_chosen))
    aids_list_chosen = ibs.get_name_aids(nid_list_chosen)
    aid_list_chosen = ut.flatten(aids_list_chosen)
    gid_list_chosen = ibs.get_annot_gids(aid_list_chosen)
    #ut.debug_duplicate_items(gid_list_chosen)

    # make sure not too many other annots are along for the ride
    other_aids = ibs.get_annot_contact_aids(aid_list_chosen)
    unexpected_aids = list(set(ut.flatten(other_aids)).difference(set(aid_list_chosen)))
    print('got %d unexpected_aids' % (len(unexpected_aids),))

    from ibeis.dbio import export_subset

    def new_nonconflicting_dbpath(ibs):
        dpath, dbname = split(ibs.get_dbdir())
        base_fmtstr = dbname + '_demo' + str(min_num_annots) + '_export%d'
        new_dbpath = ut.get_nonconflicting_path(base_fmtstr, dpath)
        return new_dbpath

    #ut.embed()
    dbpath = new_nonconflicting_dbpath(ibs)
    ibs_dst = ibeis.opendb(dbdir=dbpath, allow_newdir=True)
    ibs_src = ibs
    gid_list = gid_list_chosen
    export_subset.merge_databases(ibs_src, ibs_dst, gid_list=gid_list)

    DEBUG_NAME = False
    if DEBUG_NAME:
        ibs.get_name_num_annotations(sorted_nids[0:10])
        import plottool as pt
        ibeis.viz.viz_name.show_name(ibs, sorted_nids[0])
        pt.update()

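# Standalone sketch (not from the original module) of the time-variation
# ranking used by export_testset_for_chuck above: per-name unixtimes are
# filtered to valid (> 0) values, each name is scored by the standard
# deviation of its remaining timestamps, and names are ranked most-varied
# first. The name/unixtime data below is made up for illustration only.
def _demo_time_variation_ranking():
    import numpy as np
    name_unixtimes = {
        'name_a': [1400000000, 1400000060, -1],          # two valid times, close together
        'name_b': [1300000000, 1350000000, 1400000000],  # widely spread times
        'name_c': [-1, -1],                              # no valid times
    }
    std_by_name = {}
    for name, unixtimes in name_unixtimes.items():
        arr = np.array(unixtimes)
        fixtimes = arr[arr > 0]
        std_by_name[name] = np.std(fixtimes) if len(fixtimes) > 1 else 0
    # analogue of ut.sortedby(valid_nids, std_list, reverse=True)
    ranked = sorted(std_by_name, key=std_by_name.get, reverse=True)
    return ranked  # ['name_b', 'name_a', 'name_c']
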
def toggle_samefig(self):
    self.same_fig = not self.same_fig
    if self.mx is not None:
        self.select_ith_match(self.mx)
    pt.update()

def param_interaction():
    r"""
    CommandLine:
        python -m vtool.test_constrained_matching --test-param_interaction

    Notes:
        python -m vtool.test_constrained_matching --test-param_interaction
        setparam normalizer_mode=nearby
        setparam normalizer_mode=far
        setparam ratio_thresh=.625
        setparam ratio_thresh=.5
        setparam ratio_thresh2=.625 normalizer_mode=plus

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.test_constrained_matching import *  # NOQA
        >>> # build test data
        >>> # execute function
        >>> testtup = param_interaction()
        >>> # verify results
        >>> result = str(testtup)
        >>> print(result)
    """
    import plottool as pt
    USE_IBEIS = False and ut.is_developer()
    if USE_IBEIS:
        from ibeis.algo.hots import devcases
        index = 2
        fpath1, fpath2, fpath3 = devcases.get_dev_test_fpaths(index)
        testtup1 = testdata_matcher(fpath1, fpath2)
        testtup2 = testdata_matcher(fpath1, fpath3)
    else:
        testtup1 = testdata_matcher('easy1.png', 'easy2.png')
        testtup2 = testdata_matcher('easy1.png', 'hard3.png')
    testtup_list = [testtup1, testtup2]
    simp_list = [SimpleMatcher(testtup) for testtup in testtup_list]
    varied_dict = dict([
        ('sver_xy_thresh', .1),
        ('ratio_thresh', .625),
        ('search_K', 7),
        ('ratio_thresh2', .625),
        ('sver_xy_thresh2', .01),
        ('normalizer_mode', ['nearby', 'far', 'plus'][1]),
        ('match_xy_thresh', .1),
    ])
    cfgdict_list = ut.all_dict_combinations(varied_dict)
    tried_configs = []

    # DEFINE CUSTOM INTERACTIONS
    custom_actions, valid_vizmodes, viz_index_, offset_fnum_ = make_custom_interactions(simp_list)
    # /DEFINE CUSTOM INTERACTIONS

    for cfgdict in ut.InteractiveIter(cfgdict_list,
                                      #default_action='reload',
                                      custom_actions=custom_actions,
                                      wraparound=True):
        for simp in simp_list:
            simp.run_matching(cfgdict=cfgdict)
        vizkey = valid_vizmodes[viz_index_[0]].replace('visualize_', '')
        print('vizkey = %r' % (vizkey,))
        for fnum_, simp in enumerate(simp_list):
            fnum = fnum_ + offset_fnum_[0]
            simp.visualize(vizkey, fnum=fnum)
        tried_configs.append(cfgdict.copy())
        print('Current Config = ')
        print(ut.dict_str(cfgdict))
        pt.present()
        pt.update()

def test_mser():
    import cv2
    import vtool as vt
    import plottool as pt
    import numpy as np
    pt.qt4ensure()

    class Keypoints(ut.NiceRepr):
        """
        Convenience class for dealing with keypoints
        """
        def __init__(self, kparr, info=None):
            self.kparr = kparr
            if info is None:
                info = {}
            self.info = info

        def add_info(self, key, val):
            self.info[key] = val

        def __nice__(self):
            return ' ' + str(len(self.kparr))

        @property
        def scale(self):
            return vt.get_scales(self.kparr)

        @property
        def eccentricity(self):
            return vt.get_kpts_eccentricity(self.kparr)

        def compress(self, flags, inplace=False):
            subarr = self.kparr.compress(flags, axis=0)
            info = {key: ut.compress(val, flags)
                    for key, val in self.info.items()}
            return Keypoints(subarr, info)

    img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='zebra.png'))
    imgBGR = vt.imread(img_fpath)
    imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)

    # http://docs.opencv.org/master/d3/d28/classcv_1_1MSER.html#gsc.tab=0
    # http://stackoverflow.com/questions/17647500/exact-meaning-of-the-parameters-given-to-initialize-mser-in-opencv-2-4-x
    factory = cv2.MSER_create
    img_area = np.product(np.array(vt.get_size(imgGray)))
    _max_area = (img_area // 10)
    _delta = 8
    _min_diversity = .5
    extractor = factory(_delta=_delta, _max_area=_max_area,
                        _min_diversity=_min_diversity)
    # bboxes are x,y,w,h
    regions, bboxes = extractor.detectRegions(imgGray)

    # ellipse definition from [Fitzgibbon95]
    # http://www.bmva.org/bmvc/1995/bmvc-95-050.pdf p518
    # ell = [c_x, c_y, R_x, R_y, theta]
    # (cx, cy) = conic center
    # Rx and Ry = conic radii
    # theta is the counterclockwise angle
    fitz_ellipses = [cv2.fitEllipse(mser) for mser in regions]

    # http://answers.opencv.org/question/19015/how-to-use-mser-in-python/
    #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
    #hull_ells = [cv2.fitEllipse(hull[:, 0]) for hull in hulls]

    invVR_mats = []
    for ell in fitz_ellipses:
        ((cx, cy), (dx, dy), degrees) = ell
        theta = np.radians(degrees)  # opencv returns degrees; vtool wants radians
        # Convert diameters to radii
        rx = dx / 2
        ry = dy / 2
        S = vt.scale_mat3x3(rx, ry)
        T = vt.translation_mat3x3(cx, cy)
        R = vt.rotation_mat3x3(theta)
        invVR = T.dot(R.dot(S))
        invVR_mats.append(invVR)
    invVR_mats = np.array(invVR_mats)
    #_oris = vt.get_invVR_mats_oris(invVR_mats)
    kpts2_ = vt.flatten_invV_mats_to_kpts(invVR_mats)

    self = Keypoints(kpts2_)
    self.add_info('regions', regions)
    flags = (self.eccentricity < .9)
    #flags = self.scale < np.mean(self.scale)
    #flags = self.scale < np.median(self.scale)
    self = self.compress(flags)

    import plottool as pt
    #pt.interact_keypoints.ishow_keypoints(imgBGR, self.kparr, None, ell_alpha=.4, color='distinct', fnum=2)
    #import plottool as pt
    vis = imgBGR.copy()
    for region in self.info['regions']:
        vis[region.T[1], region.T[0], :] = 0
    #regions, bbox = mser.detectRegions(gray)
    #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in self.info['regions']]
    #cv2.polylines(vis, hulls, 1, (0, 255, 0))
    #for region in self.info['regions']:
    #    ell = cv2.fitEllipse(region)
    #    cv2.ellipse(vis, ell, (255))
    pt.interact_keypoints.ishow_keypoints(vis, self.kparr, None, ell_alpha=.4,
                                          color='distinct', fnum=2)
    #pt.imshow(vis, fnum=2)
    pt.update()

    #extractor = extract_factory['DAISY']()
    #desc_type_to_dtype = {
    #    cv2.CV_8U: np.uint8,
    #    cv2.CV_8s: np.uint,
    #}
    #def alloc_desc(extractor):
    #    desc_type = extractor.descriptorType()
    #    desc_size = extractor.descriptorSize()
    #    dtype = desc_type_to_dtype[desc_type]
    #    shape = (len(cv2_kpts), desc_size)
    #    desc = np.empty(shape, dtype=dtype)
    #    return desc
    #ut.search_module(cv2, 'array', recursive=True)
    #ut.search_module(cv2, 'freak', recursive=True)
    #ut.search_module(cv2, 'out', recursive=True)

    #cv2_kpts = cv2_kpts[0:2]
    #for key, factory in just_desc_factory_.items():
    #    extractor = factory()
    #    desc = alloc_desc(extractor)
    #    desc = extractor.compute(imgGray, cv2_kpts)
    #    feats[key] = (desc,)
    #    #extractor.compute(imgGray, cv2_kpts, desc)
    #    pass
    #kpts = np.array(list(map(from_cv2_kpts, cv2_kpts)))
    #orb = cv2.ORB()
    #kp1, des1 = orb.detectAndCompute(imgGray, None)
    #blober = cv2.SimpleBlobDetector_create()
    #haris_kpts = cv2.cornerHarris(imgGray, 2, 3, 0.04)
    #[name for name in dir(cv2) if 'mat' in name.lower()]
    #[name for name in dir(cv2.xfeatures2d) if 'desc' in name.lower()]
    #[name for name in dir(cv2) if 'detect' in name.lower()]
    #[name for name in dir(cv2) if 'extract' in name.lower()]
    #[name for name in dir(cv2) if 'ellip' in name.lower()]
    #sift = cv2.xfeatures2d.SIFT_create()
    #cv2_kpts = sift.detect(imgGray)
    #desc = sift.compute(imgGray, cv2_kpts)[1]
    #freak = cv2.xfeatures2d.FREAK_create()
    #cv2_kpts = freak.detect(imgGray)
    #desc = freak.compute(imgGray, cv2_kpts)[1]
    pass

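# Standalone sketch (not part of the original function) of the
# cv2.fitEllipse -> 3x3 affine (invVR) conversion used in test_mser above,
# with the vt.scale_mat3x3 / vt.translation_mat3x3 / vt.rotation_mat3x3
# helpers replaced by explicit numpy matrices under the assumption that
# they build the standard scale, translation, and rotation homographies.
def _demo_ellipse_to_invVR(ell):
    import numpy as np
    ((cx, cy), (dx, dy), degrees) = ell   # cv2.fitEllipse output layout
    theta = np.radians(degrees)           # fitEllipse angle is in degrees
    rx, ry = dx / 2.0, dy / 2.0           # diameters -> radii
    S = np.array([[rx, 0, 0], [0, ry, 0], [0, 0, 1.0]])
    T = np.array([[1, 0, cx], [0, 1, cy], [0, 0, 1.0]])
    R = np.array([[np.cos(theta), -np.sin(theta), 0],
                  [np.sin(theta),  np.cos(theta), 0],
                  [0, 0, 1.0]])
    return T.dot(R.dot(S))

# example: ellipse centered at (10, 20), axis diameters 4 and 8, rotated 30 degrees
# invVR = _demo_ellipse_to_invVR(((10.0, 20.0), (4.0, 8.0), 30.0))
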
def visualize_support(normalizer, update=True, fnum=None, pnum=(1, 1, 1)):
    plot_support(normalizer.tn_support, normalizer.tp_support, fnum=fnum, pnum=pnum)
    if update:
        import plottool as pt
        pt.update()
