def load_internal_data():
    r"""
    Load the Oxford SMK query request, run it, and report mAP.

    wbia TestResult --db Oxford \
        -p smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True,dim_size=None \
        -a oxford \
        --dev-mode

    wbia TestResult --db GZ_Master1 \
        -p smk:nWords=[64000],nAssign=[1],SV=[False],fg_on=False \
        -a ctrl:qmingt=2 \
        --dev-mode
    """
    # from wbia.algo.smk.smk_pipeline import *  # NOQA
    import wbia

    qreq_ = wbia.testdata_qreq_(
        defaultdb='Oxford',
        a='oxford',
        p='smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True,dim_size=None',
    )
    cm_list = qreq_.execute()
    # Mean average precision over all query chip-matches.
    per_query_ap = [cm.get_annot_ave_precision() for cm in cm_list]
    mAP = np.mean(per_query_ap)
    logger.info('mAP = %.3f' % (mAP,))
    # Hand back the last chip-match for interactive inspection.
    cm = cm_list[-1]
    return qreq_, cm
def testdata_match_interact(**kwargs):
    """
    Build and start a MatchInteraction on the first testdb1 query result.

    CommandLine:
        python -m wbia.viz.interact.interact_matches --test-testdata_match_interact --show --db PZ_MTEST --qaid 3

    Example:
        >>> # VIZ_DOCTEST
        >>> from wbia.viz.interact.interact_matches import *  # NOQA
        >>> import wbia.plottool as pt
        >>> kwargs = {}
        >>> mx = ut.get_argval('--mx', type_=int, default=None)
        >>> self = testdata_match_interact(mx=mx, **kwargs)
        >>> pt.show_if_requested()
    """
    import wbia

    qreq_ = wbia.testdata_qreq_(defaultdb='testdb1', t=['default:Knorm=3'])
    cm = qreq_.execute()[0]
    cm.sortself()
    # aid2=None lets the interaction choose its own comparison annotation.
    self = MatchInteraction(
        qreq_.ibs, cm, None, mode=1, dodraw=False, qreq_=qreq_, **kwargs
    )
    self.start()
    return self
def testdata_post_sver(
    defaultdb='PZ_MTEST',
    qaid_list=None,
    daid_list=None,
    codename='vsmany',
    cfgdict=None,
):
    """
    Run the pipeline to completion and return the spatially-verified matches.

    >>> from wbia.algo.hots._pipeline_helpers import *  # NOQA
    """
    # TODO: testdata_pre('end')
    # from wbia.algo import Config
    import wbia

    if cfgdict is None:
        cfgdict = dict(codename=codename)
    qreq_ = wbia.testdata_qreq_(
        defaultdb=defaultdb,
        default_qaids=qaid_list,
        default_daids=daid_list,
        p='default' + ut.get_cfg_lbl(cfgdict),
    )
    ibs = qreq_.ibs
    locals_ = testrun_pipeline_upto(qreq_, 'end')
    cm_list = locals_['cm_list_SVER']
    # nnfilts_list = locals_['nnfilts_list']
    return ibs, qreq_, cm_list
def benchmark_knn():
    r"""
    Profile the nearest_neighbors pipeline stage on PZ_PB_RF_TRAIN.

    CommandLine:
        python ~/code/wbia/wbia/algo/hots/tests/bench.py benchmark_knn --profile

    Example:
        >>> # DISABLE_DOCTEST
        >>> from bench import *  # NOQA
        >>> result = benchmark_knn()
        >>> print(result)
    """
    import wbia
    from wbia.algo.hots import _pipeline_helpers as plh
    from wbia.algo.hots.pipeline import nearest_neighbors

    qreq_ = wbia.testdata_qreq_(
        defaultdb='PZ_PB_RF_TRAIN',
        t='default:K=3,requery=True,can_match_samename=False',
        a='default:qsize=100',
        verbose=1,
    )
    # Run the pipeline up to (but not including) nearest_neighbors,
    # then invoke the stage directly so a profiler can isolate it.
    locals_ = plh.testrun_pipeline_upto(qreq_, 'nearest_neighbors')
    Kpad_list, impossible_daids_list = ut.dict_take(
        locals_, ['Kpad_list', 'impossible_daids_list']
    )
    nns_list1 = nearest_neighbors(  # NOQA
        qreq_, Kpad_list, impossible_daids_list, verbose=True
    )
def train_featscore_normalizer():
    r"""
    Learn, save, and return a feature-score normalizer encoder.

    CommandLine:
        python -m wbia --tf train_featscore_normalizer --show

        # Write Encoder
        python -m wbia --tf train_featscore_normalizer --db PZ_MTEST -t best -a default --fsvx=0 --threshx=1 --show

        # Visualize encoder score adjustment
        python -m wbia --tf TestResult.draw_feat_scoresep --db PZ_MTEST -a timectrl -t best:lnbnn_normer=lnbnn_fg_featscore --show --nocache --nocache-hs

        # Compare ranking with encoder vs without
        python -m wbia --tf draw_rank_cmc --db PZ_MTEST -a timectrl -t best:lnbnn_normer=[None,wulu] --show
        python -m wbia --tf draw_rank_cmc --db PZ_MTEST -a default -t best:lnbnn_normer=[None,wulu] --show

        # Compare in ipynb
        python -m wbia --tf autogen_ipynb --ipynb --db PZ_MTEST -a default -t best:lnbnn_normer=[None,lnbnn_fg_0.9__featscore]

        # Big Test
        python -m wbia --tf draw_rank_cmc --db PZ_Master1 -a timectrl -t best:lnbnn_normer=[None,lovb],lnbnn_norm_thresh=.5 --show
        python -m wbia --tf draw_rank_cmc --db PZ_Master1 -a timectrl -t best:lnbnn_normer=[None,jypz],lnbnn_norm_thresh=.1 --show
        python -m wbia --tf draw_rank_cmc --db PZ_Master1 -a timectrl -t best:lnbnn_normer=[None,jypz],lnbnn_norm_thresh=0 --show

        # Big Train
        python -m wbia --tf learn_featscore_normalizer --db PZ_Master1 -a timectrl -t best:K=1 --fsvx=0 --threshx=1 --show
        python -m wbia --tf train_featscore_normalizer --db PZ_Master1 -a timectrl:has_none=photobomb -t best:K=1 --fsvx=0 --threshx=1 --show --ainfo
        python -m wbia --tf train_featscore_normalizer --db PZ_Master1 -a timectrl:has_none=photobomb -t best:K=1 --fsvx=0 --threshx=1 --show
        python -m wbia --tf train_featscore_normalizer --db PZ_Master1 -a timectrl:has_none=photobomb -t best:K=3 --fsvx=0 --threshx=1 --show

    Example:
        >>> # SCRIPT
        >>> from wbia.unstable.scorenorm import *  # NOQA
        >>> encoder = train_featscore_normalizer()
        >>> encoder.visualize(figtitle=encoder.get_cfgstr())
        >>> ut.show_if_requested()
    """
    import wbia

    # TODO: training / loading / general external models
    qreq_ = wbia.testdata_qreq_(defaultdb='PZ_MTEST', a=['default'], p=['default'])
    # Data-gathering options come from the command line via the config class.
    datakw = NormFeatScoreConfig.from_argv_dict()
    encoder = learn_featscore_normalizer(qreq_, datakw=datakw)
    encoder.save()
    return encoder
def testdata_pre(
    stopnode,
    defaultdb='testdb1',
    p=None,
    a=None,
    **kwargs,
):
    """
    New (1-1-2016) generic pipeline node testdata getter

    Args:
        stopnode (str): name of pipeline function to be tested
        defaultdb (str): (default = u'testdb1')
        p (list): pipeline configs (default = [u'default'])
        a (list): annot configs
            (default = [u'default:qindex=0:1,dindex=0:5'])
        **kwargs: passed to testdata_qreq_
            qaid_override, daid_override

    Returns:
        tuple: (qreq_, args)

    CommandLine:
        python -m wbia.algo.hots._pipeline_helpers --exec-testdata_pre --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots._pipeline_helpers import *  # NOQA
        >>> stopnode = 'build_chipmatches'
        >>> defaultdb = 'testdb1'
        >>> p = ['default:']
        >>> a = ['default:qindex=0:1,dindex=0:5']
        >>> qreq_, args = testdata_pre(stopnode, defaultdb, p, a)
    """
    import wbia
    from wbia.algo.hots import pipeline

    # Fix mutable-default-argument pitfall: the historical list defaults are
    # now constructed per call so one invocation cannot leak into the next.
    if p is None:
        p = ['default']
    if a is None:
        a = ['default:qindex=0:1,dindex=0:5']
    qreq_ = wbia.testdata_qreq_(defaultdb=defaultdb, p=p, a=a, **kwargs)
    locals_ = testrun_pipeline_upto(qreq_, stopnode)
    if stopnode == 'end':
        argnames = ['cm_list_SVER']
    else:
        # The stopnode function's own argument names tell us which pipeline
        # intermediates to pull out of the captured locals.
        func = getattr(pipeline, stopnode)
        argnames = ut.get_argnames(func)
    # Hack to ignore qreq_, and verbose
    for ignore in ['qreq_', 'ibs', 'verbose']:
        try:
            argnames.remove(ignore)
        except ValueError:
            pass
    tupname = '_Ret_' + stopnode.upper()
    args = ut.dict_take_asnametup(locals_, argnames, name=tupname)
    return qreq_, args
def testdata_pre_baselinefilter(defaultdb='testdb1', qaid_list=None, daid_list=None, codename='vsmany'):
    """Run the pipeline through baseline_neighbor_filter and return its inputs."""
    import wbia

    cfg_lbl = ut.get_cfg_lbl(dict(codename=codename))
    qreq_ = wbia.testdata_qreq_(
        defaultdb=defaultdb,
        default_qaids=qaid_list,
        default_daids=daid_list,
        p='default' + cfg_lbl,
    )
    locals_ = testrun_pipeline_upto(qreq_, 'baseline_neighbor_filter')
    nns_list, impossible_daids_list = ut.dict_take(
        locals_, ['nns_list', 'impossible_daids_list']
    )
    return qreq_, nns_list, impossible_daids_list
def testdata_inva():
    """
    Build an InvertedAnnots test fixture from a small SMK query request.

    from wbia.algo.smk.inverted_index import *  # NOQA
    """
    import wbia

    qreq_ = wbia.testdata_qreq_(
        defaultdb='PZ_MTEST',
        a='default',
        p='default:proot=smk,nAssign=1,num_words=64',
    )
    depc = qreq_.ibs.depc
    aids = qreq_.daids
    # Vocabulary is built from the same database annotations being indexed.
    inva = InvertedAnnots.from_depc(depc, aids, qreq_.daids, qreq_.qparams)
    inva.wx_to_aids = inva.compute_inverted_list()
    return qreq_, inva
def compare_data(Y_list_):
    """
    Plot script-computed gammas against pipeline-computed gammas
    for the Oxford SMK configuration.
    """
    import wbia

    qreq_ = wbia.testdata_qreq_(
        defaultdb='Oxford',
        a='oxford',
        p='smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True,dim_size=None',
    )
    qreq_.ensure_data()

    logger.info(len(Y_list_))
    logger.info(len(qreq_.daids))

    dinva = qreq_.dinva
    script_gammas = []
    pipe_gammas = []
    unmatched = []
    for Y in Y_list_:
        if Y.aid in dinva.aid_to_idx:
            # Pair up the script's gamma with the pipeline's gamma for this aid.
            script_gammas.append(Y.gamma)
            pipe_gammas.append(dinva.gamma_list[dinva.aid_to_idx[Y.aid]])
        else:
            unmatched.append(Y)
            logger.info(Y.nid)
            # logger.info(Y.qual)

    # ibs = qreq_.ibs
    # z = ibs.annots([a.aid for a in unmatched])

    import wbia.plottool as pt

    ut.qtensure()
    gamma1s = np.array(script_gammas)
    gamma2s = np.array(pipe_gammas)
    sortx = gamma1s.argsort()
    pt.plot(gamma1s[sortx], label='script')
    pt.plot(gamma2s[sortx], label='pipe')
    pt.legend()
def testdata_pre_sver(defaultdb='PZ_MTEST', qaid_list=None, daid_list=None):
    """
    Run the pipeline up to spatial_verification and return the filtered matches.

    >>> from wbia.algo.hots._pipeline_helpers import *  # NOQA
    """
    # TODO: testdata_pre('sver')
    # from wbia.algo import Config
    import wbia

    qreq_ = wbia.testdata_qreq_(
        defaultdb=defaultdb,
        default_qaids=qaid_list,
        default_daids=daid_list,
        p='default' + ut.get_cfg_lbl(dict()),
    )
    ibs = qreq_.ibs
    locals_ = testrun_pipeline_upto(qreq_, 'spatial_verification')
    cm_list = locals_['cm_list_FILT']
    # nnfilts_list = locals_['nnfilts_list']
    return ibs, qreq_, cm_list
def photobombing_subset():
    """
    Export a hand-picked PZ_Master1 annotation subset (including photobomb
    cases) into a new database named 'PZ_PB_RF_TRAIN'.

    CommandLine:
        python -m wbia.scripts.script_vsone photobombing_subset
    """
    import wbia

    # pair_sample = ut.odict([
    #     ('top_gt', 4), ('mid_gt', 2), ('bot_gt', 2), ('rand_gt', 2),
    #     ('top_gf', 3), ('mid_gf', 2), ('bot_gf', 1), ('rand_gf', 2),
    # ])
    qreq_ = wbia.testdata_qreq_(
        defaultdb='PZ_Master1',
        a=':mingt=2,species=primary',
        # t='default:K=4,Knorm=1,score_method=csum,prescore_method=csum',
        t='default:K=4,Knorm=1,score_method=csum,prescore_method=csum,QRH=True',
    )
    ibs = qreq_.ibs
    # cm_list = qreq_.execute()
    # infr = wbia.AnnotInference.from_qreq_(qreq_, cm_list, autoinit=True)
    # aid_pairs_ = infr._cm_training_pairs(rng=np.random.RandomState(42),
    #                                      **pair_sample)

    # NOTE(review): the block below records how the aid list `a` was
    # originally derived (annotmatch photobomb tags + a random subset of
    # training pairs); the result was frozen into the literal list to make
    # the export reproducible. Kept for provenance.
    # # ut.dict_hist(ut.flatten(am_tags))
    # am_rowids = ibs._get_all_annotmatch_rowids()
    # am_tags = ibs.get_annotmatch_case_tags(am_rowids)
    # am_flags = ut.filterflags_general_tags(am_tags, has_any=['photobomb'])
    # am_rowids_ = ut.compress(am_rowids, am_flags)
    # aids1 = ibs.get_annotmatch_aid1(am_rowids_)
    # aids2 = ibs.get_annotmatch_aid2(am_rowids_)
    # pb_aids_pairs = list(zip(aids1, aids2))
    # # aids = unique_pb_aids = ut.unique(ut.flatten(pb_aids_pairs))
    # # ut.compress(unique_pb_aids, ibs.is_aid_unknown(unique_pb_aids))
    # assert len(pb_aids_pairs) > 0
    # # Keep only a random subset
    # subset_idxs = list(range(len(aid_pairs_)))
    # rng = np.random.RandomState(3104855634)
    # num_max = len(pb_aids_pairs)
    # if num_max < len(subset_idxs):
    #     subset_idxs = rng.choice(subset_idxs, size=num_max, replace=False)
    #     subset_idxs = sorted(subset_idxs)
    # aid_pairs_ = ut.take(aid_pairs_, subset_idxs)
    # aid_pairs_ += pb_aids_pairs
    # unique_aids = ut.unique(ut.flatten(aid_pairs_))
    # a1 = ibs.filter_annots_general(unique_aids, is_known=True, verbose=True, min_pername=2, has_none=['photobomb'])
    # a2 = ibs.filter_annots_general(unique_aids, has_any=['photobomb'], verbose=True, is_known=True)
    # a = sorted(set(a1 + a2))
    # ibs.print_annot_stats(a)
    # len(a)

    # Frozen list of PZ_Master1 annotation rowids to export.
    a = [
        8, 27, 30, 86, 87, 90, 92, 94, 99, 103, 104, 106, 111, 217, 218, 242,
        298, 424, 425, 456, 464, 465, 472, 482, 529, 559, 574, 585, 588, 592,
        598, 599, 601, 617, 630, 645, 661, 664, 667, 694, 723, 724, 759, 768,
        843, 846, 861, 862, 866, 933, 934, 980, 987, 1000, 1003, 1005, 1011,
        1017, 1020, 1027, 1059, 1074, 1076, 1080, 1095, 1096, 1107, 1108,
        1192, 1203, 1206, 1208, 1220, 1222, 1223, 1224, 1256, 1278, 1293,
        1294, 1295, 1296, 1454, 1456, 1474, 1484, 1498, 1520, 1521, 1548,
        1563, 1576, 1593, 1669, 1675, 1680, 1699, 1748, 1751, 1811, 1813,
        1821, 1839, 1927, 1934, 1938, 1952, 1992, 2003, 2038, 2054, 2066,
        2080, 2103, 2111, 2170, 2171, 2175, 2192, 2216, 2227, 2240, 2250,
        2253, 2266, 2272, 2288, 2292, 2314, 2329, 2341, 2344, 2378, 2397,
        2417, 2429, 2444, 2451, 2507, 2551, 2552, 2553, 2581, 2628, 2640,
        2642, 2646, 2654, 2667, 2686, 2733, 2743, 2750, 2759, 2803, 2927,
        3008, 3054, 3077, 3082, 3185, 3205, 3284, 3306, 3334, 3370, 3386,
        3390, 3393, 3401, 3448, 3508, 3542, 3597, 3614, 3680, 3684, 3695,
        3707, 3727, 3758, 3765, 3790, 3812, 3813, 3818, 3858, 3860, 3874,
        3875, 3887, 3892, 3915, 3918, 3924, 3927, 3929, 3933, 3941, 3952,
        3955, 3956, 3959, 4004, 4059, 4073, 4076, 4089, 4094, 4124, 4126,
        4128, 4182, 4189, 4217, 4222, 4229, 4257, 4266, 4268, 4288, 4289,
        4296, 4306, 4339, 4353, 4376, 4403, 4428, 4455, 4487, 4494, 4515,
        4517, 4524, 4541, 4544, 4556, 4580, 4585, 4597, 4604, 4629, 4639,
        4668, 4671, 4672, 4675, 4686, 4688, 4693, 4716, 4730, 4731, 4749,
        4772, 4803, 4820, 4823, 4832, 4833, 4836, 4900, 4902, 4909, 4924,
        4936, 4938, 4939, 4944, 5004, 5006, 5034, 5043, 5044, 5055, 5064,
        5072, 5115, 5131, 5150, 5159, 5165, 5167, 5168, 5174, 5218, 5235,
        5245, 5249, 5309, 5319, 5334, 5339, 5344, 5347, 5378, 5379, 5384,
        5430, 5447, 5466, 5509, 5546, 5587, 5588, 5621, 5640, 5663, 5676,
        5682, 5685, 5687, 5690, 5707, 5717, 5726, 5732, 5733, 5791, 5830,
        5863, 5864, 5869, 5870, 5877, 5879, 5905, 5950, 6008, 6110, 6134,
        6160, 6167, 6234, 6238, 6265, 6344, 6345, 6367, 6384, 6386, 6437,
        6495, 6533, 6538, 6569, 6587, 6626, 6634, 6643, 6659, 6661, 6689,
        6714, 6725, 6739, 6754, 6757, 6759, 6763, 6781, 6830, 6841, 6843,
        6893, 6897, 6913, 6930, 6932, 6936, 6944, 6976, 7003, 7022, 7037,
        7052, 7058, 7074, 7103, 7107, 7108, 7113, 7143, 7183, 7185, 7187,
        7198, 7200, 7202, 7207, 7222, 7275, 7285, 7388, 7413, 7421, 7425,
        7429, 7445, 7487, 7507, 7508, 7528, 7615, 7655, 7696, 7762, 7786,
        7787, 7796, 7797, 7801, 7807, 7808, 7809, 7826, 7834, 7835, 7852,
        7861, 7874, 7881, 7901, 7902, 7905, 7913, 7918, 7941, 7945, 7990,
        7999, 8007, 8009, 8017, 8018, 8019, 8034, 8041, 8057, 8058, 8079,
        8080, 8086, 8089, 8092, 8094, 8100, 8105, 8109, 8147, 8149, 8153,
        8221, 8264, 8302, 8303, 8331, 8366, 8367, 8370, 8376, 8474, 8501,
        8504, 8506, 8507, 8514, 8531, 8532, 8534, 8538, 8563, 8564, 8587,
        8604, 8608, 8751, 8771, 8792, 9175, 9204, 9589, 9726, 9841, 10674,
        12122, 12305, 12796, 12944, 12947, 12963, 12966, 13098, 13099,
        13101, 13103, 13109, 13147, 13157, 13168, 13194, 13236, 13253,
        13255, 13410, 13450, 13474, 13477, 13481, 13508, 13630, 13670,
        13727, 13741, 13819, 13820, 13908, 13912, 13968, 13979, 14007,
        14009, 14010, 14019, 14066, 14067, 14072, 14074, 14148, 14153,
        14224, 14230, 14237, 14239, 14241, 14274, 14277, 14290, 14293,
        14308, 14309, 14313, 14319, 14668, 14670, 14776, 14918, 14920,
        14924, 15135, 15157, 15318, 15319, 15490, 15518, 15531, 15777,
        15903, 15913, 16004, 16012, 16013, 16014, 16020, 16215, 16221,
        16235, 16240, 16259, 16273, 16279, 16284, 16289, 16316, 16322,
        16329, 16336, 16364, 16389, 16706, 16897, 16898, 16903, 16949,
        17094, 17101, 17137, 17200, 17222, 17290, 17327, 17336,
    ]

    from wbia.dbio import export_subset

    # Write the selected annotations out as a standalone training database.
    export_subset.export_annots(ibs, a, 'PZ_PB_RF_TRAIN')