def testdata_smk(*args, **kwargs):
    """
    >>> from wbia.algo.smk.smk_pipeline import *  # NOQA
    >>> kwargs = {}
    """
    import wbia
    import sklearn.model_selection

    ibs, aid_list = wbia.testdata_aids(defaultdb='PZ_MTEST')
    nid_list = np.array(ibs.annots(aid_list).nids)
    # Deterministic name-stratified split into database / query annotations.
    # (random_state is omitted: with shuffle=False the split is already
    # deterministic, and newer sklearn raises if both are given.)
    xvalkw = dict(n_splits=4, shuffle=False)
    skf = sklearn.model_selection.StratifiedKFold(**xvalkw)
    train_idx, test_idx = next(skf.split(aid_list, nid_list))
    daids = ut.take(aid_list, train_idx)
    qaids = ut.take(aid_list, test_idx)

    config = {
        'num_words': 1000,
    }
    config.update(**kwargs)
    qreq_ = SMKRequest(ibs, qaids, daids, config)
    smk = qreq_.smk
    # qreq_ = ibs.new_query_request(qaids, daids,
    #                               cfgdict={'pipeline_root': 'smk', 'proot': 'smk'})
    # qreq_ = ibs.new_query_request(qaids, daids, cfgdict={})
    return ibs, smk, qreq_
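
# A minimal sketch (independent of wbia) of the stratified split used in
# testdata_smk: name ids play the role of class labels, so every fold keeps
# each name represented on both the database and query sides. The values
# below are illustrative stand-ins, not wbia data.
def _demo_stratified_split():
    import numpy as np
    import sklearn.model_selection
    aids = list(range(12))          # stand-ins for annotation ids
    nids = np.repeat([1, 2, 3], 4)  # four annotations per name
    skf = sklearn.model_selection.StratifiedKFold(n_splits=4)
    train_idx, test_idx = next(skf.split(aids, nids))
    print('daids =', [aids[i] for i in train_idx])  # 9 database annots
    print('qaids =', [aids[i] for i in test_idx])   # 3 query annots, one per name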
def trytest_incremental_add(ibs):
    r"""
    Args:
        ibs (IBEISController):

    CommandLine:
        python -m wbia.algo.hots._neighbor_experiment --test-test_incremental_add

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb('PZ_MTEST')
        >>> result = trytest_incremental_add(ibs)
        >>> print(result)
    """
    import wbia

    sample_aids = wbia.testdata_aids(a='default:pername=1,mingt=2')
    aids1 = sample_aids[::2]
    aids2 = sample_aids[0:5]
    aids3 = sample_aids[:-1]  # NOQA
    daid_list = aids1  # NOQA
    qreq_ = ibs.new_query_request(aids1, aids1)
    nnindexer1 = neighbor_index_cache.request_wbia_nnindexer(  # NOQA
        ibs.new_query_request(aids1, aids1))
    nnindexer2 = neighbor_index_cache.request_wbia_nnindexer(  # NOQA
        ibs.new_query_request(aids2, aids2))

    # TODO: SYSTEM use visual uuids
    items = ibs.get_annot_visual_uuids(aids3)
    uuid_map_fpath = neighbor_index_cache.get_nnindexer_uuid_map_fpath(qreq_)
    candidate_uuids = neighbor_index_cache.read_uuid_map(uuid_map_fpath, 0)
    candidate_sets = candidate_uuids
    covertup = ut.greedy_max_inden_setcover(candidate_sets, items)
    uncovered_items, covered_items_list, accepted_keys = covertup
    covered_items = ut.flatten(covered_items_list)
    covered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(covered_items))
    uncovered_aids = sorted(
        ibs.get_annot_aids_from_visual_uuid(uncovered_items))

    nnindexer3 = neighbor_index_cache.request_wbia_nnindexer(  # NOQA
        ibs.new_query_request(uncovered_aids, uncovered_aids))

    # TODO: SYSTEM use visual uuids
    items = ibs.get_annot_visual_uuids(sample_aids)
    uuid_map_fpath = neighbor_index_cache.get_nnindexer_uuid_map_fpath(qreq_)
    # contextlib.closing(shelve.open(uuid_map_fpath)) as uuid_map:
    candidate_uuids = neighbor_index_cache.read_uuid_map(uuid_map_fpath, 0)
    candidate_sets = candidate_uuids
    covertup = ut.greedy_max_inden_setcover(candidate_sets, items)
    uncovered_items, covered_items_list, accepted_keys = covertup
    covered_items = ut.flatten(covered_items_list)
    covered_aids = sorted(
        ibs.get_annot_aids_from_visual_uuid(covered_items))  # NOQA
    uncovered_aids = sorted(
        ibs.get_annot_aids_from_visual_uuid(uncovered_items))
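
# Illustration only: a plain greedy set cover over candidate uuid sets. This
# is NOT utool's exact algorithm (ut.greedy_max_inden_setcover also returns
# the covered/uncovered partition and enforces its own acceptance rules);
# it just sketches the core idea of repeatedly taking the candidate set that
# covers the most remaining items. Assumes candidate_sets maps a key to an
# iterable of items.
def _demo_greedy_setcover(candidate_sets, items):
    uncovered = set(items)
    accepted_keys = []
    while uncovered and candidate_sets:
        # Pick the candidate covering the most still-uncovered items.
        key, best = max(
            candidate_sets.items(),
            key=lambda kv: len(uncovered.intersection(kv[1])))
        if not uncovered.intersection(best):
            break  # remaining items appear in no candidate set
        accepted_keys.append(key)
        uncovered -= set(best)
    return accepted_keys, uncovered
# e.g. _demo_greedy_setcover({'a': [1, 2], 'b': [2, 3]}, [1, 2, 3])
# returns (['a', 'b'], set())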
def testdata_multichips():
    import wbia

    ibs = wbia.opendb(defaultdb='testdb1')
    nid = ut.get_argval('--nid', type_=int, default=None)
    tags = ut.get_argval('--tags', type_=list, default=None)

    if nid is not None:
        aid_list = ibs.get_name_aids(nid)
    elif tags is not None:
        index = ut.get_argval('--index', default=0)
        aid_list = ibs.filter_aidpairs_by_tags(any_tags=tags)[index]
    else:
        # aid_list = ut.get_argval('--aids', type_=list, default=[1, 2, 3])
        aid_list = wbia.testdata_aids(default_aids=[1, 2, 3], ibs=ibs)

    in_image = not ut.get_argflag('--no-inimage')
    return ibs, aid_list, in_image
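
# Usage note (hedged): testdata_multichips selects annotations from CLI
# flags, checked in this order (flag names come from the ut.get_argval /
# ut.get_argflag calls above):
#   --nid            all annotations of one name
#   --tags, --index  one aid pair from a tag filter
#   otherwise        wbia.testdata_aids with default_aids=[1, 2, 3]
#   --no-inimage     sets the returned in_image flag to False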
def drive_test_script(ibs):
    r"""
    Test script where we drive around and take pictures of animals
    both in a given database and not in a given database to make sure
    the system works.

    CommandLine:
        python -m wbia.viz.viz_image --test-drive_test_script
        python -m wbia.viz.viz_image --test-drive_test_script --db PZ_MTEST --show
        python -m wbia.viz.viz_image --test-drive_test_script --db GIR_Tanya --show
        python -m wbia.viz.viz_image --test-drive_test_script --db GIR_Master0 --show
        python -m wbia.viz.viz_image --test-drive_test_script --db PZ_Master0 --show
        python -m wbia.viz.viz_image --test-drive_test_script --db PZ_FlankHack --show
        python -m wbia.viz.viz_image --test-drive_test_script --dbdir /raid/work2/Turk/GIR_Master --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.viz.viz_image import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb()
        >>> drive_test_script(ibs)
    """
    import wbia

    aid_list = wbia.testdata_aids(a='default:pername=1')
    logger.info('Running with (annot) aid_list = %r' % (aid_list,))
    gid_list = ibs.get_annot_gids(aid_list)
    logger.info('Running with (image) gid_list = %r' % (gid_list,))
    avuuid_list = ibs.get_annot_visual_uuids(aid_list)
    guuid_list = ibs.get_image_uuids(gid_list)
    # Materialize the zips before repr2; in Python 3 a bare zip object
    # would just print as <zip object ...>.
    logger.info('Running with annot_visual_uuid_list = %s' %
                (ut.repr2(list(zip(aid_list, avuuid_list)))))
    logger.info('Running with image_uuid_list = %s' %
                (ut.repr2(list(zip(gid_list, guuid_list)))))

    for gid, aid in ut.ProgressIter(zip(gid_list, aid_list), lbl='progress '):
        logger.info('\ngid, aid, nid = %r, %r, %r' % (
            gid, aid, ibs.get_annot_nids(aid),))
        show_image(ibs, gid, annote=False, rich_title=True)
        pt.show_if_requested()
def testdata_vocab(defaultdb='testdb1', **kwargs):
    """
    >>> from wbia.algo.smk.vocab_indexer import *  # NOQA
    >>> defaultdb = 'testdb1'
    >>> kwargs = {'num_words': 1000}
    """
    import wbia

    ibs, aids = wbia.testdata_aids(defaultdb=defaultdb)
    config = kwargs
    # vocab = new_load_vocab(ibs, aid_list, kwargs)
    # Hack in depcache info to the loaded vocab class
    # (maybe this becomes part of the depcache)
    rowid = ibs.depc.get_rowids('vocab', [aids], config=config)[0]
    # rowid = 1
    table = ibs.depc['vocab']
    vocab = table.get_row_data([rowid], 'words')[0]
    vocab.rowid = rowid
    vocab.config_history = table.get_config_history([vocab.rowid])[0]
    vocab.config = table.get_row_configs([vocab.rowid])[0]
    return vocab
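
# Hedged sketch of the caching behavior: because testdata_vocab resolves the
# vocab through the depcache table, two calls with the same database and
# config should land on the same cached row (assumes testdb1 is available
# locally; num_words=64 is just an example value).
def _demo_testdata_vocab_cache():
    vocab1 = testdata_vocab('testdb1', num_words=64)
    vocab2 = testdata_vocab('testdb1', num_words=64)
    assert vocab1.rowid == vocab2.rowid, 'same config should reuse the cached row'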
def latex_dbstats(ibs_list, **kwargs):
    r"""
    Args:
        ibs (IBEISController): wbia controller object

    CommandLine:
        python -m wbia.other.dbinfo --exec-latex_dbstats --dblist testdb1
        python -m wbia.other.dbinfo --exec-latex_dbstats --dblist testdb1 --show
        python -m wbia.other.dbinfo --exec-latex_dbstats --dblist PZ_Master0 testdb1 --show
        python -m wbia.other.dbinfo --exec-latex_dbstats --dblist PZ_Master0 PZ_MTEST GZ_ALL --show
        python -m wbia.other.dbinfo --test-latex_dbstats --dblist GZ_ALL NNP_MasterGIRM_core --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.other.dbinfo import *  # NOQA
        >>> import wbia
        >>> db_list = ut.get_argval('--dblist', type_=list, default=['testdb1'])
        >>> ibs_list = [wbia.opendb(db=db) for db in db_list]
        >>> tabular_str = latex_dbstats(ibs_list)
        >>> tabular_cmd = ut.latex_newcommand(ut.latex_sanitize_command_name('DatabaseInfo'), tabular_str)
        >>> ut.copy_text_to_clipboard(tabular_cmd)
        >>> write_fpath = ut.get_argval('--write', type_=str, default=None)
        >>> if write_fpath is not None:
        >>>     fpath = ut.truepath(write_fpath)
        >>>     text = ut.readfrom(fpath)
        >>>     new_text = ut.replace_between_tags(text, tabular_cmd, '% <DBINFO>', '% </DBINFO>')
        >>>     ut.writeto(fpath, new_text)
        >>> ut.print_code(tabular_cmd, 'latex')
        >>> ut.quit_if_noshow()
        >>> ut.render_latex_text('\\noindent \n' + tabular_str)
    """
    import wbia

    # Parse for aids test data
    aids_list = [wbia.testdata_aids(ibs=ibs) for ibs in ibs_list]

    # dbinfo_list = [get_dbinfo(ibs, with_contrib=False, verbose=False) for ibs in ibs_list]
    dbinfo_list = [
        get_dbinfo(ibs, with_contrib=False, verbose=False, aid_list=aids)
        for ibs, aids in zip(ibs_list, aids_list)
    ]

    # title = db_name + ' database statistics'
    title = 'Database statistics'
    stat_title = '# Annotations per name (multiton)'

    # col_lbls = [
    #     'multiton',
    #     #'singleton',
    #     'total',
    #     'multiton',
    #     'singleton',
    #     'total',
    # ]
    key_to_col_lbls = {
        'num_names_multiton': 'multiton',
        'num_names_singleton': 'singleton',
        'num_names': 'total',
        'num_multiton_annots': 'multiton',
        'num_singleton_annots': 'singleton',
        'num_unknown_annots': 'unknown',
        'num_annots': 'total',
    }
    # Structure of columns / multicolumns
    multi_col_keys = [
        (
            '# Names',
            (
                'num_names_multiton',
                # 'num_names_singleton',
                'num_names',
            ),
        ),
        (
            '# Annots',
            (
                'num_multiton_annots',
                'num_singleton_annots',
                # 'num_unknown_annots',
                'num_annots',
            ),
        ),
    ]
    # multicol_lbls = [('# Names', 3), ('# Annots', 3)]
    multicol_lbls = [(mcolname, len(mcols)) for mcolname, mcols in multi_col_keys]

    # Flatten column labels
    col_keys = ut.flatten(ut.get_list_column(multi_col_keys, 1))
    col_lbls = ut.dict_take(key_to_col_lbls, col_keys)

    row_lbls = []
    row_values = []

    # stat_col_lbls = ['max', 'min', 'mean', 'std', 'nMin', 'nMax']
    stat_col_lbls = ['max', 'min', 'mean', 'std', 'med']
    # stat_row_lbls = ['# Annot per Name (multiton)']
    stat_row_lbls = []
    stat_row_values = []

    SINGLE_TABLE = False
    EXTRA = True

    for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
        row_ = ut.dict_take(dbinfo_locals, col_keys)
        dbname = ibs.get_dbname_alias()
        row_lbls.append(dbname)
        multiton_annot_stats = ut.get_stats(
            dbinfo_locals['multiton_nid2_nannots'], use_median=True, nl=1
        )
        stat_rows = ut.dict_take(multiton_annot_stats, stat_col_lbls)
        if SINGLE_TABLE:
            row_.extend(stat_rows)
        else:
            stat_row_lbls.append(dbname)
            stat_row_values.append(stat_rows)

        row_values.append(row_)

    CENTERLINE = False
    AS_TABLE = True
    tablekw = dict(
        astable=AS_TABLE,
        centerline=CENTERLINE,
        FORCE_INT=False,
        precision=2,
        col_sep='',
        multicol_sep='|',
        **kwargs
    )

    if EXTRA:
        extra_keys = [
            # 'species2_nAids',
            'qualtext2_nAnnots',
            'viewcode2_nAnnots',
        ]
        extra_titles = {
            'species2_nAids': 'Annotations per species.',
            'qualtext2_nAnnots': 'Annotations per quality.',
            'viewcode2_nAnnots': 'Annotations per viewpoint.',
        }
        extra_collbls = ut.ddict(list)
        extra_rowvalues = ut.ddict(list)
        extra_tables = ut.ddict(list)

        for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
            for key in extra_keys:
                extra_collbls[key] = ut.unique_ordered(
                    extra_collbls[key] + list(dbinfo_locals[key].keys())
                )

        extra_collbls['qualtext2_nAnnots'] = [
            'excellent',
            'good',
            'ok',
            'poor',
            'junk',
            'UNKNOWN',
        ]
        # extra_collbls['viewcode2_nAnnots'] = ['backleft', 'left', 'frontleft', 'front', 'frontright', 'right', 'backright', 'back', None]
        extra_collbls['viewcode2_nAnnots'] = [
            'BL',
            'L',
            'FL',
            'F',
            'FR',
            'R',
            'BR',
            'B',
            None,
        ]

        for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
            for key in extra_keys:
                extra_rowvalues[key].append(
                    ut.dict_take(dbinfo_locals[key], extra_collbls[key], 0)
                )

        qualalias = {'UNKNOWN': None}

        extra_collbls['viewcode2_nAnnots'] = [
            ibs.const.YAWALIAS.get(val, val)
            for val in extra_collbls['viewcode2_nAnnots']
        ]
        extra_collbls['qualtext2_nAnnots'] = [
            qualalias.get(val, val) for val in extra_collbls['qualtext2_nAnnots']
        ]

        for key in extra_keys:
            extra_tables[key] = ut.util_latex.make_score_tabular(
                row_lbls,
                extra_collbls[key],
                extra_rowvalues[key],
                title=extra_titles[key],
                col_align='r',
                table_position='[h!]',
                **tablekw
            )
        # tabular_str = util_latex.tabular_join(tabular_body_list)

    if SINGLE_TABLE:
        col_lbls += stat_col_lbls
        multicol_lbls += [(stat_title, len(stat_col_lbls))]

    count_tabular_str = ut.util_latex.make_score_tabular(
        row_lbls,
        col_lbls,
        row_values,
        title=title,
        multicol_lbls=multicol_lbls,
        table_position='[ht!]',
        **tablekw
    )
    # logger.info(row_lbls)

    if SINGLE_TABLE:
        tabular_str = count_tabular_str
    else:
        stat_tabular_str = ut.util_latex.make_score_tabular(
            stat_row_lbls,
            stat_col_lbls,
            stat_row_values,
            title=stat_title,
            col_align='r',
            table_position='[h!]',
            **tablekw
        )
        # Make a table of statistics
        if tablekw['astable']:
            tablesep = '\n%--\n'
        else:
            tablesep = '\\\\\n%--\n'
        if EXTRA:
            tabular_str = tablesep.join(
                [count_tabular_str, stat_tabular_str]
                + ut.dict_take(extra_tables, extra_keys)
            )
        else:
            tabular_str = tablesep.join([count_tabular_str, stat_tabular_str])

    return tabular_str
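
# A stdlib-only sketch of how the multicolumn structure in latex_dbstats
# flattens into column labels (mirrors the ut.flatten / ut.get_list_column /
# ut.dict_take calls above, using the same keys and labels).
def _demo_flatten_multicols():
    multi_col_keys = [
        ('# Names', ('num_names_multiton', 'num_names')),
        ('# Annots', ('num_multiton_annots', 'num_singleton_annots', 'num_annots')),
    ]
    key_to_col_lbls = {
        'num_names_multiton': 'multiton',
        'num_names': 'total',
        'num_multiton_annots': 'multiton',
        'num_singleton_annots': 'singleton',
        'num_annots': 'total',
    }
    multicol_lbls = [(name, len(keys)) for name, keys in multi_col_keys]
    col_keys = [key for _, keys in multi_col_keys for key in keys]
    col_lbls = [key_to_col_lbls[key] for key in col_keys]
    print(multicol_lbls)  # [('# Names', 2), ('# Annots', 3)]
    print(col_lbls)       # ['multiton', 'total', 'multiton', 'singleton', 'total']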