def collect_ibeis_training_annotations(ibs, nDaids_basis, verbose=True):
    """
    Build query/database annotation samples at several target database sizes.

    Args:
        ibs (IBEISController): controller used to look up names and annots
        nDaids_basis (list): target database sizes to try to sample
        verbose (bool): unused here; kept for interface compatibility

    Returns:
        tuple: (qaids, daids_list) — the query aids and one sorted
            database-aid sample per target size that could be met exactly
    """
    # load a dataset
    #dbname = 'PZ_MTEST'
    #dbname = 'GZ_ALL'
    def get_set_groundfalse(ibs, qaids):
        # get groundfalse annots relative to the entire set
        valid_nids = ibs.get_valid_nids()
        qnids = ibs.get_annot_nids(qaids)
        nid_list = list(set(valid_nids) - set(qnids))
        aids_list = ibs.get_name_aids(nid_list)
        return ut.flatten(aids_list)

    # determinism
    np.random.seed(0)
    random.seed(0)
    # TODO: USE ANNOT FILTERINGS
    import ibeis
    qaids_all = ibeis.testdata_aids(
        a='default:pername=1,mingt=2,is_known=True')
    qaids = qaids_all[::2]
    print('nQaids = %r' % len(qaids))

    def get_annot_groundtruth_sample(ibs, aid_list, per_name=1,
                                     isexemplar=True):
        r""" DEPRICATE """
        all_trues_list = ibs.get_annot_groundtruth(aid_list, noself=True,
                                                   is_exemplar=isexemplar)

        def random_choice(aids):
            size = min(len(aids), per_name)
            return np.random.choice(aids, size, replace=False).tolist()

        sample_trues_list = [
            random_choice(aids) if len(aids) > 0 else []
            for aids in all_trues_list
        ]
        return sample_trues_list

    # BUGFIX: the original called ibs.get_annot_groundtruth_sample(ibs, ...),
    # passing the controller twice (once implicitly as self) and leaving the
    # local helper defined above unused. Call the local helper instead.
    daids_gt_sample = ut.flatten(
        get_annot_groundtruth_sample(ibs, qaids, isexemplar=None))
    daids_gf_all = get_set_groundfalse(ibs, qaids)
    ut.assert_eq(len(daids_gt_sample), len(qaids), 'missing gt')
    daids_list = []
    for target_nDaids in ut.ProgressIter(nDaids_basis, lbl='testing dbsize'):
        print('---------------------------')
        # Sample one match from the groundtruth with padding
        daids_gf_sample = ut.random_sample(
            daids_gf_all, max(0, target_nDaids - len(daids_gt_sample)))
        daids = sorted(daids_gt_sample + daids_gf_sample)
        nDaids = len(daids)
        # Skip targets that cannot be met exactly (not enough groundfalse)
        if target_nDaids != nDaids:
            continue
        daids_list.append(daids)
    return qaids, daids_list
def testdata_smk(*args, **kwargs):
    """
    Build an SMK test harness: controller, smk object, and query request.

    Splits PZ_MTEST annotations into database/query sets via a stratified
    4-fold split on name labels, then constructs an SMKRequest.

    >>> from ibeis.algo.smk.smk_pipeline import *  # NOQA
    >>> kwargs = {}
    """
    import ibeis
    import sklearn
    import sklearn.cross_validation
    # import sklearn.model_selection
    ibs, aid_list = ibeis.testdata_aids(defaultdb='PZ_MTEST')
    # Stratify on name labels so folds share name distribution
    nid_list = np.array(ibs.annots(aid_list).nids)
    rng = ut.ensure_rng(0)
    # NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
    # this code requires an old sklearn (see commented model_selection import).
    xvalkw = dict(n_folds=4, shuffle=False, random_state=rng)
    skf = sklearn.cross_validation.StratifiedKFold(nid_list, **xvalkw)
    # Use only the first fold's train/test split
    train_idx, test_idx = six.next(iter(skf))
    daids = ut.take(aid_list, train_idx)
    qaids = ut.take(aid_list, test_idx)
    config = {
        'num_words': 1000,
    }
    # Caller kwargs override the default config
    config.update(**kwargs)
    qreq_ = SMKRequest(ibs, qaids, daids, config)
    smk = qreq_.smk
    #qreq_ = ibs.new_query_request(qaids, daids, cfgdict={'pipeline_root': 'smk', 'proot': 'smk'})
    #qreq_ = ibs.new_query_request(qaids, daids, cfgdict={})
    return ibs, smk, qreq_
def trytest_incremental_add(ibs):
    r"""
    Exercise incremental addition of annotations to the nearest-neighbor
    indexer cache using overlapping annotation subsets.

    NOTE(review): the CommandLine below still references
    ``test_incremental_add`` — confirm it matches this function's name.

    Args:
        ibs (IBEISController):

    CommandLine:
        python -m ibeis.algo.hots._neighbor_experiment --test-test_incremental_add

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('PZ_MTEST')
        >>> result = test_incremental_add(ibs)
        >>> print(result)
    """
    import ibeis
    sample_aids = ibeis.testdata_aids(a='default:pername=1,mingt=2')
    # Overlapping subsets used to provoke cache reuse between indexers
    aids1 = sample_aids[::2]
    aids2 = sample_aids[0:5]
    aids3 = sample_aids[:-1]  # NOQA
    daid_list = aids1  # NOQA
    qreq_ = ibs.new_query_request(aids1, aids1)
    nnindexer1 = neighbor_index_cache.request_ibeis_nnindexer(
        ibs.new_query_request(aids1, aids1))  # NOQA
    nnindexer2 = neighbor_index_cache.request_ibeis_nnindexer(
        ibs.new_query_request(aids2, aids2))  # NOQA

    # TODO: SYSTEM use visual uuids
    items = ibs.get_annot_visual_uuids(aids3)
    uuid_map_fpath = neighbor_index_cache.get_nnindexer_uuid_map_fpath(qreq_)
    candidate_uuids = neighbor_index_cache.read_uuid_map(uuid_map_fpath, 0)
    candidate_sets = candidate_uuids
    # Greedily cover the requested uuids with cached indexer subsets
    covertup = ut.greedy_max_inden_setcover(candidate_sets, items)
    uncovered_items, covered_items_list, accepted_keys = covertup
    covered_items = ut.flatten(covered_items_list)
    covered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(covered_items))
    uncovered_aids = sorted(
        ibs.get_annot_aids_from_visual_uuid(uncovered_items))
    # Build an indexer for only the annots the cache could not cover
    nnindexer3 = neighbor_index_cache.request_ibeis_nnindexer(
        ibs.new_query_request(uncovered_aids, uncovered_aids))  # NOQA

    # TODO: SYSTEM use visual uuids
    # Repeat the covering query against the full sample set
    items = ibs.get_annot_visual_uuids(sample_aids)
    uuid_map_fpath = neighbor_index_cache.get_nnindexer_uuid_map_fpath(qreq_)
    #contextlib.closing(shelve.open(uuid_map_fpath)) as uuid_map:
    candidate_uuids = neighbor_index_cache.read_uuid_map(uuid_map_fpath, 0)
    candidate_sets = candidate_uuids
    covertup = ut.greedy_max_inden_setcover(candidate_sets, items)
    uncovered_items, covered_items_list, accepted_keys = covertup
    covered_items = ut.flatten(covered_items_list)
    covered_aids = sorted(
        ibs.get_annot_aids_from_visual_uuid(covered_items))  # NOQA
    uncovered_aids = sorted(
        ibs.get_annot_aids_from_visual_uuid(uncovered_items))
def testdata_vocab():
    """
    Construct (ibs, aid_list, vocab) test data on testdb1.

    Returns:
        tuple: (ibs, aid_list, vocab) — the controller, annotation rowids,
            and the visual vocabulary computed from their features.
    """
    # BUGFIX: ``from ibeis.new_annots import *`` inside a function body is a
    # SyntaxError in Python 3 (wildcard imports are module-level only).
    # Import the two names actually used instead.
    from ibeis.new_annots import VocabConfig, compute_vocab
    import ibeis
    ibs, aid_list = ibeis.testdata_aids('testdb1')
    depc = ibs.depc_annot
    # Feature rowids for each annotation via the annot dependency cache
    fid_list = depc.get_rowids('feat', aid_list)
    config = VocabConfig()
    vocab = compute_vocab(depc, fid_list, config)
    return ibs, aid_list, vocab
def test_incremental_add(ibs):
    r"""
    Exercise incremental addition of annotations to the nearest-neighbor
    indexer cache using overlapping annotation subsets.

    Args:
        ibs (IBEISController):

    CommandLine:
        python -m ibeis.algo.hots._neighbor_experiment --test-test_incremental_add

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('PZ_MTEST')
        >>> result = test_incremental_add(ibs)
        >>> print(result)
    """
    import ibeis
    sample_aids = ibeis.testdata_aids(a='default:pername=1,mingt=2')
    # Overlapping subsets used to provoke cache reuse between indexers
    aids1 = sample_aids[::2]
    aids2 = sample_aids[0:5]
    aids3 = sample_aids[:-1]  # NOQA
    daid_list = aids1  # NOQA
    qreq_ = ibs.new_query_request(aids1, aids1)
    nnindexer1 = neighbor_index_cache.request_ibeis_nnindexer(ibs.new_query_request(aids1, aids1))  # NOQA
    nnindexer2 = neighbor_index_cache.request_ibeis_nnindexer(ibs.new_query_request(aids2, aids2))  # NOQA

    # TODO: SYSTEM use visual uuids
    #daids_hashid = qreq_.ibs.get_annot_hashid_visual_uuid(daid_list)  # get_internal_data_hashid()
    items = ibs.get_annot_visual_uuids(aids3)
    uuid_map_fpath = neighbor_index_cache.get_nnindexer_uuid_map_fpath(qreq_)
    candidate_uuids = neighbor_index_cache.read_uuid_map(uuid_map_fpath, 0)
    candidate_sets = candidate_uuids
    # Greedily cover the requested uuids with cached indexer subsets
    covertup = ut.greedy_max_inden_setcover(candidate_sets, items)
    uncovered_items, covered_items_list, accepted_keys = covertup
    covered_items = ut.flatten(covered_items_list)
    covered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(covered_items))
    uncovered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(uncovered_items))
    # Build an indexer for only the annots the cache could not cover
    nnindexer3 = neighbor_index_cache.request_ibeis_nnindexer(ibs.new_query_request(uncovered_aids, uncovered_aids))  # NOQA

    # TODO: SYSTEM use visual uuids
    #daids_hashid = qreq_.ibs.get_annot_hashid_visual_uuid(daid_list)  # get_internal_data_hashid()
    # Repeat the covering query against the full sample set
    items = ibs.get_annot_visual_uuids(sample_aids)
    uuid_map_fpath = neighbor_index_cache.get_nnindexer_uuid_map_fpath(qreq_)
    #contextlib.closing(shelve.open(uuid_map_fpath)) as uuid_map:
    candidate_uuids = neighbor_index_cache.read_uuid_map(uuid_map_fpath, 0)
    candidate_sets = candidate_uuids
    covertup = ut.greedy_max_inden_setcover(candidate_sets, items)
    uncovered_items, covered_items_list, accepted_keys = covertup
    covered_items = ut.flatten(covered_items_list)
    covered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(covered_items))  # NOQA
    uncovered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(uncovered_items))
def collect_ibeis_training_annotations(ibs, nDaids_basis, verbose=True):
    """
    Build query/database annotation samples at several target database sizes.

    Args:
        ibs (IBEISController): controller used to look up names and annots
        nDaids_basis (list): target database sizes to try to sample
        verbose (bool): unused here; kept for interface compatibility

    Returns:
        tuple: (qaids, daids_list) — the query aids and one sorted
            database-aid sample per target size that could be met exactly
    """
    # load a dataset
    #dbname = 'PZ_MTEST'
    #dbname = 'GZ_ALL'
    def get_set_groundfalse(ibs, qaids):
        # get groundfalse annots relative to the entire set
        valid_nids = ibs.get_valid_nids()
        qnids = ibs.get_annot_nids(qaids)
        nid_list = list(set(valid_nids) - set(qnids))
        aids_list = ibs.get_name_aids(nid_list)
        return ut.flatten(aids_list)

    # determinism
    np.random.seed(0)
    random.seed(0)
    # TODO: USE ANNOT FILTERINGS
    import ibeis
    qaids_all = ibeis.testdata_aids(a='default:pername=1,mingt=2,is_known=True')
    qaids = qaids_all[::2]
    print('nQaids = %r' % len(qaids))

    def get_annot_groundtruth_sample(ibs, aid_list, per_name=1,
                                     isexemplar=True):
        r""" DEPRICATE """
        all_trues_list = ibs.get_annot_groundtruth(aid_list, noself=True,
                                                   is_exemplar=isexemplar)

        def random_choice(aids):
            size = min(len(aids), per_name)
            return np.random.choice(aids, size, replace=False).tolist()

        sample_trues_list = [random_choice(aids) if len(aids) > 0 else []
                             for aids in all_trues_list]
        return sample_trues_list

    # BUGFIX: the original called ibs.get_annot_groundtruth_sample(ibs, ...),
    # passing the controller twice (once implicitly as self) and leaving the
    # local helper defined above unused. Call the local helper instead.
    daids_gt_sample = ut.flatten(
        get_annot_groundtruth_sample(ibs, qaids, isexemplar=None))
    daids_gf_all = get_set_groundfalse(ibs, qaids)
    ut.assert_eq(len(daids_gt_sample), len(qaids), 'missing gt')
    daids_list = []
    for target_nDaids in ut.ProgressIter(nDaids_basis, lbl='testing dbsize'):
        print('---------------------------')
        # Sample one match from the groundtruth with padding
        daids_gf_sample = ut.random_sample(
            daids_gf_all, max(0, target_nDaids - len(daids_gt_sample)))
        daids = sorted(daids_gt_sample + daids_gf_sample)
        nDaids = len(daids)
        # Skip targets that cannot be met exactly (not enough groundfalse)
        if target_nDaids != nDaids:
            continue
        daids_list.append(daids)
    return qaids, daids_list
def testdata_multichips():
    """
    Build (ibs, aid_list, in_image) test data from command-line arguments.

    The annotation list is selected by ``--nid`` (all annots of one name),
    or ``--tags`` plus ``--index`` (one tagged annot pair), or a default
    aid list otherwise; ``--no-inimage`` disables in-image display.
    """
    import ibeis
    ibs = ibeis.opendb(defaultdb='testdb1')
    name_rowid = ut.get_argval('--nid', type_=int, default=None)
    tag_spec = ut.get_argval('--tags', type_=list, default=None)
    if name_rowid is not None:
        # All annotations belonging to the requested name
        chosen_aids = ibs.get_name_aids(name_rowid)
    elif tag_spec is not None:
        # One tagged annotation pair, selected by positional index
        pair_index = ut.get_argval('--index', default=0)
        chosen_aids = ibs.filter_aidpairs_by_tags(any_tags=tag_spec)[pair_index]
    else:
        #aid_list = ut.get_argval('--aids', type_=list, default=[1, 2, 3])
        chosen_aids = ibeis.testdata_aids(default_aids=[1, 2, 3], ibs=ibs)
    in_image = not ut.get_argflag('--no-inimage')
    return ibs, chosen_aids, in_image
def drive_test_script(ibs):
    r"""
    Test script where we drive around and take pictures of animals both in a
    given database and not in a given database to make sure the system works.

    CommandLine:
        python -m ibeis.viz.viz_image --test-drive_test_script
        python -m ibeis.viz.viz_image --test-drive_test_script --db PZ_MTEST --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db GIR_Tanya --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db GIR_Master0 --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db PZ_Master0 --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db PZ_FlankHack --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db PZ_FlankHack --show
        python -m ibeis.viz.viz_image --test-drive_test_script --dbdir /raid/work2/Turk/GIR_Master --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.viz.viz_image import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb()
        >>> drive_test_script(ibs)
    """
    import ibeis
    # One annotation per name for a quick pass over the database
    aid_list = ibeis.testdata_aids(a='default:pername=1')
    print('Running with (annot) aid_list = %r' % (aid_list))
    gid_list = ibs.get_annot_gids(aid_list)
    print('Running with (image) gid_list = %r' % (gid_list))
    avuuid_list = ibs.get_annot_visual_uuids(aid_list)
    guuid_list = ibs.get_image_uuids(gid_list)
    print('Running with annot_visual_uuid_list = %s' % (
        ut.repr2(zip(aid_list, avuuid_list))))
    print('Running with image_uuid_list = %s' % (
        ut.repr2(zip(gid_list, guuid_list))))
    # Show each image in turn; show_if_requested blocks per image when --show
    for gid, aid in ut.ProgressIter(zip(gid_list, aid_list), lbl='progress '):
        print('\ngid, aid, nid = %r, %r, %r' % (
            gid, aid, ibs.get_annot_nids(aid),))
        show_image(ibs, gid, annote=False, rich_title=True)
        pt.show_if_requested()
def testdata_vocab(defaultdb='testdb1', **kwargs):
    """
    Load a visual vocabulary through the dependency cache for testing.

    >>> from ibeis.algo.smk.vocab_indexer import *  # NOQA
    >>> defaultdb='testdb1'
    >>> kwargs = {'num_words': 1000}
    """
    import ibeis
    ibs, aids = ibeis.testdata_aids(defaultdb=defaultdb)
    # kwargs are used directly as the depcache vocab config
    config = kwargs
    # vocab = new_load_vocab(ibs, aid_list, kwargs)
    # Hack in depcache info to the loaded vocab class
    # (maybe this becomes part of the depcache)
    rowid = ibs.depc.get_rowids('vocab', [aids], config=config)[0]
    # rowid = 1
    table = ibs.depc['vocab']
    vocab = table.get_row_data([rowid], 'words')[0]
    # Attach provenance so callers can inspect how the vocab was built
    vocab.rowid = rowid
    vocab.config_history = table.get_config_history([vocab.rowid])[0]
    vocab.config = table.get_row_configs([vocab.rowid])[0]
    return vocab
def drive_test_script(ibs):
    r"""
    Test script where we drive around and take pictures of animals both in a
    given database and not in a given database to make sure the system works.

    CommandLine:
        python -m ibeis.viz.viz_image --test-drive_test_script
        python -m ibeis.viz.viz_image --test-drive_test_script --db PZ_MTEST --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db GIR_Tanya --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db GIR_Master0 --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db PZ_Master0 --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db PZ_FlankHack --show
        python -m ibeis.viz.viz_image --test-drive_test_script --db PZ_FlankHack --show
        python -m ibeis.viz.viz_image --test-drive_test_script --dbdir /raid/work2/Turk/GIR_Master --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.viz.viz_image import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb()
        >>> drive_test_script(ibs)
    """
    import ibeis
    # One annotation per name for a quick pass over the database
    aid_list = ibeis.testdata_aids(a='default:pername=1')
    print('Running with (annot) aid_list = %r' % (aid_list))
    gid_list = ibs.get_annot_gids(aid_list)
    print('Running with (image) gid_list = %r' % (gid_list))
    avuuid_list = ibs.get_annot_visual_uuids(aid_list)
    guuid_list = ibs.get_image_uuids(gid_list)
    print('Running with annot_visual_uuid_list = %s' % (
        ut.list_str(zip(aid_list, avuuid_list))))
    print('Running with image_uuid_list = %s' % (
        ut.list_str(zip(gid_list, guuid_list))))
    # Show each image in turn; show_if_requested blocks per image when --show
    for gid, aid in ut.ProgressIter(zip(gid_list, aid_list), lbl='progress '):
        print('\ngid, aid, nid = %r, %r, %r' % (gid, aid, ibs.get_annot_nids(aid),))
        show_image(ibs, gid, annote=False, rich_title=True)
        pt.show_if_requested()
def latex_dbstats(ibs_list, **kwargs):
    r"""
    Render database statistics for several controllers as LaTeX tables.

    Builds a name/annot count table, a per-name annotation statistics table,
    and (optionally) per-quality and per-viewpoint breakdown tables, joined
    into one LaTeX string.

    Args:
        ibs (IBEISController):  ibeis controller object

    CommandLine:
        python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist testdb1
        python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist testdb1 --show
        python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist PZ_Master0 testdb1 --show
        python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist PZ_Master0 PZ_MTEST GZ_ALL --show
        python -m ibeis.other.dbinfo --test-latex_dbstats --dblist GZ_ALL NNP_MasterGIRM_core --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.other.dbinfo import *  # NOQA
        >>> import ibeis
        >>> db_list = ut.get_argval('--dblist', type_=list, default=['testdb1'])
        >>> ibs_list = [ibeis.opendb(db=db) for db in db_list]
        >>> tabular_str = latex_dbstats(ibs_list)
        >>> tabular_cmd = ut.latex_newcommand(ut.latex_sanitize_command_name('DatabaseInfo'), tabular_str)
        >>> ut.copy_text_to_clipboard(tabular_cmd)
        >>> write_fpath = ut.get_argval('--write', type_=str, default=None)
        >>> if write_fpath is not None:
        >>>     fpath = ut.truepath(write_fpath)
        >>>     text = ut.readfrom(fpath)
        >>>     new_text = ut.replace_between_tags(text, tabular_cmd, '% <DBINFO>', '% </DBINFO>')
        >>>     ut.writeto(fpath, new_text)
        >>> ut.print_code(tabular_cmd, 'latex')
        >>> ut.quit_if_noshow()
        >>> ut.render_latex_text('\\noindent \n' + tabular_str)
    """
    import ibeis
    # Parse for aids test data
    aids_list = [ibeis.testdata_aids(ibs=ibs) for ibs in ibs_list]
    #dbinfo_list = [get_dbinfo(ibs, with_contrib=False, verbose=False) for ibs in ibs_list]
    dbinfo_list = [
        get_dbinfo(ibs, with_contrib=False, verbose=False, aid_list=aids)
        for ibs, aids in zip(ibs_list, aids_list)
    ]
    #title = db_name + ' database statistics'
    title = 'Database statistics'
    stat_title = '# Annotations per name (multiton)'
    #col_lbls = [
    #    'multiton',
    #    #'singleton',
    #    'total',
    #    'multiton',
    #    'singleton',
    #    'total',
    #]
    # Map dbinfo keys to human-readable column labels
    key_to_col_lbls = {
        'num_names_multiton': 'multiton',
        'num_names_singleton': 'singleton',
        'num_names': 'total',
        'num_multiton_annots': 'multiton',
        'num_singleton_annots': 'singleton',
        'num_unknown_annots': 'unknown',
        'num_annots': 'total',
    }
    # Structure of columns / multicolumns
    multi_col_keys = [
        ('# Names', (
            'num_names_multiton',
            #'num_names_singleton',
            'num_names',
        )),
        ('# Annots', (
            'num_multiton_annots',
            'num_singleton_annots',
            #'num_unknown_annots',
            'num_annots')),
    ]
    #multicol_lbls = [('# Names', 3), ('# Annots', 3)]
    multicol_lbls = [(mcolname, len(mcols))
                     for mcolname, mcols in multi_col_keys]
    # Flatten column labels
    col_keys = ut.flatten(ut.get_list_column(multi_col_keys, 1))
    col_lbls = ut.dict_take(key_to_col_lbls, col_keys)
    row_lbls = []
    row_values = []
    #stat_col_lbls = ['max', 'min', 'mean', 'std', 'nMin', 'nMax']
    stat_col_lbls = ['max', 'min', 'mean', 'std', 'med']
    #stat_row_lbls = ['# Annot per Name (multiton)']
    stat_row_lbls = []
    stat_row_values = []
    SINGLE_TABLE = False
    EXTRA = True
    # One table row per database: counts plus per-name annot statistics
    for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
        row_ = ut.dict_take(dbinfo_locals, col_keys)
        dbname = ibs.get_dbname_alias()
        row_lbls.append(dbname)
        multiton_annot_stats = ut.get_stats(
            dbinfo_locals['multiton_nid2_nannots'], use_median=True, nl=1)
        stat_rows = ut.dict_take(multiton_annot_stats, stat_col_lbls)
        if SINGLE_TABLE:
            row_.extend(stat_rows)
        else:
            stat_row_lbls.append(dbname)
            stat_row_values.append(stat_rows)
        row_values.append(row_)
    CENTERLINE = False
    AS_TABLE = True
    tablekw = dict(astable=AS_TABLE, centerline=CENTERLINE, FORCE_INT=False,
                   precision=2, col_sep='', multicol_sep='|', **kwargs)
    if EXTRA:
        # Extra breakdown tables: per-quality and per-viewpoint counts
        extra_keys = [
            #'species2_nAids',
            'qualtext2_nAnnots',
            'viewcode2_nAnnots',
        ]
        extra_titles = {
            'species2_nAids': 'Annotations per species.',
            'qualtext2_nAnnots': 'Annotations per quality.',
            'viewcode2_nAnnots': 'Annotations per viewpoint.',
        }
        extra_collbls = ut.ddict(list)
        extra_rowvalues = ut.ddict(list)
        extra_tables = ut.ddict(list)
        # Union of keys observed across all databases
        for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
            for key in extra_keys:
                extra_collbls[key] = ut.unique_ordered(
                    extra_collbls[key] + list(dbinfo_locals[key].keys()))
        # Fix a canonical column order for quality and viewpoint
        extra_collbls['qualtext2_nAnnots'] = [
            'excellent', 'good', 'ok', 'poor', 'junk', 'UNKNOWN'
        ]
        #extra_collbls['viewcode2_nAnnots'] = ['backleft', 'left', 'frontleft', 'front', 'frontright', 'right', 'backright', 'back', None]
        extra_collbls['viewcode2_nAnnots'] = [
            'BL', 'L', 'FL', 'F', 'FR', 'R', 'BR', 'B', None
        ]
        for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
            for key in extra_keys:
                extra_rowvalues[key].append(
                    ut.dict_take(dbinfo_locals[key], extra_collbls[key], 0))
        # Relabel columns for display (UNKNOWN quality shown as None)
        qualalias = {'UNKNOWN': None}
        extra_collbls['viewcode2_nAnnots'] = [
            ibs.const.YAWALIAS.get(val, val)
            for val in extra_collbls['viewcode2_nAnnots']
        ]
        extra_collbls['qualtext2_nAnnots'] = [
            qualalias.get(val, val)
            for val in extra_collbls['qualtext2_nAnnots']
        ]
        for key in extra_keys:
            extra_tables[key] = ut.util_latex.make_score_tabular(
                row_lbls, extra_collbls[key], extra_rowvalues[key],
                title=extra_titles[key], col_align='r',
                table_position='[h!]', **tablekw)
    #tabular_str = util_latex.tabular_join(tabular_body_list)
    if SINGLE_TABLE:
        col_lbls += stat_col_lbls
        multicol_lbls += [(stat_title, len(stat_col_lbls))]
    count_tabular_str = ut.util_latex.make_score_tabular(
        row_lbls, col_lbls, row_values, title=title,
        multicol_lbls=multicol_lbls, table_position='[ht!]', **tablekw)
    #print(row_lbls)
    if SINGLE_TABLE:
        tabular_str = count_tabular_str
    else:
        stat_tabular_str = ut.util_latex.make_score_tabular(
            stat_row_lbls, stat_col_lbls, stat_row_values,
            title=stat_title, col_align='r', table_position='[h!]',
            **tablekw)
        # Make a table of statistics
        if tablekw['astable']:
            tablesep = '\n%--\n'
        else:
            tablesep = '\\\\\n%--\n'
        if EXTRA:
            tabular_str = tablesep.join(
                [count_tabular_str, stat_tabular_str] +
                ut.dict_take(extra_tables, extra_keys))
        else:
            tabular_str = tablesep.join([count_tabular_str, stat_tabular_str])
    return tabular_str
def latex_dbstats(ibs_list, **kwargs):
    r"""
    Render database statistics for several controllers as LaTeX tables.

    Builds a name/annot count table, a per-name annotation statistics table,
    and (optionally) per-quality and per-viewpoint breakdown tables, joined
    into one LaTeX string.

    Args:
        ibs (IBEISController):  ibeis controller object

    CommandLine:
        python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist testdb1
        python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist testdb1 --show
        python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist PZ_Master0 testdb1 --show
        python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist PZ_Master0 PZ_MTEST GZ_ALL --show
        python -m ibeis.other.dbinfo --test-latex_dbstats --dblist GZ_ALL NNP_MasterGIRM_core --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.other.dbinfo import *  # NOQA
        >>> import ibeis
        >>> db_list = ut.get_argval('--dblist', type_=list, default=['testdb1'])
        >>> ibs_list = [ibeis.opendb(db=db) for db in db_list]
        >>> tabular_str = latex_dbstats(ibs_list)
        >>> tabular_cmd = ut.latex_newcommand(ut.latex_sanitize_command_name('DatabaseInfo'), tabular_str)
        >>> ut.copy_text_to_clipboard(tabular_cmd)
        >>> write_fpath = ut.get_argval('--write', type_=str, default=None)
        >>> if write_fpath is not None:
        >>>     fpath = ut.truepath(write_fpath)
        >>>     text = ut.readfrom(fpath)
        >>>     new_text = ut.replace_between_tags(text, tabular_cmd, '% <DBINFO>', '% </DBINFO>')
        >>>     ut.writeto(fpath, new_text)
        >>> ut.print_code(tabular_cmd, 'latex')
        >>> ut.quit_if_noshow()
        >>> ut.render_latex_text('\\noindent \n' + tabular_str)
    """
    import ibeis
    # Parse for aids test data
    aids_list = [ibeis.testdata_aids(ibs=ibs) for ibs in ibs_list]
    #dbinfo_list = [get_dbinfo(ibs, with_contrib=False, verbose=False) for ibs in ibs_list]
    dbinfo_list = [get_dbinfo(ibs, with_contrib=False, verbose=False,
                              aid_list=aids)
                   for ibs, aids in zip(ibs_list, aids_list)]
    #title = db_name + ' database statistics'
    title = 'Database statistics'
    stat_title = '# Annotations per name (multiton)'
    #col_lbls = [
    #    'multiton',
    #    #'singleton',
    #    'total',
    #    'multiton',
    #    'singleton',
    #    'total',
    #]
    # Map dbinfo keys to human-readable column labels
    key_to_col_lbls = {
        'num_names_multiton': 'multiton',
        'num_names_singleton': 'singleton',
        'num_names': 'total',
        'num_multiton_annots': 'multiton',
        'num_singleton_annots': 'singleton',
        'num_unknown_annots': 'unknown',
        'num_annots': 'total',
    }
    # Structure of columns / multicolumns
    multi_col_keys = [
        ('# Names', (
            'num_names_multiton',
            #'num_names_singleton',
            'num_names',
        )),
        ('# Annots', (
            'num_multiton_annots',
            'num_singleton_annots',
            #'num_unknown_annots',
            'num_annots')),
    ]
    #multicol_lbls = [('# Names', 3), ('# Annots', 3)]
    multicol_lbls = [(mcolname, len(mcols))
                     for mcolname, mcols in multi_col_keys]
    # Flatten column labels
    col_keys = ut.flatten(ut.get_list_column(multi_col_keys, 1))
    col_lbls = ut.dict_take(key_to_col_lbls, col_keys)
    row_lbls = []
    row_values = []
    #stat_col_lbls = ['max', 'min', 'mean', 'std', 'nMin', 'nMax']
    stat_col_lbls = ['max', 'min', 'mean', 'std', 'med']
    #stat_row_lbls = ['# Annot per Name (multiton)']
    stat_row_lbls = []
    stat_row_values = []
    SINGLE_TABLE = False
    EXTRA = True
    # One table row per database: counts plus per-name annot statistics
    for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
        row_ = ut.dict_take(dbinfo_locals, col_keys)
        dbname = ibs.get_dbname_alias()
        row_lbls.append(dbname)
        multiton_annot_stats = ut.get_stats(
            dbinfo_locals['multiton_nid2_nannots'], use_median=True)
        stat_rows = ut.dict_take(multiton_annot_stats, stat_col_lbls)
        if SINGLE_TABLE:
            row_.extend(stat_rows)
        else:
            stat_row_lbls.append(dbname)
            stat_row_values.append(stat_rows)
        row_values.append(row_)
    CENTERLINE = False
    AS_TABLE = True
    tablekw = dict(
        astable=AS_TABLE, centerline=CENTERLINE, FORCE_INT=False,
        precision=2, col_sep='', multicol_sep='|', **kwargs)
    if EXTRA:
        # Extra breakdown tables: per-quality and per-viewpoint counts
        extra_keys = [
            #'species2_nAids',
            'qualtext2_nAnnots',
            'yawtext2_nAnnots',
        ]
        extra_titles = {
            'species2_nAids': 'Annotations per species.',
            'qualtext2_nAnnots': 'Annotations per quality.',
            'yawtext2_nAnnots': 'Annotations per viewpoint.',
        }
        extra_collbls = ut.ddict(list)
        extra_rowvalues = ut.ddict(list)
        extra_tables = ut.ddict(list)
        # Union of keys observed across all databases
        for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
            for key in extra_keys:
                extra_collbls[key] = ut.unique_ordered(
                    extra_collbls[key] + list(dbinfo_locals[key].keys()))
        # Fix a canonical column order for quality and viewpoint
        extra_collbls['qualtext2_nAnnots'] = [
            'excellent', 'good', 'ok', 'poor', 'junk', 'UNKNOWN']
        #extra_collbls['yawtext2_nAnnots'] = ['backleft', 'left', 'frontleft', 'front', 'frontright', 'right', 'backright', 'back', None]
        extra_collbls['yawtext2_nAnnots'] = [
            'BL', 'L', 'FL', 'F', 'FR', 'R', 'BR', 'B', None]
        for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
            for key in extra_keys:
                extra_rowvalues[key].append(
                    ut.dict_take(dbinfo_locals[key], extra_collbls[key], 0))
        # Relabel columns for display (UNKNOWN quality shown as None)
        qualalias = {'UNKNOWN': None}
        extra_collbls['yawtext2_nAnnots'] = [
            ibs.const.YAWALIAS.get(val, val)
            for val in extra_collbls['yawtext2_nAnnots']]
        extra_collbls['qualtext2_nAnnots'] = [
            qualalias.get(val, val)
            for val in extra_collbls['qualtext2_nAnnots']]
        for key in extra_keys:
            extra_tables[key] = ut.util_latex.make_score_tabular(
                row_lbls, extra_collbls[key], extra_rowvalues[key],
                title=extra_titles[key], col_align='r',
                table_position='[h!]', **tablekw)
    #tabular_str = util_latex.tabular_join(tabular_body_list)
    if SINGLE_TABLE:
        col_lbls += stat_col_lbls
        multicol_lbls += [(stat_title, len(stat_col_lbls))]
    count_tabular_str = ut.util_latex.make_score_tabular(
        row_lbls, col_lbls, row_values, title=title,
        multicol_lbls=multicol_lbls, table_position='[ht!]', **tablekw)
    #print(row_lbls)
    if SINGLE_TABLE:
        tabular_str = count_tabular_str
    else:
        stat_tabular_str = ut.util_latex.make_score_tabular(
            stat_row_lbls, stat_col_lbls, stat_row_values,
            title=stat_title, col_align='r', table_position='[h!]',
            **tablekw)
        # Make a table of statistics
        if tablekw['astable']:
            tablesep = '\n%--\n'
        else:
            tablesep = '\\\\\n%--\n'
        if EXTRA:
            tabular_str = tablesep.join(
                [count_tabular_str, stat_tabular_str] +
                ut.dict_take(extra_tables, extra_keys))
        else:
            tabular_str = tablesep.join([count_tabular_str, stat_tabular_str])
    return tabular_str