def get_column_keys(metadata):
    unflat_colname_list = [
        [cols.keys() for cols in qaid2_cols.values()]
        for qaid2_cols in six.itervalues(metadata.dictstore)
    ]
    colname_list = ut.unique_keep_order(ut.flatten(ut.flatten(unflat_colname_list)))
    return colname_list

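# Illustrative sketch (not from the original source): a pure-python stand-in
# showing why get_column_keys needs two flattens. The dictstore shape assumed
# here -- cfgstr -> {qaid: {colname: value}} -- is inferred from
# get_square_data later in this file; the data itself is made up.
def _demo_get_column_keys_sketch():
    from itertools import chain
    dictstore = {
        'cfg1': {1: {'score': .9, 'rank': 0}, 2: {'score': .4}},
        'cfg2': {1: {'rank': 3, 'gt_aid': 7}},
    }
    # keys are nested two levels deep: per-config, then per-qaid
    unflat_colname_list = [[list(cols.keys()) for cols in qaid2_cols.values()]
                           for qaid2_cols in dictstore.values()]
    flat = chain.from_iterable(chain.from_iterable(unflat_colname_list))
    colname_list = list(dict.fromkeys(flat))  # order-preserving unique
    assert colname_list == ['score', 'rank', 'gt_aid']
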
def delete_contributors(ibs, contrib_rowid_list):
    r"""
    deletes contributors from the database and all associated information

    RESTful:
        Method: DELETE
        URL:    /api/contributor/
    """
    # TODO: FIXME TESTME
    if not ut.QUIET:
        print('[ibs] deleting %d contributors' % len(contrib_rowid_list))
    config_rowid_list = ut.flatten(ibs.get_contributor_config_rowids(contrib_rowid_list))
    # Delete configs (UNSURE IF THIS IS CORRECT)
    ibs.delete_configs(config_rowid_list)
    # CONTRIBUTORS SHOULD NOT DELETE IMAGES
    # Delete encounters
    #eid_list = ibs.get_valid_eids()
    #eid_config_list = ibs.get_encounter_configid(eid_list)
    #valid_list = [config in config_rowid_list for config in eid_config_list]
    #eid_list = ut.compress(eid_list, valid_list)
    #ibs.delete_encounters(eid_list)
    # Remove image contributors ~~~Delete images~~~
    gid_list = ut.flatten(ibs.get_contributor_gids(contrib_rowid_list))
    ibs.set_image_contributor_rowid(gid_list, [None] * len(gid_list))
    #ibs.delete_images(gid_list)
    # Delete contributors
    ibs.db.delete_rowids(const.CONTRIBUTOR_TABLE, contrib_rowid_list)

def get_varied_acfg_labels(acfg_list, mainkey='_cfgname', checkname=False):
    """
    >>> from ibeis.expt.annotation_configs import *  # NOQA
    """
    #print(ut.list_str(varied_acfg_list, nl=2))
    for acfg in acfg_list:
        assert acfg['qcfg'][mainkey] == acfg['dcfg'][mainkey], (
            'should be the same for now')
    cfgname_list = [acfg['qcfg'][mainkey] for acfg in acfg_list]
    if checkname and ut.allsame(cfgname_list):
        cfgname_list = [None] * len(cfgname_list)

    # Hack to make common params between q and d appear the same
    _acfg_list = [compress_aidcfg(acfg) for acfg in acfg_list]
    flat_acfg_list = flatten_acfg_list(_acfg_list)
    nonvaried_dict, varied_acfg_list = ut.partition_varied_cfg_list(
        flat_acfg_list)

    SUPER_HACK = True
    if SUPER_HACK:
        # SUPER HACK, recompress remake the varied list after knowing what is varied
        _varied_keys = list(set(ut.flatten(
            [list(ut.flatten(
                [list(x.keys()) for x in unflatten_acfgdict(cfg).values()]
            )) for cfg in varied_acfg_list]
        )))
        _acfg_list = [
            compress_aidcfg(acfg, force_noncommon=_varied_keys)
            for acfg in acfg_list]
        flat_acfg_list = flatten_acfg_list(_acfg_list)
        nonvaried_dict, varied_acfg_list = ut.partition_varied_cfg_list(
            flat_acfg_list)

    shortened_cfg_list = [
        #{shorten_to_alias_labels(key): val for key, val in _dict.items()}
        ut.map_dict_keys(shorten_to_alias_labels, _dict)
        for _dict in varied_acfg_list]
    nonlbl_keys = ut.INTERNAL_CFGKEYS
    nonlbl_keys = [prefix + key for key in nonlbl_keys
                   for prefix in ['', 'q', 'd']]

    # hack for sorting by q/d stuff first
    def get_key_order(cfg):
        keys = [k for k in cfg.keys() if k not in nonlbl_keys]
        sortorder = [2 * k.startswith('q') + 1 * k.startswith('d')
                     for k in keys]
        return ut.sortedby(keys, sortorder)[::-1]

    cfglbl_list = [
        ut.get_cfg_lbl(cfg, name, nonlbl_keys, key_order=get_key_order(cfg))
        for cfg, name in zip(shortened_cfg_list, cfgname_list)]

    if checkname:
        cfglbl_list = [x.lstrip(':') for x in cfglbl_list]
    return cfglbl_list

def _build_inverted_descriptor_index(aid_list, desc_list):
    """
    Stacks descriptors into a flat structure and returns inverse mapping from
    flat database descriptor indexes (dx) to annotation ids (aid) and feature
    indexes (fx). Feature indexes are w.r.t. annotation indexes.

    Output:
        dx2_desc - flat descriptor stack
        dx2_aid  - inverted index into annotations
        dx2_fx   - inverted index into features

    # Example with 2D Descriptors
    >>> from ibeis.model.hots.hots_nn_index import *  # NOQA
    >>> from ibeis.model.hots.hots_nn_index import _build_inverted_descriptor_index
    >>> DESC_TYPE = np.uint8
    >>> aid_list  = [1, 2, 3, 4, 5]
    >>> desc_list = [
    ...     np.array([[0, 0], [0, 1]], dtype=DESC_TYPE),
    ...     np.array([[5, 3], [2, 30], [1, 1]], dtype=DESC_TYPE),
    ...     np.empty((0, 2), dtype=DESC_TYPE),
    ...     np.array([[5, 3], [2, 30], [1, 1]], dtype=DESC_TYPE),
    ...     np.array([[3, 3], [42, 42], [2, 6]], dtype=DESC_TYPE),
    ...     ]
    >>> dx2_desc, dx2_aid, dx2_fx = _build_inverted_descriptor_index(aid_list, desc_list)
    >>> print(repr(dx2_desc.T))
    array([[ 0,  0,  5,  2,  1,  5,  2,  1,  3, 42,  2],
           [ 0,  1,  3, 30,  1,  3, 30,  1,  3, 42,  6]], dtype=uint8)
    >>> print(repr(dx2_aid))
    array([1, 1, 2, 2, 2, 4, 4, 4, 5, 5, 5])
    >>> print(repr(dx2_fx))
    array([0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2])

    cdef:
        list aid_list, desc_list
        long nFeat, aid
        iter aid_nFeat_iter, nFeat_iter, _ax2_aid, _ax2_fx
        np.ndarray dx2_aid, dx2_fx, dx2_desc
    """
    # Build inverted index of (aid, fx) pairs
    aid_nFeat_iter = zip(aid_list, map(len, desc_list))
    nFeat_iter = map(len, desc_list)
    # generate aid inverted index for each feature in each annotation
    _ax2_aid = ([aid] * nFeat for (aid, nFeat) in aid_nFeat_iter)
    # Avi: please test the timing of the lines neighboring this statement.
    #_ax2_aid = ([aid] * nFeat for (aid, nFeat) in aid_nFeat_iter)
    # generate featx inverted index for each feature in each annotation
    _ax2_fx = (range(nFeat) for nFeat in nFeat_iter)
    # Flatten generators into the inverted index
    #dx2_aid = np.array(list(chain.from_iterable(_ax2_aid)))
    #dx2_fx  = np.array(list(chain.from_iterable(_ax2_fx)))
    dx2_aid = np.array(utool.flatten(_ax2_aid))
    dx2_fx = np.array(utool.flatten(_ax2_fx))
    # Stack descriptors into numpy array corresponding to inverted index
    # This might throw a MemoryError
    dx2_desc = np.vstack(desc_list)
    return dx2_desc, dx2_aid, dx2_fx

def get_varied_params_list(test_cfg_name_list):
    vary_dicts = get_vary_dicts(test_cfg_name_list)
    dict_comb_list = [utool.all_dict_combinations(dict_) for dict_ in vary_dicts]
    dict_comb_lbls = [utool.all_dict_combinations_lbls(dict_) for dict_ in vary_dicts]
    # Append testname
    dict_comb_lbls = [[name_lbl + lbl for lbl in comb_lbls]
                      for name_lbl, comb_lbls in izip(test_cfg_name_list, dict_comb_lbls)]
    varied_params_list = utool.flatten(dict_comb_list)
    # [comb for dict_comb in dict_comb_list for comb in dict_comb]
    varied_param_lbls = utool.flatten(dict_comb_lbls)
    return varied_params_list, varied_param_lbls

def test_incremental_add(ibs):
    r"""
    Args:
        ibs (IBEISController):

    CommandLine:
        python -m ibeis.algo.hots._neighbor_experiment --test-test_incremental_add

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('PZ_MTEST')
        >>> result = test_incremental_add(ibs)
        >>> print(result)
    """
    import ibeis
    sample_aids = ibeis.testdata_aids(a='default:pername=1,mingt=2')
    aids1 = sample_aids[::2]
    aids2 = sample_aids[0:5]
    aids3 = sample_aids[:-1]  # NOQA
    daid_list = aids1  # NOQA
    qreq_ = ibs.new_query_request(aids1, aids1)
    nnindexer1 = neighbor_index_cache.request_ibeis_nnindexer(ibs.new_query_request(aids1, aids1))  # NOQA
    nnindexer2 = neighbor_index_cache.request_ibeis_nnindexer(ibs.new_query_request(aids2, aids2))  # NOQA

    # TODO: SYSTEM use visual uuids
    #daids_hashid = qreq_.ibs.get_annot_hashid_visual_uuid(daid_list)  # get_internal_data_hashid()
    items = ibs.get_annot_visual_uuids(aids3)
    uuid_map_fpath = neighbor_index_cache.get_nnindexer_uuid_map_fpath(qreq_)
    candidate_uuids = neighbor_index_cache.read_uuid_map(uuid_map_fpath, 0)
    candidate_sets = candidate_uuids
    covertup = ut.greedy_max_inden_setcover(candidate_sets, items)
    uncovered_items, covered_items_list, accepted_keys = covertup
    covered_items = ut.flatten(covered_items_list)

    covered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(covered_items))
    uncovered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(uncovered_items))

    nnindexer3 = neighbor_index_cache.request_ibeis_nnindexer(ibs.new_query_request(uncovered_aids, uncovered_aids))  # NOQA

    # TODO: SYSTEM use visual uuids
    #daids_hashid = qreq_.ibs.get_annot_hashid_visual_uuid(daid_list)  # get_internal_data_hashid()
    items = ibs.get_annot_visual_uuids(sample_aids)
    uuid_map_fpath = neighbor_index_cache.get_nnindexer_uuid_map_fpath(qreq_)
    #contextlib.closing(shelve.open(uuid_map_fpath)) as uuid_map:
    candidate_uuids = neighbor_index_cache.read_uuid_map(uuid_map_fpath, 0)
    candidate_sets = candidate_uuids
    covertup = ut.greedy_max_inden_setcover(candidate_sets, items)
    uncovered_items, covered_items_list, accepted_keys = covertup
    covered_items = ut.flatten(covered_items_list)

    covered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(covered_items))  # NOQA
    uncovered_aids = sorted(ibs.get_annot_aids_from_visual_uuid(uncovered_items))

def __init__(fstack, ibs, aid_list, config=None):
    ax2_vecs = ibs.depc_annot.d.get_feat_vecs(aid_list, config=config)
    fstack.config = config
    fstack.ibs = ibs
    fstack.ax2_aid = aid_list
    fstack.ax2_nFeat = [len(vecs) for vecs in ax2_vecs]
    fstack.idx2_fxs = ut.flatten([list(range(num)) for num in fstack.ax2_nFeat])
    fstack.idx2_axs = ut.flatten([[ax] * num for ax, num in enumerate(fstack.ax2_nFeat)])
    fstack.idx2_vec = np.vstack(ax2_vecs)
    #fstack.idx2_fxs = vt.atleast_nd(fstack.idx2_fxs, 2)
    #fstack.idx2_axs = vt.atleast_nd(fstack.idx2_axs, 2)
    fstack.num_feat = sum(fstack.ax2_nFeat)

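# Illustrative sketch (not from the original source): the two ut.flatten
# calls above build a flat feature-index -> (annot index, feature index)
# inverted mapping; here is the same idiom in plain python/numpy with
# made-up descriptor shapes.
def _demo_feature_stack_sketch():
    import numpy as np
    ax2_vecs = [np.zeros((2, 8)), np.zeros((3, 8)), np.zeros((1, 8))]
    ax2_nFeat = [len(vecs) for vecs in ax2_vecs]
    idx2_fxs = [fx for num in ax2_nFeat for fx in range(num)]
    idx2_axs = [ax for ax, num in enumerate(ax2_nFeat) for _ in range(num)]
    idx2_vec = np.vstack(ax2_vecs)
    assert idx2_axs == [0, 0, 1, 1, 1, 2]
    assert idx2_fxs == [0, 1, 0, 1, 2, 0]
    assert idx2_vec.shape == (6, 8)
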
def intersect_hack():
    failed = testres.rank_mat > 0
    colx2_failed = [np.nonzero(failed_col)[0] for failed_col in failed.T]
    #failed_col2_only = np.setdiff1d(colx2_failed[1], colx2_failed[0])
    #failed_col2_only_aids = ut.take(testres.qaids, failed_col2_only)
    failed_col1_only = np.setdiff1d(colx2_failed[0], colx2_failed[1])
    failed_col1_only_aids = ut.take(testres.qaids, failed_col1_only)
    gt_aids1 = ibs.get_annot_groundtruth(failed_col1_only_aids,
                                         daid_list=testres.cfgx2_qreq_[0].daids)
    gt_aids2 = ibs.get_annot_groundtruth(failed_col1_only_aids,
                                         daid_list=testres.cfgx2_qreq_[1].daids)
    qaids_expt = failed_col1_only_aids
    gt_avl_aids1 = ut.flatten(gt_aids1)
    gt_avl_aids2 = list(set(ut.flatten(gt_aids2)).difference(gt_avl_aids1))

    ibs.print_annotconfig_stats(qaids_expt, gt_avl_aids1)
    ibs.print_annotconfig_stats(qaids_expt, gt_avl_aids2)
    #jsontext = ut.to_json({
    #    'qaids': list(qaids_expt),
    #    'dinclude_aids1': list(gt_aids_expt1),
    #    'dinclude_aids2': list(gt_aids_expt2),
    #})
    #annotation_configs.varysize_pzm
    #from ibeis.expt import annotation_configs

    acfg = testres.acfg_list[0]
    import copy
    acfg1 = copy.deepcopy(acfg)
    acfg2 = copy.deepcopy(acfg)
    acfg1['qcfg']['min_pername'] = None
    acfg2['qcfg']['min_pername'] = None
    acfg1['dcfg']['min_pername'] = None
    acfg2['dcfg']['min_gt_per_name'] = None

    acfg1['qcfg']['default_aids'] = qaids_expt
    acfg1['dcfg']['gt_avl_aids'] = gt_avl_aids1
    acfg2['qcfg']['default_aids'] = qaids_expt
    acfg2['dcfg']['gt_avl_aids'] = gt_avl_aids2

    from ibeis.init import filter_annots
    from ibeis.expt import experiment_helpers

    annots1 = filter_annots.expand_acfgs(ibs, acfg1, verbose=True)
    annots2 = filter_annots.expand_acfgs(ibs, acfg2, verbose=True)

    acfg_name_list = dict(  # NOQA
        acfg_list=[acfg1, acfg2],
        expanded_aids_list=[annots1, annots2],
    )
    test_cfg_name_list = ['candidacy_k']
    cfgdict_list, pipecfg_list = experiment_helpers.get_pipecfg_list(
        test_cfg_name_list, ibs=ibs)

    t1, t2 = testres_list  # NOQA

def parse_all_fname_tags(fname):
    _tags = [splitext(fname)[0]]
    _tags = ut.flatten([t.split('_') for t in _tags])
    _tags = ut.flatten([t.split('.') for t in _tags])
    _tags = [t.lower() for t in _tags]
    _tags = [tag_alias_map.get(t, t) for t in _tags]
    for key, vals in regex_alias_map.items():
        pat = ut.regex_or(vals)
        _tags = [key if re.match(pat, t) else t for t in _tags]
    pat = ut.regex_or(invalid_tag_patterns)
    _tags = [t for t in _tags if not re.match(pat, t)]
    _tags = ut.unique_ordered(_tags)
    return _tags

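# Illustrative sketch (not from the original source): a self-contained run of
# the split/alias/filter pipeline above. tag_alias_map, regex_alias_map, and
# invalid_tag_patterns are module-level tables in the real code; the tiny
# versions here are made up for the demo.
def _demo_parse_fname_tags_sketch():
    import re
    from os.path import splitext
    tag_alias_map = {'lhs': 'left'}
    regex_alias_map = {'flank': ['flank.*']}
    invalid_tag_patterns = [r'\d+']

    fname = 'IMG_1234_lhs.flankshot.jpg'
    _tags = [splitext(fname)[0]]
    _tags = [t2 for t in _tags for t2 in t.split('_')]
    _tags = [t2 for t in _tags for t2 in t.split('.')]
    _tags = [t.lower() for t in _tags]
    _tags = [tag_alias_map.get(t, t) for t in _tags]
    for key, vals in regex_alias_map.items():
        pat = '|'.join(vals)  # plain stand-in for ut.regex_or
        _tags = [key if re.match(pat, t) else t for t in _tags]
    pat = '|'.join(invalid_tag_patterns)
    _tags = [t for t in _tags if not re.match(pat, t)]
    assert list(dict.fromkeys(_tags)) == ['img', 'left', 'flank']
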
def make_ibeis_matching_graph(ibs, qaid_list, daids_list, scores_list):
    print('make_ibeis_matching_graph')
    aid1_list = ut.flatten([[qaid] * len(daids)
                            for qaid, daids in zip(qaid_list, daids_list)])
    aid2_list = ut.flatten(daids_list)
    unique_aids = list(set(aid2_list + qaid_list))
    score_list = ut.flatten(scores_list)

    # Make a graph between the chips
    nodes = list(zip(unique_aids))
    edges = list(zip(aid1_list, aid2_list, score_list))
    node_lbls = [('aid', 'int')]
    edge_lbls = [('weight', 'float')]
    netx_graph = make_netx_graph(nodes, edges, node_lbls, edge_lbls)
    return netx_graph

def get_name_imgsetids(ibs, nid_list):
    r"""
    RESTful:
        Method: GET
        URL:    /api/name/imgsetids/
    """
    import utool as ut
    name_aids_list = ibs.get_name_aids(nid_list)
    name_aid_list = ut.flatten(name_aids_list)
    name_gid_list = ibs.get_annot_gids(name_aid_list)
    name_imgsetids_list = ibs.get_image_imgsetids(name_gid_list)
    name_imgsetid_list = ut.flatten(name_imgsetids_list)
    name_imgsetids = list(set(name_imgsetid_list))
    return name_imgsetids

def load_gztest(ibs):
    r"""
    CommandLine:
        python -m ibeis.algo.hots.special_query --test-load_gztest

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.devcases import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('GZ_ALL')
    """
    from os.path import join
    from ibeis.algo.hots import match_chips4 as mc4
    dir_ = ut.get_module_dir(mc4)
    eval_text = ut.read_from(join(dir_, 'GZ_TESTTUP.txt'))
    testcases = eval(eval_text)
    count_dict = ut.count_dict_vals(testcases)
    print(ut.dict_str(count_dict))

    testtup_list = ut.flatten(ut.dict_take_list(testcases, [
        'vsone_wins', 'vsmany_outperformed', 'vsmany_dominates', 'vsmany_wins']))
    qaid_list = [testtup.qaid_t for testtup in testtup_list]
    visual_uuids = ibs.get_annot_visual_uuids(qaid_list)
    visual_uuids

def group_review():
    prefill = request.args.get('prefill', '')
    if len(prefill) > 0:
        ibs = current_app.ibs
        aid_list = ibs.get_valid_aids()
        bad_species_list, bad_viewpoint_list = ibs.validate_annot_species_viewpoint_cnn(aid_list)

        GROUP_BY_PREDICTION = True
        if GROUP_BY_PREDICTION:
            grouped_dict = ut.group_items(bad_viewpoint_list,
                                          ut.get_list_column(bad_viewpoint_list, 3))
            grouped_list = grouped_dict.values()
            regrouped_items = ut.flatten(ut.sortedby(grouped_list,
                                                     map(len, grouped_list)))
            candidate_aid_list = ut.get_list_column(regrouped_items, 0)
        else:
            candidate_aid_list = [bad_viewpoint[0]
                                  for bad_viewpoint in bad_viewpoint_list]
    elif request.args.get('aid_list', None) is not None:
        aid_list = request.args.get('aid_list', '')
        if len(aid_list) > 0:
            aid_list = aid_list.replace('[', '')
            aid_list = aid_list.replace(']', '')
            aid_list = aid_list.strip().split(',')
            candidate_aid_list = [int(aid_.strip()) for aid_ in aid_list]
        else:
            candidate_aid_list = ''
    else:
        candidate_aid_list = ''

    return appf.template(None, 'group_review',
                         candidate_aid_list=candidate_aid_list,
                         mode_list=appf.VALID_TURK_MODES)

def ensure_mana_list(manas=None, source=None):
    from mtgmonte import mtgobjs
    #if sources is None:
    #    source = None
    #else:
    #    source = None
    if manas is None:
        manas = []
    elif hasattr(manas, '_manas'):  # isinstance(manas, ManaSet):
        manas = manas._manas
    #elif isinstance(manas, mtgobjs.Mana):  # isinstance(manas, ManaSet):
    elif hasattr(manas, 'color'):
        manas = [manas]
    elif isinstance(manas, dict):  # isinstance(manas, ManaSet):
        manas = [mtgobjs.Mana(color, source, num) for color, num in manas.items()]
    elif isinstance(manas, six.string_types):
        tokens = tokenize_manacost(manas)
        manas = [mtgobjs.Mana(color, source, type_=type_) for color, type_ in tokens]
        # colors = manas.strip('{}')
        # manas = [mtgobjs.Mana(color, source) for color in colors]
    elif isinstance(manas, (list, tuple)):
        manas = ut.flatten([ensure_mana_list(m) for m in manas])
    else:
        raise ValueError('Cannot ensure unknown type=%r, manas=%r' % (type(manas), manas,))
    return manas

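# Illustrative sketch (not from the original source): the recursive
# list/tuple branch of ensure_mana_list flattens mixed inputs into a single
# list of Mana objects. Mana and the cost grammar here are simplified
# stand-ins, not the real mtgobjs API.
def _demo_ensure_mana_list_sketch():
    class Mana(object):
        def __init__(self, color, source=None, num=1):
            self.color = color

    def ensure_mana_list(manas=None, source=None):
        if manas is None:
            return []
        if hasattr(manas, 'color'):           # a single Mana object
            return [manas]
        if isinstance(manas, dict):           # {color: num}
            return [Mana(c, source, n) for c, n in manas.items()]
        if isinstance(manas, str):            # e.g. 'RRG'
            return [Mana(c, source) for c in manas.strip('{}')]
        if isinstance(manas, (list, tuple)):  # recurse, then flatten
            return [m for sub in manas for m in ensure_mana_list(sub, source)]
        raise ValueError('Cannot ensure unknown type=%r' % (type(manas),))

    flat = ensure_mana_list(['RG', Mana('U'), {'B': 2}])
    assert [m.color for m in flat] == ['R', 'G', 'U', 'B']
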
def parse_items(cfg):
    r"""
    Returns:
        list: param_list

    CommandLine:
        python -m dtool.base --exec-parse_items

    Example:
        >>> # ENABLE_DOCTEST
        >>> from dtool.base import *  # NOQA
        >>> from dtool.example_depcache import DummyVsManyConfig
        >>> cfg = DummyVsManyConfig()
        >>> param_list = cfg.parse_items()
        >>> result = ('param_list = %s' % (ut.repr2(param_list, nl=1),))
        >>> print(result)
    """
    namespace_param_list = cfg.parse_namespace_config_items()
    param_names = ut.get_list_column(namespace_param_list, 1)
    needs_namespace_keys = ut.find_duplicate_items(param_names)
    param_list = ut.get_list_column(namespace_param_list, [1, 2])
    # prepend namespaces to variables that need it
    for idx in ut.flatten(needs_namespace_keys.values()):
        name = namespace_param_list[idx][0]
        param_list[idx][0] = name + '_' + param_list[idx][0]
    duplicate_keys = ut.find_duplicate_items(ut.get_list_column(param_list, 0))
    # hack to let version through
    import utool
    with utool.embed_on_exception_context:
        assert len(duplicate_keys) == 0, (
            'Configs have duplicate names: %r' % duplicate_keys)
    return param_list

def test_vsone_verified(ibs):
    """
    hack in vsone-reranking

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.all_imports import *  # NOQA
        >>> #reload_all()
        >>> from ibeis.algo.hots.automated_matcher import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('PZ_MTEST')
        >>> test_vsone_verified(ibs)
    """
    import plottool as pt
    #qaids = ibs.get_easy_annot_rowids()
    nids = ibs.get_valid_nids(filter_empty=True)
    grouped_aids_ = ibs.get_name_aids(nids)
    grouped_aids = list(filter(lambda x: len(x) > 1, grouped_aids_))
    items_list = grouped_aids

    sample_aids = ut.flatten(ut.sample_lists(items_list, num=2, seed=0))
    qaid2_qres, qreq_ = query_vsone_verified(ibs, sample_aids, sample_aids)

    for cm in ut.InteractiveIter(list(six.itervalues(qaid2_qres))):
        pt.close_all_figures()
        fig = cm.ishow_top(ibs)
        fig.show()

def get_set_groundfalse(ibs, qaids):
    # get groundfalse annots relative to the entire set
    valid_nids = ibs.get_valid_nids()
    qnids = ibs.get_annot_nids(qaids)
    nid_list = list(set(valid_nids) - set(qnids))
    aids_list = ibs.get_name_aids(nid_list)
    return ut.flatten(aids_list)

def get_allkeys(dict_):
    import utool as ut
    if not isinstance(dict_, dict):
        return []
    subkeys = [[key] + get_allkeys(val) for key, val in dict_.items()]
    return ut.unique_ordered(ut.flatten(subkeys))

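# Illustrative sketch (not from the original source): the same recursion with
# plain-python stand-ins for ut.flatten / ut.unique_ordered, run on made-up
# nested data.
def _demo_get_allkeys_sketch():
    def get_allkeys(dict_):
        if not isinstance(dict_, dict):
            return []
        subkeys = [[key] + get_allkeys(val) for key, val in dict_.items()]
        flat = [k for sub in subkeys for k in sub]
        return list(dict.fromkeys(flat))  # order-preserving unique

    nested = {'a': {'b': 1, 'c': {'d': 2}}, 'e': 3}
    assert get_allkeys(nested) == ['a', 'b', 'c', 'd', 'e']
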
def get_param_info_list(nnweight_cfg):
    # new way to try and specify config options.
    # not sure if i like it yet
    param_info_list = ut.flatten([
        [
            ut.ParamInfo('ratio_thresh', None, type_=float, hideif=None),
            ut.ParamInfoBool('lnbnn_on', True, hideif=False),
            ut.ParamInfoBool('const_on', False, hideif=False),
            ut.ParamInfoBool('borda_on', False, hideif=False),
            ut.ParamInfoBool('lograt_on', False, hideif=False),
            #ut.ParamInfoBool('loglnbnn_on', False, hideif=False),
            #ut.ParamInfoBool('logdist_on', False, hideif=False),
            #ut.ParamInfoBool('dist_on', False, hideif=False),
            ut.ParamInfoBool('normonly_on', False, hideif=False),
            ut.ParamInfoBool('bar_l2_on', False, hideif=False),
            ut.ParamInfoBool('cos_on', False, hideif=False),
            ut.ParamInfoBool('fg_on', True, hideif=False),
            ut.ParamInfo('normalizer_rule', 'last', '', valid_values=['last', 'name']),
            ut.ParamInfo('lnbnn_normer', None, hideif=None,
                         help_='config string for lnbnn score normalizer'),
            ut.ParamInfo('lnbnn_norm_thresh', .5, type_=float,
                         hideif=lambda cfg: not cfg['lnbnn_normer'],
                         help_='config string for lnbnn score normalizer'),
            #
            ut.ParamInfoBool('can_match_sameimg', False, 'sameimg', hideif=False),
            ut.ParamInfoBool('can_match_samename', True, 'samename', hideif=True),
            # Hacked in
            #ut.ParamInfoBool('root_sift_on', False, hideif=False),
            ut.ParamInfoBool('sqrd_dist_on', False, hideif=True),
            #ut.ParamInfoBool('sqrd_dist_on', True, hideif=True),
        ],
    ])
    return param_info_list

def convert_multigraph_to_graph(G):
    """
    For each duplicate edge make a dummy node.
    TODO: preserve data, keys, and directedness
    """
    import utool as ut
    edge_list = list(G.edges())
    node_list = list(G.nodes())
    dupitem_to_idx = ut.find_duplicate_items(edge_list)
    node_to_freq = ut.ddict(lambda: 0)
    remove_idxs = ut.flatten(dupitem_to_idx.values())
    ut.delete_items_by_index(edge_list, remove_idxs)

    for dup_edge in dupitem_to_idx.keys():
        freq = len(dupitem_to_idx[dup_edge])
        u, v = dup_edge[0:2]
        pair_node = dup_edge
        pair_nodes = [pair_node + tuple([count]) for count in range(freq)]
        for pair_node in pair_nodes:
            node_list.append(pair_node)
            for node in dup_edge:
                node_to_freq[node] += freq
            edge_list.append((u, pair_node))
            edge_list.append((pair_node, v))

    import networkx as nx
    G2 = nx.DiGraph()
    G2.add_edges_from(edge_list)
    G2.add_nodes_from(node_list)
    return G2

def get_annot_annotmatch_tags(ibs, aid_list):
    r"""
    Args:
        ibs (IBEISController):  ibeis controller object
        aid_list (list):  list of annotation rowids

    Returns:
        list: annotmatch_tags_list

    CommandLine:
        python -m ibeis.tag_funcs --exec-get_annot_annotmatch_tags --db GZ_Master1

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.tag_funcs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> aid_list = ibs.get_valid_aids()
        >>> all_tags = ut.flatten(get_annot_annotmatch_tags(ibs, aid_list))
        >>> tag_hist = ut.dict_hist(all_tags)
        >>> ut.print_dict(tag_hist)
    """
    annotmatch_rowids = ibs.get_annotmatch_rowids_from_aid(aid_list)
    unflat_tags_list = ibs.unflat_map(ibs.get_annotmatch_case_tags, annotmatch_rowids)
    annotmatch_tags_list = [list(set(ut.flatten(_unflat_tags)))
                            for _unflat_tags in unflat_tags_list]
    return annotmatch_tags_list

def unnest_data(data):
    unnested_cols = list(zip(ut.take(data, idxs2)))
    nested_cols = ut.take(data, idxs1)
    grouped_items = [nested_cols, unnested_cols]
    groupxs = [idxs1, idxs2]
    unflat = ut.ungroup(grouped_items, groupxs, nested_nCols - 1)
    return tuple(ut.flatten(unflat))

def make_ibeis_cell_list(ibs):
    cell_template_list = get_default_cell_template_list(ibs)
    autogen_str = make_autogen_str()
    dbname = ibs.get_dbname()
    #if ut.get_argflag('--hacktestscore'):
    #    annotconfig_list_body = ut.codeblock(
    #        '''
    #        'timectrl',
    #        '''
    #    )
    #else:
    default_acfgstr = ut.get_argval('-a', type_=str, default='default:is_known=True')
    annotconfig_list_body = ut.codeblock(
        ut.repr2(default_acfgstr) + '\n' +
        ut.codeblock('''
        # See ibeis/expt/annotation_configs.py for names of annot configuration options
        #'default:has_any=(query,),dpername=1,exclude_reference=True',
        #'default:is_known=True',
        #'default:qsame_encounter=True,been_adjusted=True,excluderef=True'
        #'default:qsame_encounter=True,been_adjusted=True,excluderef=True,qsize=10,dsize=20',
        #'default:require_timestamp=True,min_timedelta=3600',
        #'default:species=primary',
        #'timectrl:',
        #'timectrl:been_adjusted=True,dpername=3',
        #'timectrl:qsize=10,dsize=20',
        #'unctrl:been_adjusted=True',
        ''')
    )
    #if ut.get_argflag('--hacktestscore'):
    #    pipeline_list_body = ut.codeblock(
    #        '''
    #        # See ibeis/algo/Config.py for names of pipeline config options
    #        'default:lnbnn_on=True,bar_l2_on=False,normonly_on=False,fg_on=True',
    #        'default:lnbnn_on=False,bar_l2_on=True,normonly_on=False,fg_on=True',
    #        'default:lnbnn_on=False,bar_l2_on=False,normonly_on=True,fg_on=True',
    #        'default:lnbnn_on=True,bar_l2_on=False,normonly_on=False,fg_on=False',
    #        'default:lnbnn_on=False,bar_l2_on=True,normonly_on=False,fg_on=False',
    #        'default:lnbnn_on=False,bar_l2_on=False,normonly_on=True,fg_on=False',
    #        '''
    #    )
    #elif True:
    default_pcfgstr_list = ut.get_argval(('-t', '-p'), type_=list, default='default')
    default_pcfgstr = ut.repr3(default_pcfgstr_list, nobr=True)
    pipeline_list_body = ut.codeblock(
        default_pcfgstr + '\n' +
        ut.codeblock('''
        #'default',
        #'default:K=1',
        #'default:K=1,AI=False',
        #'default:K=1,AI=False,QRH=True',
        #'default:K=1,RI=True,AI=False',
        #'default:K=1,adapteq=True',
        #'default:fg_on=[True,False]',
        ''')
    )
    locals_ = locals()
    _format = partial(format_cells, locals_=locals_)
    cell_list = ut.flatten(map(_format, cell_template_list))
    return cell_list

def dump_word_patches(ibs, vocabdir, invindex, wx_sample, metrics):
    """
    Dumps word member patches to disk
    """
    wx2_dpath = get_word_dpaths(vocabdir, wx_sample, metrics)

    # Write each patch from each annotation to disk
    idx2_daid = invindex.idx2_daid
    daids = invindex.daids
    idx2_dfx = invindex.idx2_dfx
    #maws_list = invindex.idx2_wxs[idxs]

    # Loop over all annotations skipping the ones without any words in the sample
    ax2_idxs = [np.where(idx2_daid == aid_)[0]
                for aid_ in ut.progiter(daids, 'Building Forward Index: ', freq=100)]
    patchdump_iter = ut.progiter(zip(daids, ax2_idxs), freq=1,
                                 lbl='Dumping Selected Patches: ', num=len(daids))
    for aid, idxs in patchdump_iter:
        wxs_list = invindex.idx2_wxs[idxs]
        if len(set(ut.flatten(wxs_list)).intersection(set(wx_sample))) == 0:
            # skip this annotation
            continue
        fx_list = idx2_dfx[idxs]
        chip = ibs.get_annot_chips(aid)
        chip_kpts = ibs.get_annot_kpts(aid)
        nid = ibs.get_annot_name_rowids(aid)
        patches, subkpts = ptool.get_warped_patches(chip, chip_kpts)
        for fx, wxs, patch in zip(fx_list, wxs_list, patches):
            assert len(wxs) == 1, 'did you multiassign the database? If so implement it here too'
            for k, wx in enumerate(wxs):
                if wx not in wx_sample:
                    continue
                patch_fname = 'patch_nid=%04d_aid=%04d_fx=%04d_k=%d' % (nid, aid, fx, k)
                fpath = join(wx2_dpath[wx], patch_fname)
                #gtool.imwrite(fpath, patch, fallback=True)
                gtool.imwrite_fallback(fpath, patch)

def union(cls, *sets):
    """
    >>> from utool.util_set import *  # NOQA
    """
    import utool as ut
    lists_ = ut.flatten([list(s) for s in sets])
    return cls(lists_)

def modify_tags(tags_list, direct_map=None, regex_map=None, regex_aug=None,
                delete_unmapped=False, return_unmapped=False,
                return_map=False):
    import utool as ut
    tag_vocab = ut.unique(ut.flatten(tags_list))
    alias_map = ut.odict()
    if regex_map is not None:
        alias_map.update(**ut.build_alias_map(regex_map, tag_vocab))
    if direct_map is not None:
        alias_map.update(ut.odict(direct_map))

    new_tags_list = tags_list
    new_tags_list = ut.alias_tags(new_tags_list, alias_map)

    if regex_aug is not None:
        alias_aug = ut.build_alias_map(regex_aug, tag_vocab)
        aug_tags_list = ut.alias_tags(new_tags_list, alias_aug)
        new_tags_list = [ut.unique(t1 + t2)
                         for t1, t2 in zip(new_tags_list, aug_tags_list)]

    unmapped = list(set(tag_vocab) - set(alias_map.keys()))
    if delete_unmapped:
        new_tags_list = [ut.setdiff(tags, unmapped) for tags in new_tags_list]

    toreturn = None
    if return_map:
        toreturn = (alias_map,)
    if return_unmapped:
        # guard against return_unmapped=True with return_map=False,
        # which previously raised a TypeError on None + tuple
        toreturn = (toreturn or tuple()) + (unmapped,)
    if toreturn is None:
        toreturn = new_tags_list
    else:
        toreturn = (new_tags_list,) + toreturn
    return toreturn

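# Illustrative sketch (not from the original source): the alias-map stage of
# modify_tags in plain python. The regex_map format -- (pattern, replacement)
# pairs -- is an assumption about what ut.build_alias_map consumes, made up
# for this demo.
def _demo_alias_map_sketch():
    import re
    tags_list = [['left-side', 'blurry'], ['rightside']]
    regex_map = [('.*side', 'side')]
    direct_map = {'blurry': 'quality:blurry'}

    tag_vocab = {t for tags in tags_list for t in tags}
    alias_map = {}
    for pat, repl in regex_map:
        alias_map.update({t: repl for t in tag_vocab if re.match(pat, t)})
    alias_map.update(direct_map)

    new_tags_list = [[alias_map.get(t, t) for t in tags] for tags in tags_list]
    assert new_tags_list == [['side', 'quality:blurry'], ['side']]
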
def get_topname_training_idxs(cm, num=5):
    """
    gets the index of the annots in the top groundtrue name and the top
    groundfalse names.

    Args:
        cm (ibeis.ChipMatch): object of feature correspondences and scores
        num (int): number of false names (default = 5)

    Returns:
        tuple: (tp_idxs, tn_idxs)
            cm.daid_list[tp_idxs] are all of the annotations in the correct name.
            cm.daid_list[tn_idxs] are all of the annotations in the top
            `num_false` incorrect names.

    CommandLine:
        python -m ibeis --tf get_topname_training_idxs --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.scorenorm import *  # NOQA
        >>> import ibeis
        >>> cm, qreq_ = ibeis.testdata_cm('PZ_MTEST', a='default:dindex=0:10,qindex=0:1', t='best')
        >>> num = 1
        >>> (tp_idxs, tn_idxs) = get_topname_training_idxs(cm, num)
        >>> result = ('(tp_idxs, tn_idxs) = %s' % (ut.repr2((tp_idxs, tn_idxs), nl=1),))
        >>> print(result)
        (tp_idxs, tn_idxs) = (
            np.array([0, 1, 2], dtype=np.int64),
            [3, 4, 5, 6],
        )
    """
    if num is None:
        num = 5
    sortx = cm.name_argsort()
    sorted_nids = vt.take2(cm.unique_nids, sortx)
    sorted_groupxs = ut.take(cm.name_groupxs, sortx)
    # name ranks of the groundtrue name
    tp_ranks = np.where(sorted_nids == cm.qnid)[0]
    if len(tp_ranks) == 0:
        #if ut.STRICT:
        #    raise Exception('tp_ranks=0')
        #else:
        raise UnbalancedExampleException('tp_ranks=0')

    # name ranks of the top groundfalse names
    tp_rank = tp_ranks[0]
    tn_ranks = [rank for rank in range(num + 1)
                if rank != tp_rank and rank < len(sorted_groupxs)]
    if len(tn_ranks) == 0:
        #if ut.STRICT:
        #    raise Exception('tn_ranks=0')
        #else:
        raise UnbalancedExampleException('tn_ranks=0')
    # annot idxs of the examples
    tp_idxs = sorted_groupxs[tp_rank]
    tn_idxs = ut.flatten(ut.take(sorted_groupxs, tn_ranks))
    return tp_idxs, tn_idxs

def ensure_dependencies(request):
    r"""
    CommandLine:
        python -m dtool.base --exec-BaseRequest.ensure_dependencies

    Example:
        >>> # ENABLE_DOCTEST
        >>> from dtool.base import *  # NOQA
        >>> from dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> request = depc.new_request('vsmany', [1, 2], [2, 3, 4])
        >>> request.ensure_dependencies()
    """
    import networkx as nx
    depc = request.depc
    if False:
        dependencies = nx.ancestors(depc.graph, request.tablename)
        subgraph = depc.graph.subgraph(set.union(dependencies, {request.tablename}))
        dependency_order = nx.topological_sort(subgraph)
        root = dependency_order[0]
        [nx.algorithms.dijkstra_path(subgraph, root, start)[:-1] +
         nx.algorithms.dijkstra_path(subgraph, start, request.tablename)
         for start in dependency_order]
    graph = depc.graph
    root = list(nx.topological_sort(graph))[0]
    edges = graph.edges()
    #parent_to_children = ut.edges_to_adjacency_list(edges)
    child_to_parents = ut.edges_to_adjacency_list([t[::-1] for t in edges])
    to_root = {request.tablename:
               ut.paths_to_root(request.tablename, root, child_to_parents)}
    from_root = ut.reverse_path(to_root, root, child_to_parents)
    dependency_levels_ = ut.get_levels(from_root)
    dependency_levels = ut.longest_levels(dependency_levels_)

    true_order = ut.flatten(dependency_levels)[1:-1]
    #print('[req] Ensuring %s request dependencies: %r' % (request, true_order,))
    ut.colorprint(
        '[req] Ensuring request %s dependencies: %r' % (request, true_order,),
        'yellow')
    for tablename in true_order:
        table = depc[tablename]
        if table.ismulti:
            pass
        else:
            # HACK FOR IBEIS
            all_aids = ut.flat_unique(request.qaids, request.daids)
            depc.get_rowids(tablename, all_aids)
            pass
        pass

    #zip(depc.get_implicit_edges())
    #zip(depc.get_implicit_edges())
    #raise NotImplementedError('todo')
    #depc = request.depc
    #parent_rowids = request.parent_rowids
    #config = request.config
    #rowid_dict = depc.get_all_descendant_rowids(
    #    request.tablename, root_rowids, config=config)
    pass

def compute_annot_occurrence_ids(ibs, aid_list):
    from ibeis.algo.preproc import preproc_occurrence
    gid_list = ibs.get_annot_gids(aid_list)
    gid2_aids = ut.group_items(aid_list, gid_list)
    flat_imgsetids, flat_gids = preproc_occurrence.ibeis_compute_occurrences(
        ibs, gid_list, seconds_thresh=4 * 60 * 60, verbose=False)
    occurid2_gids = ut.group_items(flat_gids, flat_imgsetids)
    occurid2_aids = {oid: ut.flatten(ut.take(gid2_aids, gids))
                     for oid, gids in occurid2_gids.items()}
    return occurid2_aids

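# Illustrative sketch (not from the original source): the two-level regroup
# above -- annots grouped by image, images grouped by occurrence, then
# flattened back to per-occurrence annot lists -- with made-up ids in plain
# python dicts.
def _demo_occurrence_regroup_sketch():
    aid_list = [1, 2, 3, 4, 5]
    gid_of_aid = {1: 10, 2: 10, 3: 11, 4: 12, 5: 12}
    occ_of_gid = {10: 'occ_a', 11: 'occ_a', 12: 'occ_b'}

    gid2_aids = {}
    for aid in aid_list:
        gid2_aids.setdefault(gid_of_aid[aid], []).append(aid)

    occurid2_aids = {}
    for gid, occid in occ_of_gid.items():
        occurid2_aids.setdefault(occid, []).extend(gid2_aids[gid])
    assert occurid2_aids == {'occ_a': [1, 2, 3], 'occ_b': [4, 5]}
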
def id_algo_bc_dtw(depc, qaid_list, daid_list, config):
    r"""
    CommandLine:
        python -m ibeis_flukematch.plugin --exec-id_algo_bc_dtw:0 --show

        # IBEIS Experiments
        ibeis -e draw_cases --db humpbacks --show \
            -a default:has_any=hasnotch,mingt=2,size=50 \
            -t default:proot=BC_DTW -f :fail=False,index=0:3,sortdsc=gtscore,max_pername=1

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_flukematch.plugin import *  # NOQA
        >>> import ibeis
        >>> # Setup Inputs
        >>> ibs, aid_list = ibeis.testdata_aids(
        >>>     defaultdb='humpbacks', a='default:has_any=hasnotch,pername=2,mingt=2,size=10')
        >>> depc = ibs.depc
        >>> root_rowids = list(zip(*ut.iprod(aid_list, aid_list)))
        >>> qaid_list, daid_list = root_rowids
        >>> cfgdict = dict(weights=None, decision='average', sizes=(5, 10, 15, 20))
        >>> config = BC_DTW_Config(**cfgdict)
        >>> # Call function via request
        >>> request = BC_DTW_Request.new(depc, aid_list, aid_list)
        >>> am_list1 = request.execute()
        >>> # Call function via depcache
        >>> prop_list = depc.get('BC_DTW', root_rowids, config=config)
        >>> # Call function normally
        >>> score_list = list(id_algo_bc_dtw(depc, qaid_list, daid_list, config))
        >>> am_list2 = list(get_match_results(depc, qaid_list, daid_list, score_list, config))
        >>> assert score_list == prop_list, 'error in cache'
        >>> assert np.all(am_list1[0].score_list == am_list2[0].score_list)
        >>> ut.quit_if_noshow()
        >>> am = am_list2[0]
        >>> am.ishow_analysis(request)
        >>> ut.show_if_requested()
    """
    print('Executing BC_DTW')
    sizes = list(range(config.block_curv_cfg['csize_min'],
                       config.block_curv_cfg['csize_max'] + 1,
                       config.block_curv_cfg['csize_step']))
    curv_weights = curv_weight_gen(config['weight_import'], sizes)
    # Group pairs by qaid
    all_aids = np.unique(ut.flatten([qaid_list, daid_list]))
    all_curves = depc.get('Block_Curvature', all_aids, 'curvature', config=config)
    aid_to_curves = dict(zip(all_aids, all_curves))
    for qaid, daid in zip(qaid_list, daid_list):
        query_curv = aid_to_curves[qaid]
        db_curv = aid_to_curves[daid]
        if query_curv is None or db_curv is None:
            #print("Comparison of qaid: %d and daid: %d -- one of the curvatures is None, skipping" % (qaid, daid))
            yield None
        else:
            # determine window as a percentage of the query trailing edge
            window_size = int(math.ceil((config['window'] / 100) * query_curv.shape[0]))
            distance = get_distance_curvweighted(query_curv, db_curv, curv_weights,
                                                 window=window_size)
            score = np.exp(-distance / 50)
            yield (score,)

def purge_ensure_one_annot_per_images(ibs):
    """
    pip install Pipe
    """
    # Purge all but one annotation
    images = ibs.images()
    # images.aids
    groups = images._annot_groups
    import numpy as np

    # Find the largest annotation per image
    large_masks = [ut.index_to_boolmask([np.argmax(x)], len(x))
                   for x in groups.bbox_area]
    small_masks = ut.lmap(ut.not_list, large_masks)
    # Remove all but the largest annotation
    small_aids = ut.zipcompress(groups.aid, small_masks)
    small_aids = ut.flatten(small_aids)

    # Fix any empty images
    images = ibs.images()
    empty_images = ut.where(np.array(images.num_annotations) == 0)
    logger.info('empty_images = %r' % (empty_images,))
    # list(map(basename, map(dirname, images.uris_original)))

    def VecPipe(func):
        import pipe

        @pipe.Pipe
        def wrapped(sequence):
            return map(func, sequence)
            # return (None if item is None else func(item) for item in sequence)
        return wrapped

    name_list = list(images.uris_original | VecPipe(dirname) | VecPipe(basename))
    aids_list = images.aids
    ut.assert_all_eq(list(aids_list | VecPipe(len)))
    annots = ibs.annots(ut.flatten(aids_list))
    annots.names = name_list

def get_chip_imagesets(ibs):
    gid2_ex, ex2_gxs = compute_occurrences(ibs)  # NOQA
    # Build imageset to chips from imageset to images
    ex2_cxs = [None for _ in range(len(ex2_gxs))]
    for ex, gids in enumerate(ex2_gxs):
        ex2_cxs[ex] = utool.flatten(ibs.gid2_cxs(gids))
    # optional
    # resort imagesets by number of chips
    ex2_nCxs = list(map(len, ex2_cxs))
    ex2_cxs = [y for (x, y) in sorted(zip(ex2_nCxs, ex2_cxs))]
    return ex2_cxs

def demodata_mtest_infr(state='empty'):
    import wbia
    ibs = wbia.opendb(db='PZ_MTEST')
    annots = ibs.annots()
    names = list(annots.group_items(annots.nids).values())
    ut.shuffle(names, rng=321)
    test_aids = ut.flatten(names[1::2])
    infr = wbia.AnnotInference(ibs, test_aids, autoinit=True)
    infr.reset(state=state)
    return infr

def unrelate_images_and_imagesets(ibs, gid_list, imgsetid_list):
    """
    Seems to unrelate specific image imageset pairs

    Args:
        ibs (IBEISController):  ibeis controller object
        gid_list (list):
        imgsetid_list (list):

    Returns:
        list: gids_list

    CommandLine:
        python -m ibeis.control.manual_gsgrelate_funcs --test-unrelate_images_and_imagesets
        python -c "import utool; print(utool.auto_docstr('ibeis.control.manual_gsgrelate_funcs', 'delete_gsgr_image_relations'))"

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.control.manual_gsgrelate_funcs import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('testdb1')
        >>> # Reset and compute imagesets
        >>> ibs.delete_all_imagesets()
        >>> ibs.compute_occurrences()
        >>> imgsetid_list = ibs.get_valid_imgsetids()
        >>> gids_list = ibs.get_imageset_gids(imgsetid_list)
        >>> assert len(imgsetid_list) == 2
        >>> assert len(gids_list) == 2
        >>> assert len(gids_list[0]) == 7
        >>> assert len(gids_list[1]) == 6
        >>> # Add imageset 2 gids to imageset 1 so an image belongs to multiple imagesets
        >>> imgset2_gids = gids_list[1][0:1]
        >>> imgset1_imgsetids = imgsetid_list[0:1]
        >>> ibs.add_image_relationship(imgset2_gids, imgset1_imgsetids)
        >>> # Now delete the image from imageset 2
        >>> imgset2_imgsetids = imgsetid_list[1:2]
        >>> # execute function
        >>> ibs.unrelate_images_and_imagesets(imgset2_gids, imgset2_imgsetids)
        >>> # verify results
        >>> ibs.print_egpairs_table()
        >>> imgsetid_list_ = ibs.get_valid_imgsetids()
        >>> gids_list_ = ibs.get_imageset_gids(imgsetid_list_)
        >>> result = str(gids_list_)
        >>> print(result)
        >>> # imgset2_gids should now only be in imageset1
        >>> assert imgset2_gids[0] in gids_list_[0]
        >>> assert imgset2_gids[0] not in gids_list_[1]
    """
    # WHAT IS THIS FUNCTION? FIXME CALLS WEIRD FUNCTION
    if ut.VERBOSE:
        print('[ibs] deleting %r image\'s imageset ids' % len(gid_list))
    gsgrid_list = ut.flatten(ibs.get_imageset_gsgrids(imgsetid_list=imgsetid_list,
                                                      gid_list=gid_list))
    ibs.db.delete_rowids(const.GSG_RELATION_TABLE, gsgrid_list)

def find_minority_class_ccs(infr):
    # Finds ccs involved in photobombs and incomparable cases
    pb_edges = [edge for edge, tags in infr.gen_edge_attrs('tags')
                if 'photobomb' in tags]
    incomp_edges = list(infr.incomp_graph.edges())
    minority_edges = pb_edges + incomp_edges
    minority_nids = set(infr.node_labels(*set(ut.flatten(minority_edges))))
    minority_ccs = [infr.pos_graph._ccs[nid] for nid in minority_nids]
    return minority_ccs

def get_name_shortlist_aids(daid_list, dnid_list, annot_score_list,
                            name_score_list, nid2_nidx, nNameShortList,
                            nAnnotPerName):
    r"""
    CommandLine:
        python -m wbia.algo.hots.scoring --test-get_name_shortlist_aids

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.algo.hots.scoring import *  # NOQA
        >>> daid_list        = np.array([11, 12, 13, 14, 15, 16, 17])
        >>> dnid_list        = np.array([21, 21, 21, 22, 22, 23, 24])
        >>> annot_score_list = np.array([ 6,  2,  3,  5,  6,  3,  2])
        >>> name_score_list  = np.array([ 8,  9,  5,  4])
        >>> nid2_nidx        = {21: 0, 22: 1, 23: 2, 24: 3}
        >>> nNameShortList, nAnnotPerName = 3, 2
        >>> args = (daid_list, dnid_list, annot_score_list, name_score_list,
        ...         nid2_nidx, nNameShortList, nAnnotPerName)
        >>> top_daids = get_name_shortlist_aids(*args)
        >>> result = str(top_daids)
        >>> print(result)
        [15, 14, 11, 13, 16]
    """
    unique_nids, groupxs = vt.group_indices(np.array(dnid_list))
    grouped_annot_scores = vt.apply_grouping(annot_score_list, groupxs)
    grouped_daids = vt.apply_grouping(np.array(daid_list), groupxs)
    # Ensure name score list is aligned with the unique_nids
    aligned_name_score_list = name_score_list.take(ut.dict_take(nid2_nidx, unique_nids))
    # Sort each group by the name score
    group_sortx = aligned_name_score_list.argsort()[::-1]
    _top_daid_groups = ut.take(grouped_daids, group_sortx)
    _top_annot_score_groups = ut.take(grouped_annot_scores, group_sortx)
    top_daid_groups = ut.listclip(_top_daid_groups, nNameShortList)
    top_annot_score_groups = ut.listclip(_top_annot_score_groups, nNameShortList)
    # Sort within each group by the annotation score
    top_daid_sortx_groups = [annot_score_group.argsort()[::-1]
                             for annot_score_group in top_annot_score_groups]
    top_sorted_daid_groups = vt.ziptake(top_daid_groups, top_daid_sortx_groups)
    top_clipped_daids = [ut.listclip(sorted_daid_group, nAnnotPerName)
                         for sorted_daid_group in top_sorted_daid_groups]
    top_daids = ut.flatten(top_clipped_daids)
    return top_daids

def make_bayes_notebook():
    r"""
    CommandLine:
        python -m wbia.unstable.demobayes --exec-make_bayes_notebook

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.unstable.demobayes import *  # NOQA
        >>> result = make_bayes_notebook()
        >>> print(result)
    """
    from wbia.templates import generate_notebook

    initialize = ut.codeblock(
        r"""
        # STARTBLOCK
        import os
        os.environ['UTOOL_NO_CNN'] = 'True'
        from wbia.unstable.demobayes import *  # NOQA
        # Matplotlib stuff
        import matplotlib as mpl
        %matplotlib inline
        %load_ext autoreload
        %autoreload
        from IPython.core.display import HTML
        HTML("<style>body .container { width:99% !important; }</style>")
        # ENDBLOCK
        """
    )
    cell_list_def = [
        initialize,
        show_model_templates,
        demo_modes,
        demo_name_annot_complexity,
        # demo_model_idependencies,
        demo_single_add,
        demo_ambiguity,
        demo_conflicting_evidence,
        demo_annot_idependence_overlap,
    ]

    def format_cell(cell):
        if ut.is_funclike(cell):
            header = '# ' + ut.to_title_caps(ut.get_funcname(cell))
            code = (header, ut.get_func_sourcecode(cell, stripdef=True, stripret=True))
        else:
            code = (None, cell)
        return generate_notebook.format_cells(code)

    cell_list = ut.flatten([format_cell(cell) for cell in cell_list_def])
    nbstr = generate_notebook.make_notebook(cell_list)
    logger.info('nbstr = %s' % (nbstr,))
    fpath = 'demobayes.ipynb'
    ut.writeto(fpath, nbstr)
    ut.startfile(fpath)

def ensure_flatiterable(input_):
    if isinstance(input_, six.string_types):
        input_ = ut.fuzzy_int(input_)
    if isinstance(input_, int) or not ut.isiterable(input_):
        return [input_]
    elif isinstance(input_, (list, tuple)):
        #print(input_)
        if len(input_) > 0 and ut.isiterable(input_[0]):
            return ut.flatten(input_)
        return input_
    else:
        # use % formatting so the message actually interpolates
        raise TypeError('cannot ensure %r input_=%r is iterable' %
                        (type(input_), input_))

def glossterms():
    re_glossterm = ut.named_field('glossterm', '.' + ut.REGEX_NONGREEDY)
    pat = r'\\glossterm{' + re_glossterm + '}'
    tup = ut.grep(pat, fpath_list=testdata_fpaths(), verbose=True)
    found_fpath_list, found_lines_list, found_lxs_list = tup
    glossterm_list = []
    for line in ut.flatten(found_lines_list):
        match = re.search(pat, line)
        glossterm = match.groupdict()['glossterm']
        glossterm_list.append(glossterm)
    print('Glossary Terms: ')
    print(ut.repr2(ut.dict_hist(glossterm_list), nl=True, strvals=True))

def define_model(cpd_list):
    """
    Custom extensions of pgmpy model
    """
    input_graph = ut.flatten([
        [(evar, cpd.variable) for evar in cpd.evidence]
        for cpd in cpd_list if cpd.evidence is not None
    ])
    model = pgmpy.models.BayesianModel(input_graph)
    model.add_cpds(*cpd_list)
    customize_model(model)
    return model

def get_am_rowids(self, internal=True):
    """
    if `internal is True` returns am rowids only between annotations in this
    Annots object, otherwise returns any am rowid that contains any aid in
    this Annots object.
    """
    ibs = self._ibs
    if internal:
        ams = ibs.get_annotmatch_rowids_between(self.aids, self.aids)
    else:
        ams = ut.flatten(ibs.get_annotmatch_rowids_from_aid(self.aids))
    return ams

def make_ibeis_cell_list(ibs):
    cell_template_list = get_default_cell_template_list(ibs)
    autogen_str = '# python -m ibeis autogen_ipynb --launch --dbdir %r' % (ibs.get_dbdir())
    # autogen_str = ut.make_autogen_str()
    dbname = ibs.get_dbname()
    dbdir = ibs.dbdir
    default_acfgstr = ut.get_argval('-a', type_=str, default='default:is_known=True')

    asreport = ut.get_argflag('--asreport')

    default_pcfgstr_list = ut.get_argval(('-t', '-p'), type_=list, default='default')
    default_pcfgstr = ut.repr3(default_pcfgstr_list, nobr=True)

    if asreport:
        annotconfig_list_body = ut.codeblock(ut.repr2(default_acfgstr))
        pipeline_list_body = ut.codeblock(default_pcfgstr)
    else:
        annotconfig_list_body = ut.codeblock(
            ut.repr2(default_acfgstr) + '\n' +
            ut.codeblock('''
            #'default:has_any=(query,),dpername=1,exclude_reference=True',
            #'default:is_known=True',
            #'default:is_known=True,minqual=good,require_timestamp=True,dcrossval_enc=1,view=left'
            #'default:qsame_imageset=True,been_adjusted=True,excluderef=True,qsize=10,dsize=20',
            #'default:require_timestamp=True,min_timedelta=3600',
            #'default:species=primary',
            #'unctrl:been_adjusted=True',
            #'timectrl:',
            #'timectrl:view=primary,minqual=good',
            #'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=1,joinme=1',
            #'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=1,joinme=1',
            #'default:minqual=ok,require_timestamp=True,view=left,dcrossval_enc=1,joinme=2',
            #'default:minqual=ok,require_timestamp=True,view=right,dcrossval_enc=1,joinme=2',
            ''')
        )
        pipeline_list_body = ut.codeblock(
            default_pcfgstr + '\n' +
            ut.codeblock('''
            #'default',
            #'default:K=1,AI=False,QRH=True',
            #'default:K=1,RI=True,AI=False',
            #'default:K=1,adapteq=True',
            #'default:fg_on=[True,False]',
            ''')
        )
    locals_ = locals()
    _format = partial(ut.format_cells, locals_=locals_)
    cell_list = ut.flatten(map(_format, cell_template_list))
    return cell_list

def build_vsone_shortlist(ibs, qaid2_qres_vsmany):
    """
    looks at the top N names in a vsmany query to apply vsone reranking

    Args:
        ibs (IBEISController):  ibeis controller object
        qaid2_qres_vsmany (dict):  dict of query result objects

    Returns:
        list: vsone_query_pairs

    CommandLine:
        python -m ibeis.algo.hots.special_query --test-build_vsone_shortlist

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.hots.special_query import *  # NOQA
        >>> ibs, valid_aids = testdata_special_query()
        >>> qaids = valid_aids[0:1]
        >>> daids = valid_aids[1:]
        >>> qaid2_qres_vsmany, qreq_vsmany_ = query_vsmany_initial(ibs, qaids, daids)
        >>> # execute function
        >>> vsone_query_pairs = build_vsone_shortlist(ibs, qaid2_qres_vsmany)
        >>> qaid, top_aid_list = vsone_query_pairs[0]
        >>> top_nid_list = ibs.get_annot_name_rowids(top_aid_list)
        >>> assert top_nid_list.index(1) == 0, 'name 1 should be rank 1'
        >>> assert len(top_nid_list) == 5, 'should have 3 names and up to 2 images per name'

        [(1, [3, 2, 6, 5, 4])]
        [(1, [2, 3, 6, 5, 4])]
    """
    vsone_query_pairs = []
    nNameShortlist = 3
    nAnnotPerName = 2
    for qaid, qres_vsmany in six.iteritems(qaid2_qres_vsmany):
        nscoretup = qres_vsmany.get_nscoretup()
        (sorted_nids, sorted_nscores, sorted_aids, sorted_scores) = nscoretup
        #top_nid_list = ut.listclip(sorted_nids, nNameShortlist)
        top_aids_list = ut.listclip(sorted_aids, nNameShortlist)
        top_aids_list_ = [ut.listclip(aids, nAnnotPerName) for aids in top_aids_list]
        top_aid_list = ut.flatten(top_aids_list_)
        # get top annotations belonging to the database query
        # TODO: allow annots not in daids to be included
        #top_unflataids = ibs.get_name_aids(top_nid_list, enable_unknown_fix=True)
        #flat_top_aids = ut.flatten(top_unflataids)
        #top_aid_list = ut.intersect_ordered(flat_top_aids, qres_vsmany.daids)
        vsone_query_pairs.append((qaid, top_aid_list))
    print('built %d pairs' % (len(vsone_query_pairs),))
    return vsone_query_pairs

def get_turk_annot_args(is_reviewed_func):
    """
    Helper to return aids in an imageset or a group review
    """
    ibs = current_app.ibs

    def _ensureid(_id):
        return None if _id == 'None' or _id == '' else int(_id)

    imgsetid = request.args.get('imgsetid', '')
    src_ag = request.args.get('src_ag', '')
    dst_ag = request.args.get('dst_ag', '')

    imgsetid = _ensureid(imgsetid)
    src_ag = _ensureid(src_ag)
    dst_ag = _ensureid(dst_ag)

    group_review_flag = src_ag is not None and dst_ag is not None
    if not group_review_flag:
        gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        reviewed_list = is_reviewed_func(ibs, aid_list)
    else:
        src_gar_rowid_list = ibs.get_annotgroup_gar_rowids(src_ag)
        dst_gar_rowid_list = ibs.get_annotgroup_gar_rowids(dst_ag)
        src_aid_list = ibs.get_gar_aid(src_gar_rowid_list)
        dst_aid_list = ibs.get_gar_aid(dst_gar_rowid_list)
        aid_list = src_aid_list
        reviewed_list = [src_aid in dst_aid_list for src_aid in src_aid_list]

    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list),)
    except ZeroDivisionError:
        progress = '0.00'
    aid = request.args.get('aid', '')
    if len(aid) > 0:
        aid = int(aid)
    else:
        aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
        if len(aid_list_) == 0:
            aid = None
        else:
            if group_review_flag:
                aid = aid_list_[0]
            else:
                aid = random.choice(aid_list_)

    previous = request.args.get('previous', None)

    print('aid = %r' % (aid,))
    #print(ut.dict_str(ibs.get_annot_info(aid)))
    print(ut.obj_str(ibs.get_annot_info(aid, default=True, nl=True)))
    return aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous

def compute_annot_occurrence_ids(ibs, aid_list):
    from ibeis.algo.preproc import preproc_occurrence
    gid_list = ibs.get_annot_gids(aid_list)
    gid2_aids = ut.group_items(aid_list, gid_list)
    config = {'seconds_thresh': 4 * 60 * 60}
    flat_imgsetids, flat_gids = preproc_occurrence.ibeis_compute_occurrences(
        ibs, gid_list, config=config, verbose=False)
    occurid2_gids = ut.group_items(flat_gids, flat_imgsetids)
    occurid2_aids = {
        oid: ut.flatten(ut.take(gid2_aids, gids))
        for oid, gids in occurid2_gids.items()
    }
    return occurid2_aids

def voting_uuid_list(ibs, team_list):
    blacklist = []
    image_uuid_list = ibs.get_image_uuids(ibs.get_valid_gids())
    # image_uuid_list = image_uuid_list[:100]
    annot_uuid_list = ut.flatten(
        ibs.get_image_annot_uuids(ibs.get_image_gids_from_uuid(image_uuid_list)))
    for team in team_list:
        logger.info('Checking team %r' % (team,))
        try:
            gid_list = team.get_image_gids_from_uuid(image_uuid_list)
            assert None not in gid_list
        except AssertionError:
            zipped = zip(image_uuid_list, gid_list)
            blacklist += [image_uuid for image_uuid, gid in zipped if gid is None]
        try:
            aid_list = team.get_annot_aids_from_uuid(annot_uuid_list)
            assert None not in aid_list
        except AssertionError:
            zipped = zip(annot_uuid_list, aid_list)
            blacklist += [
                ibs.get_image_uuids(
                    ibs.get_annot_image_rowids(
                        ibs.get_annot_aids_from_uuid(annot_uuid)))
                for annot_uuid, aid in zipped if aid is None
            ]
    blacklist = list(set(blacklist))
    assert None not in blacklist
    logger.info('Blacklisted %d / %d' % (len(blacklist), len(image_uuid_list)))
    image_uuid_list = list(set(image_uuid_list) - set(blacklist))
    annot_uuid_list = ut.flatten(
        ibs.get_image_annot_uuids(ibs.get_image_gids_from_uuid(image_uuid_list)))
    return image_uuid_list, annot_uuid_list

def get_aidpair_tags(ibs, aid1_list, aid2_list, directed=True):
    r"""
    Args:
        ibs (IBEISController):  ibeis controller object
        aid1_list (list):
        aid2_list (list):
        directed (bool): (default = True)

    Returns:
        list: tags_list

    CommandLine:
        python -m ibeis.tag_funcs --exec-get_aidpair_tags --db PZ_Master1 --tags Hard interesting

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.tag_funcs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> has_any = ut.get_argval('--tags', type_=list, default=None)
        >>> min_num = ut.get_argval('--min_num', type_=int, default=1)
        >>> aid_pairs = filter_aidpairs_by_tags(ibs, has_any=has_any, min_num=1)
        >>> aid1_list = aid_pairs.T[0]
        >>> aid2_list = aid_pairs.T[1]
        >>> undirected_tags = get_aidpair_tags(ibs, aid1_list, aid2_list, directed=False)
        >>> tagged_pairs = list(zip(aid_pairs.tolist(), undirected_tags))
        >>> print(ut.repr2(tagged_pairs))
        >>> tag_dict = ut.groupby_tags(tagged_pairs, undirected_tags)
        >>> print(ut.repr2(tag_dict, nl=2))
        >>> print(ut.repr2(ut.map_dict_vals(len, tag_dict)))
    """
    aid_pairs = np.vstack([aid1_list, aid2_list]).T
    if directed:
        annotmatch_rowid = ibs.get_annotmatch_rowid_from_superkey(
            aid_pairs.T[0], aid_pairs.T[1])
        tags_list = ibs.get_annotmatch_case_tags(annotmatch_rowid)
    else:
        annotmatch_rowid = ibs.get_annotmatch_rowid_from_undirected_superkey(
            aid_pairs.T[0], aid_pairs.T[1])
        tags_list = ibs.get_annotmatch_case_tags(annotmatch_rowid)
        if False:
            expanded_aid_pairs = np.vstack([aid_pairs, aid_pairs[:, ::-1]])
            expanded_annotmatch_rowid = ibs.get_annotmatch_rowid_from_superkey(
                expanded_aid_pairs.T[0], expanded_aid_pairs.T[1])
            expanded_edgeids = vt.get_undirected_edge_ids(expanded_aid_pairs)
            unique_edgeids, groupxs = vt.group_indices(expanded_edgeids)
            expanded_tags_list = ibs.get_annotmatch_case_tags(expanded_annotmatch_rowid)
            grouped_tags = vt.apply_grouping(
                np.array(expanded_tags_list, dtype=object), groupxs)
            undirected_tags = [list(set(ut.flatten(tags))) for tags in grouped_tags]
            edgeid2_tags = dict(zip(unique_edgeids, undirected_tags))
            input_edgeids = expanded_edgeids[:len(aid_pairs)]
            tags_list = ut.dict_take(edgeid2_tags, input_edgeids)
    return tags_list

def fix_splits_interaction(ibs):
    """
    python -m wbia fix_splits_interaction --show

    Example:
        >>> # DISABLE_DOCTEST GGR
        >>> from wbia.other.dbinfo import *  # NOQA
        >>> import wbia
        >>> dbdir = '/media/danger/GGR/GGR-IBEIS'
        >>> dbdir = dbdir if ut.checkpath(dbdir) else ut.truepath('~/lev/media/danger/GGR/GGR-IBEIS')
        >>> ibs = wbia.opendb(dbdir=dbdir, allow_newdir=False)
        >>> import wbia.guitool as gt
        >>> gt.ensure_qtapp()
        >>> win = fix_splits_interaction(ibs)
        >>> ut.quit_if_noshow()
        >>> import wbia.plottool as pt
        >>> gt.qtapp_loop(qwin=win)
    """
    split_props = {'splitcase', 'photobomb'}
    all_annot_groups = ibs._annot_groups(ibs.group_annots_by_name(ibs.get_valid_aids())[0])
    all_has_split = [
        len(split_props.intersection(ut.flatten(tags))) > 0
        for tags in all_annot_groups.match_tags
    ]
    tosplit_annots = ut.compress(all_annot_groups.annots_list, all_has_split)

    tosplit_annots = ut.take(tosplit_annots,
                             ut.argsort(ut.lmap(len, tosplit_annots)))[::-1]
    if ut.get_argflag('--reverse'):
        tosplit_annots = tosplit_annots[::-1]
    logger.info('len(tosplit_annots) = %r' % (len(tosplit_annots),))
    aids_list = [a.aids for a in tosplit_annots]

    from wbia.algo.graph import graph_iden
    from wbia.viz import viz_graph2
    import wbia.guitool as gt
    import wbia.plottool as pt

    pt.qt4ensure()
    gt.ensure_qtapp()

    for aids in ut.InteractiveIter(aids_list):
        infr = graph_iden.AnnotInference(ibs, aids)
        infr.initialize_graph()
        win = viz_graph2.AnnotGraphWidget(infr=infr, use_image=False,
                                          init_mode='rereview')
        win.populate_edge_model()
        win.show()
    return win

def multidb_montage():
    r"""
    CommandLine:
        python -m ibeis.scripts.specialdraw multidb_montage --save montage.jpg --dpath ~/slides --diskshow --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.scripts.specialdraw import *  # NOQA
        >>> multidb_montage()
    """
    import ibeis
    import plottool as pt
    import vtool as vt
    import numpy as np
    pt.ensure_pylab_qt4()
    ibs1 = ibeis.opendb('PZ_MTEST')
    ibs2 = ibeis.opendb('GZ_ALL')
    ibs3 = ibeis.opendb('GIRM_Master1')

    chip_lists = []
    aids_list = []

    for ibs in [ibs1, ibs2, ibs3]:
        aids = ibs.sample_annots_general(minqual='good', sample_size=400)
        aids_list.append(aids)

    print(ut.depth_profile(aids_list))

    for ibs, aids in zip([ibs1, ibs2, ibs3], aids_list):
        chips = ibs.get_annot_chips(aids)
        chip_lists.append(chips)

    chip_list = ut.flatten(chip_lists)
    np.random.shuffle(chip_list)

    widescreen_ratio = 16 / 9
    ratio = ut.PHI
    ratio = widescreen_ratio

    fpath = pt.get_save_directions()

    #height = 6000
    width = 6000
    #width = int(height * ratio)
    height = int(width / ratio)
    dsize = (width, height)
    dst = vt.montage(chip_list, dsize)
    vt.imwrite(fpath, dst)
    if ut.get_argflag('--show'):
        pt.imshow(dst)

def get_photobomber_map(ibs, aids, aid_to_nid=None):
    """
    Builds a map of which names photobomb other names.

    python -m wbia.gui.id_review_api --test-test_review_widget --show --db PZ_MTEST -a default:qindex=0

    >>> import wbia
    >>> dbdir = ut.truepath('~/lev/media/danger/GGR/GGR-IBEIS')
    >>> ibs = wbia.opendb(dbdir='/home/joncrall/lev/media/danger/GGR/GGR-IBEIS')
    >>> filter_kw = {
    >>>     'multiple': False,
    >>>     'minqual': 'good',
    >>>     'is_known': True,
    >>>     'min_pername': 2,
    >>>     'view': ['right'],
    >>> }
    >>> aids = ibs.filter_annots_general(ibs.get_valid_aids(), filter_kw=filter_kw)
    """
    ams_list = ibs.get_annotmatch_rowids_from_aid(aids)
    flags_list = ibs.unflat_map(
        ut.partial(ibs.get_annotmatch_prop, 'Photobomb'), ams_list)
    pb_ams = ut.zipcompress(ams_list, flags_list)
    has_pb_ams = [len(ams) > 0 for ams in pb_ams]
    pb_ams_ = ut.compress(pb_ams, has_pb_ams)
    # aids_ = ut.compress(aids, has_pb_ams)
    pb_ams_flat = ut.flatten(pb_ams_)

    pb_aids1_ = ibs.get_annotmatch_aid1(pb_ams_flat)
    pb_aids2_ = ibs.get_annotmatch_aid2(pb_ams_flat)

    pb_aid_pairs_ = list(zip(pb_aids1_, pb_aids2_))
    if aid_to_nid is None:
        pb_nid_pairs_ = ibs.unflat_map(ibs.get_annot_nids, pb_aid_pairs_)
    else:
        pb_nid_pairs_ = ibs.unflat_map(ut.partial(ut.take, aid_to_nid), pb_aid_pairs_)

    # invalid_aid_map = ut.ddict(set)
    # for aid1, aid2 in pb_aid_pairs_:
    #     if aid1 != aid2:
    #         invalid_aid_map[aid1].add(aid2)
    #         invalid_aid_map[aid2].add(aid1)

    invalid_nid_map = ut.ddict(set)
    for nid1, nid2 in pb_nid_pairs_:
        if nid1 != nid2:
            invalid_nid_map[nid1].add(nid2)
            invalid_nid_map[nid2].add(nid1)

    return invalid_nid_map

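# Illustrative sketch (not from the original source): the final loop above
# builds a symmetric "which names photobomb which" adjacency map; here with
# made-up name-id pairs and a plain dict instead of ut.ddict.
def _demo_photobomber_map_sketch():
    pb_nid_pairs_ = [(1, 2), (2, 3), (1, 1)]
    invalid_nid_map = {}
    for nid1, nid2 in pb_nid_pairs_:
        if nid1 != nid2:  # self-pairs carry no cross-name information
            invalid_nid_map.setdefault(nid1, set()).add(nid2)
            invalid_nid_map.setdefault(nid2, set()).add(nid1)
    assert invalid_nid_map == {1: {2}, 2: {1, 3}, 3: {2}}
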
def commit_detection_results_filtered(ibs, gid_list, filter_species_list=None,
                                      filter_viewpoint_list=None, note=None,
                                      update_json_log=True):
    depc = ibs.depc_image
    results_list = depc.get_property('detections', gid_list, None)
    zipped_list = list(zip(gid_list, results_list))
    aids_list = []
    for (gid, (score, bbox_list, theta_list, species_list, viewpoint_list,
               conf_list)) in zipped_list:
        aid_list = []
        result_list = list(zip(bbox_list, theta_list, species_list,
                               viewpoint_list, conf_list))
        for bbox, theta, species, viewpoint, conf in result_list:
            if not (filter_species_list is None or species in filter_species_list):
                continue
            if not (filter_viewpoint_list is None or viewpoint in filter_viewpoint_list):
                continue
            note_ = None if note is None else [note]
            temp_list = ibs.add_annots(
                [gid], [bbox], [theta], [species],
                detect_confidence_list=[conf],
                notes_list=note_,
                quiet_delete_thumbs=True,
                skip_cleaning=True)
            aid = temp_list[0]
            ibs.set_annot_viewpoints([aid], [viewpoint])
            # TODO ibs.set_annot_viewpoint_code([aid], [viewpoint])
            aid_list.append(aid)
        aids_list.append(aid_list)
    ibs._clean_species()
    if update_json_log:
        aid_list = ut.flatten(aids_list)
        ibs.log_detections(aid_list)
    return aids_list

def intra_encounter_matching():
    import numpy as np
    from scipy.sparse import coo_matrix, csgraph
    qreq_, cm_list = testdata_workflow()
    # qaids = [cm.qaid for cm in cm_list]
    # top_aids = [cm.get_top_aids(5) for cm in cm_list]
    aid_pairs = np.array([(cm.qaid, daid)
                          for cm in cm_list for daid in cm.get_top_aids(5)])
    top_scores = ut.flatten([cm.get_top_scores(5) for cm in cm_list])

    N = aid_pairs.max() + 1
    mat = coo_matrix((top_scores, aid_pairs.T), shape=(N, N))
    csgraph.connected_components(mat)
    tree = csgraph.minimum_spanning_tree(mat)  # NOQA
    import plottool as pt
    dense = mat.todense()
    pt.imshow(dense / dense.max() * 255)
    pt.show_if_requested()

    # baseline jobid
    import opengm
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/OpenGM%20tutorial.ipynb
    numVar = 10
    unaries = np.ones([numVar, 3], dtype=opengm.value_type)
    gm = opengm.gm(np.ones(numVar, dtype=opengm.label_type) * 3)
    unary_fids = gm.addFunctions(unaries)
    gm.addFactors(unary_fids, np.arange(numVar))
    infParam = opengm.InfParam(
        workflow=ut.ensure_ascii('(IC)(TTC-I,CC-I)'),
    )
    inf = opengm.inference.Multicut(gm, parameter=infParam)
    visitor = inf.verboseVisitor(printNth=1, multiline=False)
    inf.infer(visitor)
    arg = inf.arg()

    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)
    # regularizer = opengm.pottsFunction([3, 3], 0.0, beta)
    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)

    unaries = np.random.rand(10, 10, 2)
    potts = opengm.PottsFunction([2, 2], 0.0, 0.4)
    gm = opengm.grid2d2Order(unaries=unaries, regularizer=potts)
    inf = opengm.inference.GraphCut(gm)
    inf.infer()
    arg = inf.arg()  # NOQA

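# Illustrative sketch (not from the original source): the sparse-graph step
# above made self-contained with made-up (qaid, daid) pairs and scores.
# coo_matrix plus csgraph.connected_components recover the match components.
def _demo_score_graph_sketch():
    import numpy as np
    from scipy.sparse import coo_matrix
    from scipy.sparse import csgraph

    aid_pairs = np.array([(1, 2), (2, 3), (4, 5)])
    top_scores = [0.9, 0.7, 0.8]
    N = aid_pairs.max() + 1
    mat = coo_matrix((top_scores, (aid_pairs.T[0], aid_pairs.T[1])),
                     shape=(N, N))
    n_components, labels = csgraph.connected_components(mat, directed=False)
    # node 0 is isolated; {1, 2, 3} and {4, 5} are the two match components
    assert n_components == 3
    tree = csgraph.minimum_spanning_tree(mat)  # NOQA
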
def get_param_info_list(rrvsone_cfg):
    # New way to try and specify config options.
    # Not sure if I like it yet.
    param_info_list = ut.flatten([
        [
            ut.ParamInfo('index_method', 'single', ''),
            ut.ParamInfo('K', 4, type_=int),
            ut.ParamInfo('Knorm', 1, 'Kn='),
            ut.ParamInfo('use_k_padding', False, 'padk='),
            ut.ParamInfo('single_name_condition', False, 'nameknn',
                         type_=bool, hideif=False),
            ut.ParamInfo('checks', 800, 'cks', type_=int),
            #ut.ParamInfo('ratio_thresh', None, type_=float, hideif=None),
        ],
    ])
    return param_info_list
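# Hedged illustration of the ParamInfo pattern, using a namedtuple stand-in
# so the snippet runs without utool; the real ut.ParamInfo carries extra
# metadata (alias string, type_, hideif) used when building cfgstr labels.
from collections import namedtuple
ParamInfo = namedtuple('ParamInfo', ['varname', 'default', 'alias'])

param_info_list = [
    ParamInfo('K', 4, ''),
    ParamInfo('Knorm', 1, 'Kn='),
]
# Typical consumption: seed a config dict with the declared defaults
defaults = {pi.varname: pi.default for pi in param_info_list}
print(defaults)  # {'K': 4, 'Knorm': 1}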
def __init__(self, config_list):
    self._orig_config_list = config_list
    # Cast all inputs to config classes
    self._new_config_list = [
        cfg if hasattr(cfg, 'get_cfgstr') else make_configclass(cfg, '')
        for cfg in self._orig_config_list
    ]
    # Parse out items from the casted configs
    self._items = ut.flatten([
        list(cfg.parse_items()) if hasattr(cfg, 'parse_items') else list(cfg.items())
        for cfg in self._new_config_list
    ])
    for key, val in self._items:
        setattr(self, key, val)
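# Hedged, self-contained sketch of the cast-and-flatten pattern above:
# accept plain dicts or config objects, normalize them, then expose every
# (key, value) pair as an attribute. `SimpleConfig` is a stand-in for
# whatever make_configclass produces.
class SimpleConfig(object):
    def __init__(self, **kwargs):
        self._dict = kwargs

    def parse_items(self):
        return list(self._dict.items())


class CombinedConfig(object):
    def __init__(self, config_list):
        casted = [
            cfg if hasattr(cfg, 'parse_items') else SimpleConfig(**cfg)
            for cfg in config_list
        ]
        self._items = [item for cfg in casted for item in cfg.parse_items()]
        for key, val in self._items:
            setattr(self, key, val)


combined = CombinedConfig([{'K': 4}, SimpleConfig(Knorm=1)])
print(combined.K, combined.Knorm)  # 4 1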
def commit_localization_results(ibs, gid_list, results_list, note=None,
                                labeler_algo='pipeline', labeler_model_tag=None,
                                use_labeler_species=False, update_json_log=True,
                                **kwargs):
    zipped_list = list(zip(gid_list, results_list))
    aids_list = []
    for gid, results in zipped_list:
        score, bbox_list, theta_list, conf_list, class_list = results
        num = len(bbox_list)
        notes_list = None if note is None else [note] * num
        # Add one annotation per localized bounding box
        aid_list = ibs.add_annots([gid] * num, bbox_list, theta_list,
                                  class_list, detect_confidence_list=conf_list,
                                  notes_list=notes_list,
                                  quiet_delete_thumbs=True,
                                  skip_cleaning=True)
        aids_list.append(aid_list)
    aid_list = ut.flatten(aids_list)
    if labeler_model_tag is not None:
        # Use the labeler model to assign viewpoints (and optionally species)
        labeler_config = {
            'labeler_algo': labeler_algo,
            'labeler_weight_filepath': labeler_model_tag,
        }
        viewpoint_list = ibs.depc_annot.get_property(
            'labeler', aid_list, 'viewpoint', config=labeler_config)
        ibs.set_annot_viewpoints(aid_list, viewpoint_list)
        if use_labeler_species:
            species_list = ibs.depc_annot.get_property(
                'labeler', aid_list, 'species', config=labeler_config)
            ibs.set_annot_species(aid_list, species_list)
            ibs._clean_species()
    if update_json_log:
        ibs.log_detections(aid_list)
    return aids_list
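# Hedged usage sketch: commit localizer output and let a labeler model fill
# in viewpoints and species. The 'localizations' depc table name and the
# model tag are assumptions for illustration; real names depend on the
# deployed pipeline.
gid_list = ibs.get_valid_gids()
results_list = ibs.depc_image.get_property('localizations', gid_list, None)
aids_list = commit_localization_results(
    ibs, gid_list, results_list,
    labeler_model_tag='candidacy',  # hypothetical weights tag
    use_labeler_species=True,
)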
def test_transforms():
    r"""
    CommandLine:
        python -m ibeis_cnn.augment --test-test_transforms --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis_cnn.augment import * # NOQA
        >>> test_transforms()
    """
    from ibeis_cnn import ingest_data, utils, draw_results
    import vtool as vt
    data, labels = ingest_data.testdata_patchmatch()
    cv2_data = utils.convert_theano_images_to_cv2_images(data)
    patches_ = cv2_data[::2]

    transform_list = ut.flatten(all_transforms)

    # Append a handful of random affine warps to the fixed transforms
    num_random = 5
    for _ in range(num_random):
        affine_kw = random_affine_kwargs()
        func = functools.partial(vt.affine_warp_around_center, **affine_kw)
        transform_list.append(func)

    orig_list = []
    warped_list = []
    name_list = []
    for patch, func in zip(patches_, transform_list):
        if isinstance(func, functools.partial):
            name = ut.get_partial_func_name(func)
        else:
            name = ut.get_funcname(func)
        print(name)
        warped = func(patch)
        orig_list.append(patch)
        name_list.append(name)
        warped_list.append(warped)

    index_list = list(range(len(orig_list)))
    label_list = None
    tup = draw_results.get_patch_sample_img(orig_list, warped_list, label_list,
                                            {'text': name_list}, index_list,
                                            (1, len(index_list)))
    stacked_img, stacked_offsets, stacked_sfs = tup
    ut.quit_if_noshow()
    import plottool as pt
    pt.imshow(stacked_img)
    ut.show_if_requested()
def get_square_data(metadata, cfgstr=None):
    # Can only support one config at a time right now
    if cfgstr is None:
        cfgstr = metadata.get_cfgstr_list()[0]
    qaid2_cols = metadata.dictstore[cfgstr]
    qaids = list(qaid2_cols.keys())
    col_name_list = ut.unique_ordered(
        ut.flatten([cols.keys() for cols in qaid2_cols.values()]))
    #col_name_list = ['qx2_scoreexpdiff', 'qx2_gt_aid']
    #colname2_colvals = [None for colname in col_name_list]
    column_list = [
        [colvals.get(colname, None)
         for qaid, colvals in six.iteritems(qaid2_cols)]
        for colname in col_name_list]
    col_name_list = ['qaids'] + col_name_list
    column_list = [qaids] + column_list
    print('depth_profile(column_list) = %r' % (ut.depth_profile(column_list),))
    return col_name_list, column_list
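# Hedged usage sketch: render the square data as a CSV-ish table. Assumes
# `metadata` is a populated metadata object as elsewhere in this module.
col_name_list, column_list = get_square_data(metadata)
rows = zip(*column_list)  # transpose columns into rows
print(','.join(col_name_list))
for row in rows:
    print(','.join(str(val) for val in row))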
def find_module_callers():
    """
    TODO: attempt to build a call graph between module functions to make it
    easy to see what can be removed and what cannot.
    """
    import utool as ut
    from os.path import normpath
    # Alternative targets (only the last assignment takes effect)
    #mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_analyzer.py')
    #mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_all.py')
    mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_organizer.py')
    module = ut.import_module_from_fpath(mod_fpath)
    user_profile = ut.ensure_user_profile()
    doctestables = list(
        ut.iter_module_doctestable(module, include_builtin=False))
    grepkw = {}
    grepkw['exclude_dirs'] = user_profile.project_exclude_dirs
    grepkw['dpath_list'] = user_profile.project_dpaths
    grepkw['verbose'] = True

    # Grep the project for every doctestable function name
    usage_map = {}
    for funcname, func in doctestables:
        print('Searching for funcname = %r' % (funcname,))
        found_fpath_list, found_lines_list, found_lxs_list = ut.grep(
            [funcname], **grepkw)
        used_in = (found_fpath_list, found_lines_list, found_lxs_list)
        usage_map[funcname] = used_in

    # Keep only usages outside the module itself
    external_usage_map = {}
    for funcname, used_in in usage_map.items():
        (found_fpath_list, found_lines_list, found_lxs_list) = used_in
        isexternal_flag = [
            normpath(fpath) != normpath(mod_fpath)
            for fpath in found_fpath_list
        ]
        ext_used_in = (ut.compress(found_fpath_list, isexternal_flag),
                       ut.compress(found_lines_list, isexternal_flag),
                       ut.compress(found_lxs_list, isexternal_flag))
        external_usage_map[funcname] = ext_used_in

    # Report per-function external usage, then the overall set of callers
    for funcname, used_in in external_usage_map.items():
        (found_fpath_list, found_lines_list, found_lxs_list) = used_in
        print('funcname = %r is called from %d external files' % (
            funcname, len(found_fpath_list)))
    print('Calling modules: \n' + ut.repr2(ut.unique_ordered(
        ut.flatten([used_in[0] for used_in in external_usage_map.values()])),
        nl=True))
def init_score_matrix(allres):
    print('[rr2] init score matrix')
    ibs = allres.ibs
    qrid2_qres = allres.qrid2_qres
    qrid_list = allres.qrid_list
    nx_list = np.unique(ibs.tables.cx2_nx[qrid_list])
    #nx_list = ibs.get_valid_nxs(unknown=False)
    cxs_list = ibs.nx2_rids(nx_list, aslist=True)
    # Sort names by number of chips
    nx_size = [len(cxs) for cxs in cxs_list]
    # Build sorted chip list
    nx_cxs_tuples = list(zip(nx_size, cxs_list))
    # Sort by name
    cx_sorted = [x for (y, x) in sorted(nx_cxs_tuples)]
    # Subsort by chip
    cx_sorted = [sorted(cxs) for cxs in cx_sorted]
    cx_sorted = utool.flatten(cx_sorted)
    row_label_rid = []
    row_scores = []
    qcx_set = set(qrid_list)
    # Build each row in the score matrix
    for qrid in cx_sorted:
        if qrid not in qcx_set:
            continue
        try:
            qres = qrid2_qres[qrid]
        except IndexError:
            print('qrid = %r' % qrid)
            print('len(qrid2_qres) = %r' % len(qrid2_qres))
            raise
        if qres is None:
            continue
        # Append a label to score matrix
        row_label_rid.append(qrid)
        # Append a column to score matrix
        row_scores.append(qres.cx2_score[cx_sorted])
    col_label_rid = cx_sorted
    # Convert to numpy matrix array
    score_matrix = np.array(row_scores, dtype=np.float64)
    # Fill diagonal with -1's so self-matches never rank
    np.fill_diagonal(score_matrix, -1)
    # Add score matrix to allres
    allres.score_matrix = score_matrix
    allres.col_label_rid = col_label_rid
    allres.row_label_rid = row_label_rid
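# Minimal numpy sketch of the score-matrix layout built above: rows are
# query chips, columns are database chips, and the self-match diagonal is
# masked with -1 so it never ranks. Scores here are random for illustration.
import numpy as np
rng = np.random.RandomState(0)
score_matrix = rng.rand(4, 4)
np.fill_diagonal(score_matrix, -1)
print(score_matrix)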
def inter_imageset_match(ibs, imgsetid2_names=None, **kwargs):
    # Perform Inter-ImageSet Matching
    #if imgsetid2_names is None:
    #    imgsetid2_names = intra_occurrence_match(ibs, **kwargs)
    all_nxs = utool.flatten(imgsetid2_names.values())
    for imgsetid, nid2_cxs in six.iteritems(imgsetid2_names):
        # Query this imageset's names against all names
        qnxs = nid2_cxs
        dnxs = all_nxs
        name_result = ibs.query(qnxs=qnxs, dnxs=dnxs)
        qcx2_res = name_result.chip_results()
        # NOTE: setWeights/cutEdges/getConnectedComponents below are not
        # networkx API; this relies on a custom graph-cut wrapper
        graph = netx.Graph()
        graph.add_nodes_from(list(range(len(qcx2_res))))
        graph.add_edges_from([res.aid2_fm for res in six.itervalues(qcx2_res)])
        graph.setWeights([(res.aid2_fs, res.aid2_fk)
                          for res in six.itervalues(qcx2_res)])
        graph.cutEdges(**kwargs)
        aid2_nx, nid2_cxs = graph.getConnectedComponents()
    return aid2_nx
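# Hedged sketch of the merging idea with real networkx API: build a graph
# whose nodes are annotations and whose edges are accepted matches, then
# read name groupings off the connected components. The edges are made up
# for illustration.
import networkx as nx

match_edges = [(1, 2), (2, 3), (5, 6)]  # hypothetical accepted matches
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6])
graph.add_edges_from(match_edges)
name_groups = list(nx.connected_components(graph))
print(name_groups)  # e.g. [{1, 2, 3}, {4}, {5, 6}]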