def demodata_infr2(defaultdb='PZ_MTEST'):
    defaultdb = 'PZ_MTEST'
    import ibeis
    ibs = ibeis.opendb(defaultdb=defaultdb)
    annots = ibs.annots()
    names = list(annots.group_items(annots.nids).values())[0:20]

    def dummy_phi(c, n):
        x = np.arange(n)
        phi = c * x / (c * x + 1)
        phi = phi / phi.sum()
        phi = np.diff(phi)
        return phi

    phis = {c: dummy_phi(c, 30) for c in range(1, 4)}
    aids = ut.flatten(names)
    infr = ibeis.AnnotInference(ibs, aids, autoinit=True)
    infr.init_termination_criteria(phis)
    infr.init_refresh_criteria()

    # Partially review
    n1, n2, n3, n4 = names[0:4]
    for name in names[4:]:
        for a, b in ut.itertwo(name.aids):
            infr.add_feedback((a, b), POSTV)

    for name1, name2 in it.combinations(names[4:], 2):
        infr.add_feedback((name1.aids[0], name2.aids[0]), NEGTV)
    return infr
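
# Hedged, standalone sketch (not part of ibeis): shows the shape of the curve that
# dummy_phi above produces, using only numpy. The helper name is hypothetical.
def _dummy_phi_sketch(c, n):
    """
    Example:
        >>> # a saturating curve, normalized, then differenced to length n - 1
        >>> phi = _dummy_phi_sketch(2, 30)
        >>> assert phi.shape == (29,)
    """
    import numpy as np
    x = np.arange(n)
    phi = c * x / (c * x + 1)
    phi = phi / phi.sum()
    return np.diff(phi)
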
def demodata_mtest_infr(state='empty'):
    import ibeis
    ibs = ibeis.opendb(db='PZ_MTEST')
    annots = ibs.annots()
    names = list(annots.group_items(annots.nids).values())
    ut.shuffle(names, rng=321)
    test_aids = ut.flatten(names[1::2])
    infr = ibeis.AnnotInference(ibs, test_aids, autoinit=True)
    infr.reset(state=state)
    return infr
def make_dummy_infr(annots_per_name):
    import ibeis
    nids = [
        val
        for val, num in enumerate(annots_per_name, start=1)
        for _ in range(num)
    ]
    aids = range(len(nids))
    infr = ibeis.AnnotInference(None, aids, nids=nids, autoinit=True, verbose=1)
    return infr
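
# Hedged, standalone sketch (not part of ibeis): the nid expansion that
# make_dummy_infr uses, shown in plain Python so the mapping from annots_per_name
# to nids/aids is explicit. The helper name is hypothetical.
def _expand_annots_per_name_sketch(annots_per_name):
    """
    Example:
        >>> # two annots for name 1, three for name 2, one for name 3
        >>> nids, aids = _expand_annots_per_name_sketch([2, 3, 1])
        >>> assert nids == [1, 1, 2, 2, 2, 3]
        >>> assert aids == [0, 1, 2, 3, 4, 5]
    """
    nids = [val
            for val, num in enumerate(annots_per_name, start=1)
            for _ in range(num)]
    aids = list(range(len(nids)))
    return nids, aids
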
def start(actor, dbdir, aids='all', config={}, **kwargs):
    import ibeis
    assert dbdir is not None, 'must specify dbdir'
    assert actor.infr is None, 'AnnotInference already running'
    ibs = ibeis.opendb(dbdir=dbdir, use_cache=False, web=False,
                       force_serial=True)

    # Create the AnnotInference
    print('starting via actor with ibs = %r' % (ibs,))
    actor.infr = ibeis.AnnotInference(ibs=ibs, aids=aids, autoinit=True)
    actor.infr.print('started via actor')
    actor.infr.print('config = {}'.format(ut.repr3(config)))

    # Configure query_annot_infr
    for key in config:
        actor.infr.params[key] = config[key]

    # Initialize
    # TODO: Initialize state from staging reviews after annotmatch
    # timestamps (in case of crash)
    actor.infr.print('Initializing infr tables')
    table = kwargs.get('init', 'staging')
    actor.infr.reset_feedback(table, apply=True)
    actor.infr.ensure_mst()
    actor.infr.apply_nondynamic_update()

    actor.infr.print('infr.status() = {}'.format(ut.repr4(actor.infr.status())))

    # Load random forests (TODO: should this be config specifiable?)
    actor.infr.print('loading published models')
    try:
        actor.infr.load_published()
    except Exception:
        pass

    # Start actor.infr Main Loop
    actor.infr.print('start id review')
    actor.infr.start_id_review()
    return 'initialized'
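
# Hedged usage sketch for start(): the actor only needs an `infr` attribute that is
# None before the call. The dbdir path and the config key below are placeholders,
# not values taken from this codebase, and the block is guarded with `if False`
# because it needs a real ibeis database on disk.
if False:
    import types
    actor = types.SimpleNamespace(infr=None)
    start(actor, dbdir='/path/to/PZ_MTEST', aids='all',
          config={'redun.pos': 2},  # hypothetical key; any valid infr.params key works
          init='staging')
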
def reset_mtest_graph():
    """
    Resets the annotmatch and staging tables

    CommandLine:
        python -m ibeis reset_mtest_graph

    Example:
        >>> # SCRIPT
        >>> from ibeis.init.sysres import *  # NOQA
        >>> reset_mtest_graph()
    """
    if True:
        # Delete the graph databases and set them up for tests
        import ibeis
        ibs = ibeis.opendb('PZ_MTEST')
        annotmatch = ibs.db['annotmatch']
        staging = ibs.staging['reviews']
        annotmatch.clear()
        staging.clear()

    # Make this CC connected using positive edges
    from ibeis.algo.graph.state import POSTV, NEGTV, INCMP, DIFF, NULL, SAME  # NOQA
    from ibeis.algo.graph import nx_utils as nxu
    import itertools as it

    # Add some graph properties to MTEST
    infr = ibeis.AnnotInference(ibs, 'all', autoinit=True)
    # Connect the names with meta decisions
    infr.ensure_mst(meta_decision=SAME)

    # big_ccs = [cc for cc in infr.positive_components() if len(cc) > 3]
    small_ccs = [cc for cc in infr.positive_components()
                 if len(cc) <= 3 and len(cc) > 1]
    # single_ccs = [cc for cc in infr.positive_components() if len(cc) == 1]

    cc = infr.pos_graph.connected_to(1)
    for edge in nxu.edges_between(infr.graph, cc):
        infr.add_feedback(edge, POSTV, user_id='user:setup1')

    # Make all small PCCs k-negative-redundant
    count = 0
    for cc1, cc2 in it.combinations(small_ccs, 2):
        count += 1
        for edge in infr.find_neg_augment_edges(cc1, cc2, k=1):
            if count > 10:
                # Do some with meta decisions
                infr.add_feedback(edge, meta_decision=DIFF, user_id='user:setup2')
            else:
                # Do some with evidence decisions
                infr.add_feedback(edge, NEGTV, user_id='user:setup3')

    # Make some small PCCs k-positive-redundant
    from ibeis.algo.graph.state import POSTV, NEGTV, INCMP, UNREV, UNKWN  # NOQA
    cand = list(infr.find_pos_redun_candidate_edges())
    for edge in cand[0:2]:
        infr.add_feedback(edge, evidence_decision=POSTV, user_id='user:setup4')

    assert infr.status()['nInconsistentCCs'] == 0

    # Write consistent state to both annotmatch and staging
    infr.write_ibeis_staging_feedback()
    infr.write_ibeis_annotmatch_feedback()

    # Add 2 inconsistencies to the staging database ONLY
    cand = list(infr.find_pos_redun_candidate_edges())
    for edge in cand[0:2]:
        infr.add_feedback(edge, evidence_decision=NEGTV, user_id='user:voldemort')

    assert infr.status()['nInconsistentCCs'] == 2
    infr.write_ibeis_staging_feedback()
    infr.reset_feedback('annotmatch', apply=True)
    assert infr.status()['nInconsistentCCs'] == 0
def _precollect(self):
    """
    Sets up an ibs object with an aids_pool

    Example:
        >>> from ibeis.scripts.thesis import *
        >>> self = Chap3('humpbacks_fb')
        >>> self = Chap3('GZ_Master1')
        >>> self = Chap3('GIRM_Master1')
        >>> self = Chap3('PZ_MTEST')
        >>> self = Chap3('PZ_PB_RF_TRAIN')
        >>> self = Chap3('PZ_Master1')
        >>> self = Chap3('RotanTurtles')
        >>> self._precollect()

        >>> from ibeis.scripts.thesis import *
        >>> self = Chap4('PZ_Master1')
        >>> self._precollect()
    """
    import ibeis
    from ibeis.init import main_helpers
    self.dbdir = ibeis.sysres.lookup_dbdir(self.dbname)
    ibs = ibeis.opendb(dbdir=self.dbdir)
    if ibs.dbname.startswith('PZ_PB_RF_TRAIN'):
        aids = ibs.get_valid_aids()
    elif ibs.dbname.startswith('LF_ALL'):
        aids = ibs.get_valid_aids()
    elif ibs.dbname.startswith('PZ_Master'):
        # PZ_Master is too big to run in full. Select a smaller sample.
        # Be sure to include photobomb and incomparable cases.
        aids = ibs.filter_annots_general(
            require_timestamp=True, species='primary', is_known=True,
            minqual='poor',
        )
        infr = ibeis.AnnotInference(ibs=ibs, aids=aids)
        infr.reset_feedback('staging', apply=True)
        minority_ccs = find_minority_class_ccs(infr)
        minority_aids = set(ut.flatten(minority_ccs))

        # We need to do our best to select a small sample here
        flags = ['left' in text for text in ibs.annots(aids).viewpoint_code]
        left_aids = ut.compress(aids, flags)

        majority_aids = set(ibs.filter_annots_general(
            left_aids, require_timestamp=True, species='primary',
            minqual='poor', require_quality=True, min_pername=2,
            max_pername=15))
        # This produces 5720 annotations
        aids = sorted(majority_aids.union(minority_aids))
    else:
        aids = ibs.filter_annots_general(require_timestamp=True,
                                         is_known=True,
                                         species='primary',
                                         minqual='poor')

    if ibs.dbname.startswith('MantaMatcher'):
        # Remove some of the singletons for this db
        annots = ibs.annots(aids)
        names = annots.group2(annots.nids)
        multis = [aids for aids in names if len(aids) > 1]
        singles = [aids for aids in names if len(aids) == 1]
        rng = np.random.RandomState(3988708794)
        aids = ut.flatten(multis)
        aids += ut.shuffle(ut.flatten(singles), rng=rng)[0:358]

    # ibs.print_annot_stats(aids, prefix='P')
    main_helpers.monkeypatch_encounters(ibs, aids, minutes=30)
    print('post monkey patch')
    # if False:
    #     ibs.print_annot_stats(aids, prefix='P')
    self.ibs = ibs
    self.aids_pool = aids
def remerge_subset():
    """
    Assumes ibs1 is an updated subset of ibs2.
    Re-merges ibs1 back into ibs2.

    TODO: annotmatch table must have non-directional edges for this to work.
    I.e. u < v

    Ignore:
        # Ensure annotmatch and names are up to date with staging

        # Load graph
        import ibeis
        ibs = ibeis.opendb('PZ_PB_RF_TRAIN')
        infr = ibeis.AnnotInference(aids='all', ibs=ibs, verbose=3)
        infr.reset_feedback('staging', apply=True)
        infr.relabel_using_reviews()

        # Check deltas
        infr.ibeis_name_group_delta_info()
        infr.ibeis_delta_info()

        # Write if it looks good
        infr.write_ibeis_annotmatch_feedback()
        infr.write_ibeis_name_assignment()

    Ignore:
        import ibeis
        ibs = ibeis.opendb('PZ_Master1')
        infr = ibeis.AnnotInference(ibs, 'all')
        infr.reset_feedback('annotmatch', apply=True)

    CommandLine:
        python -m ibeis.dbio.export_subset remerge_subset
    """
    import ibeis
    ibs1 = ibeis.opendb('PZ_PB_RF_TRAIN')
    ibs2 = ibeis.opendb('PZ_Master1')

    gids1, gids2 = ibs1.images(), ibs2.images()
    idxs1, idxs2 = ut.isect_indices(gids1.uuids, gids2.uuids)
    isect_gids1, isect_gids2 = gids1.take(idxs1), gids2.take(idxs2)

    assert all(
        set.issubset(set(a1), set(a2))
        for a1, a2 in zip(isect_gids1.annot_uuids, isect_gids2.annot_uuids)
    )

    annot_uuids = ut.flatten(isect_gids1.annot_uuids)
    # aids1 = ibs1.annots(ibs1.get_annot_aids_from_uuid(annot_uuids), asarray=True)
    # aids2 = ibs2.annots(ibs2.get_annot_aids_from_uuid(annot_uuids), asarray=True)
    aids1 = ibs1.annots(uuids=annot_uuids, asarray=True)
    aids2 = ibs2.annots(uuids=annot_uuids, asarray=True)
    import numpy as np
    to_aids2 = dict(zip(aids1, aids2))
    # to_aids1 = dict(zip(aids2, aids1))

    # Step 1) Update individual annot properties
    # These annots need updates
    # np.where(aids1.visual_uuids != aids2.visual_uuids)
    # np.where(aids1.semantic_uuids != aids2.semantic_uuids)

    annot_unary_props = [
        # 'yaws', 'bboxes', 'thetas', 'qual', 'species', 'unary_tags']
        'yaws', 'bboxes', 'thetas', 'qual', 'species', 'case_tags',
        'multiple', 'age_months_est_max', 'age_months_est_min',
        # 'sex_texts'
    ]
    to_change = {}
    for key in annot_unary_props:
        prop1 = getattr(aids1, key)
        prop2 = getattr(aids2, key)
        diff_idxs = set(np.where(prop1 != prop2)[0])
        if diff_idxs:
            diff_prop1 = ut.take(prop1, diff_idxs)
            diff_prop2 = ut.take(prop2, diff_idxs)
            print('key = %r' % (key,))
            print('diff_prop1 = %r' % (diff_prop1,))
            print('diff_prop2 = %r' % (diff_prop2,))
            to_change[key] = diff_idxs

    if to_change:
        changed_idxs = ut.unique(ut.flatten(to_change.values()))
        print('Found %d annots that need updated properties' % len(changed_idxs))
        print('changing unary attributes: %r' % (to_change,))
        if False and ut.are_you_sure('apply change'):
            for key, idxs in to_change.items():
                subaids1 = aids1.take(idxs)
                subaids2 = aids2.take(idxs)
                prop1 = getattr(subaids1, key)
                # prop2 = getattr(subaids2, key)
                setattr(subaids2, key, prop1)
    else:
        print('Annot properties are in sync. Nothing to change')

    # Step 2) Update annotmatch - pairwise relationships
    infr1 = ibeis.AnnotInference(aids=aids1.aids, ibs=ibs1, verbose=3,
                                 autoinit=False)

    # infr2 = ibeis.AnnotInference(aids=ibs2.annots().aids, ibs=ibs2, verbose=3)
    aids2 = ibs2.get_valid_aids(is_known=True)
    infr2 = ibeis.AnnotInference(aids=aids2, ibs=ibs2, verbose=3)
    infr2.reset_feedback('annotmatch', apply=True)

    # map feedback from ibs1 onto ibs2 using ibs2 aids.
    fb1 = infr1.read_ibeis_annotmatch_feedback()
    fb1_t = {(to_aids2[u], to_aids2[v]): val for (u, v), val in fb1.items()}
    fb1_df_t = infr2._pandas_feedback_format(fb1_t).drop('am_rowid', axis=1)

    # Add transformed feedback into ibs2
    infr2.add_feedback_from(fb1_df_t)

    # Now ensure that dummy connectivity exists to preserve original names
    # from ibeis.algo.graph import nx_utils
    # for (u, v) in infr2.find_mst_edges('name_label'):
    #     infr2.draw_aids((u, v))
    #     cc1 = infr2.pos_graph.connected_to(u)
    #     cc2 = infr2.pos_graph.connected_to(v)
    #     print(nx_utils.edges_cross(infr2.graph, cc1, cc2))
    #     infr2.neg_redundancy(cc1, cc2)
    #     infr2.pos_redundancy(cc2)

    infr2.relabel_using_reviews(rectify=True)
    infr2.apply_nondynamic_update()

    if False:
        infr2.ibeis_delta_info()
        infr2.ibeis_name_group_delta_info()

    if len(list(infr2.inconsistent_components())) > 0:
        raise NotImplementedError('need to fix inconsistencies first')
        # Make it so it just loops until inconsistencies are resolved
        infr2.prioritize()
        infr2.qt_review_loop()
    else:
        infr2.write_ibeis_staging_feedback()
        infr2.write_ibeis_annotmatch_feedback()
        infr2.write_ibeis_name_assignment()

    # if False:
    #     # Fix any inconsistency
    #     infr2.start_qt_interface(loop=False)
    #     test_nodes = [5344, 5430, 5349, 5334, 5383, 2280, 2265, 2234, 5399,
    #                   5338, 2654]
    #     import networkx as nx
    #     nx.is_connected(infr2.graph.subgraph(test_nodes))
    #     # infr = ibeis.AnnotInference(aids=test_nodes, ibs=ibs2, verbose=5)

    #     # randomly sample some new labels to verify
    #     import guitool_ibeis as gt
    #     from ibeis.gui import inspect_gui
    #     gt.ensure_qapp()
    #     ut.qtensure()
    #     old_groups = ut.group_items(name_delta.index.tolist(), name_delta['old_name'])
    #     del old_groups['____']
    #     new_groups = ut.group_items(name_delta.index.tolist(), name_delta['new_name'])

    #     from ibeis.algo.hots import simulate
    #     c = simulate.compare_groups(
    #         list(new_groups.values()),
    #         list(old_groups.values()),
    #     )
    #     ut.map_vals(len, c)
    #     for aids in c['pred_splits']:
    #         old_nids = ibs2.get_annot_nids(aids)
    #         new_nids = ut.take_column(infr2.gen_node_attrs('name_label', aids), 1)
    #         split_aids = ut.take_column(ut.group_items(aids, new_nids).values(), 0)
    #         aid1, aid2 = split_aids[0:2]
    #         if False:
    #             inspect_gui.show_vsone_tuner(ibs2, aid1, aid2)
    #     infr2.start_qt_interface(loop=False)

    # if False:
    #     # import ibeis
    #     ibs1 = ibeis.opendb('PZ_PB_RF_TRAIN')
    #     infr1 = ibeis.AnnotInference(aids='all', ibs=ibs1, verbose=3)
    #     infr1.initialize_graph()
    #     # infr1.reset_feedback('staging')
    #     infr1.reset_feedback('annotmatch')
    #     infr1.apply_feedback_edges()
    #     infr1.relabel_using_reviews()
    #     infr1.apply_review_inference()
    #     infr1.start_qt_interface(loop=False)

    # delta = infr2.match_state_delta()
    # print('delta = %r' % (delta,))

    # infr2.ensure_mst()
    # infr2.relabel_using_reviews()
    # infr2.apply_review_inference()

    # mst_edges = infr2.find_mst_edges()
    # set(infr2.graph.edges()).intersection(mst_edges)
    return
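
# Hedged, standalone sketch (not part of ibeis): the edge-key remapping used above,
# where feedback keyed by ibs1 aid pairs is re-keyed into ibs2 aid space through
# to_aids2. The helper name and toy values are hypothetical.
def _remap_feedback_keys_sketch(fb1, to_aids2):
    """
    Example:
        >>> fb1 = {(1, 2): ['match'], (2, 3): ['nomatch']}
        >>> to_aids2 = {1: 101, 2: 102, 3: 103}
        >>> _remap_feedback_keys_sketch(fb1, to_aids2)
        {(101, 102): ['match'], (102, 103): ['nomatch']}
    """
    return {(to_aids2[u], to_aids2[v]): val for (u, v), val in fb1.items()}
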
def fix_annotmatch_pzmaster1():
    """
    PZ_Master1 had annotmatch rowids that did not agree with the current name
    labeling. Looking at the inconsistencies in the graph interface was too
    cumbersome, because over 3000 annots were incorrectly grouped together.

    This function deletes any annotmatch rowid that is not consistent with the
    current labeling so we can go forward with using the new AnnotInference
    object.
    """
    import ibeis
    ibs = ibeis.opendb('PZ_Master1')
    infr = ibeis.AnnotInference(ibs=ibs, aids=ibs.get_valid_aids(), verbose=5)
    infr.initialize_graph()
    annots = ibs.annots()
    aid_to_nid = ut.dzip(annots.aids, annots.nids)

    if False:
        infr.reset_feedback()
        infr.ensure_mst()
        infr.apply_feedback_edges()
        infr.relabel_using_reviews()
        infr.start_qt_interface()

    # Get annotmatch rowids that agree with current labeling
    if False:
        annotmatch = ibs.db.get_table_as_pandas('annotmatch')
        import pandas as pd
        flags1 = pd.isnull(annotmatch['annotmatch_evidence_decision'])
        flags2 = annotmatch['annotmatch_tag_text'] == ''
        bad_part = annotmatch[flags1 & flags2]
        rowids = bad_part.index.tolist()
        ibs.delete_annotmatch(rowids)

    if False:
        # Delete bidirectional annotmatches
        annotmatch = ibs.db.get_table_as_pandas('annotmatch')
        df = annotmatch.set_index(['annot_rowid1', 'annot_rowid2'])

        # Find entries that have both directions
        pairs1 = annotmatch[['annot_rowid1', 'annot_rowid2']].values
        f_edges = {tuple(p) for p in pairs1}
        b_edges = {tuple(p[::-1]) for p in pairs1}
        isect_edges = {tuple(sorted(p)) for p in b_edges.intersection(f_edges)}
        isect_edges1 = list(isect_edges)
        isect_edges2 = [p[::-1] for p in isect_edges]

        # cols = ['annotmatch_evidence_decision', 'annotmatch_tag_text']
        import pandas as pd
        custom_ = {
            (559, 4909): (False, ['photobomb']),
            (7918, 8041): (False, ['photobomb']),
            (6634, 6754): (False, ['photobomb']),
            (3707, 3727): (False, ['photobomb']),
            (86, 103): (False, ['photobomb']),
        }
        extra_ = {}
        fixme_edges = []

        d1 = df.loc[isect_edges1].reset_index(drop=False)
        d2 = df.loc[isect_edges2].reset_index(drop=False)
        flags = d1['annotmatch_evidence_decision'] != d2['annotmatch_evidence_decision']
        from ibeis.tag_funcs import _parse_tags
        for f, r1, r2 in zip(flags, d1.iterrows(), d2.iterrows()):
            v1, v2 = r1[1], r2[1]
            aid1 = v1['annot_rowid1']
            aid2 = v1['annot_rowid2']
            truth_real = (ibs.const.EVIDENCE_DECISION.POSITIVE
                          if aid_to_nid[aid1] == aid_to_nid[aid2]
                          else ibs.const.EVIDENCE_DECISION.NEGATIVE)
            truth1 = v1['annotmatch_evidence_decision']
            truth2 = v2['annotmatch_evidence_decision']
            t1 = _parse_tags(v1['annotmatch_tag_text'])
            t2 = _parse_tags(v2['annotmatch_tag_text'])
            newtag = ut.union_ordered(t1, t2)

            if (aid1, aid2) in custom_:
                continue

            fixme_flag = False
            if not pd.isnull(truth1):
                if truth_real != truth1:
                    fixme_flag = True
            if not pd.isnull(truth2):
                if truth_real != truth2:
                    fixme_flag = True
            if fixme_flag:
                print('newtag = %r' % (newtag,))
                print('truth_real = %r' % (truth_real,))
                print('truth1 = %r' % (truth1,))
                print('truth2 = %r' % (truth2,))
                print('aid1 = %r' % (aid1,))
                print('aid2 = %r' % (aid2,))
                fixme_edges.append((aid1, aid2))
            else:
                extra_[(aid1, aid2)] = (truth_real, newtag)

        extra_.update(custom_)
        new_pairs = extra_.keys()
        new_truths = ut.take_column(ut.dict_take(extra_, new_pairs), 0)
        new_tags = ut.take_column(ut.dict_take(extra_, new_pairs), 1)
        new_tag_texts = [';'.join(t) for t in new_tags]
        aids1, aids2 = ut.listT(new_pairs)
        # Delete the old
        ibs.delete_annotmatch((d1['annotmatch_rowid'].values.tolist() +
                               d2['annotmatch_rowid'].values.tolist()))
        # Add the new
        ams = ibs.add_annotmatch_undirected(aids1, aids2)
        ibs.set_annotmatch_evidence_decision(ams, new_truths)
        ibs.set_annotmatch_tag_text(ams, new_tag_texts)

        if False:
            import guitool_ibeis as gt
            gt.ensure_qapp()
            ut.qtensure()
            from ibeis.gui import inspect_gui
            inspect_gui.show_vsone_tuner(ibs, aid1, aid2)

    # pairs2 = pairs1.T[::-1].T
    # idx1, idx2 = ut.isect_indices(list(map(tuple, pairs1)),
    #                               list(map(tuple, pairs2)))
    # r_edges = list(set(map(tuple, map(sorted, pairs1[idx1]))))
    # unique_pairs = list(set(map(tuple, map(sorted, pairs1[idx1]))))
    # df = annotmatch.set_index(['annot_rowid1', 'annot_rowid2'])

    x = ut.ddict(list)
    annotmatch = ibs.db.get_table_as_pandas('annotmatch')
    import ubelt as ub
    _iter = annotmatch.iterrows()
    prog = ub.ProgIter(_iter, length=len(annotmatch))
    for k, m in prog:
        aid1 = m['annot_rowid1']
        aid2 = m['annot_rowid2']
        if m['annotmatch_evidence_decision'] == ibs.const.EVIDENCE_DECISION.POSITIVE:
            if aid_to_nid[aid1] == aid_to_nid[aid2]:
                x['agree1'].append(k)
            else:
                x['disagree1'].append(k)
        elif m['annotmatch_evidence_decision'] == ibs.const.EVIDENCE_DECISION.NEGATIVE:
            if aid_to_nid[aid1] == aid_to_nid[aid2]:
                x['disagree2'].append(k)
            else:
                x['agree2'].append(k)

    ub.map_vals(len, x)
    ut.dict_hist(annotmatch.loc[x['disagree1']]['annotmatch_tag_text'])
    disagree1 = annotmatch.loc[x['disagree1']]
    pb_disagree1 = disagree1[disagree1['annotmatch_tag_text'] == 'photobomb']
    aids1 = pb_disagree1['annot_rowid1'].values.tolist()
    aids2 = pb_disagree1['annot_rowid2'].values.tolist()
    aid_pairs = list(zip(aids1, aids2))
    infr = ibeis.AnnotInference.from_pairs(aid_pairs, ibs=ibs, verbose=5)
    if False:
        feedback = infr.read_ibeis_annotmatch_feedback(edges=infr.edges())
        infr.external_feedback = feedback
        infr.apply_feedback_edges()
        infr.start_qt_interface(loop=False)

    # Delete these values
    if False:
        nonpb_disagree1 = disagree1[disagree1['annotmatch_tag_text'] != 'photobomb']
        disagree2 = annotmatch.loc[x['disagree2']]
        ibs.delete_annotmatch(nonpb_disagree1['annotmatch_rowid'])
        ibs.delete_annotmatch(disagree2['annotmatch_rowid'])

    # ut.dict_hist(disagree1['annotmatch_tag_text'])
    import networkx as nx
    graph = nx.Graph()
    graph.add_edges_from(zip(pb_disagree1['annot_rowid1'],
                             pb_disagree1['annot_rowid2']))
    list(nx.connected_components(graph))
    set(annotmatch.loc[x['disagree2']]['annotmatch_tag_text'])
def fix_bidirectional_annotmatch(ibs):
    import ibeis
    infr = ibeis.AnnotInference(ibs=ibs, aids='all', verbose=5)
    infr.initialize_graph()
    annots = ibs.annots()
    aid_to_nid = ut.dzip(annots.aids, annots.nids)

    # Delete bidirectional annotmatches
    annotmatch = ibs.db.get_table_as_pandas('annotmatch')
    df = annotmatch.set_index(['annot_rowid1', 'annot_rowid2'])

    # Find entries that have both directions
    pairs1 = annotmatch[['annot_rowid1', 'annot_rowid2']].values
    f_edges = {tuple(p) for p in pairs1}
    b_edges = {tuple(p[::-1]) for p in pairs1}
    isect_edges = {tuple(sorted(p)) for p in b_edges.intersection(f_edges)}
    print('Found %d bidirectional edges' % len(isect_edges))
    isect_edges1 = list(isect_edges)
    isect_edges2 = [p[::-1] for p in isect_edges]

    import pandas as pd
    extra_ = {}
    fixme_edges = []

    d1 = df.loc[isect_edges1].reset_index(drop=False)
    d2 = df.loc[isect_edges2].reset_index(drop=False)
    flags = d1['annotmatch_evidence_decision'] != d2['annotmatch_evidence_decision']
    from ibeis.tag_funcs import _parse_tags
    for f, r1, r2 in zip(flags, d1.iterrows(), d2.iterrows()):
        v1, v2 = r1[1], r2[1]
        aid1 = v1['annot_rowid1']
        aid2 = v1['annot_rowid2']
        truth_real = (ibs.const.EVIDENCE_DECISION.POSITIVE
                      if aid_to_nid[aid1] == aid_to_nid[aid2]
                      else ibs.const.EVIDENCE_DECISION.NEGATIVE)
        truth1 = v1['annotmatch_evidence_decision']
        truth2 = v2['annotmatch_evidence_decision']
        t1 = _parse_tags(v1['annotmatch_tag_text'])
        t2 = _parse_tags(v2['annotmatch_tag_text'])
        newtag = ut.union_ordered(t1, t2)
        fixme_flag = False
        if not pd.isnull(truth1):
            if truth_real != truth1:
                fixme_flag = True
        if not pd.isnull(truth2):
            if truth_real != truth2:
                fixme_flag = True
        if fixme_flag:
            print('--')
            print('t1, t2 = %r, %r' % (t1, t2))
            print('newtag = %r' % (newtag,))
            print('truth_real, truth1, truth2 = %r, %r, %r' % (
                truth_real, truth1, truth2,))
            print('aid1, aid2 = %r, %r' % (aid1, aid2))
            fixme_edges.append(tuple(sorted((aid1, aid2))))
        else:
            extra_[(aid1, aid2)] = (truth_real, newtag)

    if len(fixme_edges) > 0:
        # need to manually fix these edges
        fix_infr = ibeis.AnnotInference.from_pairs(fixme_edges, ibs=ibs, verbose=5)
        feedback = fix_infr.read_ibeis_annotmatch_feedback(only_existing_edges=True)
        infr = fix_infr
        fix_infr.external_feedback = feedback
        fix_infr.apply_feedback_edges()
        fix_infr.start_qt_interface(loop=False)
        # DELETE OLD EDGES TWICE
        ams = ibs.get_annotmatch_rowid_from_edges(fixme_edges)
        ibs.delete_annotmatch(ams)
        ams = ibs.get_annotmatch_rowid_from_edges(fixme_edges)
        ibs.delete_annotmatch(ams)

        # MANUALLY CALL THIS ONCE FINISHED
        # TO ONLY CHANGE ANNOTMATCH EDGES
        infr.write_ibeis_staging_feedback()
        infr.write_ibeis_annotmatch_feedback()

    # extra_.update(custom_)
    new_pairs = extra_.keys()
    new_truths = ut.take_column(ut.dict_take(extra_, new_pairs), 0)
    new_tags = ut.take_column(ut.dict_take(extra_, new_pairs), 1)
    new_tag_texts = [';'.join(t) for t in new_tags]
    aids1, aids2 = ut.listT(new_pairs)
    # Delete the old
    ibs.delete_annotmatch((d1['annotmatch_rowid'].values.tolist() +
                           d2['annotmatch_rowid'].values.tolist()))
    # Add the new
    ams = ibs.add_annotmatch_undirected(aids1, aids2)
    ibs.set_annotmatch_evidence_decision(ams, new_truths)
    ibs.set_annotmatch_tag_text(ams, new_tag_texts)

    if False:
        import guitool_ibeis as gt
        gt.ensure_qapp()
        ut.qtensure()
        from ibeis.gui import inspect_gui
        inspect_gui.show_vsone_tuner(ibs, aid1, aid2)
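
# Hedged, standalone sketch (not part of ibeis): the bidirectional-edge detection used
# in fix_annotmatch_pzmaster1 and fix_bidirectional_annotmatch above. A pair is
# bidirectional when both (a, b) and (b, a) appear; the canonical form is the sorted
# tuple. The helper name is hypothetical.
def _find_bidirectional_edges_sketch(pairs):
    """
    Example:
        >>> pairs = [(1, 2), (2, 1), (3, 4), (5, 6), (6, 5)]
        >>> sorted(_find_bidirectional_edges_sketch(pairs))
        [(1, 2), (5, 6)]
    """
    f_edges = {tuple(p) for p in pairs}
    b_edges = {tuple(p[::-1]) for p in pairs}
    return {tuple(sorted(p)) for p in b_edges.intersection(f_edges)}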