def show_normalizers(match, fnum=None, pnum=None, update=True):
        import plottool as pt
        from plottool import plot_helpers as ph

        # hack keys out of namespace
        keys = ["rchip", "kpts"]
        rchip1, kpts1 = ut.dict_take(match.annot1.__dict__, keys)
        rchip2, kpts2 = ut.dict_take(match.annot2.__dict__, keys)
        fs, fm = match.fs, match.fm_norm
        cmap = "cool"
        draw_lines = True
        if fnum is None:
            fnum = pt.next_fnum()
        pt.figure(fnum=fnum, pnum=pnum)
        # doclf=True, docla=True)
        ax, xywh1, xywh2 = pt.show_chipmatch2(
            rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs, fnum=fnum, cmap=cmap, draw_lines=draw_lines
        )
        ph.set_plotdat(ax, "viztype", "matches")
        ph.set_plotdat(ax, "key", match.key)
        title = match.key + "\n num=%d, sum=%.2f" % (len(fm), sum(fs))
        pt.set_title(title)
        if update:
            pt.update()
        return ax, xywh1, xywh2
 def show_normalizers(match, fnum=None, pnum=None, update=True):
     import plottool as pt
     from plottool import plot_helpers as ph
     # hack keys out of namespace
     keys = ['rchip', 'kpts']
     rchip1, kpts1 = ut.dict_take(match.annot1.__dict__, keys)
     rchip2, kpts2 = ut.dict_take(match.annot2.__dict__, keys)
     fs, fm = match.fs, match.fm_norm
     cmap = 'cool'
     draw_lines = True
     if fnum is None:
         fnum = pt.next_fnum()
     pt.figure(fnum=fnum, pnum=pnum)
     #doclf=True, docla=True)
     ax, xywh1, xywh2 = pt.show_chipmatch2(rchip1,
                                           rchip2,
                                           kpts1,
                                           kpts2,
                                           fm=fm,
                                           fs=fs,
                                           fnum=fnum,
                                           cmap=cmap,
                                           draw_lines=draw_lines)
     ph.set_plotdat(ax, 'viztype', 'matches')
     ph.set_plotdat(ax, 'key', match.key)
     title = match.key + '\n num=%d, sum=%.2f' % (len(fm), sum(fs))
     pt.set_title(title)
     if update:
         pt.update()
     return ax, xywh1, xywh2
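
Every snippet on this page leans on the same idiom: ut.dict_take(dict_, keys) pulls the values for an ordered list of keys out of a dict, optionally substituting a default for missing keys. A rough plain-Python approximation (not utool's actual implementation, which may raise a KeyError when no default is supplied):

def dict_take_sketch(dict_, keys, default=None):
    # Return the values of dict_ for the given keys, in order; missing keys map to default.
    return [dict_.get(key, default) for key in keys]

annot = {'rchip': 'chip-image', 'kpts': 'keypoint-array'}
rchip, kpts = dict_take_sketch(annot, ['rchip', 'kpts'])
# rchip == 'chip-image', kpts == 'keypoint-array'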
Example #3
def score_chipmatch_csum(qaid, chipmatch, qreq_):
    """
    score_chipmatch_csum

    Args:
        chipmatch (tuple):

    Returns:
        tuple: aid_list, score_list

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.model.hots.voting_rules2 import *  # NOQA
        >>> ibs, qreq_, qaid, chipmatch = get_chipmatch_testdata()
        >>> (aid_list, score_list) = score_chipmatch_csum(qaid, chipmatch, qreq_)
        >>> print(aid_list, score_list)
    """
    #(aid2_fm, aid2_fsv, aid2_fk, aid2_score, aid2_H) = chipmatch
    aid2_fsv = chipmatch.aid2_fsv
    if False:
        aid2_fs = {
            aid: fsv.prod(axis=1)
            for aid, fsv in six.iteritems(aid2_fsv)
        }
        aid_list = list(six.iterkeys(aid2_fs))
        fs_list = ut.dict_take(aid2_fs, aid_list)
        #fs_list  = list(six.itervalues(aid2_fs))
        score_list = [np.sum(fs) for fs in fs_list]
    else:
        aid_list = list(six.iterkeys(aid2_fsv))
        fsv_list = ut.dict_take(aid2_fsv, aid_list)
        fs_list = [fsv.prod(axis=1) for fsv in fsv_list]
        score_list = [np.sum(fs) for fs in fs_list]
    return (aid_list, score_list)
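
The csum ("chip sum") scoring above boils down to: for each annotation, take the product across the columns of its feature-score-vector matrix to get one score per feature match, then sum those per-feature scores. A self-contained numpy sketch with made-up toy values:

import numpy as np

# Toy aid2_fsv: two annotations, each with an (nMatches x nScoreCols) array.
aid2_fsv = {
    1: np.array([[0.9, 1.0], [0.5, 0.8]]),
    2: np.array([[0.2, 0.5]]),
}
aid_list = list(aid2_fsv.keys())
fsv_list = [aid2_fsv[aid] for aid in aid_list]
fs_list = [fsv.prod(axis=1) for fsv in fsv_list]   # per-feature scores
score_list = [np.sum(fs) for fs in fs_list]        # one score per annotation
# aid_list == [1, 2], score_list is approximately [1.3, 0.1]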
Example #4
    def from_cmtup_old(cls, cmtup_old, qaid=None, fsv_col_lbls=None, daid_list=None):
        """ convert QueryResult styles fields to ChipMatch style fields """

        (aid2_fm_, aid2_fsv_, aid2_fk_, aid2_score_, aid2_H_) = cmtup_old
        assert len(aid2_fsv_) == len(aid2_fm_), "bad old cmtup_old"
        assert len(aid2_fk_) == len(aid2_fm_), "bad old cmtup_old"
        assert aid2_score_ is None or len(aid2_score_) == 0 or len(aid2_score_) == len(aid2_fm_), "bad old cmtup_old"
        assert aid2_H_ is None or len(aid2_H_) == len(aid2_fm_), "bad old cmtup_old"
        if daid_list is None:
            daid_list = list(six.iterkeys(aid2_fm_))

        # WARNING: dict_take will not copy these default items
        # Maybe these should be separate instances for different items?
        _empty_fm = np.empty((0, 2), dtype=hstypes.FM_DTYPE)
        _empty_fsv = np.empty((0, 1), dtype=hstypes.FS_DTYPE)
        _empty_fk = np.empty((0), dtype=hstypes.FK_DTYPE)
        # convert dicts to lists
        fm_list = ut.dict_take(aid2_fm_, daid_list, _empty_fm)
        fsv_list = ut.dict_take(aid2_fsv_, daid_list, _empty_fsv)
        fk_list = ut.dict_take(aid2_fk_, daid_list, _empty_fk)
        no_scores = aid2_score_ is None or (len(aid2_score_) == 0 and len(daid_list) > 0)
        score_list = None if no_scores else np.array(ut.dict_take(aid2_score_, daid_list, np.nan))
        H_list = None if aid2_H_ is None else ut.dict_take(aid2_H_, daid_list, None)
        fsv_col_lbls = fsv_col_lbls
        cm = cls(qaid, daid_list, fm_list, fsv_list, fk_list, score_list, H_list, fsv_col_lbls)
        return cm
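
The conversion above mainly re-keys dict-based fields into lists aligned with daid_list, substituting shared empty arrays for missing entries. The warning in the code about dict_take not copying the defaults can be reproduced with plain dicts (toy values for illustration):

import numpy as np

aid2_fm = {7: np.array([[0, 0], [1, 2]])}
daid_list = [7, 8]
_empty_fm = np.empty((0, 2), dtype=np.int32)
# Align the dict to daid_list; missing daids fall back to the *same* empty array object.
fm_list = [aid2_fm.get(daid, _empty_fm) for daid in daid_list]
assert fm_list[1] is _empty_fm  # the default is shared, not copied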
Example #5
def score_chipmatch_csum(qaid, chipmatch, qreq_):
    """
    score_chipmatch_csum

    Args:
        chipmatch (tuple):

    Returns:
        tuple: aid_list, score_list

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.model.hots.voting_rules2 import *  # NOQA
        >>> ibs, qreq_, qaid, chipmatch = get_chipmatch_testdata()
        >>> (aid_list, score_list) = score_chipmatch_csum(qaid, chipmatch, qreq_)
        >>> print(aid_list, score_list)
    """
    #(aid2_fm, aid2_fsv, aid2_fk, aid2_score, aid2_H) = chipmatch
    aid2_fsv = chipmatch.aid2_fsv
    if False:
        aid2_fs = {aid: fsv.prod(axis=1) for aid, fsv in six.iteritems(aid2_fsv)}
        aid_list = list(six.iterkeys(aid2_fs))
        fs_list = ut.dict_take(aid2_fs, aid_list)
        #fs_list  = list(six.itervalues(aid2_fs))
        score_list = [np.sum(fs) for fs in fs_list]
    else:
        aid_list = list(six.iterkeys(aid2_fsv))
        fsv_list = ut.dict_take(aid2_fsv, aid_list)
        fs_list = [fsv.prod(axis=1) for fsv in fsv_list]
        score_list = [np.sum(fs) for fs in fs_list]
    return (aid_list, score_list)
 def baseline_match(annot, annot2):
     cfgdict = {}
     annot1 = annot
     keys = ["kpts", "vecs"]
     kpts1, vecs1 = ut.dict_take(annot1.__dict__, keys)
     kpts2, vecs2 = ut.dict_take(annot2.__dict__, keys)
     dlen_sqrd2 = annot2.dlen_sqrd
     basetup, base_meta = baseline_vsone_ratio_matcher_(kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict)
     (fm_ORIG, fs_ORIG, fm_RAT, fs_RAT, fm_SV, fs_SV, H_RAT) = basetup
     (fm_norm_RAT, fm_norm_SV) = base_meta
     match_ORIG = AnnotMatch(annot1, annot2, fm_ORIG, fs_ORIG, "ORIG")  # NOQA
     match_RAT = AnnotMatch(annot1, annot2, fm_RAT, fs_RAT, "RAT", fm_norm_RAT)  # NOQA
     match_SV = AnnotMatch(annot1, annot2, fm_SV, fs_SV, "SV", fm_norm_SV)
     match_SV.H = H_RAT
     return match_ORIG, match_RAT, match_SV
Example #7
def show_matches(fm,
                 fs,
                 fnum=1,
                 pnum=None,
                 title='',
                 key=None,
                 simp=None,
                 cmap='hot',
                 draw_lines=True,
                 **locals_):
    #locals_ = locals()
    import plottool as pt
    from plottool import plot_helpers as ph
    # hack keys out of namespace
    keys = 'rchip1, rchip2, kpts1, kpts2'.split(', ')
    rchip1, rchip2, kpts1, kpts2 = ut.dict_take(locals_, keys)
    pt.figure(fnum=fnum, pnum=pnum)
    #doclf=True, docla=True)
    ax, xywh1, xywh2 = pt.show_chipmatch2(rchip1,
                                          rchip2,
                                          kpts1,
                                          kpts2,
                                          fm=fm,
                                          fs=fs,
                                          fnum=fnum,
                                          cmap=cmap,
                                          draw_lines=draw_lines,
                                          ori=True)
    ph.set_plotdat(ax, 'viztype', 'matches')
    ph.set_plotdat(ax, 'simp', simp)
    ph.set_plotdat(ax, 'key', key)
    title = title + '\n num=%d, sum=%.2f' % (len(fm), sum(fs))
    pt.set_title(title)
    return ax, xywh1, xywh2
Example #8
 def draw_shape(x, y):
     keys = ['mode', 'ix', 'iy', 'color', 'radius']
     mode, ix, iy, color, radius = ut.dict_take(globals_, keys)
     if mode == 'rect':
         cv2.rectangle(mask, (ix, iy), (x, y), color, -1)
     elif mode == 'circ':
         cv2.circle(mask, (x, y), radius, color, -1)
Example #9
 def annot_to_class_feats2(aid, aid2_nid, top=None):
     pair_list = []
     score_list = []
     nexemplar_list = []
     for nid in unique_nids:
         label = (aid2_nid[aid] == nid)
         num_exemplars = nid2_nexemp.get(nid, 0)
         if num_exemplars == 0:
             continue
         params = toy_params[label]
         mu, sigma = ut.dict_take(params, ['mu', 'sigma'])
         score_ = rng.normal(mu, sigma, size=num_exemplars).max()
         score = np.clip(score_, 0, np.inf)
         pair_list.append((aid, nid))
         score_list.append(score)
         nexemplar_list.append(num_exemplars)
     rank_list = ut.argsort(score_list, reverse=True)
     feat_list = np.array([score_list, rank_list, nexemplar_list]).T
     sortx = np.argsort(rank_list)
     feat_list = feat_list.take(sortx, axis=0)
     pair_list = np.array(pair_list).take(sortx, axis=0)
     if top is not None:
         feat_list = feat_list[:top]
         pair_list = pair_list[0:top]
     return pair_list, feat_list
 def metric(aidx1, aidx2, hidden_nids=hidden_nids, toy_params=toy_params):
     if aidx1 == aidx2:
         return 0
     rng = np.random.RandomState(int(aidx1 + aidx2))
     same = hidden_nids[int(aidx1)] == hidden_nids[int(aidx2)]
     mu, sigma = ut.dict_take(toy_params[same], ['mu', 'sigma'])
     return np.clip(rng.normal(mu, sigma), 0, np.inf)
Example #11
def make_match_interaction(matches, metadata, type_='RAT+SV', **kwargs):
    import plottool.interact_matches
    #import plottool as pt
    fm, fs = matches[type_][0:2]
    H1 = metadata['H_' + type_.split('+')[0]]
    #fm, fs = matches['RAT'][0:2]
    annot1 = metadata['annot1']
    annot2 = metadata['annot2']
    rchip1, kpts1, vecs1 = ut.dict_take(annot1, ['rchip', 'kpts', 'vecs'])
    rchip2, kpts2, vecs2 = ut.dict_take(annot2, ['rchip', 'kpts', 'vecs'])
    #pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs)
    fsv = fs[:, None]
    interact = plottool.interact_matches.MatchInteraction2(
        rchip1, rchip2, kpts1, kpts2, fm, fs, fsv, vecs1, vecs2, H1=H1,
        **kwargs)
    return interact
Example #12
def benchmark_knn():
    r"""
    CommandLine:
        python ~/code/ibeis/ibeis/algo/hots/tests/bench.py benchmark_knn --profile

    Example:
        >>> # DISABLE_DOCTEST
        >>> from bench import *  # NOQA
        >>> result = benchmark_knn()
        >>> print(result)
    """
    from ibeis.algo.hots import _pipeline_helpers as plh
    from ibeis.algo.hots.pipeline import nearest_neighbors
    import ibeis
    verbose = True
    qreq_ = ibeis.testdata_qreq_(
        defaultdb='PZ_PB_RF_TRAIN',
        t='default:K=3,requery=True,can_match_samename=False',
        a='default:qsize=100',
        verbose=1)
    locals_ = plh.testrun_pipeline_upto(qreq_, 'nearest_neighbors')
    Kpad_list, impossible_daids_list = ut.dict_take(
        locals_, ['Kpad_list', 'impossible_daids_list'])
    nns_list1 = nearest_neighbors(qreq_,
                                  Kpad_list,
                                  impossible_daids_list,
                                  verbose=verbose)
Example #13
 def extract_connected_compoments(graph):
     import scipy.sparse as spsparse
     import utool as ut
     # I think this is how extraction is done?
     # only returns edge info
     # so singletons are not represented
     shape = graph.shape
     csr_graph = graph.tocsr()
     num_components, labels = spsparse.csgraph.connected_components(csr_graph)
     unique_labels = np.unique(labels)
     group_flags_list = [labels == groupid for groupid in unique_labels]
     subgraph_list = []
     for label, group_flags in zip(unique_labels, group_flags_list):
         num_members = group_flags.sum()
         ixs = list(range(num_members))
         if num_members == 0:
             continue
         group_rowix, group_cols = csr_graph[group_flags, :].nonzero()
         if len(group_cols) == 0:
             continue
         ix2_row = dict(zip(ixs, np.nonzero(group_flags)[0]))
         group_rows = ut.dict_take(ix2_row, group_rowix)
         component = (group_rows, group_cols.tolist())
         data = csr_graph[component].tolist()[0]
         subgraph = spsparse.coo_matrix((data, component), shape=shape)
         subgraph_list.append(subgraph)
     #assert len(subgraph_list) == num_components, 'bad impl'
     return subgraph_list
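
For reference, the component-labeling step that the snippet above builds on can be exercised on its own with a tiny sparse graph; this sketch only shows scipy's connected_components call, not the per-component submatrix extraction:

import numpy as np
import scipy.sparse as spsparse
from scipy.sparse import csgraph

# Two disconnected edges: (0, 1) and (2, 3).
rows, cols = np.array([0, 2]), np.array([1, 3])
graph = spsparse.coo_matrix((np.ones(2), (rows, cols)), shape=(4, 4))
num_components, labels = csgraph.connected_components(graph.tocsr(), directed=False)
# num_components == 2, labels == array([0, 0, 1, 1])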
Example #14
        def on_click_inside(self, event, ax):
            self.ax = ax
            self.event = event
            event = self.event
            #print(ax)
            #print(event.x)
            #print(event.y)
            aids = list(self.pos.keys())
            pos_list = ut.dict_take(self.pos, aids)
            #x = 10
            #y = 10
            import numpy as np  # NOQA
            x, y = event.xdata, event.ydata
            point = np.array([x, y])
            pos_list = np.array(pos_list)
            index, dist = vt.closest_point(point, pos_list, distfunc=vt.L2)
            print('dist = %r' % (dist,))

            aid = aids[index]
            context_shown = False

            CHECK_PAIR = True
            if CHECK_PAIR:
                if self.event.button == 3 and not context_shown:
                    if len(self.selected_aids) != 2:
                        print('This function only works if exactly 2 are selected')
                    else:
                        from ibeis.gui import inspect_gui
                        context_shown = True
                        aid1, aid2 = (self.selected_aids)
                        qres = None
                        qreq_ = None
                        options = inspect_gui.get_aidpair_context_menu_options(ibs, aid1, aid2, qres, qreq_=qreq_)
                        self.show_popup_menu(options, event)

            SELECT_ANNOT = dist < 35
            if SELECT_ANNOT:
                print(ut.obj_str(ibs.get_annot_info(aid, default=True, name=False, gname=False)))

                if self.event.button == 1:
                    self.toggle_selected_aid(aid)

                if self.event.button == 3 and not context_shown:
                    # right click
                    from ibeis.viz.interact import interact_chip
                    context_shown = True
                    #refresh_func = functools.partial(viz.show_name, ibs, nid, fnum=fnum, sel_aids=sel_aids)
                    refresh_func = None
                    config2_ = None
                    options = interact_chip.build_annot_context_options(
                        ibs, aid, refresh_func=refresh_func,
                        with_interact_name=False,
                        config2_=config2_)
                    self.show_popup_menu(options, event)
            else:
                if self.event.button == 3:
                    options = [
                        ('Toggle images', self.toggle_images),
                    ]
                    self.show_popup_menu(options, event)
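
The click handler above picks the annotation whose layout position is nearest the click (vt.closest_point is assumed to be an L2 nearest-point search). A plain numpy sketch of that selection step, with hypothetical positions:

import numpy as np

pos = {4: (10.0, 10.0), 5: (50.0, 80.0)}     # hypothetical aid -> layout position
aids = list(pos.keys())
pos_arr = np.array([pos[aid] for aid in aids])
point = np.array([12.0, 11.0])               # click location (event.xdata, event.ydata)
dists = np.sqrt(((pos_arr - point) ** 2).sum(axis=1))
index = int(dists.argmin())
aid, dist = aids[index], dists[index]
# aid == 4; the snippet only treats it as selected when dist < 35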
Example #15
 def metric(aidx1, aidx2, hidden_nids=hidden_nids, toy_params=toy_params):
     if aidx1 == aidx2:
         return 0
     rng = np.random.RandomState(int(aidx1 + aidx2))
     same = hidden_nids[int(aidx1)] == hidden_nids[int(aidx2)]
     mu, sigma = ut.dict_take(toy_params[same], ['mu', 'sigma'])
     return np.clip(rng.normal(mu, sigma), 0, np.inf)
Example #16
def show_single_coverage_mask(qreq_, cm, weight_mask_m, weight_mask, daids, fnum=None):
    import plottool as pt
    from ibeis import viz
    fnum = pt.ensure_fnum(fnum)
    idx_list = ut.dict_take(cm.daid2_idx, daids)
    nPlots = len(idx_list) + 1
    nRows, nCols = pt.get_square_row_cols(nPlots)
    pnum_ = pt.make_pnum_nextgen(nRows, nCols)
    pt.figure(fnum=fnum, pnum=(1, 2, 1))
    # Draw coverage masks with bbox
    # <FlipHack>
    #weight_mask_m = np.fliplr(np.flipud(weight_mask_m))
    #weight_mask = np.fliplr(np.flipud(weight_mask))
    # </FlipHack>
    stacked_weights, offset_tup, sf_tup = vt.stack_images(weight_mask_m, weight_mask, return_sf=True)
    (woff, hoff) = offset_tup[1]
    wh1 = weight_mask_m.shape[0:2][::-1]
    wh2 = weight_mask.shape[0:2][::-1]
    pt.imshow(255 * (stacked_weights), fnum=fnum, pnum=pnum_(0), title='(query image) What did match vs what should match')
    pt.draw_bbox((   0,    0) + wh1, bbox_color=(0, 0, 1))
    pt.draw_bbox((woff, hoff) + wh2, bbox_color=(0, 0, 1))
    # Get contributing matches
    qaid = cm.qaid
    daid_list = daids
    fm_list = ut.take(cm.fm_list, idx_list)
    fs_list = ut.take(cm.fs_list, idx_list)
    # Draw matches
    for px, (daid, fm, fs) in enumerate(zip(daid_list, fm_list, fs_list), start=1):
        viz.viz_matches.show_matches2(qreq_.ibs, qaid, daid, fm, fs,
                                      draw_pts=False, draw_lines=True,
                                      draw_ell=False, fnum=fnum, pnum=pnum_(px),
                                      darken=.5)
    coverage_score = score_matching_mask(weight_mask_m, weight_mask)
    pt.set_figtitle('score=%.4f' % (coverage_score,))
Example #17
    def _update_state_gco(model,
                          weight_key='cut_weight',
                          name_label_key='name_label'):
        import networkx as nx
        # Get nx graph properties
        external_nodes = sorted(list(model.graph.nodes()))
        external_edges = list(model.graph.edges())
        edge_to_weights = nx.get_edge_attributes(model.graph, weight_key)
        node_to_labeling = nx.get_node_attributes(model.graph, name_label_key)
        edge_weights = ut.dict_take(edge_to_weights, external_edges, 0)
        external_labeling = [
            node_to_labeling.get(node, -node) for node in external_nodes
        ]
        # Map to internal ids for pygco
        internal_nodes = ut.rebase_labels(external_nodes)
        extern2_intern = dict(zip(external_nodes, internal_nodes))
        internal_edges = ut.unflat_take(extern2_intern, external_edges)
        internal_labeling = ut.rebase_labels(external_labeling)

        internal_labeling = np.array(internal_labeling)
        internal_edges = np.array(internal_edges)

        n_nodes = len(internal_nodes)
        # Model state
        model.n_nodes = n_nodes
        model.extern2_intern = extern2_intern
        model.intern2_extern = ut.invert_dict(extern2_intern)
        model.edges = internal_edges
        model.edge_weights = edge_weights
        # Model parameters
        model.labeling = np.zeros(model.n_nodes, dtype=np.int32)
        model._update_labels(labeling=internal_labeling)
        model._update_weights()
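
The key preprocessing step above is rebasing arbitrary external node ids onto contiguous internal ids so pygco can index arrays by node (ut.rebase_labels / ut.unflat_take are assumed utool helpers for exactly this). The same remapping with plain dicts:

external_nodes = [3, 7, 9]
external_edges = [(3, 7), (7, 9)]
# Contiguous internal ids in node order.
extern2_intern = {node: idx for idx, node in enumerate(external_nodes)}
internal_edges = [(extern2_intern[u], extern2_intern[v]) for u, v in external_edges]
# extern2_intern == {3: 0, 7: 1, 9: 2}; internal_edges == [(0, 1), (1, 2)]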
Example #18
 def annot_to_class_feats2(aid, aid2_nid, top=None):
     pair_list = []
     score_list = []
     nexemplar_list = []
     for nid in unique_nids:
         label = (aid2_nid[aid] == nid)
         num_exemplars = nid2_nexemp.get(nid, 0)
         if num_exemplars == 0:
             continue
         params = toy_params[label]
         mu, sigma = ut.dict_take(params, ['mu', 'sigma'])
         score_ = rng.normal(mu, sigma, size=num_exemplars).max()
         score = np.clip(score_, 0, np.inf)
         pair_list.append((aid, nid))
         score_list.append(score)
         nexemplar_list.append(num_exemplars)
     rank_list = ut.argsort(score_list, reverse=True)
     feat_list = np.array([score_list, rank_list, nexemplar_list]).T
     sortx = np.argsort(rank_list)
     feat_list = feat_list.take(sortx, axis=0)
     pair_list = np.array(pair_list).take(sortx, axis=0)
     if top is not None:
         feat_list = feat_list[:top]
         pair_list = pair_list[0:top]
     return pair_list, feat_list
Example #19
def testdata_pre_baselinefilter(defaultdb='testdb1', qaid_list=None, daid_list=None, codename='vsmany'):
    cfgdict = dict(codename=codename)
    ibs, qreq_ = get_pipeline_testdata(
        qaid_list=qaid_list, daid_list=daid_list, defaultdb=defaultdb, cfgdict=cfgdict)
    locals_ = testrun_pipeline_upto(qreq_, 'baseline_neighbor_filter')
    nns_list, impossible_daids_list = ut.dict_take(locals_, ['nns_list', 'impossible_daids_list'])
    return qreq_, nns_list, impossible_daids_list
Example #20
    def new_query_request(
        cls,
        qaid_list,
        daid_list,
        qparams,
        qresdir,
        ibs,
        query_config2_,
        data_config2_,
        _indexer_request_params,
        custom_nid_lookup=None,
    ):
        """
        old way of calling new

        Args:
            qaid_list (list):
            daid_list (list):
            qparams (QueryParams):  query hyper-parameters
            qresdir (str):
            ibs (wbia.IBEISController):  image analysis api
            _indexer_request_params (dict):

        Returns:
            wbia.QueryRequest
        """
        qreq_ = cls()
        qreq_.ibs = ibs
        qreq_.qparams = qparams  # Parameters relating to pipeline execution
        qreq_.query_config2_ = query_config2_
        qreq_.data_config2_ = data_config2_
        qreq_.qresdir = qresdir
        qreq_._indexer_request_params = _indexer_request_params
        qreq_.set_external_daids(daid_list)
        qreq_.set_external_qaids(qaid_list)

        # Load name information now, so it is OK if names later change in the database.
        # I'm not 100% happy with how this works.
        qreq_.unique_aids = np.union1d(qreq_.qaids, qreq_.daids)
        qreq_.unique_aids.sort()

        # Internal caching objects and views
        _annots = ibs.annots(qreq_.unique_aids)
        # I think the views copy the original cache
        qreq_._unique_annots = _annots.view(_annots.aids)
        qreq_._unique_dannots = qreq_._unique_annots.view(sorted(qreq_.daids))

        qreq_.aid_to_idx = ut.make_index_lookup(qreq_.unique_aids)
        if custom_nid_lookup is None:
            qreq_.unique_nids = ibs.get_annot_nids(qreq_.unique_aids)
        else:
            qreq_.unique_nids = ut.dict_take(custom_nid_lookup, qreq_.unique_aids)
        qreq_.unique_nids = np.array(qreq_.unique_nids)

        # qreq_.nid_to_groupuuid = qreq_._make_namegroup_uuids()
        # qreq_.dnid_to_groupuuid = qreq_._make_namegroup_data_uuids()
        qreq_.nid_to_grouphash = qreq_._make_namegroup_hashes()
        qreq_.dnid_to_grouphash = qreq_._make_namegroup_data_hashes()
        return qreq_
Example #21
def nx_get_default_node_attributes(graph, key, default=None):
    import networkx as nx
    import utool as ut
    node_list = list(graph.nodes())
    partial_attr_dict = nx.get_node_attributes(graph, key)
    attr_list = ut.dict_take(partial_attr_dict, node_list, default)
    attr_dict = dict(zip(node_list, attr_list))
    return attr_dict
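
nx.get_node_attributes only returns entries for nodes that actually carry the attribute, which is why the snippet backfills a default for every node. A small self-contained usage sketch:

import networkx as nx

graph = nx.Graph()
graph.add_node(1, color='red')
graph.add_nodes_from([2, 3])
partial = nx.get_node_attributes(graph, 'color')          # {1: 'red'}
full = {node: partial.get(node, 'gray') for node in graph.nodes()}
# full == {1: 'red', 2: 'gray', 3: 'gray'}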
 def compute_fgweight_mask(annot):
     keys = ["kpts", "chipshape", "fgweights"]
     kpts, chipshape, fgweights = ut.dict_take(annot.__dict__, keys)
     chipsize = chipshape[0:2][::-1]
     fgweight_mask = coverage_kpts.make_kpts_coverage_mask(
         kpts, chipsize, fgweights, mode="max", resize=True, return_patch=False
     )
     annot.fgweight_mask = fgweight_mask
 def compute_dstncvs_mask(annot):
     keys = ["kpts", "chipshape", "dstncvs"]
     kpts, chipshape, dstncvs = ut.dict_take(annot.__dict__, keys)
     chipsize = chipshape[0:2][::-1]
     dstncvs_mask = coverage_kpts.make_kpts_coverage_mask(
         kpts, chipsize, dstncvs, mode="max", resize=True, return_patch=False
     )
     annot.dstncvs_mask = dstncvs_mask
Example #24
def testdata_pre_baselinefilter(defaultdb='testdb1', qaid_list=None, daid_list=None, codename='vsmany'):
    cfgdict = dict(codename=codename)
    import ibeis
    p = 'default' + ut.get_cfg_lbl(cfgdict)
    qreq_ = ibeis.testdata_qreq_(defaultdb=defaultdb, default_qaids=qaid_list, default_daids=daid_list, p=p)
    locals_ = testrun_pipeline_upto(qreq_, 'baseline_neighbor_filter')
    nns_list, impossible_daids_list = ut.dict_take(locals_, ['nns_list', 'impossible_daids_list'])
    return qreq_, nns_list, impossible_daids_list
Example #25
def get_annotmatch_rowids_from_aid2(ibs, aid2_list, eager=True, nInput=None,
                                    force_method=None):
    """
    # This one is slow because aid2 is the second part of the index

    TODO autogenerate

    Returns a list of the aids that were reviewed as candidate matches to the input aid

    aid_list = ibs.get_valid_aids()

    CommandLine:
        python -m ibeis.annotmatch_funcs --exec-get_annotmatch_rowids_from_aid2 --show

    Example2:
        >>> # TIME TEST
        >>> # setup_pzmtest_subgraph()
        >>> from ibeis.annotmatch_funcs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_Master1')
        >>> aid2_list = ibs.get_valid_aids()
        >>> func_list = [
        >>>     partial(ibs.get_annotmatch_rowids_from_aid2, force_method=1),
        >>>     partial(ibs.get_annotmatch_rowids_from_aid2, force_method=2),
        >>> ]
        >>> num_list = [1, 10, 50, 100, 300, 325, 350, 400, 500]
        >>> def args_list(count, aid2_list=aid2_list, num_list=num_list):
        >>>    return (aid2_list[0:num_list[count]],)
        >>> searchkw = dict(
        >>>     func_labels=['sql', 'numpy'],
        >>>     count_to_xtick=lambda count, args: len(args[0]),
        >>>     title='Timings of get_annotmatch_rowids_from_aid2',
        >>> )
        >>> niters = len(num_list)
        >>> time_result = ut.gridsearch_timer(func_list, args_list, niters, **searchkw)
        >>> time_result['plot_timings']()
        >>> ut.show_if_requested()
    """
    from ibeis.control import _autogen_annotmatch_funcs
    if force_method != 2 and (nInput is None or nInput < 128 or force_method == 1):
        colnames = (_autogen_annotmatch_funcs.ANNOTMATCH_ROWID,)
        # FIXME: col_rowid is not correct
        params_iter = zip(aid2_list)
        andwhere_colnames = [_autogen_annotmatch_funcs.ANNOT_ROWID2]
        annotmatch_rowid_list = ibs.db.get_where2(
            ibs.const.ANNOTMATCH_TABLE, colnames, params_iter, andwhere_colnames,
            eager=eager, nInput=nInput, unpack_scalars=False)
    elif force_method == 2:
        import vtool as vt
        all_annotmatch_rowids = np.array(ibs._get_all_annotmatch_rowids())
        aids2 = np.array(ibs.get_annotmatch_aid2(all_annotmatch_rowids))
        unique_aid2, groupxs2 = vt.group_indices(aids2)
        rowids2_ = vt.apply_grouping(all_annotmatch_rowids, groupxs2)
        rowids2_ = [_.tolist() for _ in rowids2_]
        maping2 = ut.defaultdict(list, zip(unique_aid2, rowids2_))
        annotmatch_rowid_list = ut.dict_take(maping2, aid2_list)
    annotmatch_rowid_list = list(map(sorted, annotmatch_rowid_list))
    return annotmatch_rowid_list
Example #26
def _get_parent_rowids(depc, table, rowid_dict):
    # FIXME to handle multiedges correctly
    parent_rowidsT = ut.dict_take(rowid_dict,
                                  table.parent_id_tablenames)
    if table.ismulti:
        parent_rowids = parent_rowidsT
    else:
        parent_rowids = ut.list_transpose(parent_rowidsT)
    return parent_rowids
Example #27
def set_name_sex_text(ibs, name_rowid_list, name_sex_text_list):
    r"""

    RESTful:
        Method: PUT
        URL:    /api/name/sex_text/
    """
    name_sex_list = ut.dict_take(const.SEX_TEXT_TO_INT, name_sex_text_list)
    return ibs.set_name_sex(name_rowid_list, name_sex_list)
def get_aidpair_tags(ibs, aid1_list, aid2_list, directed=True):
    r"""
    Args:
        ibs (IBEISController):  wbia controller object
        aid1_list (list):
        aid2_list (list):
        directed (bool): (default = True)

    Returns:
        list: tags_list

    CommandLine:
        python -m wbia.tag_funcs --exec-get_aidpair_tags --db PZ_Master1 --tags Hard interesting

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.tag_funcs import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> has_any = ut.get_argval('--tags', type_=list, default=None)
        >>> min_num = ut.get_argval('--min_num', type_=int, default=1)
        >>> aid_pairs = filter_aidpairs_by_tags(ibs, has_any=has_any, min_num=1)
        >>> aid1_list = aid_pairs.T[0]
        >>> aid2_list = aid_pairs.T[1]
        >>> undirected_tags = get_aidpair_tags(ibs, aid1_list, aid2_list, directed=False)
        >>> tagged_pairs = list(zip(aid_pairs.tolist(), undirected_tags))
        >>> print(ut.repr2(tagged_pairs))
        >>> tag_dict = ut.groupby_tags(tagged_pairs, undirected_tags)
        >>> print(ut.repr2(tag_dict, nl=2))
        >>> print(ut.repr2(ut.map_dict_vals(len, tag_dict)))
    """
    aid_pairs = np.vstack([aid1_list, aid2_list]).T
    if directed:
        annotmatch_rowid = ibs.get_annotmatch_rowid_from_superkey(
            aid_pairs.T[0], aid_pairs.T[1])
        tags_list = ibs.get_annotmatch_case_tags(annotmatch_rowid)
    else:
        annotmatch_rowid = ibs.get_annotmatch_rowid_from_undirected_superkey(
            aid_pairs.T[0], aid_pairs.T[1])
        tags_list = ibs.get_annotmatch_case_tags(annotmatch_rowid)
        if False:
            expanded_aid_pairs = np.vstack([aid_pairs, aid_pairs[:, ::-1]])
            expanded_annotmatch_rowid = ibs.get_annotmatch_rowid_from_superkey(
                expanded_aid_pairs.T[0], expanded_aid_pairs.T[1])
            expanded_edgeids = vt.get_undirected_edge_ids(expanded_aid_pairs)
            unique_edgeids, groupxs = vt.group_indices(expanded_edgeids)
            expanded_tags_list = ibs.get_annotmatch_case_tags(
                expanded_annotmatch_rowid)
            grouped_tags = vt.apply_grouping(
                np.array(expanded_tags_list, dtype=object), groupxs)
            undirected_tags = [
                list(set(ut.flatten(tags))) for tags in grouped_tags
            ]
            edgeid2_tags = dict(zip(unique_edgeids, undirected_tags))
            input_edgeids = expanded_edgeids[:len(aid_pairs)]
            tags_list = ut.dict_take(edgeid2_tags, input_edgeids)
    return tags_list
 def constrained_match(annot, match_SV):
     cfgdict = {}
     annot1 = match_SV.annot1
     assert annot1 is annot
     annot2 = match_SV.annot2
     keys = ["kpts", "vecs"]
     kpts1, vecs1 = ut.dict_take(annot1.__dict__, keys)
     kpts2, vecs2 = ut.dict_take(annot2.__dict__, keys)
     dlen_sqrd2 = annot2.dlen_sqrd
     H_RAT = match_SV.H
     nexttup, next_meta = spatially_constrianed_matcher_(kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, H_RAT, cfgdict)
     (fm_SC, fs_SC, fm_SCR, fs_SCR, fm_SCRSV, fs_SCRSV, H_SCR) = nexttup
     (fm_norm_SC, fm_norm_SCR, fm_norm_SCRSV) = next_meta
     match_SC = AnnotMatch(annot1, annot2, fm_SC, fs_SC, "SC", fm_norm_SC)  # NOQA
     match_SCR = AnnotMatch(annot1, annot2, fm_SCR, fs_SCR, "SCR", fm_norm_SCR)  # NOQA
     match_SCRSV = AnnotMatch(annot1, annot2, fm_SCRSV, fs_SCRSV, "SCRSV", fm_norm_SCRSV)
     match_SCRSV.H = H_SCR
     return match_SC, match_SCR, match_SCRSV
 def baseline_match(annot, annot2):
     cfgdict = {}
     annot1 = annot
     keys = ['kpts', 'vecs']
     kpts1, vecs1 = ut.dict_take(annot1.__dict__, keys)
     kpts2, vecs2 = ut.dict_take(annot2.__dict__, keys)
     dlen_sqrd2 = annot2.dlen_sqrd
     basetup, base_meta = baseline_vsone_ratio_matcher_(
         kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict)
     (fm_ORIG, fs_ORIG, fm_RAT, fs_RAT, fm_SV, fs_SV, H_RAT) = basetup
     (fm_norm_RAT, fm_norm_SV) = base_meta
     match_ORIG = AnnotMatch(annot1, annot2, fm_ORIG, fs_ORIG,
                             'ORIG')  # NOQA
     match_RAT = AnnotMatch(annot1, annot2, fm_RAT, fs_RAT, 'RAT',
                            fm_norm_RAT)  # NOQA
     match_SV = AnnotMatch(annot1, annot2, fm_SV, fs_SV, 'SV', fm_norm_SV)
     match_SV.H = H_RAT
     return match_ORIG, match_RAT, match_SV
Example #31
def get_name_sex_text(ibs, name_rowid_list, eager=True, nInput=None):
    r"""

    RESTful:
        Method: GET
        URL:    /api/name/sex_text/
    """
    name_sex_list = ibs.get_name_sex(name_rowid_list, eager=eager, nInput=nInput)
    name_sex_text_list = ut.dict_take(const.SEX_INT_TO_TEXT, name_sex_list)
    return name_sex_text_list
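
Both this getter and the setter in Example #27 use the same vectorized table lookup: map a list of codes through a constant dict. The exact contents of const.SEX_INT_TO_TEXT are not shown here, so the mapping below is a hypothetical stand-in:

# Hypothetical stand-in for const.SEX_INT_TO_TEXT / const.SEX_TEXT_TO_INT.
SEX_INT_TO_TEXT = {0: 'Female', 1: 'Male', 2: 'Unknown'}
SEX_TEXT_TO_INT = {text: code for code, text in SEX_INT_TO_TEXT.items()}

name_sex_list = [1, 0, 2]
name_sex_text_list = [SEX_INT_TO_TEXT[code] for code in name_sex_list]
# ['Male', 'Female', 'Unknown']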
Example #32
def config_graph_subattrs(cfg, depc):
    # TODO: if this hack is fully completed, we need a way of getting the
    # full config belonging to both chip + feat
    # cfg = request.config.feat_cfg
    import networkx as netx
    tablename = ut.invert_dict(depc.configclass_dict)[cfg.__class__]
    #tablename = cfg.get_config_name()
    ancestors = netx.dag.ancestors(depc.graph, tablename)
    subconfigs_ = ut.dict_take(depc.configclass_dict, ancestors, None)
    subconfigs = ut.filter_Nones(subconfigs_)  # NOQA
Example #33
def testdata_pre_weight_neighbors(defaultdb='testdb1', qaid_list=[1, 2], daid_list=None, codename='vsmany', cfgdict=None):
    if cfgdict is None:
        cfgdict = dict(codename=codename)
    ibs, qreq_ = get_pipeline_testdata(
        qaid_list=qaid_list, daid_list=daid_list, defaultdb=defaultdb, cfgdict=cfgdict)
    locals_ = testrun_pipeline_upto(qreq_, 'weight_neighbors')
    nns_list, nnvalid0_list = ut.dict_take(locals_, ['nns_list', 'nnvalid0_list'])

    # qreq_, args = testdata_pre('weight_neighbors', defaultdb=defaultdb, p=['default:bar_l2_on=True,fg_on=False'])
    return ibs, qreq_, nns_list, nnvalid0_list
Example #34
def config_graph_subattrs(cfg, depc):
    # TODO: if this hack is fully completed, we need a way of getting the
    # full config belonging to both chip + feat
    # cfg = request.config.feat_cfg
    import networkx as netx
    tablename = ut.invert_dict(depc.configclass_dict)[cfg.__class__]
    #tablename = cfg.get_config_name()
    ancestors = netx.dag.ancestors(depc.graph, tablename)
    subconfigs_ = ut.dict_take(depc.configclass_dict, ancestors, None)
    subconfigs = ut.filter_Nones(subconfigs_)  # NOQA
Example #35
def augment_graph_mst(ibs, graph):
    import wbia.plottool as pt

    # spantree_aids1_ = []
    # spantree_aids2_ = []
    # Add edges between all names
    aid_list = list(graph.nodes())
    aug_digraph = graph.copy()
    # Change all weights in initial graph to be small (likely to be part of mst)
    nx.set_edge_attributes(aug_digraph, name='weight', values=0.0001)
    aids1, aids2 = get_name_rowid_edges_from_aids(ibs, aid_list)
    if False:
        # Weight edges in the MST based on tentative distances
        # Get tentative node positions
        initial_pos = pt.get_nx_layout(graph.to_undirected(),
                                       'graphviz')['node_pos']
        # initial_pos = pt.get_nx_layout(graph.to_undirected(), 'agraph')['node_pos']
        edge_pts1 = ut.dict_take(initial_pos, aids1)
        edge_pts2 = ut.dict_take(initial_pos, aids2)
        edge_pts1 = vt.atleast_nd(np.array(edge_pts1, dtype=np.int32), 2)
        edge_pts2 = vt.atleast_nd(np.array(edge_pts2, dtype=np.int32), 2)
        edge_weights = vt.L2(edge_pts1, edge_pts2)
    else:
        edge_weights = [1.0] * len(aids1)
    # Create implicit fully connected (by name) graph
    aug_edges = [(a1, a2, {
        'weight': w
    }) for a1, a2, w in zip(aids1, aids2, edge_weights)]
    aug_digraph.add_edges_from(aug_edges)

    # Determine which edges need to be added to
    # make original graph connected by name
    aug_graph = aug_digraph.to_undirected()
    for cc_sub_graph in connected_component_subgraphs(aug_graph):
        mst_sub_graph = nx.minimum_spanning_tree(cc_sub_graph)
        mst_edges = mst_sub_graph.edges()
        for edge in mst_edges:
            redge = edge[::-1]
            # attr_dict = {'color': pt.DARK_ORANGE[0:3]}
            attr_dict = {'color': pt.BLACK[0:3]}
            if not (graph.has_edge(*edge) or graph.has_edge(*redge)):
                graph.add_edge(*redge, attr_dict=attr_dict)
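
The essence of augment_graph_mst is: make existing edges very cheap, add candidate name-connecting edges, take a minimum spanning tree, and keep only the MST edges the original graph was missing. A minimal sketch on a toy graph (nodes and weights invented for illustration):

import networkx as nx

graph = nx.Graph([(1, 2), (3, 4)])                       # two disconnected pairs
aug_graph = nx.Graph()
aug_graph.add_edges_from(graph.edges(), weight=0.0001)   # existing edges stay cheap
aug_graph.add_edge(2, 3, weight=1.0)                     # candidate bridging edge
mst = nx.minimum_spanning_tree(aug_graph)
new_edges = [edge for edge in mst.edges() if not graph.has_edge(*edge)]
# new_edges == [(2, 3)]: only the bridging edge needs to be added back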
Example #36
def convert_smkmatch_to_chipmatch(qaid2_chipmatch, qaid2_scores):
    """ function to fix oldstyle chipmatches into newstyle that is accepted by the pipeline """
    from ibeis.algo.hots import chip_match
    qaid_list = list(six.iterkeys(qaid2_chipmatch))
    score_smk_list = ut.dict_take(qaid2_scores, qaid_list)
    chipmatch_smk_list = ut.dict_take(qaid2_chipmatch, qaid_list)
    aid2_H = None

    def aid2_fs_to_fsv(aid2_fs):
        return {aid: fs[:, None] for aid, fs in six.iteritems(aid2_fs)}

    cmtup_old_list = [
        (aid2_fm, aid2_fs_to_fsv(aid2_fs), aid2_fk, aid2_score, aid2_H)
        for (aid2_fm, aid2_fs, aid2_fk), aid2_score in zip(chipmatch_smk_list, score_smk_list)
    ]
    cm_list = [
        chip_match.ChipMatch.from_cmtup_old(cmtup_old, qaid=qaid)
        for qaid, cmtup_old in zip(qaid_list, cmtup_old_list)
    ]
    return cm_list
def get_part_quality_texts(ibs, part_rowid_list):
    r"""
    Auto-docstr for 'get_part_quality_texts'

    RESTful:
        Method: GET
        URL:    /api/part/quality/text/
    """
    quality_list = ibs.get_part_qualities(part_rowid_list)
    quality_text_list = ut.dict_take(const.QUALITY_INT_TO_TEXT, quality_list)
    return quality_text_list
Example #38
def get_name_shortlist_aids(
    daid_list,
    dnid_list,
    annot_score_list,
    name_score_list,
    nid2_nidx,
    nNameShortList,
    nAnnotPerName,
):
    r"""
    CommandLine:
        python -m wbia.algo.hots.scoring --test-get_name_shortlist_aids

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.algo.hots.scoring import *  # NOQA
        >>> daid_list        = np.array([11, 12, 13, 14, 15, 16, 17])
        >>> dnid_list        = np.array([21, 21, 21, 22, 22, 23, 24])
        >>> annot_score_list = np.array([ 6,  2,  3,  5,  6,  3,  2])
        >>> name_score_list  = np.array([ 8,  9,  5,  4])
        >>> nid2_nidx        = {21:0, 22:1, 23:2, 24:3}
        >>> nNameShortList, nAnnotPerName = 3, 2
        >>> args = (daid_list, dnid_list, annot_score_list, name_score_list,
        ...         nid2_nidx, nNameShortList, nAnnotPerName)
        >>> top_daids = get_name_shortlist_aids(*args)
        >>> result = str(top_daids)
        >>> print(result)
        [15, 14, 11, 13, 16]
    """
    unique_nids, groupxs = vt.group_indices(np.array(dnid_list))
    grouped_annot_scores = vt.apply_grouping(annot_score_list, groupxs)
    grouped_daids = vt.apply_grouping(np.array(daid_list), groupxs)
    # Ensure name score list is aligned with the unique_nids
    aligned_name_score_list = name_score_list.take(
        ut.dict_take(nid2_nidx, unique_nids))
    # Sort each group by the name score
    group_sortx = aligned_name_score_list.argsort()[::-1]
    _top_daid_groups = ut.take(grouped_daids, group_sortx)
    _top_annot_score_groups = ut.take(grouped_annot_scores, group_sortx)
    top_daid_groups = ut.listclip(_top_daid_groups, nNameShortList)
    top_annot_score_groups = ut.listclip(_top_annot_score_groups,
                                         nNameShortList)
    # Sort within each group by the annotation score
    top_daid_sortx_groups = [
        annot_score_group.argsort()[::-1]
        for annot_score_group in top_annot_score_groups
    ]
    top_sorted_daid_groups = vt.ziptake(top_daid_groups, top_daid_sortx_groups)
    top_clipped_daids = [
        ut.listclip(sorted_daid_group, nAnnotPerName)
        for sorted_daid_group in top_sorted_daid_groups
    ]
    top_daids = ut.flatten(top_clipped_daids)
    return top_daids
Example #39
def nx_topsort_rank(graph, nodes=None):
    """
    graph = inputs.exi_graph.reverse()
    nodes = flat_node_order_
    """
    import networkx as nx
    import utool as ut
    topsort = list(nx.topological_sort(graph))
    node_to_top_rank = ut.make_index_lookup(topsort)
    toprank = ut.dict_take(node_to_top_rank, nodes)
    return toprank
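
nx_topsort_rank maps each requested node to its position in a topological ordering of the graph. A tiny usage sketch with plain dict lookups in place of the utool helpers:

import networkx as nx

graph = nx.DiGraph([('a', 'b'), ('b', 'c'), ('a', 'c')])
topsort = list(nx.topological_sort(graph))               # ['a', 'b', 'c'] for this DAG
node_to_top_rank = {node: rank for rank, node in enumerate(topsort)}
toprank = [node_to_top_rank[node] for node in ['c', 'a']]
# toprank == [2, 0]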
 def compute_fgweight_mask(annot):
     keys = ['kpts', 'chipshape', 'fgweights']
     kpts, chipshape, fgweights = ut.dict_take(annot.__dict__, keys)
     chipsize = chipshape[0:2][::-1]
     fgweight_mask = coverage_kpts.make_kpts_coverage_mask(
         kpts,
         chipsize,
         fgweights,
         mode='max',
         resize=True,
         return_patch=False)
     annot.fgweight_mask = fgweight_mask
Example #41
 def pairwise_feature(aidx1, aidx2, all_nids=all_nids, toy_params=toy_params):
     if aidx1 == aidx2:
         score = -1
     else:
         #rng = np.random.RandomState(int((aidx1 + 13) * (aidx2 + 13)))
         nid1 = all_nids[int(aidx1)]
         nid2 = all_nids[int(aidx2)]
         params = toy_params[nid1 == nid2]
         mu, sigma = ut.dict_take(params, ['mu', 'sigma'])
         score_ = rng.normal(mu, sigma)
         score = np.clip(score_, 0, np.inf)
     return score
Example #42
def convert_smkmatch_to_chipmatch(qaid2_chipmatch, qaid2_scores):
    """ function to fix oldstyle chipmatches into newstyle that is accepted by the pipeline """
    from ibeis.algo.hots import chip_match
    qaid_list = list(six.iterkeys(qaid2_chipmatch))
    score_smk_list = ut.dict_take(qaid2_scores, qaid_list)
    chipmatch_smk_list = ut.dict_take(qaid2_chipmatch, qaid_list)
    aid2_H = None

    def aid2_fs_to_fsv(aid2_fs):
        return {aid: fs[:, None] for aid, fs in six.iteritems(aid2_fs)}

    cmtup_old_list = [
        (aid2_fm, aid2_fs_to_fsv(aid2_fs), aid2_fk, aid2_score, aid2_H)
        for (aid2_fm, aid2_fs,
             aid2_fk), aid2_score in zip(chipmatch_smk_list, score_smk_list)
    ]
    cm_list = [
        chip_match.ChipMatch.from_cmtup_old(cmtup_old, qaid=qaid)
        for qaid, cmtup_old in zip(qaid_list, cmtup_old_list)
    ]
    return cm_list
 def compute_dstncvs_mask(annot):
     keys = ['kpts', 'chipshape', 'dstncvs']
     kpts, chipshape, dstncvs = ut.dict_take(annot.__dict__, keys)
     chipsize = chipshape[0:2][::-1]
     dstncvs_mask = coverage_kpts.make_kpts_coverage_mask(
         kpts,
         chipsize,
         dstncvs,
         mode='max',
         resize=True,
         return_patch=False)
     annot.dstncvs_mask = dstncvs_mask
Example #44
def show_single_coverage_mask(qreq_,
                              cm,
                              weight_mask_m,
                              weight_mask,
                              daids,
                              fnum=None):
    import plottool as pt
    from ibeis import viz
    fnum = pt.ensure_fnum(fnum)
    idx_list = ut.dict_take(cm.daid2_idx, daids)
    nPlots = len(idx_list) + 1
    nRows, nCols = pt.get_square_row_cols(nPlots)
    pnum_ = pt.make_pnum_nextgen(nRows, nCols)
    pt.figure(fnum=fnum, pnum=(1, 2, 1))
    # Draw coverage masks with bbox
    # <FlipHack>
    #weight_mask_m = np.fliplr(np.flipud(weight_mask_m))
    #weight_mask = np.fliplr(np.flipud(weight_mask))
    # </FlipHack>
    stacked_weights, offset_tup, sf_tup = vt.stack_images(weight_mask_m,
                                                          weight_mask,
                                                          return_sf=True)
    (woff, hoff) = offset_tup[1]
    wh1 = weight_mask_m.shape[0:2][::-1]
    wh2 = weight_mask.shape[0:2][::-1]
    pt.imshow(255 * (stacked_weights),
              fnum=fnum,
              pnum=pnum_(0),
              title='(query image) What did match vs what should match')
    pt.draw_bbox((0, 0) + wh1, bbox_color=(0, 0, 1))
    pt.draw_bbox((woff, hoff) + wh2, bbox_color=(0, 0, 1))
    # Get contributing matches
    qaid = cm.qaid
    daid_list = daids
    fm_list = ut.take(cm.fm_list, idx_list)
    fs_list = ut.take(cm.fs_list, idx_list)
    # Draw matches
    for px, (daid, fm, fs) in enumerate(zip(daid_list, fm_list, fs_list),
                                        start=1):
        viz.viz_matches.show_matches2(qreq_.ibs,
                                      qaid,
                                      daid,
                                      fm,
                                      fs,
                                      draw_pts=False,
                                      draw_lines=True,
                                      draw_ell=False,
                                      fnum=fnum,
                                      pnum=pnum_(px),
                                      darken=.5)
    coverage_score = score_matching_mask(weight_mask_m, weight_mask)
    pt.set_figtitle('score=%.4f' % (coverage_score, ))
Example #45
def augment_graph_mst(ibs, graph):
    import plottool as pt
    #spantree_aids1_ = []
    #spantree_aids2_ = []
    # Add edges between all names
    aid_list = list(graph.nodes())
    aug_digraph = graph.copy()
    # Change all weights in initial graph to be small (likely to be part of mst)
    nx.set_edge_attributes(aug_digraph, 'weight', .0001)
    aids1, aids2 = get_name_rowid_edges_from_aids(ibs, aid_list)
    if False:
        # Weight edges in the MST based on tentative distances
        # Get tentative node positions
        initial_pos = pt.get_nx_layout(graph.to_undirected(), 'graphviz')['node_pos']
        #initial_pos = pt.get_nx_layout(graph.to_undirected(), 'agraph')['node_pos']
        edge_pts1 = ut.dict_take(initial_pos, aids1)
        edge_pts2 = ut.dict_take(initial_pos, aids2)
        edge_pts1 = vt.atleast_nd(np.array(edge_pts1, dtype=np.int32), 2)
        edge_pts2 = vt.atleast_nd(np.array(edge_pts2, dtype=np.int32), 2)
        edge_weights = vt.L2(edge_pts1, edge_pts2)
    else:
        edge_weights = [1.0] * len(aids1)
    # Create implicit fully connected (by name) graph
    aug_edges = [(a1, a2, {'weight': w})
                  for a1, a2, w in zip(aids1, aids2, edge_weights)]
    aug_digraph.add_edges_from(aug_edges)

    # Determine which edges need to be added to
    # make original graph connected by name
    aug_graph = aug_digraph.to_undirected()
    for cc_sub_graph in nx.connected_component_subgraphs(aug_graph):
        mst_sub_graph = nx.minimum_spanning_tree(cc_sub_graph)
        mst_edges = mst_sub_graph.edges()
        for edge in mst_edges:
            redge = edge[::-1]
            #attr_dict = {'color': pt.DARK_ORANGE[0:3]}
            attr_dict = {'color': pt.BLACK[0:3]}
            if not (graph.has_edge(*edge) or graph.has_edge(*redge)):
                graph.add_edge(*redge, attr_dict=attr_dict)
Example #46
def define_named_aid_cfgs():
    """
    Definitions for common aid configurations
    TODO: potentially move to experiment configs
    """
    from ibeis.expt import annotation_configs

    named_defaults_dict = ut.dict_take(annotation_configs.__dict__, annotation_configs.TEST_NAMES)
    named_qcfg_defaults = dict(zip(annotation_configs.TEST_NAMES, ut.get_list_column(named_defaults_dict, "qcfg")))
    named_dcfg_defaults = dict(zip(annotation_configs.TEST_NAMES, ut.get_list_column(named_defaults_dict, "dcfg")))
    alias_keys = annotation_configs.alias_keys
    named_cfg_dict = {"qcfg": named_qcfg_defaults, "dcfg": named_dcfg_defaults}
    return named_cfg_dict, alias_keys
 def constrained_match(annot, match_SV):
     cfgdict = {}
     annot1 = match_SV.annot1
     assert annot1 is annot
     annot2 = match_SV.annot2
     keys = ['kpts', 'vecs']
     kpts1, vecs1 = ut.dict_take(annot1.__dict__, keys)
     kpts2, vecs2 = ut.dict_take(annot2.__dict__, keys)
     dlen_sqrd2 = annot2.dlen_sqrd
     H_RAT = match_SV.H
     nexttup, next_meta = spatially_constrianed_matcher_(
         kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, H_RAT, cfgdict)
     (fm_SC, fs_SC, fm_SCR, fs_SCR, fm_SCRSV, fs_SCRSV, H_SCR) = nexttup
     (fm_norm_SC, fm_norm_SCR, fm_norm_SCRSV) = next_meta
     match_SC = AnnotMatch(annot1, annot2, fm_SC, fs_SC, 'SC',
                           fm_norm_SC)  # NOQA
     match_SCR = AnnotMatch(annot1, annot2, fm_SCR, fs_SCR, 'SCR',
                            fm_norm_SCR)  # NOQA
     match_SCRSV = AnnotMatch(annot1, annot2, fm_SCRSV, fs_SCRSV, 'SCRSV',
                              fm_norm_SCRSV)
     match_SCRSV.H = H_SCR
     return match_SC, match_SCR, match_SCRSV
def set_part_quality_texts(ibs, part_rowid_list, quality_text_list):
    r"""
    Auto-docstr for 'set_part_quality_texts'

    RESTful:
        Method: PUT
        URL:    /api/part/quality/text/
    """
    if not ut.isiterable(part_rowid_list):
        part_rowid_list = [part_rowid_list]
    if isinstance(quality_text_list, six.string_types):
        quality_text_list = [quality_text_list]
    quality_list = ut.dict_take(const.QUALITY_TEXT_TO_INT, quality_text_list)
    ibs.set_part_qualities(part_rowid_list, quality_list)
Example #49
    def get_ancestor_rowids(depc, tablename, root_rowids, config=None,
                            ensure=True, eager=True, nInput=None):
        r"""
        Args:
            tablename (str):
            root_rowids (list):
            config (None): (default = None)
            ensure (bool):  eager evaluation if True(default = True)
            eager (bool): (default = True)
            nInput (None): (default = None)

        CommandLine:
            python -m ibeis.depends_cache --exec-get_ancestor_rowids

        Example:
            >>> # ENABLE_DOCTEST
            >>> from ibeis.depends_cache import *  # NOQA
            >>> depc = testdata_depc()
            >>> tablename = 'spam'
            >>> root_rowids = [1, 2, 3]
            >>> config, ensure, eager, nInput = None, True, True, None
            >>> result = ut.repr3(depc.get_ancestor_rowids(tablename, root_rowids, config, ensure, eager, nInput), nl=1)
            >>> print(result)
            {
                'chip': [1, 2, 3],
                'dummy_annot': [1, 2, 3],
                'fgweight': [1, 2, 3],
                'keypoint': [1, 2, 3],
                'probchip': [1, 2, 3],
                'spam': [1, 2, 3],
            }
        """
        # print('GET ANCESTOR ROWIDS %s ' % (tablename,))
        dependency_levels = depc.get_dependencies(tablename)
        # print('root_rowids = %r' % (root_rowids,))
        #print('dependency_levels = %s' % (ut.repr3(dependency_levels, nl=2),))
        rowid_dict = {depc.root: root_rowids}
        for level_keys in dependency_levels[1:]:
            #print('* level_keys %s ' % (level_keys,))
            for key in level_keys:
                #print('  * key = %r' % (key,))
                table = depc[key]
                parent_rowids = list(zip(*ut.dict_take(rowid_dict, table.parents)))
                # print('parent_rowids = %r' % (parent_rowids,))
                child_rowids = table.get_rowid_from_superkey(
                    parent_rowids, config=config, eager=eager, nInput=nInput,
                    ensure=ensure)
                # print('child_rowids = %r' % (child_rowids,))
                rowid_dict[key] = child_rowids
        return rowid_dict
Example #50
    def biggest_dirs(drive):
        print('Biggest Dirs in %r' % (drive,))
        dpath_list = drive.dpath_list
        fidxs_list = ut.dict_take(drive.dpath_to_fidx, dpath_list)
        unflat_dpath_bytes_list = ut.list_unflat_take(drive.fpath_bytes_list, fidxs_list)
        dpath_nbytes_list = list(map(sum, unflat_dpath_bytes_list))

        sortx = ut.list_argsort(dpath_nbytes_list)[::-1]
        sel = sortx[0:10]
        biggest_nbytes = ut.take(dpath_nbytes_list, sel)
        biggest_dpaths = ut.take(dpath_list, sel)
        biginfo_list = list(zip(map(ut.byte_str2, biggest_nbytes), biggest_dpaths))
        print(ut.list_str(biginfo_list, strvals=True))
        pass
Example #51
def score_chipmatch_true_nsum(qaid, chipmatch, qreq_, return_wrt_aids=False):
    """
    Sums scores over all annots with those names.
    Dupvote weighting should be on to combat double counting
    """
    # Nonhacky version of name scoring
    #(aid2_fm, aid2_fsv, aid2_fk, aid2_score, aid2_H) = chipmatch
    aid2_fsv = chipmatch.aid2_fsv
    NEW_WAY = True
    if NEW_WAY:
        # New version
        aid_list = list(six.iterkeys(aid2_fsv))
        fsv_list = ut.dict_take(aid2_fsv, aid_list)
        #fs_list = [fsv.prod(axis=1) if fsv.shape[1] > 1 else fsv.T[0] for fsv in fsv_list]
        fs_list = [fsv.prod(axis=1) for fsv in fsv_list]
        annot_score_list = np.array([fs.sum() for fs in fs_list])
        annot_nid_list = np.array(qreq_.ibs.get_annot_name_rowids(aid_list))
        nid_list, groupxs = vtool.group_indicies(annot_nid_list)
        grouped_scores = vtool.apply_grouping(annot_score_list, groupxs)
    else:
        aid2_fs = {
            aid: fsv.prod(axis=1)
            for aid, fsv in six.iteritems(aid2_fsv)
        }
        aid_list = list(six.iterkeys(aid2_fs))
        annot_score_list = np.array(
            [fs.sum() for fs in six.itervalues(aid2_fs)])
        annot_nid_list = np.array(qreq_.ibs.get_annot_name_rowids(aid_list))
        nid_list, groupxs = vtool.group_indicies(annot_nid_list)
        grouped_scores = vtool.apply_grouping(annot_score_list, groupxs)
    if return_wrt_aids:

        def indicator_array(size, pos, value):
            """ creates zero array and places value at pos """
            arr = np.zeros(size)
            arr[pos] = value
            return arr

        grouped_nscores = [
            indicator_array(scores.size, scores.argmax(), scores.sum())
            for scores in grouped_scores
        ]
        nscore_list = vtool.clustering2.invert_apply_grouping(
            grouped_nscores, groupxs)
        #nscore_list = ut.flatten(grouped_nscores)
        return aid_list, nscore_list
    else:
        score_list = [scores.sum() for scores in grouped_scores]
        return nid_list, score_list
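
Stripped of the dict plumbing, the nsum name score just groups annotation scores by name id and sums within each group. A numpy sketch with toy scores (grouping done with boolean masks instead of vtool's group_indicies / apply_grouping):

import numpy as np

annot_score_list = np.array([6.0, 2.0, 3.0, 5.0])
annot_nid_list = np.array([21, 21, 22, 22])          # name id of each annotation
nid_list = np.unique(annot_nid_list)
score_list = [annot_score_list[annot_nid_list == nid].sum() for nid in nid_list]
# nid_list == array([21, 22]), score_list == [8.0, 8.0]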
Example #52
 def pairwise_feature(aidx1,
                      aidx2,
                      all_nids=all_nids,
                      toy_params=toy_params):
     if aidx1 == aidx2:
         score = -1
     else:
         #rng = np.random.RandomState(int((aidx1 + 13) * (aidx2 + 13)))
         nid1 = all_nids[int(aidx1)]
         nid2 = all_nids[int(aidx2)]
         params = toy_params[nid1 == nid2]
         mu, sigma = ut.dict_take(params, ['mu', 'sigma'])
         score_ = rng.normal(mu, sigma)
         score = np.clip(score_, 0, np.inf)
     return score
Example #53
def define_named_aid_cfgs():
    """
    Definitions for common aid configurations
    TODO: potentially move to experiment configs
    """
    from ibeis.expt import annotation_configs
    named_defaults_dict = ut.dict_take(annotation_configs.__dict__, annotation_configs.TEST_NAMES)
    named_qcfg_defaults = dict(zip(annotation_configs.TEST_NAMES, ut.get_list_column(named_defaults_dict, 'qcfg')))
    named_dcfg_defaults = dict(zip(annotation_configs.TEST_NAMES, ut.get_list_column(named_defaults_dict, 'dcfg')))
    alias_keys = annotation_configs.alias_keys
    named_cfg_dict = {
        'qcfg': named_qcfg_defaults,
        'dcfg': named_dcfg_defaults,
    }
    return named_cfg_dict, alias_keys
Example #54
 def make_prob_annots(infr):
     cm_list = infr.cm_list
     unique_aids = sorted(ut.list_union(*[cm.daid_list for cm in cm_list] +
                                        [[cm.qaid for cm in cm_list]]))
     aid2_aidx = ut.make_index_lookup(unique_aids)
     prob_annots = np.zeros((len(unique_aids), len(unique_aids)))
     for count, cm in enumerate(cm_list):
         idx = aid2_aidx[cm.qaid]
         annot_scores = ut.dict_take(cm.aid2_annot_score, unique_aids, 0)
         prob_annots[idx][:] = annot_scores
     prob_annots[np.diag_indices(len(prob_annots))] = np.inf
     prob_annots += 1E-9
     #print(ut.hz_str('prob_names = ', ut.array2string2(prob_names,
     #precision=2, max_line_width=140, suppress_small=True)))
     return unique_aids, prob_annots
Example #55
    def from_cmtup_old(cls,
                       cmtup_old,
                       qaid=None,
                       fsv_col_lbls=None,
                       daid_list=None):
        """ convert QueryResult styles fields to ChipMatch style fields """

        (aid2_fm_, aid2_fsv_, aid2_fk_, aid2_score_, aid2_H_) = cmtup_old
        assert len(aid2_fsv_) == len(aid2_fm_), 'bad old cmtup_old'
        assert len(aid2_fk_) == len(aid2_fm_), 'bad old cmtup_old'
        assert (aid2_score_ is None or len(aid2_score_) == 0
                or len(aid2_score_) == len(aid2_fm_)), 'bad old cmtup_old'
        assert aid2_H_ is None or len(aid2_H_) == len(aid2_fm_), (
            'bad old cmtup_old')
        if daid_list is None:
            daid_list = list(six.iterkeys(aid2_fm_))

        # WARNING: dict_take will not copy these default items
        # Maybe these should be separate instances for different items?
        _empty_fm = np.empty((0, 2), dtype=hstypes.FM_DTYPE)
        _empty_fsv = np.empty((0, 1), dtype=hstypes.FS_DTYPE)
        _empty_fk = np.empty((0), dtype=hstypes.FK_DTYPE)
        # convert dicts to lists
        fm_list = ut.dict_take(aid2_fm_, daid_list, _empty_fm)
        fsv_list = ut.dict_take(aid2_fsv_, daid_list, _empty_fsv)
        fk_list = ut.dict_take(aid2_fk_, daid_list, _empty_fk)
        no_scores = (aid2_score_ is None
                     or (len(aid2_score_) == 0 and len(daid_list) > 0))
        score_list = (None if no_scores else np.array(
            ut.dict_take(aid2_score_, daid_list, np.nan)))
        H_list = (None if aid2_H_ is None else ut.dict_take(
            aid2_H_, daid_list, None))
        fsv_col_lbls = fsv_col_lbls
        cm = cls(qaid, daid_list, fm_list, fsv_list, fk_list, score_list,
                 H_list, fsv_col_lbls)
        return cm
Example #56
def make_match_interaction(matches, metadata, type_='RAT+SV', **kwargs):
    import plottool.interact_matches
    #import plottool as pt
    fm, fs = matches[type_][0:2]
    H1 = metadata['H_' + type_.split('+')[0]]
    #fm, fs = matches['RAT'][0:2]
    annot1 = metadata['annot1']
    annot2 = metadata['annot2']
    rchip1, kpts1, vecs1 = ut.dict_take(annot1, ['rchip', 'kpts', 'vecs'])
    rchip2, kpts2, vecs2 = ut.dict_take(annot2, ['rchip', 'kpts', 'vecs'])
    #pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs)
    fsv = fs[:, None]
    interact = plottool.interact_matches.MatchInteraction2(rchip1,
                                                           rchip2,
                                                           kpts1,
                                                           kpts2,
                                                           fm,
                                                           fs,
                                                           fsv,
                                                           vecs1,
                                                           vecs2,
                                                           H1=H1,
                                                           **kwargs)
    return interact
Example #57
def get_aidpair_tags(ibs, aid1_list, aid2_list, directed=True):
    r"""
    Args:
        ibs (IBEISController):  ibeis controller object
        aid1_list (list):
        aid2_list (list):
        directed (bool): (default = True)

    Returns:
        list: tags_list

    CommandLine:
        python -m ibeis.tag_funcs --exec-get_aidpair_tags --db PZ_Master1 --tags Hard interesting

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.tag_funcs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> has_any = ut.get_argval('--tags', type_=list, default=None)
        >>> min_num = ut.get_argval('--min_num', type_=int, default=1)
        >>> aid_pairs = filter_aidpairs_by_tags(ibs, has_any=has_any, min_num=1)
        >>> aid1_list = aid_pairs.T[0]
        >>> aid2_list = aid_pairs.T[1]
        >>> undirected_tags = get_aidpair_tags(ibs, aid1_list, aid2_list, directed=False)
        >>> tagged_pairs = list(zip(aid_pairs.tolist(), undirected_tags))
        >>> print(ut.list_str(tagged_pairs))
        >>> tag_dict = ut.groupby_tags(tagged_pairs, undirected_tags)
        >>> print(ut.dict_str(tag_dict, nl=2))
        >>> print(ut.dict_str(ut.map_dict_vals(len, tag_dict)))
    """
    aid_pairs = np.vstack([aid1_list, aid2_list]).T
    if directed:
        annotmatch_rowid = ibs.get_annotmatch_rowid_from_superkey(aid_pairs.T[0], aid_pairs.T[1])
        tags_list = ibs.get_annotmatch_case_tags(annotmatch_rowid)
    else:
        expanded_aid_pairs = np.vstack([aid_pairs, aid_pairs[:, ::-1]])
        expanded_annotmatch_rowid = ibs.get_annotmatch_rowid_from_superkey(
            expanded_aid_pairs.T[0], expanded_aid_pairs.T[1])
        expanded_edgeids = vt.get_undirected_edge_ids(expanded_aid_pairs)
        unique_edgeids, groupxs = vt.group_indices(expanded_edgeids)
        expanded_tags_list = ibs.get_annotmatch_case_tags(expanded_annotmatch_rowid)
        grouped_tags = vt.apply_grouping(np.array(expanded_tags_list, dtype=object), groupxs)
        undirected_tags = [list(set(ut.flatten(tags))) for tags in grouped_tags]
        edgeid2_tags = dict(zip(unique_edgeids, undirected_tags))
        input_edgeids = expanded_edgeids[:len(aid_pairs)]
        tags_list = ut.dict_take(edgeid2_tags, input_edgeids)
    return tags_list
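
The undirected branch above groups tags from both edge directions onto a single direction-independent edge id (vt.get_undirected_edge_ids is assumed to produce such an id). The same idea with a plain canonical key:

directed_pairs = [(1, 2), (2, 1), (3, 4)]
tags_per_pair = [['Hard'], ['interesting'], []]
edge_to_tags = {}
for (a, b), tags in zip(directed_pairs, tags_per_pair):
    key = (min(a, b), max(a, b))                     # direction-independent edge id
    edge_to_tags.setdefault(key, set()).update(tags)
# edge_to_tags == {(1, 2): {'Hard', 'interesting'}, (3, 4): set()}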