Example #1
File: sysres.py Project: Erotemic/ibeis
def ensure_pz_mtest():
    """
    Ensures that you have the PZ_MTEST dataset

    CommandLine:
        python -m ibeis.init.sysres --exec-ensure_pz_mtest
        python -m ibeis --tf ensure_pz_mtest

    Example:
        >>> # SCRIPT
        >>> from ibeis.init.sysres import *  # NOQA
        >>> ensure_pz_mtest()
    """
    print('ensure_pz_mtest')
    from ibeis import sysres
    workdir = sysres.get_workdir()
    mtest_zipped_url = const.ZIPPED_URLS.PZ_MTEST
    mtest_dir = ut.grab_zipped_url(mtest_zipped_url, ensure=True, download_dir=workdir)
    print('have mtest_dir=%r' % (mtest_dir,))
    # update to the newest database version
    import ibeis
    ibs = ibeis.opendb('PZ_MTEST')
    print('cleaning up old database and ensuring everything is properly computed')
    ibs.db.vacuum()
    valid_aids = ibs.get_valid_aids()
    assert len(valid_aids) == 119
    ibs.update_annot_semantic_uuids(valid_aids)
    if ut.VERYVERBOSE:
        ibs.print_annotation_table()
    nid = ibs.get_name_rowids_from_text('', ensure=False)
    if nid is not None:
        ibs.set_name_texts([nid], ['lostname'])

    # Remove old imagesets and update to new special ones
    all_imgset_ids = ibs.get_valid_imgsetids()
    special_imgset_ids = ibs.get_special_imgsetids()
    other_imgset_ids = ut.setdiff(all_imgset_ids, special_imgset_ids)
    ibs.delete_imagesets(other_imgset_ids)
    ibs.set_exemplars_from_quality_and_viewpoint()
    ibs.update_all_image_special_imageset()

    occurrence_gids = [2, 9, 12, 16, 25, 26, 29, 30, 32, 33, 35, 46, 47, 52,
                       57, 61, 66, 70, 71, 73, 74, 76, 77, 78, 79, 87, 88, 90,
                       96, 97, 103, 106, 108, 110, 112, 113]

    other_gids = ut.setdiff(ibs.get_valid_gids(), occurrence_gids)
    other_gids1 = other_gids[0::2]
    other_gids2 = other_gids[1::2]
    ibs.set_image_imagesettext(occurrence_gids, ['Occurrence 1'] * len(occurrence_gids))
    ibs.set_image_imagesettext(other_gids1, ['Occurrence 2'] * len(other_gids1))
    ibs.set_image_imagesettext(other_gids2, ['Occurrence 3'] * len(other_gids2))

    # hack in some tags
    print('Hacking in some tags')
    foal_aids = [4, 8, 15, 21, 28, 34, 38, 41, 45, 49, 51, 56, 60, 66, 69, 74, 80, 83, 91, 97, 103, 107, 109, 119]
    mother_aids = [9, 16, 35, 42, 52, 57, 61, 67, 75, 84, 98, 104, 108, 114]
    ibs.append_annot_case_tags(foal_aids, ['foal'] * len(foal_aids))
    ibs.append_annot_case_tags(mother_aids, ['mother'] * len(mother_aids))
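Every example on this page exercises utool's order-preserving set difference. As a reference point, here is a minimal pure-Python sketch of what ut.setdiff is assumed to do (keep items of the first list that are absent from the second, in their original order); setdiff_py is a hypothetical name used only for illustration:

def setdiff_py(list1, list2):
    # keep list1's order; drop anything that also appears in list2
    exclude = set(list2)
    return [item for item in list1 if item not in exclude]

assert setdiff_py([1, 2, 3, 4], [2, 4]) == [1, 3]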
Example #2
def modify_tags(tags_list, direct_map=None, regex_map=None, regex_aug=None,
                delete_unmapped=False, return_unmapped=False,
                return_map=False):
    import utool as ut
    tag_vocab = ut.unique(ut.flatten(tags_list))
    alias_map = ut.odict()
    if regex_map is not None:
        alias_map.update(**ut.build_alias_map(regex_map, tag_vocab))
    if direct_map is not None:
        alias_map.update(ut.odict(direct_map))

    new_tags_list = tags_list
    new_tags_list = ut.alias_tags(new_tags_list, alias_map)

    if regex_aug is not None:
        alias_aug = ut.build_alias_map(regex_aug, tag_vocab)
        aug_tags_list = ut.alias_tags(new_tags_list, alias_aug)
        new_tags_list = [ut.unique(t1 + t2) for t1, t2 in zip(new_tags_list, aug_tags_list)]

    unmapped = list(set(tag_vocab) - set(alias_map.keys()))
    if delete_unmapped:
        new_tags_list = [ut.setdiff(tags, unmapped) for tags in new_tags_list]

    toreturn = None
    if return_map:
        toreturn = (alias_map,)

    if return_unmapped:
        toreturn = toreturn + (unmapped,)

    if toreturn is None:
        toreturn = new_tags_list
    else:
        toreturn = (new_tags_list,) + toreturn
    return toreturn
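A rough sketch of how an alias map like the one built above rewrites per-item tag lists, in plain Python rather than through the utool helpers (the names and data are illustrative, not the utool API):

alias_map = {'juvenile': 'foal', 'mom': 'mother'}
tags_list = [['juvenile', 'occluded'], ['mom'], []]
# replace each tag by its alias when one is defined, as ut.alias_tags is assumed to do
new_tags_list = [[alias_map.get(tag, tag) for tag in tags] for tags in tags_list]
assert new_tags_list == [['foal', 'occluded'], ['mother'], []]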
Example #3
def execute_singles(qreq_):
    if qreq_.use_single_cache:
        qaid_to_hit = _load_singles(qreq_)
    else:
        qaid_to_hit = {}
    hit_all = len(qaid_to_hit) == len(qreq_.qaids)
    hit_any = len(qaid_to_hit) > 0

    if hit_all:
        qaid_to_cm = qaid_to_hit
    else:
        if hit_any:
            logger.info('... partial cm cache hit %d/%d' %
                        (len(qaid_to_hit), len(qreq_)))
            hit_aids = list(qaid_to_hit.keys())
            miss_aids = ut.setdiff(qreq_.qaids, hit_aids)
            qreq_miss = qreq_.shallowcopy(miss_aids)
        else:
            qreq_miss = qreq_
        # Compute misses
        qreq_miss.ensure_data()
        qaid_to_cm = execute_and_save(qreq_miss)
        # Merge misses with hits
        if hit_any:
            qaid_to_cm.update(qaid_to_hit)
    cm_list = ut.take(qaid_to_cm, qreq_.qaids)
    return cm_list
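The control flow above is a generic partial-cache pattern: take the hits, recompute only the misses, merge, and return results in the requested order. A self-contained sketch with plain dicts (compute stands in for execute_and_save, cache for the single-query cache):

def cached_map(keys, cache, compute):
    # split the requested keys into cache hits and misses
    hits = {k: cache[k] for k in keys if k in cache}
    misses = [k for k in keys if k not in cache]
    # compute only the misses, then merge the hits back in
    results = {k: compute(k) for k in misses}
    results.update(hits)
    # return values in the originally requested order, like ut.take
    return [results[k] for k in keys]

assert cached_map([1, 2, 3], {2: 'b'}, str) == ['1', 'b', '3']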
Example #4
def ensure_names_are_connected(graph, aids_list):
    aug_graph = graph.copy().to_undirected()
    orig_edges = aug_graph.edges()
    unflat_edges = [list(itertools.product(aids, aids)) for aids in aids_list]
    aid_pairs = [tup for tup in ut.iflatten(unflat_edges) if tup[0] != tup[1]]
    new_edges = ut.setdiff_ordered(aid_pairs, aug_graph.edges())

    preweighted_edges = nx.get_edge_attributes(aug_graph, 'weight')
    if preweighted_edges:
        orig_edges = ut.setdiff(orig_edges, list(preweighted_edges.keys()))

    aug_graph.add_edges_from(new_edges)
    # Ensure the largest possible set of original edges is in the MST
    nx.set_edge_attributes(aug_graph,
                           name='weight',
                           values=dict([(edge, 1.0) for edge in new_edges]))
    nx.set_edge_attributes(aug_graph,
                           name='weight',
                           values=dict([(edge, 0.1) for edge in orig_edges]))
    for cc_sub_graph in nx.connected_component_subgraphs(aug_graph):
        mst_sub_graph = nx.minimum_spanning_tree(cc_sub_graph)
        for edge in mst_sub_graph.edges():
            redge = edge[::-1]
            if not (graph.has_edge(*edge) or graph.has_edge(*redge)):
                graph.add_edge(*redge, attr_dict={})
Example #5
def predict_proba_df(clf, X_df, class_names=None):
    """
    Calls the sklearn classifier's predict_proba, then puts the results in a
    dataframe using the same index as X_df and incorporating all of the given
    class_names
    """
    if class_names is not None:
        columns = ut.take(class_names, clf.classes_)
    else:
        columns = None
    if len(X_df) == 0:
        return pd.DataFrame(columns=columns)
    try:
        probs = clf.predict_proba(X_df)
    except ValueError:
        # solves a problem when values are infinity for whatever reason
        X = X_df.values.copy()
        X[~np.isfinite(X)] = np.nan
        probs = clf.predict_proba(X)

    probs_df = pd.DataFrame(probs, columns=columns, index=X_df.index)
    # add in zero probability for classes without training data
    if class_names is not None:
        missing = ut.setdiff(class_names, columns)
        if missing:
            for classname in missing:
                probs_df = probs_df.assign(**{
                    classname: np.zeros(len(probs_df))})
    return probs_df
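A short usage sketch, assuming predict_proba_df is importable along with the module-level names it relies on (ut, np, pd); the classifier and data here are purely illustrative:

import pandas as pd
from sklearn.linear_model import LogisticRegression

X_df = pd.DataFrame({'f1': [0.0, 1.0, 2.0, 3.0]}, index=[10, 11, 12, 13])
y = [0, 0, 1, 1]  # labels are indices into class_names; class 2 is unseen
clf = LogisticRegression().fit(X_df, y)
probs_df = predict_proba_df(clf, X_df, class_names=['neg', 'pos', 'other'])
assert list(probs_df.index) == [10, 11, 12, 13]
# the unseen class gets an all-zero probability column
assert list(probs_df['other']) == [0.0, 0.0, 0.0, 0.0]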
Example #6
def remove_annot_case_tags(ibs, aid_list, tag_list):
    if isinstance(tag_list, six.string_types):
        # Apply single tag to everybody
        tag_list = [tag_list] * len(aid_list)
    tags_list = [ut.ensure_iterable(tag) for tag in tag_list]
    text_list = ibs.get_annot_tag_text(aid_list)
    orig_tags_list = [[] if note is None else _parse_tags(note) for note in text_list]
    new_tags_list = [ut.setdiff(t2, t1) for t1, t2 in zip(tags_list, orig_tags_list)]
    new_text_list = [';'.join(tags) for tags in new_tags_list]
    ibs.set_annot_tag_text(aid_list, new_text_list)
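At its core the update is a per-annotation set difference over ';'-delimited tag strings; a minimal standalone sketch of that step:

def remove_tags(text, tags_to_remove):
    # parse the ';'-delimited tags, drop the unwanted ones, re-serialize
    tags = [t for t in text.split(';') if t]
    kept = [t for t in tags if t not in set(tags_to_remove)]
    return ';'.join(kept)

assert remove_tags('foal;occluded;blurry', ['occluded']) == 'foal;blurry'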
Example #7
File: viz_graph.py Project: Erotemic/ibeis
def ensure_names_are_connected(graph, aids_list):
    aug_graph = graph.copy().to_undirected()
    orig_edges = aug_graph.edges()
    unflat_edges = [list(itertools.product(aids, aids)) for aids in aids_list]
    aid_pairs = [tup for tup in ut.iflatten(unflat_edges) if tup[0] != tup[1]]
    new_edges = ut.setdiff_ordered(aid_pairs, aug_graph.edges())

    preweighted_edges = nx.get_edge_attributes(aug_graph, 'weight')
    if preweighted_edges:
        orig_edges = ut.setdiff(orig_edges, list(preweighted_edges.keys()))

    aug_graph.add_edges_from(new_edges)
    # Ensure the largest possible set of original edges is in the MST
    nx.set_edge_attributes(aug_graph, 'weight', dict([(edge, 1.0) for edge in new_edges]))
    nx.set_edge_attributes(aug_graph, 'weight', dict([(edge, 0.1) for edge in orig_edges]))
    for cc_sub_graph in nx.connected_component_subgraphs(aug_graph):
        mst_sub_graph = nx.minimum_spanning_tree(cc_sub_graph)
        for edge in mst_sub_graph.edges():
            redge = edge[::-1]
            if not (graph.has_edge(*edge) or graph.has_edge(*redge)):
                graph.add_edge(*redge, attr_dict={})
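Both copies of this function rely on the same weighting trick: pre-existing edges get a much smaller weight (0.1) than augmented candidate edges (1.0), so the minimum spanning tree keeps as many original edges as possible. A self-contained sketch of the idea (note that nx.connected_component_subgraphs was removed in networkx 2.4; on a connected graph, nx.minimum_spanning_tree alone suffices):

import itertools
import networkx as nx

graph = nx.Graph([(1, 2)])  # one original edge among the aids {1, 2, 3}
candidates = [(u, v) for u, v in itertools.combinations([1, 2, 3], 2)
              if not graph.has_edge(u, v)]
aug_graph = graph.copy()
aug_graph.add_edges_from(candidates)
# original edges are cheap, augmented edges are expensive
nx.set_edge_attributes(aug_graph, name='weight',
                       values={e: 1.0 for e in candidates})
nx.set_edge_attributes(aug_graph, name='weight',
                       values={e: 0.1 for e in graph.edges()})
mst = nx.minimum_spanning_tree(aug_graph)
assert mst.has_edge(1, 2)  # the original edge survives in the MST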
Example #8
def double_depcache_graph():
    r"""
    CommandLine:
        python -m ibeis.scripts.specialdraw double_depcache_graph --show --testmode

        python -m ibeis.scripts.specialdraw double_depcache_graph --save=figures5/doubledepc.png --dpath ~/latex/cand/  --diskshow  --figsize=8,20 --dpi=220 --testmode --show --clipwhite
        python -m ibeis.scripts.specialdraw double_depcache_graph --save=figures5/doubledepc.png --dpath ~/latex/cand/  --diskshow  --figsize=8,20 --dpi=220 --testmode --show --clipwhite --arrow-width=.5

        python -m ibeis.scripts.specialdraw double_depcache_graph --save=figures5/doubledepc.png --dpath ~/latex/cand/  --diskshow  --figsize=8,20 --dpi=220 --testmode --show --clipwhite --arrow-width=5

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.scripts.specialdraw import *  # NOQA
        >>> result = double_depcache_graph()
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import ibeis
    import networkx as nx
    import plottool as pt
    pt.ensure_pylab_qt4()
    # pt.plt.xkcd()
    ibs = ibeis.opendb('testdb1')
    reduced = True
    implicit = True
    annot_graph = ibs.depc_annot.make_graph(reduced=reduced, implicit=implicit)
    image_graph = ibs.depc_image.make_graph(reduced=reduced, implicit=implicit)
    to_rename = ut.isect(image_graph.nodes(), annot_graph.nodes())
    nx.relabel_nodes(annot_graph, {x: 'annot_' + x for x in to_rename}, copy=False)
    nx.relabel_nodes(image_graph, {x: 'image_' + x for x in to_rename}, copy=False)
    graph = nx.compose_all([image_graph, annot_graph])
    #graph = nx.union_all([image_graph, annot_graph], rename=('image', 'annot'))
    # userdecision = ut.nx_makenode(graph, 'user decision', shape='rect', color=pt.DARK_YELLOW, style='diagonals')
    # userdecision = ut.nx_makenode(graph, 'user decision', shape='circle', color=pt.DARK_YELLOW)
    userdecision = ut.nx_makenode(graph, 'User decision', shape='rect',
                                  #width=100, height=100,
                                  color=pt.YELLOW, style='diagonals')
    #longcat = True
    longcat = False

    #edge = ('feat', 'neighbor_index')
    #data = graph.get_edge_data(*edge)[0]
    #print('data = %r' % (data,))
    #graph.remove_edge(*edge)
    ## hack
    #graph.add_edge('featweight', 'neighbor_index', **data)

    graph.add_edge('detections', userdecision, constraint=longcat, color=pt.PINK)
    graph.add_edge(userdecision, 'annotations', constraint=longcat, color=pt.PINK)
    # graph.add_edge(userdecision, 'annotations', implicit=True, color=[0, 0, 0])
    if not longcat:
        pass
        #graph.add_edge('images', 'annotations', style='invis')
        #graph.add_edge('thumbnails', 'annotations', style='invis')
        #graph.add_edge('thumbnails', userdecision, style='invis')
    graph.remove_node('Has_Notch')
    graph.remove_node('annotmask')
    layoutkw = {
        'ranksep': 5,
        'nodesep': 5,
        'dpi': 96,
        # 'nodesep': 1,
    }
    ns = 1000

    ut.nx_set_default_node_attributes(graph, 'fontsize', 72)
    ut.nx_set_default_node_attributes(graph, 'fontname', 'Ubuntu')
    ut.nx_set_default_node_attributes(graph, 'style',  'filled')

    ut.nx_set_default_node_attributes(graph, 'width', ns * ut.PHI)
    ut.nx_set_default_node_attributes(graph, 'height', ns * (1 / ut.PHI))

    #for u, v, d in graph.edge(data=True):
    for u, vkd in graph.edge.items():
        for v, dk in vkd.items():
            for k, d in dk.items():
                localid = d.get('local_input_id')
                if localid:
                    # d['headlabel'] = localid
                    if localid not in ['1']:
                        d['taillabel'] = localid
                    #d['label'] = localid
                if d.get('taillabel') in {'1'}:
                    del d['taillabel']

    node_alias = {
        'chips': 'Chip',
        'images': 'Image',
        'feat': 'Feat',
        'featweight': 'Feat Weights',
        'thumbnails': 'Thumbnail',
        'detections': 'Detections',
        'annotations': 'Annotation',
        'Notch_Tips': 'Notch Tips',
        'probchip': 'Prob Chip',
        'Cropped_Chips': 'Cropped Chip',
        'Trailing_Edge': 'Trailing\nEdge',
        'Block_Curvature': 'Block\nCurvature',
        # 'BC_DTW': 'block curvature /\n dynamic time warp',
        'BC_DTW': 'DTW Distance',
        'vsone': 'Hots vsone',
        'feat_neighbs': 'Nearest\nNeighbors',
        'neighbor_index': 'Neighbor\nIndex',
        'vsmany': 'Hots vsmany',
        'annot_labeler': 'Annot Labeler',
        'labeler': 'Labeler',
        'localizations': 'Localizations',
        'classifier': 'Classifier',
        'sver': 'Spatial\nVerification',
        'Classifier': 'Existence',
        'image_labeler': 'Image Labeler',
    }
    node_alias = {
        'Classifier': 'existence',
        'feat_neighbs': 'neighbors',
        'sver': 'spatial_verification',
        'Cropped_Chips': 'cropped_chip',
        'BC_DTW': 'dtw_distance',
        'Block_Curvature': 'curvature',
        'Trailing_Edge': 'trailing_edge',
        'Notch_Tips': 'notch_tips',
        'thumbnails': 'thumbnail',
        'images': 'image',
        'annotations': 'annotation',
        'chips': 'chip',
        #userdecision: 'User de'
    }
    node_alias = ut.delete_dict_keys(node_alias, ut.setdiff(node_alias.keys(),
                                                            graph.nodes()))
    nx.relabel_nodes(graph, node_alias, copy=False)

    fontkw = dict(fontname='Ubuntu', fontweight='normal', fontsize=12)
    #pt.gca().set_aspect('equal')
    #pt.figure()
    pt.show_nx(graph, layoutkw=layoutkw, fontkw=fontkw)
    pt.zoom_factory()
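The final node_alias step above keeps only aliases whose keys actually appear in the graph (it deletes the setdiff of alias keys and graph nodes); in plain Python, the same filtering is a dict comprehension:

node_alias = {'chips': 'chip', 'not_a_node': 'ignored'}
nodes = {'chips', 'images'}
node_alias = {k: v for k, v in node_alias.items() if k in nodes}
assert node_alias == {'chips': 'chip'}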
Example #9
File: bayes.py Project: heroinlin/ibeis
def cluster_query(model, query_vars=None, evidence=None, soft_evidence=None,
                  method=None, operation='maximize'):
    """
    CommandLine:
        python -m ibeis.algo.hots.bayes --exec-cluster_query --show

    ParamGrid:
        >>> param_grid = dict(
        >>>     #method=['approx', 'bf', 'bp'],
        >>>     method=['approx', 'bp'],
        >>> )
        >>> combos = ut.all_dict_combinations(param_grid)
        >>> index = 0
        >>> keys = 'method'.split(', ')
        >>> method, = ut.dict_take(combos[index], keys)

    Setup:
        >>> from ibeis.algo.hots.bayes import *  # NOQA
        >>> verbose = True
        >>> other_evidence = {}
        >>> name_evidence = [1, None, None, 0]
        >>> score_evidence = [2, 0, 2]
        >>> special_names = ['fred', 'sue', 'tom', 'paul']
        >>> model = make_name_model(
        >>>     num_annots=4, num_names=4, num_scores=3, verbose=True, mode=1,
        >>>     special_names=special_names)
        >>> method = None
        >>> model, evidence, soft_evidence = update_model_evidence(
        >>>     model, name_evidence, score_evidence, other_evidence)
        >>> evidence = model._ensure_internal_evidence(evidence)
        >>> query_vars = ut.list_getattr(model.ttype2_cpds['name'], 'variable')

    Example:
        >>> # DISABLE_DOCTEST
        >>> query_results = cluster_query(model, query_vars, evidence,
        >>>                               method=method)
        >>> print(ut.repr2(query_results['top_assignments'], nl=1))
        >>> ut.quit_if_noshow()
        >>> pgm_ext.show_model(model, evidence=evidence, **query_results)
        >>> ut.show_if_requested()
    """
    evidence = model._ensure_internal_evidence(evidence)
    if query_vars is None:
        query_vars = model.nodes()
    orig_query_vars = query_vars  # NOQA
    query_vars = ut.setdiff(query_vars, list(evidence.keys()))

    if method is None:
        method = ut.get_argval('--method', type_=str, default='bp')

    reduced_joint = compute_reduced_joint(model, query_vars, evidence,
                                          method, operation)

    new_reduced_joint = collapse_factor_labels(model, reduced_joint, evidence)

    if False:
        report_partitioning_statistics(new_reduced_joint)

    # FIXME: are these max marginals?
    max_marginals = {}
    for i, var in enumerate(query_vars):
        one_out = query_vars[:i] + query_vars[i + 1:]
        max_marginals[var] = new_reduced_joint.marginalize(one_out,
                                                           inplace=False)
        # max_marginals[var] = joint2.maximize(one_out, inplace=False)
    factor_list = max_marginals.values()

    # Now find the most likely state
    reduced_variables = new_reduced_joint.variables
    new_state_idxs = np.array(new_reduced_joint._row_labels(asindex=True))
    new_values = new_reduced_joint.values.ravel()
    sortx = new_values.argsort()[::-1]
    sort_new_state_idxs = new_state_idxs.take(sortx, axis=0)
    sort_new_values = new_values.take(sortx)
    sort_new_states = list(zip(*[
        ut.dict_take(model.statename_dict[var], idx)
        for var, idx in
        zip(reduced_variables, sort_new_state_idxs.T)]))

    # Better map assignment based on knowledge of labels
    map_assign = dict(zip(reduced_variables, sort_new_states[0]))

    sort_reduced_rowstr_lbls = [
        ut.repr2(dict(zip(reduced_variables, lbls)), explicit=True,
                 nobraces=True,
                 strvals=True)
        for lbls in sort_new_states
    ]

    top_assignments = list(zip(sort_reduced_rowstr_lbls[:4], sort_new_values))
    if len(sort_new_values) > 3:
        top_assignments += [('other', 1 - sum(sort_new_values[:4]))]
    query_results = {
        'factor_list': factor_list,
        'top_assignments': top_assignments,
        'map_assign': map_assign,
        'method': method,
    }
    print('query_results = %s' % (ut.repr3(query_results, nl=2),))
    return query_results
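The top_assignments construction is a sort-and-truncate over the flattened joint: keep the four most probable assignments and lump the remaining probability mass into an 'other' row. The same logic in isolation, with made-up labels and values:

import numpy as np

values = np.array([0.05, 0.4, 0.1, 0.3, 0.15])
labels = ['a', 'b', 'c', 'd', 'e']
sortx = values.argsort()[::-1]          # indices of values, most probable first
sort_values = values.take(sortx)
top_assignments = [(labels[i], values[i]) for i in sortx[:4]]
if len(sort_values) > 3:
    # whatever mass is not in the top four becomes 'other'
    top_assignments += [('other', 1 - sum(sort_values[:4]))]
assert top_assignments[0] == ('b', 0.4)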
Example #10
File: bayes.py Project: heroinlin/ibeis
def test_model(num_annots, num_names, score_evidence=[], name_evidence=[],
               other_evidence={}, noquery=False, verbose=None,
               **kwargs):
    if verbose is None:
        verbose = ut.VERBOSE

    method = kwargs.pop('method', None)
    model = make_name_model(num_annots, num_names, verbose=verbose, **kwargs)

    if verbose:
        model.print_priors(ignore_ttypes=['match', 'score'])

    model, evidence, soft_evidence = update_model_evidence(
        model, name_evidence, score_evidence, other_evidence)

    if verbose and len(soft_evidence) != 0:
        model.print_priors(ignore_ttypes=['match', 'score'],
                           title='Soft Evidence', color='green')

    #if verbose:
    #    ut.colorprint('\n --- Soft Evidence ---', 'white')
    #    for ttype, cpds in model.ttype2_cpds.items():
    #        if ttype != 'match':
    #            for fs_ in ut.ichunks(cpds, 4):
    #                ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]),
    #                              'green')

    if verbose:
        ut.colorprint('\n --- Inference ---', 'red')

    if (len(evidence) > 0 or len(soft_evidence) > 0) and not noquery:
        evidence = model._ensure_internal_evidence(evidence)
        query_vars = []
        query_vars += ut.list_getattr(model.ttype2_cpds['name'], 'variable')
        #query_vars += ut.list_getattr(model.ttype2_cpds['match'], 'variable')
        query_vars = ut.setdiff(query_vars, evidence.keys())
        #query_vars = ut.setdiff(query_vars, soft_evidence.keys())
        query_results = cluster_query(model, query_vars, evidence,
                                      soft_evidence, method)
    else:
        query_results = {}

    # query_results is empty when noquery is set, so use .get to avoid a KeyError
    factor_list = query_results.get('factor_list', [])

    if verbose:
        print('+--------')
        semtypes = [model.var2_cpd[f.variables[0]].ttype
                    for f in factor_list]
        for type_, factors in ut.group_items(factor_list, semtypes).items():
            print('Result Factors (%r)' % (type_,))
            factors = ut.sortedby(factors, [f.variables[0] for f in factors])
            for fs_ in ut.ichunks(factors, 4):
                ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
                              'yellow')
        print('MAP assignments')
        top_assignments = query_results.get('top_assignments', [])
        tmp = []
        for lbl, val in top_assignments:
            tmp.append('%s : %.4f' % (ut.repr2(lbl), val))
        print(ut.align('\n'.join(tmp), ' :'))
        print('L_____\n')

    showkw = dict(evidence=evidence,
                  soft_evidence=soft_evidence,
                  **query_results)

    pgm_viz.show_model(model, **showkw)
    return (model, evidence, query_results)
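The verbose report groups the result factors by their semantic type with ut.group_items; a plain-Python equivalent of that grouping (group_items below is a sketch, not the utool implementation):

from collections import defaultdict

def group_items(items, labels):
    # bucket each item under its parallel label
    groups = defaultdict(list)
    for item, label in zip(items, labels):
        groups[label].append(item)
    return dict(groups)

grouped = group_items(['n1', 's1', 'n2'], ['name', 'score', 'name'])
assert grouped == {'name': ['n1', 'n2'], 'score': ['s1']}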
Example #11
def ensure_pz_mtest():
    """
    Ensures that you have the PZ_MTEST dataset

    CommandLine:
        python -m wbia.init.sysres --exec-ensure_pz_mtest
        python -m wbia --tf ensure_pz_mtest

    Ignore:
        from wbia.sysres import delete_dbdir
        delete_dbdir('PZ_MTEST')

    Example:
        >>> # SCRIPT
        >>> from wbia.init.sysres import *  # NOQA
        >>> ensure_pz_mtest()
    """
    logger.info('ensure_pz_mtest')
    dbdir = ensure_db_from_url(const.ZIPPED_URLS.PZ_MTEST)
    # update to the newest database version
    import wbia

    ibs = wbia.opendb(dbdir=dbdir)
    logger.info('cleaning up old database and ensuring everything is properly computed')
    ibs.db.vacuum()
    valid_aids = ibs.get_valid_aids()
    assert len(valid_aids) == 119
    ibs.update_annot_semantic_uuids(valid_aids)
    if ut.VERYVERBOSE:
        ibs.print_annotation_table()
    nid = ibs.get_name_rowids_from_text('', ensure=False)
    if nid is not None:
        ibs.set_name_texts([nid], ['lostname'])

    # Remove old imagesets and update to new special ones
    all_imgset_ids = ibs.get_valid_imgsetids()
    special_imgset_ids = ibs.get_special_imgsetids()
    other_imgset_ids = ut.setdiff(all_imgset_ids, special_imgset_ids)
    ibs.delete_imagesets(other_imgset_ids)
    ibs.set_exemplars_from_quality_and_viewpoint()
    ibs.update_all_image_special_imageset()

    occurrence_gids = [
        2, 9, 12, 16, 25, 26, 29, 30, 32, 33, 35, 46, 47, 52, 57, 61, 66, 70,
        71, 73, 74, 76, 77, 78, 79, 87, 88, 90, 96, 97, 103, 106, 108, 110,
        112, 113,
    ]

    other_gids = ut.setdiff(ibs.get_valid_gids(), occurrence_gids)
    other_gids1 = other_gids[0::2]
    other_gids2 = other_gids[1::2]
    ibs.set_image_imagesettext(occurrence_gids, ['Occurrence 1'] * len(occurrence_gids))
    ibs.set_image_imagesettext(other_gids1, ['Occurrence 2'] * len(other_gids1))
    ibs.set_image_imagesettext(other_gids2, ['Occurrence 3'] * len(other_gids2))

    # hack in some tags
    logger.info('Hacking in some tags')
    foal_aids = [
        4, 8, 15, 21, 28, 34, 38, 41, 45, 49, 51, 56, 60, 66, 69, 74, 80, 83,
        91, 97, 103, 107, 109, 119,
    ]
    mother_aids = [9, 16, 35, 42, 52, 57, 61, 67, 75, 84, 98, 104, 108, 114]
    ibs.append_annot_case_tags(foal_aids, ['foal'] * len(foal_aids))
    ibs.append_annot_case_tags(mother_aids, ['mother'] * len(mother_aids))

    # make part of the database complete and the other part semi-complete
    # make staging ahead of annotmatch.
    reset_mtest_graph()
Example #12
def draw_twoday_count(ibs, visit_info_list_):
    import copy
    visit_info_list = copy.deepcopy(visit_info_list_)

    aids_day1, aids_day2 = ut.take_column(visit_info_list_, 'aids')
    nids_day1, nids_day2 = ut.take_column(visit_info_list_, 'unique_nids')
    resight_nids = ut.isect(nids_day1, nids_day2)

    if False:
        # HACK REMOVE DATA TO MAKE THIS FASTER
        num = 20
        for info in visit_info_list:
            non_resight_nids = list(set(info['unique_nids']) - set(resight_nids))
            sample_nids2 = non_resight_nids[0:num] + resight_nids[:num]
            info['grouped_aids'] = ut.dict_subset(info['grouped_aids'], sample_nids2)
            info['unique_nids'] = sample_nids2

    # Build a graph of matches
    if False:

        debug = False

        for info in visit_info_list:
            edges = []
            grouped_aids = info['grouped_aids']

            aids_list = list(grouped_aids.values())
            ams_list = ibs.get_annotmatch_rowids_in_cliques(aids_list)
            aids1_list = ibs.unflat_map(ibs.get_annotmatch_aid1, ams_list)
            aids2_list = ibs.unflat_map(ibs.get_annotmatch_aid2, ams_list)
            for ams, aids, aids1, aids2 in zip(ams_list, aids_list, aids1_list, aids2_list):
                edge_nodes = set(aids1 + aids2)
                # if len(edge_nodes) != len(set(aids)):
                #     print('--')
                #     print('aids = %r' % (aids,))
                #     print('edge_nodes = %r' % (edge_nodes,))
                bad_aids = edge_nodes - set(aids)
                if len(bad_aids) > 0:
                    print('bad_aids = %r' % (bad_aids,))
                unlinked_aids = set(aids) - edge_nodes
                mst_links = list(ut.itertwo(list(unlinked_aids) + list(edge_nodes)[:1]))
                bad_aids.add(None)
                user_links = [(u, v) for (u, v) in zip(aids1, aids2) if u not in bad_aids and v not in bad_aids]
                new_edges = mst_links + user_links
                new_edges = [(int(u), int(v)) for u, v in new_edges if u not in bad_aids and v not in bad_aids]
                edges += new_edges
            info['edges'] = edges

        # Add edges between days
        grouped_aids1, grouped_aids2 = ut.take_column(visit_info_list, 'grouped_aids')
        nids_day1, nids_day2 = ut.take_column(visit_info_list, 'unique_nids')
        resight_nids = ut.isect(nids_day1, nids_day2)

        resight_aids1 = ut.take(grouped_aids1, resight_nids)
        resight_aids2 = ut.take(grouped_aids2, resight_nids)
        #resight_aids3 = [list(aids1) + list(aids2) for aids1, aids2 in zip(resight_aids1, resight_aids2)]

        ams_list = ibs.get_annotmatch_rowids_between_groups(resight_aids1, resight_aids2)
        aids1_list = ibs.unflat_map(ibs.get_annotmatch_aid1, ams_list)
        aids2_list = ibs.unflat_map(ibs.get_annotmatch_aid2, ams_list)

        between_edges = []
        for ams, aids1, aids2, rawaids1, rawaids2 in zip(ams_list, aids1_list, aids2_list, resight_aids1, resight_aids2):
            link_aids = aids1 + aids2
            rawaids3 = rawaids1 + rawaids2
            badaids = ut.setdiff(link_aids, rawaids3)
            assert not badaids
            user_links = [(int(u), int(v)) for (u, v) in zip(aids1, aids2)
                          if u is not None and v is not None]
            # HACK THIS OFF
            user_links = []
            if len(user_links) == 0:
                # Hack in an edge
                between_edges += [(rawaids1[0], rawaids2[0])]
            else:
                between_edges += user_links

        assert np.all(0 == np.diff(np.array(ibs.unflat_map(ibs.get_annot_nids, between_edges)), axis=1))

        import plottool_ibeis as pt
        import networkx as nx
        #pt.qt4ensure()
        #len(list(nx.connected_components(graph1)))
        #print(ut.graph_info(graph1))

        # Layout graph
        layoutkw = dict(
            prog='neato',
            draw_implicit=False, splines='line',
            #splines='curved',
            #splines='spline',
            #sep=10 / 72,
            #prog='dot', rankdir='TB',
        )

        def translate_graph_to_origin(graph):
            x, y, w, h = ut.get_graph_bounding_box(graph)
            ut.translate_graph(graph, (-x, -y))

        def stack_graphs(graph_list, vert=False, pad=None):
            graph_list_ = [g.copy() for g in graph_list]
            for g in graph_list_:
                translate_graph_to_origin(g)
            bbox_list = [ut.get_graph_bounding_box(g) for g in graph_list_]
            if vert:
                dim1 = 3
                dim2 = 2
            else:
                dim1 = 2
                dim2 = 3
            dim1_list = np.array([bbox[dim1] for bbox in bbox_list])
            dim2_list = np.array([bbox[dim2] for bbox in bbox_list])
            if pad is None:
                pad = np.mean(dim1_list) / 2
            offset1_list = ut.cumsum([0] + [d + pad for d in dim1_list[:-1]])
            max_dim2 = max(dim2_list)
            offset2_list = [(max_dim2 - d2) / 2 for d2 in dim2_list]
            if vert:
                t_xy_list = [(d2, d1) for d1, d2 in zip(offset1_list, offset2_list)]
            else:
                t_xy_list = [(d1, d2) for d1, d2 in zip(offset1_list, offset2_list)]

            for g, t_xy in zip(graph_list_, t_xy_list):
                ut.translate_graph(g, t_xy)
                nx.set_node_attributes(g, name='pin', values='true')

            new_graph = nx.compose_all(graph_list_)
            #pt.show_nx(new_graph, layout='custom', node_labels=False, as_directed=False)  # NOQA
            return new_graph

        # Construct graph
        for count, info in enumerate(visit_info_list):
            graph = nx.Graph()
            edges = [(int(u), int(v)) for u, v in info['edges']
                     if u is not None and v is not None]
            graph.add_edges_from(edges, attr_dict={'zorder': 10})
            nx.set_node_attributes(graph, name='zorder', values=20)

            # Layout in neato
            _ = pt.nx_agraph_layout(graph, inplace=True, **layoutkw)  # NOQA

            # Extract components and then flatten in nid ordering
            ccs = list(nx.connected_components(graph))
            root_aids = []
            cc_graphs = []
            for cc_nodes in ccs:
                cc = graph.subgraph(cc_nodes)
                try:
                    root_aids.append(list(ut.nx_source_nodes(cc.to_directed()))[0])
                except nx.NetworkXUnfeasible:
                    root_aids.append(list(cc.nodes())[0])
                cc_graphs.append(cc)

            root_nids = ibs.get_annot_nids(root_aids)
            nid2_graph = dict(zip(root_nids, cc_graphs))

            resight_nids_ = set(resight_nids).intersection(set(root_nids))
            noresight_nids_ = set(root_nids) - resight_nids_

            n_graph_list = ut.take(nid2_graph, sorted(noresight_nids_))
            r_graph_list = ut.take(nid2_graph, sorted(resight_nids_))

            if len(n_graph_list) > 0:
                n_graph = nx.compose_all(n_graph_list)
                _ = pt.nx_agraph_layout(n_graph, inplace=True, **layoutkw)  # NOQA
                n_graphs = [n_graph]
            else:
                n_graphs = []

            r_graphs = [stack_graphs(chunk) for chunk in ut.ichunks(r_graph_list, 100)]
            if count == 0:
                new_graph = stack_graphs(n_graphs + r_graphs, vert=True)
            else:
                new_graph = stack_graphs(r_graphs[::-1] + n_graphs, vert=True)

            #pt.show_nx(new_graph, layout='custom', node_labels=False, as_directed=False)  # NOQA
            info['graph'] = new_graph

        graph1_, graph2_ = ut.take_column(visit_info_list, 'graph')
        if False:
            _ = pt.show_nx(graph1_, layout='custom', node_labels=False, as_directed=False)  # NOQA
            _ = pt.show_nx(graph2_, layout='custom', node_labels=False, as_directed=False)  # NOQA

        graph_list = [graph1_, graph2_]
        twoday_graph = stack_graphs(graph_list, vert=True, pad=None)
        nx.set_node_attributes(twoday_graph, name='pin', values='true')

        if debug:
            ut.nx_delete_None_edge_attr(twoday_graph)
            ut.nx_delete_None_node_attr(twoday_graph)
            print('twoday_graph(pre) info' + ut.repr3(ut.graph_info(twoday_graph), nl=2))

        # Hack, no idea why there are nodes that don't exist here
        between_edges_ = [edge for edge in between_edges
                          if twoday_graph.has_node(edge[0]) and twoday_graph.has_node(edge[1])]

        twoday_graph.add_edges_from(between_edges_, attr_dict={'alpha': .2, 'zorder': 0})
        ut.nx_ensure_agraph_color(twoday_graph)

        layoutkw['splines'] = 'line'
        layoutkw['prog'] = 'neato'
        agraph = pt.nx_agraph_layout(twoday_graph, inplace=True, return_agraph=True, **layoutkw)[-1]  # NOQA
        if False:
            fpath = ut.truepath('~/ggr_graph.png')
            agraph.draw(fpath)
            ut.startfile(fpath)

        if debug:
            print('twoday_graph(post) info' + ut.repr3(ut.graph_info(twoday_graph)))

        _ = pt.show_nx(twoday_graph, layout='custom', node_labels=False, as_directed=False)  # NOQA
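Inside stack_graphs, each component is shifted past the previous one by its bounding-box extent plus padding; the offset arithmetic reduces to a padded cumulative sum, sketched here with made-up widths:

import numpy as np

widths = np.array([4.0, 2.0, 3.0])      # bounding-box extents along the stacking axis
pad = widths.mean() / 2
# each graph starts where the previous one ended, plus padding
offsets = np.cumsum(np.concatenate([[0.0], widths[:-1] + pad]))
assert offsets.tolist() == [0.0, 5.5, 9.0]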
Example #13
def temp_model(num_annots,
               num_names,
               score_evidence=[],
               name_evidence=[],
               other_evidence={},
               noquery=False,
               verbose=None,
               **kwargs):
    if verbose is None:
        verbose = ut.VERBOSE

    method = kwargs.pop('method', None)
    model = make_name_model(num_annots, num_names, verbose=verbose, **kwargs)

    if verbose:
        model.print_priors(ignore_ttypes=[MATCH_TTYPE, SCORE_TTYPE])

    model, evidence, soft_evidence = update_model_evidence(
        model, name_evidence, score_evidence, other_evidence)

    if verbose and len(soft_evidence) != 0:
        model.print_priors(ignore_ttypes=[MATCH_TTYPE, SCORE_TTYPE],
                           title='Soft Evidence',
                           color='green')

    # if verbose:
    #    ut.colorprint('\n --- Soft Evidence ---', 'white')
    #    for ttype, cpds in model.ttype2_cpds.items():
    #        if ttype != MATCH_TTYPE:
    #            for fs_ in ut.ichunks(cpds, 4):
    #                ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]),
    #                              'green')

    if verbose:
        ut.colorprint('\n --- Inference ---', 'red')

    if (len(evidence) > 0 or len(soft_evidence) > 0) and not noquery:
        evidence = model._ensure_internal_evidence(evidence)
        query_vars = []
        query_vars += ut.list_getattr(model.ttype2_cpds[NAME_TTYPE],
                                      'variable')
        # query_vars += ut.list_getattr(model.ttype2_cpds[MATCH_TTYPE], 'variable')
        query_vars = ut.setdiff(query_vars, evidence.keys())
        # query_vars = ut.setdiff(query_vars, soft_evidence.keys())
        query_results = cluster_query(model, query_vars, evidence,
                                      soft_evidence, method)
    else:
        query_results = {}

    # query_results is empty when noquery is set, so use .get to avoid a KeyError
    factor_list = query_results.get('factor_list', [])

    if verbose:
        logger.info('+--------')
        semtypes = [model.var2_cpd[f.variables[0]].ttype for f in factor_list]
        for type_, factors in ut.group_items(factor_list, semtypes).items():
            logger.info('Result Factors (%r)' % (type_, ))
            factors = ut.sortedby(factors, [f.variables[0] for f in factors])
            for fs_ in ut.ichunks(factors, 4):
                ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
                              'yellow')
        logger.info('MAP assignments')
        top_assignments = query_results.get('top_assignments', [])
        tmp = []
        for lbl, val in top_assignments:
            tmp.append('%s : %.4f' % (ut.repr2(lbl), val))
        logger.info(ut.align('\n'.join(tmp), ' :'))
        logger.info('L_____\n')

    showkw = dict(evidence=evidence,
                  soft_evidence=soft_evidence,
                  **query_results)

    from wbia.algo.hots import pgm_viz

    pgm_viz.show_model(model, **showkw)
    return (model, evidence, query_results)
Example #14
def cluster_query(
    model,
    query_vars=None,
    evidence=None,
    soft_evidence=None,
    method=None,
    operation='maximize',
):
    """
    CommandLine:
        python -m wbia.algo.hots.bayes --exec-cluster_query --show

    GridParams:
        >>> param_grid = dict(
        >>>     #method=['approx', 'bf', 'bp'],
        >>>     method=['approx', 'bp'],
        >>> )
        >>> combos = ut.all_dict_combinations(param_grid)
        >>> index = 0
        >>> keys = 'method'.split(', ')
        >>> method, = ut.dict_take(combos[index], keys)

    GridSetup:
        >>> from wbia.algo.hots.bayes import *  # NOQA
        >>> verbose = True
        >>> other_evidence = {}
        >>> name_evidence = [1, None, None, 0]
        >>> score_evidence = [2, 0, 2]
        >>> special_names = ['fred', 'sue', 'tom', 'paul']
        >>> model = make_name_model(
        >>>     num_annots=4, num_names=4, num_scores=3, verbose=True, mode=1,
        >>>     special_names=special_names)
        >>> method = None
        >>> model, evidence, soft_evidence = update_model_evidence(
        >>>     model, name_evidence, score_evidence, other_evidence)
        >>> evidence = model._ensure_internal_evidence(evidence)
        >>> query_vars = ut.list_getattr(model.ttype2_cpds[NAME_TTYPE], 'variable')

    GridExample:
        >>> # DISABLE_DOCTEST
        >>> query_results = cluster_query(model, query_vars, evidence,
        >>>                               method=method)
        >>> print(ut.repr2(query_results['top_assignments'], nl=1))
        >>> ut.quit_if_noshow()
        >>> from wbia.algo.hots import pgm_viz
        >>> pgm_viz.show_model(model, evidence=evidence, **query_results)
        >>> ut.show_if_requested()
    """
    evidence = model._ensure_internal_evidence(evidence)
    if query_vars is None:
        query_vars = model.nodes()
    orig_query_vars = query_vars  # NOQA
    query_vars = ut.setdiff(query_vars, list(evidence.keys()))

    if method is None:
        method = ut.get_argval('--method', type_=str, default='bp')

    reduced_joint = compute_reduced_joint(model, query_vars, evidence, method,
                                          operation)

    new_reduced_joint = collapse_factor_labels(model, reduced_joint, evidence)

    if False:
        report_partitioning_statistics(new_reduced_joint)

    # FIXME: are these max marginals?
    max_marginals = {}
    for i, var in enumerate(query_vars):
        one_out = query_vars[:i] + query_vars[i + 1:]
        max_marginals[var] = new_reduced_joint.marginalize(one_out,
                                                           inplace=False)
        # max_marginals[var] = joint2.maximize(one_out, inplace=False)
    factor_list = max_marginals.values()

    # Now find the most likely state
    reduced_variables = new_reduced_joint.variables
    new_state_idxs = np.array(new_reduced_joint._row_labels(asindex=True))
    new_values = new_reduced_joint.values.ravel()
    sortx = new_values.argsort()[::-1]
    sort_new_state_idxs = new_state_idxs.take(sortx, axis=0)
    sort_new_values = new_values.take(sortx)
    sort_new_states = list(
        zip(*[
            ut.dict_take(model.statename_dict[var], idx)
            for var, idx in zip(reduced_variables, sort_new_state_idxs.T)
        ]))

    # Better map assignment based on knowledge of labels
    map_assign = dict(zip(reduced_variables, sort_new_states[0]))

    sort_reduced_rowstr_lbls = [
        ut.repr2(dict(zip(reduced_variables, lbls)),
                 explicit=True,
                 nobraces=True,
                 strvals=True) for lbls in sort_new_states
    ]

    top_assignments = list(zip(sort_reduced_rowstr_lbls[:4], sort_new_values))
    if len(sort_new_values) > 3:
        top_assignments += [('other', 1 - sum(sort_new_values[:4]))]
    query_results = {
        'factor_list': factor_list,
        'top_assignments': top_assignments,
        'map_assign': map_assign,
        'method': method,
    }
    logger.info('query_results = %s' % (ut.repr3(query_results, nl=2), ))
    return query_results
Example #15
def ensure_pz_mtest():
    """
    Ensures that you have the PZ_MTEST dataset

    CommandLine:
        python -m ibeis.init.sysres --exec-ensure_pz_mtest
        python -m ibeis --tf ensure_pz_mtest

    Example:
        >>> # SCRIPT
        >>> from ibeis.init.sysres import *  # NOQA
        >>> ensure_pz_mtest()
    """
    print('ensure_pz_mtest')
    from ibeis import sysres
    workdir = sysres.get_workdir()
    mtest_zipped_url = const.ZIPPED_URLS.PZ_MTEST
    mtest_dir = ut.grab_zipped_url(mtest_zipped_url,
                                   ensure=True,
                                   download_dir=workdir)
    print('have mtest_dir=%r' % (mtest_dir, ))
    # update to the newest database version
    import ibeis
    ibs = ibeis.opendb('PZ_MTEST')
    print('cleaning up old database and ensuring everything is properly computed')
    ibs.db.vacuum()
    valid_aids = ibs.get_valid_aids()
    assert len(valid_aids) == 119
    ibs.update_annot_semantic_uuids(valid_aids)
    if ut.VERYVERBOSE:
        ibs.print_annotation_table()
    nid = ibs.get_name_rowids_from_text('', ensure=False)
    if nid is not None:
        ibs.set_name_texts([nid], ['lostname'])

    # Remove old imagesets and update to new special ones
    all_imgset_ids = ibs.get_valid_imgsetids()
    special_imgset_ids = ibs.get_special_imgsetids()
    other_imgset_ids = ut.setdiff(all_imgset_ids, special_imgset_ids)
    ibs.delete_imagesets(other_imgset_ids)
    ibs.set_exemplars_from_quality_and_viewpoint()
    ibs.update_all_image_special_imageset()

    occurrence_gids = [
        2, 9, 12, 16, 25, 26, 29, 30, 32, 33, 35, 46, 47, 52, 57, 61, 66, 70,
        71, 73, 74, 76, 77, 78, 79, 87, 88, 90, 96, 97, 103, 106, 108, 110,
        112, 113
    ]

    other_gids = ut.setdiff(ibs.get_valid_gids(), occurrence_gids)
    other_gids1 = other_gids[0::2]
    other_gids2 = other_gids[1::2]
    ibs.set_image_imagesettext(occurrence_gids,
                               ['Occurrence 1'] * len(occurrence_gids))
    ibs.set_image_imagesettext(other_gids1,
                               ['Occurrence 2'] * len(other_gids1))
    ibs.set_image_imagesettext(other_gids2,
                               ['Occurrence 3'] * len(other_gids2))

    # hack in some tags
    print('Hacking in some tags')
    foal_aids = [
        4, 8, 15, 21, 28, 34, 38, 41, 45, 49, 51, 56, 60, 66, 69, 74, 80, 83,
        91, 97, 103, 107, 109, 119
    ]
    mother_aids = [9, 16, 35, 42, 52, 57, 61, 67, 75, 84, 98, 104, 108, 114]
    ibs.append_annot_case_tags(foal_aids, ['foal'] * len(foal_aids))
    ibs.append_annot_case_tags(mother_aids, ['mother'] * len(mother_aids))
Example #16
    def update_visual_attrs(infr,
                            graph=None,
                            show_reviewed_edges=True,
                            show_unreviewed_edges=False,
                            show_inferred_diff=True,
                            show_inferred_same=True,
                            show_recent_review=False,
                            highlight_reviews=True,
                            show_inconsistency=True,
                            wavy=False,
                            simple_labels=False,
                            show_labels=True,
                            reposition=True,
                            use_image=False,
                            edge_overrides=None,
                            node_overrides=None,
                            colorby='name_label',
                            **kwargs
                            # hide_unreviewed_inferred=True
                            ):
        import wbia.plottool as pt

        infr.print('update_visual_attrs', 3)
        if graph is None:
            graph = infr.graph
        # if hide_cuts is not None:
        #     # show_unreviewed_cuts = not hide_cuts
        #     show_reviewed_cuts = not hide_cuts

        if not getattr(infr, '_viz_init_nodes', False):
            infr._viz_init_nodes = True
            nx.set_node_attributes(graph, name='shape', values='circle')
            # infr.set_node_attrs('shape', 'circle')

        if getattr(infr, '_viz_image_config_dirty', True):
            infr.update_node_image_attribute(graph=graph, use_image=use_image)

        def get_any(dict_, keys, default=None):
            for key in keys:
                if key in dict_:
                    return dict_[key]
            return default

        show_cand = get_any(
            kwargs, ['show_candidate_edges', 'show_candidates', 'show_cand'])
        if show_cand is not None:
            show_cand = True
            show_reviewed_edges = True
            show_unreviewed_edges = True
            show_inferred_diff = True
            show_inferred_same = True

        if kwargs.get('show_all'):
            show_cand = True

        # alpha_low = .5
        alpha_med = 0.9
        alpha_high = 1.0

        dark_background = graph.graph.get('dark_background', None)

        # Ensure we are starting from a clean slate
        # if reposition:
        ut.nx_delete_edge_attr(graph, infr.visual_edge_attrs_appearance)

        # Set annotation node labels
        node_to_nid = None
        if not show_labels:
            nx.set_node_attributes(graph,
                                   name='label',
                                   values=ut.dzip(graph.nodes(), ['']))
        else:
            if simple_labels:
                nx.set_node_attributes(
                    graph,
                    name='label',
                    values={n: str(n)
                            for n in graph.nodes()})
            else:
                if node_to_nid is None:
                    node_to_nid = nx.get_node_attributes(graph, 'name_label')
                node_to_view = nx.get_node_attributes(graph, 'viewpoint')
                if node_to_view:
                    annotnode_to_label = {
                        aid: 'aid=%r%s\nnid=%r' %
                        (aid, node_to_view[aid], node_to_nid[aid])
                        for aid in graph.nodes()
                    }
                else:
                    annotnode_to_label = {
                        aid: 'aid=%r\nnid=%r' % (aid, node_to_nid[aid])
                        for aid in graph.nodes()
                    }
                nx.set_node_attributes(graph,
                                       name='label',
                                       values=annotnode_to_label)

        # NODE_COLOR: based on name_label
        ut.color_nodes(graph,
                       labelattr=colorby,
                       outof=kwargs.get('outof', None),
                       sat_adjust=-0.4)

        # EDGES:
        # Grab different types of edges
        edges, edge_colors = infr.get_colored_edge_weights(
            graph, highlight_reviews)

        # reviewed_states = nx.get_edge_attributes(graph, 'evidence_decision')
        reviewed_states = {
            e: infr.edge_decision(e)
            for e in infr.graph.edges()
        }
        edge_to_inferred_state = nx.get_edge_attributes(
            graph, 'inferred_state')
        # dummy_edges = [edge for edge, flag in
        #                nx.get_edge_attributes(graph, '_dummy_edge').items()
        #                if flag]
        edge_to_reviewid = nx.get_edge_attributes(graph, 'review_id')
        recheck_edges = [
            edge for edge, split in nx.get_edge_attributes(
                graph, 'maybe_error').items() if split
        ]
        decision_to_edge = ut.group_pairs(reviewed_states.items())
        neg_edges = decision_to_edge[NEGTV]
        pos_edges = decision_to_edge[POSTV]
        incomp_edges = decision_to_edge[INCMP]
        unreviewed_edges = decision_to_edge[UNREV]

        inferred_same = [
            edge for edge, state in edge_to_inferred_state.items()
            if state == 'same'
        ]
        inferred_diff = [
            edge for edge, state in edge_to_inferred_state.items()
            if state == 'diff'
        ]
        inconsistent_external = [
            edge for edge, state in edge_to_inferred_state.items()
            if state == 'inconsistent_external'
        ]
        inferred_notcomp = [
            edge for edge, state in edge_to_inferred_state.items()
            if state == 'notcomp'
        ]

        reviewed_edges = incomp_edges + pos_edges + neg_edges
        compared_edges = pos_edges + neg_edges
        uncompared_edges = ut.setdiff(edges, compared_edges)
        nontrivial_inferred_same = ut.setdiff(
            inferred_same, pos_edges + neg_edges + incomp_edges)
        nontrivial_inferred_diff = ut.setdiff(
            inferred_diff, pos_edges + neg_edges + incomp_edges)
        nontrivial_inferred_edges = nontrivial_inferred_same + nontrivial_inferred_diff

        # EDGE_COLOR: based on edge_weight
        nx.set_edge_attributes(graph,
                               name='color',
                               values=ut.dzip(edges, edge_colors))

        # LINE_WIDTH: based on review_state
        # unreviewed_width = 2.0
        # reviewed_width = 5.0
        unreviewed_width = 1.0
        reviewed_width = 2.0
        if highlight_reviews:
            nx.set_edge_attributes(
                graph,
                name='linewidth',
                values=ut.dzip(reviewed_edges, [reviewed_width]),
            )
            nx.set_edge_attributes(
                graph,
                name='linewidth',
                values=ut.dzip(unreviewed_edges, [unreviewed_width]),
            )
        else:
            nx.set_edge_attributes(graph,
                                   name='linewidth',
                                   values=ut.dzip(edges, [unreviewed_width]))

        # EDGE_STROKE: based on decision and maybe_error
        # fg = pt.WHITE if dark_background else pt.BLACK
        # nx.set_edge_attributes(graph, name='stroke', values=ut.dzip(reviewed_edges, [{'linewidth': 3, 'foreground': fg}]))
        if show_inconsistency:
            nx.set_edge_attributes(
                graph,
                name='stroke',
                values=ut.dzip(recheck_edges, [{
                    'linewidth': 5,
                    'foreground': infr._error_color
                }]),
            )

        # Set linestyles to emphasize PCCs
        # Dash lines between PCCs inferred to be different
        nx.set_edge_attributes(graph,
                               name='linestyle',
                               values=ut.dzip(inferred_diff, ['dashed']))

        # Treat incomparable/incon-external inference as different
        nx.set_edge_attributes(graph,
                               name='linestyle',
                               values=ut.dzip(inferred_notcomp, ['dashed']))
        nx.set_edge_attributes(graph,
                               name='linestyle',
                               values=ut.dzip(inconsistent_external,
                                              ['dashed']))

        # Dot lines that we are unsure of
        nx.set_edge_attributes(graph,
                               name='linestyle',
                               values=ut.dzip(unreviewed_edges, ['dotted']))

        # Cut edges are implicit and dashed
        # nx.set_edge_attributes(graph, name='implicit', values=ut.dzip(cut_edges, [True]))
        # nx.set_edge_attributes(graph, name='linestyle', values=ut.dzip(cut_edges, ['dashed']))
        # nx.set_edge_attributes(graph, name='alpha', values=ut.dzip(cut_edges, [alpha_med]))

        nx.set_edge_attributes(graph,
                               name='implicit',
                               values=ut.dzip(uncompared_edges, [True]))

        # Only matching edges should impose constraints on the graph layout
        nx.set_edge_attributes(graph,
                               name='implicit',
                               values=ut.dzip(neg_edges, [True]))
        nx.set_edge_attributes(graph,
                               name='alpha',
                               values=ut.dzip(neg_edges, [alpha_med]))
        nx.set_edge_attributes(graph,
                               name='implicit',
                               values=ut.dzip(incomp_edges, [True]))
        nx.set_edge_attributes(graph,
                               name='alpha',
                               values=ut.dzip(incomp_edges, [alpha_med]))

        # Ensure reviewed edges are visible
        nx.set_edge_attributes(graph,
                               name='implicit',
                               values=ut.dzip(reviewed_edges, [False]))
        nx.set_edge_attributes(graph,
                               name='alpha',
                               values=ut.dzip(reviewed_edges, [alpha_high]))

        if True:
            # Inferred same edges can be allowed to constrain in order
            # to make things look nice sometimes
            nx.set_edge_attributes(graph,
                                   name='implicit',
                                   values=ut.dzip(inferred_same, [False]))
            nx.set_edge_attributes(graph,
                                   name='alpha',
                                   values=ut.dzip(inferred_same, [alpha_high]))

        if not kwargs.get('show_same', True):
            nx.set_edge_attributes(graph,
                                   name='alpha',
                                   values=ut.dzip(inferred_same, [0]))

        if not kwargs.get('show_diff', True):
            nx.set_edge_attributes(graph,
                                   name='alpha',
                                   values=ut.dzip(inferred_diff, [0]))

        if not kwargs.get('show_positive_edges', True):
            nx.set_edge_attributes(graph,
                                   name='alpha',
                                   values=ut.dzip(pos_edges, [0]))

        if not kwargs.get('show_negative_edges', True):
            nx.set_edge_attributes(graph,
                                   name='alpha',
                                   values=ut.dzip(neg_edges, [0]))

        if not kwargs.get('show_incomparable_edges', True):
            nx.set_edge_attributes(graph,
                                   name='alpha',
                                   values=ut.dzip(incomp_edges, [0]))

        if not kwargs.get('show_between', True):
            if node_to_nid is None:
                node_to_nid = nx.get_node_attributes(graph, 'name_label')
            between_edges = [(u, v) for u, v in edges
                             if node_to_nid[u] != node_to_nid[v]]
            nx.set_edge_attributes(graph,
                                   name='alpha',
                                   values=ut.dzip(between_edges, [0]))

        # SKETCH: based on inferred_edges
        # Make inferred edges wavy
        if wavy:
            # dict(scale=3.0, length=18.0, randomness=None)]
            nx.set_edge_attributes(
                graph,
                name='sketch',
                values=ut.dzip(
                    nontrivial_inferred_edges,
                    [dict(scale=10.0, length=64.0, randomness=None)],
                ),
            )

        # Make dummy edges more transparent
        # nx.set_edge_attributes(graph, name='alpha', values=ut.dzip(dummy_edges, [alpha_low]))
        selected_edges = kwargs.pop('selected_edges', None)

        # SHADOW: based on most recent
        # Increase visibility of nodes with the most recently changed timestamp
        if show_recent_review and edge_to_reviewid and selected_edges is None:
            review_ids = list(edge_to_reviewid.values())
            recent_idxs = ut.argmax(review_ids, multi=True)
            recent_edges = ut.take(list(edge_to_reviewid.keys()), recent_idxs)
            selected_edges = recent_edges

        if selected_edges is not None:
            # TODO: add photoshop-like parameters like
            # spread and size. offset is the same as angle and distance.
            nx.set_edge_attributes(
                graph,
                name='shadow',
                values=ut.dzip(
                    selected_edges,
                    [{
                        'rho': 0.3,
                        'alpha': 0.6,
                        'shadow_color': 'w' if dark_background else 'k',
                        'offset': (0, 0),
                        'scale': 3.0,
                    }],
                ),
            )

        # Z_ORDER: make sure nodes are on top
        nodes = list(graph.nodes())
        nx.set_node_attributes(graph,
                               name='zorder',
                               values=ut.dzip(nodes, [10]))
        nx.set_edge_attributes(graph,
                               name='zorder',
                               values=ut.dzip(edges, [0]))
        nx.set_edge_attributes(graph,
                               name='picker',
                               values=ut.dzip(edges, [10]))

        # VISIBILITY: Set visibility of edges based on arguments
        if not show_reviewed_edges:
            infr.print('Making reviewed edges invisible', 10)
            nx.set_edge_attributes(graph,
                                   name='style',
                                   values=ut.dzip(reviewed_edges, ['invis']))

        if not show_unreviewed_edges:
            infr.print('Making un-reviewed edges invisible', 10)
            nx.set_edge_attributes(graph,
                                   name='style',
                                   values=ut.dzip(unreviewed_edges, ['invis']))

        if not show_inferred_same:
            infr.print('Making nontrivial_same edges invisible', 10)
            nx.set_edge_attributes(graph,
                                   name='style',
                                   values=ut.dzip(nontrivial_inferred_same,
                                                  ['invis']))

        if not show_inferred_diff:
            infr.print('Making nontrivial_diff edges invisible', 10)
            nx.set_edge_attributes(graph,
                                   name='style',
                                   values=ut.dzip(nontrivial_inferred_diff,
                                                  ['invis']))

        if selected_edges is not None:
            # Always show the most recent review (remove setting of invis)
            # infr.print('recent_edges = %r' % (recent_edges,))
            nx.set_edge_attributes(graph,
                                   name='style',
                                   values=ut.dzip(selected_edges, ['']))

        if reposition:
            # LAYOUT: update the positioning layout
            def get_layoutkw(key, default):
                return kwargs.get(key, graph.graph.get(key, default))

            layoutkw = dict(
                prog='neato',
                splines=get_layoutkw('splines', 'line'),
                fontsize=get_layoutkw('fontsize', None),
                fontname=get_layoutkw('fontname', None),
                sep=10 / 72,
                esep=1 / 72,
                nodesep=0.1,
            )
            layoutkw.update(kwargs)
            # logger.info(ut.repr3(graph.edges))
            pt.nx_agraph_layout(graph, inplace=True, **layoutkw)

        if edge_overrides:
            for key, edge_to_attr in edge_overrides.items():
                nx.set_edge_attributes(graph, name=key, values=edge_to_attr)
        if node_overrides:
            for key, node_to_attr in node_overrides.items():
                nx.set_node_attributes(graph, name=key, values=node_to_attr)
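
A note on the pattern above: every show/hide kwarg funnels into the same two
edge attributes -- 'alpha' for transparency and 'style' ('invis') for hard
hiding -- and ut.dzip(edges, [value]) appears to broadcast the single value
into {edge: value} for every edge. A minimal self-contained sketch of that
attribute-driven visibility pattern, assuming networkx 2.x (the toy graph,
apply_visibility, and the show_same flag here are illustrative, not part of
the original API):

import networkx as nx

def apply_visibility(graph, same_edges, show_same=True):
    # Broadcast one alpha value across a set of edges, dzip-style.
    alpha = 1.0 if show_same else 0.0
    nx.set_edge_attributes(graph,
                           name='alpha',
                           values={e: alpha for e in same_edges})

G = nx.Graph([(1, 2), (2, 3)])
apply_visibility(G, same_edges=[(1, 2)], show_same=False)
print(nx.get_edge_attributes(G, 'alpha'))  # {(1, 2): 0.0}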
Example #17
def intraoccurrence_connected():
    r"""
    CommandLine:
        python -m ibeis.scripts.specialdraw intraoccurrence_connected --show
        python -m ibeis.scripts.specialdraw intraoccurrence_connected --show --postcut
        python -m ibeis.scripts.specialdraw intraoccurrence_connected --show --smaller

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.scripts.specialdraw import *  # NOQA
        >>> result = intraoccurrence_connected()
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import ibeis
    import plottool as pt
    from ibeis.viz import viz_graph
    import networkx as nx
    pt.ensure_pylab_qt4()
    ibs = ibeis.opendb(defaultdb='PZ_Master1')
    nid2_aid = {
        #4880: [3690, 3696, 3703, 3706, 3712, 3721],
        4880: [3690, 3696, 3703],
        6537: [3739],
        6653: [7671],
        6610: [7566, 7408],
        #6612: [7664, 7462, 7522],
        #6624: [7465, 7360],
        #6625: [7746, 7383, 7390, 7477, 7376, 7579],
        6630: [7586, 7377, 7464, 7478],
        #6677: [7500]
    }
    nid2_dbaids = {
        4880: [33, 6120, 7164],
        6537: [7017, 7206],
        6653: [7660]
    }
    if ut.get_argflag('--small') or ut.get_argflag('--smaller'):
        del nid2_aid[6630]
        del nid2_aid[6537]
        del nid2_dbaids[6537]
        if ut.get_argflag('--smaller'):
            nid2_dbaids[4880].remove(33)
            nid2_aid[4880].remove(3690)
            nid2_aid[6610].remove(7408)
        #del nid2_aid[4880]
        #del nid2_dbaids[4880]

    aids = ut.flatten(nid2_aid.values())

    temp_nids = [1] * len(aids)
    postcut = ut.get_argflag('--postcut')
    aids_list = ibs.group_annots_by_name(aids)[0]
    ensure_edges = 'all' if True or not postcut else None
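    # NOTE: the literal True short-circuits the conditional above, so
    # ensure_edges is always 'all' regardless of --postcut.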
    unlabeled_graph = viz_graph.make_netx_graph_from_aid_groups(
        ibs, aids_list,
        #invis_edges=invis_edges,
        ensure_edges=ensure_edges, temp_nids=temp_nids)
    viz_graph.color_by_nids(unlabeled_graph, unique_nids=[1] *
                            len(list(unlabeled_graph.nodes())))
    viz_graph.ensure_node_images(ibs, unlabeled_graph)
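    # NOTE: the positional nx.set_node_attributes / nx.set_edge_attributes
    # calls below use the networkx 1.x signature (graph, name, values);
    # networkx 2.x expects (graph, values, name=...).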
    nx.set_node_attributes(unlabeled_graph, 'shape', 'rect')
    #unlabeled_graph = unlabeled_graph.to_undirected()

    # Find the "database exemplars for these annots"
    if False:
        gt_aids = ibs.get_annot_groundtruth(aids)
        gt_aids = [ut.setdiff(s, aids) for s in gt_aids]
        dbaids = ut.unique(ut.flatten(gt_aids))
        dbaids = ibs.filter_annots_general(dbaids, minqual='good')
        ibs.get_annot_quality_texts(dbaids)
    else:
        dbaids = ut.flatten(nid2_dbaids.values())
    exemplars = nx.DiGraph()
    #graph = exemplars  # NOQA
    exemplars.add_nodes_from(dbaids)

    def add_clique(graph, nodes, edgeattrs={}, nodeattrs={}):
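        # ut.upper_diag_self_prodx(nodes) appears to enumerate all unordered
        # node pairs (the upper triangle of nodes x nodes), i.e. the edge
        # list of a clique over `nodes`.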
        edge_list = ut.upper_diag_self_prodx(nodes)
        graph.add_edges_from(edge_list, **edgeattrs)
        return edge_list

    for aids_, nid in zip(*ibs.group_annots_by_name(dbaids)):
        add_clique(exemplars, aids_)
    viz_graph.ensure_node_images(ibs, exemplars)
    viz_graph.color_by_nids(exemplars, ibs=ibs)

    nx.set_node_attributes(unlabeled_graph, 'framewidth', False)
    nx.set_node_attributes(exemplars,  'framewidth', 4.0)

    nx.set_node_attributes(unlabeled_graph, 'group', 'unlab')
    nx.set_node_attributes(exemplars,  'group', 'exemp')

    #big_graph = nx.compose_all([unlabeled_graph])
    big_graph = nx.compose_all([exemplars, unlabeled_graph])

    # add sparse connections from unlabeled to exemplars
    import numpy as np
    rng = np.random.RandomState(0)
    if True or not postcut:
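        # NOTE: `True or` makes this branch unconditional; the else-branch
        # below is unreachable.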
        for aid_ in unlabeled_graph.nodes():
            flags = rng.rand(len(exemplars)) > .5
            nid_ = ibs.get_annot_nids(aid_)
            exnids = np.array(ibs.get_annot_nids(list(exemplars.nodes())))
            flags = np.logical_or(exnids == nid_, flags)
            exmatches = ut.compress(list(exemplars.nodes()), flags)
            big_graph.add_edges_from(list(ut.product([aid_], exmatches)),
                                     color=pt.ORANGE, implicit=True)
    else:
        for aid_ in unlabeled_graph.nodes():
            flags = rng.rand(len(exemplars)) > .5
            exmatches = ut.compress(list(exemplars.nodes()), flags)
            nid_ = ibs.get_annot_nids(aid_)
            exnids = np.array(ibs.get_annot_nids(exmatches))
            exmatches = ut.compress(exmatches, exnids == nid_)
            big_graph.add_edges_from(list(ut.product([aid_], exmatches)))
        pass

    nx.set_node_attributes(big_graph, 'shape', 'rect')
    #if False and postcut:
    #    ut.nx_delete_node_attr(big_graph, 'nid')
    #    ut.nx_delete_edge_attr(big_graph, 'color')
    #    viz_graph.ensure_graph_nid_labels(big_graph, ibs=ibs)
    #    viz_graph.color_by_nids(big_graph, ibs=ibs)
    #    big_graph = big_graph.to_undirected()

    layoutkw = {
        'sep' : 1 / 5,
        'prog': 'neato',
        'overlap': 'false',
        #'splines': 'ortho',
        'splines': 'spline',
    }

    as_directed = False
    #as_directed = True
    #hacknode = True
    hacknode = 0

    graph = big_graph
    ut.nx_ensure_agraph_color(graph)
    if hacknode:
        nx.set_edge_attributes(graph, 'taillabel', {e: str(e[0]) for e in graph.edges()})
        nx.set_edge_attributes(graph, 'headlabel', {e: str(e[1]) for e in graph.edges()})

    explicit_graph = pt.get_explicit_graph(graph)
    _, layout_info = pt.nx_agraph_layout(explicit_graph, orig_graph=graph,
                                         inplace=True, **layoutkw)

    if ut.get_argflag('--smaller'):
        graph.node[7660]['pos'] = np.array([550, 350])
        graph.node[6120]['pos'] = np.array([200, 600]) + np.array([350, -400])
        graph.node[7164]['pos'] = np.array([200, 480]) + np.array([350, -400])
        nx.set_node_attributes(graph, 'pin', 'true')
        _, layout_info = pt.nx_agraph_layout(graph,
                                             inplace=True, **layoutkw)
    elif ut.get_argflag('--small'):
        graph.node[7660]['pos'] = np.array([750, 350])
        graph.node[33]['pos'] = np.array([300, 600]) + np.array([350, -400])
        graph.node[6120]['pos'] = np.array([500, 600]) + np.array([350, -400])
        graph.node[7164]['pos'] = np.array([410, 480]) + np.array([350, -400])
        nx.set_node_attributes(graph, 'pin', 'true')
        _, layout_info = pt.nx_agraph_layout(graph,
                                             inplace=True, **layoutkw)

    if not postcut:
        #pt.show_nx(graph.to_undirected(), layout='agraph', layoutkw=layoutkw,
        #           as_directed=False)
        #pt.show_nx(graph, layout='agraph', layoutkw=layoutkw,
        #           as_directed=as_directed, hacknode=hacknode)

        pt.show_nx(graph, layout='custom', layoutkw=layoutkw,
                   as_directed=as_directed, hacknode=hacknode)
    else:
        #explicit_graph = pt.get_explicit_graph(graph)
        #_, layout_info = pt.nx_agraph_layout(explicit_graph, orig_graph=graph,
        #                                     **layoutkw)

        #layout_info['edge']['alpha'] = .8
        #pt.apply_graph_layout_attrs(graph, layout_info)

        #graph_layout_attrs = layout_info['graph']
        ##edge_layout_attrs  = layout_info['edge']
        ##node_layout_attrs  = layout_info['node']

        #for key, vals in layout_info['node'].items():
        #    #print('[special] key = %r' % (key,))
        #    nx.set_node_attributes(graph, key, vals)

        #for key, vals in layout_info['edge'].items():
        #    #print('[special] key = %r' % (key,))
        #    nx.set_edge_attributes(graph, key, vals)

        #nx.set_edge_attributes(graph, 'alpha', .8)
        #graph.graph['splines'] = graph_layout_attrs.get('splines', 'line')
        #graph.graph['splines'] = 'polyline'   # graph_layout_attrs.get('splines', 'line')
        #graph.graph['splines'] = 'line'

        cut_graph = graph.copy()
        edge_list = list(cut_graph.edges())
        edge_nids = np.array(ibs.unflat_map(ibs.get_annot_nids, edge_list))
        cut_flags = edge_nids.T[0] != edge_nids.T[1]
        cut_edges = ut.compress(edge_list, cut_flags)
        cut_graph.remove_edges_from(cut_edges)
        ut.nx_delete_node_attr(cut_graph, 'nid')
        viz_graph.ensure_graph_nid_labels(cut_graph, ibs=ibs)

        #ut.nx_get_default_node_attributes(exemplars, 'color', None)
        ut.nx_delete_node_attr(cut_graph, 'color', nodes=unlabeled_graph.nodes())
        aid2_color = ut.nx_get_default_node_attributes(cut_graph, 'color', None)
        nid2_colors = ut.group_items(aid2_color.values(), ibs.get_annot_nids(aid2_color.keys()))
        nid2_colors = ut.map_dict_vals(ut.filter_Nones, nid2_colors)
        nid2_colors = ut.map_dict_vals(ut.unique, nid2_colors)
        #for val in nid2_colors.values():
        #    assert len(val) <= 1
        # Get initial colors
        nid2_color_ = {nid: colors_[0] for nid, colors_ in nid2_colors.items()
                       if len(colors_) == 1}

        graph = cut_graph
        viz_graph.color_by_nids(cut_graph, ibs=ibs, nid2_color_=nid2_color_)
        nx.set_node_attributes(cut_graph, 'framewidth', 4)

        pt.show_nx(cut_graph, layout='custom', layoutkw=layoutkw,
                   as_directed=as_directed, hacknode=hacknode)

    pt.zoom_factory()
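
The --postcut branch above keeps only edges whose endpoints share a name id.
A minimal sketch of that cut step on a toy graph, assuming the name id is
stored as an illustrative 'nid' node attribute (no ibeis database required):

import networkx as nx

G = nx.Graph()
G.add_nodes_from([(1, {'nid': 'A'}), (2, {'nid': 'A'}), (3, {'nid': 'B'})])
G.add_edges_from([(1, 2), (2, 3), (1, 3)])

nid = nx.get_node_attributes(G, 'nid')
cut_edges = [(u, v) for u, v in G.edges() if nid[u] != nid[v]]
G.remove_edges_from(cut_edges)
print(sorted(G.edges()))  # [(1, 2)] -- only the same-name edge survives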
Example #19
File: specialdraw.py Project: whaozl/ibeis
def double_depcache_graph():
    r"""
    CommandLine:
        python -m ibeis.scripts.specialdraw double_depcache_graph --show --testmode

        python -m ibeis.scripts.specialdraw double_depcache_graph --save=figures5/doubledepc.png --dpath ~/latex/cand/  --diskshow  --figsize=8,20 --dpi=220 --testmode --show --clipwhite
        python -m ibeis.scripts.specialdraw double_depcache_graph --save=figures5/doubledepc.png --dpath ~/latex/cand/  --diskshow  --figsize=8,20 --dpi=220 --testmode --show --clipwhite --arrow-width=.5

        python -m ibeis.scripts.specialdraw double_depcache_graph --save=figures5/doubledepc.png --dpath ~/latex/cand/  --diskshow  --figsize=8,20 --dpi=220 --testmode --show --clipwhite --arrow-width=5

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.scripts.specialdraw import *  # NOQA
        >>> result = double_depcache_graph()
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import ibeis
    import networkx as nx
    import plottool as pt
    pt.ensure_pylab_qt4()
    # pt.plt.xkcd()
    ibs = ibeis.opendb('testdb1')
    reduced = True
    implicit = True
    annot_graph = ibs.depc_annot.make_graph(reduced=reduced, implicit=implicit)
    image_graph = ibs.depc_image.make_graph(reduced=reduced, implicit=implicit)
    to_rename = ut.isect(image_graph.nodes(), annot_graph.nodes())
    nx.relabel_nodes(annot_graph, {x: 'annot_' + x
                                   for x in to_rename},
                     copy=False)
    nx.relabel_nodes(image_graph, {x: 'image_' + x
                                   for x in to_rename},
                     copy=False)
    graph = nx.compose_all([image_graph, annot_graph])
    #graph = nx.union_all([image_graph, annot_graph], rename=('image', 'annot'))
    # userdecision = ut.nx_makenode(graph, 'user decision', shape='rect', color=pt.DARK_YELLOW, style='diagonals')
    # userdecision = ut.nx_makenode(graph, 'user decision', shape='circle', color=pt.DARK_YELLOW)
    userdecision = ut.nx_makenode(
        graph,
        'User decision',
        shape='rect',
        #width=100, height=100,
        color=pt.YELLOW,
        style='diagonals')
    #longcat = True
    longcat = False

    #edge = ('feat', 'neighbor_index')
    #data = graph.get_edge_data(*edge)[0]
    #print('data = %r' % (data,))
    #graph.remove_edge(*edge)
    ## hack
    #graph.add_edge('featweight', 'neighbor_index', **data)

    graph.add_edge('detections',
                   userdecision,
                   constraint=longcat,
                   color=pt.PINK)
    graph.add_edge(userdecision,
                   'annotations',
                   constraint=longcat,
                   color=pt.PINK)
    # graph.add_edge(userdecision, 'annotations', implicit=True, color=[0, 0, 0])
    if not longcat:
        pass
        #graph.add_edge('images', 'annotations', style='invis')
        #graph.add_edge('thumbnails', 'annotations', style='invis')
        #graph.add_edge('thumbnails', userdecision, style='invis')
    graph.remove_node('Has_Notch')
    graph.remove_node('annotmask')
    layoutkw = {
        'ranksep': 5,
        'nodesep': 5,
        'dpi': 96,
        # 'nodesep': 1,
    }
    ns = 1000

    ut.nx_set_default_node_attributes(graph, 'fontsize', 72)
    ut.nx_set_default_node_attributes(graph, 'fontname', 'Ubuntu')
    ut.nx_set_default_node_attributes(graph, 'style', 'filled')

    ut.nx_set_default_node_attributes(graph, 'width', ns * ut.PHI)
    ut.nx_set_default_node_attributes(graph, 'height', ns * (1 / ut.PHI))

    #for u, v, d in graph.edge(data=True):
    for u, vkd in graph.edge.items():
        for v, dk in vkd.items():
            for k, d in dk.items():
                localid = d.get('local_input_id')
                if localid:
                    # d['headlabel'] = localid
                    if localid not in ['1']:
                        d['taillabel'] = localid
                    #d['label'] = localid
                if d.get('taillabel') in {'1'}:
                    del d['taillabel']
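    # NOTE: graph.edge[u][v][k] above is the networkx 1.x multigraph
    # adjacency structure; under networkx 2.x the equivalent iteration is
    # `for u, v, k, d in graph.edges(keys=True, data=True)`.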

    node_alias = {
        'chips': 'Chip',
        'images': 'Image',
        'feat': 'Feat',
        'featweight': 'Feat Weights',
        'thumbnails': 'Thumbnail',
        'detections': 'Detections',
        'annotations': 'Annotation',
        'Notch_Tips': 'Notch Tips',
        'probchip': 'Prob Chip',
        'Cropped_Chips': 'Cropped Chip',
        'Trailing_Edge': 'Trailing\nEdge',
        'Block_Curvature': 'Block\nCurvature',
        # 'BC_DTW': 'block curvature /\n dynamic time warp',
        'BC_DTW': 'DTW Distance',
        'vsone': 'Hots vsone',
        'feat_neighbs': 'Nearest\nNeighbors',
        'neighbor_index': 'Neighbor\nIndex',
        'vsmany': 'Hots vsmany',
        'annot_labeler': 'Annot Labeler',
        'labeler': 'Labeler',
        'localizations': 'Localizations',
        'classifier': 'Classifier',
        'sver': 'Spatial\nVerification',
        'Classifier': 'Existence',
        'image_labeler': 'Image Labeler',
    }
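    # NOTE: the verbose alias map above is immediately replaced by the
    # shorter snake_case map below; only the second assignment takes effect.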
    node_alias = {
        'Classifier': 'existence',
        'feat_neighbs': 'neighbors',
        'sver': 'spatial_verification',
        'Cropped_Chips': 'cropped_chip',
        'BC_DTW': 'dtw_distance',
        'Block_Curvature': 'curvature',
        'Trailing_Edge': 'trailing_edge',
        'Notch_Tips': 'notch_tips',
        'thumbnails': 'thumbnail',
        'images': 'image',
        'annotations': 'annotation',
        'chips': 'chip',
        #userdecision: 'User de'
    }
    node_alias = ut.delete_dict_keys(
        node_alias, ut.setdiff(node_alias.keys(), graph.nodes()))
    nx.relabel_nodes(graph, node_alias, copy=False)

    fontkw = dict(fontname='Ubuntu', fontweight='normal', fontsize=12)
    #pt.gca().set_aspect('equal')
    #pt.figure()
    pt.show_nx(graph, layoutkw=layoutkw, fontkw=fontkw)
    pt.zoom_factory()
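
Example #19 above avoids node collisions by prefixing the names shared
between the two dependency graphs before composing them. A minimal sketch of
that relabel-then-compose pattern with toy node names (networkx 2.x assumed):

import networkx as nx

annot = nx.DiGraph([('chip', 'feat')])
image = nx.DiGraph([('chip', 'thumb')])
shared = set(annot) & set(image)  # nodes that would collide on compose
nx.relabel_nodes(annot, {n: 'annot_' + n for n in shared}, copy=False)
nx.relabel_nodes(image, {n: 'image_' + n for n in shared}, copy=False)
merged = nx.compose_all([image, annot])
print(sorted(merged.nodes()))  # ['annot_chip', 'feat', 'image_chip', 'thumb']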
Example #20
def ggr_random_name_splits():
    """
    CommandLine:
        python -m wbia.viz.viz_graph2 ggr_random_name_splits --show

    Ignore:
        sshfs -o idmap=user lev:/ ~/lev

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.viz.viz_graph2 import *  # NOQA
        >>> ggr_random_name_splits()
    """
    import wbia.guitool as gt

    gt.ensure_qtapp()
    # nid_list = ibs.get_valid_nids(filter_empty=True)
    import wbia

    dbdir = '/media/danger/GGR/GGR-IBEIS'
    dbdir = (dbdir if ut.checkpath(dbdir) else
             ut.truepath('~/lev/media/danger/GGR/GGR-IBEIS'))
    ibs = wbia.opendb(dbdir=dbdir, allow_newdir=False)

    import datetime
    import numpy as np  # needed for np.array below; missing from this excerpt

    day1 = datetime.date(2016, 1, 30)
    day2 = datetime.date(2016, 1, 31)

    orig_filter_kw = {
        'multiple': None,
        # 'view': ['right'],
        # 'minqual': 'good',
        'is_known': True,
        'min_pername': 2,
    }
    orig_aids = ibs.filter_annots_general(filter_kw=ut.dict_union(
        orig_filter_kw,
        {
            'min_unixtime':
            ut.datetime_to_posixtime(ut.date_to_datetime(day1, 0.0)),
            'max_unixtime':
            ut.datetime_to_posixtime(ut.date_to_datetime(day2, 1.0)),
        },
    ))
    orig_all_annots = ibs.annots(orig_aids)
    orig_unique_nids, orig_grouped_annots_ = orig_all_annots.group(
        orig_all_annots.nids)
    # Ensure we get everything
    orig_grouped_annots = [
        ibs.annots(aids_) for aids_ in ibs.get_name_aids(orig_unique_nids)
    ]

    # pip install quantumrandom
    if False:
        import quantumrandom

        data = quantumrandom.uint16()
        seed = data.sum()
        print('seed = %r' % (seed, ))
        # import Crypto.Random
        # from Crypto import Random
        # quantumrandom.get_data()
        # StrongRandom = Crypto.Random.random.StrongRandom
        # aes.reseed(3340258)
        # chars = [str(chr(x)) for x in data.view(np.uint8)]
        # aes_seed = str('').join(chars)
        # aes = Crypto.Random.Fortuna.FortunaGenerator.AESGenerator()
        # aes.reseed(aes_seed)
        # aes.pseudo_random_data(10)

    orig_rand_idxs = ut.random_indexes(len(orig_grouped_annots), seed=3340258)
    orig_sample_size = 75
    random_annot_groups = ut.take(orig_grouped_annots, orig_rand_idxs)
    orig_annot_sample = random_annot_groups[:orig_sample_size]

    # OOPS -- the filter above was too loose (view and quality were left
    # out); redo with the corrected filter below ----

    filter_kw = {
        'multiple': None,
        'view': ['right'],
        'minqual': 'good',
        'is_known': True,
        'min_pername': 2,
    }
    filter_kw_ = ut.dict_union(
        filter_kw,
        {
            'min_unixtime':
            ut.datetime_to_posixtime(ut.date_to_datetime(day1, 0.0)),
            'max_unixtime':
            ut.datetime_to_posixtime(ut.date_to_datetime(day2, 1.0)),
        },
    )
    refiltered_sample = [
        ibs.filter_annots_general(annot.aids, filter_kw=filter_kw_)
        for annot in orig_annot_sample
    ]
    is_ok = np.array(ut.lmap(len, refiltered_sample)) >= 2
    ok_part_orig_sample = ut.compress(orig_annot_sample, is_ok)
    ok_part_orig_nids = [x.nids[0] for x in ok_part_orig_sample]

    # Now compute real sample
    aids = ibs.filter_annots_general(filter_kw=filter_kw_)
    all_annots = ibs.annots(aids)
    unique_nids, grouped_annots_ = all_annots.group(all_annots.nids)
    grouped_annots = grouped_annots_
    # Ensure we get everything
    # grouped_annots = [ibs.annots(aids_) for aids_ in ibs.get_name_aids(unique_nids)]

    pop = len(grouped_annots)
    pername_list = ut.lmap(len, grouped_annots)
    groups = wbia.annots.AnnotGroups(grouped_annots, ibs)
    match_tags = [ut.unique(ut.flatten(t)) for t in groups.match_tags]
    tag_case_hist = ut.dict_hist(ut.flatten(match_tags))
    print('name_pop = %r' % (pop, ))
    print('Annots per Multiton Name' +
          ut.repr3(ut.get_stats(pername_list, use_median=True)))
    print('Name Tag Hist ' + ut.repr3(tag_case_hist))
    print('Percent Photobomb: %.2f%%' %
          (tag_case_hist['photobomb'] / pop * 100))
    print('Percent Split: %.2f%%' % (tag_case_hist['splitcase'] / pop * 100))

    # Remove the ok part from this sample
    remain_unique_nids = ut.setdiff(unique_nids, ok_part_orig_nids)
    remain_grouped_annots = [
        ibs.annots(aids_) for aids_ in ibs.get_name_aids(remain_unique_nids)
    ]

    sample_size = 75
    import vtool as vt

    vt.calc_sample_from_error_bars(0.05, pop, conf_level=0.95, prior=0.05)

    remain_rand_idxs = ut.random_indexes(len(remain_grouped_annots),
                                         seed=3340258)
    remain_sample_size = sample_size - len(ok_part_orig_nids)
    remain_random_annot_groups = ut.take(remain_grouped_annots,
                                         remain_rand_idxs)
    remain_annot_sample = remain_random_annot_groups[:remain_sample_size]

    annot_sample_nofilter = ok_part_orig_sample + remain_annot_sample
    # Filter out all bad parts
    annot_sample_filter = [
        ibs.annots(ibs.filter_annots_general(annot.aids, filter_kw=filter_kw_))
        for annot in annot_sample_nofilter
    ]
    annot_sample = annot_sample_filter

    win = None
    from wbia.viz import viz_graph2

    for annots in ut.InteractiveIter(annot_sample):
        if win is not None:
            win.close()
        win = viz_graph2.make_qt_graph_interface(ibs,
                                                 aids=annots.aids,
                                                 init_mode='rereview')
        print(win)

    sample_groups = wbia.annots.AnnotGroups(annot_sample, ibs)

    flat_tags = [ut.unique(ut.flatten(t)) for t in sample_groups.match_tags]

    print('Using Split and Photobomb')
    is_positive = ['photobomb' in t or 'splitcase' in t for t in flat_tags]
    num_positive = sum(is_positive)
    vt.calc_error_bars_from_sample(sample_size,
                                   num_positive,
                                   pop,
                                   conf_level=0.95)

    print('Only Photobomb')
    is_positive = ['photobomb' in t for t in flat_tags]
    num_positive = sum(is_positive)
    vt.calc_error_bars_from_sample(sample_size,
                                   num_positive,
                                   pop,
                                   conf_level=0.95)

    print('Only SplitCase')
    is_positive = ['splitcase' in t for t in flat_tags]
    num_positive = sum(is_positive)
    vt.calc_error_bars_from_sample(sample_size,
                                   num_positive,
                                   pop,
                                   conf_level=0.95)
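
For reference, vt.calc_sample_from_error_bars above sizes the sample from a
target error bar. A back-of-the-envelope equivalent is the standard Cochran
sample-size formula with a finite-population correction -- a sketch only
(cochran_sample_size is illustrative, not necessarily what vtool implements):

import math

def cochran_sample_size(margin, pop, conf_level=0.95, prior=0.05):
    # Two-sided z-scores for the common confidence levels.
    z = {0.90: 1.645, 0.95: 1.96, 0.99: 2.576}[conf_level]
    n0 = z ** 2 * prior * (1 - prior) / margin ** 2  # infinite-population size
    return math.ceil(n0 / (1 + (n0 - 1) / pop))      # finite-population correction

print(cochran_sample_size(0.05, pop=1000))  # 69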
Example #21
def get_injured_sharks():
    """
    >>> from wbia.scripts.getshark import *  # NOQA
    """
    import requests

    url = 'http://www.whaleshark.org/getKeywordImages.jsp'
    resp = requests.get(url)
    assert resp.status_code == 200
    keywords = resp.json()['keywords']
    key_list = ut.take_column(keywords, 'indexName')
    key_to_nice = {k['indexName']: k['readableName'] for k in keywords}

    injury_patterns = [
        'injury',
        'net',
        'hook',
        'trunc',
        'damage',
        'scar',
        'nicks',
        'bite',
    ]

    injury_keys = [
        key for key in key_list if any([pat in key for pat in injury_patterns])
    ]
    noninjury_keys = ut.setdiff(key_list, injury_keys)
    injury_nice = ut.lmap(lambda k: key_to_nice[k], injury_keys)  # NOQA
    noninjury_nice = ut.lmap(lambda k: key_to_nice[k], noninjury_keys)  # NOQA
    key_list = injury_keys

    keyed_images = {}
    for key in ut.ProgIter(key_list, lbl='reading index', bs=True):
        key_url = url + '?indexName={indexName}'.format(indexName=key)
        key_resp = requests.get(key_url)
        assert key_resp.status_code == 200
        key_imgs = key_resp.json()['images']
        keyed_images[key] = key_imgs

    key_hist = {key: len(imgs) for key, imgs in keyed_images.items()}
    key_hist = ut.sort_dict(key_hist, 'vals')
    logger.info(ut.repr3(key_hist))
    nice_key_hist = ut.map_dict_keys(lambda k: key_to_nice[k], key_hist)
    nice_key_hist = ut.sort_dict(nice_key_hist, 'vals')
    logger.info(ut.repr3(nice_key_hist))

    key_to_urls = {
        key: ut.take_column(vals, 'url')
        for key, vals in keyed_images.items()
    }
    overlaps = {}
    import itertools

    overlap_img_list = []
    for k1, k2 in itertools.combinations(key_to_urls.keys(), 2):
        overlap_imgs = ut.isect(key_to_urls[k1], key_to_urls[k2])
        num_overlap = len(overlap_imgs)
        overlaps[(k1, k2)] = num_overlap
        overlaps[(k1, k1)] = len(key_to_urls[k1])
        if num_overlap > 0:
            # logger.info('[%s][%s], overlap=%r' % (k1, k2, num_overlap))
            overlap_img_list.extend(overlap_imgs)

    all_img_urls = list(set(ut.flatten(key_to_urls.values())))
    num_all = len(all_img_urls)  # NOQA
    logger.info('num_all = %r' % (num_all, ))

    # Determine super-categories
    categories = ['nicks', 'scar', 'trunc']

    # Force these keys into these categories
    key_to_cat = {'scarbite': 'other_injury'}

    cat_to_keys = ut.ddict(list)

    for key in key_to_urls.keys():
        flag = 1
        if key in key_to_cat:
            cat = key_to_cat[key]
            cat_to_keys[cat].append(key)
            continue
        for cat in categories:
            if cat in key:
                cat_to_keys[cat].append(key)
                flag = 0
        if flag:
            cat = 'other_injury'
            cat_to_keys[cat].append(key)

    cat_urls = ut.ddict(list)
    for cat, keys in cat_to_keys.items():
        for key in keys:
            cat_urls[cat].extend(key_to_urls[key])

    cat_hist = {}
    for cat in list(cat_urls.keys()):
        cat_urls[cat] = list(set(cat_urls[cat]))
        cat_hist[cat] = len(cat_urls[cat])

    logger.info(ut.repr3(cat_to_keys))
    logger.info(ut.repr3(cat_hist))

    key_to_cat = dict([(val, key) for key, vals in cat_to_keys.items()
                       for val in vals])

    # ingestset = {
    #    '__class__': 'ImageSet',
    #    'images': ut.ddict(dict)
    # }
    # for key, key_imgs in keyed_images.items():
    #    for imgdict in key_imgs:
    #        url = imgdict['url']
    #        encid = imgdict['correspondingEncounterNumber']
    #        # Make structure
    #        encdict = encounters[encid]
    #        encdict['__class__'] = 'Encounter'
    #        imgdict = ut.delete_keys(imgdict.copy(), ['correspondingEncounterNumber'])
    #        imgdict['__class__'] = 'Image'
    #        cat = key_to_cat[key]
    #        annotdict = {'relative_bbox': [.01, .01, .98, .98], 'tags': [cat, key]}
    #        annotdict['__class__'] = 'Annotation'

    #        # Ensure structures exist
    #        encdict['images'] = encdict.get('images', [])
    #        imgdict['annots'] = imgdict.get('annots', [])

    #        # Add an image to this encounter
    #        encdict['images'].append(imgdict)
    #        # Add an annotation to this image
    #        imgdict['annots'].append(annotdict)

    # # http://springbreak.wildbook.org/rest/org.ecocean.Encounter/1111
    # get_enc_url = 'http://www.whaleshark.org/rest/org.ecocean.Encounter/%s' % (encid,)
    # resp = requests.get(get_enc_url)
    # logger.info(ut.repr3(encdict))
    # logger.info(ut.repr3(encounters))

    # Download the files to the local disk
    # fpath_list =

    all_urls = ut.unique(
        ut.take_column(
            ut.flatten(
                ut.dict_subset(keyed_images,
                               ut.flatten(cat_to_keys.values())).values()),
            'url',
        ))

    dldir = ut.truepath('~/tmpsharks')
    from os.path import commonprefix, basename  # NOQA

    prefix = commonprefix(all_urls)
    suffix_list = [url_[len(prefix):] for url_ in all_urls]
    fname_list = [suffix.replace('/', '--') for suffix in suffix_list]

    fpath_list = []
    for url, fname in ut.ProgIter(zip(all_urls, fname_list),
                                  lbl='downloading imgs',
                                  freq=1):
        fpath = ut.grab_file_url(url,
                                 download_dir=dldir,
                                 fname=fname,
                                 verbose=False)
        fpath_list.append(fpath)

    # Make sure we keep orig info
    # url_to_keys = ut.ddict(list)
    url_to_info = ut.ddict(dict)
    for key, imgdict_list in keyed_images.items():
        for imgdict in imgdict_list:
            url = imgdict['url']
            info = url_to_info[url]
            for k, v in imgdict.items():
                info[k] = info.get(k, [])
                info[k].append(v)
            info['keys'] = info.get('keys', [])
            info['keys'].append(key)
            # url_to_keys[url].append(key)

    info_list = ut.take(url_to_info, all_urls)
    for info in info_list:
        if len(set(info['correspondingEncounterNumber'])) > 1:
            assert False, 'url with two different encounter nums'
    # Combine duplicate tags

    hashid_list = [
        ut.get_file_uuid(fpath_, stride=8)
        for fpath_ in ut.ProgIter(fpath_list, bs=True)
    ]
    groupxs = ut.group_indices(hashid_list)[1]
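    # ut.group_indices appears to return (unique_keys, groupxs); each group
    # in groupxs lists the positions of one duplicate image (same content
    # hash), so column 0 of each group picks one representative below.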

    # Group properties by duplicate images
    # groupxs = [g for g in groupxs if len(g) > 1]
    fpath_list_ = ut.take_column(ut.apply_grouping(fpath_list, groupxs), 0)
    url_list_ = ut.take_column(ut.apply_grouping(all_urls, groupxs), 0)
    info_list_ = [
        ut.map_dict_vals(ut.flatten, ut.dict_accum(*info_))
        for info_ in ut.apply_grouping(info_list, groupxs)
    ]

    encid_list_ = [
        ut.unique(info_['correspondingEncounterNumber'])[0]
        for info_ in info_list_
    ]
    keys_list_ = [ut.unique(info_['keys']) for info_ in info_list_]
    cats_list_ = [ut.unique(ut.take(key_to_cat, keys)) for keys in keys_list_]

    clist = ut.ColumnLists({
        'gpath': fpath_list_,
        'url': url_list_,
        'encid': encid_list_,
        'key': keys_list_,
        'cat': cats_list_,
    })

    # for info_ in ut.apply_grouping(info_list, groupxs):
    #    info = ut.dict_accum(*info_)
    #    info = ut.map_dict_vals(ut.flatten, info)
    #    x = ut.unique(ut.flatten(ut.dict_accum(*info_)['correspondingEncounterNumber']))
    #    if len(x) > 1:
    #        info = info.copy()
    #        del info['keys']
    #        logger.info(ut.repr3(info))

    flags = ut.lmap(ut.fpath_has_imgext, clist['gpath'])
    clist = clist.compress(flags)

    import wbia

    ibs = wbia.opendb('WS_Injury', allow_newdir=True)

    gid_list = ibs.add_images(clist['gpath'])
    clist['gid'] = gid_list

    failed_flags = ut.flag_None_items(clist['gid'])
    logger.info('# failed %s' % (sum(failed_flags), ))
    passed_flags = ut.not_list(failed_flags)
    clist = clist.compress(passed_flags)
    ut.assert_all_not_None(clist['gid'])
    # ibs.get_image_uris_original(clist['gid'])
    ibs.set_image_uris_original(clist['gid'], clist['url'], overwrite=True)

    # ut.zipflat(clist['cat'], clist['key'])
    if False:
        # Can run detection instead
        clist['tags'] = ut.zipflat(clist['cat'])
        aid_list = ibs.use_images_as_annotations(clist['gid'],
                                                 adjust_percent=0.01,
                                                 tags_list=clist['tags'])
        aid_list

    import wbia.plottool as pt
    from wbia import core_annots

    pt.qt4ensure()
    # annots = ibs.annots()
    # aids = [1, 2]
    # ibs.depc_annot.get('hog', aids , 'hog')
    # ibs.depc_annot.get('chip', aids, 'img')
    for aid in ut.InteractiveIter(ibs.get_valid_aids()):
        hogs = ibs.depc_annot.d.get_hog_hog([aid])
        chips = ibs.depc_annot.d.get_chips_img([aid])
        chip = chips[0]
        hogimg = core_annots.make_hog_block_image(hogs[0])
        pt.clf()
        pt.imshow(hogimg, pnum=(1, 2, 1))
        pt.imshow(chip, pnum=(1, 2, 2))
        fig = pt.gcf()
        fig.show()
        fig.canvas.draw()

    # logger.info(len(groupxs))

    # if False:
    # groupxs = ut.find_duplicate_items(ut.lmap(basename, suffix_list)).values()
    # logger.info(ut.repr3(ut.apply_grouping(all_urls, groupxs)))
    #    # FIX
    #    for fpath, fname in zip(fpath_list, fname_list):
    #        if ut.checkpath(fpath):
    #            ut.move(fpath, join(dirname(fpath), fname))
    #            logger.info('fpath = %r' % (fpath,))

    # import wbia
    # from wbia.dbio import ingest_dataset
    # dbdir = wbia.sysres.lookup_dbdir('WS_ALL')
    # self = ingest_dataset.Ingestable2(dbdir)

    if False:
        # Show overlap matrix
        import wbia.plottool as pt
        import pandas as pd
        import numpy as np

        dict_ = overlaps
        s = pd.Series(dict_, index=pd.MultiIndex.from_tuples(overlaps))
        df = s.unstack()
        lhs, rhs = df.align(df.T)
        df = lhs.add(rhs, fill_value=0).fillna(0)
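        # Align the upper-triangular overlap counts with their transpose and
        # add, producing a full symmetric overlap matrix.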

        label_texts = df.columns.values

        def label_ticks(label_texts):
            import wbia.plottool as pt

            truncated_labels = [repr(lbl[0:100]) for lbl in label_texts]
            ax = pt.gca()
            ax.set_xticks(list(range(len(label_texts))))
            ax.set_xticklabels(truncated_labels)
            [lbl.set_rotation(-55) for lbl in ax.get_xticklabels()]
            [
                lbl.set_horizontalalignment('left')
                for lbl in ax.get_xticklabels()
            ]

            # xgrid, ygrid = np.meshgrid(range(len(label_texts)), range(len(label_texts)))
            # pt.plot_surface3d(xgrid, ygrid, disjoint_mat)
            ax.set_yticks(list(range(len(label_texts))))
            ax.set_yticklabels(truncated_labels)
            [
                lbl.set_horizontalalignment('right')
                for lbl in ax.get_yticklabels()
            ]
            [
                lbl.set_verticalalignment('center')
                for lbl in ax.get_yticklabels()
            ]
            # [lbl.set_rotation(20) for lbl in ax.get_yticklabels()]

        # df = df.sort(axis=0)
        # df = df.sort(axis=1)

        sortx = np.argsort(df.sum(axis=1).values)[::-1]
        df = df.take(sortx, axis=0)
        df = df.take(sortx, axis=1)

        fig = pt.figure(fnum=1)
        fig.clf()
        mat = df.values.astype(np.int32)
        mat[np.diag_indices(len(mat))] = 0
        vmax = mat[(1 - np.eye(len(mat))).astype(bool)].max()  # np.bool was removed in NumPy 1.24
        import matplotlib.colors

        norm = matplotlib.colors.Normalize(vmin=0, vmax=vmax, clip=True)
        pt.plt.imshow(mat, cmap='hot', norm=norm, interpolation='none')
        pt.plt.colorbar()
        pt.plt.grid('off')
        label_ticks(label_texts)
        fig.tight_layout()

    # overlap_df = pd.DataFrame.from_dict(overlap_img_list)

    class TmpImage(ut.NiceRepr):
        pass

    from skimage.feature import hog
    from skimage import data, color, exposure
    import wbia.plottool as pt

    image2 = color.rgb2gray(data.astronaut())  # NOQA

    fpath = './GOPR1120.JPG'

    import vtool as vt

    for fpath in [fpath]:
        """
        http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
        """

        image = vt.imread(fpath, grayscale=True)
        image = pt.color_funcs.to_base01(image)

        fig = pt.figure(fnum=2)
        fd, hog_image = hog(
            image,
            orientations=8,
            pixels_per_cell=(16, 16),
            cells_per_block=(1, 1),
            visualize=True,  # spelled 'visualise' before scikit-image 0.15
        )

        fig, (ax1, ax2) = pt.plt.subplots(1,
                                          2,
                                          figsize=(8, 4),
                                          sharex=True,
                                          sharey=True)

        ax1.axis('off')
        ax1.imshow(image, cmap=pt.plt.cm.gray)
        ax1.set_title('Input image')
        ax1.set_adjustable('box')  # 'box-forced' was removed in Matplotlib 3.0

        # Rescale histogram for better display
        hog_image_rescaled = exposure.rescale_intensity(hog_image,
                                                        in_range=(0, 0.02))

        ax2.axis('off')
        ax2.imshow(hog_image_rescaled, cmap=pt.plt.cm.gray)
        ax2.set_title('Histogram of Oriented Gradients')
        ax2.set_adjustable('box')  # was ax1 / 'box-forced'; this panel is ax2
        pt.plt.show()