def on_click(self, event):
    print('[inter] clicked in expandable interact')
    ax = event.inaxes
    if ih.clicked_inside_axis(event):
        func = pt.get_plotdat(ax, 'plot_func', None)
        if ut.VERBOSE:
            print('func = %r' % (func,))
        if func is not None:
            if ut.VERBOSE:
                print('calling func = %r' % (func,))
            fnum = pt.next_fnum()
            # pt.figure(fnum=fnum)
            pnum = (1, 1, 1)
            index = pt.get_plotdat(ax, 'expandable_index', None)
            if index is not None:
                ishow_func = self.ishow_func_list[index]
            else:
                ishow_func = None
            if ishow_func is not None:
                inter = ishow_func(fnum=fnum)
            else:
                if check_if_subinteract(func):
                    inter = func(fnum=fnum)
                    inter.show_page()
                elif hasattr(func, 'plot'):
                    inter = func
                    inter.start()
                    # func.plot(fnum=self.fnum, pnum=pnum)
                else:
                    func(fnum=fnum, pnum=pnum)
                # inter.show_page()
            fig = pt.gcf()
            pt.show_figure(fig)

def query_last_feature(self):
    ibs = self.ibs
    qaid = self.qaid
    viz.show_nearest_descriptors(
        ibs, qaid, self.last_fx, pt.next_fnum(), qreq_=self.qreq_, draw_chip=True
    )
    fig3 = pt.gcf()
    ih.connect_callback(fig3, 'button_press_event', self.on_click)
    pt.draw()

def show_model(model, evidence={}, soft_evidence={}, **kwargs):
    """
    References:
        http://stackoverflow.com/questions/22207802/pygraphviz-networkx-set-node-level-or-layer

    Ignore:
        pkg-config --libs-only-L libcgraph
        sudo apt-get install libgraphviz-dev -y
        sudo apt-get install libgraphviz4 -y

        # sudo apt-get install pkg-config
        sudo apt-get install libgraphviz-dev
        # pip install git+git://github.com/pygraphviz/pygraphviz.git
        pip install pygraphviz
        python -c "import pygraphviz; print(pygraphviz.__file__)"

        sudo pip3 install pygraphviz --install-option="--include-path=/usr/include/graphviz" --install-option="--library-path=/usr/lib/graphviz/"
        python3 -c "import pygraphviz; print(pygraphviz.__file__)"
    """
    if ut.get_argval('--hackmarkov') or ut.get_argval('--hackjunc'):
        draw_tree_model(model, **kwargs)
        return

    import wbia.plottool as pt
    import networkx as netx
    import matplotlib as mpl

    fnum = pt.ensure_fnum(None)
    fig = pt.figure(fnum=fnum, pnum=(3, 1, (slice(0, 2), 0)), doclf=True)  # NOQA
    # fig = pt.figure(fnum=fnum, pnum=(3, 2, (1, slice(1, 2))), doclf=True)  # NOQA
    ax = pt.gca()
    var2_post = {f.variables[0]: f for f in kwargs.get('factor_list', [])}

    netx_graph = model
    # netx_graph.graph.setdefault('graph', {})['size'] = '"10,5"'
    # netx_graph.graph.setdefault('graph', {})['rankdir'] = 'LR'

    pos = get_hacked_pos(netx_graph)
    # netx.nx_agraph.pygraphviz_layout(netx_graph)
    # pos = netx.nx_agraph.pydot_layout(netx_graph, prog='dot')
    # pos = netx.nx_agraph.graphviz_layout(netx_graph)

    drawkw = dict(pos=pos, ax=ax, with_labels=True, node_size=1500)
    if evidence is not None:
        node_colors = [
            # (pt.TRUE_BLUE
            (pt.WHITE if node not in soft_evidence else pt.LIGHT_PINK)
            if node not in evidence
            else pt.FALSE_RED
            for node in netx_graph.nodes()
        ]

        for node in netx_graph.nodes():
            cpd = model.var2_cpd[node]
            if cpd.ttype == 'score':
                pass
        drawkw['node_color'] = node_colors

    netx.draw(netx_graph, **drawkw)

    show_probs = True
    if show_probs:
        textprops = {
            'family': 'monospace',
            'horizontalalignment': 'left',
            # 'horizontalalignment': 'center',
            # 'size': 12,
            'size': 8,
        }

        textkw = dict(
            xycoords='data',
            boxcoords='offset points',
            pad=0.25,
            framewidth=True,
            arrowprops=dict(arrowstyle='->'),
            # bboxprops=dict(fc=node_attr['fillcolor']),
        )

        netx_nodes = model.nodes(data=True)
        node_key_list = ut.get_list_column(netx_nodes, 0)
        pos_list = ut.dict_take(pos, node_key_list)

        artist_list = []
        offset_box_list = []
        for pos_, node in zip(pos_list, netx_nodes):
            x, y = pos_
            variable = node[0]

            cpd = model.var2_cpd[variable]

            prior_marg = (
                cpd
                if cpd.evidence is None
                else cpd.marginalize(cpd.evidence, inplace=False)
            )

            prior_text = None
            text = None
            if variable in evidence:
                text = cpd.variable_statenames[evidence[variable]]
            elif variable in var2_post:
                post_marg = var2_post[variable]
                text = pgm_ext.make_factor_text(post_marg, 'post')
                prior_text = pgm_ext.make_factor_text(prior_marg, 'prior')
            else:
                if len(evidence) == 0 and len(soft_evidence) == 0:
                    prior_text = pgm_ext.make_factor_text(prior_marg, 'prior')

            show_post = kwargs.get('show_post', False)
            show_prior = kwargs.get('show_prior', False)
            show_prior = True
            show_post = True

            show_ev = evidence is not None and variable in evidence
            if (show_post or show_ev) and text is not None:
                offset_box = mpl.offsetbox.TextArea(text, textprops)
                artist = mpl.offsetbox.AnnotationBbox(
                    # offset_box, (x + 5, y), xybox=(20., 5.),
                    offset_box,
                    (x, y + 5),
                    xybox=(4.0, 20.0),
                    # box_alignment=(0, 0),
                    box_alignment=(0.5, 0),
                    **textkw
                )
                offset_box_list.append(offset_box)
                artist_list.append(artist)

            if show_prior and prior_text is not None:
                offset_box2 = mpl.offsetbox.TextArea(prior_text, textprops)
                artist2 = mpl.offsetbox.AnnotationBbox(
                    # offset_box2, (x - 5, y), xybox=(-20., -15.),
                    # offset_box2, (x, y - 5), xybox=(-15., -20.),
                    offset_box2,
                    (x, y - 5),
                    xybox=(-4, -20.0),
                    # box_alignment=(1, 1),
                    box_alignment=(0.5, 1),
                    **textkw
                )
                offset_box_list.append(offset_box2)
                artist_list.append(artist2)

        for artist in artist_list:
            ax.add_artist(artist)

        xmin, ymin = np.array(pos_list).min(axis=0)
        xmax, ymax = np.array(pos_list).max(axis=0)
        num_annots = len(model.ttype2_cpds['name'])
        if num_annots > 4:
            ax.set_xlim((xmin - 40, xmax + 40))
            ax.set_ylim((ymin - 50, ymax + 50))
            fig.set_size_inches(30, 7)
        else:
            ax.set_xlim((xmin - 42, xmax + 42))
            ax.set_ylim((ymin - 50, ymax + 50))
            fig.set_size_inches(23, 7)
        fig = pt.gcf()

        title = 'num_names=%r, num_annots=%r' % (model.num_names, num_annots)
        map_assign = kwargs.get('map_assign', None)
        # max_marginal_list = []
        # for name, marginal in marginalized_joints.items():
        #     states = list(ut.iprod(*marginal.statenames))
        #     vals = marginal.values.ravel()
        #     x = vals.argmax()
        #     max_marginal_list += ['P(' + ', '.join(states[x]) + ') = ' + str(vals[x])]
        # title += str(marginal)
        top_assignments = kwargs.get('top_assignments', None)
        if top_assignments is not None:
            map_assign, map_prob = top_assignments[0]
            if map_assign is not None:
                # title += '\nMAP=' + ut.repr2(map_assign, strvals=True)
                title += '\nMAP: ' + map_assign + ' @' + '%.2f%%' % (100 * map_prob,)
        if kwargs.get('show_title', True):
            pt.set_figtitle(title, size=14)
        # pt.set_xlabel()

        def hack_fix_centeralign():
            if textprops['horizontalalignment'] == 'center':
                logger.info('Fixing centeralign')
                fig = pt.gcf()
                fig.canvas.draw()

                # Superhack for centered text. Fix bug in
                # /usr/local/lib/python2.7/dist-packages/matplotlib/offsetbox.py
                # /usr/local/lib/python2.7/dist-packages/matplotlib/text.py
                for offset_box in offset_box_list:
                    offset_box.set_offset
                    z = offset_box._text.get_window_extent()
                    (z.x1 - z.x0) / 2
                    offset_box._text
                    T = offset_box._text.get_transform()
                    A = mpl.transforms.Affine2D()
                    A.clear()
                    A.translate((z.x1 - z.x0) / 2, 0)
                    offset_box._text.set_transform(T + A)

        hack_fix_centeralign()

    top_assignments = kwargs.get('top_assignments', None)
    if top_assignments is not None:
        bin_labels = ut.get_list_column(top_assignments, 0)
        bin_vals = ut.get_list_column(top_assignments, 1)

        # bin_labels = ['\n'.join(ut.textwrap.wrap(_lbl, width=30)) for _lbl in bin_labels]

        pt.draw_histogram(
            bin_labels,
            bin_vals,
            fnum=fnum,
            pnum=(3, 8, (2, slice(4, None))),
            transpose=True,
            use_darkbackground=False,
            # xtick_rotation=-10,
            ylabel='Prob',
            xlabel='assignment',
        )
        pt.set_title('Assignment probabilities')

def ishow_chip(
    ibs, aid, fnum=2, fx=None, dodraw=True, config2_=None, ischild=False, **kwargs
):
    r"""
    # TODO: split into two interactions: interact chip and interact chip features

    Args:
        ibs (IBEISController):  wbia controller object
        aid (int):  annotation id
        fnum (int):  figure number
        fx (None):

    CommandLine:
        python -m wbia.viz.interact.interact_chip --test-ishow_chip --show
        python -m wbia.viz.interact.interact_chip --test-ishow_chip --show --aid 2

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.viz.interact.interact_chip import *  # NOQA
        >>> import wbia
        >>> # build test data
        >>> ibs = wbia.opendb('testdb1')
        >>> aid = ut.get_argval('--aid', type_=int, default=1)
        >>> fnum = 2
        >>> fx = None
        >>> # execute function
        >>> dodraw = ut.show_was_requested()
        >>> result = ishow_chip(ibs, aid, fnum, fx, dodraw)
        >>> # verify results
        >>> pt.show_if_requested()
        >>> print(result)
    """
    fnum = pt.ensure_fnum(fnum)
    vh.ibsfuncs.assert_valid_aids(ibs, (aid,))
    # TODO: Reconcile this with interact keypoints.
    # Preferably this will call that but it will set some fancy callbacks
    if not ischild:
        fig = ih.begin_interaction('chip', fnum)
    else:
        fig = pt.gcf()
        # fig = pt.figure(fnum=fnum, pnum=pnum)

    # Get chip info (make sure get_chips is called first)
    # mode_ptr = [1]
    mode_ptr = [0]

    def _select_fxth_kpt(fx):
        from wbia.plottool.viz_featrow import draw_feat_row

        # Get the fx-th keypiont
        chip = ibs.get_annot_chips(aid, config2_=config2_)
        kp = ibs.get_annot_kpts(aid, config2_=config2_)[fx]
        sift = ibs.get_annot_vecs(aid, config2_=config2_)[fx]
        # Draw chip + keypoints + highlighted plots
        _chip_view(pnum=(2, 1, 1), sel_fx=fx)
        # ishow_chip(ibs, aid, fnum=None, fx=fx, config2_=config2_, **kwargs)
        # Draw the selected feature plots
        nRows, nCols, px = (2, 3, 3)
        draw_feat_row(chip, fx, kp, sift, fnum, nRows, nCols, px, None)

    def _chip_view(mode=0, pnum=(1, 1, 1), **kwargs):
        logger.info('... _chip_view mode=%r' % mode_ptr[0])
        kwargs['ell'] = mode_ptr[0] == 1
        kwargs['pts'] = mode_ptr[0] == 2
        if not ischild:
            pt.figure(fnum=fnum, pnum=pnum, docla=True, doclf=True)
        # Toggle no keypoints view
        viz.show_chip(ibs, aid, fnum=fnum, pnum=pnum, config2_=config2_, **kwargs)
        pt.set_figtitle('Chip View')

    def _on_chip_click(event):
        logger.info('[inter] clicked chip')
        ax, x, y = event.inaxes, event.xdata, event.ydata
        if ih.clicked_outside_axis(event):
            if not ischild:
                logger.info('... out of axis')
                mode_ptr[0] = (mode_ptr[0] + 1) % 3
                _chip_view(**kwargs)
        else:
            if event.button == 3:  # right-click
                import wbia.guitool as gt

                # from wbia.viz.interact import interact_chip
                height = fig.canvas.geometry().height()
                qpoint = gt.newQPoint(event.x, height - event.y)
                refresh_func = partial(_chip_view, **kwargs)
                callback_list = build_annot_context_options(
                    ibs,
                    aid,
                    refresh_func=refresh_func,
                    with_interact_chip=False,
                    config2_=config2_,
                )
                qwin = fig.canvas
                gt.popup_menu(qwin, qpoint, callback_list)
                # interact_chip.show_annot_context_menu(
                #     ibs, aid, fig.canvas, qpoint, refresh_func=refresh_func,
                #     with_interact_chip=False, config2_=config2_)
            else:
                viztype = vh.get_ibsdat(ax, 'viztype')
                logger.info('[ic] viztype=%r' % viztype)
                if viztype == 'chip' and event.key == 'shift':
                    _chip_view(**kwargs)
                    ih.disconnect_callback(fig, 'button_press_event')
                elif viztype == 'chip':
                    kpts = ibs.get_annot_kpts(aid, config2_=config2_)
                    if len(kpts) > 0:
                        fx = vt.nearest_point(x, y, kpts, conflict_mode='next')[0]
                        logger.info('... clicked fx=%r' % fx)
                        _select_fxth_kpt(fx)
                    else:
                        logger.info('... len(kpts) == 0')
                elif viztype in ['warped', 'unwarped']:
                    fx = vh.get_ibsdat(ax, 'fx')
                    if fx is not None and viztype == 'warped':
                        viz.show_keypoint_gradient_orientations(
                            ibs, aid, fx, fnum=pt.next_fnum()
                        )
                else:
                    logger.info('...Unknown viztype: %r' % viztype)
        viz.draw()

    # Draw without keypoints the first time
    if fx is not None:
        _select_fxth_kpt(fx)
    else:
        _chip_view(**kwargs)
    if dodraw:
        viz.draw()

    if not ischild:
        ih.connect_callback(fig, 'button_press_event', _on_chip_click)

def show_model(model, evidence={}, soft_evidence={}, **kwargs):
    """
    References:
        http://stackoverflow.com/questions/22207802/pygraphviz-networkx-set-node-level-or-layer

    Ignore:
        pkg-config --libs-only-L libcgraph
        sudo apt-get install libgraphviz-dev -y
        sudo apt-get install libgraphviz4 -y

        # sudo apt-get install pkg-config
        sudo apt-get install libgraphviz-dev
        # pip install git+git://github.com/pygraphviz/pygraphviz.git
        pip install pygraphviz
        python -c "import pygraphviz; print(pygraphviz.__file__)"

        sudo pip3 install pygraphviz --install-option="--include-path=/usr/include/graphviz" --install-option="--library-path=/usr/lib/graphviz/"
        python3 -c "import pygraphviz; print(pygraphviz.__file__)"

    CommandLine:
        python -m wbia.algo.hots.bayes --exec-show_model --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots.bayes import *  # NOQA
        >>> model = '?'
        >>> evidence = {}
        >>> soft_evidence = {}
        >>> result = show_model(model, evidence, soft_evidence)
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import wbia.plottool as pt
        >>> ut.show_if_requested()
    """
    if ut.get_argval('--hackmarkov') or ut.get_argval('--hackjunc'):
        draw_tree_model(model, **kwargs)
        return

    import wbia.plottool as pt
    import networkx as netx

    fnum = pt.ensure_fnum(None)
    netx_graph = model
    # netx_graph.graph.setdefault('graph', {})['size'] = '"10,5"'
    # netx_graph.graph.setdefault('graph', {})['rankdir'] = 'LR'

    pos_dict = get_hacked_pos(netx_graph)
    # pos_dict = netx.nx_agraph.pygraphviz_layout(netx_graph)
    # pos = netx.nx_agraph.nx_pydot.pydot_layout(netx_graph, prog='dot')
    # pos_dict = netx.nx_agraph.graphviz_layout(netx_graph)

    textprops = {
        'family': 'monospace',
        'horizontalalignment': 'left',
        # 'horizontalalignment': 'center',
        # 'size': 12,
        'size': 8,
    }

    netx_nodes = model.nodes(data=True)
    node_key_list = ut.get_list_column(netx_nodes, 0)
    pos_list = ut.dict_take(pos_dict, node_key_list)

    var2_post = {f.variables[0]: f for f in kwargs.get('factor_list', [])}

    prior_text = None
    post_text = None
    evidence_tas = []
    post_tas = []
    prior_tas = []
    node_color = []

    has_inferred = evidence or var2_post
    if has_inferred:
        ignore_prior_with_ttype = [SCORE_TTYPE, MATCH_TTYPE]
        show_prior = False
    else:
        ignore_prior_with_ttype = []
        # show_prior = True
        show_prior = False

    dpy = 5
    dbx, dby = (20, 20)
    takw1 = {'bbox_align': (0.5, 0), 'pos_offset': [0, dpy], 'bbox_offset': [dbx, dby]}
    takw2 = {'bbox_align': (0.5, 1), 'pos_offset': [0, -dpy], 'bbox_offset': [-dbx, -dby]}

    name_colors = pt.distinct_colors(max(model.num_names, 10))
    name_colors = name_colors[: model.num_names]

    # cmap_ = 'hot'
    # mx = 0.65
    # mn = 0.15
    cmap_, mn, mx = 'plasma', 0.15, 1.0
    _cmap = pt.plt.get_cmap(cmap_)

    def cmap(x):
        return _cmap((x * mx) + mn)

    for node, pos in zip(netx_nodes, pos_list):
        variable = node[0]
        cpd = model.var2_cpd[variable]
        prior_marg = (
            cpd
            if cpd.evidence is None
            else cpd.marginalize(cpd.evidence, inplace=False)
        )

        show_evidence = variable in evidence
        show_prior = cpd.ttype not in ignore_prior_with_ttype
        show_post = variable in var2_post
        show_prior |= cpd.ttype not in ignore_prior_with_ttype

        post_marg = None
        if show_post:
            post_marg = var2_post[variable]

        def get_name_color(phi):
            order = phi.values.argsort()[::-1]
            if len(order) < 2:
                dist_next = phi.values[order[0]]
            else:
                dist_next = phi.values[order[0]] - phi.values[order[1]]
            dist_total = phi.values[order[0]]
            confidence = (dist_total * dist_next) ** (2.5 / 4)
            # logger.info('confidence = %r' % (confidence,))
            color = name_colors[order[0]]
            color = pt.color_funcs.desaturate_rgb(color, 1 - confidence)
            color = np.array(color)
            return color

        if variable in evidence:
            if cpd.ttype == SCORE_TTYPE:
                cmap_index = evidence[variable] / (cpd.variable_card - 1)
                color = cmap(cmap_index)
                color = pt.lighten_rgb(color, 0.4)
                color = np.array(color)
                node_color.append(color)
            elif cpd.ttype == NAME_TTYPE:
                color = name_colors[evidence[variable]]
                color = np.array(color)
                node_color.append(color)
            else:
                color = pt.FALSE_RED
                node_color.append(color)
        # elif variable in soft_evidence:
        #     color = pt.LIGHT_PINK
        #     show_prior = True
        #     color = get_name_color(prior_marg)
        #     node_color.append(color)
        else:
            if cpd.ttype == NAME_TTYPE and post_marg is not None:
                color = get_name_color(post_marg)
                node_color.append(color)
            elif cpd.ttype == MATCH_TTYPE and post_marg is not None:
                color = cmap(post_marg.values[1])
                color = pt.lighten_rgb(color, 0.4)
                color = np.array(color)
                node_color.append(color)
            else:
                # color = pt.WHITE
                color = pt.NEUTRAL
                node_color.append(color)

        if show_prior:
            if variable in soft_evidence:
                prior_color = pt.LIGHT_PINK
            else:
                prior_color = None
            prior_text = pgm_ext.make_factor_text(prior_marg, 'prior')
            prior_tas.append(dict(text=prior_text, pos=pos, color=prior_color, **takw2))
        if show_evidence:
            _takw1 = takw1
            if cpd.ttype == SCORE_TTYPE:
                _takw1 = takw2
            evidence_text = cpd.variable_statenames[evidence[variable]]
            if isinstance(evidence_text, int):
                evidence_text = '%d/%d' % (evidence_text + 1, cpd.variable_card)
            evidence_tas.append(dict(text=evidence_text, pos=pos, color=color, **_takw1))
        if show_post:
            _takw1 = takw1
            if cpd.ttype == MATCH_TTYPE:
                _takw1 = takw2
            post_text = pgm_ext.make_factor_text(post_marg, 'post')
            post_tas.append(dict(text=post_text, pos=pos, color=None, **_takw1))

    def trnps_(dict_list):
        """ tranpose dict list """
        list_dict = ut.ddict(list)
        for dict_ in dict_list:
            for key, val in dict_.items():
                list_dict[key + '_list'].append(val)
        return list_dict

    takw1_ = trnps_(post_tas + evidence_tas)
    takw2_ = trnps_(prior_tas)

    # Draw graph
    if has_inferred:
        pnum1 = (3, 1, (slice(0, 2), 0))
    else:
        pnum1 = None

    fig = pt.figure(fnum=fnum, pnum=pnum1, doclf=True)  # NOQA
    ax = pt.gca()
    # logger.info('node_color = %s' % (ut.repr3(node_color),))
    drawkw = dict(
        pos=pos_dict, ax=ax, with_labels=True, node_size=1500, node_color=node_color
    )
    netx.draw(netx_graph, **drawkw)

    hacks = []
    if len(post_tas + evidence_tas):
        hacks.append(pt.draw_text_annotations(textprops=textprops, **takw1_))
    if prior_tas:
        hacks.append(pt.draw_text_annotations(textprops=textprops, **takw2_))

    xmin, ymin = np.array(pos_list).min(axis=0)
    xmax, ymax = np.array(pos_list).max(axis=0)
    num_annots = len(model.ttype2_cpds[NAME_TTYPE])
    if num_annots > 4:
        ax.set_xlim((xmin - 40, xmax + 40))
        ax.set_ylim((ymin - 50, ymax + 50))
        fig.set_size_inches(30, 7)
    else:
        ax.set_xlim((xmin - 42, xmax + 42))
        ax.set_ylim((ymin - 50, ymax + 50))
        fig.set_size_inches(23, 7)
    fig = pt.gcf()

    title = 'num_names=%r, num_annots=%r' % (
        model.num_names,
        num_annots,
    )
    map_assign = kwargs.get('map_assign', None)

    top_assignments = kwargs.get('top_assignments', None)
    if top_assignments is not None:
        map_assign, map_prob = top_assignments[0]
        if map_assign is not None:

            def word_insert(text):
                return '' if len(text) == 0 else text + ' '

            title += '\n%sMAP: ' % (word_insert(kwargs.get('method', '')))
            title += map_assign + ' @' + '%.2f%%' % (100 * map_prob,)
    if kwargs.get('show_title', True):
        pt.set_figtitle(title, size=14)

    for hack in hacks:
        hack()

    # Hack in colorbars
    if has_inferred:
        pt.colorbar(
            np.linspace(0, 1, len(name_colors)),
            name_colors,
            lbl=NAME_TTYPE,
            ticklabels=model.ttype2_template[NAME_TTYPE].basis,
            ticklocation='left',
        )

        basis = model.ttype2_template[SCORE_TTYPE].basis
        scalars = np.linspace(0, 1, len(basis))
        scalars = np.linspace(0, 1, 100)
        colors = pt.scores_to_color(
            scalars, cmap_=cmap_, reverse_cmap=False, cmap_range=(mn, mx)
        )
        colors = [pt.lighten_rgb(c, 0.4) for c in colors]

        if ut.list_type(basis) is int:
            pt.colorbar(scalars, colors, lbl=SCORE_TTYPE, ticklabels=np.array(basis) + 1)
        else:
            pt.colorbar(scalars, colors, lbl=SCORE_TTYPE, ticklabels=basis)
            # logger.info('basis = %r' % (basis,))

    # Draw probability hist
    if has_inferred and top_assignments is not None:
        bin_labels = ut.get_list_column(top_assignments, 0)
        bin_vals = ut.get_list_column(top_assignments, 1)

        # bin_labels = ['\n'.join(ut.textwrap.wrap(_lbl, width=30)) for _lbl in bin_labels]

        pt.draw_histogram(
            bin_labels,
            bin_vals,
            fnum=fnum,
            pnum=(3, 8, (2, slice(4, None))),
            transpose=True,
            use_darkbackground=False,
            # xtick_rotation=-10,
            ylabel='Prob',
            xlabel='assignment',
        )
        pt.set_title('Assignment probabilities')

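# Illustrative note (added, not in the original source): the ``trnps_`` helper nested in
# ``show_model`` above transposes a list of per-node annotation dicts into the keyword
# lists that ``pt.draw_text_annotations`` expects. A minimal sketch of its behavior:
#
#     trnps_([{'text': 'a', 'pos': (0, 0)}, {'text': 'b', 'pos': (1, 1)}])
#     # -> defaultdict with 'text_list': ['a', 'b'] and 'pos_list': [(0, 0), (1, 1)]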
def show_graph(
    infr,
    graph=None,
    use_image=False,
    update_attrs=True,
    with_colorbar=False,
    pnum=(1, 1, 1),
    zoomable=True,
    pickable=False,
    **kwargs,
):
    r"""
    Args:
        infr (?):
        graph (None): (default = None)
        use_image (bool): (default = False)
        update_attrs (bool): (default = True)
        with_colorbar (bool): (default = False)
        pnum (tuple):  plot number (default = (1, 1, 1))
        zoomable (bool): (default = True)
        pickable (bool): (default = False)
        **kwargs: verbose, with_labels, fnum, layout, ax, pos, img_dict, title,
            layoutkw, framewidth, modify_ax, as_directed, hacknoedge, hacknode,
            node_labels, arrow_width, fontsize, fontweight, fontname, fontfamilty,
            fontproperties

    CommandLine:
        python -m wbia.algo.graph.mixin_viz GraphVisualization.show_graph --show

    Example:
        >>> # xdoctest: +REQUIRES(module:pygraphviz)
        >>> # ENABLE_DOCTEST
        >>> from wbia.algo.graph.mixin_viz import *  # NOQA
        >>> from wbia.algo.graph import demo
        >>> import wbia.plottool as pt
        >>> infr = demo.demodata_infr(ccs=ut.estarmap(
        >>>     range, [(1, 6), (6, 10), (10, 13), (13, 15), (15, 16),
        >>>             (17, 20)]))
        >>> pnum_ = pt.make_pnum_nextgen(nRows=1, nCols=3)
        >>> infr.show_graph(show_cand=True, simple_labels=True, pickable=True, fnum=1, pnum=pnum_())
        >>> infr.add_feedback((1, 5), INCMP)
        >>> infr.add_feedback((14, 18), INCMP)
        >>> infr.refresh_candidate_edges()
        >>> infr.show_graph(show_cand=True, simple_labels=True, pickable=True, fnum=1, pnum=pnum_())
        >>> infr.add_feedback((17, 18), NEGTV)  # add inconsistency
        >>> infr.apply_nondynamic_update()
        >>> infr.show_graph(show_cand=True, simple_labels=True, pickable=True, fnum=1, pnum=pnum_())
        >>> ut.show_if_requested()
    """
    import wbia.plottool as pt

    if graph is None:
        graph = infr.graph
    # kwargs['fontsize'] = kwargs.get('fontsize', 8)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        # default_update_kw = ut.get_func_kwargs(infr.update_visual_attrs)
        # update_kw = ut.update_existing(default_update_kw, kwargs)
        # infr.update_visual_attrs(**update_kw)
        if update_attrs:
            infr.update_visual_attrs(graph=graph, **kwargs)
        verbose = kwargs.pop('verbose', infr.verbose)
        pt.show_nx(
            graph,
            layout='custom',
            as_directed=False,
            modify_ax=False,
            use_image=use_image,
            pnum=pnum,
            verbose=verbose,
            **kwargs,
        )
        if zoomable:
            pt.zoom_factory()
            pt.pan_factory(pt.gca())

    # if with_colorbar:
    #     # Draw a colorbar
    #     _normal_ticks = np.linspace(0, 1, num=11)
    #     _normal_scores = np.linspace(0, 1, num=500)
    #     _normal_colors = infr.get_colored_weights(_normal_scores)
    #     cb = pt.colorbar(_normal_scores, _normal_colors, lbl='weights',
    #                      ticklabels=_normal_ticks)
    #     # point to threshold location
    #     thresh = None
    #     if thresh is not None:
    #         xy = (1, thresh)
    #         xytext = (2.5, .3 if thresh < .5 else .7)
    #         cb.ax.annotate('threshold', xy=xy, xytext=xytext,
    #                        arrowprops=dict(
    #                            alpha=.5, fc="0.6",
    #                            connectionstyle="angle3,angleA=90,angleB=0"),)

    # infr.graph
    if graph.graph.get('dark_background', None):
        pt.dark_background(force=True)

    if pickable:
        fig = pt.gcf()
        fig.canvas.mpl_connect('pick_event', ut.partial(on_pick, infr=infr))

def show_time_distributions(ibs, unixtime_list):
    r""" """
    # import vtool as vt
    import wbia.plottool as pt

    unixtime_list = np.array(unixtime_list)
    num_nan = np.isnan(unixtime_list).sum()
    num_total = len(unixtime_list)
    unixtime_list = unixtime_list[~np.isnan(unixtime_list)]

    from wbia.scripts.thesis import TMP_RC
    import matplotlib as mpl

    mpl.rcParams.update(TMP_RC)

    if False:
        from matplotlib import dates as mpldates

        # data_list = list(map(ut.unixtime_to_datetimeobj, unixtime_list))
        n, bins, patches = pt.plt.hist(unixtime_list, 365)
        # n_ = list(map(ut.unixtime_to_datetimeobj, n))
        # bins_ = list(map(ut.unixtime_to_datetimeobj, bins))
        pt.plt.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
        ax = pt.gca()
        # ax.xaxis.set_major_locator(mpldates.YearLocator())
        # hfmt = mpldates.DateFormatter('%y/%m/%d')
        # ax.xaxis.set_major_formatter(hfmt)
        mpldates.num2date(unixtime_list)
        # pt.gcf().autofmt_xdate()
        # y = pt.plt.normpdf( bins, unixtime_list.mean(), unixtime_list.std())
        # ax.set_xticks(bins_)
        # l = pt.plt.plot(bins_, y, 'k--', linewidth=1.5)
    else:
        pt.draw_time_distribution(unixtime_list)
        # pt.draw_histogram()

    ax = pt.gca()
    ax.set_xlabel('Date')
    ax.set_title(
        'Timestamp distribution of %s. #nan=%d/%d'
        % (ibs.get_dbname_alias(), num_nan, num_total)
    )
    pt.gcf().autofmt_xdate()

    icon = ibs.get_database_icon()
    if False and icon is not None:
        # import matplotlib as mpl
        # import vtool as vt
        ax = pt.gca()
        # Overlay a species icon
        # http://matplotlib.org/examples/pylab_examples/demo_annotation_box.html
        # icon = vt.convert_image_list_colorspace([icon], 'RGB', 'BGR')[0]
        # pt.overlay_icon(icon, coords=(0, 1), bbox_alignment=(0, 1))
        pt.overlay_icon(
            icon,
            coords=(0, 1),
            bbox_alignment=(0, 1),
            as_artist=1,
            max_asize=(100, 200),
        )
        # imagebox = mpl.offsetbox.OffsetImage(icon, zoom=1.0)
        # # xy = [ax.get_xlim()[0] + 5, ax.get_ylim()[1]]
        # # ax.set_xlim(1, 100)
        # # ax.set_ylim(0, 100)
        # # x = np.array(ax.get_xlim()).sum() / 2
        # # y = np.array(ax.get_ylim()).sum() / 2
        # # xy = [x, y]
        # # logger.info('xy = %r' % (xy,))
        # # x = np.nanmin(unixtime_list)
        # # xy = [x, y]
        # # logger.info('xy = %r' % (xy,))
        # # ax.get_ylim()[0]]
        # xy = [ax.get_xlim()[0], ax.get_ylim()[1]]
        # ab = mpl.offsetbox.AnnotationBbox(
        #     imagebox, xy, xycoords='data',
        #     xybox=(-0., 0.),
        #     boxcoords="offset points",
        #     box_alignment=(0, 1), pad=0.0)
        # ax.add_artist(ab)

    if ut.get_argflag('--contextadjust'):
        # pt.adjust_subplots(left=.08, bottom=.1, top=.9, wspace=.3, hspace=.1)
        pt.adjust_subplots(use_argv=True)

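# Hedged usage sketch (added, not in the original source): assumes an opened controller
# and that ``ibs.get_valid_gids`` / ``ibs.get_image_unixtime`` are available, as they are
# used elsewhere in wbia.
#
#     import wbia
#     ibs = wbia.opendb('testdb1')
#     unixtime_list = ibs.get_image_unixtime(ibs.get_valid_gids())
#     show_time_distributions(ibs, unixtime_list)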
def get_injured_sharks():
    """
    >>> from wbia.scripts.getshark import *  # NOQA
    """
    import requests

    url = 'http://www.whaleshark.org/getKeywordImages.jsp'
    resp = requests.get(url)
    assert resp.status_code == 200
    keywords = resp.json()['keywords']
    key_list = ut.take_column(keywords, 'indexName')
    key_to_nice = {k['indexName']: k['readableName'] for k in keywords}

    injury_patterns = [
        'injury',
        'net',
        'hook',
        'trunc',
        'damage',
        'scar',
        'nicks',
        'bite',
    ]

    injury_keys = [
        key for key in key_list if any([pat in key for pat in injury_patterns])
    ]
    noninjury_keys = ut.setdiff(key_list, injury_keys)
    injury_nice = ut.lmap(lambda k: key_to_nice[k], injury_keys)  # NOQA
    noninjury_nice = ut.lmap(lambda k: key_to_nice[k], noninjury_keys)  # NOQA
    key_list = injury_keys

    keyed_images = {}
    for key in ut.ProgIter(key_list, lbl='reading index', bs=True):
        key_url = url + '?indexName={indexName}'.format(indexName=key)
        key_resp = requests.get(key_url)
        assert key_resp.status_code == 200
        key_imgs = key_resp.json()['images']
        keyed_images[key] = key_imgs

    key_hist = {key: len(imgs) for key, imgs in keyed_images.items()}
    key_hist = ut.sort_dict(key_hist, 'vals')
    logger.info(ut.repr3(key_hist))
    nice_key_hist = ut.map_dict_keys(lambda k: key_to_nice[k], key_hist)
    nice_key_hist = ut.sort_dict(nice_key_hist, 'vals')
    logger.info(ut.repr3(nice_key_hist))

    key_to_urls = {key: ut.take_column(vals, 'url') for key, vals in keyed_images.items()}
    overlaps = {}
    import itertools

    overlap_img_list = []
    for k1, k2 in itertools.combinations(key_to_urls.keys(), 2):
        overlap_imgs = ut.isect(key_to_urls[k1], key_to_urls[k2])
        num_overlap = len(overlap_imgs)
        overlaps[(k1, k2)] = num_overlap
        overlaps[(k1, k1)] = len(key_to_urls[k1])
        if num_overlap > 0:
            # logger.info('[%s][%s], overlap=%r' % (k1, k2, num_overlap))
            overlap_img_list.extend(overlap_imgs)

    all_img_urls = list(set(ut.flatten(key_to_urls.values())))
    num_all = len(all_img_urls)  # NOQA
    logger.info('num_all = %r' % (num_all,))

    # Determine super-categories
    categories = ['nicks', 'scar', 'trunc']

    # Force these keys into these categories
    key_to_cat = {'scarbite': 'other_injury'}

    cat_to_keys = ut.ddict(list)

    for key in key_to_urls.keys():
        flag = 1
        if key in key_to_cat:
            cat = key_to_cat[key]
            cat_to_keys[cat].append(key)
            continue
        for cat in categories:
            if cat in key:
                cat_to_keys[cat].append(key)
                flag = 0
        if flag:
            cat = 'other_injury'
            cat_to_keys[cat].append(key)

    cat_urls = ut.ddict(list)
    for cat, keys in cat_to_keys.items():
        for key in keys:
            cat_urls[cat].extend(key_to_urls[key])

    cat_hist = {}
    for cat in list(cat_urls.keys()):
        cat_urls[cat] = list(set(cat_urls[cat]))
        cat_hist[cat] = len(cat_urls[cat])

    logger.info(ut.repr3(cat_to_keys))
    logger.info(ut.repr3(cat_hist))

    key_to_cat = dict([(val, key) for key, vals in cat_to_keys.items() for val in vals])

    # ingestset = {
    #     '__class__': 'ImageSet',
    #     'images': ut.ddict(dict)
    # }
    # for key, key_imgs in keyed_images.items():
    #     for imgdict in key_imgs:
    #         url = imgdict['url']
    #         encid = imgdict['correspondingEncounterNumber']
    #         # Make structure
    #         encdict = encounters[encid]
    #         encdict['__class__'] = 'Encounter'
    #         imgdict = ut.delete_keys(imgdict.copy(), ['correspondingEncounterNumber'])
    #         imgdict['__class__'] = 'Image'
    #         cat = key_to_cat[key]
    #         annotdict = {'relative_bbox': [.01, .01, .98, .98], 'tags': [cat, key]}
    #         annotdict['__class__'] = 'Annotation'
    #         # Ensure structures exist
    #         encdict['images'] = encdict.get('images', [])
    #         imgdict['annots'] = imgdict.get('annots', [])
    #         # Add an image to this encounter
    #         encdict['images'].append(imgdict)
    #         # Add an annotation to this image
    #         imgdict['annots'].append(annotdict)

    # # http://springbreak.wildbook.org/rest/org.ecocean.Encounter/1111
    # get_enc_url = 'http://www.whaleshark.org/rest/org.ecocean.Encounter/%s' % (encid,)
    # resp = requests.get(get_enc_url)
    # logger.info(ut.repr3(encdict))
    # logger.info(ut.repr3(encounters))

    # Download the files to the local disk
    # fpath_list =
    all_urls = ut.unique(
        ut.take_column(
            ut.flatten(
                ut.dict_subset(keyed_images, ut.flatten(cat_to_keys.values())).values()
            ),
            'url',
        )
    )

    dldir = ut.truepath('~/tmpsharks')
    from os.path import commonprefix, basename  # NOQA

    prefix = commonprefix(all_urls)
    suffix_list = [url_[len(prefix):] for url_ in all_urls]
    fname_list = [suffix.replace('/', '--') for suffix in suffix_list]

    fpath_list = []
    for url, fname in ut.ProgIter(
        zip(all_urls, fname_list), lbl='downloading imgs', freq=1
    ):
        fpath = ut.grab_file_url(url, download_dir=dldir, fname=fname, verbose=False)
        fpath_list.append(fpath)

    # Make sure we keep orig info
    # url_to_keys = ut.ddict(list)
    url_to_info = ut.ddict(dict)
    for key, imgdict_list in keyed_images.items():
        for imgdict in imgdict_list:
            url = imgdict['url']
            info = url_to_info[url]
            for k, v in imgdict.items():
                info[k] = info.get(k, [])
                info[k].append(v)
            info['keys'] = info.get('keys', [])
            info['keys'].append(key)
            # url_to_keys[url].append(key)

    info_list = ut.take(url_to_info, all_urls)
    for info in info_list:
        if len(set(info['correspondingEncounterNumber'])) > 1:
            assert False, 'url with two different encounter nums'

    # Combine duplicate tags
    hashid_list = [
        ut.get_file_uuid(fpath_, stride=8)
        for fpath_ in ut.ProgIter(fpath_list, bs=True)
    ]
    groupxs = ut.group_indices(hashid_list)[1]

    # Group properties by duplicate images
    # groupxs = [g for g in groupxs if len(g) > 1]
    fpath_list_ = ut.take_column(ut.apply_grouping(fpath_list, groupxs), 0)
    url_list_ = ut.take_column(ut.apply_grouping(all_urls, groupxs), 0)
    info_list_ = [
        ut.map_dict_vals(ut.flatten, ut.dict_accum(*info_))
        for info_ in ut.apply_grouping(info_list, groupxs)
    ]

    encid_list_ = [
        ut.unique(info_['correspondingEncounterNumber'])[0] for info_ in info_list_
    ]
    keys_list_ = [ut.unique(info_['keys']) for info_ in info_list_]
    cats_list_ = [ut.unique(ut.take(key_to_cat, keys)) for keys in keys_list_]

    clist = ut.ColumnLists(
        {
            'gpath': fpath_list_,
            'url': url_list_,
            'encid': encid_list_,
            'key': keys_list_,
            'cat': cats_list_,
        }
    )

    # for info_ in ut.apply_grouping(info_list, groupxs):
    #     info = ut.dict_accum(*info_)
    #     info = ut.map_dict_vals(ut.flatten, info)
    #     x = ut.unique(ut.flatten(ut.dict_accum(*info_)['correspondingEncounterNumber']))
    #     if len(x) > 1:
    #         info = info.copy()
    #         del info['keys']
    #         logger.info(ut.repr3(info))

    flags = ut.lmap(ut.fpath_has_imgext, clist['gpath'])
    clist = clist.compress(flags)

    import wbia

    ibs = wbia.opendb('WS_Injury', allow_newdir=True)
    gid_list = ibs.add_images(clist['gpath'])
    clist['gid'] = gid_list
    failed_flags = ut.flag_None_items(clist['gid'])
    logger.info('# failed %s' % (sum(failed_flags),))
    passed_flags = ut.not_list(failed_flags)
    clist = clist.compress(passed_flags)
    ut.assert_all_not_None(clist['gid'])
    # ibs.get_image_uris_original(clist['gid'])
    ibs.set_image_uris_original(clist['gid'], clist['url'], overwrite=True)

    # ut.zipflat(clist['cat'], clist['key'])
    if False:
        # Can run detection instead
        clist['tags'] = ut.zipflat(clist['cat'])
        aid_list = ibs.use_images_as_annotations(
            clist['gid'], adjust_percent=0.01, tags_list=clist['tags']
        )
        aid_list

    import wbia.plottool as pt
    from wbia import core_annots

    pt.qt4ensure()
    # annots = ibs.annots()
    # aids = [1, 2]
    # ibs.depc_annot.get('hog', aids , 'hog')
    # ibs.depc_annot.get('chip', aids, 'img')
    for aid in ut.InteractiveIter(ibs.get_valid_aids()):
        hogs = ibs.depc_annot.d.get_hog_hog([aid])
        chips = ibs.depc_annot.d.get_chips_img([aid])
        chip = chips[0]
        hogimg = core_annots.make_hog_block_image(hogs[0])
        pt.clf()
        pt.imshow(hogimg, pnum=(1, 2, 1))
        pt.imshow(chip, pnum=(1, 2, 2))
        fig = pt.gcf()
        fig.show()
        fig.canvas.draw()

    # logger.info(len(groupxs))

    # if False:
    #     groupxs = ut.find_duplicate_items(ut.lmap(basename, suffix_list)).values()
    #     logger.info(ut.repr3(ut.apply_grouping(all_urls, groupxs)))
    #     # FIX
    #     for fpath, fname in zip(fpath_list, fname_list):
    #         if ut.checkpath(fpath):
    #             ut.move(fpath, join(dirname(fpath), fname))
    #             logger.info('fpath = %r' % (fpath,))

    # import wbia
    # from wbia.dbio import ingest_dataset
    # dbdir = wbia.sysres.lookup_dbdir('WS_ALL')
    # self = ingest_dataset.Ingestable2(dbdir)

    if False:
        # Show overlap matrix
        import wbia.plottool as pt
        import pandas as pd
        import numpy as np

        dict_ = overlaps
        s = pd.Series(dict_, index=pd.MultiIndex.from_tuples(overlaps))
        df = s.unstack()
        lhs, rhs = df.align(df.T)
        df = lhs.add(rhs, fill_value=0).fillna(0)

        label_texts = df.columns.values

        def label_ticks(label_texts):
            import wbia.plottool as pt

            truncated_labels = [repr(lbl[0:100]) for lbl in label_texts]
            ax = pt.gca()
            ax.set_xticks(list(range(len(label_texts))))
            ax.set_xticklabels(truncated_labels)
            [lbl.set_rotation(-55) for lbl in ax.get_xticklabels()]
            [lbl.set_horizontalalignment('left') for lbl in ax.get_xticklabels()]

            # xgrid, ygrid = np.meshgrid(range(len(label_texts)), range(len(label_texts)))
            # pt.plot_surface3d(xgrid, ygrid, disjoint_mat)
            ax.set_yticks(list(range(len(label_texts))))
            ax.set_yticklabels(truncated_labels)
            [lbl.set_horizontalalignment('right') for lbl in ax.get_yticklabels()]
            [lbl.set_verticalalignment('center') for lbl in ax.get_yticklabels()]
            # [lbl.set_rotation(20) for lbl in ax.get_yticklabels()]

        # df = df.sort(axis=0)
        # df = df.sort(axis=1)
        sortx = np.argsort(df.sum(axis=1).values)[::-1]
        df = df.take(sortx, axis=0)
        df = df.take(sortx, axis=1)

        fig = pt.figure(fnum=1)
        fig.clf()
        mat = df.values.astype(np.int32)
        mat[np.diag_indices(len(mat))] = 0
        vmax = mat[(1 - np.eye(len(mat))).astype(np.bool)].max()
        import matplotlib.colors

        norm = matplotlib.colors.Normalize(vmin=0, vmax=vmax, clip=True)
        pt.plt.imshow(mat, cmap='hot', norm=norm, interpolation='none')
        pt.plt.colorbar()
        pt.plt.grid('off')
        label_ticks(label_texts)
        fig.tight_layout()

    # overlap_df = pd.DataFrame.from_dict(overlap_img_list)

    class TmpImage(ut.NiceRepr):
        pass

    from skimage.feature import hog
    from skimage import data, color, exposure
    import wbia.plottool as pt

    image2 = color.rgb2gray(data.astronaut())  # NOQA

    fpath = './GOPR1120.JPG'

    import vtool as vt

    for fpath in [fpath]:
        """
        http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
        """
        image = vt.imread(fpath, grayscale=True)
        image = pt.color_funcs.to_base01(image)

        fig = pt.figure(fnum=2)
        fd, hog_image = hog(
            image,
            orientations=8,
            pixels_per_cell=(16, 16),
            cells_per_block=(1, 1),
            visualise=True,
        )

        fig, (ax1, ax2) = pt.plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)

        ax1.axis('off')
        ax1.imshow(image, cmap=pt.plt.cm.gray)
        ax1.set_title('Input image')
        ax1.set_adjustable('box-forced')

        # Rescale histogram for better display
        hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))

        ax2.axis('off')
        ax2.imshow(hog_image_rescaled, cmap=pt.plt.cm.gray)
        ax2.set_title('Histogram of Oriented Gradients')
        ax1.set_adjustable('box-forced')
        pt.plt.show()

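# Compatibility note (added, not in the original source): the skimage HOG call above uses
# the older ``visualise=True`` keyword and ``.astype(np.bool)``; scikit-image >= 0.16
# spells the keyword ``visualize`` and recent numpy removed ``np.bool`` in favor of the
# builtin ``bool``. The code is left as written to stay faithful to the legacy script.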
def show_graph(infr, title, final=False, selected_edges=None):
    if not VISUALIZE:
        return
    # TODO: rich colored text?
    latest = '\n'.join(infr.latest_logs())
    showkw = dict(
        # fontsize=infr.graph.graph['fontsize'],
        # fontname=infr.graph.graph['fontname'],
        show_unreviewed_edges=True,
        show_inferred_same=False,
        show_inferred_diff=False,
        outof=(len(infr.aids)),
        # show_inferred_same=True,
        # show_inferred_diff=True,
        selected_edges=selected_edges,
        show_labels=True,
        simple_labels=True,
        # show_recent_review=not final,
        show_recent_review=False,
        # splines=infr.graph.graph['splines'],
        reposition=False,
        # with_colorbar=True
    )
    verbose = infr.verbose
    infr.verbose = 0
    infr_ = infr.copy()
    infr_ = infr
    infr_.verbose = verbose
    infr_.show(pickable=True, verbose=0, **showkw)
    infr.verbose = verbose
    # logger.info('status ' + ut.repr4(infr_.status()))
    # infr.show(**showkw)
    ax = pt.gca()
    pt.set_title(title, fontsize=20)
    fig = pt.gcf()
    fontsize = 22
    if True:
        # postprocess xlabel
        lines = []
        for line in latest.split('\n'):
            if False and line.startswith('ORACLE ERROR'):
                lines += ['ORACLE ERROR']
            else:
                lines += [line]
        latest = '\n'.join(lines)
        if len(lines) > 10:
            fontsize = 16
        if len(lines) > 12:
            fontsize = 14
        if len(lines) > 14:
            fontsize = 12
        if len(lines) > 18:
            fontsize = 10
        if len(lines) > 23:
            fontsize = 8

    if True:
        pt.adjust_subplots(top=0.95, left=0, right=1, bottom=0.45, fig=fig)
        ax.set_xlabel('\n' + latest)
        xlabel = ax.get_xaxis().get_label()
        xlabel.set_horizontalalignment('left')
        # xlabel.set_x(.025)
        xlabel.set_x(-0.6)
        # xlabel.set_fontname('CMU Typewriter Text')
        xlabel.set_fontname('Inconsolata')
        xlabel.set_fontsize(fontsize)
    ax.set_aspect('equal')

    # ax.xaxis.label.set_color('red')

    from os.path import join

    fpath = join(dpath, 'demo_{:04d}.png'.format(next(fig_counter)))
    fig.savefig(
        fpath,
        dpi=300,
        # transparent=True,
        edgecolor='none',
    )

    # pt.save_figure(dpath=dpath, dpi=300)
    infr.latest_logs()

def draw_bayesian_model(
    model, evidence={}, soft_evidence={}, fnum=None, pnum=None, **kwargs
):
    from pgmpy.models import BayesianModel

    if not isinstance(model, BayesianModel):
        model = model.to_bayesian_model()

    import wbia.plottool as pt
    import networkx as nx

    kwargs = kwargs.copy()
    factor_list = kwargs.pop('factor_list', [])

    ttype_colors, ttype_scalars = make_colorcodes(model)

    textprops = {
        'horizontalalignment': 'left',
        'family': 'monospace',
        'size': 8,
    }

    # build graph attrs
    tup = get_node_viz_attrs(
        model, evidence, soft_evidence, factor_list, ttype_colors, **kwargs
    )
    node_color, pos_list, pos_dict, takws = tup

    # draw graph
    has_inferred = evidence or 'factor_list' in kwargs

    if False:
        fig = pt.figure(fnum=fnum, pnum=pnum, doclf=True)  # NOQA
        ax = pt.gca()
        drawkw = dict(
            pos=pos_dict, ax=ax, with_labels=True, node_size=1100, node_color=node_color
        )
        nx.draw(model, **drawkw)
    else:
        # BE VERY CAREFUL
        if 1:
            graph = model.copy()
            graph.__class__ = nx.DiGraph
            graph.graph['groupattrs'] = ut.ddict(dict)
            # graph = model.
            if getattr(graph, 'ttype2_cpds', None) is not None:
                # Add invis edges and ttype groups
                for ttype in model.ttype2_cpds.keys():
                    ttype_cpds = model.ttype2_cpds[ttype]
                    # use defined ordering
                    ttype_nodes = ut.list_getattr(ttype_cpds, 'variable')
                    # ttype_nodes = sorted(ttype_nodes)
                    invis_edges = list(ut.itertwo(ttype_nodes))
                    graph.add_edges_from(invis_edges)
                    nx.set_edge_attributes(
                        graph,
                        name='style',
                        values={edge: 'invis' for edge in invis_edges},
                    )
                    nx.set_node_attributes(
                        graph,
                        name='groupid',
                        values={node: ttype for node in ttype_nodes},
                    )
                    graph.graph['groupattrs'][ttype]['rank'] = 'same'
                    graph.graph['groupattrs'][ttype]['cluster'] = False
        else:
            graph = model
        pt.show_nx(graph, layout_kw={'prog': 'dot'}, fnum=fnum, pnum=pnum, verbose=0)
        pt.zoom_factory()
        fig = pt.gcf()
        ax = pt.gca()
        pass

    hacks = [
        pt.draw_text_annotations(textprops=textprops, **takw) for takw in takws if takw
    ]

    xmin, ymin = np.array(pos_list).min(axis=0)
    xmax, ymax = np.array(pos_list).max(axis=0)

    if 'name' in model.ttype2_template:
        num_names = len(model.ttype2_template['name'].basis)
        num_annots = len(model.ttype2_cpds['name'])
        if num_annots > 4:
            ax.set_xlim((xmin - 40, xmax + 40))
            ax.set_ylim((ymin - 50, ymax + 50))
            fig.set_size_inches(30, 7)
        else:
            ax.set_xlim((xmin - 42, xmax + 42))
            ax.set_ylim((ymin - 50, ymax + 50))
            fig.set_size_inches(23, 7)
        title = 'num_names=%r, num_annots=%r' % (
            num_names,
            num_annots,
        )
    else:
        title = ''
    map_assign = kwargs.get('map_assign', None)

    def word_insert(text):
        return '' if len(text) == 0 else text + ' '

    top_assignments = kwargs.get('top_assignments', None)
    if top_assignments is not None:
        map_assign, map_prob = top_assignments[0]
        if map_assign is not None:
            title += '\n%sMAP: ' % (word_insert(kwargs.get('method', '')))
            title += map_assign + ' @' + '%.2f%%' % (100 * map_prob,)
    if kwargs.get('show_title', True):
        pt.set_figtitle(title, size=14)

    for hack in hacks:
        hack()

    if has_inferred:
        # Hack in colorbars
        # if ut.list_type(basis) is int:
        #     pt.colorbar(scalars, colors, lbl='score', ticklabels=np.array(basis) + 1)
        # else:
        #     pt.colorbar(scalars, colors, lbl='score', ticklabels=basis)
        keys = ['name', 'score']
        locs = ['left', 'right']
        for key, loc in zip(keys, locs):
            if key in ttype_colors:
                basis = model.ttype2_template[key].basis
                # scalars =
                colors = ttype_colors[key]
                scalars = ttype_scalars[key]
                pt.colorbar(scalars, colors, lbl=key, ticklabels=basis, ticklocation=loc)

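# Hedged usage sketch (added; argument shapes inferred from the code above, not from
# upstream docs): ``top_assignments`` is read as a list of ``(assignment_string, prob)``
# pairs and ``evidence`` maps variable names to state indices. The variable names and
# assignment string below are hypothetical.
#
#     draw_bayesian_model(
#         model, evidence={}, top_assignments=[('n0=0, n1=0', 0.42)], fnum=1
#     )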