def plot(self, fnum=None, pnum=(1, 1, 1), **kwargs):
    import plottool as pt
    fnum = pt.ensure_fnum(fnum)
    pt.figure(fnum=fnum, docla=True, doclf=True)
    show_keypoints(self.chip, self.kpts, fnum=fnum, pnum=pnum, **kwargs)
    if self.figtitle is not None:
        pt.set_figtitle(self.figtitle)
def visualize(normalizer, update=True, verbose=True, fnum=None):
    """
    CommandLine:
        python -m ibeis.algo.hots.score_normalization --test-visualize --index 0 --cmd

    Example:
        >>> # DISABLE_DOCTEST
        >>> import plottool as pt
        >>> from ibeis.algo.hots.score_normalization import *  # NOQA
        >>> #import ibeis
        >>> index = ut.get_argval('--index', type_=int, default=0)
        >>> normalizer = load_precomputed_normalizer(index, with_global=False)
        >>> normalizer.visualize()
        >>> six.exec_(pt.present(), globals(), locals())
    """
    import plottool as pt
    if verbose:
        print(normalizer.get_infostr())
    if normalizer.score_domain is None:
        return
    if fnum is None:
        fnum = pt.next_fnum()
    pt.figure(fnum=fnum, pnum=(2, 1, 1), doclf=True, docla=True)
    normalizer.visualize_probs(fnum=fnum, pnum=(2, 1, 1), update=False)
    normalizer.visualize_support(fnum=fnum, pnum=(2, 1, 2), update=False)
    if update:
        pt.update()
def inspect_pdfs(tn_support, tp_support, score_domain, p_tp_given_score,
                 p_tn_given_score, p_score_given_tp, p_score_given_tn, p_score,
                 with_scores=False):
    import plottool as pt  # NOQA

    fnum = pt.next_fnum()
    nRows = 2 + with_scores
    pnum_ = pt.get_pnum_func(nRows=nRows, nCols=1)
    #pnum_ = pt.get_pnum_func(nRows=3, nCols=1)

    def generate_pnum():
        for px in range(nRows):
            yield pnum_(px)
    # use the next() protocol; the bare ``.next`` attribute was Python 2 only
    _pnumiter = generate_pnum().__next__

    pt.figure(fnum=fnum, pnum=pnum_(0))

    if with_scores:
        plot_support(tn_support, tp_support, fnum=fnum, pnum=_pnumiter())
    plot_prebayes_pdf(score_domain, p_score_given_tn, p_score_given_tp, p_score,
                      cfgstr='', fnum=fnum, pnum=_pnumiter())
    plot_postbayes_pdf(score_domain, p_tn_given_score, p_tp_given_score,
                       cfgstr='', fnum=fnum, pnum=_pnumiter())
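# For orientation: the posterior curves that ``inspect_pdfs`` expects can be
# derived from the class-conditional score densities via Bayes' rule. A minimal
# NumPy sketch with made-up densities and equal class priors (the real score
# normalizer may estimate these quantities differently):
import numpy as np

score_domain = np.linspace(0, 1, 100)
# hypothetical class-conditional densities for true-positive / true-negative scores
p_score_given_tp = np.exp(-(score_domain - 0.7) ** 2 / 0.02)
p_score_given_tn = np.exp(-(score_domain - 0.3) ** 2 / 0.02)
p_score_given_tp /= p_score_given_tp.sum()
p_score_given_tn /= p_score_given_tn.sum()

# total evidence and posteriors via Bayes' rule (equal priors assumed)
p_score = 0.5 * p_score_given_tp + 0.5 * p_score_given_tn
p_tp_given_score = 0.5 * p_score_given_tp / p_score
p_tn_given_score = 0.5 * p_score_given_tn / p_score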
def show_single_coverage_mask(qreq_, cm, weight_mask_m, weight_mask, daids,
                              fnum=None):
    import plottool as pt
    from ibeis import viz
    fnum = pt.ensure_fnum(fnum)
    idx_list = ut.dict_take(cm.daid2_idx, daids)
    nPlots = len(idx_list) + 1
    nRows, nCols = pt.get_square_row_cols(nPlots)
    pnum_ = pt.make_pnum_nextgen(nRows, nCols)
    pt.figure(fnum=fnum, pnum=(1, 2, 1))
    # Draw coverage masks with bbox
    # <FlipHack>
    #weight_mask_m = np.fliplr(np.flipud(weight_mask_m))
    #weight_mask = np.fliplr(np.flipud(weight_mask))
    # </FlipHack>
    stacked_weights, offset_tup, sf_tup = vt.stack_images(weight_mask_m, weight_mask,
                                                          return_sf=True)
    (woff, hoff) = offset_tup[1]
    wh1 = weight_mask_m.shape[0:2][::-1]
    wh2 = weight_mask.shape[0:2][::-1]
    pt.imshow(255 * (stacked_weights), fnum=fnum, pnum=pnum_(0),
              title='(query image) What did match vs what should match')
    pt.draw_bbox((0, 0) + wh1, bbox_color=(0, 0, 1))
    pt.draw_bbox((woff, hoff) + wh2, bbox_color=(0, 0, 1))
    # Get contributing matches
    qaid = cm.qaid
    daid_list = daids
    fm_list = ut.take(cm.fm_list, idx_list)
    fs_list = ut.take(cm.fs_list, idx_list)
    # Draw matches
    for px, (daid, fm, fs) in enumerate(zip(daid_list, fm_list, fs_list), start=1):
        viz.viz_matches.show_matches2(qreq_.ibs, qaid, daid, fm, fs,
                                      draw_pts=False, draw_lines=True,
                                      draw_ell=False, fnum=fnum, pnum=pnum_(px),
                                      darken=.5)
    coverage_score = score_matching_mask(weight_mask_m, weight_mask)
    pt.set_figtitle('score=%.4f' % (coverage_score,))
def draw_junction_tree(model, fnum=None, **kwargs):
    import plottool as pt
    fnum = pt.ensure_fnum(fnum)
    pt.figure(fnum=fnum)
    ax = pt.gca()
    from pgmpy.models import JunctionTree
    if not isinstance(model, JunctionTree):
        netx_graph = model.to_junction_tree()
    else:
        netx_graph = model
    # prettify nodes
    def fixtupkeys(dict_):
        return {
            ', '.join(k) if isinstance(k, tuple) else k: fixtupkeys(v)
            for k, v in dict_.items()
        }
    n = fixtupkeys(netx_graph.node)
    e = fixtupkeys(netx_graph.edge)
    a = fixtupkeys(netx_graph.adj)
    netx_graph.node = n
    netx_graph.edge = e
    netx_graph.adj = a
    #netx_graph = model.to_markov_model()
    #pos = netx.pygraphviz_layout(netx_graph)
    #pos = netx.graphviz_layout(netx_graph)
    pos = netx.pydot_layout(netx_graph)
    node_color = [pt.NEUTRAL] * len(pos)
    drawkw = dict(pos=pos, ax=ax, with_labels=True, node_color=node_color,
                  node_size=2000)
    netx.draw(netx_graph, **drawkw)
    if kwargs.get('show_title', True):
        pt.set_figtitle('Junction / Clique Tree / Cluster Graph')
def show_notch_tips(depc, aid, config={}, fnum=None, pnum=None):
    import plottool as pt
    pt.figure(fnum=fnum, pnum=pnum)
    notch = depc.get('Notch_Tips', aid, config=config)
    chip = depc.get('chips', aid, 'img', config=config)
    pt.imshow(chip)
    pt.draw_kpts2(np.array(notch), pts=True, ell=False, pts_size=20)
def next_query_slot(self):
    """
    Callback used when all interactions are completed. Generates the next
    incremental query and then tries the automatic interactions.
    """
    try:
        # Generate the query result
        item = six.next(self.inc_query_gen)
        (ibs, cm, qreq_, incinfo) = item
        # update incinfo
        self.cm = cm
        self.qreq_ = qreq_
        self.incinfo = incinfo
        incinfo['count'] += 1
        automatch.run_until_name_decision_signal(ibs, cm, qreq_, incinfo=incinfo)
        #import plottool as pt
        #pt.present()
    except StopIteration:
        # TODO: close this figure via incinfo['fnum']
        print('NO MORE QUERIES. CLOSE DOWN WINDOWS AND DISPLAY DONE MESSAGE')
        import plottool as pt
        fig1 = pt.figure(fnum=511)
        fig2 = pt.figure(fnum=512)
        pt.close_figure(fig1)
        pt.close_figure(fig2)
        # guard each callback by its own key (the original checked
        # 'finish_callback' before invoking 'update_callback')
        if 'update_callback' in self.incinfo:
            self.incinfo['update_callback']()
        if 'finish_callback' in self.incinfo:
            self.incinfo['finish_callback']()
def show_matches(fm, fs, fnum=1, pnum=None, title='', key=None, simp=None,
                 cmap='hot', draw_lines=True, **locals_):
    #locals_ = locals()
    import plottool as pt
    from plottool import plot_helpers as ph
    # hack keys out of namespace
    keys = 'rchip1, rchip2, kpts1, kpts2'.split(', ')
    rchip1, rchip2, kpts1, kpts2 = ut.dict_take(locals_, keys)
    pt.figure(fnum=fnum, pnum=pnum)  #doclf=True, docla=True)
    ax, xywh1, xywh2 = pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=fm,
                                          fs=fs, fnum=fnum, cmap=cmap,
                                          draw_lines=draw_lines, ori=True)
    ph.set_plotdat(ax, 'viztype', 'matches')
    ph.set_plotdat(ax, 'simp', simp)
    ph.set_plotdat(ax, 'key', key)
    title = title + '\n num=%d, sum=%.2f' % (len(fm), sum(fs))
    pt.set_title(title)
    return ax, xywh1, xywh2
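# ``show_matches`` pulls rchip1/rchip2/kpts1/kpts2 out of ``**locals_``, so
# callers forward those names as keyword arguments. A hedged sketch of that
# calling convention with synthetic data, assuming the usual vtool keypoint
# layout (x, y, a, c, d, ori); real callers pass chips/keypoints from ibeis:
import numpy as np

rchip1 = (np.random.rand(100, 120, 3) * 255).astype(np.uint8)
rchip2 = (np.random.rand(100, 120, 3) * 255).astype(np.uint8)
kpts1 = np.array([[30., 30., 5., 0., 5., 0.], [60., 40., 5., 0., 5., 0.]])
kpts2 = np.array([[25., 35., 5., 0., 5., 0.], [55., 45., 5., 0., 5., 0.]])
fm = np.array([[0, 0], [1, 1]])   # index pairs into kpts1 / kpts2
fs = np.array([0.9, 0.7])         # per-match scores

ax, xywh1, xywh2 = show_matches(fm, fs, fnum=1, title='toy match',
                                rchip1=rchip1, rchip2=rchip2,
                                kpts1=kpts1, kpts2=kpts2)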
def show_chip_distinctiveness_plot(chip, kpts, dstncvs, fnum=1, pnum=None):
    import plottool as pt
    pt.figure(fnum, pnum=pnum)
    ax = pt.gca()
    divider = pt.ensure_divider(ax)
    #ax1 = divider.append_axes("left", size="50%", pad=0)
    ax1 = ax
    ax2 = divider.append_axes("bottom", size="100%", pad=0.05)
    #f, (ax1, ax2) = pt.plt.subplots(1, 2, sharex=True)
    cmapstr = 'rainbow'  # 'hot'
    color_list = pt.df2.plt.get_cmap(cmapstr)(ut.norm_zero_one(dstncvs))
    sortx = dstncvs.argsort()
    #pt.df2.plt.plot(qfx2_dstncvs[sortx], c=color_list[sortx])
    pt.plt.sca(ax1)
    pt.colorline(np.arange(len(sortx)), dstncvs[sortx],
                 cmap=pt.plt.get_cmap(cmapstr))
    pt.gca().set_xlim(0, len(sortx))
    pt.dark_background()
    pt.plt.sca(ax2)
    pt.imshow(chip, darken=.2)
    # MATPLOTLIB BUG: CANNOT SHOW DIFFERENT ALPHA FOR POINTS AND KEYPOINTS AT ONCE
    #pt.draw_kpts2(kpts, pts_color=color_list, ell_color=color_list, ell_alpha=.1, ell=True, pts=True)
    #pt.draw_kpts2(kpts, color_list=color_list, pts_alpha=1.0, pts_size=1.5,
    #              ell=True, ell_alpha=.1, pts=False)
    ell = ut.get_argflag('--ell')
    pt.draw_kpts2(kpts, color_list=color_list, pts_alpha=1.0, pts_size=1.5,
                  ell=ell, ell_alpha=.3, pts=not ell)
    pt.plt.sca(ax)
def show_normalizers(match, fnum=None, pnum=None, update=True):
    import plottool as pt
    from plottool import plot_helpers as ph
    # hack keys out of namespace
    keys = ["rchip", "kpts"]
    rchip1, kpts1 = ut.dict_take(match.annot1.__dict__, keys)
    rchip2, kpts2 = ut.dict_take(match.annot2.__dict__, keys)
    fs, fm = match.fs, match.fm_norm
    cmap = "cool"
    draw_lines = True
    if fnum is None:
        fnum = pt.next_fnum()
    pt.figure(fnum=fnum, pnum=pnum)  # doclf=True, docla=True)
    ax, xywh1, xywh2 = pt.show_chipmatch2(
        rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs, fnum=fnum, cmap=cmap,
        draw_lines=draw_lines
    )
    ph.set_plotdat(ax, "viztype", "matches")
    ph.set_plotdat(ax, "key", match.key)
    title = match.key + "\n num=%d, sum=%.2f" % (len(fm), sum(fs))
    pt.set_title(title)
    if update:
        pt.update()
    return ax, xywh1, xywh2
def show_normalizers(match, fnum=None, pnum=None, update=True): import plottool as pt from plottool import plot_helpers as ph # hack keys out of namespace keys = ['rchip', 'kpts'] rchip1, kpts1 = ut.dict_take(match.annot1.__dict__, keys) rchip2, kpts2 = ut.dict_take(match.annot2.__dict__, keys) fs, fm = match.fs, match.fm_norm cmap = 'cool' draw_lines = True if fnum is None: fnum = pt.next_fnum() pt.figure(fnum=fnum, pnum=pnum) #doclf=True, docla=True) ax, xywh1, xywh2 = pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs, fnum=fnum, cmap=cmap, draw_lines=draw_lines) ph.set_plotdat(ax, 'viztype', 'matches') ph.set_plotdat(ax, 'key', match.key) title = match.key + '\n num=%d, sum=%.2f' % (len(fm), sum(fs)) pt.set_title(title) if update: pt.update() return ax, xywh1, xywh2
def show_hist_submaxima(hist_, edges=None, centers=None, maxima_thresh=.8, pnum=(1, 1, 1)): r""" For C++ to show data Args: hist_ (?): edges (None): centers (None): CommandLine: python -m vtool.histogram --test-show_hist_submaxima --show python -m pyhesaff._pyhesaff --test-test_rot_invar --show python -m vtool.histogram --test-show_hist_submaxima --dpath figures --save ~/latex/crall-candidacy-2015/figures/show_hist_submaxima.jpg Example: >>> # DISABLE_DOCTEST >>> import plottool as pt >>> from vtool.histogram import * # NOQA >>> # build test data >>> hist_ = np.array(list(map(float, ut.get_argval('--hist', type_=list, default=[1, 4, 2, 5, 3, 3])))) >>> edges = np.array(list(map(float, ut.get_argval('--edges', type_=list, default=[0, 1, 2, 3, 4, 5, 6])))) >>> maxima_thresh = ut.get_argval('--maxima_thresh', type_=float, default=.8) >>> centers = None >>> # execute function >>> show_hist_submaxima(hist_, edges, centers, maxima_thresh) >>> pt.show_if_requested() """ #print(repr(hist_)) #print(repr(hist_.shape)) #print(repr(edges)) #print(repr(edges.shape)) #ut.embed() import plottool as pt #ut.embed() if centers is None: centers = hist_edges_to_centers(edges) bin_colors = pt.get_orientation_color(centers) pt.figure(fnum=pt.next_fnum(), pnum=pnum) POLAR = False if POLAR: pt.df2.plt.subplot(*pnum, polar=True, axisbg='#000000') pt.draw_hist_subbin_maxima(hist_, centers, bin_colors=bin_colors, maxima_thresh=maxima_thresh) #pt.gca().set_rmax(hist_.max() * 1.1) #pt.gca().invert_yaxis() #pt.gca().invert_xaxis() pt.dark_background() #if ut.get_argflag('--legend'): # pt.figure(fnum=pt.next_fnum()) # centers_ = np.append(centers, centers[0]) # r = np.ones(centers_.shape) * .2 # ax = pt.df2.plt.subplot(111, polar=True) # pt.plots.colorline(centers_, r, cmap=pt.df2.plt.get_cmap('hsv'), linewidth=10) # #ax.plot(centers_, r, 'm', color=bin_colors, linewidth=100) # ax.set_rmax(.2) # #ax.grid(True) # #ax.set_title("Angle Colors", va='bottom') title = ut.get_argval('--title', default='') import plottool as pt pt.set_figtitle(title)
def show_selected(self, event):
    import plottool as pt
    print('show_selected')
    from ibeis.viz import viz_chip
    fnum = pt.ensure_fnum(None)
    print('fnum = %r' % (fnum,))
    pt.figure(fnum=fnum)
    viz_chip.show_many_chips(self.ibs, self.selected_aids)
    pt.update()
def draw_tree_model(model, **kwargs): import plottool as pt import networkx as netx if not ut.get_argval('--hackjunc'): fnum = pt.ensure_fnum(None) fig = pt.figure(fnum=fnum, doclf=True) # NOQA ax = pt.gca() #name_nodes = sorted(ut.list_getattr(model.ttype2_cpds[NAME_TTYPE], 'variable')) netx_graph = model.to_markov_model() #pos = netx.pygraphviz_layout(netx_graph) #pos = netx.graphviz_layout(netx_graph) #pos = get_hacked_pos(netx_graph, name_nodes, prog='neato') pos = netx.nx_pydot.pydot_layout(netx_graph) node_color = [pt.WHITE] * len(pos) drawkw = dict(pos=pos, ax=ax, with_labels=True, node_color=node_color, node_size=1100) netx.draw(netx_graph, **drawkw) if kwargs.get('show_title', True): pt.set_figtitle('Markov Model') if not ut.get_argval('--hackmarkov'): fnum = pt.ensure_fnum(None) fig = pt.figure(fnum=fnum, doclf=True) # NOQA ax = pt.gca() netx_graph = model.to_junction_tree() # prettify nodes def fixtupkeys(dict_): return { ', '.join(k) if isinstance(k, tuple) else k: fixtupkeys(v) for k, v in dict_.items() } # FIXME n = fixtupkeys(netx_graph.node) e = fixtupkeys(netx_graph.edge) a = fixtupkeys(netx_graph.adj) netx_graph.nodes.update(n) netx_graph.edges.update(e) netx_graph.adj.update(a) #netx_graph = model.to_markov_model() #pos = netx.pygraphviz_layout(netx_graph) #pos = netx.graphviz_layout(netx_graph) pos = netx.nx_pydot.pydot_layout(netx_graph) node_color = [pt.WHITE] * len(pos) drawkw = dict(pos=pos, ax=ax, with_labels=True, node_color=node_color, node_size=2000) netx.draw(netx_graph, **drawkw) if kwargs.get('show_title', True): pt.set_figtitle('Junction/Clique Tree / Cluster Graph')
def chipmatch_view(self, fnum=None, pnum=(1, 1, 1), verbose=None, **kwargs_): """ just visualizes the matches using some type of lines """ import plottool as pt from plottool import plot_helpers as ph if fnum is None: fnum = self.fnum if verbose is None: verbose = ut.VERBOSE if verbose: print("-- CHIPMATCH VIEW --") print("[ichipmatch_view] self.mode = %r" % (self.mode,)) mode = kwargs_.get("mode", self.mode) draw_ell = mode >= 1 draw_lines = mode == 2 if verbose: print("[ichipmatch_view] draw_lines = %r" % (draw_lines,)) print("[ichipmatch_view] draw_ell = %r" % (draw_ell,)) # pt.figure(fnum=fnum, docla=True, doclf=True) # NOTE: i remove the clf here. might cause issues pt.figure(fnum=fnum, docla=True, doclf=False) # show_matches_kw = self.__dict__.copy() show_matches_kw = dict( # fnum=fnum, pnum=pnum, draw_lines=draw_lines, draw_ell=draw_ell, colorbar_=True, vert=self.vert, ) show_matches_kw.update(kwargs_) if verbose: print("self.warp_homog = %r" % (self.warp_homog,)) if self.warp_homog: show_matches_kw["H1"] = self.H1 show_matches_kw["H2"] = self.H2 if verbose: print("show_matches_kw = %s" % (ut.dict_str(show_matches_kw, truncate=True))) # tup = show_matches(fm, fs, **show_matches_kw) ax, xywh1, xywh2 = pt.show_chipmatch2( self.rchip1, self.rchip2, self.kpts1, self.kpts2, fm=self.fm, fs=self.fs, pnum=pnum, **show_matches_kw ) self.xywh2 = xywh2 ph.set_plotdat(ax, "viztype", "matches") if self.truth is not None and self.truth: truth_color = pt.TRUE_BLUE # if else pt.FALSE_RED pt.draw_border(ax, color=truth_color, lw=4) if self.title is not None: pt.set_title(self.title, ax=ax)
def chipmatch_view(self, fnum=None, pnum=(1, 1, 1), verbose=None, **kwargs_): """ just visualizes the matches using some type of lines """ import plottool as pt from plottool import plot_helpers as ph if fnum is None: fnum = self.fnum if verbose is None: verbose = ut.VERBOSE if verbose: print('-- CHIPMATCH VIEW --') print('[ichipmatch_view] self.mode = %r' % (self.mode,)) mode = kwargs_.get('mode', self.mode) draw_ell = mode >= 1 draw_lines = mode == 2 if verbose: print('[ichipmatch_view] draw_lines = %r' % (draw_lines,)) print('[ichipmatch_view] draw_ell = %r' % (draw_ell,)) #pt.figure(fnum=fnum, docla=True, doclf=True) # NOTE: i remove the clf here. might cause issues pt.figure(fnum=fnum, docla=True, doclf=False) #show_matches_kw = self.__dict__.copy() show_matches_kw = dict( #fnum=fnum, pnum=pnum, draw_lines=draw_lines, draw_ell=draw_ell, colorbar_=True, vert=self.vert) show_matches_kw.update(kwargs_) if verbose: print('self.warp_homog = %r' % (self.warp_homog,)) if self.warp_homog: show_matches_kw['H1'] = self.H1 show_matches_kw['H2'] = self.H2 if verbose: print('show_matches_kw = %s' % (ut.dict_str(show_matches_kw, truncate=True))) #tup = show_matches(fm, fs, **show_matches_kw) ax, xywh1, xywh2 = pt.show_chipmatch2( self.rchip1, self.rchip2, self.kpts1, self.kpts2, fm=self.fm, fs=self.fs, pnum=pnum, **show_matches_kw) self.xywh2 = xywh2 ph.set_plotdat(ax, 'viztype', 'matches') if self.truth is not None and self.truth: truth_color = pt.TRUE_BLUE # if else pt.FALSE_RED pt.draw_border(ax, color=truth_color, lw=4) if self.title is not None: pt.set_title(self.title, ax=ax)
def plot_search_surface(known_nd_data, known_target_points, given_data_dims, opt_model_params=None): import plottool as pt pt.figure(2, doclf=True) # Interpolate uniform grid positions unknown_nd_data, ug_shape = compute_interpolation_grid(known_nd_data, 0 * 5) interpolated_error = interpolate_error(known_nd_data, known_target_points, unknown_nd_data) ax = pt.plot_surface3d( unknown_nd_data.T[0].reshape(ug_shape), unknown_nd_data.T[1].reshape(ug_shape), interpolated_error.reshape(ug_shape), xlabel='nDaids', ylabel='K', zlabel='error', rstride=1, cstride=1, cmap=pt.plt.get_cmap('jet'), wire=True, #norm=pt.mpl.colors.Normalize(0, 1), #shade=False, #dark=False, ) ax.scatter(known_nd_data.T[0], known_nd_data.T[1], known_target_points, s=100, c=pt.YELLOW) assert len(given_data_dims) == 1, 'can only plot 1 given data dim' xdim = given_data_dims[0] ydim = (xdim + 1) % (len(known_nd_data.T)) known_nd_min = known_nd_data.min(axis=0) known_nd_max = known_nd_data.max(axis=0) xmin, xmax = known_nd_min[xdim], known_nd_max[xdim] ymin, ymax = known_nd_min[ydim], known_nd_max[ydim] zmin, zmax = known_target_points.min(), known_target_points.max() if opt_model_params is not None: # plot learned data if availabel #given_known_nd_data = known_nd_data.take(given_data_dims, axis=1) xdata = np.linspace(xmin, xmax) ydata = compute_K(xdata, opt_model_params) xydata = np.array((xdata, ydata)).T zdata = interpolate_error(known_nd_data, known_target_points, xydata) ax.plot(xdata, ydata, zdata, c=pt.ORANGE) ymax = max(ymax, ydata.max()) ymin = min(ymin, ydata.min()) zmin = min(zmin, zdata.min()) zmax = max(zmax, zdata.max()) ax.scatter(xdata, ydata, zdata, s=100, c=pt.ORANGE) #[t.set_color('white') for t in ax.xaxis.get_ticklines()] #[t.set_color('white') for t in ax.xaxis.get_ticklabels()] ax.set_aspect('auto') ax.set_xlim(xmin, xmax) ax.set_ylim(ymin, ymax) ax.set_zlim(zmin, zmax) import matplotlib.ticker as mtick ax.zaxis.set_major_formatter(mtick.FormatStrFormatter('%.2f')) return ax
def _chip_view(mode=0, pnum=(1, 1, 1), **kwargs):
    print('... _chip_view mode=%r' % mode_ptr[0])
    kwargs['ell'] = mode_ptr[0] == 1
    kwargs['pts'] = mode_ptr[0] == 2
    if not ischild:
        pt.figure(fnum=fnum, pnum=pnum, docla=True, doclf=True)
    # Toggle no keypoints view
    viz.show_chip(ibs, aid, fnum=fnum, pnum=pnum, config2_=config2_, **kwargs)
    pt.set_figtitle('Chip View')
def show_single_coverage_mask(qreq_, cm, weight_mask_m, weight_mask, daids, fnum=None): import plottool as pt from ibeis import viz fnum = pt.ensure_fnum(fnum) idx_list = ut.dict_take(cm.daid2_idx, daids) nPlots = len(idx_list) + 1 nRows, nCols = pt.get_square_row_cols(nPlots) pnum_ = pt.make_pnum_nextgen(nRows, nCols) pt.figure(fnum=fnum, pnum=(1, 2, 1)) # Draw coverage masks with bbox # <FlipHack> #weight_mask_m = np.fliplr(np.flipud(weight_mask_m)) #weight_mask = np.fliplr(np.flipud(weight_mask)) # </FlipHack> stacked_weights, offset_tup, sf_tup = vt.stack_images(weight_mask_m, weight_mask, return_sf=True) (woff, hoff) = offset_tup[1] wh1 = weight_mask_m.shape[0:2][::-1] wh2 = weight_mask.shape[0:2][::-1] pt.imshow(255 * (stacked_weights), fnum=fnum, pnum=pnum_(0), title='(query image) What did match vs what should match') pt.draw_bbox((0, 0) + wh1, bbox_color=(0, 0, 1)) pt.draw_bbox((woff, hoff) + wh2, bbox_color=(0, 0, 1)) # Get contributing matches qaid = cm.qaid daid_list = daids fm_list = ut.take(cm.fm_list, idx_list) fs_list = ut.take(cm.fs_list, idx_list) # Draw matches for px, (daid, fm, fs) in enumerate(zip(daid_list, fm_list, fs_list), start=1): viz.viz_matches.show_matches2(qreq_.ibs, qaid, daid, fm, fs, draw_pts=False, draw_lines=True, draw_ell=False, fnum=fnum, pnum=pnum_(px), darken=.5) coverage_score = score_matching_mask(weight_mask_m, weight_mask) pt.set_figtitle('score=%.4f' % (coverage_score, ))
def chipmatch_view(self, fnum=None, pnum=(1, 1, 1), verbose=None, **kwargs_): """ just visualizes the matches using some type of lines CommandLine: python -m ibeis.viz.interact.interact_matches --test-chipmatch_view --show Example: >>> # DISABLE_DOCTEST >>> from ibeis.viz.interact.interact_matches import * # NOQA >>> self = testdata_match_interact() >>> self.chipmatch_view() >>> pt.show_if_requested() """ if fnum is None: fnum = self.fnum if verbose is None: verbose = ut.VERBOSE ibs = self.ibs aid = self.daid qaid = self.qaid figtitle = self.figtitle # drawing mode draw: with/without lines/feats mode = kwargs_.get('mode', self.mode) draw_ell = mode >= 1 draw_lines = mode == 2 #self.mode = (self.mode + 1) % 3 pt.figure(fnum=fnum, docla=True, doclf=True) show_matches_kw = self.kwargs.copy() show_matches_kw.update( dict(fnum=fnum, pnum=pnum, draw_lines=draw_lines, draw_ell=draw_ell, colorbar_=True, vert=self.vert)) show_matches_kw.update(kwargs_) if self.warp_homog: show_matches_kw['H1'] = self.H1 #show_matches_kw['score'] = self.score show_matches_kw['rawscore'] = self.score show_matches_kw['aid2_raw_rank'] = self.rank tup = viz.viz_matches.show_matches2(ibs, self.qaid, self.daid, self.fm, self.fs, qreq_=self.qreq_, **show_matches_kw) ax, xywh1, xywh2 = tup self.xywh2 = xywh2 pt.set_figtitle(figtitle + ' ' + vh.get_vsstr(qaid, aid))
def show_edge(infr, edge, fnum=None, pnum=None, **kwargs):
    import plottool as pt
    match = infr._exec_pairwise_match([edge])[0]
    fnum = pt.ensure_fnum(fnum)
    pt.figure(fnum=fnum, pnum=pnum)
    ax = pt.gca()
    showkw = dict(vert=False, heatmask=True, show_lines=False, show_ell=False,
                  show_ori=False, show_eig=False, modifysize=True)
    showkw.update(kwargs)
    match.show(ax, **showkw)
def draw_aids(infr, aids, fnum=None):
    from ibeis.viz import viz_chip
    import plottool as pt
    # respect a caller-provided figure number (the original always passed None)
    fnum = pt.ensure_fnum(fnum)
    fig = pt.figure(fnum=fnum)
    viz_chip.show_many_chips(infr.ibs, aids, fnum=fnum)
    return fig
def draw_tree_model(model, **kwargs): import plottool as pt import networkx as netx if not ut.get_argval('--hackjunc'): fnum = pt.ensure_fnum(None) fig = pt.figure(fnum=fnum, doclf=True) # NOQA ax = pt.gca() #name_nodes = sorted(ut.list_getattr(model.ttype2_cpds['name'], 'variable')) netx_graph = model.to_markov_model() #pos = netx.pygraphviz_layout(netx_graph) #pos = netx.graphviz_layout(netx_graph) #pos = get_hacked_pos(netx_graph, name_nodes, prog='neato') pos = netx.pydot_layout(netx_graph) node_color = [pt.WHITE] * len(pos) drawkw = dict(pos=pos, ax=ax, with_labels=True, node_color=node_color, node_size=1100) netx.draw(netx_graph, **drawkw) if kwargs.get('show_title', True): pt.set_figtitle('Markov Model') if not ut.get_argval('--hackmarkov'): fnum = pt.ensure_fnum(None) fig = pt.figure(fnum=fnum, doclf=True) # NOQA ax = pt.gca() netx_graph = model.to_junction_tree() # prettify nodes def fixtupkeys(dict_): return { ', '.join(k) if isinstance(k, tuple) else k: fixtupkeys(v) for k, v in dict_.items() } n = fixtupkeys(netx_graph.node) e = fixtupkeys(netx_graph.edge) a = fixtupkeys(netx_graph.adj) netx_graph.node = n netx_graph.edge = e netx_graph.adj = a #netx_graph = model.to_markov_model() #pos = netx.pygraphviz_layout(netx_graph) #pos = netx.graphviz_layout(netx_graph) pos = netx.pydot_layout(netx_graph) node_color = [pt.WHITE] * len(pos) drawkw = dict(pos=pos, ax=ax, with_labels=True, node_color=node_color, node_size=2000) netx.draw(netx_graph, **drawkw) if kwargs.get('show_title', True): pt.set_figtitle('Junction/Clique Tree / Cluster Graph')
def prepare_page(self):
    figkw = {
        'fnum': self.fnum,
        'doclf': True,
        'docla': True,
    }
    self.fig = pt.figure(**figkw)
    ih.disconnect_callback(self.fig, 'button_press_event')
    ih.connect_callback(self.fig, 'button_press_event', self.figure_clicked)
def plot_annotaiton_gps(X_data): """ Plots gps coordinates on a map projection InstallBasemap: sudo apt-get install libgeos-dev pip install git+https://github.com/matplotlib/basemap Ignore: pip install git+git://github.com/myuser/foo.git@v123 """ import plottool as pt from mpl_toolkits.basemap import Basemap # lat = X_data[1:5, 1] # lon = X_data[1:5, 2] lat = X_data[:, 1] # NOQA lon = X_data[:, 2] # NOQA fnum = pt.ensure_fnum(None) fig = pt.figure(fnum=fnum, doclf=True, docla=True) # NOQA pt.close_figure(fig) fig = pt.figure(fnum=fnum, doclf=True, docla=True) # setup Lambert Conformal basemap. m = Basemap( llcrnrlon=lon.min(), urcrnrlon=lon.max(), llcrnrlat=lat.min(), urcrnrlat=lat.max(), projection="cea", resolution="h", ) # draw coastlines. # m.drawcoastlines() # m.drawstates() # draw a boundary around the map, fill the background. # this background will end up being the ocean color, since # the continents will be drawn on top. # m.bluemarble() m.drawmapboundary(fill_color="aqua") m.fillcontinents(color="coral", lake_color="aqua") # Convert GPS to projected coordinates x1, y1 = m(lon, lat) # convert to meters # lon==X, lat==Y m.plot(x1, y1, "o") fig.show()
def prepare_page(self, fulldraw=True):
    figkw = {
        'fnum': self.fnum,
        'doclf': fulldraw,
        'docla': fulldraw,
    }
    if fulldraw:
        self.fig = pt.figure(**figkw)
    ih.disconnect_callback(self.fig, 'button_press_event')
    ih.disconnect_callback(self.fig, 'key_press_event')
    ih.connect_callback(self.fig, 'button_press_event', self.figure_clicked)
    ih.connect_callback(self.fig, 'key_press_event', self.on_key_press)
def test_average_contrast():
    import vtool as vt
    ut.get_valid_test_imgkeys()
    img_fpath_list = [ut.grab_test_imgpath(key) for key in ut.get_valid_test_imgkeys()]
    img_list = [vt.imread(img, grayscale=True) for img in img_fpath_list]
    avecontrast_list = np.array([compute_average_contrast(img) for img in img_list])
    import plottool as pt
    nCols = len(img_list)
    fnum = None
    if fnum is None:
        fnum = pt.next_fnum()
    pt.figure(fnum=fnum, pnum=(2, 1, 1))
    sortx = avecontrast_list.argsort()
    y_list = avecontrast_list[sortx]
    x_list = np.arange(0, nCols) + .5
    pt.plot(x_list, y_list, 'bo-')
    sorted_imgs = ut.take(img_list, sortx)
    for px, img in ut.ProgressIter(enumerate(sorted_imgs, start=1)):
        pt.imshow(img, fnum=fnum, pnum=(2, nCols, nCols + px))
def sv_view(self, dodraw=True):
    """ spatial verification view """
    #fnum = viz.FNUMS['special']
    aid = self.daid
    fnum = pt.next_fnum()
    fig = pt.figure(fnum=fnum, docla=True, doclf=True)
    ih.disconnect_callback(fig, 'button_press_event')
    viz.viz_sver.show_sver(self.ibs, self.qaid, aid2=aid, fnum=fnum)
    if dodraw:
        #self.draw()
        pt.draw()
def test_average_contrast(): import vtool as vt ut.get_valid_test_imgkeys() img_fpath_list = [ ut.grab_test_imgpath(key) for key in ut.get_valid_test_imgkeys() ] img_list = [vt.imread(img, grayscale=True) for img in img_fpath_list] avecontrast_list = np.array( [compute_average_contrast(img) for img in img_list]) import plottool as pt nCols = len(img_list) fnum = None if fnum is None: fnum = pt.next_fnum() pt.figure(fnum=fnum, pnum=(2, 1, 1)) sortx = avecontrast_list.argsort() y_list = avecontrast_list[sortx] x_list = np.arange(0, nCols) + .5 pt.plot(x_list, y_list, 'bo-') sorted_imgs = ut.take(img_list, sortx) for px, img in ut.ProgressIter(enumerate(sorted_imgs, start=1)): pt.imshow(img, fnum=fnum, pnum=(2, nCols, nCols + px))
def draw_expanded_scales(imgL, sel_kpts, exkpts, exdesc_):
    import plottool as pt
    draw_keypoint_patch = pt.draw_keypoint_patch
    get_warped_patch = pt.get_warped_patch  # NOQA
    # Rows are for different scales
    # Cols are for different patches
    # There is a prefix row showing the original image
    nRows, nCols = len(exkpts), len(sel_kpts)
    exkpts_ = np.vstack(exkpts)
    fnum = 1
    pt.figure(fnum=fnum, docla=True, doclf=True)
    nPreRows = 1
    nPreCols = (nPreRows * nCols) + 1
    pt.show_keypoints(imgL, exkpts_, fnum=fnum, pnum=(nRows + nPreRows, 1, 1),
                      color=pt.BLUE)
    px = 0
    for row, kpts_ in enumerate(exkpts):
        for col, kp in enumerate(kpts_):
            sift = exdesc_[px]
            pnum = (nRows + nPreRows, nCols, px + nPreCols)
            draw_keypoint_patch(imgL, kp, sift, warped=True, fnum=fnum, pnum=pnum)
            px += 1
    #df2.draw()
    print('nRows = %r' % nRows)
    print('nCols = %r' % nCols)
def prepare_page(self, fulldraw=True): figkw = { 'fnum': self.fnum, 'doclf': fulldraw, 'docla': fulldraw, } if fulldraw: self.fig = pt.figure(**figkw) ih.disconnect_callback(self.fig, 'button_press_event') ih.disconnect_callback(self.fig, 'key_press_event') ih.connect_callback(self.fig, 'button_press_event', self.figure_clicked) ih.connect_callback(self.fig, 'key_press_event', self.on_key_press)
def next_query_slot(self): """ callback used when all interactions are completed. Generates the next incremental query and then tries the automatic interactions """ try: # Generate the query result item = six.next(self.inc_query_gen) (ibs, cm, qreq_, incinfo) = item # update incinfo self.cm = cm self.qreq_ = qreq_ self.incinfo = incinfo incinfo['count'] += 1 automatch.run_until_name_decision_signal(ibs, cm, qreq_, incinfo=incinfo) #import plottool as pt #pt.present() except StopIteration: #TODO: close this figure # incinfo['fnum'] print( 'NO MORE QUERIES. CLOSE DOWN WINDOWS AND DISPLAY DONE MESSAGE') import plottool as pt fig1 = pt.figure(fnum=511) fig2 = pt.figure(fnum=512) pt.close_figure(fig1) pt.close_figure(fig2) if 'finish_callback' in self.incinfo: self.incinfo['update_callback']() if 'finish_callback' in self.incinfo: self.incinfo['finish_callback']() pass
def draw_junction_tree(model, fnum=None, **kwargs): import plottool as pt fnum = pt.ensure_fnum(fnum) pt.figure(fnum=fnum) ax = pt.gca() from pgmpy.models import JunctionTree if not isinstance(model, JunctionTree): netx_graph = model.to_junction_tree() else: netx_graph = model # prettify nodes def fixtupkeys(dict_): return { ', '.join(k) if isinstance(k, tuple) else k: fixtupkeys(v) for k, v in dict_.items() } n = fixtupkeys(netx_graph.node) e = fixtupkeys(netx_graph.edge) a = fixtupkeys(netx_graph.adj) netx_graph.node = n netx_graph.edge = e netx_graph.adj = a #netx_graph = model.to_markov_model() #pos = nx.nx_agraph.pygraphviz_layout(netx_graph) #pos = nx.nx_agraph.graphviz_layout(netx_graph) pos = nx.pydot_layout(netx_graph) node_color = [pt.NEUTRAL] * len(pos) drawkw = dict(pos=pos, ax=ax, with_labels=True, node_color=node_color, node_size=2000) nx.draw(netx_graph, **drawkw) if kwargs.get('show_title', True): pt.set_figtitle('Junction / Clique Tree / Cluster Graph')
def prepare_page(self, fulldraw=True):
    import plottool as pt
    ih.disconnect_callback(self.fig, 'button_press_event')
    ih.disconnect_callback(self.fig, 'button_release_event')
    ih.disconnect_callback(self.fig, 'key_press_event')
    ih.disconnect_callback(self.fig, 'motion_notify_event')
    figkw = {
        'fnum': self.fnum,
        'doclf': fulldraw,
        'docla': fulldraw,
    }
    if fulldraw:
        self.fig = pt.figure(**figkw)
    self.make_hud()
    ih.connect_callback(self.fig, 'button_press_event', self.on_click)
    ih.connect_callback(self.fig, 'button_release_event', self.on_click_release)
    ih.connect_callback(self.fig, 'key_press_event', self.on_key_press)
    ih.connect_callback(self.fig, 'motion_notify_event', self.on_motion)
def draw_precision_recall_curve_(recall_range_, p_interp_curve, title_pref=None,
                                 fnum=1):
    import plottool as pt
    if recall_range_ is None:
        recall_range_ = np.array([])
        p_interp_curve = np.array([])
    fig = pt.figure(fnum=fnum, docla=True, doclf=True)  # NOQA
    # the original re-checked ``recall_range_ is None`` here, which can never be
    # True after the reassignment above; guard on an empty curve instead
    if p_interp_curve.size == 0:
        ave_p = np.nan
    else:
        ave_p = p_interp_curve.sum() / p_interp_curve.size
    pt.plot2(recall_range_, p_interp_curve, marker='o--', x_label='recall',
             y_label='precision', unitbox=True, flipx=False, color='r',
             title='Interpolated Precision Vs Recall\n' + 'avep = %r' % ave_p)
    print('Interpolated Precision')
    print(ut.repr2(list(zip(recall_range_, p_interp_curve))))
def draw_precision_recall_curve_(recall_range_, p_interp_curve, title_pref=None,
                                 fnum=1):
    import plottool as pt
    if recall_range_ is None:
        recall_range_ = np.array([])
        p_interp_curve = np.array([])
    fig = pt.figure(fnum=fnum, docla=True, doclf=True)  # NOQA
    # see note in the variant above: guard on an empty curve rather than None
    if p_interp_curve.size == 0:
        ave_p = np.nan
    else:
        ave_p = p_interp_curve.sum() / p_interp_curve.size
    pt.plot2(recall_range_, p_interp_curve, marker='o--', x_label='recall',
             y_label='precision', unitbox=True, flipx=False, color='r',
             title='Interpolated Precision Vs Recall\n' + 'avep = %r' % ave_p)
    print('Interpolated Precision')
    print(ut.list_str(list(zip(recall_range_, p_interp_curve))))
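# A minimal, self-contained sketch of exercising either variant of
# ``draw_precision_recall_curve_`` with a synthetic interpolated-precision
# curve (the values below are made up purely for illustration):
import numpy as np

recall_range_ = np.linspace(0.0, 1.0, 11)
p_interp_curve = np.clip(1.0 - 0.6 * recall_range_, 0.0, 1.0)
draw_precision_recall_curve_(recall_range_, p_interp_curve, fnum=1)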
def examine(self, aid, event=None):
    print(' examining aid %r against the query result' % aid)
    figtitle = 'Examine a specific image against the query'
    #fnum = 510
    fnum = pt.next_fnum()
    fig = pt.figure(fnum=fnum, pnum=(1, 1, 1), doclf=True, docla=True)
    # can cause freezes; should be False
    INTERACT_EXAMINE = False
    if INTERACT_EXAMINE:
        #fig = interact_matches.ishow_matches(self.ibs, self.cm, aid, figtitle=figtitle, fnum=fnum)
        fig = self.cm.ishow_matches(self.ibs, aid, figtitle=figtitle, fnum=fnum)
        print('Finished interact')
        # this is only relevant to matplotlib.__version__ < 1.4.2
        #raise Exception(
        #    'BLACK MAGIC: error intentionally included as a workaround that seems'
        #    'to fix a gui hang on certain computers.')
    else:
        viz_matches.show_matches(self.ibs, self.cm, aid, figtitle=figtitle)
    fig.show()
def prepare_page(self, fulldraw=True): import plottool as pt ih.disconnect_callback(self.fig, 'button_press_event') ih.disconnect_callback(self.fig, 'button_release_event') ih.disconnect_callback(self.fig, 'key_press_event') ih.disconnect_callback(self.fig, 'motion_notify_event') figkw = { 'fnum': self.fnum, 'doclf': fulldraw, 'docla': fulldraw, } if fulldraw: self.fig = pt.figure(**figkw) self.make_hud() ih.connect_callback(self.fig, 'button_press_event', self.on_click) ih.connect_callback(self.fig, 'button_release_event', self.on_click_release) ih.connect_callback(self.fig, 'key_press_event', self.on_key_press) ih.connect_callback(self.fig, 'motion_notify_event', self.on_motion)
def get_iters_vs_miou(harn): from pysseg.util import jsonutil import pysseg.backend.iface_caffe as iface harn.prepare_test_model() test_weight_dpaths = harn.find_test_weights_dpaths() # for test_weights_dpath in test_weight_dpaths: # harn.test_weights_dpath = test_weights_dpath # harn.test_weights_fpath = ub.readfrom(join(test_weights_dpath, 'test_weights.caffemodel.lnk')) # # if not exists(join(harn.test_weights_dpath, 'pred')): # results_fpath = join(harn.test_weights_dpath, 'results.json') # if exists(results_fpath): # results = json.load(results_fpath) iter_results = {} for test_weights_dpath in test_weight_dpaths: results_fpath = join(test_weights_dpath, 'results.json') if exists(results_fpath): iterno = iface.snapshot_iterno(test_weights_dpath) results = json.load(open(results_fpath, 'r')) ious = eval(results['ious']) iter_results[iterno] = ious iter_df = pd.DataFrame(iter_results) iter_df.columns.name = 'iterno' iter_df.index.name = 'class' fpath = join(harn.test_dpath, 'iter_ious.json') jsonutil.write_json(fpath, iter_df) iter_df = iter_df.drop([57], axis=1) iter_df.drop(harn.task.ignore_classnames).mean(axis=0) if False: """ ffmpeg -y -f image2 -i ~/aretha/store/data/work/camvid/arch/segnet_proper/test/input_nqmmrhd/weights_abvroyo_segnet_proper_None_xwfmwfo_00040000/blend_pred/%*.png -crf 25 -vcodec libx264 -vf "setpts=4*PTS" camvid-results.avi ffmpeg -y -f image2 -i out_haul83/%*.png -vcodec mpeg4 -vf "setpts=10*PTS" haul83-results.avi """ # move to computer with plottool iter_df = pd.read_json( '/home/joncrall/aretha/store/data/work/camvid/arch/segnet_proper/test/input_nqmmrhd/iter_ious.json' ) import plottool as pt pt.qtensure() from pysseg.tasks import CamVid task = CamVid() iter_miou = iter_df.drop(task.ignore_classnames).mean(axis=0) iter_miou = iter_miou.sort_index() _set_mpl_rcparams() fig = pt.figure(fnum=1, pnum=(1, 1, 1)) ax = pt.gca() iter_miou.plot(ax=ax) ax.set_xlabel('train iters') ax.set_ylabel('test mIoU') ax.set_title('Reproduced CamVid Results (init using VGG)') ub.ensuredir('result_plots') from pysseg.draw import render_figure_to_image import cv2 cv2.imwrite('result_plots/miou.png', render_figure_to_image(fig, dpi=100, transparent=True)) fig = pt.figure(fnum=2, pnum=(1, 1, 1)) ax = pt.gca() iter_iou = iter_df.drop(task.ignore_classnames).T.sort_index() # sort by results iter_iou = iter_iou[iter_iou.iloc[-1].sort_values().index[::-1]] colors = [ tuple(np.array(v[::-1]) / 255) + (1, ) for v in ub.take(task.class_colors, iter_iou.columns) ] iter_iou.plot(ax=ax, colors=colors, lw=4) ax.set_xlabel('train iters') ax.set_ylabel('test IoU') ax.set_title('Reproduced CamVid Results (init using VGG)') ub.ensuredir('result_plots') from pysseg.draw import render_figure_to_image cv2.imwrite('result_plots/perclass_iou.png', render_figure_to_image(fig, dpi=100, transparent=True))
def show_augmented_patches(Xb, Xb_, yb, yb_, data_per_label=1, shadows=None): """ from ibeis_cnn.augment import * # NOQA std_ = center_std mean_ = center_mean """ import plottool as pt import vtool as vt Xb_old = vt.rectify_to_float01(Xb) Xb_new = vt.rectify_to_float01(Xb_) # only look at ones that were actually augmented sample1 = Xb_old[0::data_per_label] sample2 = Xb_new[0::data_per_label] diff = np.abs((sample1 - sample2)) diff_batches = diff.sum(-1).sum(-1).sum(-1) > 0 modified_indexes = np.where(diff_batches > 0)[0] print('modified_indexes = %r' % (modified_indexes,)) #modified_indexes = np.arange(num_examples) Xb_old = vt.rectify_to_uint8(Xb_old) Xb_new = vt.rectify_to_uint8(Xb_new) # Group data into n-tuples grouped_idxs = [np.arange(n, len(Xb_), data_per_label) for n in range(data_per_label)] data_lists_old = vt.apply_grouping(Xb_old, grouped_idxs, axis=0) data_lists_new = vt.apply_grouping(Xb_new, grouped_idxs, axis=0) import six #chunck_sizes = (4, 10) import utool with utool.embed_on_exception_context: chunk_sizes = pt.get_square_row_cols(len(modified_indexes), max_cols=10, fix=False, inclusive=False) _iter = ut.iter_multichunks(modified_indexes, chunk_sizes) multiindices = six.next(_iter) from ibeis_cnn import draw_results tup = draw_results.get_patch_multichunks(data_lists_old, yb, {}, multiindices) orig_stack = tup[0] #stacked_img, stacked_offsets, stacked_sfs = tup tup = draw_results.get_patch_multichunks(data_lists_new, yb_, {}, multiindices) warp_stack = tup[0] #stacked_img, stacked_offsets, stacked_sfs = tup #orig_stack = stacked_img_pairs(Xb_old, modified_indexes, yb) #warp_stack = stacked_img_pairs(Xb_new, modified_indexes, yb_) if shadows is not None: # hack shadow_stack = stacked_img_pairs(shadows, modified_indexes, yb_) fnum = None fnum = pt.ensure_fnum(fnum) pt.figure(fnum) #next_pnum = pt.make_pnum_nextgen(nRows=2 + (shadows is not None), nCols=1) next_pnum = pt.make_pnum_nextgen(nCols=2 + (shadows is not None), nRows=1) pt.imshow(orig_stack, pnum=next_pnum(), title='before') pt.imshow(warp_stack, pnum=next_pnum(), title='after') if shadows is not None: pt.imshow(shadow_stack, pnum=next_pnum(), title='shadow_stack')
def show_model(model, evidence={}, soft_evidence={}, **kwargs): """ References: http://stackoverflow.com/questions/22207802/pygraphviz-networkx-set-node-level-or-layer Ignore: pkg-config --libs-only-L libcgraph sudo apt-get install libgraphviz-dev -y sudo apt-get install libgraphviz4 -y # sudo apt-get install pkg-config sudo apt-get install libgraphviz-dev # pip install git+git://github.com/pygraphviz/pygraphviz.git pip install pygraphviz python -c "import pygraphviz; print(pygraphviz.__file__)" sudo pip3 install pygraphviz --install-option="--include-path=/usr/include/graphviz" --install-option="--library-path=/usr/lib/graphviz/" python3 -c "import pygraphviz; print(pygraphviz.__file__)" """ if ut.get_argval('--hackmarkov') or ut.get_argval('--hackjunc'): draw_tree_model(model, **kwargs) return import plottool as pt import networkx as netx import matplotlib as mpl fnum = pt.ensure_fnum(None) fig = pt.figure(fnum=fnum, pnum=(3, 1, (slice(0, 2), 0)), doclf=True) # NOQA #fig = pt.figure(fnum=fnum, pnum=(3, 2, (1, slice(1, 2))), doclf=True) # NOQA ax = pt.gca() var2_post = {f.variables[0]: f for f in kwargs.get('factor_list', [])} netx_graph = (model) #netx_graph.graph.setdefault('graph', {})['size'] = '"10,5"' #netx_graph.graph.setdefault('graph', {})['rankdir'] = 'LR' pos = get_hacked_pos(netx_graph) #netx.pygraphviz_layout(netx_graph) #pos = netx.pydot_layout(netx_graph, prog='dot') #pos = netx.graphviz_layout(netx_graph) drawkw = dict(pos=pos, ax=ax, with_labels=True, node_size=1500) if evidence is not None: node_colors = [ # (pt.TRUE_BLUE (pt.WHITE if node not in soft_evidence else pt.LIGHT_PINK) if node not in evidence else pt.FALSE_RED for node in netx_graph.nodes()] for node in netx_graph.nodes(): cpd = model.var2_cpd[node] if cpd.ttype == 'score': pass drawkw['node_color'] = node_colors netx.draw(netx_graph, **drawkw) show_probs = True if show_probs: textprops = { 'family': 'monospace', 'horizontalalignment': 'left', #'horizontalalignment': 'center', #'size': 12, 'size': 8, } textkw = dict( xycoords='data', boxcoords='offset points', pad=0.25, frameon=True, arrowprops=dict(arrowstyle='->'), #bboxprops=dict(fc=node_attr['fillcolor']), ) netx_nodes = model.nodes(data=True) node_key_list = ut.get_list_column(netx_nodes, 0) pos_list = ut.dict_take(pos, node_key_list) artist_list = [] offset_box_list = [] for pos_, node in zip(pos_list, netx_nodes): x, y = pos_ variable = node[0] cpd = model.var2_cpd[variable] prior_marg = (cpd if cpd.evidence is None else cpd.marginalize(cpd.evidence, inplace=False)) prior_text = None text = None if variable in evidence: text = cpd.variable_statenames[evidence[variable]] elif variable in var2_post: post_marg = var2_post[variable] text = pgm_ext.make_factor_text(post_marg, 'post') prior_text = pgm_ext.make_factor_text(prior_marg, 'prior') else: if len(evidence) == 0 and len(soft_evidence) == 0: prior_text = pgm_ext.make_factor_text(prior_marg, 'prior') show_post = kwargs.get('show_post', False) show_prior = kwargs.get('show_prior', False) show_prior = True show_post = True show_ev = (evidence is not None and variable in evidence) if (show_post or show_ev) and text is not None: offset_box = mpl.offsetbox.TextArea(text, textprops) artist = mpl.offsetbox.AnnotationBbox( # offset_box, (x + 5, y), xybox=(20., 5.), offset_box, (x, y + 5), xybox=(4., 20.), #box_alignment=(0, 0), box_alignment=(.5, 0), **textkw) offset_box_list.append(offset_box) artist_list.append(artist) if show_prior and prior_text is not None: offset_box2 = mpl.offsetbox.TextArea(prior_text, 
textprops) artist2 = mpl.offsetbox.AnnotationBbox( # offset_box2, (x - 5, y), xybox=(-20., -15.), # offset_box2, (x, y - 5), xybox=(-15., -20.), offset_box2, (x, y - 5), xybox=(-4, -20.), #box_alignment=(1, 1), box_alignment=(.5, 1), **textkw) offset_box_list.append(offset_box2) artist_list.append(artist2) for artist in artist_list: ax.add_artist(artist) xmin, ymin = np.array(pos_list).min(axis=0) xmax, ymax = np.array(pos_list).max(axis=0) num_annots = len(model.ttype2_cpds['name']) if num_annots > 4: ax.set_xlim((xmin - 40, xmax + 40)) ax.set_ylim((ymin - 50, ymax + 50)) fig.set_size_inches(30, 7) else: ax.set_xlim((xmin - 42, xmax + 42)) ax.set_ylim((ymin - 50, ymax + 50)) fig.set_size_inches(23, 7) fig = pt.gcf() title = 'num_names=%r, num_annots=%r' % (model.num_names, num_annots,) map_assign = kwargs.get('map_assign', None) #max_marginal_list = [] #for name, marginal in marginalized_joints.items(): # states = list(ut.iprod(*marginal.statenames)) # vals = marginal.values.ravel() # x = vals.argmax() # max_marginal_list += ['P(' + ', '.join(states[x]) + ') = ' + str(vals[x])] # title += str(marginal) top_assignments = kwargs.get('top_assignments', None) if top_assignments is not None: map_assign, map_prob = top_assignments[0] if map_assign is not None: # title += '\nMAP=' + ut.repr2(map_assign, strvals=True) title += '\nMAP: ' + map_assign + ' @' + '%.2f%%' % (100 * map_prob,) if kwargs.get('show_title', True): pt.set_figtitle(title, size=14) #pt.set_xlabel() def hack_fix_centeralign(): if textprops['horizontalalignment'] == 'center': print('Fixing centeralign') fig = pt.gcf() fig.canvas.draw() # Superhack for centered text. Fix bug in # /usr/local/lib/python2.7/dist-packages/matplotlib/offsetbox.py # /usr/local/lib/python2.7/dist-packages/matplotlib/text.py for offset_box in offset_box_list: offset_box.set_offset z = offset_box._text.get_window_extent() (z.x1 - z.x0) / 2 offset_box._text T = offset_box._text.get_transform() A = mpl.transforms.Affine2D() A.clear() A.translate((z.x1 - z.x0) / 2, 0) offset_box._text.set_transform(T + A) hack_fix_centeralign() top_assignments = kwargs.get('top_assignments', None) if top_assignments is not None: bin_labels = ut.get_list_column(top_assignments, 0) bin_vals = ut.get_list_column(top_assignments, 1) # bin_labels = ['\n'.join(ut.textwrap.wrap(_lbl, width=30)) for _lbl in bin_labels] pt.draw_histogram(bin_labels, bin_vals, fnum=fnum, pnum=(3, 8, (2, slice(4, None))), transpose=True, use_darkbackground=False, #xtick_rotation=-10, ylabel='Prob', xlabel='assignment') pt.set_title('Assignment probabilities')
def viz_netx_chipgraph(ibs, netx_graph, fnum=None, with_images=False, zoom=ZOOM): r""" Args: ibs (IBEISController): ibeis controller object netx_graph (?): fnum (int): figure number(default = None) with_images (bool): (default = False) zoom (float): (default = 0.4) Returns: ?: pos CommandLine: python -m ibeis.viz.viz_graph --exec-viz_netx_chipgraph --show Example: >>> # DISABLE_DOCTEST >>> from ibeis.viz.viz_graph import * # NOQA >>> import ibeis >>> ibs = ibeis.opendb(defaultdb='testdb1') >>> nid_list = ibs.get_valid_nids()[0:5] >>> fnum = None >>> with_images = True >>> zoom = 0.4 >>> #pos = viz_netx_chipgraph(ibs, netx_graph, fnum, with_images, zoom) >>> make_name_graph_interaction(ibs, None, ibs.get_valid_aids()[0:1]) >>> #make_name_graph_interaction(ibs, nid_list) >>> ut.show_if_requested() """ if fnum is None: fnum = pt.next_fnum() #zoom = .8 print('[viz_graph] drawing chip graph') pt.figure(fnum=fnum, pnum=(1, 1, 1)) ax = pt.gca() #pos = netx.spring_layout(graph) aid_list = netx_graph.nodes() IMPLICIT_LAYOUT = len(set(ibs.get_annot_nids(aid_list))) != 1 # FIXME print('zoom = %r' % (zoom,)) if IMPLICIT_LAYOUT: # HACK: # Use name edge to make pos (very bad) aids1, aids2 = get_name_rowid_edges_from_aids(ibs, aid_list) netx_graph_hack = make_netx_graph_from_aidpairs(ibs, aids1, aids2, unique_aids=aid_list) pos = netx.graphviz_layout(netx_graph_hack) else: pos = netx.graphviz_layout(netx_graph) #pos = netx.fruchterman_reingold_layout(netx_graph) #pos = netx.spring_layout(netx_graph) netx.draw(netx_graph, pos=pos, ax=ax) with_nid_edges = True if with_nid_edges: import matplotlib as mpl import scipy.sparse as spsparse aids1, aids2 = get_name_rowid_edges_from_aids(ibs, aid_list) edge_pts1 = np.array(ut.dict_take(pos, aids1), dtype=np.int32) edge_pts2 = np.array(ut.dict_take(pos, aids2), dtype=np.int32) if len(edge_pts1) == 0: edge_pts1 = edge_pts1[:, None] if len(edge_pts2) == 0: edge_pts2 = edge_pts2[:, None] I = np.array(aids1) J = np.array(aids2) if len(aid_list) > 0: N = max(aid_list) + 1 else: N = 1 forced_edge_idxs = ut.dict_take(dict(zip(zip(I, J), range(len(I)))), netx_graph.edges()) data = vt.L2(edge_pts1, edge_pts2) if len(forced_edge_idxs) > 0: data[forced_edge_idxs] = 0.00001 graph = spsparse.coo_matrix((data, (I, J)), shape=(N, N)) def extract_connected_compoments(graph): import scipy.sparse as spsparse import utool as ut # I think this is how extraction is done? 
# only returns edge info # so singletons are not represented shape = graph.shape csr_graph = graph.tocsr() num_components, labels = spsparse.csgraph.connected_components(csr_graph) unique_labels = np.unique(labels) group_flags_list = [labels == groupid for groupid in unique_labels] subgraph_list = [] for label, group_flags in zip(unique_labels, group_flags_list): num_members = group_flags.sum() ixs = list(range(num_members)) if num_members == 0: continue group_rowix, group_cols = csr_graph[group_flags, :].nonzero() if len(group_cols) == 0: continue ix2_row = dict(zip(ixs, np.nonzero(group_flags)[0])) group_rows = ut.dict_take(ix2_row, group_rowix) component = (group_rows, group_cols.tolist()) data = csr_graph[component].tolist()[0] subgraph = spsparse.coo_matrix((data, component), shape=shape) subgraph_list.append(subgraph) #assert len(compoment_list) == num_components, 'bad impl' return subgraph_list subgraph_list = extract_connected_compoments(graph) spantree_aids1_ = [] spantree_aids2_ = [] for subgraph in subgraph_list: subgraph_spantree = spsparse.csgraph.minimum_spanning_tree(subgraph) min_aids1_, min_aids2_ = subgraph_spantree.nonzero() spantree_aids1_.extend(min_aids1_) spantree_aids2_.extend(min_aids2_) edge_pts1_ = np.array(ut.dict_take(pos, spantree_aids1_)) edge_pts2_ = np.array(ut.dict_take(pos, spantree_aids2_)) segments = list(zip(edge_pts1_, edge_pts2_)) #pt.distinct_colors color_list = pt.DARK_ORANGE #color_list = pt.BLACK line_group = mpl.collections.LineCollection(segments, color=color_list, alpha=.3, lw=4) ax.add_collection(line_group) if with_images: import cv2 pos_list = ut.dict_take(pos, aid_list) img_list = ibs.get_annot_chips(aid_list) img_list = [vt.resize_thumb(img, (220, 220)) for img in img_list] img_list = [cv2.cvtColor(img, cv2.COLOR_BGR2RGB) for img in img_list] artist_list = netx_draw_images_at_positions(img_list, pos_list, zoom=zoom) for artist, aid in zip(artist_list, aid_list): pt.set_plotdat(artist, 'aid', aid) return pos
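# The sparse-graph machinery used above (connected components followed by a
# per-component minimum spanning tree) can be tried in isolation. A small
# self-contained sketch on a toy adjacency matrix, independent of ibeis:
import numpy as np
import scipy.sparse as spsparse
from scipy.sparse import csgraph

# toy weighted adjacency with two components: {0, 1, 2} and {3, 4}
rows = np.array([0, 0, 1, 3])
cols = np.array([1, 2, 2, 4])
data = np.array([1.0, 4.0, 2.0, 3.0])
graph = spsparse.coo_matrix((data, (rows, cols)), shape=(5, 5))

n_components, labels = csgraph.connected_components(graph.tocsr(), directed=False)
print(n_components, labels)      # -> 2 components

# the spanning forest keeps only the lightest edges within each component
mst = csgraph.minimum_spanning_tree(graph)
print(mst.nonzero())             # edges retained by the spanning tree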
def draw_bayesian_model(model, evidence={}, soft_evidence={}, fnum=None, pnum=None, **kwargs): from pgmpy.models import BayesianModel if not isinstance(model, BayesianModel): model = model.to_bayesian_model() import plottool as pt import networkx as netx factor_list = kwargs.get('factor_list', []) ttype_colors, ttype_scalars = make_colorcodes(model) textprops = { 'horizontalalignment': 'left', 'family': 'monospace', 'size': 8, } # build graph attrs tup = get_node_viz_attrs( model, evidence, soft_evidence, factor_list, ttype_colors, **kwargs) node_color, pos_list, pos_dict, takws = tup # draw graph has_infered = evidence or 'factor_list' in kwargs fig = pt.figure(fnum=fnum, pnum=pnum, doclf=True) # NOQA ax = pt.gca() drawkw = dict(pos=pos_dict, ax=ax, with_labels=True, node_size=1100, node_color=node_color) netx.draw(model, **drawkw) hacks = [pt.draw_text_annotations(textprops=textprops, **takw) for takw in takws if takw] xmin, ymin = np.array(pos_list).min(axis=0) xmax, ymax = np.array(pos_list).max(axis=0) if 'name' in model.ttype2_template: num_names = len(model.ttype2_template['name'].basis) num_annots = len(model.ttype2_cpds['name']) if num_annots > 4: ax.set_xlim((xmin - 40, xmax + 40)) ax.set_ylim((ymin - 50, ymax + 50)) fig.set_size_inches(30, 7) else: ax.set_xlim((xmin - 42, xmax + 42)) ax.set_ylim((ymin - 50, ymax + 50)) fig.set_size_inches(23, 7) title = 'num_names=%r, num_annots=%r' % (num_names, num_annots,) else: title = '' map_assign = kwargs.get('map_assign', None) def word_insert(text): return '' if len(text) == 0 else text + ' ' top_assignments = kwargs.get('top_assignments', None) if top_assignments is not None: map_assign, map_prob = top_assignments[0] if map_assign is not None: title += '\n%sMAP: ' % (word_insert(kwargs.get('method', ''))) title += map_assign + ' @' + '%.2f%%' % (100 * map_prob,) if kwargs.get('show_title', True): pt.set_figtitle(title, size=14) for hack in hacks: hack() if has_infered: # Hack in colorbars # if ut.list_type(basis) is int: # pt.colorbar(scalars, colors, lbl='score', ticklabels=np.array(basis) + 1) # else: # pt.colorbar(scalars, colors, lbl='score', ticklabels=basis) keys = ['name', 'score'] locs = ['left', 'right'] for key, loc in zip(keys, locs): if key in ttype_colors: basis = model.ttype2_template[key].basis # scalars = colors = ttype_colors[key] scalars = ttype_scalars[key] pt.colorbar(scalars, colors, lbl=key, ticklabels=basis, ticklocation=loc)
def draw_markov_model(model, fnum=None, **kwargs): import plottool as pt fnum = pt.ensure_fnum(fnum) pt.figure(fnum=fnum, doclf=True) ax = pt.gca() from pgmpy.models import MarkovModel if isinstance(model, MarkovModel): markovmodel = model else: markovmodel = model.to_markov_model() # pos = nx.nx_agraph.pydot_layout(markovmodel) pos = nx.nx_agraph.pygraphviz_layout(markovmodel) # Referenecs: # https://groups.google.com/forum/#!topic/networkx-discuss/FwYk0ixLDuY # pos = nx.spring_layout(markovmodel) # pos = nx.circular_layout(markovmodel) # curved-arrow # markovmodel.edge_attr['curved-arrow'] = True # markovmodel.graph.setdefault('edge', {})['splines'] = 'curved' # markovmodel.graph.setdefault('graph', {})['splines'] = 'curved' # markovmodel.graph.setdefault('edge', {})['splines'] = 'curved' node_color = [pt.NEUTRAL] * len(pos) drawkw = dict( pos=pos, ax=ax, with_labels=True, node_color=node_color, # NOQA node_size=1100) from matplotlib.patches import FancyArrowPatch, Circle import numpy as np def draw_network(G, pos, ax, sg=None): for n in G: c = Circle(pos[n], radius=10, alpha=0.5, color=pt.NEUTRAL_BLUE) ax.add_patch(c) G.node[n]['patch'] = c x, y = pos[n] pt.ax_absolute_text(x, y, n, ha='center', va='center') seen = {} for (u, v, d) in G.edges(data=True): n1 = G.node[u]['patch'] n2 = G.node[v]['patch'] rad = 0.1 if (u, v) in seen: rad = seen.get((u, v)) rad = (rad + np.sign(rad) * 0.1) * -1 alpha = 0.5 color = 'k' e = FancyArrowPatch( n1.center, n2.center, patchA=n1, patchB=n2, # arrowstyle='-|>', arrowstyle='-', connectionstyle='arc3,rad=%s' % rad, mutation_scale=10.0, lw=2, alpha=alpha, color=color) seen[(u, v)] = rad ax.add_patch(e) return e # nx.draw(markovmodel, **drawkw) draw_network(markovmodel, pos, ax) ax.autoscale() pt.plt.axis('equal') pt.plt.axis('off') if kwargs.get('show_title', True): pt.set_figtitle('Markov Model')
def draw_markov_model(model, fnum=None, **kwargs): import plottool as pt fnum = pt.ensure_fnum(fnum) pt.figure(fnum=fnum, doclf=True) ax = pt.gca() from pgmpy.models import MarkovModel if isinstance(model, MarkovModel): markovmodel = model else: markovmodel = model.to_markov_model() # pos = netx.pydot_layout(markovmodel) pos = netx.pygraphviz_layout(markovmodel) # Referenecs: # https://groups.google.com/forum/#!topic/networkx-discuss/FwYk0ixLDuY # pos = netx.spring_layout(markovmodel) # pos = netx.circular_layout(markovmodel) # curved-arrow # markovmodel.edge_attr['curved-arrow'] = True # markovmodel.graph.setdefault('edge', {})['splines'] = 'curved' # markovmodel.graph.setdefault('graph', {})['splines'] = 'curved' # markovmodel.graph.setdefault('edge', {})['splines'] = 'curved' node_color = [pt.NEUTRAL] * len(pos) drawkw = dict(pos=pos, ax=ax, with_labels=True, node_color=node_color, # NOQA node_size=1100) from matplotlib.patches import FancyArrowPatch, Circle import numpy as np def draw_network(G, pos, ax, sg=None): for n in G: c = Circle(pos[n], radius=10, alpha=0.5, color=pt.NEUTRAL_BLUE) ax.add_patch(c) G.node[n]['patch'] = c x, y = pos[n] pt.ax_absolute_text(x, y, n, ha='center', va='center') seen = {} for (u, v, d) in G.edges(data=True): n1 = G.node[u]['patch'] n2 = G.node[v]['patch'] rad = 0.1 if (u, v) in seen: rad = seen.get((u, v)) rad = (rad + np.sign(rad) * 0.1) * -1 alpha = 0.5 color = 'k' e = FancyArrowPatch(n1.center, n2.center, patchA=n1, patchB=n2, # arrowstyle='-|>', arrowstyle='-', connectionstyle='arc3,rad=%s' % rad, mutation_scale=10.0, lw=2, alpha=alpha, color=color) seen[(u, v)] = rad ax.add_patch(e) return e # netx.draw(markovmodel, **drawkw) draw_network(markovmodel, pos, ax) ax.autoscale() pt.plt.axis('equal') pt.plt.axis('off') if kwargs.get('show_title', True): pt.set_figtitle('Markov Model')
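# The curved-edge technique in ``draw_network`` above (a FancyArrowPatch with an
# 'arc3' connection style drawn between two Circle patches) works with plain
# matplotlib. A minimal sketch assuming nothing beyond matplotlib itself:
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, FancyArrowPatch

fig, ax = plt.subplots()
n1 = Circle((0.2, 0.5), radius=0.05, alpha=0.5, color='steelblue')
n2 = Circle((0.8, 0.5), radius=0.05, alpha=0.5, color='steelblue')
ax.add_patch(n1)
ax.add_patch(n2)
# rad controls how strongly the edge arcs between the two node patches
edge = FancyArrowPatch(n1.center, n2.center, patchA=n1, patchB=n2,
                       arrowstyle='-', connectionstyle='arc3,rad=0.2',
                       mutation_scale=10.0, lw=2, alpha=0.5, color='k')
ax.add_patch(edge)
ax.set_aspect('equal')
ax.axis('off')
plt.show()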
def viz_netx_chipgraph(ibs, graph, fnum=None, use_image=False, layout=None, zoom=None, prog='neato', as_directed=False, augment_graph=True, layoutkw=None, framewidth=3.0, **kwargs): r""" DEPRICATE or improve Args: ibs (IBEISController): ibeis controller object graph (nx.DiGraph): fnum (int): figure number(default = None) use_image (bool): (default = False) zoom (float): (default = 0.4) Returns: ?: pos CommandLine: python -m ibeis --tf viz_netx_chipgraph --show Cand: ibeis review_tagged_joins --save figures4/mergecase.png --figsize=15,15 --clipwhite --diskshow ibeis compute_occurrence_groups --save figures4/occurgraph.png --figsize=40,40 --clipwhite --diskshow ~/code/ibeis/ibeis/algo/preproc/preproc_occurrence.py Example: >>> # DISABLE_DOCTEST >>> from ibeis.viz.viz_graph import * # NOQA >>> import ibeis >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST') >>> nid_list = ibs.get_valid_nids()[0:10] >>> fnum = None >>> use_image = True >>> zoom = 0.4 >>> make_name_graph_interaction(ibs, nid_list, prog='neato') >>> ut.show_if_requested() """ import plottool as pt print('[viz_graph] drawing chip graph') fnum = pt.ensure_fnum(fnum) pt.figure(fnum=fnum, pnum=(1, 1, 1)) ax = pt.gca() if layout is None: layout = 'agraph' print('layout = %r' % (layout, )) if use_image: ensure_node_images(ibs, graph) nx.set_node_attributes(graph, 'shape', 'rect') if layoutkw is None: layoutkw = {} layoutkw['prog'] = layoutkw.get('prog', prog) layoutkw.update(kwargs) if prog == 'neato': graph = graph.to_undirected() plotinfo = pt.show_nx( graph, ax=ax, # img_dict=img_dict, layout=layout, # hacknonode=bool(use_image), layoutkw=layoutkw, as_directed=as_directed, framewidth=framewidth, ) return plotinfo
def select_ith_match(self, mx): """ Selects the ith match and visualizes and prints information concerning features weights, keypoint details, and sift descriptions """ import plottool as pt from plottool import viz_featrow from plottool import interact_helpers as ih # <CLOSURE VARS> fnum = self.fnum same_fig = self.same_fig rchip1 = self.rchip1 rchip2 = self.rchip2 # </CLOSURE VARS> self.mx = mx print("+--- SELECT --- ") print("... selecting mx-th=%r feature match" % mx) fsv = self.fsv fs = self.fs print("score stats:") print(ut.get_stats_str(fsv, axis=0, newlines=True)) print("fsv[mx] = %r" % (fsv[mx],)) print("fs[mx] = %r" % (fs[mx],)) # ---------------------- # Get info for the select_ith_match plot self.mode = 1 # Get the mx-th feature match fx1, fx2 = self.fm[mx] # Older info fscore2 = self.fs[mx] fk2 = None kp1, kp2 = self.kpts1[fx1], self.kpts2[fx2] vecs1, vecs2 = self.vecs1[fx1], self.vecs2[fx2] info1 = "\nquery" info2 = "\nk=%r fscore=%r" % (fk2, fscore2) # self.last_fx = fx1 self.last_fx = fx1 # Extracted keypoints to draw extracted_list = [(rchip1, kp1, vecs1, fx1, "aid1", info1), (rchip2, kp2, vecs2, fx2, "aid2", info2)] # Normalizng Keypoint # if hasattr(cm, 'filt2_meta') and 'lnbnn' in cm.filt2_meta: # qfx2_norm = cm.filt2_meta['lnbnn'] # # Normalizing chip and feature # (aid3, fx3, normk) = qfx2_norm[fx1] # rchip3 = ibs.get_annot_chips(aid3) # kp3 = ibs.get_annot_kpts(aid3)[fx3] # sift3 = ibs.get_annot_vecs(aid3)[fx3] # info3 = '\nnorm %s k=%r' % (vh.get_aidstrs(aid3), normk) # extracted_list.append((rchip3, kp3, sift3, fx3, aid3, info3)) # else: # pass # #print('WARNING: meta doesnt exist') # ---------------------- # Draw the select_ith_match plot nRows, nCols = len(extracted_list) + same_fig, 3 # Draw matching chips and features sel_fm = np.array([(fx1, fx2)]) pnum1 = (nRows, 1, 1) if same_fig else (1, 1, 1) vert = self.vert if self.vert is not None else False self.chipmatch_view(pnum=pnum1, ell_alpha=0.4, ell_linewidth=1.8, colors=pt.BLUE, sel_fm=sel_fm, vert=vert) # Draw selected feature matches px = nCols * same_fig # plot offset prevsift = None if not same_fig: # fnum2 = fnum + len(viz.FNUMS) fnum2 = self.fnum2 fig2 = pt.figure(fnum=fnum2, docla=True, doclf=True) else: fnum2 = fnum for (rchip, kp, sift, fx, aid, info) in extracted_list: px = viz_featrow.draw_feat_row( rchip, fx, kp, sift, fnum2, nRows, nCols, px, prevsift=prevsift, aid=aid, info=info ) prevsift = sift if not same_fig: ih.connect_callback(fig2, "button_press_event", self.on_click)
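# Hedged sketch of the data layout select_ith_match works with: fm is an (N, 2)
# integer array of (query feature index, data feature index) pairs, fs holds one
# score per pair, and kpts*/vecs* are per-annotation feature arrays indexed by
# those feature indices. All array sizes and values below are invented for
# illustration.
import numpy as np

num_feats1, num_feats2 = 100, 120
kpts1 = np.random.rand(num_feats1, 6)   # keypoints as (x, y, a, c, d, ori) rows
kpts2 = np.random.rand(num_feats2, 6)
vecs1 = np.random.randint(0, 256, (num_feats1, 128)).astype(np.uint8)  # SIFT-like
vecs2 = np.random.randint(0, 256, (num_feats2, 128)).astype(np.uint8)

fm = np.array([[3, 7], [10, 42], [55, 99]])  # feature correspondences
fs = np.array([0.9, 0.4, 0.7])               # one score per correspondence

mx = int(fs.argmax())             # pick the strongest match to "select"
fx1, fx2 = fm[mx]                 # unpack the feature index pair
kp1, kp2 = kpts1[fx1], kpts2[fx2]
vec1, vec2 = vecs1[fx1], vecs2[fx2]
sel_fm = fm[mx:mx + 1]            # (1, 2) array, like the sel_fm passed to chipmatch_view
print('selected match %d: fx1=%d fx2=%d score=%.2f' % (mx, fx1, fx2, fs[mx]))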
def show_model(model, evidence={}, soft_evidence={}, **kwargs): """ References: http://stackoverflow.com/questions/22207802/pygraphviz-networkx-set-node-level-or-layer Ignore: pkg-config --libs-only-L libcgraph sudo apt-get install libgraphviz-dev -y sudo apt-get install libgraphviz4 -y # sudo apt-get install pkg-config sudo apt-get install libgraphviz-dev # pip install git+git://github.com/pygraphviz/pygraphviz.git pip install pygraphviz python -c "import pygraphviz; print(pygraphviz.__file__)" sudo pip3 install pygraphviz --install-option="--include-path=/usr/include/graphviz" --install-option="--library-path=/usr/lib/graphviz/" python3 -c "import pygraphviz; print(pygraphviz.__file__)" CommandLine: python -m ibeis.algo.hots.bayes --exec-show_model --show Example: >>> # DISABLE_DOCTEST >>> from ibeis.algo.hots.bayes import * # NOQA >>> model = '?' >>> evidence = {} >>> soft_evidence = {} >>> result = show_model(model, evidence, soft_evidence) >>> print(result) >>> ut.quit_if_noshow() >>> import plottool as pt >>> ut.show_if_requested() """ if ut.get_argval('--hackmarkov') or ut.get_argval('--hackjunc'): draw_tree_model(model, **kwargs) return import plottool as pt import networkx as netx fnum = pt.ensure_fnum(None) netx_graph = (model) #netx_graph.graph.setdefault('graph', {})['size'] = '"10,5"' #netx_graph.graph.setdefault('graph', {})['rankdir'] = 'LR' pos_dict = get_hacked_pos(netx_graph) #pos_dict = netx.pygraphviz_layout(netx_graph) #pos = netx.pydot_layout(netx_graph, prog='dot') #pos_dict = netx.graphviz_layout(netx_graph) textprops = { 'family': 'monospace', 'horizontalalignment': 'left', #'horizontalalignment': 'center', #'size': 12, 'size': 8, } netx_nodes = model.nodes(data=True) node_key_list = ut.get_list_column(netx_nodes, 0) pos_list = ut.dict_take(pos_dict, node_key_list) var2_post = {f.variables[0]: f for f in kwargs.get('factor_list', [])} prior_text = None post_text = None evidence_tas = [] post_tas = [] prior_tas = [] node_color = [] has_infered = evidence or var2_post if has_infered: ignore_prior_with_ttype = ['score', 'match'] show_prior = False else: ignore_prior_with_ttype = [] #show_prior = True show_prior = False dpy = 5 dbx, dby = (20, 20) takw1 = {'bbox_align': (.5, 0), 'pos_offset': [0, dpy], 'bbox_offset': [dbx, dby]} takw2 = {'bbox_align': (.5, 1), 'pos_offset': [0, -dpy], 'bbox_offset': [-dbx, -dby]} name_colors = pt.distinct_colors(max(model.num_names, 10)) name_colors = name_colors[:model.num_names] #cmap_ = 'hot' #mx = 0.65 #mn = 0.15 cmap_, mn, mx = 'plasma', 0.15, 1.0 _cmap = pt.plt.get_cmap(cmap_) def cmap(x): return _cmap((x * mx) + mn) for node, pos in zip(netx_nodes, pos_list): variable = node[0] cpd = model.var2_cpd[variable] prior_marg = (cpd if cpd.evidence is None else cpd.marginalize(cpd.evidence, inplace=False)) show_evidence = variable in evidence show_prior = cpd.ttype not in ignore_prior_with_ttype show_post = variable in var2_post show_prior |= cpd.ttype not in ignore_prior_with_ttype post_marg = None if show_post: post_marg = var2_post[variable] def get_name_color(phi): order = phi.values.argsort()[::-1] if len(order) < 2: dist_next = phi.values[order[0]] else: dist_next = phi.values[order[0]] - phi.values[order[1]] dist_total = (phi.values[order[0]]) confidence = (dist_total * dist_next) ** (2.5 / 4) #print('confidence = %r' % (confidence,)) color = name_colors[order[0]] color = pt.color_funcs.desaturate_rgb(color, 1 - confidence) color = np.array(color) return color if variable in evidence: if cpd.ttype == 'score': cmap_index = 
evidence[variable] / (cpd.variable_card - 1) color = cmap(cmap_index) color = pt.lighten_rgb(color, .4) color = np.array(color) node_color.append(color) elif cpd.ttype == 'name': color = name_colors[evidence[variable]] color = np.array(color) node_color.append(color) else: color = pt.FALSE_RED node_color.append(color) #elif variable in soft_evidence: # color = pt.LIGHT_PINK # show_prior = True # color = get_name_color(prior_marg) # node_color.append(color) else: if cpd.ttype == 'name' and post_marg is not None: color = get_name_color(post_marg) node_color.append(color) elif cpd.ttype == 'match' and post_marg is not None: color = cmap(post_marg.values[1]) color = pt.lighten_rgb(color, .4) color = np.array(color) node_color.append(color) else: #color = pt.WHITE color = pt.NEUTRAL node_color.append(color) if show_prior: if variable in soft_evidence: prior_color = pt.LIGHT_PINK else: prior_color = None prior_text = pgm_ext.make_factor_text(prior_marg, 'prior') prior_tas.append(dict(text=prior_text, pos=pos, color=prior_color, **takw2)) if show_evidence: _takw1 = takw1 if cpd.ttype == 'score': _takw1 = takw2 evidence_text = cpd.variable_statenames[evidence[variable]] if isinstance(evidence_text, int): evidence_text = '%d/%d' % (evidence_text + 1, cpd.variable_card) evidence_tas.append(dict(text=evidence_text, pos=pos, color=color, **_takw1)) if show_post: _takw1 = takw1 if cpd.ttype == 'match': _takw1 = takw2 post_text = pgm_ext.make_factor_text(post_marg, 'post') post_tas.append(dict(text=post_text, pos=pos, color=None, **_takw1)) def trnps_(dict_list): """ tranpose dict list """ list_dict = ut.ddict(list) for dict_ in dict_list: for key, val in dict_.items(): list_dict[key + '_list'].append(val) return list_dict takw1_ = trnps_(post_tas + evidence_tas) takw2_ = trnps_(prior_tas) # Draw graph if has_infered: pnum1 = (3, 1, (slice(0, 2), 0)) else: pnum1 = None fig = pt.figure(fnum=fnum, pnum=pnum1, doclf=True) # NOQA ax = pt.gca() #print('node_color = %s' % (ut.repr3(node_color),)) drawkw = dict(pos=pos_dict, ax=ax, with_labels=True, node_size=1500, node_color=node_color) netx.draw(netx_graph, **drawkw) hacks = [] if len(post_tas + evidence_tas): hacks.append(pt.draw_text_annotations(textprops=textprops, **takw1_)) if prior_tas: hacks.append(pt.draw_text_annotations(textprops=textprops, **takw2_)) xmin, ymin = np.array(pos_list).min(axis=0) xmax, ymax = np.array(pos_list).max(axis=0) num_annots = len(model.ttype2_cpds['name']) if num_annots > 4: ax.set_xlim((xmin - 40, xmax + 40)) ax.set_ylim((ymin - 50, ymax + 50)) fig.set_size_inches(30, 7) else: ax.set_xlim((xmin - 42, xmax + 42)) ax.set_ylim((ymin - 50, ymax + 50)) fig.set_size_inches(23, 7) fig = pt.gcf() title = 'num_names=%r, num_annots=%r' % (model.num_names, num_annots,) map_assign = kwargs.get('map_assign', None) top_assignments = kwargs.get('top_assignments', None) if top_assignments is not None: map_assign, map_prob = top_assignments[0] if map_assign is not None: def word_insert(text): return '' if len(text) == 0 else text + ' ' title += '\n%sMAP: ' % (word_insert(kwargs.get('method', ''))) title += map_assign + ' @' + '%.2f%%' % (100 * map_prob,) if kwargs.get('show_title', True): pt.set_figtitle(title, size=14) for hack in hacks: hack() # Hack in colorbars if has_infered: pt.colorbar(np.linspace(0, 1, len(name_colors)), name_colors, lbl='name', ticklabels=model.ttype2_template['name'].basis, ticklocation='left') basis = model.ttype2_template['score'].basis scalars = np.linspace(0, 1, len(basis)) scalars = np.linspace(0, 1, 100) 
colors = pt.scores_to_color(scalars, cmap_=cmap_, reverse_cmap=False, cmap_range=(mn, mx)) colors = [pt.lighten_rgb(c, .4) for c in colors] if ut.list_type(basis) is int: pt.colorbar(scalars, colors, lbl='score', ticklabels=np.array(basis) + 1) else: pt.colorbar(scalars, colors, lbl='score', ticklabels=basis) #print('basis = %r' % (basis,)) # Draw probability hist if has_infered and top_assignments is not None: bin_labels = ut.get_list_column(top_assignments, 0) bin_vals = ut.get_list_column(top_assignments, 1) # bin_labels = ['\n'.join(ut.textwrap.wrap(_lbl, width=30)) for _lbl in bin_labels] pt.draw_histogram(bin_labels, bin_vals, fnum=fnum, pnum=(3, 8, (2, slice(4, None))), transpose=True, use_darkbackground=False, #xtick_rotation=-10, ylabel='Prob', xlabel='assignment') pt.set_title('Assignment probabilities')
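# The get_name_color helper inside show_model turns a name marginal into a
# confidence by weighting the winner's probability by its margin over the
# runner-up, then desaturates the name color as confidence drops. Below is a
# small sketch of just that scoring step; the probability vector is made up and
# the desaturation itself (pt.color_funcs.desaturate_rgb) is not repeated.
import numpy as np

phi_values = np.array([0.55, 0.30, 0.15])    # stand-in for a P(name) posterior
order = phi_values.argsort()[::-1]           # indices sorted high to low
best = phi_values[order[0]]
if len(order) < 2:
    dist_next = best
else:
    dist_next = best - phi_values[order[1]]  # margin over the runner-up
confidence = (best * dist_next) ** (2.5 / 4)  # same exponent as the code above
print('winner=%d best=%.2f margin=%.2f confidence=%.3f'
      % (order[0], best, dist_next, confidence))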
def test_siamese_performance(model, data, labels, flat_metadata, dataname=''): r""" CommandLine: utprof.py -m ibeis_cnn --tf pz_patchmatch --db liberty --test --weights=liberty:current --arch=siaml2_128 --test python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --test --ensure python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --test --ensure --weights=new python -m ibeis_cnn --tf netrun --db liberty --arch=siaml2_128 --train --weights=new python -m ibeis_cnn --tf netrun --db pzmtest --weights=liberty:current --arch=siaml2_128 --test # NOQA python -m ibeis_cnn --tf netrun --db pzmtest --weights=liberty:current --arch=siaml2_128 """ import vtool as vt import plottool as pt # TODO: save in model.trainind_dpath/diagnostics/figures ut.colorprint('\n[siam_perf] Testing Siamese Performance', 'white') #epoch_dpath = model.get_epoch_diagnostic_dpath() epoch_dpath = model.arch_dpath ut.vd(epoch_dpath) dataname += ' ' + model.get_history_hashid() + '\n' history_text = ut.list_str(model.era_history, newlines=True) ut.write_to(ut.unixjoin(epoch_dpath, 'era_history.txt'), history_text) #if True: # import matplotlib as mpl # mpl.rcParams['agg.path.chunksize'] = 100000 #data = data[::50] #labels = labels[::50] #from ibeis_cnn import utils #data, labels = utils.random_xy_sample(data, labels, 10000, model.data_per_label_input) FULL = not ut.get_argflag('--quick') fnum_gen = pt.make_fnum_nextgen() ut.colorprint('[siam_perf] Show era history', 'white') fig = model.show_era_loss(fnum=fnum_gen()) pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180) # hack ut.colorprint('[siam_perf] Show weights image', 'white') fig = model.show_weights_image(fnum=fnum_gen()) pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180) #model.draw_all_conv_layer_weights(fnum=fnum_gen()) #model.imwrite_weights(1) #model.imwrite_weights(2) # Compute each type of score ut.colorprint('[siam_perf] Building Scores', 'white') test_outputs = model.predict2(model, data) network_output = test_outputs['network_output_determ'] # hack converting network output to distances for non-descriptor networks if len(network_output.shape) == 2 and network_output.shape[1] == 1: cnn_scores = network_output.T[0] elif len(network_output.shape) == 1: cnn_scores = network_output elif len(network_output.shape) == 2 and network_output.shape[1] > 1: assert model.data_per_label_output == 2 vecs1 = network_output[0::2] vecs2 = network_output[1::2] cnn_scores = vt.L2(vecs1, vecs2) else: assert False cnn_scores = cnn_scores.astype(np.float64) # Segfaults with the data passed in is large (AND MEMMAPPED apparently) # Fixed in hesaff implementation SIFT = FULL if SIFT: sift_scores, sift_list = test_sift_patchmatch_scores(data, labels) sift_scores = sift_scores.astype(np.float64) ut.colorprint('[siam_perf] Learning Encoders', 'white') # Learn encoders encoder_kw = { #'monotonize': False, 'monotonize': True, } cnn_encoder = vt.ScoreNormalizer(**encoder_kw) cnn_encoder.fit(cnn_scores, labels) if SIFT: sift_encoder = vt.ScoreNormalizer(**encoder_kw) sift_encoder.fit(sift_scores, labels) # Visualize ut.colorprint('[siam_perf] Visualize Encoders', 'white') viz_kw = dict( with_scores=False, with_postbayes=False, with_prebayes=False, target_tpr=.95, ) inter_cnn = cnn_encoder.visualize( figtitle=dataname + ' CNN scores. #data=' + str(len(data)), fnum=fnum_gen(), **viz_kw) if SIFT: inter_sift = sift_encoder.visualize( figtitle=dataname + ' SIFT scores. 
#data=' + str(len(data)), fnum=fnum_gen(), **viz_kw) # Save pt.save_figure(fig=inter_cnn.fig, dpath=epoch_dpath) if SIFT: pt.save_figure(fig=inter_sift.fig, dpath=epoch_dpath) # Save out examples of hard errors #cnn_fp_label_indicies, cnn_fn_label_indicies = #cnn_encoder.get_error_indicies(cnn_scores, labels) #sift_fp_label_indicies, sift_fn_label_indicies = #sift_encoder.get_error_indicies(sift_scores, labels) with_patch_examples = FULL if with_patch_examples: ut.colorprint('[siam_perf] Visualize Confusion Examples', 'white') cnn_indicies = cnn_encoder.get_confusion_indicies(cnn_scores, labels) if SIFT: sift_indicies = sift_encoder.get_confusion_indicies(sift_scores, labels) warped_patch1_list, warped_patch2_list = list(zip(*ut.ichunks(data, 2))) samp_args = (warped_patch1_list, warped_patch2_list, labels) _sample = functools.partial(draw_results.get_patch_sample_img, *samp_args) cnn_fp_img = _sample({'fs': cnn_scores}, cnn_indicies.fp)[0] cnn_fn_img = _sample({'fs': cnn_scores}, cnn_indicies.fn)[0] cnn_tp_img = _sample({'fs': cnn_scores}, cnn_indicies.tp)[0] cnn_tn_img = _sample({'fs': cnn_scores}, cnn_indicies.tn)[0] if SIFT: sift_fp_img = _sample({'fs': sift_scores}, sift_indicies.fp)[0] sift_fn_img = _sample({'fs': sift_scores}, sift_indicies.fn)[0] sift_tp_img = _sample({'fs': sift_scores}, sift_indicies.tp)[0] sift_tn_img = _sample({'fs': sift_scores}, sift_indicies.tn)[0] #if ut.show_was_requested(): #def rectify(arr): # return np.flipud(arr) SINGLE_FIG = False if SINGLE_FIG: def dump_img(img_, lbl, fnum): fig, ax = pt.imshow(img_, figtitle=dataname + ' ' + lbl, fnum=fnum) pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180) dump_img(cnn_fp_img, 'cnn_fp_img', fnum_gen()) dump_img(cnn_fn_img, 'cnn_fn_img', fnum_gen()) dump_img(cnn_tp_img, 'cnn_tp_img', fnum_gen()) dump_img(cnn_tn_img, 'cnn_tn_img', fnum_gen()) dump_img(sift_fp_img, 'sift_fp_img', fnum_gen()) dump_img(sift_fn_img, 'sift_fn_img', fnum_gen()) dump_img(sift_tp_img, 'sift_tp_img', fnum_gen()) dump_img(sift_tn_img, 'sift_tn_img', fnum_gen()) #vt.imwrite(dataname + '_' + 'cnn_fp_img.png', (cnn_fp_img)) #vt.imwrite(dataname + '_' + 'cnn_fn_img.png', (cnn_fn_img)) #vt.imwrite(dataname + '_' + 'sift_fp_img.png', (sift_fp_img)) #vt.imwrite(dataname + '_' + 'sift_fn_img.png', (sift_fn_img)) else: print('Drawing TP FP TN FN') fnum = fnum_gen() pnum_gen = pt.make_pnum_nextgen(4, 2) fig = pt.figure(fnum) pt.imshow(cnn_fp_img, title='CNN FP', fnum=fnum, pnum=pnum_gen()) pt.imshow(sift_fp_img, title='SIFT FP', fnum=fnum, pnum=pnum_gen()) pt.imshow(cnn_fn_img, title='CNN FN', fnum=fnum, pnum=pnum_gen()) pt.imshow(sift_fn_img, title='SIFT FN', fnum=fnum, pnum=pnum_gen()) pt.imshow(cnn_tp_img, title='CNN TP', fnum=fnum, pnum=pnum_gen()) pt.imshow(sift_tp_img, title='SIFT TP', fnum=fnum, pnum=pnum_gen()) pt.imshow(cnn_tn_img, title='CNN TN', fnum=fnum, pnum=pnum_gen()) pt.imshow(sift_tn_img, title='SIFT TN', fnum=fnum, pnum=pnum_gen()) pt.set_figtitle(dataname + ' confusions') pt.adjust_subplots(left=0, right=1.0, bottom=0., wspace=.01, hspace=.05) pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180, figsize=(9, 18)) with_patch_desc = FULL if with_patch_desc: ut.colorprint('[siam_perf] Visualize Patch Descriptors', 'white') fnum = fnum_gen() fig = pt.figure(fnum=fnum, pnum=(1, 1, 1)) num_rows = 7 pnum_gen = pt.make_pnum_nextgen(num_rows, 3) # Compare actual output descriptors for index in ut.random_indexes(len(sift_list), num_rows): vec_sift = sift_list[index] vec_cnn = network_output[index] patch = data[index] pt.imshow(patch, fnum=fnum, 
pnum=pnum_gen()) pt.plot_descriptor_signature(vec_cnn, 'cnn vec', fnum=fnum, pnum=pnum_gen()) pt.plot_sift_signature(vec_sift, 'sift vec', fnum=fnum, pnum=pnum_gen()) pt.set_figtitle('Patch Descriptors') pt.adjust_subplots(left=0, right=0.95, bottom=0., wspace=.1, hspace=.15) pt.save_figure(fig=fig, dpath=epoch_dpath, dpi=180, figsize=(9, 18))
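# test_siamese_performance turns descriptor-style network output into match
# scores by pairing even/odd rows and taking a per-pair L2 distance
# (vt.L2(vecs1, vecs2) above). A numpy sketch of that conversion follows,
# assuming vt.L2 is a row-wise Euclidean distance, which is how it is used
# here; the network output is random placeholder data.
import numpy as np

num_pairs = 5
network_output = np.random.rand(2 * num_pairs, 128)

vecs1 = network_output[0::2]    # descriptor of the first patch in each pair
vecs2 = network_output[1::2]    # descriptor of the second patch in each pair
cnn_scores = np.sqrt(((vecs1 - vecs2) ** 2).sum(axis=1))  # one distance per pair
assert cnn_scores.shape == (num_pairs,)
print(cnn_scores)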
def ewma(): import plottool as pt import ubelt as ub import numpy as np pt.qtensure() # Investigate the span parameter span = 20 alpha = 2 / (span + 1) # how long does it take for the estimation to hit 0? # (ie, it no longer cares about the initial 1?) # about 93 iterations to get to 1e-4 # about 47 iterations to get to 1e-2 # about 24 iterations to get to 1e-1 # 20 iterations goes to .135 data = ([1] + [0] * 20 + [1] * 40 + [0] * 20 + [1] * 50 + [0] * 20 + [1] * 60 + [0] * 20 + [1] * 165 + [0] * 20 + [0]) mave = [] iter_ = iter(data) current = next(iter_) mave += [current] for x in iter_: current = (alpha * x) + (1 - alpha) * current mave += [current] if False: pt.figure(fnum=1, doclf=True) pt.plot(data) pt.plot(mave) np.where(np.array(mave) < 1e-1) import sympy as sym # span, alpha, n = sym.symbols('span, alpha, n') n = sym.symbols('n', integer=True, nonnegative=True, finite=True) span = sym.symbols('span', integer=True, nonnegative=True, finite=True) thresh = sym.symbols('thresh', real=True, nonnegative=True, finite=True) # alpha = 2 / (span + 1) a, b, c = sym.symbols('a, b, c', real=True, nonnegative=True, finite=True) sym.solve(sym.Eq(b**a, c), a) current = 1 x = 0 steps = [] for _ in range(10): current = (alpha * x) + (1 - alpha) * current steps.append(current) alpha = sym.symbols('alpha', real=True, nonnegative=True, finite=True) base = sym.symbols('base', real=True, finite=True) alpha = 2 / (span + 1) thresh_expr = (1 - alpha)**n thresthresh_exprh_expr = base**n n_expr = sym.ceiling(sym.log(thresh) / sym.log(1 - 2 / (span + 1))) sym.pprint(sym.simplify(thresh_expr)) sym.pprint(sym.simplify(n_expr)) print(sym.latex(sym.simplify(n_expr))) # def calc_n2(span, thresh): # return np.log(thresh) / np.log(1 - 2 / (span + 1)) def calc_n(span, thresh): return np.log(thresh) / np.log((span - 1) / (span + 1)) def calc_thresh_val(n, span): alpha = 2 / (span + 1) return (1 - alpha)**n span = np.arange(2, 200) n_frac = calc_n(span, thresh=.5) n = np.ceil(n_frac) calc_thresh_val(n, span) pt.figure(fnum=1, doclf=True) ydatas = ut.odict([('thresh=%f' % thresh, np.ceil(calc_n(span, thresh=thresh))) for thresh in [1e-3, .01, .1, .2, .3, .4, .5]]) pt.multi_plot( span, ydatas, xlabel='span', ylabel='n iters to acheive thresh', marker='', # num_xticks=len(span), fnum=1) pt.gca().set_aspect('equal') def both_sides(eqn, func): return sym.Eq(func(eqn.lhs), func(eqn.rhs)) eqn = sym.Eq(thresh_expr, thresh) n_expr = sym.solve(eqn, n)[0].subs(base, (1 - alpha)).subs(alpha, (2 / (span + 1))) eqn = both_sides(eqn, lambda x: sym.log(x, (1 - alpha))) lhs = eqn.lhs from sympy.solvers.inequalities import solve_univariate_inequality def eval_expr(span_value, n_value): return np.array( [thresh_expr.subs(span, span_value).subs(n, n_) for n_ in n_value], dtype=np.float) eval_expr(20, np.arange(20)) def linear(x, a, b): return a * x + b def sigmoidal_4pl(x, a, b, c, d): return d + (a - d) / (1 + (x / c)**b) def exponential(x, a, b, c): return a + b * np.exp(-c * x) import scipy.optimize # Determine how to choose span, such that you get to .01 from 1 # in n timesteps thresh_to_span_to_n = [] thresh_to_n_to_span = [] for thresh_value in ub.ProgIter([.0001, .001, .01, .1, .2, .3, .4, .5]): print('') test_vals = sorted([2, 3, 4, 5, 6]) n_to_span = [] for n_value in ub.ProgIter(test_vals): # In n iterations I want to choose a span that the expression go # less than a threshold constraint = thresh_expr.subs(n, n_value) < thresh_value solution = solve_univariate_inequality(constraint, span) try: lowbound = 
np.ceil(float(solution.args[0].lhs)) highbound = np.floor(float(solution.args[1].rhs)) assert lowbound <= highbound span_value = lowbound except AttributeError: span_value = np.floor(float(solution.rhs)) n_to_span.append((n_value, span_value)) # Given a threshold, find a minimum number of steps # that brings you up to that threshold given a span test_vals = sorted(set(list(range(2, 1000, 50)) + [2, 3, 4, 5, 6])) span_to_n = [] for span_value in ub.ProgIter(test_vals): constraint = thresh_expr.subs(span, span_value) < thresh_value solution = solve_univariate_inequality(constraint, n) n_value = solution.lhs span_to_n.append((span_value, n_value)) thresh_to_n_to_span.append((thresh_value, n_to_span)) thresh_to_span_to_n.append((thresh_value, span_to_n)) thresh_to_params = [] for thresh_value, span_to_n in thresh_to_span_to_n: xdata, ydata = [np.array(_, dtype=np.float) for _ in zip(*span_to_n)] p0 = (1 / np.diff((ydata - ydata[0])[1:]).mean(), ydata[0]) func = linear popt, pcov = scipy.optimize.curve_fit(func, xdata, ydata, p0) # popt, pcov = scipy.optimize.curve_fit(exponential, xdata, ydata) if False: yhat = func(xdata, *popt) pt.figure(fnum=1, doclf=True) pt.plot(xdata, ydata, label='measured') pt.plot(xdata, yhat, label='predicteed') pt.legend() # slope = np.diff(ydata).mean() # pt.plot(d) thresh_to_params.append((thresh_value, popt)) # pt.plt.plot(*zip(*thresh_to_slope), 'x-') # for thresh_value=.01, we get a rough line with slop ~2.302, # for thresh_value=.5, we get a line with slop ~34.66 # if we want to get to 0 in n timesteps, with a thresh_value of # choose span=f(thresh_value) * (n + 2)) # f is some inverse exponential # 0.0001, 460.551314197147 # 0.001, 345.413485647860, # 0.01, 230.275657098573, # 0.1, 115.137828549287, # 0.2, 80.4778885203347, # 0.3, 60.2031233261536, # 0.4, 45.8179484913827, # 0.5, 34.6599400289520 # Seems to be 4PL symetrical sigmoid # f(x) = -66500.85 + (66515.88 - -66500.85) / (1 + (x/0.8604672)^0.001503716) # f(x) = -66500.85 + (66515.88 - -66500.85)/(1 + (x/0.8604672)^0.001503716) def f(x): return -66500.85 + (66515.88 - -66500.85) / (1 + (x / 0.8604672)**0.001503716) # return (10000 * (-6.65 + (13.3015) / (1 + (x/0.86) ** 0.00150))) # f(.5) * (n - 1) # f( solve_rational_inequalities(thresh_expr < .01, n)
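# Numeric check of the closed form worked out above: with alpha = 2 / (span + 1),
# an EWMA seeded at 1 and then fed zeros decays as (1 - alpha)**n, so it first
# drops below `thresh` at n = ceil(log(thresh) / log((span - 1) / (span + 1))).
# This is a standalone sketch; the span and thresh values are just examples.
import numpy as np

def n_iters_to_decay(span, thresh):
    return int(np.ceil(np.log(thresh) / np.log((span - 1) / (span + 1))))

span, thresh = 20, 1e-1
alpha = 2.0 / (span + 1)

# iterate the filter directly and count steps until it falls below thresh
current, n_direct = 1.0, 0
while current >= thresh:
    current = (alpha * 0) + (1 - alpha) * current
    n_direct += 1

print(n_direct, n_iters_to_decay(span, thresh))  # both 24 for span=20, thresh=0.1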
def show_qres(ibs, cm, qreq_=None, **kwargs): """ Display Query Result Logic Defaults to: query chip, groundtruth matches, and top matches Args: ibs (ibeis.IBEISController): ibeis controller object cm (ibeis.ChipMatch): object of feature correspondences and scores Kwargs: in_image (bool) show result in image view if True else chip view annot_mode (int): if annot_mode == 0, then draw lines and ellipse elif annot_mode == 1, then dont draw lines or ellipse elif annot_mode == 2, then draw only lines See: viz_matches.show_name_matches, viz_helpers.get_query_text Returns: mpl.Figure: fig CommandLine: ./main.py --query 1 -y --db PZ_MTEST --noshow-qtres python -m ibeis.viz.viz_qres --test-show_qres --show python -m ibeis.viz.viz_qres --test-show_qres --show --top-aids=10 --db=PZ_MTEST --sidebyside --annot_mode=0 --notitle --no-viz_name_score --qaids=5 --max_nCols=2 --adjust=.01,.01,.01 python -m ibeis.viz.viz_qres --test-show_qres --show --top-aids=10 --db=PZ_MTEST --sidebyside --annot_mode=0 --notitle --no-viz_name_score --qaids=5 --max_nCols=2 --adjust=.01,.01,.01 Example: >>> # DISABLE_DOCTEST >>> from ibeis.viz.viz_qres import * # NOQA >>> import plottool as pt >>> ibs, cm, qreq_, kwargs = testdata_show_qres() >>> # execute function >>> fig = show_qres(ibs, cm, show_query=False, qreq_=qreq_, **kwargs) >>> # verify results >>> #fig.show() >>> pt.show_if_requested() """ #ut.print_dict(kwargs) annot_mode = kwargs.get('annot_mode', 1) % 3 # this is toggled figtitle = kwargs.get('figtitle', '') make_figtitle = kwargs.get('make_figtitle', False) aug = kwargs.get('aug', '') top_aids = kwargs.get('top_aids', DEFAULT_NTOP) gt_aids = kwargs.get('gt_aids', []) all_kpts = kwargs.get('all_kpts', False) show_query = kwargs.get('show_query', False) in_image = kwargs.get('in_image', False) sidebyside = kwargs.get('sidebyside', True) #name_scoring = kwargs.get('name_scoring', False) viz_name_score = kwargs.get('viz_name_score', qreq_ is not None) max_nCols = kwargs.get('max_nCols', None) failed_to_match = kwargs.get('failed_to_match', False) fnum = pt.ensure_fnum(kwargs.get('fnum', None)) if ut.VERBOSE and ut.NOT_QUIET: print('query_info = ' + ut.obj_str( ibs.get_annot_info(cm.qaid, default=True, gname=False, name=False, notes=False, exemplar=False), nl=4)) print('top_aids_info = ' + ut.obj_str( ibs.get_annot_info(top_aids, default=True, gname=False, name=False, notes=False, exemplar=False, reference_aid=cm.qaid), nl=4)) if make_figtitle is True: pass #figtitle = cm.make_title(pack=True) #figtitle fig = pt.figure(fnum=fnum, docla=True, doclf=True) if isinstance(top_aids, int): #if isinstance(cm, chip_match.ChipMatch): top_aids = cm.get_top_aids(top_aids) #else: # top_aids = cm.get_top_aids(num=top_aids, name_scoring=name_scoring, ibs=ibs) if failed_to_match: # HACK to visually indicate failure to match in analysis top_aids = [None] + top_aids nTop = len(top_aids) if max_nCols is None: max_nCols = 5 if nTop in [6, 7]: max_nCols = 3 if nTop in [8]: max_nCols = 4 try: assert len(list(set(top_aids).intersection(set(gt_aids)))) == 0, ( 'gts should be missed. 
not in top') except AssertionError as ex: ut.printex(ex, keys=['top_aids', 'gt_aids']) raise if ut.DEBUG2: print(cm.get_inspect_str()) #-------------------------------------------------- # Get grid / cell information to build subplot grid #-------------------------------------------------- # Show query or not nQuerySubplts = 1 if show_query else 0 # The top row is given slots for ground truths and querys # all aids in gt_aids should not be in top aids nGtSubplts = nQuerySubplts + (0 if gt_aids is None else len(gt_aids)) # The bottom rows are for the top results nTopNSubplts = nTop nTopNCols = min(max_nCols, nTopNSubplts) nGTCols = min(max_nCols, nGtSubplts) nGTCols = max(nGTCols, nTopNCols) nTopNCols = nGTCols # Get number of rows to show groundtruth nGtRows = 0 if nGTCols == 0 else int(np.ceil(nGtSubplts / nGTCols)) # Get number of rows to show results nTopNRows = 0 if nTopNCols == 0 else int(np.ceil(nTopNSubplts / nTopNCols)) nGtCells = nGtRows * nGTCols # Total number of rows nRows = nTopNRows + nGtRows DEBUG_SHOW_QRES = 0 if DEBUG_SHOW_QRES: allgt_aids = ibs.get_annot_groundtruth(cm.qaid) nSelGt = len(gt_aids) nAllGt = len(allgt_aids) print('[show_qres]========================') print('[show_qres]----------------') print('[show_qres] * annot_mode=%r' % (annot_mode,)) print('[show_qres] #nTop=%r #missed_gts=%r/%r' % (nTop, nSelGt, nAllGt)) print('[show_qres] * -----') print('[show_qres] * nRows=%r' % (nRows,)) print('[show_qres] * nGtSubplts=%r' % (nGtSubplts,)) print('[show_qres] * nTopNSubplts=%r' % (nTopNSubplts,)) print('[show_qres] * nQuerySubplts=%r' % (nQuerySubplts,)) print('[show_qres] * -----') print('[show_qres] * nGTCols=%r' % (nGTCols,)) print('[show_qres] * -----') print('[show_qres] * fnum=%r' % (fnum,)) print('[show_qres] * figtitle=%r' % (figtitle,)) print('[show_qres] * max_nCols=%r' % (max_nCols,)) print('[show_qres] * show_query=%r' % (show_query,)) print('[show_qres] * kwargs=%s' % (ut.dict_str(kwargs),)) # HACK: _color_list = pt.distinct_colors(nTop) aid2_color = {aid: _color_list[ox] for ox, aid in enumerate(top_aids)} ranked_aids = cm.get_top_aids() # Helpers def _show_query_fn(plotx_shift, rowcols): """ helper for show_qres """ plotx = plotx_shift + 1 pnum = (rowcols[0], rowcols[1], plotx) #print('[viz] Plotting Query: pnum=%r' % (pnum,)) _kwshow = dict(draw_kpts=annot_mode) _kwshow.update(kwargs) _kwshow['prefix'] = 'q' _kwshow['pnum'] = pnum _kwshow['aid2_color'] = aid2_color _kwshow['draw_ell'] = annot_mode >= 1 viz_chip.show_chip(ibs, cm.qaid, annote=False, qreq_=qreq_, **_kwshow) def _plot_matches_aids(aid_list, plotx_shift, rowcols): """ helper for show_qres to draw many aids """ _kwshow = dict(draw_ell=annot_mode, draw_pts=False, draw_lines=annot_mode, ell_alpha=.5, all_kpts=all_kpts) _kwshow.update(kwargs) _kwshow['fnum'] = fnum _kwshow['in_image'] = in_image if sidebyside: # Draw each match side by side the query _kwshow['draw_ell'] = annot_mode == 1 _kwshow['draw_lines'] = annot_mode >= 1 else: #print('annot_mode = %r' % (annot_mode,)) _kwshow['draw_ell'] = annot_mode == 1 #_kwshow['draw_pts'] = annot_mode >= 1 #_kwshow['draw_lines'] = False _kwshow['show_query'] = False def _show_matches_fn(aid, orank, pnum): """ Helper function for drawing matches to one aid """ aug = 'rank=%r\n' % orank _kwshow['pnum'] = pnum _kwshow['title_aug'] = aug #draw_ell = annot_mode == 1 #draw_lines = annot_mode >= 1 # If we already are showing the query dont show it here if sidebyside: # Draw each match side by side the query if viz_name_score: 
cm.show_single_namematch(qreq_, ibs.get_annot_nids(aid), **_kwshow) else: _kwshow['draw_border'] = False _kwshow['draw_lbl'] = False _kwshow['notitle'] = True _kwshow['vert'] = False cm.show_single_annotmatch(qreq_, aid, **_kwshow) #viz_matches.show_matches(ibs, cm, aid, qreq_=qreq_, **_kwshow) else: # Draw each match by themselves data_config2_ = None if qreq_ is None else qreq_.get_external_data_config2() #_kwshow['draw_border'] = kwargs.get('draw_border', True) #_kwshow['notitle'] = ut.get_argflag(('--no-title', '--notitle')) viz_chip.show_chip(ibs, aid, annote=False, notitle=True, data_config2_=data_config2_, **_kwshow) if DEBUG_SHOW_QRES: print('[show_qres()] Plotting Chips %s:' % vh.get_aidstrs(aid_list)) if aid_list is None: return # Do lazy load before show #data_config2_ = None if qreq_ is None else qreq_.get_external_data_config2() tblhack = getattr(qreq_, 'tablename', None) # HACK FOR HUMPBACKS # (Also in viz_matches) if tblhack == 'vsone' or (qreq_ is not None and not qreq_._isnewreq): # precompute pass #ibs.get_annot_chips(aid_list, config2_=data_config2_, ensure=True) #ibs.get_annot_kpts(aid_list, config2_=data_config2_, ensure=True) for ox, aid in enumerate(aid_list): plotx = ox + plotx_shift + 1 pnum = (rowcols[0], rowcols[1], plotx) oranks = np.where(ranked_aids == aid)[0] # This pair has no matches between them. if len(oranks) == 0: orank = -1 if aid is None: pt.imshow_null('Failed to find matches\nfor qaid=%r' % (cm.qaid), fnum=fnum, pnum=pnum, fontsize=18) else: _show_matches_fn(aid, orank, pnum) #if DEBUG_SHOW_QRES: # print('skipping pnum=%r' % (pnum,)) continue if DEBUG_SHOW_QRES: print('pnum=%r' % (pnum,)) orank = oranks[0] + 1 _show_matches_fn(aid, orank, pnum) shift_topN = nGtCells if nGtSubplts == 1: nGTCols = 1 if nRows == 0: pt.imshow_null('[viz_qres] No matches. nRows=0', fnum=fnum) else: fig = pt.figure(fnum=fnum, pnum=(nRows, nGTCols, 1), docla=True, doclf=True) pt.plt.subplot(nRows, nGTCols, 1) # Plot Query if show_query: _show_query_fn(0, (nRows, nGTCols)) # Plot Ground Truth (if given) _plot_matches_aids(gt_aids, nQuerySubplts, (nRows, nGTCols)) # Plot Results _plot_matches_aids(top_aids, shift_topN, (nRows, nTopNCols)) figtitle += aug if failed_to_match: figtitle += '\n No matches found' incanvas = kwargs.get('with_figtitle', not vh.NO_LBL_OVERRIDE) pt.set_figtitle(figtitle, incanvas=incanvas) # Result Interaction return fig
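# The subplot bookkeeping in show_qres splits the figure into a ground-truth
# band (optionally including the query) on top and a top-N band below, with
# both bands forced to share one column count capped at max_nCols. A small
# sketch of just that arithmetic with invented counts:
import numpy as np

show_query, n_gt, n_top, max_nCols = True, 3, 10, 5

nQuerySubplts = 1 if show_query else 0
nGtSubplts = nQuerySubplts + n_gt     # query + ground-truth cells
nTopNSubplts = n_top                  # ranked-result cells

nTopNCols = min(max_nCols, nTopNSubplts)
nGTCols = min(max_nCols, nGtSubplts)
nGTCols = max(nGTCols, nTopNCols)     # both bands share the wider column count
nTopNCols = nGTCols

nGtRows = 0 if nGTCols == 0 else int(np.ceil(nGtSubplts / float(nGTCols)))
nTopNRows = 0 if nTopNCols == 0 else int(np.ceil(nTopNSubplts / float(nTopNCols)))
nRows = nGtRows + nTopNRows
print('grid = %d rows x %d cols' % (nRows, nGTCols))  # 3 rows x 5 cols here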
def bayesnet(): """ References: https://class.coursera.org/pgm-003/lecture/17 http://www.cs.ubc.ca/~murphyk/Bayes/bnintro.html http://www3.cs.stonybrook.edu/~sael/teaching/cse537/Slides/chapter14d_BP.pdf http://www.cse.unsw.edu.au/~cs9417ml/Bayes/Pages/PearlPropagation.html https://github.com/pgmpy/pgmpy.git http://pgmpy.readthedocs.org/en/latest/ http://nipy.bic.berkeley.edu:5000/download/11 """ # import operator as op # # Enumerate all possible events # varcard_list = list(map(op.attrgetter('variable_card'), cpd_list)) # _esdat = list(ut.iprod(*map(range, varcard_list))) # _escol = list(map(op.attrgetter('variable'), cpd_list)) # event_space = pd.DataFrame(_esdat, columns=_escol) # # Custom compression of event space to inspect a specific graph # def compress_space_flags(event_space, var1, var2, var3, cmp12_): # """ # var1, var2, cmp_ = 'Lj', 'Lk', op.eq # """ # import vtool as vt # data = event_space # other_cols = ut.setdiff_ordered(data.columns.tolist(), [var1, var2, var3]) # case_flags12 = cmp12_(data[var1], data[var2]).values # # case_flags23 = cmp23_(data[var2], data[var3]).values # # case_flags = np.logical_and(case_flags12, case_flags23) # case_flags = case_flags12 # case_flags = case_flags.astype(np.int64) # subspace = np.hstack((case_flags[:, None], data[other_cols].values)) # sel_ = vt.unique_row_indexes(subspace) # flags = np.logical_and(mask, case_flags) # return flags # # Build special cases # case_same = event_space.loc[compress_space_flags(event_space, 'Li', 'Lj', 'Lk', op.eq)] # case_diff = event_space.loc[compress_space_flags(event_space, 'Li', 'Lj', 'Lk', op.ne)] # special_cases = [ # case_same, # case_diff, # ] from pgmpy.factors import TabularCPD from pgmpy.models import BayesianModel import pandas as pd from pgmpy.inference import BeliefPropagation # NOQA from pgmpy.inference import VariableElimination # NOQA name_nice = ['n1', 'n2', 'n3'] score_nice = ['low', 'high'] match_nice = ['diff', 'same'] num_names = len(name_nice) num_scores = len(score_nice) nid_basis = list(range(num_names)) score_basis = list(range(num_scores)) semtype2_nice = { 'score': score_nice, 'name': name_nice, 'match': match_nice, } var2_cpd = { } globals()['semtype2_nice'] = semtype2_nice globals()['var2_cpd'] = var2_cpd name_combo = np.array(list(ut.iprod(nid_basis, nid_basis))) combo_is_same = name_combo.T[0] == name_combo.T[1] def get_expected_scores_prob(level1, level2): part1 = combo_is_same * level1 part2 = (1 - combo_is_same) * (1 - (level2)) expected_scores_level = part1 + part2 return expected_scores_level # def make_cpd(): def name_cpd(aid): from pgmpy.factors import TabularCPD cpd = TabularCPD( variable='N' + aid, variable_card=num_names, values=[[1.0 / num_names] * num_names]) cpd.semtype = 'name' return cpd name_cpds = [name_cpd('i'), name_cpd('j'), name_cpd('k')] var2_cpd.update(dict(zip([cpd.variable for cpd in name_cpds], name_cpds))) if True: num_same_diff = 2 samediff_measure = np.array([ # get_expected_scores_prob(.12, .2), # get_expected_scores_prob(.88, .8), get_expected_scores_prob(0, 0), get_expected_scores_prob(1, 1), ]) samediff_vals = (samediff_measure / samediff_measure.sum(axis=0)).tolist() def samediff_cpd(aid1, aid2): cpd = TabularCPD( variable='A' + aid1 + aid2, variable_card=num_same_diff, values=samediff_vals, evidence=['N' + aid1, 'N' + aid2], # [::-1], evidence_card=[num_names, num_names]) # [::-1]) cpd.semtype = 'match' return cpd samediff_cpds = [samediff_cpd('i', 'j'), samediff_cpd('j', 'k'), samediff_cpd('k', 'i')] var2_cpd.update(dict(zip([cpd.variable 
for cpd in samediff_cpds], samediff_cpds))) if True: def score_cpd(aid1, aid2): semtype = 'score' evidence = ['A' + aid1 + aid2, 'N' + aid1, 'N' + aid2] evidence_cpds = [var2_cpd[key] for key in evidence] evidence_nice = [semtype2_nice[cpd.semtype] for cpd in evidence_cpds] evidence_card = list(map(len, evidence_nice)) evidence_states = list(ut.iprod(*evidence_nice)) variable_basis = semtype2_nice[semtype] variable_values = [] for mystate in variable_basis: row = [] for state in evidence_states: if state[0] == state[1]: if state[2] == 'same': val = .2 if mystate == 'low' else .8 else: val = 1 # val = .5 if mystate == 'low' else .5 elif state[0] != state[1]: if state[2] == 'same': val = .5 if mystate == 'low' else .5 else: val = 1 # val = .9 if mystate == 'low' else .1 row.append(val) variable_values.append(row) cpd = TabularCPD( variable='S' + aid1 + aid2, variable_card=len(variable_basis), values=variable_values, evidence=evidence, # [::-1], evidence_card=evidence_card) # [::-1]) cpd.semtype = semtype return cpd else: score_values = [ [.8, .1], [.2, .9], ] def score_cpd(aid1, aid2): cpd = TabularCPD( variable='S' + aid1 + aid2, variable_card=num_scores, values=score_values, evidence=['A' + aid1 + aid2], # [::-1], evidence_card=[num_same_diff]) # [::-1]) cpd.semtype = 'score' return cpd score_cpds = [score_cpd('i', 'j'), score_cpd('j', 'k')] cpd_list = name_cpds + score_cpds + samediff_cpds else: score_measure = np.array([get_expected_scores_prob(level1, level2) for level1, level2 in zip(np.linspace(.1, .9, num_scores), np.linspace(.2, .8, num_scores))]) score_values = (score_measure / score_measure.sum(axis=0)).tolist() def score_cpd(aid1, aid2): cpd = TabularCPD( variable='S' + aid1 + aid2, variable_card=num_scores, values=score_values, evidence=['N' + aid1, 'N' + aid2], evidence_card=[num_names, num_names]) cpd.semtype = 'score' return cpd score_cpds = [score_cpd('i', 'j'), score_cpd('j', 'k')] cpd_list = name_cpds + score_cpds pass input_graph = [] for cpd in cpd_list: if cpd.evidence is not None: for evar in cpd.evidence: input_graph.append((evar, cpd.variable)) name_model = BayesianModel(input_graph) name_model.add_cpds(*cpd_list) var2_cpd.update(dict(zip([cpd.variable for cpd in cpd_list], cpd_list))) globals()['var2_cpd'] = var2_cpd varnames = [cpd.variable for cpd in cpd_list] # --- PRINT CPDS --- cpd = score_cpds[0] def print_cpd(cpd): print('CPT: %r' % (cpd,)) index = semtype2_nice[cpd.semtype] if cpd.evidence is None: columns = ['None'] else: basis_lists = [semtype2_nice[var2_cpd[ename].semtype] for ename in cpd.evidence] columns = [','.join(x) for x in ut.iprod(*basis_lists)] data = cpd.get_cpd() print(pd.DataFrame(data, index=index, columns=columns)) for cpd in name_model.get_cpds(): print('----') print(cpd._str('phi')) print_cpd(cpd) # --- INFERENCE --- Ni = name_cpds[0] event_space_combos = {} event_space_combos[Ni.variable] = 0 # Set ni to always be Fred for cpd in cpd_list: if cpd.semtype == 'score': event_space_combos[cpd.variable] = list(range(cpd.variable_card)) evidence_dict = ut.all_dict_combinations(event_space_combos) # Query about name of annotation k given different event space params def pretty_evidence(evidence): return [key + '=' + str(semtype2_nice[var2_cpd[key].semtype][val]) for key, val in evidence.items()] def print_factor(factor): row_cards = factor.cardinality row_vars = factor.variables values = factor.values.reshape(np.prod(row_cards), 1).flatten() # col_cards = 1 # col_vars = [''] basis_lists = list(zip(*list(ut.iprod(*[range(c) for c in 
row_cards])))) nice_basis_lists = [] for varname, basis in zip(row_vars, basis_lists): cpd = var2_cpd[varname] _nice_basis = ut.take(semtype2_nice[cpd.semtype], basis) nice_basis = ['%s=%s' % (varname, val) for val in _nice_basis] nice_basis_lists.append(nice_basis) row_lbls = [', '.join(sorted(x)) for x in zip(*nice_basis_lists)] print(ut.repr3(dict(zip(row_lbls, values)), precision=3, align=True, key_order_metric='-val')) # name_belief = BeliefPropagation(name_model) name_belief = VariableElimination(name_model) import pgmpy import six # NOQA def try_query(evidence): print('--------') query_vars = ut.setdiff_ordered(varnames, list(evidence.keys())) evidence_str = ', '.join(pretty_evidence(evidence)) probs = name_belief.query(query_vars, evidence) factor_list = probs.values() joint_factor = pgmpy.factors.factor_product(*factor_list) print('P(' + ', '.join(query_vars) + ' | ' + evidence_str + ')') # print(six.text_type(joint_factor)) factor = joint_factor # NOQA # print_factor(factor) # import utool as ut print(ut.hz_str([(f._str(phi_or_p='phi')) for f in factor_list])) for evidence in evidence_dict: try_query(evidence) evidence = {'Aij': 1, 'Ajk': 1, 'Aki': 1, 'Ni': 0} try_query(evidence) evidence = {'Aij': 0, 'Ajk': 0, 'Aki': 0, 'Ni': 0} try_query(evidence) globals()['score_nice'] = score_nice globals()['name_nice'] = name_nice globals()['score_basis'] = score_basis globals()['nid_basis'] = nid_basis print('Independencies') print(name_model.get_independencies()) print(name_model.local_independencies([Ni.variable])) # name_belief = BeliefPropagation(name_model) # # name_belief = VariableElimination(name_model) # for case in special_cases: # test_data = case.drop('Lk', axis=1) # test_data = test_data.reset_index(drop=True) # print('----') # for i in range(test_data.shape[0]): # evidence = test_data.loc[i].to_dict() # probs = name_belief.query(['Lk'], evidence) # factor = probs['Lk'] # probs = factor.values # evidence_ = evidence.copy() # evidence_['Li'] = name_nice[evidence['Li']] # evidence_['Lj'] = name_nice[evidence['Lj']] # evidence_['Sij'] = score_nice[evidence['Sij']] # evidence_['Sjk'] = score_nice[evidence['Sjk']] # nice2_prob = ut.odict(zip(name_nice, probs.tolist())) # ut.print_python_code('P(Lk | {evidence}) = {cpt}'.format( # evidence=(ut.repr2(evidence_, explicit=True, nobraces=True, strvals=True)), # cpt=ut.repr3(nice2_prob, precision=3, align=True, key_order_metric='-val') # )) # for case in special_cases: # test_data = case.drop('Lk', axis=1) # test_data = test_data.drop('Lj', axis=1) # test_data = test_data.reset_index(drop=True) # print('----') # for i in range(test_data.shape[0]): # evidence = test_data.loc[i].to_dict() # query_vars = ['Lk', 'Lj'] # probs = name_belief.query(query_vars, evidence) # for queryvar in query_vars: # factor = probs[queryvar] # print(factor._str('phi')) # probs = factor.values # evidence_ = evidence.copy() # evidence_['Li'] = name_nice[evidence['Li']] # evidence_['Sij'] = score_nice[evidence['Sij']] # evidence_['Sjk'] = score_nice[evidence['Sjk']] # nice2_prob = ut.odict(zip([queryvar + '=' + x for x in name_nice], probs.tolist())) # ut.print_python_code('P({queryvar} | {evidence}) = {cpt}'.format( # query_var=query_var, # evidence=(ut.repr2(evidence_, explicit=True, nobraces=True, strvals=True)), # cpt=ut.repr3(nice2_prob, precision=3, align=True, key_order_metric='-val') # )) # _ draw model import plottool as pt import networkx as netx fig = pt.figure() # NOQA fig.clf() ax = pt.gca() netx_nodes = [(node, {}) for node in name_model.nodes()] 
netx_edges = [(etup[0], etup[1], {}) for etup in name_model.edges()] netx_graph = netx.DiGraph() netx_graph.add_nodes_from(netx_nodes) netx_graph.add_edges_from(netx_edges) # pos = netx.graphviz_layout(netx_graph) pos = netx.pydot_layout(netx_graph, prog='dot') netx.draw(netx_graph, pos=pos, ax=ax, with_labels=True) pt.plt.savefig('foo.png') ut.startfile('foo.png')
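# A toy-scale sketch of the same pgmpy pattern bayesnet() builds: name nodes
# with uniform priors feeding a score node, then a query with
# VariableElimination. It follows the older pgmpy import paths and value shapes
# that the function above assumes (newer pgmpy releases moved and renamed these
# classes), so treat it as illustrative rather than canonical.
from pgmpy.factors import TabularCPD
from pgmpy.models import BayesianModel
from pgmpy.inference import VariableElimination

num_names = 2
cpd_ni = TabularCPD(variable='Ni', variable_card=num_names, values=[[0.5, 0.5]])
cpd_nj = TabularCPD(variable='Nj', variable_card=num_names, values=[[0.5, 0.5]])
# P(Sij | Ni, Nj): a high score is likely when the two names agree. Columns
# enumerate the joint evidence states; by symmetry the two "different name"
# columns are identical, so the exact column ordering does not matter here.
cpd_sij = TabularCPD(variable='Sij', variable_card=2,
                     values=[[0.2, 0.9, 0.9, 0.2],   # P(Sij=low  | Ni, Nj)
                             [0.8, 0.1, 0.1, 0.8]],  # P(Sij=high | Ni, Nj)
                     evidence=['Ni', 'Nj'], evidence_card=[2, 2])

model = BayesianModel([('Ni', 'Sij'), ('Nj', 'Sij')])
model.add_cpds(cpd_ni, cpd_nj, cpd_sij)

# Observing a high score and Ni=0 should pull P(Nj=0) above its 0.5 prior.
infer = VariableElimination(model)
posterior = infer.query(['Nj'], evidence={'Sij': 1, 'Ni': 0})
print(posterior['Nj'])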
def draw_bayesian_model(model, evidence={}, soft_evidence={}, fnum=None, pnum=None, **kwargs): from pgmpy.models import BayesianModel if not isinstance(model, BayesianModel): model = model.to_bayesian_model() import plottool as pt import networkx as nx kwargs = kwargs.copy() factor_list = kwargs.pop('factor_list', []) ttype_colors, ttype_scalars = make_colorcodes(model) textprops = { 'horizontalalignment': 'left', 'family': 'monospace', 'size': 8, } # build graph attrs tup = get_node_viz_attrs(model, evidence, soft_evidence, factor_list, ttype_colors, **kwargs) node_color, pos_list, pos_dict, takws = tup # draw graph has_infered = evidence or 'factor_list' in kwargs if False: fig = pt.figure(fnum=fnum, pnum=pnum, doclf=True) # NOQA ax = pt.gca() drawkw = dict(pos=pos_dict, ax=ax, with_labels=True, node_size=1100, node_color=node_color) nx.draw(model, **drawkw) else: # BE VERY CAREFUL if 1: graph = model.copy() graph.__class__ = nx.DiGraph graph.graph['groupattrs'] = ut.ddict(dict) #graph = model. if getattr(graph, 'ttype2_cpds', None) is not None: # Add invis edges and ttype groups for ttype in model.ttype2_cpds.keys(): ttype_cpds = model.ttype2_cpds[ttype] # use defined ordering ttype_nodes = ut.list_getattr(ttype_cpds, 'variable') # ttype_nodes = sorted(ttype_nodes) invis_edges = list(ut.itertwo(ttype_nodes)) graph.add_edges_from(invis_edges) nx.set_edge_attributes( graph, 'style', {edge: 'invis' for edge in invis_edges}) nx.set_node_attributes( graph, 'groupid', {node: ttype for node in ttype_nodes}) graph.graph['groupattrs'][ttype]['rank'] = 'same' graph.graph['groupattrs'][ttype]['cluster'] = False else: graph = model pt.show_nx(graph, layout_kw={'prog': 'dot'}, fnum=fnum, pnum=pnum, verbose=0) pt.zoom_factory() fig = pt.gcf() ax = pt.gca() pass hacks = [ pt.draw_text_annotations(textprops=textprops, **takw) for takw in takws if takw ] xmin, ymin = np.array(pos_list).min(axis=0) xmax, ymax = np.array(pos_list).max(axis=0) if 'name' in model.ttype2_template: num_names = len(model.ttype2_template['name'].basis) num_annots = len(model.ttype2_cpds['name']) if num_annots > 4: ax.set_xlim((xmin - 40, xmax + 40)) ax.set_ylim((ymin - 50, ymax + 50)) fig.set_size_inches(30, 7) else: ax.set_xlim((xmin - 42, xmax + 42)) ax.set_ylim((ymin - 50, ymax + 50)) fig.set_size_inches(23, 7) title = 'num_names=%r, num_annots=%r' % ( num_names, num_annots, ) else: title = '' map_assign = kwargs.get('map_assign', None) def word_insert(text): return '' if len(text) == 0 else text + ' ' top_assignments = kwargs.get('top_assignments', None) if top_assignments is not None: map_assign, map_prob = top_assignments[0] if map_assign is not None: title += '\n%sMAP: ' % (word_insert(kwargs.get('method', ''))) title += map_assign + ' @' + '%.2f%%' % (100 * map_prob, ) if kwargs.get('show_title', True): pt.set_figtitle(title, size=14) for hack in hacks: hack() if has_infered: # Hack in colorbars # if ut.list_type(basis) is int: # pt.colorbar(scalars, colors, lbl='score', ticklabels=np.array(basis) + 1) # else: # pt.colorbar(scalars, colors, lbl='score', ticklabels=basis) keys = ['name', 'score'] locs = ['left', 'right'] for key, loc in zip(keys, locs): if key in ttype_colors: basis = model.ttype2_template[key].basis # scalars = colors = ttype_colors[key] scalars = ttype_scalars[key] pt.colorbar(scalars, colors, lbl=key, ticklabels=basis, ticklocation=loc)
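# draw_bayesian_model forces each ttype onto its own row by chaining its nodes
# with invisible edges and tagging them with a shared groupid / rank='same'
# group attribute before handing the graph to the dot layout. The sketch below
# expresses the same idea as raw Graphviz DOT text (node names are invented),
# which can be piped to `dot -Tpng` to see the layered result.
name_nodes = ['Ni', 'Nj', 'Nk']
score_nodes = ['Sij', 'Sjk']
real_edges = [('Ni', 'Sij'), ('Nj', 'Sij'), ('Nj', 'Sjk'), ('Nk', 'Sjk')]

def same_rank_block(nodes):
    # chain the group with invisible edges so dot keeps their order stable
    invis = ' '.join('%s -> %s [style=invis];' % uv for uv in zip(nodes, nodes[1:]))
    return '{ rank=same; %s %s }' % ('; '.join(nodes) + ';', invis)

dot_lines = ['digraph model {']
dot_lines.append('  ' + same_rank_block(name_nodes))
dot_lines.append('  ' + same_rank_block(score_nodes))
dot_lines.extend('  %s -> %s;' % uv for uv in real_edges)
dot_lines.append('}')
print('\n'.join(dot_lines))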