def _plot_normals_entry(scene, disp_map, gt, mask_planes, mask_contin,
                        algo_name, metric_mae_contin, metric_mae_planes,
                        idx, grid, entries_per_row, n_vis_types, cols,
                        colorbar_args, fs):
    """Plot one algorithm entry (disparity map, normal map, angular error)
    into the shared figure grid.

    Args:
        scene: scene object providing display settings and normal visualization
        disp_map: disparity map of the algorithm
        gt: ground truth disparity map
        mask_planes, mask_contin: evaluation masks for the two surface metrics
        algo_name: display name used as column title
        metric_mae_contin, metric_mae_planes: angular error metrics
        idx: running entry index within the row-major layout
        grid: gridspec-like grid of subplots
        entries_per_row: number of algorithm entries per figure row
        n_vis_types: number of visualization rows per entry
        cols: total number of grid columns (entries + colorbar column)
        colorbar_args: kwargs forwarded to plotting.add_colorbar
        fs: font size for titles and labels
    """
    add_ylabel = not idx % entries_per_row  # is first column
    add_colorbar = not (idx + 1) % entries_per_row  # is last column
    # Floor division is required: grid indices must be ints.
    # True division ("/") yields a float under Python 3 and breaks
    # gridspec indexing below.
    idx_row = (idx // entries_per_row) * n_vis_types
    idx_col = idx % entries_per_row
    idx = idx_row * cols + idx_col

    # plot disparity map
    plt.subplot(grid[idx])
    cb = plt.imshow(disp_map, **settings.disp_map_args(scene))
    plt.title(algo_name, fontsize=fs)

    if add_ylabel:
        plt.ylabel("DispMap", fontsize=fs)
    if add_colorbar:
        plotting.add_colorbar(grid[idx + 1], cb, **colorbar_args)

    # plot normal map
    plt.subplot(grid[idx + cols])
    plt.imshow(scene.get_normal_vis_from_disp_map(disp_map))

    if add_ylabel:
        plt.ylabel("Normals", fontsize=fs)

    # plot angular errors
    plt.subplot(grid[idx + 2 * cols])

    # compute angular errors; a missing result file shows up as "-"
    try:
        score_contin = "%0.2f" % metric_mae_contin.get_score(disp_map, gt, scene)
    except IOError:
        score_contin = "-"

    try:
        score_planes = "%0.2f" % metric_mae_planes.get_score(disp_map, gt, scene)
    except IOError:
        score_planes = "-"

    plt.title("%s / %s" % (score_contin, score_planes), fontsize=fs)

    if add_ylabel:
        plt.ylabel("Angular Error", fontsize=fs)

    # get combined error visualization (if applicable)
    mask = mask_contin + mask_planes
    if np.sum(mask) > 0:
        score, vis_normals = metric_mae_contin.get_score_from_mask(
            disp_map, gt, scene, mask, with_visualization=True)
        cb = plt.imshow(vis_normals, **settings.metric_args(metric_mae_contin))

        if add_colorbar:
            plotting.add_colorbar(grid[idx + 2 * cols + 1], cb, **colorbar_args)
def plot_algo_vis_for_metric(self, metric, algo_result, gt, mask,
                             hide_gt=False, fontsize=10):
    """Draw one metric visualization on top of the algorithm disparity map.

    The algorithm result is shown as a gray background; the metric
    visualization is drawn only where `mask` is True. The metric score is
    placed in the axes title.

    Returns:
        The matplotlib image handle of the metric overlay (for colorbars).
    """
    score, vis = metric.get_score(algo_result, gt, self, with_visualization=True)

    # pixelize so that hidden ground truth cannot be reconstructed
    if hide_gt and metric.pixelize_results():
        vis = plotting.pixelize(vis)

    # background: algorithm disparities in gray
    plt.imshow(algo_result, **settings.disp_map_args(self, cmap="gray"))

    # foreground: metric visualization restricted to the evaluation mask
    overlay = np.ma.masked_array(vis, mask=~mask)
    cm = plt.imshow(overlay, **settings.metric_args(metric))

    plt.title(metric.format_score(score), fontsize=fontsize)
    return cm
def plot_normals_explanation(algorithm, scene, fs=14, subdir="overview"):
    """Create a 1x4 overview figure explaining the surface-normal metrics.

    Columns: ground truth normals, algorithm normals, median angular error
    visualization, and its colorbar. The figure is saved under `subdir`.
    """
    # prepare figure
    n_rows, n_cols = 1, 4
    fig = plt.figure(figsize=(10, 4))
    grid, cb_height, cb_width = plotting.get_grid_with_colorbar(
        n_rows, n_cols, scene)

    # prepare metrics
    normals_contin = MAEContinSurf()
    normals_planes = MAEPlanes()

    # prepare data: evaluate on the union of both metric masks
    gt = scene.get_gt()
    algo_result = misc.get_algo_result(algorithm, scene)
    mask = (normals_contin.get_evaluation_mask(scene)
            + normals_planes.get_evaluation_mask(scene))
    score_normals, vis_normals = normals_contin.get_score_from_mask(
        algo_result, gt, scene, mask, with_visualization=True)

    # plot ground truth normals
    plt.subplot(grid[0])
    plt.imshow(scene.get_normal_vis_from_disp_map(gt))
    plt.title("Ground Truth Normals", fontsize=fs)

    # plot algorithm normals
    plt.subplot(grid[1])
    plt.imshow(scene.get_normal_vis_from_disp_map(algo_result))
    plt.title("Algorithm Normals", fontsize=fs)

    # plot median angular error with colorbar
    plt.subplot(grid[2])
    cb = plt.imshow(vis_normals, **settings.metric_args(normals_contin))
    plt.title("Median Angular Error: %0.1f" % score_normals, fontsize=fs)

    plt.subplot(grid[3])
    plotting.add_colorbar(grid[3], cb, cb_height, cb_width,
                          colorbar_bins=4, fontsize=fs)

    # save figure
    fig_name = "metrics_%s_%s" % (scene.get_name(), algorithm.get_name())
    fig_path = plotting.get_path_to_figure(fig_name, subdir=subdir)
    plotting.save_tight_figure(fig, fig_path, hide_frames=False,
                               hspace=0.04, wspace=0.03)
def save_visualization(algo_result, metric_vis, metric, scene, tgt_dir):
    """Save a single metric visualization figure below `tgt_dir`.

    The algorithm disparities serve as a gray background with the metric
    visualization layered on top. For scenes with hidden ground truth the
    visualization may be pixelized so it cannot be reverse-engineered.

    Returns:
        The path of the saved figure, relative to `tgt_dir`.
    """
    fig = init_figure()

    # algorithm result as background
    plt.imshow(algo_result, **settings.disp_map_args(scene, cmap="gray"))

    # metric visualization on top (pixelized for hidden ground truth)
    if scene.hidden_gt() and metric.pixelize_results() and settings.PIXELIZE:
        metric_vis = plotting.pixelize(metric_vis, noise_factor=0.05)

    cm = plt.imshow(metric_vis, **settings.metric_args(metric))
    add_colorbar(cm, metric.colorbar_bins)

    # save fig
    relative_fname = get_relative_path(scene, metric.get_id())
    fpath = op.normpath(op.join(tgt_dir, relative_fname))
    plotting.save_tight_figure(fig, fpath, hide_frames=True, pad_inches=0.01)

    return relative_fname
def plot_general_overview(algorithms, scenes, metrics, fig_name=None,
                          subdir=SUBDIR, fs=11):
    """Create a grid figure with one metric visualization per cell.

    Rows are grouped per scene (one row per metric), columns are algorithms
    plus a trailing colorbar column. Metrics that do not apply to a scene
    are marked accordingly in the row label.
    """
    n_vis_types = len(metrics)

    # prepare figure grid
    rows = len(scenes) * n_vis_types
    cols = len(algorithms) + 1
    fig = plt.figure(figsize=(cols * 1.4, 1.15 * rows * 1.6))
    grid, cb_height, cb_width = plotting.get_grid_with_colorbar(
        rows, cols, scenes[0])

    last_col = len(algorithms) - 1

    for idx_s, scene in enumerate(scenes):
        gt = scene.get_gt()
        applicable_metrics = scene.get_applicable_metrics(metrics)

        for idx_a, algorithm in enumerate(algorithms):
            algo_result = misc.get_algo_result(algorithm, scene)

            for idx_m, metric in enumerate(metrics):
                idx = (n_vis_types * idx_s + idx_m) * cols + idx_a
                ylabel = metric.get_display_name()
                plt.subplot(grid[idx])

                if metric not in applicable_metrics:
                    # warn once per scene/metric pair, label first column
                    if idx_a == 0:
                        log.warning("Metric %s not applicable for scene %s."
                                    % (metric.get_display_name(),
                                       scene.get_display_name()))
                        plt.ylabel(ylabel + "\n(not applicable)")
                    continue

                score, vis = metric.get_score(algo_result, gt, scene,
                                              with_visualization=True)
                cb = plt.imshow(vis, **settings.metric_args(metric))

                # add algorithm name and metric score on top row
                if idx_s == 0 and idx_m == 0:
                    plt.title("%s\n%0.2f" % (algorithm.get_display_name(),
                                             score), fontsize=fs)
                else:
                    plt.title("%0.2f" % score, fontsize=fs)

                # add colorbar to last column
                if idx_a == last_col:
                    plotting.add_colorbar(
                        grid[idx + 1], cb, cb_height, cb_width,
                        colorbar_bins=metric.colorbar_bins, fontsize=fs)

                # add metric name to first column
                if idx_a == 0:
                    plt.ylabel(ylabel)

    # save figure
    if fig_name is None:
        metric_ids = "_".join(metric.get_id() for metric in metrics)
        scene_names = "_".join(scene.get_name() for scene in scenes)
        fig_name = "metric_overview_%s_%s" % (metric_ids, scene_names)
    fig_path = plotting.get_path_to_figure(fig_name, subdir=subdir)
    plotting.save_tight_figure(fig, fig_path, hide_frames=True,
                               hspace=0.01, wspace=0.01)
def plot_algo_overview(self, algorithms, with_metric_vis=True,
                       subdir="algo_overview", fs=14):
    """Create the per-scene algorithm overview figure.

    Layout: first column shows ground truth / center view, then one column
    per algorithm (disparities, diff map, metrics) plus a colorbar column.
    With `with_metric_vis=True` each metric gets a full visualization row;
    otherwise metric scores are shown as colored bars only.
    """
    self.set_scale_for_algo_overview()
    metrics = self.get_scene_specific_metrics()
    n_metrics = len(metrics)

    if not with_metric_vis:
        rows, cols = 2 + n_metrics, len(algorithms) + 2
        fig = plt.figure(figsize=(2.6 * len(algorithms), 4.9))
        offset = 0
    else:
        rows, cols = 2 + 2 * n_metrics, len(algorithms) + 2
        fig = plt.figure(figsize=(2.6 * len(algorithms), rows + 3))
        offset = n_metrics

    labelpad = -15
    hscale, wscale = 7, 5
    width_ratios = [wscale] * (len(algorithms) + 1) + [1]
    height_ratios = [hscale] * (rows - n_metrics) + [1] * n_metrics
    gs = gridspec.GridSpec(rows, cols,
                           height_ratios=height_ratios,
                           width_ratios=width_ratios)

    gt = self.get_gt()
    # Floor division: np.ones requires integer dimensions; true division
    # ("/") would pass a float under Python 3 and raise a TypeError.
    dummy = np.ones((self.get_height() // hscale, self.get_width()))
    cb_height, w = np.shape(gt)
    cb_width = w / float(wscale)

    # first column (gt, center view, ...)
    plt.subplot(gs[0])
    plt.imshow(gt, **settings.disp_map_args(self))
    plt.title("Ground Truth", fontsize=fs)
    plt.ylabel("Disparity Map", fontsize=fs)

    plt.subplot(gs[cols])
    plt.imshow(self.get_center_view())
    plt.ylabel("diff: GT - Algo", fontsize=fs)

    # placeholder rows carrying the metric short names
    for idx_m, metric in enumerate(metrics):
        plt.subplot(gs[(2 + idx_m + offset) * cols])
        plt.xlabel(metric.get_short_name(), labelpad=labelpad, fontsize=fs)
        plt.imshow(dummy, cmap="gray_r")

    # algorithm columns
    for idx_a, algorithm in enumerate(algorithms):
        log.info("Processing algorithm: %s" % algorithm)
        algo_result = misc.get_algo_result(algorithm, self)

        # algorithm disparities
        plt.subplot(gs[idx_a + 1])
        plt.title(algorithm.get_display_name(), fontsize=fs)
        cm1 = plt.imshow(algo_result, **settings.disp_map_args(self))

        # algorithm diff map
        plt.subplot(gs[cols + idx_a + 1])
        cm2 = plt.imshow(gt - algo_result, **settings.diff_map_args())

        # add colorbar if last column
        if idx_a == (len(algorithms) - 1):
            plotting.add_colorbar(gs[idx_a + 2], cm1,
                                  cb_height, cb_width,
                                  colorbar_bins=5, fontsize=fs - 4)
            plotting.add_colorbar(gs[cols + idx_a + 2], cm2,
                                  cb_height, cb_width,
                                  colorbar_bins=5, fontsize=fs - 4)

        # score and background color for metrics
        for idx_m, metric in enumerate(metrics):
            if with_metric_vis:
                plt.subplot(gs[(2 + idx_m) * cols + idx_a + 1])
                score, vis = metric.get_score(algo_result, gt, self,
                                              with_visualization=True)
                cm3 = plt.imshow(vis, **settings.metric_args(metric))

                if idx_a == 0:
                    plt.ylabel(metric.get_short_name(), fontsize=fs)
                elif idx_a == (len(algorithms) - 1):
                    plotting.add_colorbar(
                        gs[(2 + idx_m) * cols + idx_a + 2], cm3,
                        cb_height, cb_width,
                        colorbar_bins=metric.colorbar_bins,
                        fontsize=fs - 4)
            else:
                score = metric.get_score(algo_result, gt, self)

            # colored score bar row
            plt.subplot(gs[(2 + idx_m + offset) * cols + idx_a + 1])
            plt.imshow(dummy * score,
                       **settings.score_color_args(vmin=metric.vmin,
                                                   vmax=metric.vmax))
            plt.xlabel(metric.format_score(score),
                       labelpad=labelpad, fontsize=fs)

    fig_name = ("algo_overview_" + self.get_name()
                + ("_vis" if with_metric_vis else ""))
    fig_path = plotting.get_path_to_figure(fig_name, subdir=subdir)
    plotting.save_tight_figure(fig, fig_path, wspace=0.04, hide_frames=True)