Example #1
    def visualize_masks(self, subdir="stratified"):
        self.set_high_gt_scale()

        rows, cols = 1, 3
        fig = plt.figure(figsize=(9, 3))

        center_view = self.get_center_view()
        m_eval = self.get_boundary_mask()

        plt.subplot(rows, cols, 1)
        plt.title("Low Texture")
        plotting.plot_img_with_transparent_mask(
            center_view,
            self.get_low_texture() * m_eval, **settings.mask_vis_args())

        plt.subplot(rows, cols, 2)
        plt.title("High Contrast\n(Dark Stripes)")
        plotting.plot_img_with_transparent_mask(
            center_view,
            self.get_high_contrast() * m_eval, **settings.mask_vis_args())

        plt.subplot(rows, cols, 3)
        plt.title("Low Contrast\n(Bright Stripes)")
        plotting.plot_img_with_transparent_mask(
            center_view,
            self.get_low_contrast() * m_eval, **settings.mask_vis_args())

        fig_path = plotting.get_path_to_figure("stripes_masks", subdir=subdir)
        plotting.save_tight_figure(fig, fig_path, hide_frames=True)
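The helper plotting.plot_img_with_transparent_mask is defined elsewhere in the toolkit. As a rough, self-contained sketch of the same idea (all names, sizes, and colors below are made up for illustration), a boolean mask can be overlaid on the center view by stacking a semi-transparent RGBA layer on top of the image:

import numpy as np
import matplotlib.pyplot as plt

def overlay_mask(img, mask, color=(1.0, 0.0, 0.0), alpha=0.5):
    # RGBA layer that is fully transparent outside the mask
    overlay = np.zeros(mask.shape + (4,))
    overlay[mask] = list(color) + [alpha]
    plt.imshow(img, cmap="gray")
    plt.imshow(overlay)

# toy data: a horizontal gradient and a circular mask
img = np.tile(np.linspace(0, 1, 256), (256, 1))
yy, xx = np.mgrid[:256, :256]
mask = (yy - 128) ** 2 + (xx - 128) ** 2 < 60 ** 2

overlay_mask(img, mask)
plt.show()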
Example #2
def plot_scene_difficulty(scenes, subdir="overview", fs=10):

    n_scenes_per_row = 4
    rows, cols = 6, n_scenes_per_row + 1
    fig = plt.figure(figsize=(6, 9))
    grid, cb_height, cb_width = plotting.get_grid_with_colorbar(
        rows, cols, scenes[0])
    colorbar_args = {
        "height": cb_height,
        "width": cb_width,
        "colorbar_bins": 2,
        "fontsize": fs
    }

    median_algo = PerPixMedianDiff()
    best_algo = PerPixBest()

    for idx_s, scene in enumerate(scenes):
        # prepare data
        gt = scene.get_gt()
        median_result = misc.get_algo_result(median_algo, scene)
        best_result = misc.get_algo_result(best_algo, scene)

        idx_row = idx_s // n_scenes_per_row * 2  # integer division: two grid rows per row of scenes
        idx_col = (idx_s % n_scenes_per_row)
        add_ylabel = not idx_s % n_scenes_per_row  # is first column
        add_colorbar = idx_col == (n_scenes_per_row - 1)  # is last column

        idx = idx_row * cols + idx_col

        # plot errors for median result
        plt.subplot(grid[idx])
        plt.title(scene.get_display_name(), fontsize=fs)
        cb = plt.imshow(np.abs(gt - median_result),
                        **settings.abs_diff_map_args())

        if add_ylabel:
            plt.ylabel("|GT - %s|" % median_algo.get_display_name(),
                       fontsize=fs - 2)
        if add_colorbar:
            plotting.add_colorbar(grid[idx + 1], cb, **colorbar_args)

        # plot error for best result
        plt.subplot(grid[idx + cols])
        cb = plt.imshow(np.abs(gt - best_result),
                        **settings.abs_diff_map_args())

        if add_ylabel:
            plt.ylabel("|GT - %s|" % best_algo.get_display_name(),
                       fontsize=fs - 2)
        if add_colorbar:
            plotting.add_colorbar(grid[idx + cols + 1], cb, **colorbar_args)

    fig_path = plotting.get_path_to_figure("scene_difficulty", subdir=subdir)
    plotting.save_tight_figure(fig,
                               fig_path,
                               hide_frames=True,
                               hspace=0.08,
                               wspace=0.03)
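Each scene occupies two stacked grid rows (errors of the per-pixel median result on top, errors of the per-pixel best result below) plus a trailing colorbar column, so the flat grid index follows from the scene index. The snippet below isolates that index arithmetic from the function above so it can be checked in a plain Python shell; integer division is required under Python 3:

n_scenes_per_row = 4
cols = n_scenes_per_row + 1  # + 1 for the colorbar column

for idx_s in range(8):
    idx_row = idx_s // n_scenes_per_row * 2
    idx_col = idx_s % n_scenes_per_row
    idx = idx_row * cols + idx_col
    print(idx_s, "->", idx, "(median errors),", idx + cols, "(best errors)")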
Example #3
def plot(algorithms,
         scenes,
         thresh=settings.BAD_PIX_THRESH,
         subdir="error_heatmaps",
         fs=18,
         max_per_row=4):

    # prepare figure
    n_scenes = len(scenes)
    cols = min(n_scenes, max_per_row) + 1  # + 1 for colorbars
    rows = int(np.ceil(n_scenes / float(cols - 1)))
    fig = plt.figure(figsize=(2.7 * cols, 3 * rows))
    grid, cbh, cbw = plotting.get_grid_with_colorbar(rows,
                                                     cols,
                                                     scenes[0],
                                                     hscale=1,
                                                     wscale=9)
    colorbar_args = {
        "height": cbh,
        "width": cbw,
        "colorbar_bins": 5,
        "fontsize": 10,
        "scale": 0.8
    }

    # plot heatmaps
    idx_scene = 0
    for idx in range(rows * cols):

        if (idx + 1) % cols:
            # plot error heatmap for scene
            scene = scenes[idx_scene]
            idx_scene += 1

            plt.subplot(grid[idx])
            bad_count = get_bad_count(scene,
                                      algorithms,
                                      thresh,
                                      percentage=True)
            cm = plt.imshow(bad_count, vmin=0, vmax=100, cmap="inferno")
            plt.ylabel(scene.get_display_name(), fontsize=fs, labelpad=2.5)
        else:
            # plot colorbar
            plotting.add_colorbar(grid[idx], cm, **colorbar_args)

        if idx_scene >= n_scenes:
            if idx % cols or n_scenes == 1:
                plotting.add_colorbar(grid[idx + 1], cm, **colorbar_args)
            break

    plt.suptitle(
        "Per Pixel: Percentage of %d Algorithms with abs(gt-algo) > %0.2f" %
        (len(algorithms), thresh),
        fontsize=fs)

    fig_name = ("error_heatmaps_%.3f" % thresh).replace(".", "")
    fig_path = plotting.get_path_to_figure(fig_name, subdir=subdir)
    plotting.save_tight_figure(fig, fig_path, hide_frames=True, hspace=0.02)
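get_bad_count is defined elsewhere in this module. Judging from the figure title ("Per Pixel: Percentage of %d Algorithms with abs(gt-algo) > %0.2f"), a plausible sketch of what it computes is shown below; the actual toolkit implementation may differ, and the scene/misc helpers are assumed from the surrounding examples:

import numpy as np

def get_bad_count(scene, algorithms, thresh, percentage=False):
    # Sketch: per-pixel count (or percentage) of algorithms whose
    # absolute disparity error exceeds the threshold.
    gt = scene.get_gt()
    bad_count = np.zeros(np.shape(gt))

    for algorithm in algorithms:
        # misc is the toolkit module used throughout these examples
        algo_result = misc.get_algo_result(algorithm, scene)
        bad_count += (np.abs(algo_result - gt) > thresh).astype(float)

    if percentage:
        bad_count = 100.0 * bad_count / len(algorithms)
    return bad_count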
Example #4
def plot_normals_explanation(algorithm, scene, fs=14, subdir="overview"):
    # prepare figure
    rows, cols = 1, 4
    fig = plt.figure(figsize=(10, 4))
    grid, cb_height, cb_width = plotting.get_grid_with_colorbar(
        rows, cols, scene)

    # prepare metrics
    normals_contin = MAEContinSurf()
    normals_planes = MAEPlanes()

    # prepare data
    gt = scene.get_gt()
    algo_result = misc.get_algo_result(algorithm, scene)
    mask = normals_contin.get_evaluation_mask(
        scene) + normals_planes.get_evaluation_mask(scene)
    score_normals, vis_normals = normals_contin.get_score_from_mask(
        algo_result, gt, scene, mask, with_visualization=True)

    # plot ground truth normals
    plt.subplot(grid[0])
    plt.imshow(scene.get_normal_vis_from_disp_map(gt))
    plt.title("Ground Truth Normals", fontsize=fs)

    # plot algorithm normals
    plt.subplot(grid[1])
    plt.imshow(scene.get_normal_vis_from_disp_map(algo_result))
    plt.title("Algorithm Normals", fontsize=fs)

    # plot median angular error with colorbar
    plt.subplot(grid[2])
    cb = plt.imshow(vis_normals, **settings.metric_args(normals_contin))
    plt.title("Median Angular Error: %0.1f" % score_normals, fontsize=fs)
    plt.subplot(grid[3])
    plotting.add_colorbar(grid[3],
                          cb,
                          cb_height,
                          cb_width,
                          colorbar_bins=4,
                          fontsize=fs)

    # save figure
    fig_name = "metrics_%s_%s" % (scene.get_name(), algorithm.get_name())
    fig_path = plotting.get_path_to_figure(fig_name, subdir=subdir)
    plotting.save_tight_figure(fig,
                               fig_path,
                               hide_frames=False,
                               hspace=0.04,
                               wspace=0.03)
Example #5
def plot_benchmark_scene_overview(benchmark_scenes, subdir="overview", fs=16):
    # prepare grid figure
    rows, cols = 2, 12
    fig = plt.figure(figsize=(21.6, 4))
    grids = plotting.get_grid(rows, cols)

    # plot center view and ground truth for each scene
    for idx_s, scene in enumerate(benchmark_scenes):

        center_view = scene.get_center_view()
        plt.subplot(grids[idx_s])
        plt.imshow(center_view)
        plt.title("\n\n" + scene.get_display_name(), fontsize=fs)

        try:
            gt = scene.get_gt()
            plt.subplot(grids[cols + idx_s])
            if scene.hidden_gt():
                gt = plotting.pixelize(gt, noise_factor=0.5)
            plt.imshow(gt, **settings.disp_map_args(scene))
        except IOError as e:
            # skip potentially missing ground truth of test scenes
            log.warning(e)
            continue

    # add text
    height = 785
    plt.gca().annotate("(a) Stratified Scenes", (400, 420), (500, height),
                       fontsize=fs,
                       xycoords='figure pixels')
    plt.gca().annotate("(b) Training Scenes", (400, 420), (1910, height),
                       fontsize=fs,
                       xycoords='figure pixels')
    plt.gca().annotate("(c) Test Scenes (Hidden Ground Truth)", (400, 420),
                       (3070, height),
                       fontsize=fs,
                       xycoords='figure pixels')

    # save figure
    fig_path = plotting.get_path_to_figure("benchmark_scenes", subdir=subdir)
    plotting.save_tight_figure(fig,
                               fig_path,
                               hide_frames=True,
                               hspace=0.02,
                               wspace=0.02,
                               dpi=200)
Example #6
    def plot_error_vs_noise(self, algorithms, subdir="stratified"):
        self.set_low_gt_scale()
        fig = plt.figure(figsize=(8, 4))

        grid = self.get_boxes()
        box_ids = sorted(list(np.unique(grid)))
        box_ids.remove(0)
        n_boxes = len(box_ids)
        mse = MSE()
        gt = self.get_gt()
        m_basic = self.get_boundary_mask()
        m_eval = self.get_background_mask() * m_basic

        x_values = np.arange(1, n_boxes + 1)

        for algorithm in algorithms:
            algo_result = misc.get_algo_result(algorithm, self)
            y_values = np.full(n_boxes, fill_value=np.nan)

            for idx_b, box_id in enumerate(box_ids):
                m_current = m_eval * (grid == box_id)
                y_values[idx_b] = mse.get_masked_score(algo_result, gt,
                                                       m_current)

            plt.plot(x_values,
                     y_values,
                     "o-",
                     color=algorithm.get_color(),
                     label=algorithm.get_display_name(),
                     lw=2,
                     alpha=0.9,
                     markeredgewidth=0)

        plt.legend(frameon=False,
                   loc="upper right",
                   ncol=1,
                   title="Algorithms:",
                   bbox_to_anchor=(1.25, 1),
                   borderaxespad=0.0)
        plt.xlabel("Cell IDs (increasing noise from left to right)")
        plt.ylabel("MSE on cell background")
        plt.title("%s: Error per Cell Background" % (self.get_display_name()))
        plotting.hide_upper_right()

        fig_path = plotting.get_path_to_figure("dots_per_box", subdir=subdir)
        plotting.save_tight_figure(fig, fig_path, remove_ticks=False)
Example #7
def plot_pairwise_comparison(algo1,
                             algo2,
                             scenes,
                             n_scenes_per_row=4,
                             subdir="pairwise_diffs"):
    rows = int(np.ceil(len(scenes) / float(n_scenes_per_row)))
    cols = n_scenes_per_row
    fig = plt.figure(figsize=(4 * cols, 3 * rows))

    for idx_s, scene in enumerate(scenes):
        algo_result_1 = misc.get_algo_result(algo1, scene)
        algo_result_2 = misc.get_algo_result(algo2, scene)
        gt = scene.get_gt()

        plt.subplot(rows, cols, idx_s + 1)
        cb = plt.imshow(np.abs(algo_result_1 - gt) -
                        np.abs(algo_result_2 - gt),
                        interpolation="none",
                        cmap=cm.seismic,
                        vmin=-.1,
                        vmax=.1)
        plt.colorbar(cb, shrink=0.7)
        plt.title(scene.get_display_name())

    # title
    a1 = algo1.get_display_name()
    a2 = algo2.get_display_name()
    plt.suptitle(
        "|%s - GT| - |%s - GT|\nblue: %s is better, red: %s is better" %
        (a1, a2, a1, a2))

    fig_name = "pairwise_diffs_%s_%s" % (algo1.get_name(), algo2.get_name())
    fig_path = plotting.get_path_to_figure(fig_name, subdir=subdir)
    plotting.save_tight_figure(fig,
                               fig_path,
                               hide_frames=True,
                               padding_top=0.85,
                               hspace=0.15,
                               wspace=0.15)
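The heart of the comparison is the signed map |algo1 - GT| - |algo2 - GT|, which is negative (blue with the seismic colormap) where algo1 is closer to the ground truth and positive (red) where algo2 is. A self-contained illustration on synthetic arrays (all data below is made up):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

rng = np.random.default_rng(0)
gt = rng.uniform(-2, 2, size=(128, 128))          # fake ground truth disparities
algo1 = gt + rng.normal(0, 0.03, size=gt.shape)   # fake algorithm result 1
algo2 = gt + rng.normal(0, 0.05, size=gt.shape)   # fake algorithm result 2

diff = np.abs(algo1 - gt) - np.abs(algo2 - gt)
plt.imshow(diff, interpolation="none", cmap=cm.seismic, vmin=-0.1, vmax=0.1)
plt.colorbar(shrink=0.7)
plt.title("blue: algo1 better, red: algo2 better")
plt.show()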
Example #8
    def plot_algo_overview(self, algorithms, subdir="algo_overview", fs=6):
        accv_metrics = [MSE(), BadPix(0.07), BumpinessPlanes(), BumpinessContinSurf(),
                        Discontinuities(), FineFattening(), FineThinning()]
        metrics_low_res = [m for m in self.get_applicable_metrics_low_res() if m in accv_metrics]
        metrics_high_res = [m for m in self.get_applicable_metrics_high_res() if m in accv_metrics]

        # prepare figure
        rows = len(metrics_low_res + metrics_high_res) + 1
        cols = len(algorithms) + 1
        fig = plt.figure(figsize=(cols, rows*1.1))
        grids = self._get_grids(fig, rows, cols, axes_pad=-0.2)

        # center view on top left grid cell
        self.set_high_gt_scale()
        plt.sca(grids[0][0])
        plt.imshow(self.get_center_view())
        plt.title("Center View", fontsize=fs)
        plt.ylabel("Disparity Map", fontsize=fs)

        # mask visualizations + algorithm disparity maps + metric visualizations
        log.info("Computing scores and visualizations for low resolution metrics.")
        self.set_low_gt_scale()
        self.plot_metric_rows(grids, algorithms, metrics_low_res, offset=0, fontsize=fs)

        log.info("Computing scores and visualizations for high resolution metrics.")
        self.set_high_gt_scale()
        self.plot_metric_rows(grids, algorithms, metrics_high_res,
                              offset=len(metrics_low_res), fontsize=fs)

        # finalize figure
        for grid in grids:
            plotting.remove_ticks_from_axes(grid.axes_all)
            plotting.remove_frames_from_axes(grid.axes_all)
        plt.suptitle(self.get_display_name(), fontsize=fs+2)

        fig_path = plotting.get_path_to_figure("algo_overview_%s" % self.get_name(), subdir=subdir)
        plotting.save_fig(fig, fig_path, pad_inches=0.1)
Example #9
def plot_discont_overview(algorithms,
                          scene,
                          n_rows=2,
                          fs=15,
                          subdir="overview",
                          xmin=150,
                          ymin=230,
                          ww=250):

    # prepare figure grid
    n_vis_types = 2
    n_entries_per_row = int(np.ceil((len(algorithms) + 1) / float(n_rows)))
    rows, cols = (n_vis_types * n_rows), n_entries_per_row + 1

    fig = plt.figure(figsize=(cols * 1.7, 1.45 * rows * 1.5))
    grid, cb_height, cb_width = plotting.get_grid_with_colorbar(
        rows, cols, scene)
    colorbar_args = {
        "height": cb_height,
        "width": cb_width,
        "colorbar_bins": 7,
        "fontsize": fs
    }

    # prepare data
    median_algo = PerPixMedianDiff()
    gt = scene.get_gt()
    median_result = misc.get_algo_result(median_algo, scene)
    center_view = scene.get_center_view()

    # center view
    plt.subplot(grid[0])
    plt.imshow(center_view[ymin:ymin + ww, xmin:xmin + ww])
    plt.title("Center View", fontsize=fs)
    plt.ylabel("DispMap", fontsize=fs)
    plt.subplot(grid[cols])
    plt.ylabel("MedianDiff", fontsize=fs)

    for idx_a, algorithm in enumerate(algorithms):
        algo_result = misc.get_algo_result(algorithm, scene)
        idx = idx_a + 1

        add_ylabel = not idx % n_entries_per_row  # is first column
        add_colorbar = not (idx + 1) % n_entries_per_row  # is last column
        idx_row = (idx // n_entries_per_row) * n_vis_types
        idx_col = idx % n_entries_per_row

        idx = idx_row * cols + idx_col

        # top row with algorithm disparity map
        plt.subplot(grid[idx])
        algo_result_crop = algo_result[ymin:ymin + ww, xmin:xmin + ww]
        cb_depth = plt.imshow(algo_result_crop,
                              **settings.disp_map_args(scene))
        plt.title(algorithm.get_display_name(), fontsize=fs)

        if add_ylabel:
            plt.ylabel("DispMap", fontsize=fs)
        if add_colorbar:
            plotting.add_colorbar(grid[idx + 1], cb_depth, **colorbar_args)

        # second row with median diff
        plt.subplot(grid[idx + cols])
        diff = (np.abs(median_result - gt) -
                np.abs(algo_result - gt))[ymin:ymin + ww, xmin:xmin + ww]
        cb_error = plt.imshow(diff,
                              interpolation="none",
                              cmap=cm.RdYlGn,
                              vmin=-.05,
                              vmax=.05)

        if add_ylabel:
            plt.ylabel("MedianDiff", fontsize=fs)
        if add_colorbar:
            plotting.add_colorbar(grid[idx + cols + 1], cb_error,
                                  **colorbar_args)

    fig_path = plotting.get_path_to_figure("discont_%s" % scene.get_name(),
                                           subdir=subdir)
    plotting.save_tight_figure(fig,
                               fig_path,
                               hide_frames=True,
                               hspace=0.03,
                               wspace=0.03,
                               dpi=100)
Example #10
def plot_normals(algorithms, scenes, n_rows=2, subdir=SUBDIR, fs=15):

    # prepare figure grid
    n_vis_types = 3
    n_entries_per_row = int(np.ceil((len(algorithms) + 1) / float(n_rows)))
    rows, cols = (n_vis_types * n_rows), n_entries_per_row + 1

    # initialize metrics
    metric_mae_contin = MAEContinSurf()
    metric_mae_planes = MAEPlanes()

    for scene in scenes:
        h, w = scene.get_shape()

        # prepare figure and colorbar size
        fig = plt.figure(figsize=(cols * 1.7, 1.45 * rows * 1.5))
        grid, cb_height, cb_width = plotting.get_grid_with_colorbar(
            rows, cols, scene)
        colorbar_args = {
            "height": cb_height,
            "width": cb_width,
            "colorbar_bins": 7,
            "fontsize": fs
        }

        # some scenes have no evaluation mask for planar, non-planar or both surfaces
        try:
            mask_contin = metric_mae_contin.get_evaluation_mask(scene)
        except IOError:
            log.warning("No evaluation mask found for non-planar "
                        "continuous surfaces on: %s" %
                        scene.get_display_name())
            mask_contin = np.zeros((h, w), dtype=bool)
        try:
            mask_planes = metric_mae_planes.get_evaluation_mask(scene)
        except IOError:
            log.warning("No evaluation mask found for planar "
                        "continuous surfaces on: %s" %
                        scene.get_display_name())
            mask_planes = np.zeros((h, w), dtype=bool)

        # plot ground truth column
        gt = scene.get_gt()
        _plot_normals_entry(scene,
                            gt,
                            gt,
                            mask_planes,
                            mask_contin,
                            "GT",
                            metric_mae_contin,
                            metric_mae_planes,
                            0,
                            grid,
                            n_entries_per_row,
                            n_vis_types,
                            cols,
                            colorbar_args,
                            fs=fs)

        # plot algorithm columns
        for idx_a, algorithm in enumerate(algorithms):
            algo_result = misc.get_algo_result(algorithm, scene)

            _plot_normals_entry(scene,
                                algo_result,
                                gt,
                                mask_planes,
                                mask_contin,
                                algorithm.get_display_name(),
                                metric_mae_contin,
                                metric_mae_planes,
                                idx_a + 1,
                                grid,
                                n_entries_per_row,
                                n_vis_types,
                                cols,
                                colorbar_args,
                                fs=fs)

        plt.suptitle("Angular Error: non-planar / planar surfaces",
                     fontsize=fs)

        # save figure
        fig_path = plotting.get_path_to_figure("normals_%s" % scene.get_name(),
                                               subdir=subdir)
        plotting.save_tight_figure(fig,
                                   fig_path,
                                   hide_frames=True,
                                   hspace=0.03,
                                   wspace=0.03)
Example #11
def plot_general_overview(algorithms,
                          scenes,
                          metrics,
                          fig_name=None,
                          subdir=SUBDIR,
                          fs=11):
    n_vis_types = len(metrics)

    # prepare figure grid
    rows, cols = len(scenes) * n_vis_types, len(algorithms) + 1
    fig = plt.figure(figsize=(cols * 1.4, 1.15 * rows * 1.6))
    grid, cb_height, cb_width = plotting.get_grid_with_colorbar(
        rows, cols, scenes[0])

    for idx_s, scene in enumerate(scenes):
        gt = scene.get_gt()
        applicable_metrics = scene.get_applicable_metrics(metrics)

        for idx_a, algorithm in enumerate(algorithms):
            algo_result = misc.get_algo_result(algorithm, scene)

            for idx_m, metric in enumerate(metrics):
                idx = (n_vis_types * idx_s + idx_m) * cols + idx_a
                ylabel = metric.get_display_name()
                plt.subplot(grid[idx])

                if metric in applicable_metrics:
                    score, vis = metric.get_score(algo_result,
                                                  gt,
                                                  scene,
                                                  with_visualization=True)

                    cb = plt.imshow(vis, **settings.metric_args(metric))

                    # add algorithm name and metric score on top row
                    if idx_s == 0 and idx_m == 0:
                        plt.title("%s\n%0.2f" %
                                  (algorithm.get_display_name(), score),
                                  fontsize=fs)
                    else:
                        plt.title("%0.2f" % score, fontsize=fs)

                    # add colorbar to last column
                    if idx_a == len(algorithms) - 1:
                        plotting.add_colorbar(
                            grid[idx + 1],
                            cb,
                            cb_height,
                            cb_width,
                            colorbar_bins=metric.colorbar_bins,
                            fontsize=fs)

                    # add metric name to first column
                    if idx_a == 0:
                        plt.ylabel(ylabel)

                else:
                    if idx_a == 0:
                        log.warning("Metric %s not applicable for scene %s." %
                                    (metric.get_display_name(),
                                     scene.get_display_name()))
                        plt.ylabel(ylabel + "\n(not applicable)")

    # save figure
    if fig_name is None:
        fig_name = "metric_overview_%s_%s" % ("_".join(
            metric.get_id()
            for metric in metrics), "_".join(scene.get_name()
                                             for scene in scenes))

    fig_path = plotting.get_path_to_figure(fig_name, subdir=subdir)
    plotting.save_tight_figure(fig,
                               fig_path,
                               hide_frames=True,
                               hspace=0.01,
                               wspace=0.01)
Example #12
def plot_scores(scores_metrics_algos,
                algorithms,
                axis_labels,
                fig_name,
                subdir,
                title,
                max_per_metric=None,
                fs=18):
    # prepare figure
    fig = plt.figure(figsize=(10, 10))
    n_circles = 4 + 1

    rect = [0.1, 0.1, 0.8, 0.8]  # left, bottom, width, height
    n_axes = np.shape(scores_metrics_algos)[0]
    axes = [
        fig.add_axes(rect, projection="polar", label="axes%d" % i)
        for i in range(n_axes)
    ]
    angles = np.arange(90, 90 + 360, 360.0 / n_axes) % 360

    # add metric labels
    axes[0].set_thetagrids(angles, labels=axis_labels, fontsize=16)

    # hide default labeling
    for ax in axes[1:]:
        ax.patch.set_visible(False)
        ax.grid("off")
        ax.xaxis.set_visible(False)

    # compute metric values at axis intersections
    axes_values = []

    if max_per_metric is None:
        max_per_metric = np.ma.max(scores_metrics_algos, axis=1) * 1.2

    for vmax in max_per_metric:
        if vmax is not np.ma.masked:
            steps = list(np.linspace(0, vmax, n_circles))
            steps = steps[:-1]
            axes_values.append(steps)
        else:
            steps = [np.nan] * (n_circles - 1)
            axes_values.append(steps)

    # add metric values at axis intersections
    for ax, angle, labels in zip(axes, angles, axes_values):
        max_labels = max(labels)
        if not np.isnan(max_labels):
            decimals = int(np.ceil(np.log10(max_labels)))
            if decimals <= -1:
                str_labels = [('%0.3f' % e) for e in labels[1:]]
            elif decimals == 0:
                str_labels = [('%0.2f' % e) for e in labels[1:]]
            else:
                str_labels = [('%0.1f' % e) for e in labels[1:]]
            # add zero to inner circle
            str_labels = ["0"] + str_labels
        else:
            str_labels = [""] * len(labels)

        ax.set_rgrids(range(1, n_circles + 1),
                      angle=angle,
                      labels=str_labels,
                      fontsize=14)
        ax.spines["polar"].set_visible(False)
        ax.set_ylim(0, n_circles)

    # add first angle at the end to close the line-loop
    angle = np.deg2rad(np.r_[angles, angles[0]])

    # plot one line per algorithm, passing through all metrics
    for idx_a, algorithm in enumerate(algorithms):
        metric_scores = np.full(np.shape(scores_metrics_algos[:, idx_a]),
                                fill_value=np.nan)

        for idx_m in range(len(axis_labels)):
            step = axes_values[idx_m][1] - axes_values[idx_m][0]
            scale_factor = n_circles / (float(max_per_metric[idx_m]) + step)
            adjusted_value = (scores_metrics_algos[idx_m, idx_a] +
                              step) * scale_factor
            metric_scores[idx_m] = adjusted_value

        # add first score at end to close the line-loop
        metric_scores = np.r_[metric_scores, metric_scores[0]]
        ax.plot(angle,
                metric_scores,
                label=algorithm.get_display_name(),
                ls=algorithm.get_line_style(),
                lw=2,
                alpha=1,
                color=algorithm.get_color())

    plt.gca().legend(loc='upper right',
                     bbox_to_anchor=(1.45, 1.1),
                     frameon=False,
                     prop={'size': fs},
                     labelspacing=0.2)
    plt.title(title + "\n\n", fontsize=fs + 4)
    plotting.save_fig(fig, plotting.get_path_to_figure(fig_name,
                                                       subdir=subdir))
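The radial placement maps a score of 0 onto the innermost circle and the per-metric maximum onto the outermost one: with step = vmax / (n_circles - 1), the expression adjusted_value = (score + step) * n_circles / (vmax + step) reduces to score / step + 1. A tiny numeric check of that arithmetic (values chosen for illustration):

import numpy as np

n_circles = 5
vmax = 4.0
steps = list(np.linspace(0, vmax, n_circles))[:-1]  # [0.0, 1.0, 2.0, 3.0]
step = steps[1] - steps[0]                          # 1.0
scale_factor = n_circles / (vmax + step)            # 1.0

for score in [0.0, 1.0, 2.5, 4.0]:
    print(score, "->", (score + step) * scale_factor)  # 1.0, 2.0, 3.5, 5.0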
Example #13
    def plot_algo_disp_vs_gt_disp(self, algorithms, subdir="stratified"):
        self.set_low_gt_scale()

        # prepare data
        gt = self.get_gt()
        m_eval = self.get_boundary_mask()
        mask_names = ["Sphere In", "Sphere Out"]
        masks = [self.get_sphere_in()*m_eval, self.get_sphere_out()*m_eval]

        factor = 1000.0
        gt_rounded = np.asarray(gt * factor, dtype=int)
        disp_values = np.unique(gt_rounded)
        n_values = np.size(disp_values)

        # prepare figure
        fig = plt.figure(figsize=(14, 6))
        rows, cols = 1, 2
        fontsize = 14
        legend_lines = []
        legend_labels = []

        for algorithm in algorithms:
            algo_result = misc.get_algo_result(algorithm, self)

            # go through ground truth disparity values
            for idx_d in range(n_values):
                current_disp = disp_values[idx_d]
                m_disp = (gt_rounded == current_disp)

                # find median disparity of algorithm result at image regions
                # of given ground truth disparity value
                for idx_m, (mask, mask_name) in enumerate(zip(masks, mask_names)):
                    algo_disps = algo_result[m_disp * mask]

                    if np.size(algo_disps) > 0:
                        median = np.median(algo_disps)
                        plt.subplot(rows, cols, idx_m+1)
                        s = plt.scatter(current_disp/factor, median,
                                        marker="o", c=algorithm.get_color(), alpha=0.8, s=5, lw=0)

            legend_lines.append(s)
            legend_labels.append(algorithm.get_display_name())

        # finalize figure attributes
        for idx_m, (mask, mask_name) in enumerate(zip(masks, mask_names)):
            plt.subplot(rows, cols, idx_m+1)
            vmin = np.min(gt_rounded[mask]) / factor
            vmax = np.max(gt_rounded[mask]) / factor
            plt.xlim([vmin, vmax])
            plt.ylim([vmin, vmax])
            plt.xlabel("Ground truth disparities", fontsize=fontsize)
            plt.ylabel("Algorithm disparities", fontsize=fontsize)
            plt.title(mask_name, fontsize=fontsize)
            plotting.hide_upper_right()

        legend = plt.legend(legend_lines, legend_labels, frameon=False, ncol=1, scatterpoints=1,
                            title="Algorithms:", bbox_to_anchor=(1.25, .85), borderaxespad=0.0)
        for idx in range(len(legend.legendHandles)):
            legend.legendHandles[idx]._sizes = [22]
        plt.suptitle("Ground Truth Disparities vs. Algorithm Disparities", fontsize=fontsize)

        fig_path = plotting.get_path_to_figure("pyramids_disp_disp", subdir=subdir)
        plotting.save_tight_figure(fig, fig_path, remove_ticks=False,
                                   hspace=0.2, wspace=0.3, padding_top=0.88)
Example #14
def plot(algorithms,
         scenes,
         thresholds=THRESHOLDS,
         with_cached_scores=False,
         penalize_missing_pixels=False,
         title=None,
         subdir="bad_pix_series",
         fig_name=None,
         fig_size=(16, 6),
         legend_pos=(1.19, -0.04),
         marker_size=2.3,
         fs=16):

    # prepare scores
    fname_scores = get_fname_scores(scenes)
    if not op.isfile(fname_scores) or not with_cached_scores:
        percentages_algo_thresh = compute_scores(
            algorithms,
            scenes,
            thresholds,
            penalize_missing_pixels=penalize_missing_pixels)
        if with_cached_scores:
            fname_scores = get_fname_scores(scenes)
            file_io.check_dir_for_fname(fname_scores)
            with open(fname_scores, "w") as f:
                pickle.dump(percentages_algo_thresh, f)
    else:
        with open(fname_scores, "r") as f:
            percentages_algo_thresh = pickle.load(f)

    # prepare figure
    fig = plt.figure(figsize=fig_size)
    x_ticks = np.arange(len(thresholds))

    # plot BadPix scores per algorithm
    for idx_a, algorithm in enumerate(algorithms):
        plt.plot(x_ticks,
                 percentages_algo_thresh[idx_a, :],
                 alpha=0.9,
                 color=algorithm.get_color(),
                 lw=1.3,
                 ls=algorithm.get_line_style(),
                 label=algorithm.get_display_name(),
                 marker="D",
                 markersize=marker_size,
                 markeredgecolor="none")

    # add vertical lines for special thresholds
    indices = [i for i, t in enumerate(thresholds) if t in [0.01, 0.03, 0.07]]
    for idx in indices:
        plt.plot([idx, idx], [0, 100], lw=1., c="k", alpha=0.9, ls=":")

    # add horizontal line for Q25
    plt.plot(x_ticks, [25] * len(x_ticks), lw=1., c="k", alpha=0.9, ls=":")

    # add axis ticks and labels
    plt.xticks(x_ticks, ["%0.03f" % t for t in thresholds],
               rotation=90,
               fontsize=fs)
    plt.xlabel("Threshold for absolute disparity error", fontsize=fs)
    plt.ylabel("Percentage of pixels\nbelow threshold", fontsize=fs)
    plt.ylim([0, 103])

    # finalize figure
    if title is None:
        title = "Scenes: %s" % ", ".join(scene.get_display_name()
                                         for scene in scenes)
    plt.title(title, fontsize=fs)
    plotting.hide_upper_right()
    plt.legend(frameon=False,
               loc="lower right",
               bbox_to_anchor=legend_pos,
               prop={'size': fs},
               labelspacing=0.2)

    # save figure
    if fig_name is None:
        fig_name = "bad_pix_series_%s" % ("_".join(scene.get_name()
                                                   for scene in scenes))
    fig_path = plotting.get_path_to_figure(fig_name, subdir)
    plotting.save_tight_figure(fig,
                               fig_path,
                               hide_frames=False,
                               remove_ticks=False,
                               hspace=0.07)
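compute_scores and get_fname_scores are defined elsewhere in the module. Based on the axis labels, each entry of percentages_algo_thresh appears to be the percentage of evaluated pixels whose absolute disparity error stays below the respective threshold, pooled over all scenes. A rough sketch under that assumption (mask and missing-pixel handling are guesses, and misc/scene helpers are taken from the surrounding examples):

import numpy as np

def compute_scores(algorithms, scenes, thresholds, penalize_missing_pixels=False):
    # Sketch: percentage of pixels per algorithm and threshold with
    # abs(algo - gt) <= threshold, pooled over all scenes.
    percentages = np.zeros((len(algorithms), len(thresholds)))

    for idx_a, algorithm in enumerate(algorithms):
        abs_diffs = []
        for scene in scenes:
            gt = scene.get_gt()
            algo_result = misc.get_algo_result(algorithm, scene)  # toolkit helper
            diff = np.abs(algo_result - gt)[scene.get_boundary_mask()]
            if not penalize_missing_pixels:
                diff = diff[np.isfinite(diff)]  # drop missing pixels from the evaluation
            abs_diffs.append(diff)
        abs_diffs = np.concatenate(abs_diffs)

        for idx_t, thresh in enumerate(thresholds):
            # NaN comparisons are False, so kept missing pixels count as "bad"
            percentages[idx_a, idx_t] = 100.0 * np.mean(abs_diffs <= thresh)

    return percentages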
Example #15
def plot(algorithms,
         scenes,
         meta_algo,
         subdir="meta_algo_comparisons",
         fig_name=None,
         with_gt_row=False,
         fs=12):

    # prepare figure
    rows, cols = len(algorithms) + int(with_gt_row), len(scenes) * 3 + 1
    fig = plt.figure(figsize=(cols * 1.3, rows * 1.5))
    grid, cb_height, cb_width = plotting.get_grid_with_colorbar(
        rows, cols, scenes[0])
    colorbar_args = {
        "height": cb_height * 0.8,
        "width": cb_width,
        "colorbar_bins": 4,
        "fontsize": fs
    }

    for idx_s, scene in enumerate(scenes):
        gt = scene.get_gt()
        meta_algo_result = misc.get_algo_result(meta_algo, scene)
        add_label = idx_s == 0  # is first column
        add_colorbar = idx_s == len(scenes) - 1  # is last column

        # plot one row per algorithm
        for idx_a, algorithm in enumerate(algorithms):
            algo_result = misc.get_algo_result(algorithm, scene)
            add_title = idx_a == 0  # is top row

            idx = idx_a * cols + 3 * idx_s

            # disparity map
            plt.subplot(grid[idx])
            plt.imshow(algo_result, **settings.disp_map_args(scene))
            if add_title:
                plt.title("DispMap", fontsize=fs)
            if add_label:
                plt.ylabel(algorithm.get_display_name(), fontsize=fs)

            # error map: gt - algo
            plt.subplot(grid[idx + 1])
            cb1 = plt.imshow(gt - algo_result,
                             **settings.diff_map_args(vmin=-.1, vmax=.1))
            if add_title:
                plt.title("GT-Algo", fontsize=fs)

            # error map: |meta-gt| - |algo-gt|
            plt.subplot(grid[idx + 2])
            median_diff = (np.abs(meta_algo_result - gt) -
                           np.abs(algo_result - gt))
            cb2 = plt.imshow(median_diff,
                             interpolation="none",
                             cmap=cm.RdYlGn,
                             vmin=-.05,
                             vmax=.05)
            if add_title:
                plt.title(meta_algo.get_display_name().replace("PerPix", ""),
                          fontsize=fs)

            if add_colorbar:
                if idx_a % 2 == 0:
                    plotting.add_colorbar(grid[idx + 3], cb1, **colorbar_args)
                else:
                    plotting.add_colorbar(grid[idx + 3], cb2, **colorbar_args)

        if with_gt_row:
            idx = len(algorithms) * cols + 3 * idx_s

            plt.subplot(grid[idx])
            plt.imshow(gt, **settings.disp_map_args(scene))
            plt.xlabel("GT", fontsize=fs)

            if add_label:
                plt.ylabel("Reference")

            plt.subplot(grid[idx + 1])
            cb1 = plt.imshow(np.abs(gt - meta_algo_result),
                             **settings.abs_diff_map_args())
            plt.xlabel("|GT-%s|" % meta_algo.get_display_name(),
                       fontsize=fs - 2)

            if add_colorbar:
                plotting.add_colorbar(grid[idx + 3], cb1, **colorbar_args)

    if fig_name is None:
        scene_names = "_".join(s.get_name() for s in scenes)
        fig_name = "%s_comparison_%s" % (meta_algo.get_name(), scene_names)
    fig_path = plotting.get_path_to_figure(fig_name, subdir=subdir)
    plotting.save_tight_figure(fig,
                               fig_path,
                               hide_frames=True,
                               hspace=0.02,
                               wspace=0.0)
Example #16
    def plot_algo_overview(self,
                           algorithms,
                           with_metric_vis=True,
                           subdir="algo_overview",
                           fs=14):
        self.set_scale_for_algo_overview()
        metrics = self.get_scene_specific_metrics()
        n_metrics = len(metrics)

        if not with_metric_vis:
            rows, cols = 2 + n_metrics, len(algorithms) + 2
            fig = plt.figure(figsize=(2.6 * len(algorithms), 4.9))
            offset = 0
        else:
            rows, cols = 2 + 2 * n_metrics, len(algorithms) + 2
            fig = plt.figure(figsize=(2.6 * len(algorithms), rows + 3))
            offset = n_metrics

        labelpad = -15
        hscale, wscale = 7, 5
        width_ratios = [wscale] * (len(algorithms) + 1) + [1]
        height_ratios = [hscale] * (rows - n_metrics) + [1] * n_metrics
        gs = gridspec.GridSpec(rows,
                               cols,
                               height_ratios=height_ratios,
                               width_ratios=width_ratios)

        gt = self.get_gt()
        dummy = np.ones((self.get_height() // hscale, self.get_width()))
        cb_height, w = np.shape(gt)
        cb_width = w / float(wscale)

        # first column (gt, center view, ...)
        plt.subplot(gs[0])
        plt.imshow(gt, **settings.disp_map_args(self))
        plt.title("Ground Truth", fontsize=fs)
        plt.ylabel("Disparity Map", fontsize=fs)

        plt.subplot(gs[cols])
        plt.imshow(self.get_center_view())
        plt.ylabel("diff: GT - Algo", fontsize=fs)

        for idx_m, metric in enumerate(metrics):
            plt.subplot(gs[(2 + idx_m + offset) * cols])
            plt.xlabel(metric.get_short_name(), labelpad=labelpad, fontsize=fs)
            plt.imshow(dummy, cmap="gray_r")

        # algorithm columns
        for idx_a, algorithm in enumerate(algorithms):
            log.info("Processing algorithm: %s" % algorithm)
            algo_result = misc.get_algo_result(algorithm, self)

            # algorithm disparities
            plt.subplot(gs[idx_a + 1])
            plt.title(algorithm.get_display_name(), fontsize=fs)
            cm1 = plt.imshow(algo_result, **settings.disp_map_args(self))

            # algorithm diff map
            plt.subplot(gs[cols + idx_a + 1])
            cm2 = plt.imshow(gt - algo_result, **settings.diff_map_args())

            # add colorbar if last column
            if idx_a == (len(algorithms) - 1):
                plotting.add_colorbar(gs[idx_a + 2],
                                      cm1,
                                      cb_height,
                                      cb_width,
                                      colorbar_bins=5,
                                      fontsize=fs - 4)
                plotting.add_colorbar(gs[cols + idx_a + 2],
                                      cm2,
                                      cb_height,
                                      cb_width,
                                      colorbar_bins=5,
                                      fontsize=fs - 4)

            # score and background color for metrics
            for idx_m, metric in enumerate(metrics):

                if with_metric_vis:
                    plt.subplot(gs[(2 + idx_m) * cols + idx_a + 1])
                    score, vis = metric.get_score(algo_result,
                                                  gt,
                                                  self,
                                                  with_visualization=True)
                    cm3 = plt.imshow(vis, **settings.metric_args(metric))

                    if idx_a == 0:
                        plt.ylabel(metric.get_short_name(), fontsize=fs)
                    elif idx_a == (len(algorithms) - 1):
                        plotting.add_colorbar(
                            gs[(2 + idx_m) * cols + idx_a + 2],
                            cm3,
                            cb_height,
                            cb_width,
                            colorbar_bins=metric.colorbar_bins,
                            fontsize=fs - 4)

                else:
                    score = metric.get_score(algo_result, gt, self)

                plt.subplot(gs[(2 + idx_m + offset) * cols + idx_a + 1])
                plt.imshow(
                    dummy * score,
                    **settings.score_color_args(vmin=metric.vmin,
                                                vmax=metric.vmax))
                plt.xlabel(metric.format_score(score),
                           labelpad=labelpad,
                           fontsize=fs)

        fig_name = "algo_overview_" + self.get_name(
        ) + with_metric_vis * "_vis"
        fig_path = plotting.get_path_to_figure(fig_name, subdir=subdir)
        plotting.save_tight_figure(fig,
                                   fig_path,
                                   wspace=0.04,
                                   hide_frames=True)
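The dummy * score rows fill a thin grid cell with a constant value, so imshow with fixed vmin/vmax turns each metric score into a colored badge below the corresponding column. A self-contained illustration of that trick (colormap and limits picked arbitrarily, not the toolkit's settings.score_color_args):

import numpy as np
import matplotlib.pyplot as plt

score = 0.42
dummy = np.ones((10, 70))  # thin stripe matching the grid cell's aspect ratio
plt.imshow(dummy * score, cmap="RdYlGn_r", vmin=0, vmax=1)
plt.xlabel("%0.2f" % score, labelpad=-15)
plt.xticks([])
plt.yticks([])
plt.show()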