Example #1
def plot_high_accuracy(algorithms, scenes, subdir="overview"):
    metrics = [BadPix(0.07), BadPix(0.01), Quantile(25)]
    metric_overviews.plot_general_overview(algorithms,
                                           scenes,
                                           metrics,
                                           fig_name="high_accuracy",
                                           subdir=subdir)
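A minimal driver sketch for the overview plot above; load_algorithms() is a hypothetical stand-in for however Algorithm objects are constructed, while misc.get_training_scenes() appears in Example #2:

# Usage sketch -- load_algorithms() is NOT part of the toolkit shown here.
algorithms = load_algorithms(["epi1", "lf_occ"])  # hypothetical helper
scenes = misc.get_training_scenes()               # taken from Example #2
plot_high_accuracy(algorithms, scenes, subdir="overview")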
Example #2
def plot_radar_charts(algorithms, log_runtime=True, with_test_scenes=False, subdir="radar"):

    # photorealistic training
    photorealistic_metrics = [MSE(),
                              BadPix(0.07),
                              BumpinessPlanes(name="Planar\nSurfaces"),
                              BumpinessContinSurf(name="Continuous\nSurfaces"),
                              FineThinning(name="Fine Structure\nThinning"),
                              FineFattening(name="Fine Structure\nFattening"),
                              Discontinuities(name="Discontinuity\nRegions"),
                              Runtime(log=log_runtime)]

    metric_names = [m.get_display_name() for m in photorealistic_metrics]
    max_per_metric = [12, 40, 4, 4, 40, 80, 80, 6]
    radar_chart.plot(algorithms,
                     scenes=misc.get_training_scenes(),
                     metrics=photorealistic_metrics,
                     axis_labels=metric_names,
                     average="mean",
                     max_per_metric=max_per_metric,
                     title="Mean Scores for Training Scenes",
                     fig_name="radar_training",
                     subdir=subdir)

    # stratified
    metrics = [MSE(), BadPix(0.07),
               DotsBackgroundMSE(), MissedDots(),
               BackgammonFattening(), BackgammonThinning(),
               DarkStripes(), BrightStripes(), StripesLowTexture(),
               PyramidsSlantedBumpiness(), PyramidsParallelBumpiness(),
               Runtime(log=log_runtime)]

    scenes = [Backgammon(gt_scale=10.0), Pyramids(gt_scale=1.0),
              Dots(gt_scale=10.0), Stripes(gt_scale=10.0)]

    metric_names = [m.get_display_name().replace(":", ":\n") for m in metrics]
    max_per_metric = [12, 32, 6, 120, 40, 8, 64, 64, 64, 4, 4, 6]
    radar_chart.plot(algorithms,
                     scenes=scenes,
                     metrics=metrics,
                     axis_labels=metric_names,
                     average="mean",
                     max_per_metric=max_per_metric,
                     title="Mean Scores for Stratified Scenes",
                     fig_name="radar_stratified",
                     subdir=subdir)

    # photorealistic test
    if with_test_scenes:
        metric_names = [m.get_display_name() for m in photorealistic_metrics]
        max_per_metric = [16, 40, 4, 4, 16, 80, 80, 6]
        radar_chart.plot(algorithms,
                         scenes=misc.get_test_scenes(),
                         metrics=photorealistic_metrics,
                         axis_labels=metric_names,
                         average="mean",
                         max_per_metric=max_per_metric,
                         title="Mean Scores for Test Scenes",
                         fig_name="radar_test",
                         subdir=subdir)
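Each max_per_metric entry caps one radar axis, so the list must stay aligned with the metric list. A small guard wrapper (illustrative, not part of the toolkit) makes that explicit:

def plot_radar_checked(algorithms, scenes, metrics, max_per_metric, **kwargs):
    # A length mismatch would silently shift axis limits onto the
    # wrong metrics, so fail fast instead.
    assert len(metrics) == len(max_per_metric), "one axis limit per metric"
    radar_chart.plot(algorithms,
                     scenes=scenes,
                     metrics=metrics,
                     axis_labels=[m.get_display_name() for m in metrics],
                     max_per_metric=max_per_metric,
                     **kwargs)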
Example #3
def plot_radar_charts(algorithms, log_runtime=True, subdir="radar"):
    base_metrics = [
        Runtime(log=log_runtime),
        MSE(),
        Quantile(25),
        BadPix(0.01),
        BadPix(0.03),
        BadPix(0.07)
    ]

    region_metrics = [
        MAEPlanes(),
        MAEContinSurf(),
        BumpinessPlanes(),
        BumpinessContinSurf(),
        FineFattening(),
        FineThinning(),
        Discontinuities()
    ]

    # stratified scenes and applicable metrics
    metrics = base_metrics + misc.get_stratified_metrics()
    metric_names = [m.get_display_name().replace(":", "\n") for m in metrics]
    max_per_metric = [5, 16, 2, 120, 80, 40, 40, 8, 6, 6, 24, 128, 48, 64, 100]
    radar_chart.plot(algorithms,
                     scenes=misc.get_stratified_scenes(),
                     metrics=metrics,
                     axis_labels=metric_names,
                     max_per_metric=max_per_metric,
                     title="Median Scores for Stratified Scenes",
                     fig_name="radar_stratified",
                     subdir=subdir)

    # photorealistic scenes and applicable metrics
    metrics = base_metrics + region_metrics
    metric_names = [m.get_display_name().replace(" ", "\n") for m in metrics]
    max_per_metric = [5, 12, 2, 128, 72, 32, 80, 80, 4, 4, 80, 16, 72]
    radar_chart.plot(algorithms,
                     scenes=misc.get_training_scenes() +
                     misc.get_test_scenes(),
                     metrics=metrics,
                     axis_labels=metric_names,
                     max_per_metric=max_per_metric,
                     title="Median Scores for Test and Training Scenes",
                     fig_name="radar_photorealistic",
                     subdir=subdir)

    compare_relative_ranks(algorithms,
                           misc.get_training_scenes(),
                           metrics,
                           all_but=0)
    compare_relative_ranks(algorithms,
                           misc.get_training_scenes(),
                           metrics,
                           all_but=1)
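As with Example #1, only the algorithm list is needed to drive both radar variants; the loader is again hypothetical:

algorithms = load_algorithms(["epi1", "lf_occ", "mv"])  # hypothetical helper
plot_radar_charts(algorithms, log_runtime=True, subdir="radar")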
Example #4
    def __call__(self, parser, namespace, values, option_string=None):
        import re  # used for the shorthand matching below
        from toolkit.metrics import BadPix, Quantile
        metrics = []

        if not values:
            metrics = self.metric_options["all"]
        else:
            for value in values:
                try:
                    metrics += self.metric_options[value]
                except KeyError:
                    # try to match BadPix metric with threshold
                    if re.match("^badpix\d{3}", value):
                        threshold = float((value[6] + "." + value[7:]))
                        metrics.append(BadPix(threshold))
                    # try to match Quantile metric with percentage
                    elif re.match(r"^q\d{2}", value):
                        percentage = int(value[1:])
                        metrics.append(Quantile(percentage))
                    else:
                        parser.error("Could not find metrics for: %s.\n  "
                                     "Available options are: %s." %
                                     (value, ", ".join(
                                         sorted(self.metric_options.keys()))))

        # save result in action destination
        setattr(namespace, self.dest, metrics)
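The method above belongs to an argparse.Action subclass whose class header is not shown. A sketch of the wiring; the class name and option table here are assumptions:

import argparse

class MetricAction(argparse.Action):  # hypothetical class name
    # shorthand -> list of metric instances; only the "all" key is
    # required by the snippet above, further keys are illustrative
    metric_options = {"all": get_general_metrics()}  # see Example #8

    # __call__ as defined in the example above

parser = argparse.ArgumentParser()
parser.add_argument("-m", "--metrics", nargs="*", action=MetricAction,
                    help="e.g. badpix007 -> BadPix(0.07), q25 -> Quantile(25)")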
Example #5
def compute_scores(algorithms,
                   scenes,
                   thresholds=THRESHOLDS,
                   penalize_missing_pixels=True):
    percentages_algo_thresh = np.full((len(algorithms), len(thresholds)),
                                      fill_value=np.nan)
    bad_pix_metric = BadPix()
    max_diff = np.max(thresholds)

    for idx_a, algorithm in enumerate(algorithms):
        combined_diffs = np.empty(0)  # per-scene diffs are appended below
        log.info('Computing BadPix scores for: %s' %
                 algorithm.get_display_name())

        for scene in scenes:
            gt = scene.get_gt()
            algo_result = misc.get_algo_result(algorithm, scene)
            diffs = np.abs(algo_result - gt)

            mask_valid = (misc.get_mask_valid(algo_result) *
                          misc.get_mask_valid(diffs))
            mask_eval = bad_pix_metric.get_evaluation_mask(scene)

            if penalize_missing_pixels:
                # penalize all invalid algorithm pixels with maximum error
                diffs[~mask_valid] = max_diff + 100
                diffs = diffs[mask_eval]
            else:
                diffs = diffs[mask_eval * mask_valid]

            combined_diffs = np.concatenate((combined_diffs, diffs))

        # compute BadPix score for each threshold
        for idx_t, t in enumerate(thresholds):
            bad_pix_metric.thresh = t
            bad_pix_score = bad_pix_metric.get_score_from_diffs(combined_diffs)
            percentages_algo_thresh[idx_a, idx_t] = 100 - bad_pix_score

    return percentages_algo_thresh
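The returned matrix holds, per algorithm and threshold, the percentage of pixels within that threshold (100 minus the BadPix score), which plots directly as accumulation curves. A hedged plotting sketch; axis labels are assumptions:

import matplotlib.pyplot as plt

percentages = compute_scores(algorithms, scenes)  # (n_algos, n_thresholds)
for algorithm, curve in zip(algorithms, percentages):
    plt.plot(THRESHOLDS, curve, label=algorithm.get_display_name())
plt.xlabel("BadPix threshold")
plt.ylabel("% pixels within threshold")
plt.legend()
plt.show()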
Example #6
    def get_score(self, algo_result, gt, scene, with_visualization=False):
        grid = scene.get_boxes()
        dots_by_size = scene.get_dots_by_size()
        bad_pix = BadPix(thresh=self.thresh)

        vis = np.zeros(np.shape(algo_result), dtype=bool)  # np.bool is removed in NumPy >= 1.24
        diffs = np.abs(gt - algo_result)

        box_ids = sorted(list(np.unique(grid)))
        box_ids.remove(0)
        n_boxes = np.size(box_ids)

        dot_labels = list(np.unique(dots_by_size))
        # keep only the eight largest dots per box (labels 1 through 8)
        dot_labels = [dl for dl in dot_labels if 0 < dl < 9]
        n_dots = len(dot_labels)
        total_dots = n_dots * n_boxes
        detected_dots = 0

        for box_id in box_ids:
            m_box = (grid == box_id)

            for idx_d in range(n_dots):
                dot_mask = (dots_by_size == idx_d+1) * m_box
                bad_pix_on_dot = bad_pix.get_score_from_diffs(diffs[dot_mask])
                if bad_pix_on_dot < self.missed_dot_bad_pix:
                    detected_dots += 1
                else:
                    vis[dot_mask] = True

        missed_dots = total_dots - detected_dots
        score = misc.percentage(total_dots, missed_dots)

        if not with_visualization:
            return score

        vis = plotting.adjust_binary_vis(vis)
        return score, vis
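The self.thresh / self.missed_dot_bad_pix attributes and the dots-per-box logic suggest this is the MissedDots metric from Example #2 (an inference, not stated in the snippet); a call sketch under that assumption:

scene = Dots(gt_scale=10.0)                    # constructed as in Example #2
gt = scene.get_gt()                            # accessors as used in Example #5
algo_result = misc.get_algo_result(algorithm, scene)
score, vis = MissedDots().get_score(algo_result, gt, scene,
                                    with_visualization=True)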
Example #7
    def plot_algo_overview(self, algorithms, subdir="algo_overview", fs=6):
        accv_metrics = [MSE(), BadPix(0.07), BumpinessPlanes(), BumpinessContinSurf(),
                        Discontinuities(), FineFattening(), FineThinning()]
        metrics_low_res = [m for m in self.get_applicable_metrics_low_res() if m in accv_metrics]
        metrics_high_res = [m for m in self.get_applicable_metrics_high_res() if m in accv_metrics]

        # prepare figure
        rows = len(metrics_low_res + metrics_high_res) + 1
        cols = len(algorithms) + 1
        fig = plt.figure(figsize=(cols, rows*1.1))
        grids = self._get_grids(fig, rows, cols, axes_pad=-0.2)

        # center view on top left grid cell
        self.set_high_gt_scale()
        plt.sca(grids[0][0])
        plt.imshow(self.get_center_view())
        plt.title("Center View", fontsize=fs)
        plt.ylabel("Disparity Map", fontsize=fs)

        # mask visualizations + algorithm disparity maps + metric visualizations
        log.info("Computing scores and visualizations for low resolution metrics.")
        self.set_low_gt_scale()
        self.plot_metric_rows(grids, algorithms, metrics_low_res, offset=0, fontsize=fs)

        log.info("Computing scores and visualizations for high resolution metrics.")
        self.set_high_gt_scale()
        self.plot_metric_rows(grids, algorithms, metrics_high_res,
                              offset=len(metrics_low_res), fontsize=fs)

        # finalize figure
        for grid in grids:
            plotting.remove_ticks_from_axes(grid.axes_all)
            plotting.remove_frames_from_axes(grid.axes_all)
        plt.suptitle(self.get_display_name(), fontsize=fs+2)

        fig_path = plotting.get_path_to_figure("algo_overview_%s" % self.get_name(), subdir=subdir)
        plotting.save_fig(fig, fig_path, pad_inches=0.1)
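Given the self.get_center_view() and set_*_gt_scale() calls, plot_algo_overview is a method on a scene object. A call sketch, assuming the training scenes expose it:

scene = misc.get_training_scenes()[0]  # assumption: scenes provide this method
scene.plot_algo_overview(algorithms, subdir="algo_overview", fs=6)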
Example #8
def get_general_metrics():
    from toolkit.metrics import MSE, BadPix, Quantile
    return [MSE(), BadPix(0.01), BadPix(0.03), BadPix(0.07), Quantile(25)]
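These defaults compose directly with the overview plot from Example #1:

metrics = get_general_metrics()
metric_overviews.plot_general_overview(algorithms,
                                       scenes,
                                       metrics,
                                       fig_name="general_overview")  # fig_name is illustrative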