Example #1
def add_scores(metrics, scene, algo_dir, tgt_dir, scores, visualize):
    gt = scene.get_gt()
    algo_result = misc.get_algo_result_from_dir(algo_dir, scene)

    for metric in metrics:

        if visualize:
            score, vis = metric.get_score(algo_result,
                                          gt,
                                          scene,
                                          with_visualization=True)
            relative_fname = save_visualization(algo_result, vis, metric,
                                                scene, tgt_dir)
            metric_data = {
                "value": float(score),
                "visualization": {
                    "thumb": relative_fname
                }
            }
        else:
            score = metric.get_score(algo_result, gt, scene)
            metric_data = {"value": float(score)}

        log.info("Score %5.2f for: %s, %s, Scale: %0.2f" %
                 (score, metric.get_display_name(), scene.get_display_name(),
                  scene.gt_scale))

        scores[metric.get_id()] = metric_data

    return scores
Example #2
def main():
    parser = OptionParser([
        ConverterOps(input_help="path to png disparity map",
                     output_help="path to pfm disparity map")
    ])

    image_path, config_path, pfm_path = parser.parse_args()

    from toolkit.scenes import PhotorealisticScene
    from toolkit.utils import log, file_io
    import numpy as np

    scene = PhotorealisticScene("demo", path_to_config=config_path)

    disp_map = file_io.read_file(image_path)
    log.info("Input range: [%0.1f, %0.1f]" %
             (np.min(disp_map), np.max(disp_map)))

    # scale from [MIN, MAX] to [disp_min, disp_max]
    disp_map = ((scene.disp_max - scene.disp_min) * (disp_map - MIN) /
                (MAX - MIN) + scene.disp_min)
    log.info("Output range: [%0.1f, %0.1f]" %
             (np.min(disp_map), np.max(disp_map)))

    file_io.write_file(disp_map, pfm_path)
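
The conversion above is just a linear map from the PNG value range to the scene's disparity range. A minimal standalone sketch of that arithmetic, assuming the module-level constants are the 8-bit range MIN = 0 and MAX = 255 (the toolkit may define them differently):

import numpy as np

# Assumed PNG value range; the converter defines MIN/MAX at module level.
MIN, MAX = 0.0, 255.0

def rescale_disparity(disp_map, disp_min, disp_max):
    """Linearly map values from [MIN, MAX] to [disp_min, disp_max]."""
    return (disp_max - disp_min) * (disp_map - MIN) / (MAX - MIN) + disp_min

# Example: 8-bit values mapped to a scene range of [-3.5, 3.5].
png_values = np.array([0.0, 127.5, 255.0])
print(rescale_disparity(png_values, -3.5, 3.5))  # [-3.5  0.   3.5]
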
Example #3
def main():
    parser = OptionParser([SceneOps(), AlgorithmOps(with_gt=True), MetaAlgorithmOps(default=[])])
    scenes, algorithms, meta_algorithms, compute_meta_algos = parser.parse_args()

    # delay imports to speed up usage response
    from toolkit import settings
    from toolkit.algorithms import MetaAlgorithm
    from toolkit.utils import log, misc, point_cloud

    if compute_meta_algos and meta_algorithms:
        MetaAlgorithm.prepare_meta_algorithms(meta_algorithms, algorithms, scenes)

    algorithms += meta_algorithms

    for scene in scenes:
        center_view = scene.get_center_view()

        for algorithm in algorithms:
            if algorithm.get_name() == "gt":
                disp_map = scene.get_gt()
            else:
                disp_map = misc.get_algo_result(algorithm, scene)

            log.info("Creating point cloud for scene '%s' with '%s' disparity map." %
                     (scene.get_name(), algorithm.get_name()))

            pc = point_cloud.convert(scene, disp_map, center_view)

            file_name = "%s_%s.ply" % (scene.get_name(), algorithm.get_name())
            file_path = op.join(settings.EVAL_PATH, "point_clouds", file_name)
            log.info("Saving point cloud to: %s" % file_path)
            point_cloud.save(pc, file_path)
Example #4
def visualize_algo_result(scene, algo_dir, tgt_dir, add_pfms_to_result):
    algo_result = misc.get_algo_result_from_dir(algo_dir, scene)

    # visualize
    fig = init_figure()
    cm = plt.imshow(algo_result, **settings.disp_map_args(scene))
    add_colorbar(cm, bins=8)

    # save fig
    relative_fname_thumb = get_relative_path(scene, "dispmap")
    fpath = op.normpath(op.join(tgt_dir, relative_fname_thumb))
    plotting.save_tight_figure(fig, fpath, hide_frames=True, pad_inches=0.01)

    # path info
    height, width = np.shape(algo_result)[:2]
    disp_map_data = {
        "thumb": relative_fname_thumb,
        "channels": 3,
        "height": height,
        "width": width
    }

    # save raw disparity map
    if add_pfms_to_result and not scene.is_test():
        relative_fname_raw = get_relative_path(scene,
                                               "dispmap",
                                               file_type="pfm")
        fpath_tgt = op.normpath(op.join(tgt_dir, relative_fname_raw))
        fpath_src = misc.get_fname_algo_result(algo_dir, scene)
        log.info("Copying disp map file from %s to %s" %
                 (fpath_src, fpath_tgt))
        shutil.copyfile(fpath_src, fpath_tgt)
        disp_map_data["raw"] = relative_fname_raw

    return disp_map_data
Example #5
def run_validation(submission_path):
    # delay imports to speed up usage response
    from toolkit.evaluations import submission_validation as validation

    is_unpacked = op.isdir(submission_path)

    try:
        if is_unpacked:
            unpacked_submission_path = submission_path
        else:
            # unpack zip archive
            from toolkit.utils.file_io import unzip
            tmp_dir = op.normpath(op.join(os.getcwd(), "../tmp"))
            try:
                log.info("Extracting archive.")
                submission_directory = op.splitext(
                    op.basename(submission_path))[0]
                unpacked_submission_path = op.join(tmp_dir,
                                                   submission_directory)
                unzip(submission_path, unpacked_submission_path)
            except IOError as e:
                log.error('Zip Error: %s.\nTerminated submission validation.' %
                          e)
                return

        # validate submission
        success, error_json = validation.validate_extracted_submission(
            unpacked_submission_path)

        # report results
        print_validation_results(success, error_json)

    finally:
        # clean up
        if not is_unpacked and op.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
Example #6
def add_runtime(scene, algo_dir, scores, metrics):
    runtime_metrics = [m for m in metrics if "runtime" in m.get_id()]
    for metric in runtime_metrics:
        score = metric.get_score_from_dir(scene, algo_dir)
        scores[metric.get_id()] = {"value": score}
        log.info("Score %5.2f for: %s, %s, Scale: %0.2f" %
                 (score, metric.get_display_name(), scene.get_display_name(),
                  scene.gt_scale))
    return scores
Example #7
def write_file(data, tgt_file, **kwargs):
    check_dir_for_fname(tgt_file)

    if tgt_file.endswith('.png') or tgt_file.endswith('.jpg'):
        write_img(data, tgt_file, **kwargs)
    elif tgt_file.endswith('.json'):
        write_json(data, tgt_file)
    elif tgt_file.endswith('.pfm'):
        write_pfm(data, tgt_file, **kwargs)
    else:
        raise NotImplementedError('No support for file: %s' % tgt_file)
    log.info('Saved %s' % tgt_file)
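
A brief usage sketch of the dispatcher above; file_io is imported from toolkit.utils as in the other examples, while the paths and data are hypothetical:

import numpy as np
from toolkit.utils import file_io

disp = np.random.rand(512, 512).astype(np.float32)

file_io.write_file(disp, "out/dispmap.pfm")                              # -> write_pfm
file_io.write_file((disp * 255).astype(np.uint8), "out/dispmap.png")     # -> write_img
file_io.write_file({"scene": "cotton", "mse": 1.23}, "out/scores.json")  # -> write_json
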
Example #8
def save_fig(fig,
             fig_name,
             dpi=150,
             bbox_inches='tight',
             hide_frames=False,
             remove_ticks=False,
             **kwargs):
    log.info("Saving figure...")

    if remove_ticks:
        remove_ticks_from_axes(fig.get_axes())
    if hide_frames:
        remove_frames_from_axes(fig.get_axes())

    file_io.check_dir_for_fname(fig_name)
    plt.savefig(fig_name, dpi=dpi, bbox_inches=bbox_inches, **kwargs)
    fig.clf()
    plt.close(fig)
    gc.collect()
    log.info('Saved: ' + fig_name)
Example #9
def compute_scores(algorithms,
                   scenes,
                   thresholds=THRESHOLDS,
                   penalize_missing_pixels=True):
    percentages_algo_thresh = np.full((len(algorithms), len(thresholds)),
                                      fill_value=np.nan)
    bad_pix_metric = BadPix()
    max_diff = np.max(thresholds)

    for idx_a, algorithm in enumerate(algorithms):
        combined_diffs = np.full(0, fill_value=np.nan)
        log.info('Computing BadPix scores for: %s' %
                 algorithm.get_display_name())

        for scene in scenes:
            gt = scene.get_gt()
            algo_result = misc.get_algo_result(algorithm, scene)
            diffs = np.abs(algo_result - gt)

            mask_valid = (misc.get_mask_valid(algo_result) *
                          misc.get_mask_valid(diffs))
            mask_eval = bad_pix_metric.get_evaluation_mask(scene)

            if penalize_missing_pixels:
                # penalize all invalid algorithm pixels with maximum error
                diffs[~mask_valid] = max_diff + 100
                diffs = diffs[mask_eval]
            else:
                diffs = diffs[mask_eval * mask_valid]

            combined_diffs = np.concatenate((combined_diffs, diffs))

        # compute BadPix score for each threshold
        for idx_t, t in enumerate(thresholds):
            bad_pix_metric.thresh = t
            bad_pix_score = bad_pix_metric.get_score_from_diffs(combined_diffs)
            percentages_algo_thresh[idx_a, idx_t] = 100 - bad_pix_score

    return percentages_algo_thresh
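
For reference, the BadPix value returned by get_score_from_diffs is the percentage of evaluated pixels whose absolute disparity error exceeds the threshold, and compute_scores stores 100 minus that value as an accuracy percentage. A self-contained sketch of the same arithmetic (not the toolkit's implementation):

import numpy as np

def badpix_from_diffs(diffs, thresh):
    """Percentage of absolute differences above the threshold."""
    return 100.0 * np.mean(diffs > thresh)

diffs = np.array([0.01, 0.03, 0.05, 0.20, 0.50])
for t in (0.01, 0.03, 0.07):
    bad_pix = badpix_from_diffs(diffs, t)
    print("BadPix(%0.2f): %0.1f%%, stored value: %0.1f" % (t, bad_pix, 100 - bad_pix))
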
Example #10
    def plot_metric_rows(self, grids, algorithms, metrics, offset, fontsize):
        gt = self.get_gt()
        center_view = self.get_center_view()

        for idx_a, algorithm in enumerate(algorithms):
            log.info("Algorithm: %s" % algorithm)
            algo_result = misc.get_algo_result(algorithm, self)

            # add algorithm disparity map
            plt.sca(grids[0][idx_a + 1])
            cm = plt.imshow(algo_result, **settings.disp_map_args(self))
            plt.title(algorithm.get_display_name(), fontsize=fontsize)

            # add colorbar to last disparity map in row
            if idx_a == (len(algorithms) - 1):
                plotting.create_colorbar(cm, cax=grids[0].cbar_axes[0],
                                         colorbar_bins=7, fontsize=fontsize)

            # add algorithm metric visualizations
            for idx_m, metric in enumerate(metrics):
                log.info(metric.get_display_name())
                mask = metric.get_evaluation_mask(self)

                plt.sca(grids[idx_m + offset + 1][idx_a + 1])
                cm = self.plot_algo_vis_for_metric(metric, algo_result, gt, mask,
                                                   self.hidden_gt(), fontsize)

                # add colorbar to last metric visualization in row
                if idx_a == len(algorithms) - 1:
                    plotting.create_colorbar(cm, cax=grids[idx_m + offset + 1].cbar_axes[0],
                                             colorbar_bins=metric.colorbar_bins, fontsize=fontsize)

                # add mask visualizations as 1st column
                if idx_a == 0:
                    plt.sca(grids[idx_m + offset + 1][0])
                    plotting.plot_img_with_transparent_mask(center_view, mask,
                                                            alpha=0.7, color=settings.MASK_COLOR)
                    plt.ylabel(metric.get_short_name(), fontsize=fontsize)
                    plt.title("Region Mask", fontsize=fontsize)
Example #11
    def parse_args(self, args=None, namespace=None):
        # try to parse all provided arguments
        namespace = super(OptionParser, self).parse_args(args, namespace)

        # call action with default action if option string was not provided
        # e.g. collect default scenes if no "-s" or "--scenes" was provided
        for action in self.actions:
            if getattr(namespace, action.dest) is None:
                action.__call__(self, namespace, values=None)

        log.info("Command line arguments: ")
        for action in self.actions:
            log.info("%s: %s" %
                     (action.dest.title(), getattr(namespace, action.dest)))

        # return values in order of parser options
        values = [getattr(namespace, action.dest) for action in self.actions]

        if len(values) == 1:
            return values[0]
        return values
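
A hedged usage sketch of the parser: values come back in the same order as the options passed to the constructor, and a single option yields the bare value instead of a one-element list. The import path is an assumption; SceneOps and AlgorithmOps are the option classes used in the examples above:

# Module path assumed; the examples above do not show where OptionParser lives.
from toolkit.option_parser import OptionParser, SceneOps, AlgorithmOps

# Two options: values are returned in constructor order.
scenes, algorithms = OptionParser([SceneOps(), AlgorithmOps()]).parse_args()

# One option: the single value is returned directly (the len(values) == 1 branch above).
algorithms = OptionParser([AlgorithmOps()]).parse_args()
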
Example #12
    def plot_algo_overview(self, algorithms, subdir="algo_overview", fs=6):
        accv_metrics = [MSE(), BadPix(0.07), BumpinessPlanes(), BumpinessContinSurf(),
                        Discontinuities(), FineFattening(), FineThinning()]
        metrics_low_res = [m for m in self.get_applicable_metrics_low_res() if m in accv_metrics]
        metrics_high_res = [m for m in self.get_applicable_metrics_high_res() if m in accv_metrics]

        # prepare figure
        rows = len(metrics_low_res + metrics_high_res) + 1
        cols = len(algorithms) + 1
        fig = plt.figure(figsize=(cols, rows*1.1))
        grids = self._get_grids(fig, rows, cols, axes_pad=-0.2)

        # center view on top left grid cell
        self.set_high_gt_scale()
        plt.sca(grids[0][0])
        plt.imshow(self.get_center_view())
        plt.title("Center View", fontsize=fs)
        plt.ylabel("Disparity Map", fontsize=fs)

        # mask visualizations + algorithm disparity maps + metric visualizations
        log.info("Computing scores and visualizations for low resolution metrics.")
        self.set_low_gt_scale()
        self.plot_metric_rows(grids, algorithms, metrics_low_res, offset=0, fontsize=fs)

        log.info("Computing scores and visualizations for high resolution metrics.")
        self.set_high_gt_scale()
        self.plot_metric_rows(grids, algorithms, metrics_high_res,
                              offset=len(metrics_low_res), fontsize=fs)

        # finalize figure
        for grid in grids:
            plotting.remove_ticks_from_axes(grid.axes_all)
            plotting.remove_frames_from_axes(grid.axes_all)
        plt.suptitle(self.get_display_name(), fontsize=fs+2)

        fig_path = plotting.get_path_to_figure("algo_overview_%s" % self.get_name(), subdir=subdir)
        plotting.save_fig(fig, fig_path, pad_inches=0.1)
Example #13
def compare_relative_ranks(algorithms, scenes, metrics, all_but=0):
    scores_scenes_metrics_algos = misc.collect_scores(algorithms,
                                                      scenes,
                                                      metrics,
                                                      masked=True)
    scores_metrics_algos = np.ma.median(scores_scenes_metrics_algos, axis=0)

    n_metrics = np.shape(scores_metrics_algos)[0]
    winners = dict()

    for idx_a1, algorithm1 in enumerate(algorithms):
        scores_a1 = scores_metrics_algos[:, idx_a1]
        worse_on_all_but_n = []

        for idx_a2, algorithm2 in enumerate(algorithms):
            scores_a2 = scores_metrics_algos[:, idx_a2]
            n_better = np.sum(scores_a1 < scores_a2)

            if n_better == n_metrics - all_but:
                worse_on_all_but_n.append(algorithm2)

        if worse_on_all_but_n:
            winners[algorithm1] = worse_on_all_but_n

    n_winners = len(winners.keys())
    log.info("%d Algorithm(s) better on all but %d score(s)." %
             (n_winners, all_but))

    for idx_a, (algorithm, better_than) in enumerate(winners.items()):
        inferior_algorithms = ", ".join(a.get_display_name()
                                        for a in better_than)
        log.info(
            "%d) %s is better than: %s" %
            (idx_a + 1, algorithm.get_display_name(), inferior_algorithms))

    return winners
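
The dominance test above boils down to counting, for each pair of algorithms, on how many of the per-metric median scores the first algorithm is strictly better (lower, since these are error metrics). A tiny illustration with made-up numbers:

import numpy as np

# Rows: metrics, columns: algorithms A and B (hypothetical median error scores).
scores_metrics_algos = np.array([[1.0, 2.0],
                                 [0.5, 0.9],
                                 [3.0, 2.5]])

scores_a = scores_metrics_algos[:, 0]
scores_b = scores_metrics_algos[:, 1]
n_metrics = scores_metrics_algos.shape[0]

n_better = np.sum(scores_a < scores_b)   # A is better on 2 of 3 metrics
print(n_better == n_metrics - 1)         # True, so with all_but=1, A "wins" against B
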
Example #14
def print_validation_results(success, error_json):
    if success:
        log.info("Yeah :) Congratulations, your submission archive is valid. "
                 "Go ahead and submit it online!")
    else:
        error_messages = error_json["messages"]
        log.info('Validation found %d error(s).' % len(error_messages))
        log.info(
            'A detailed format description can be found in the SUBMISSION_INSTRUCTIONS. '
            '\nThe identified problems are: ')
        for error in error_messages:
            log.error(error)
Example #15
def main():
    accv_algo_names = ["epi1", "epi2", "lf_occ", "lf", "mv"]
    parser = OptionParser(
        [AlgorithmOps(default=accv_algo_names),
         FigureOpsACCV16()])

    algorithms, figure_options = parser.parse_args()

    # delay imports to speed up usage response
    from toolkit.evaluations import paper_accv_2016, error_heatmaps
    from toolkit.scenes import Backgammon, Dots, Pyramids, Stripes
    from toolkit.utils import log, misc

    if "heatmaps" in figure_options:
        log.info("Creating error heatmaps.")
        scenes = misc.get_stratified_scenes() + misc.get_training_scenes()
        error_heatmaps.plot(algorithms, scenes, subdir=SUBDIR)

    if "radar" in figure_options:
        log.info("Creating radar charts for stratified and training scenes.")
        paper_accv_2016.plot_radar_charts(algorithms, subdir=SUBDIR)

    if "backgammon" in figure_options:
        log.info("Creating special chart for backgammon scene.")
        Backgammon().plot_fattening_thinning(algorithms, subdir=SUBDIR)

    if "pyramids" in figure_options:
        log.info("Creating special chart for pyramids scene.")
        Pyramids().plot_algo_disp_vs_gt_disp(algorithms, subdir=SUBDIR)

    if "dots" in figure_options:
        log.info("Creating special chart for dots scene.")
        Dots().plot_error_vs_noise(algorithms, subdir=SUBDIR)

    if "stripes" in figure_options:
        log.info("Creating special chart for stripes scene.")
        Stripes().visualize_masks(subdir=SUBDIR)

    if "stratified" in figure_options:
        for scene in misc.get_stratified_scenes():
            log.info("Creating metric visualizations for scene: %s." %
                     scene.get_display_name())
            scene.plot_algo_overview(algorithms,
                                     with_metric_vis=True,
                                     subdir=SUBDIR)

    if "training" in figure_options:
        for scene in misc.get_training_scenes():
            log.info("Creating metric visualizations for scene: %s." %
                     scene.get_display_name())
            scene.plot_algo_overview(algorithms, subdir=SUBDIR)
Example #16
def main():
    figure_options = OptionParser([FigureOpsCVPR17()]).parse_args()

    # delay imports to speed up usage response
    from toolkit.algorithms import Algorithm, PerPixBest
    from toolkit.evaluations import paper_cvprw_2017 as cvprw
    from toolkit.utils import log, misc
    from toolkit.scenes import PhotorealisticScene

    # prepare scenes
    if USE_TEST_SCENE_GT:
        benchmark_scenes = misc.get_benchmark_scenes()
    else:
        benchmark_scenes = (misc.get_stratified_scenes() +
                            misc.get_training_scenes())

    # prepare algorithms
    fnames_baseline_algos = ["epi1", "epi2", "lf", "mv", "lf_occ26"]
    fnames_challenge_algos = [
        "ober", "omg_occ", "ps_rf25", "rm3de", "sc_gc", "spo_lf4cv", "zctv1"
    ]
    fnames_other_submissions = ["obercross", "ofsy_330dnr2"]

    baseline_algorithms = Algorithm.initialize_algorithms(
        fnames_baseline_algos, is_baseline=True)
    challenge_algorithms = Algorithm.initialize_algorithms(
        fnames_challenge_algos)
    other_algorithms = Algorithm.initialize_algorithms(
        fnames_other_submissions)

    for algorithm in other_algorithms:
        algorithm.display_name = "'" + algorithm.display_name

    algorithms = (sorted(baseline_algorithms) + sorted(other_algorithms) +
                  sorted(challenge_algorithms))
    algorithms = Algorithm.set_colors(algorithms)

    # create figures

    if "normalsdemo" in figure_options:
        log.info("Creating normals demo figure with Sideboard scene.")
        scene = PhotorealisticScene("sideboard")
        cvprw.plot_normals_explanation(Algorithm("epi1"), scene, subdir=SUBDIR)

    if "radar" in figure_options:
        log.info("Creating radar charts.")
        cvprw.plot_radar_charts(algorithms, subdir=SUBDIR)

    if "badpix" in figure_options:
        log.info("Creating figures with BadPix series.")
        per_pix_best = PerPixBest()
        per_pix_best.compute_meta_results(algorithms, benchmark_scenes)
        cvprw.plot_bad_pix_series(algorithms + [per_pix_best],
                                  USE_TEST_SCENE_GT,
                                  subdir=SUBDIR)

    if "median" in figure_options:
        log.info("Creating median comparison figures.")
        cvprw.plot_median_diffs(algorithms,
                                misc.get_stratified_scenes(),
                                subdir=SUBDIR)
        cvprw.plot_median_diffs(algorithms,
                                misc.get_training_scenes(),
                                subdir=SUBDIR)

    if "normals" in figure_options:
        log.info("Creating surface normal figure with Cotton scene.")
        cvprw.plot_normal_maps(algorithms,
                               PhotorealisticScene("cotton"),
                               subdir=SUBDIR)

    if USE_TEST_SCENE_GT and "discont" in figure_options:
        log.info("Creating discontinuity figure with Bicycle scene.")
        cvprw.plot_discont_overview(algorithms,
                                    PhotorealisticScene("bicycle"),
                                    subdir=SUBDIR)

    if "accuracy" in figure_options:
        log.info("Creating high accuracy figure.")
        selection = [
            "ofsy_330dnr2", "zctv1", "obercross", "ober", "sc_gc", "spo_lf4cv",
            "rm3de", "ps_rf25"
        ]
        high_accuracy_algorithms = []
        # algorithms should be exactly in the order of 'selection'
        for algo_name in selection:
            high_accuracy_algorithms.append(
                [a for a in algorithms if a.get_name() == algo_name][0])
        scenes = [PhotorealisticScene("cotton"), PhotorealisticScene("boxes")]
        cvprw.plot_high_accuracy(high_accuracy_algorithms,
                                 scenes,
                                 subdir=SUBDIR)

    if "scenes" in figure_options:
        log.info("Creating scene overview figure.")
        cvprw.plot_benchmark_scene_overview(misc.get_benchmark_scenes(),
                                            subdir=SUBDIR)

    if "difficulty" in figure_options:
        log.info("Creating scene difficulty figure.")
        cvprw.plot_scene_difficulty(benchmark_scenes, subdir=SUBDIR)
Example #17
def validate_extracted_submission(submission_dir, data_path=None):
    log.info("Validating extracted submission: %s." % submission_dir)

    scene_names = [
        s.get_name() for s in misc.get_benchmark_scenes(data_path=data_path)
    ]
    exp_height, exp_width = settings.HEIGHT, settings.WIDTH
    errors = []

    # check disparity maps
    disp_maps_dir = op.normpath(
        op.join(submission_dir, settings.DIR_NAME_DISP_MAPS))

    if not op.isdir(disp_maps_dir):
        errors.append('Could not find disparity map directory: "%s".' %
                      settings.DIR_NAME_DISP_MAPS)
    else:
        log.info("Validating disparity map files.")

        for scene_name in scene_names:
            path_disp_maps = op.join(disp_maps_dir, "%s.pfm" % scene_name)
            relative_path_disp_maps = op.join(settings.DIR_NAME_DISP_MAPS,
                                              "%s.pfm" % scene_name)

            if not op.isfile(path_disp_maps):
                errors.append(
                    'Frame %s: Could not find disparity file: "%s".' %
                    (scene_name, relative_path_disp_maps))
            else:
                try:
                    disp_map = file_io.read_pfm(path_disp_maps)
                    height, width = np.shape(disp_map)

                    if height != exp_height or width != exp_width:
                        errors.append(
                            "Frame %s, File %s: Resolution mismatch. "
                            "Expected (%d, %d), got (%d, %d)." %
                            (scene_name, relative_path_disp_maps, exp_height,
                             exp_width, height, width))

                except file_io.PFMExeption as e:
                    errors.append("Frame %s, File %s, PFM Error: %s." %
                                  (scene_name, relative_path_disp_maps, e))

    # check runtimes
    runtimes_dir = op.normpath(
        op.join(submission_dir, settings.DIR_NAME_RUNTIMES))

    if not op.isdir(runtimes_dir):
        errors.append('Could not find runtimes directory: "%s".' %
                      settings.DIR_NAME_RUNTIMES)
    else:
        log.info("Validating runtime files.")

        for scene_name in scene_names:
            path_runtimes = op.join(runtimes_dir, "%s.txt" % scene_name)
            relative_path_runtimes = op.join(settings.DIR_NAME_RUNTIMES,
                                             "%s.txt" % scene_name)

            if not op.isfile(path_runtimes):
                errors.append('Frame %s: Could not find runtime file: "%s".' %
                              (scene_name, relative_path_runtimes))
            else:
                try:
                    file_io.read_runtime(path_runtimes)
                except IOError as error:
                    errors.append("Frame %s, File %s, Error: %s." %
                                  (scene_name, relative_path_runtimes, error))

    success = not errors
    error_json = {"messages": errors}

    if success:
        log.info("Validated submission successfully :)")
    return success, error_json
Example #18
def evaluate(evaluation_output_path,
             algorithm_input_path,
             scenes,
             metrics,
             visualize=False,
             add_to_existing_results=True,
             add_pfms_to_result=True):
    """
    :param evaluation_output_path: target directory for all evaluation results
    :param algorithm_input_path: input directory for algorithm results,
                                 expected directories: runtimes, disp_maps
    :param scenes: scenes to be evaluated
    :param metrics: metrics to be evaluated
    :param visualize: whether to save visualizations (otherwise just the scores)
    :param add_to_existing_results: if True, try to read an existing results.json and
                                    add/replace entries, keeping scores of other
                                    scenes/metrics as they are
    :param add_pfms_to_result: if True, copy the raw PFM disparity maps into the results
                               (used on the evaluation server for the 3D point cloud view)
    :return: success, {"messages": ["error 1", "error 2", ...]}
    """

    log.info("Evaluating algorithm results in:\n  %s" % algorithm_input_path)
    log.info("Writing results to:\n  %s" % evaluation_output_path)
    log.info("Using ground truth data from:\n  %s" % settings.DATA_PATH)
    log.info("Metrics:\n  %s" % ", ".join(m.get_display_name()
                                          for m in metrics))
    log.info("Scenes:\n  %s" % ", ".join(s.get_display_name() for s in scenes))

    file_name_results = op.join(evaluation_output_path, "results.json")
    admin_errors = []

    eval_json = dict()
    if add_to_existing_results:
        try:
            eval_json = file_io.read_file(file_name_results)
        except IOError:
            pass

    # evaluate
    for scene in scenes:
        scene_data = eval_json.get(scene.get_name(), dict())

        try:
            if visualize:
                log.info("Visualizing algorithm result on %s" %
                         scene.get_display_name())
                scene_data["algorithm_result"] = visualize_algo_result(
                    scene, algorithm_input_path, evaluation_output_path,
                    add_pfms_to_result)

            log.info("Processing scene: %s" % scene.get_display_name())
            log.info("Using data from:\n  %s" % scene.get_data_path())
            scene_scores = compute_scores(scene, metrics, algorithm_input_path,
                                          evaluation_output_path, visualize)

            if add_to_existing_results:
                existing_scores = scene_data.get("scores", dict())
                existing_scores.update(scene_scores)
                scene_scores = existing_scores

            scene_data["scores"] = scene_scores

        except IOError as e:
            admin_errors.append(e)
            log.error(e)
            continue

        eval_json[scene.get_name()] = scene_data

    # save json with scores and paths to visualizations
    file_io.write_file(eval_json, file_name_results)
    log.info("Done!")

    success = not admin_errors
    error_json = {"messages": admin_errors}
    return success, error_json
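
A hedged end-to-end sketch of calling evaluate. The scene helper and metric classes appear in the other examples, but the exact module path for the metrics and the directories used here are assumptions:

from toolkit.utils import log, misc
from toolkit.metrics import MSE, BadPix   # module path assumed

scenes = misc.get_training_scenes()
metrics = [MSE(), BadPix(0.07)]

success, error_json = evaluate(
    evaluation_output_path="/tmp/evaluation",        # hypothetical output directory
    algorithm_input_path="/tmp/algo_results/epi1",   # expects disp_maps and runtimes subdirectories
    scenes=scenes,
    metrics=metrics,
    visualize=True)

if not success:
    for message in error_json["messages"]:
        log.error(message)
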
Example #19
    def plot_algo_overview(self,
                           algorithms,
                           with_metric_vis=True,
                           subdir="algo_overview",
                           fs=14):
        self.set_scale_for_algo_overview()
        metrics = self.get_scene_specific_metrics()
        n_metrics = len(metrics)

        if not with_metric_vis:
            rows, cols = 2 + n_metrics, len(algorithms) + 2
            fig = plt.figure(figsize=(2.6 * len(algorithms), 4.9))
            offset = 0
        else:
            rows, cols = 2 + 2 * n_metrics, len(algorithms) + 2
            fig = plt.figure(figsize=(2.6 * len(algorithms), rows + 3))
            offset = n_metrics

        labelpad = -15
        hscale, wscale = 7, 5
        width_ratios = [wscale] * (len(algorithms) + 1) + [1]
        height_ratios = [hscale] * (rows - n_metrics) + [1] * n_metrics
        gs = gridspec.GridSpec(rows,
                               cols,
                               height_ratios=height_ratios,
                               width_ratios=width_ratios)

        gt = self.get_gt()
        dummy = np.ones((self.get_height() // hscale, self.get_width()))
        cb_height, w = np.shape(gt)
        cb_width = w / float(wscale)

        # first column (gt, center view, ...)
        plt.subplot(gs[0])
        plt.imshow(gt, **settings.disp_map_args(self))
        plt.title("Ground Truth", fontsize=fs)
        plt.ylabel("Disparity Map", fontsize=fs)

        plt.subplot(gs[cols])
        plt.imshow(self.get_center_view())
        plt.ylabel("diff: GT - Algo", fontsize=fs)

        for idx_m, metric in enumerate(metrics):
            plt.subplot(gs[(2 + idx_m + offset) * cols])
            plt.xlabel(metric.get_short_name(), labelpad=labelpad, fontsize=fs)
            plt.imshow(dummy, cmap="gray_r")

        # algorithm columns
        for idx_a, algorithm in enumerate(algorithms):
            log.info("Processing algorithm: %s" % algorithm)
            algo_result = misc.get_algo_result(algorithm, self)

            # algorithm disparities
            plt.subplot(gs[idx_a + 1])
            plt.title(algorithm.get_display_name(), fontsize=fs)
            cm1 = plt.imshow(algo_result, **settings.disp_map_args(self))

            # algorithm diff map
            plt.subplot(gs[cols + idx_a + 1])
            cm2 = plt.imshow(gt - algo_result, **settings.diff_map_args())

            # add colorbar if last column
            if idx_a == (len(algorithms) - 1):
                plotting.add_colorbar(gs[idx_a + 2],
                                      cm1,
                                      cb_height,
                                      cb_width,
                                      colorbar_bins=5,
                                      fontsize=fs - 4)
                plotting.add_colorbar(gs[cols + idx_a + 2],
                                      cm2,
                                      cb_height,
                                      cb_width,
                                      colorbar_bins=5,
                                      fontsize=fs - 4)

            # score and background color for metrics
            for idx_m, metric in enumerate(metrics):

                if with_metric_vis:
                    plt.subplot(gs[(2 + idx_m) * cols + idx_a + 1])
                    score, vis = metric.get_score(algo_result,
                                                  gt,
                                                  self,
                                                  with_visualization=True)
                    cm3 = plt.imshow(vis, **settings.metric_args(metric))

                    if idx_a == 0:
                        plt.ylabel(metric.get_short_name(), fontsize=fs)
                    elif idx_a == (len(algorithms) - 1):
                        plotting.add_colorbar(
                            gs[(2 + idx_m) * cols + idx_a + 2],
                            cm3,
                            cb_height,
                            cb_width,
                            colorbar_bins=metric.colorbar_bins,
                            fontsize=fs - 4)

                else:
                    score = metric.get_score(algo_result, gt, self)

                plt.subplot(gs[(2 + idx_m + offset) * cols + idx_a + 1])
                plt.imshow(
                    dummy * score,
                    **settings.score_color_args(vmin=metric.vmin,
                                                vmax=metric.vmax))
                plt.xlabel(metric.format_score(score),
                           labelpad=labelpad,
                           fontsize=fs)

        fig_name = "algo_overview_" + self.get_name(
        ) + with_metric_vis * "_vis"
        fig_path = plotting.get_path_to_figure(fig_name, subdir=subdir)
        plotting.save_tight_figure(fig,
                                   fig_path,
                                   wspace=0.04,
                                   hide_frames=True)