Example 1
    def _get_data(self, descr, file_type, order=0):
        if self.gt_scale == 1:
            # original lowres version if gt_scale is 1
            fname = "%s_%s.%s" % (descr, settings.LOWRES, file_type)
            data = file_io.read_file(op.join(self.data_path, fname))
        else:
            # otherwise start from the original highres version
            fname = "%s_%s.%s" % (descr, settings.HIGHRES, file_type)
            data = file_io.read_file(op.join(self.data_path, fname))

            # and scale it to the required shape if gt_scale differs from highres_scale
            if self.gt_scale != self.highres_scale:
                data = misc.resize_to_shape(data, self.get_height(), self.get_width(), order=order)

        return data
Example 2
def collect_scores(algorithms, scenes, metrics, masked=False):
    scores_scenes_metrics_algos = np.full(
        (len(scenes), len(metrics), len(algorithms)), fill_value=np.nan)

    for idx_a, algorithm in enumerate(algorithms):
        fname_json = op.join(settings.ALGO_EVAL_PATH, algorithm.get_name(),
                             "results.json")

        try:
            results = file_io.read_file(fname_json)
        except IOError:
            log.error(
                "Could not find scores at: %s. \n"
                "Please execute 'run_evaluation.py' with the algorithms, scenes and metrics "
                "that you want to use in your figure." % fname_json)
            exit()

        for idx_s, scene in enumerate(scenes):
            scene_scores = results[scene.get_name()]["scores"]

            for idx_m, metric in enumerate(metrics):
                metric_score = scene_scores.get(metric.get_id(), None)

                if metric_score is not None:
                    scores_scenes_metrics_algos[idx_s, idx_m,
                                                idx_a] = metric_score["value"]

    if masked:
        mask = get_mask_invalid(scores_scenes_metrics_algos)
        scores_scenes_metrics_algos = np.ma.masked_array(
            scores_scenes_metrics_algos, mask=mask)

    return scores_scenes_metrics_algos
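A minimal usage sketch (not part of the toolkit) showing how the returned scenes x metrics x algorithms array could be reduced, assuming the algorithms, scenes and metrics lists are already initialized as in the other examples:

scores = collect_scores(algorithms, scenes, metrics, masked=True)

# average over the scene axis, leaving a metrics x algorithms table;
# masked entries (missing or invalid scores) are excluded from the mean
mean_scores = scores.mean(axis=0)

for idx_a, algorithm in enumerate(algorithms):
    print("%s: %s" % (algorithm.get_name(), mean_scores[:, idx_a]))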
Example 3
    def initialize_algorithms(file_names_algorithms,
                              set_colors=True,
                              is_baseline=False,
                              is_meta=False):
        try:
            meta_data = file_io.read_file(settings.PATH_TO_ALGO_META_DATA)
        except IOError:
            meta_data = dict()

        algorithms = []
        for file_name in file_names_algorithms:
            algorithm = Algorithm(file_name=file_name,
                                  is_baseline=is_baseline,
                                  is_meta=is_meta)

            algo_data = meta_data.get(file_name, dict())
            display_name = algo_data.get("acronym", None)
            if display_name:
                algorithm.display_name = display_name

            algorithms.append(algorithm)

        if set_colors:
            algorithms = Algorithm.set_colors(algorithms)

        return algorithms
Example 4
    def get_center_view(self):
        fname = "input_Cam%03d.png" % self.get_center_cam()
        center_view = file_io.read_file(op.join(self.data_path, fname))
        if self.gt_scale != 1.0:
            center_view = misc.resize_to_shape(center_view, self.get_height(), self.get_width(), order=0)
        return center_view
Example 5
def main():
    parser = OptionParser([
        ConverterOps(input_help="path to png disparity map",
                     output_help="path to pfm disparity map")
    ])

    image_path, config_path, pfm_path = parser.parse_args()

    from toolkit.scenes import PhotorealisticScene
    from toolkit.utils import log, file_io
    import numpy as np

    scene = PhotorealisticScene("demo", path_to_config=config_path)

    disp_map = file_io.read_file(image_path)
    log.info("Input range: [%0.1f, %0.1f]" %
             (np.min(disp_map), np.max(disp_map)))

    # scale from [MIN, MAX] (constants defined elsewhere in the converter script) to [disp_min, disp_max]
    disp_map = (scene.disp_max - scene.disp_min) * (disp_map - MIN) / (
        MAX - MIN) + scene.disp_min
    log.info("Output range: [%0.1f, %0.1f]" %
             (np.min(disp_map), np.max(disp_map)))

    file_io.write_file(disp_map, pfm_path)
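The conversion above is a plain linear rescaling. A standalone sketch of the same formula, where rescale_range and the [0, 255] example range are hypothetical and only for illustration:

import numpy as np

def rescale_range(values, old_min, old_max, new_min, new_max):
    # linearly map values from [old_min, old_max] to [new_min, new_max]
    return (new_max - new_min) * (values - old_min) / (old_max - old_min) + new_min

# e.g. 8-bit png values in [0, 255] mapped to disparities in [-1.5, 1.5]
png_values = np.array([0.0, 127.5, 255.0])
print(rescale_range(png_values, 0.0, 255.0, -1.5, 1.5))  # [-1.5  0.   1.5]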
Example 6
def main():
    parser = OptionParser([ConverterOpsExt(input_help="path to disparity map",
                                           output_help="path to point cloud",
                                           optional_input=[("-c", "color_map_file",
                                                            "path to color map, "
                                                            "e.g. to center view of the scene")])])

    disp_map_path, config_path, point_cloud_path, color_map_path = parser.parse_args()

    from toolkit.scenes import PhotorealisticScene
    from toolkit.utils import point_cloud, file_io

    scene = PhotorealisticScene("demo", path_to_config=config_path)
    disp_map = file_io.read_file(disp_map_path)

    if color_map_path:
        color_map = file_io.read_file(color_map_path)
    else:
        color_map = None

    points = point_cloud.convert(scene, disp_map, color_map)
    point_cloud.save(points, point_cloud_path)
Example 7
def main():
    parser = OptionParser([ConverterOps(input_help="path to disparity map",
                                        output_help="path to depth map")])

    disp_map_path, config_path, depth_map_path = parser.parse_args()

    from toolkit.scenes import PhotorealisticScene
    from toolkit.utils import file_io

    scene = PhotorealisticScene("demo", path_to_config=config_path)
    disp_map = file_io.read_file(disp_map_path)
    depth_map = scene.disp2depth(disp_map)
    file_io.write_file(depth_map, depth_map_path)
Example 8
def evaluate(evaluation_output_path,
             algorithm_input_path,
             scenes,
             metrics,
             visualize=False,
             add_to_existing_results=True,
             add_pfms_to_result=True):
    """
    :param evaluation_output_path: target directory for all evaluation results
    :param algorithm_input_path: input directory for algorithm results,
                                 expected directories: runtimes, disp_maps
    :param scenes: scenes to be evaluated
    :param metrics: metrics to be evaluated
    :param visualize: whether to save visualizations (otherwise just the scores)
    :param add_to_existing_results: if True, tries to read an existing results.json and adds/replaces entries,
                                    keeping existing scores of other scenes/metrics as they are
    :param add_pfms_to_result: when executed on the evaluation server, pfm files are prepared for the 3D point cloud view
    :return: success, {"messages": ["error 1", "error 2", ...]}
    """

    log.info("Evaluating algorithm results in:\n  %s" % algorithm_input_path)
    log.info("Writing results to:\n  %s" % evaluation_output_path)
    log.info("Using ground truth data from:\n  %s" % settings.DATA_PATH)
    log.info("Metrics:\n  %s" % ", ".join(m.get_display_name()
                                          for m in metrics))
    log.info("Scenes:\n  %s" % ", ".join(s.get_display_name() for s in scenes))

    file_name_results = op.join(evaluation_output_path, "results.json")
    admin_errors = []

    eval_json = dict()
    if add_to_existing_results:
        try:
            eval_json = file_io.read_file(file_name_results)
        except IOError:
            pass

    # evaluate
    for scene in scenes:
        scene_data = eval_json.get(scene.get_name(), dict())

        try:
            if visualize:
                log.info("Visualizing algorithm result on %s" %
                         scene.get_display_name())
                scene_data["algorithm_result"] = visualize_algo_result(
                    scene, algorithm_input_path, evaluation_output_path,
                    add_pfms_to_result)

            log.info("Processing scene: %s" % scene.get_display_name())
            log.info("Using data from:\n  %s" % scene.get_data_path())
            scene_scores = compute_scores(scene, metrics, algorithm_input_path,
                                          evaluation_output_path, visualize)

            if add_to_existing_results:
                existing_scores = scene_data.get("scores", dict())
                existing_scores.update(scene_scores)
                scene_scores = existing_scores

            scene_data["scores"] = scene_scores

        except IOError as e:
            admin_errors.append(str(e))  # store the error message as a string, as documented in the return value
            log.error(e)
            continue

        eval_json[scene.get_name()] = scene_data

    # save json with scores and paths to visualizations
    file_io.write_file(eval_json, file_name_results)
    log.info("Done!")

    success = not admin_errors
    error_json = {"messages": admin_errors}
    return success, error_json
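A minimal sketch of how the return value might be consumed; the two paths are hypothetical placeholders, and scenes and metrics are assumed to be initialized as in the other examples:

success, error_json = evaluate("/tmp/evaluation_output",     # hypothetical output directory
                               "/tmp/algorithm_submission",  # hypothetical input directory
                               scenes, metrics,
                               visualize=True)

if not success:
    for message in error_json["messages"]:
        log.error(message)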
Example 9
def get_algo_result_from_dir(algo_dir, scene):
    fname = get_fname_algo_result(algo_dir, scene)
    algo_result = file_io.read_file(fname)
    if scene.gt_scale != 1:
        # rescale with nearest-neighbor interpolation (order=0) to match the scene's gt resolution
        algo_result = sci.zoom(algo_result, scene.gt_scale, order=0)
    return algo_result