# Example #1
def main():
    """Convert each scene/algorithm disparity map into a point cloud and save it as PLY."""
    opt_parser = OptionParser(
        [SceneOps(), AlgorithmOps(with_gt=True), MetaAlgorithmOps(default=[])])
    scenes, algorithms, meta_algorithms, compute_meta_algos = opt_parser.parse_args()

    # delay imports to speed up usage response
    from toolkit import settings
    from toolkit.algorithms import MetaAlgorithm
    from toolkit.utils import log, misc, point_cloud

    if compute_meta_algos and meta_algorithms:
        MetaAlgorithm.prepare_meta_algorithms(meta_algorithms, algorithms, scenes)

    algorithms += meta_algorithms

    for scene in scenes:
        view = scene.get_center_view()

        for algo in algorithms:
            # the pseudo-algorithm "gt" stands for the scene's ground truth disparity
            if algo.get_name() == "gt":
                disparities = scene.get_gt()
            else:
                disparities = misc.get_algo_result(algo, scene)

            log.info("Creating point cloud for scene '%s' with '%s' disparity map." %
                     (scene.get_name(), algo.get_name()))

            cloud = point_cloud.convert(scene, disparities, view)

            file_name = "%s_%s.ply" % (scene.get_name(), algo.get_name())
            file_path = op.join(settings.EVAL_PATH, "point_clouds", file_name)
            log.info("Saving point cloud to: %s" % file_path)
            point_cloud.save(cloud, file_path)
def main():
    """Plot error heatmaps for the selected algorithms and scenes."""
    scenes, algorithms, threshold = OptionParser(
        [SceneOps(), AlgorithmOps(), ThresholdOps()]).parse_args()

    # delay imports to speed up usage response
    from toolkit.evaluations import error_heatmaps
    error_heatmaps.plot(algorithms, scenes, thresh=threshold)
# Example #3
def main():
    """Run the submission evaluation for every selected (meta) algorithm."""
    opt_parser = OptionParser([
        SceneOps(),
        AlgorithmOps(),
        MetricOps(),
        VisualizationOps(),
        OverwriteOps(),
        MetaAlgorithmOps(default=[])
    ])
    (scenes, algorithms, metrics, with_vis,
     add_to_existing, meta_algorithms, compute_meta_algos) = opt_parser.parse_args()

    # delay import to speed up usage response
    from toolkit import settings
    from toolkit.algorithms import MetaAlgorithm
    from toolkit.evaluations import submission_evaluation
    from toolkit.utils import misc

    if compute_meta_algos and meta_algorithms:
        MetaAlgorithm.prepare_meta_algorithms(meta_algorithms, algorithms, scenes)

    algorithms += meta_algorithms

    for algo in algorithms:
        # per-algorithm input comes from its data directory; output goes below ALGO_EVAL_PATH
        out_path = op.join(settings.ALGO_EVAL_PATH, algo.get_name())
        in_path = misc.get_path_to_algo_data(algo)
        submission_evaluation.evaluate(scenes=scenes,
                                       metrics=metrics,
                                       visualize=with_vis,
                                       evaluation_output_path=out_path,
                                       algorithm_input_path=in_path,
                                       add_to_existing_results=add_to_existing)
# Example #4
def main():
    """Compute meta-algorithm results for the given algorithms and scenes."""
    opts = [AlgorithmOps(), SceneOps(), MetaAlgorithmOps(with_load_argument=False)]
    algorithms, scenes, meta_algorithms = OptionParser(opts).parse_args()

    from toolkit.algorithms import MetaAlgorithm
    MetaAlgorithm.prepare_meta_algorithms(meta_algorithms, algorithms, scenes)
# Example #5
def main():
    """Plot the BadPix series for the chosen algorithms over the chosen scenes."""
    parsed = OptionParser(
        [SceneOps(), AlgorithmOps(), MetaAlgorithmOps(default=[])]).parse_args()
    scenes, algorithms, meta_algorithms, compute_meta_algos = parsed

    # delay imports to speed up usage response
    from toolkit.algorithms import MetaAlgorithm
    from toolkit.evaluations import bad_pix_series

    if compute_meta_algos and meta_algorithms:
        MetaAlgorithm.prepare_meta_algorithms(meta_algorithms, algorithms, scenes)

    bad_pix_series.plot(algorithms + meta_algorithms, scenes)
def main():
    """Plot a general metric overview for algorithms (incl. meta algorithms)."""
    parsed = OptionParser(
        [SceneOps(), AlgorithmOps(), MetricOps(), MetaAlgorithmOps(default=[])]).parse_args()
    scenes, algorithms, metrics, meta_algorithms, compute_meta_algos = parsed

    # delay imports to speed up usage response
    from toolkit.algorithms import MetaAlgorithm
    from toolkit.evaluations import metric_overviews

    if compute_meta_algos and meta_algorithms:
        MetaAlgorithm.prepare_meta_algorithms(meta_algorithms, algorithms, scenes)

    metric_overviews.plot_general_overview(algorithms + meta_algorithms, scenes, metrics)
def main():
    """Recreate the requested ACCV 2016 paper figures for the selected algorithms."""
    accv_algo_names = ["epi1", "epi2", "lf_occ", "lf", "mv"]
    opt_parser = OptionParser(
        [AlgorithmOps(default=accv_algo_names), FigureOpsACCV16()])
    algorithms, figure_options = opt_parser.parse_args()

    # delay imports to speed up usage response
    from toolkit.evaluations import paper_accv_2016, error_heatmaps
    from toolkit.scenes import Backgammon, Dots, Pyramids, Stripes
    from toolkit.utils import log, misc

    # figures are produced in a fixed order, one option at a time
    if "heatmaps" in figure_options:
        log.info("Creating error heatmaps.")
        heatmap_scenes = misc.get_stratified_scenes() + misc.get_training_scenes()
        error_heatmaps.plot(algorithms, heatmap_scenes, subdir=SUBDIR)

    if "radar" in figure_options:
        log.info("Creating radar charts for stratified and training scenes.")
        paper_accv_2016.plot_radar_charts(algorithms, subdir=SUBDIR)

    if "backgammon" in figure_options:
        log.info("Creating special chart for backgammon scene.")
        Backgammon().plot_fattening_thinning(algorithms, subdir=SUBDIR)

    if "pyramids" in figure_options:
        log.info("Creating special chart for pyramids scene.")
        Pyramids().plot_algo_disp_vs_gt_disp(algorithms, subdir=SUBDIR)

    if "dots" in figure_options:
        log.info("Creating special chart for dots scene.")
        Dots().plot_error_vs_noise(algorithms, subdir=SUBDIR)

    if "stripes" in figure_options:
        log.info("Creating special chart for stripes scene.")
        Stripes().visualize_masks(subdir=SUBDIR)

    if "stratified" in figure_options:
        for stratified_scene in misc.get_stratified_scenes():
            log.info("Creating metric visualizations for scene: %s." %
                     stratified_scene.get_display_name())
            stratified_scene.plot_algo_overview(
                algorithms, with_metric_vis=True, subdir=SUBDIR)

    if "training" in figure_options:
        for training_scene in misc.get_training_scenes():
            log.info("Creating metric visualizations for scene: %s." %
                     training_scene.get_display_name())
            training_scene.plot_algo_overview(algorithms, subdir=SUBDIR)
# Example #8
def main():
    """Plot a comparison of every meta algorithm against the regular algorithms."""
    scenes, algorithms, meta_algorithms, compute_meta_algos = OptionParser(
        [SceneOps(), AlgorithmOps(), MetaAlgorithmOps()]).parse_args()

    # delay imports to speed up usage response
    from toolkit.algorithms import MetaAlgorithm
    from toolkit.evaluations import meta_algo_comparisons

    if compute_meta_algos and meta_algorithms:
        MetaAlgorithm.prepare_meta_algorithms(meta_algorithms, algorithms, scenes)

    for meta_algo in meta_algorithms:
        meta_algo_comparisons.plot(algorithms, scenes, meta_algo)
def main():
    """Plot pairwise comparisons between all algorithms, including meta algorithms."""
    parsed = OptionParser(
        [AlgorithmOps(), SceneOps(), MetaAlgorithmOps(default=[])]).parse_args()
    algorithms, scenes, meta_algorithms, compute_meta_algos = parsed

    # delay imports to speed up usage response
    from toolkit.algorithms import MetaAlgorithm
    from toolkit.evaluations import pairwise_algo_comparisons

    if compute_meta_algos and meta_algorithms:
        MetaAlgorithm.prepare_meta_algorithms(meta_algorithms, algorithms, scenes)

    pairwise_algo_comparisons.plot_pairwise_comparisons(
        algorithms + meta_algorithms, scenes)