def process_trajectory_data(self,
                                traj_ref,
                                traj_est,
                                segments,
                                is_vio_traj=True):
        """ Compute APE and RPE metrics for one (reference, estimate) pair.

            Args:
                traj_ref: reference (ground-truth) trajectory.
                traj_est: estimated trajectory, already associated and aligned
                    with traj_ref.
                segments: list of segment lengths used for per-segment RPE.
                is_vio_traj: True when the estimate comes from the VIO, False
                    for the PGO (only affects log messages).

            Returns: a 4-tuple (ape_metric, rpe_metric_trans, rpe_metric_rot,
                results) where results is a dict with keys 'absolute_errors',
                'relative_errors' and 'trajectory_length_m'.
        """
        suffix = "VIO" if is_vio_traj else "PGO"
        traj_pair = (traj_ref, traj_est)

        evt.print_purple("Calculating APE translation part for " + suffix)
        ape_metric = get_ape_trans(traj_pair)
        ape_result = ape_metric.get_result()
        evt.print_green("APE translation: %f" % ape_result.stats['mean'])

        evt.print_purple("Calculating RPE translation part for " + suffix)
        rpe_metric_trans = get_rpe_trans(traj_pair)

        evt.print_purple("Calculating RPE rotation angle for " + suffix)
        rpe_metric_rot = get_rpe_rot(traj_pair)

        # Collect everything the caller needs in a single results dict.
        results = {
            "absolute_errors": ape_result,
            "relative_errors": self.calc_rpe_results(rpe_metric_trans,
                                                     rpe_metric_rot,
                                                     traj_pair, segments),
            # Record how long the trajectory was as well.
            "trajectory_length_m": traj_est.path_length(),
        }

        return (ape_metric, rpe_metric_trans, rpe_metric_rot, results)
    def run_dataset(self, dataset):
        """ Run a single dataset from an experiments file and save all output. This is done
            for every pipeline requested for the dataset.

            Args:
                dataset: a dataset to run as defined in the experiments yaml file.

            Returns: True if all pipelines for the dataset succeed, False otherwise.
        """
        dataset_name = dataset['name']

        pipelines = dataset['pipelines']
        if not pipelines:
            log.warning("Not running pipeline...")

        all_succeeded = True
        for pipeline_type in pipelines:
            # We deliberately keep going after a failure so the remaining
            # pipelines can still be run (and later plotted).
            evt.print_green("Run pipeline: %s" % pipeline_type)
            if self.__run_vio(dataset, pipeline_type):
                evt.print_green("Successful pipeline run.")
            else:
                log.error("Failed pipeline run!")
                all_succeeded = False

        if all_succeeded:
            evt.print_green("All pipeline runs were successful.")
        evt.print_green("Finished evaluation for dataset: " + dataset_name)
        return all_succeeded
    def save_results_to_file(self, results, title,
                             dataset_pipeline_result_dir):
        """ Writes a result dictionary to file as a yaml file.

            Args:
                results: a dictionary containing ape, rpe rotation and rpe translation results and
                    statistics.
                title: a string representing the filename without the '.yaml' extension.
                dataset_pipeline_result_dir: a string representing the filepath for the location to
                    save the results file.
        """
        out_path = os.path.join(dataset_pipeline_result_dir, title + '.yaml')
        evt.print_green("Saving analysis results to: %s" % out_path)
        evt.create_full_path_if_not_exists(out_path)
        # Stream the yaml dump straight into the file handle.
        with open(out_path, 'w') as out_stream:
            yaml.dump(results, out_stream, default_flow_style=False)
    def save_plots_to_file(self,
                           plot_collection,
                           dataset_pipeline_result_dir,
                           save_pdf=True):
        """ Write plot collection to disk as either pdf or eps.

            Args:
                - plot_collection: a PlotCollection containing all the plots to save to file.
                - dataset_pipeline_result_dir: a string representing the filepath for the location to
                    which the plot files are saved.
                - save_pdf: whether to save figures to pdf (True) or eps (False) format.
        """
        # Config output format (pdf, eps, ...) using evo_config...
        # The two branches only differed by file extension; pick it once.
        plots_filename = "plots.pdf" if save_pdf else "plots.eps"
        output_file_path = os.path.join(dataset_pipeline_result_dir,
                                        plots_filename)
        evt.print_green("Saving plots to: %s" % output_file_path)
        plot_collection.export(output_file_path, False)
    def run_analysis(self,
                     traj_ref_path,
                     traj_vio_path,
                     traj_pgo_path,
                     segments,
                     dataset_name="",
                     discard_n_start_poses=0,
                     discard_n_end_poses=0):
        """ Analyze data from a set of trajectory csv files.

            Args:
                traj_ref_path: string representing filepath of the reference (ground-truth) trajectory.
                traj_vio_path: string representing filepath of the vio estimated trajectory.
                traj_pgo_path: string representing filepath of the pgo estimated trajectory.
                segments: list of segments for RPE calculation, defined in the experiments yaml file.
                dataset_name: string representing the dataset's name
                discard_n_start_poses: int representing number of poses to discard from start of analysis.
                discard_n_end_poses: int representing the number of poses to discard from end of analysis.

            Returns: a list [plot_collection, results_vio, results_pgo], where
                plot_collection is None unless plotting was requested and
                results_pgo is None when no PGO trajectory is available.
        """
        import copy

        # Mind that traj_est_pgo might be None
        traj_ref, traj_est_vio, traj_est_pgo = self.read_traj_files(
            traj_ref_path, traj_vio_path, traj_pgo_path)

        # We copy to distinguish from the pgo version that may be created
        traj_ref_vio = copy.deepcopy(traj_ref)

        # Register and align trajectories:
        evt.print_purple("Registering and aligning trajectories")
        traj_ref_vio, traj_est_vio = sync.associate_trajectories(
            traj_ref_vio, traj_est_vio)
        traj_est_vio = trajectory.align_trajectory(
            traj_est_vio,
            traj_ref_vio,
            correct_scale=False,
            discard_n_start_poses=int(discard_n_start_poses),
            discard_n_end_poses=int(discard_n_end_poses))

        # We do the same for the PGO trajectory if needed:
        traj_ref_pgo = None
        if traj_est_pgo is not None:
            traj_ref_pgo = copy.deepcopy(traj_ref)
            traj_ref_pgo, traj_est_pgo = sync.associate_trajectories(
                traj_ref_pgo, traj_est_pgo)
            traj_est_pgo = trajectory.align_trajectory(
                traj_est_pgo,
                traj_ref_pgo,
                correct_scale=False,
                discard_n_start_poses=int(discard_n_start_poses),
                discard_n_end_poses=int(discard_n_end_poses))

        # We need to pick the lowest num_poses before doing any computation:
        # this way the VIO and PGO metrics are computed over the same pose
        # range and stay comparable.
        num_of_poses = traj_est_vio.num_poses
        if traj_est_pgo is not None:
            num_of_poses = min(num_of_poses, traj_est_pgo.num_poses)
            traj_est_pgo.reduce_to_ids(
                range(int(discard_n_start_poses),
                      int(num_of_poses - discard_n_end_poses), 1))
            traj_ref_pgo.reduce_to_ids(
                range(int(discard_n_start_poses),
                      int(num_of_poses - discard_n_end_poses), 1))

        traj_est_vio.reduce_to_ids(
            range(int(discard_n_start_poses),
                  int(num_of_poses - discard_n_end_poses), 1))
        traj_ref_vio.reduce_to_ids(
            range(int(discard_n_start_poses),
                  int(num_of_poses - discard_n_end_poses), 1))

        # Calculate all metrics:
        (ape_metric_vio, rpe_metric_trans_vio, rpe_metric_rot_vio,
         results_vio) = self.process_trajectory_data(traj_ref_vio,
                                                     traj_est_vio, segments,
                                                     True)

        # We do the same for the pgo trajectory if needed:
        ape_metric_pgo = None
        rpe_metric_trans_pgo = None
        rpe_metric_rot_pgo = None
        results_pgo = None
        if traj_est_pgo is not None:
            (ape_metric_pgo, rpe_metric_trans_pgo, rpe_metric_rot_pgo,
             results_pgo) = self.process_trajectory_data(
                 traj_ref_pgo, traj_est_pgo, segments, False)

        # Generate plots for return:
        plot_collection = None
        if self.display_plots or self.save_plots:
            evt.print_green("Plotting:")
            log.info(dataset_name)
            plot_collection = plot.PlotCollection("Example")

            if traj_est_pgo is not None:
                # APE Metric Plot:
                plot_collection.add_figure(
                    "PGO_APE_translation",
                    plot_metric(ape_metric_pgo, "PGO + VIO APE Translation"))

                # Trajectory Colormapped with ATE Plot:
                # NOTE(review): traj_est_vio is also passed here, presumably so
                # the VIO path is drawn alongside the PGO one for comparison —
                # confirm against plot_traj_colormap_ape's signature.
                plot_collection.add_figure(
                    "PGO_APE_translation_trajectory_error",
                    plot_traj_colormap_ape(
                        ape_metric_pgo, traj_ref_pgo, traj_est_vio,
                        traj_est_pgo, "PGO + VIO ATE Mapped Onto Trajectory"))

                # RPE Translation Metric Plot:
                plot_collection.add_figure(
                    "PGO_RPE_translation",
                    plot_metric(rpe_metric_trans_pgo,
                                "PGO + VIO RPE Translation"))

                # Trajectory Colormapped with RTE Plot:
                plot_collection.add_figure(
                    "PGO_RPE_translation_trajectory_error",
                    plot_traj_colormap_rpe(
                        rpe_metric_trans_pgo, traj_ref_pgo, traj_est_vio,
                        traj_est_pgo,
                        "PGO + VIO RPE Translation Error Mapped Onto Trajectory"
                    ))

                # RPE Rotation Metric Plot:
                plot_collection.add_figure(
                    "PGO_RPE_Rotation",
                    plot_metric(rpe_metric_rot_pgo, "PGO + VIO RPE Rotation"))

                # Trajectory Colormapped with RTE Plot:
                plot_collection.add_figure(
                    "PGO_RPE_rotation_trajectory_error",
                    plot_traj_colormap_rpe(
                        rpe_metric_rot_pgo, traj_ref_pgo, traj_est_vio,
                        traj_est_pgo,
                        "PGO + VIO RPE Rotation Error Mapped Onto Trajectory"))

            # Plot VIO results
            plot_collection.add_figure(
                "VIO_APE_translation",
                plot_metric(ape_metric_vio, "VIO APE Translation"))

            plot_collection.add_figure(
                "VIO_APE_translation_trajectory_error",
                plot_traj_colormap_ape(ape_metric_vio, traj_ref_vio,
                                       traj_est_vio, None,
                                       "VIO ATE Mapped Onto Trajectory"))

            plot_collection.add_figure(
                "VIO_RPE_translation",
                plot_metric(rpe_metric_trans_vio, "VIO RPE Translation"))

            plot_collection.add_figure(
                "VIO_RPE_translation_trajectory_error",
                plot_traj_colormap_rpe(
                    rpe_metric_trans_vio, traj_ref_vio, traj_est_vio, None,
                    "VIO RPE Translation Error Mapped Onto Trajectory"))

            plot_collection.add_figure(
                "VIO_RPE_Rotation",
                plot_metric(rpe_metric_rot_vio, "VIO RPE Rotation"))

            plot_collection.add_figure(
                "VIO_RPE_rotation_trajectory_error",
                plot_traj_colormap_rpe(
                    rpe_metric_rot_vio, traj_ref_vio, traj_est_vio, None,
                    "VIO RPE Rotation Error Mapped Onto Trajectory"))

        return [plot_collection, results_vio, results_pgo]
    def __evaluate_run(self, pipeline_type, dataset):
        """ Evaluate performance of one pipeline of one dataset, as defined in the experiments
            yaml file.

            Assumes that the files traj_gt.csv traj_vio.csv and traj_pgo.csv are present.

            Args:
                dataset: a dataset to evaluate as defined in the experiments yaml file.
                pipeline_type: a pipeline representing a set of parameters to use, as
                    defined in the experiments yaml file for the dataset in question.

            Returns: True. Any exception raised during evaluation propagates to
                the caller rather than being turned into a False return.
        """
        # Results for this (dataset, pipeline) pair live in
        # <results_dir>/<dataset_name>/<pipeline_type>.
        dataset_name = dataset["name"]
        dataset_results_dir = os.path.join(self.results_dir, dataset_name)
        dataset_pipeline_result_dir = os.path.join(dataset_results_dir,
                                                   pipeline_type)

        # The trajectory csv files are expected inside the pipeline result dir.
        traj_gt_path = os.path.join(dataset_pipeline_result_dir,
                                    self.traj_gt_csv_name)
        traj_vio_path = os.path.join(dataset_pipeline_result_dir,
                                     self.traj_vio_csv_name)
        traj_pgo_path = os.path.join(dataset_pipeline_result_dir,
                                     self.traj_pgo_csv_name)

        # Analyze dataset:
        log.debug(
            "\033[1mAnalysing dataset:\033[0m \n %s \n \033[1m for pipeline \033[0m %s."
            % (dataset_results_dir, pipeline_type))
        evt.print_green("Starting analysis of pipeline: %s" % pipeline_type)

        # Per-dataset analysis knobs from the experiments yaml file.
        discard_n_start_poses = dataset["discard_n_start_poses"]
        discard_n_end_poses = dataset["discard_n_end_poses"]
        segments = dataset["segments"]

        [plot_collection, results_vio,
         results_pgo] = self.run_analysis(traj_gt_path, traj_vio_path,
                                          traj_pgo_path, segments,
                                          dataset_name, discard_n_start_poses,
                                          discard_n_end_poses)

        # Persist results as yaml (results_pgo may be None when no PGO
        # trajectory was produced by the run).
        if self.save_results:
            if results_vio is not None:
                self.save_results_to_file(results_vio, "results_vio",
                                          dataset_pipeline_result_dir)
            if results_pgo is not None:
                self.save_results_to_file(results_pgo, "results_pgo",
                                          dataset_pipeline_result_dir)

        # plot_collection is None when neither displaying nor saving plots
        # was requested in run_analysis.
        if self.display_plots and plot_collection is not None:
            evt.print_green("Displaying plots.")
            plot_collection.show()

        if self.save_plots and plot_collection is not None:
            self.save_plots_to_file(plot_collection,
                                    dataset_pipeline_result_dir)

        # Optionally publish this run's results to the performance website.
        if self.write_website:
            log.info("Writing performance website for dataset: %s" %
                     dataset_name)
            self.website_builder.add_dataset_to_website(
                dataset_name, pipeline_type, dataset_pipeline_result_dir)
            self.website_builder.write_datasets_website()

        return True
# Example #7
def run_dataset(results_dir,
                params_dir,
                dataset_dir,
                dataset_properties,
                executable_path,
                run_pipeline,
                analyse_vio,
                plot,
                save_results,
                save_plots,
                save_boxplots,
                pipelines_to_run_list,
                initial_k,
                final_k,
                discard_n_start_poses=0,
                discard_n_end_poses=0,
                extra_flagfile_path='',
                verbose_sparkvio=False):
    """ Evaluates pipeline using Structureless(S), Structureless(S) + Projection(P), \
            and Structureless(S) + Projection(P) + Regular(R) factors \
            and then compiles a list of results

        Returns: True if every requested pipeline ran (and was analysed)
            successfully, False if any of them failed.
    """
    dataset_name = dataset_properties['name']
    dataset_segments = dataset_properties['segments']

    ################### RUN PIPELINE ################################
    pipeline_output_dir = os.path.join(results_dir, "tmp_output/output/")
    evt.create_full_path_if_not_exists(pipeline_output_dir)
    output_file = os.path.join(pipeline_output_dir, "output_posesVIO.csv")
    has_a_pipeline_failed = False
    if len(pipelines_to_run_list) == 0:
        log.warning("Not running pipeline...")
    for pipeline_type in pipelines_to_run_list:
        # BUG FIX: accumulate failures instead of overwriting the flag each
        # iteration — previously a later successful pipeline silently masked
        # an earlier failure.
        if not process_vio(executable_path, dataset_dir, dataset_name,
                           results_dir, params_dir, pipeline_output_dir,
                           pipeline_type, dataset_segments, save_results,
                           plot, save_plots, output_file, run_pipeline,
                           analyse_vio, discard_n_start_poses,
                           discard_n_end_poses, initial_k, final_k,
                           extra_flagfile_path, verbose_sparkvio):
            has_a_pipeline_failed = True

    # Save boxplots
    if save_boxplots:
        # TODO(Toni) is this really saving the boxplots?
        if not has_a_pipeline_failed:
            stats = dict()
            # Hoisted out of the loop: it is loop-invariant, and it was
            # previously unbound (NameError) below when the pipelines list
            # was empty.
            results_dataset_dir = os.path.join(results_dir, dataset_name)
            for pipeline_type in pipelines_to_run_list:
                results = os.path.join(results_dataset_dir, pipeline_type,
                                       "results.yaml")
                if not os.path.exists(results):
                    raise Exception(
                        "\033[91mCannot plot boxplots: missing results for %s pipeline \
                                    and dataset: %s" %
                        (pipeline_type, dataset_name) + "\033[99m \n \
                                    Expected results here: %s" % results)

                try:
                    stats[pipeline_type] = yaml.load(open(results, 'r'),
                                                     Loader=yaml.Loader)
                except yaml.YAMLError as e:
                    raise Exception("Error in results file: ", e)

                log.info("Check stats %s in %s" % (pipeline_type, results))
                check_stats(stats[pipeline_type])

            log.info("Drawing boxplots.")
            evt.draw_rpe_boxplots(results_dataset_dir, stats,
                                  len(dataset_segments))
        else:
            log.warning(
                "A pipeline run has failed... skipping boxplot drawing.")

    if not has_a_pipeline_failed:
        evt.print_green("All pipeline runs were successful.")
    else:
        log.error("A pipeline has failed!")
    evt.print_green("Finished evaluation for dataset: " + dataset_name)
    return not has_a_pipeline_failed
# Example #8
def process_vio(executable_path,
                dataset_dir,
                dataset_name,
                results_dir,
                params_dir,
                pipeline_output_dir,
                pipeline_type,
                SEGMENTS,
                save_results,
                plot,
                save_plots,
                output_file,
                run_pipeline,
                analyse_vio,
                discard_n_start_poses,
                discard_n_end_poses,
                initial_k,
                final_k,
                extra_flagfile_path='',
                verbose_sparkvio=False):
    """ Run (optionally) and analyse (optionally) one pipeline on one dataset.

    Args:
        - executable_path: path to the pipeline executable (i.e. `./build/spark_vio`).
        - dataset_dir: directory of the dataset, must contain traj_gt.csv (the ground truth trajectory for analysis to work).
        - dataset_name: specific dataset to run.
        - results_dir: directory where the results of the run will reside:
        -   used as results_dir/dataset_name/S, results_dir/dataset_name/SP, results_dir/dataset_name/SPR
        -   where each directory have traj_est.csv (the estimated trajectory), and plots if requested.
        - params_dir: directory where the parameters for each pipeline reside:
        -   used as params_dir/S, params_dir/SP, params_dir/SPR.
        - pipeline_output_dir: where to store all output_* files produced by the pipeline.
        - pipeline_type: type of pipeline to process (1: S, 2: SP, 3: SPR).
        - SEGMENTS: segments for RPE boxplots.
        - save_results: saves APE, and RPE per segment results of the run.
        - plot: whether to plot the APE/RPE results or not.
        - save_plots: saves plots of APE/RPE.
        - output_file: the name of the trajectory estimate output of the vio which will then be copied as traj_est.csv.
        - run_pipeline: whether to run the VIO to generate a new traj_est.csv.
        - analyse_vio: whether to analyse traj_est.csv or not.
        - discard_n_start_poses: number of poses to discard from the start of the analysis.
        - discard_n_end_poses: number of poses to discard from the end of the analysis.
        - initial_k / final_k: first/last frame index passed to the pipeline.
        - extra_flagfile_path: to be used in order to override other flags or add new ones.
            Useful for regression tests when the param to be regressed is a gflag.
        - verbose_sparkvio: whether to print the SparkVIO messages or not.
            This is useful for debugging, but too verbose when you want to see APE/RPE results.

    Returns:
        True on success, False if the pipeline run itself failed.
    """
    dataset_results_dir = os.path.join(results_dir, dataset_name)
    dataset_pipeline_result_dir = os.path.join(dataset_results_dir,
                                               pipeline_type)
    traj_ref_path = os.path.join(dataset_dir, dataset_name,
                                 "mav0/state_groundtruth_estimate0/data.csv"
                                 )  # TODO make it not specific to EUROC
    traj_es = os.path.join(dataset_results_dir, pipeline_type, "traj_es.csv")
    evt.create_full_path_if_not_exists(traj_es)
    if run_pipeline:
        evt.print_green("Run pipeline: %s" % pipeline_type)
        # The override flags are used by the regression tests.
        if run_vio(executable_path, dataset_dir, dataset_name, params_dir,
                   pipeline_output_dir, pipeline_type, initial_k, final_k,
                   extra_flagfile_path, verbose_sparkvio) == 0:
            evt.print_green("Successful pipeline run.")
            log.debug(
                "\033[1mCopying output file: \033[0m \n %s \n \033[1m to results file:\033[0m\n %s"
                % (output_file, traj_es))
            copyfile(output_file, traj_es)
            output_destination_dir = os.path.join(dataset_pipeline_result_dir,
                                                  "output")
            log.debug(
                "\033[1mMoving output dir:\033[0m \n %s \n \033[1m to destination:\033[0m \n %s"
                % (pipeline_output_dir, output_destination_dir))
            try:
                evt.move_output_from_to(pipeline_output_dir,
                                        output_destination_dir)
            # BUG FIX: narrowed from a bare 'except:' which also swallowed
            # KeyboardInterrupt and SystemExit.
            except Exception:
                log.fatal(
                    "\033[1mFailed copying output dir: \033[0m\n %s \n \033[1m to destination: %s \033[0m\n"
                    % (pipeline_output_dir, output_destination_dir))
        else:
            log.error("Pipeline failed on dataset: " + dataset_name)
            # Avoid writting results.yaml with analysis if the pipeline failed.
            log.info("Not writting results.yaml")
            return False

    if analyse_vio:
        log.debug(
            "\033[1mAnalysing dataset:\033[0m \n %s \n \033[1m for pipeline \033[0m %s."
            % (dataset_results_dir, pipeline_type))
        evt.print_green("Starting analysis of pipeline: %s" % pipeline_type)
        run_analysis(traj_ref_path, traj_es, SEGMENTS, save_results, plot,
                     save_plots, dataset_pipeline_result_dir, False,
                     dataset_name, discard_n_start_poses, discard_n_end_poses)
    return True
# Example #9
def run_analysis(traj_ref_path,
                 traj_est_path,
                 segments,
                 save_results,
                 display_plot,
                 save_plots,
                 save_folder,
                 confirm_overwrite=False,
                 dataset_name="",
                 discard_n_start_poses=0,
                 discard_n_end_poses=0):
    """ Run analysis on given trajectories, saves plots on given path:
    :param traj_ref_path: path to the reference (ground truth) trajectory.
    :param traj_est_path: path to the estimated trajectory.
    :param segments: list of segment lengths (meters) for per-segment RPE.
    :param save_results: saves APE, and RPE per segment results.
    :param display_plot: whether to show plots on screen.
    :param save_plots: whether to save the plots.
    :param save_folder: where to save the plots.
    :param confirm_overwrite: whether to confirm overwriting plots or not.
    :param dataset_name: optional param, to allow setting the same scale on different plots.
    :param discard_n_start_poses: number of poses discarded from the start.
    :param discard_n_end_poses: number of poses discarded from the end.
    """
    # Load trajectories.
    from evo.tools import file_interface
    traj_ref = None
    try:
        traj_ref = file_interface.read_euroc_csv_trajectory(
            traj_ref_path)  # TODO make it non-euroc specific.
    except file_interface.FileInterfaceException as e:
        raise Exception(
            "\033[91mMissing ground truth csv! \033[93m {}.".format(e))

    traj_est = None
    try:
        traj_est = file_interface.read_swe_csv_trajectory(traj_est_path)
    except file_interface.FileInterfaceException as e:
        log.info(e)
        raise Exception("\033[91mMissing vio output csv.\033[99m")

    evt.print_purple("Registering trajectories")
    traj_ref, traj_est = sync.associate_trajectories(traj_ref, traj_est)

    evt.print_purple("Aligning trajectories")
    traj_est = trajectory.align_trajectory(
        traj_est,
        traj_ref,
        correct_scale=False,
        discard_n_start_poses=int(discard_n_start_poses),
        discard_n_end_poses=int(discard_n_end_poses))

    # Trim both trajectories to the same [start, end) pose range.
    num_of_poses = traj_est.num_poses
    traj_est.reduce_to_ids(
        range(int(discard_n_start_poses),
              int(num_of_poses - discard_n_end_poses), 1))
    traj_ref.reduce_to_ids(
        range(int(discard_n_start_poses),
              int(num_of_poses - discard_n_end_poses), 1))

    results = dict()

    evt.print_purple("Calculating APE translation part")
    data = (traj_ref, traj_est)
    ape_metric = metrics.APE(metrics.PoseRelation.translation_part)
    ape_metric.process_data(data)
    ape_result = ape_metric.get_result()
    results["absolute_errors"] = ape_result

    log.info(ape_result.pretty_str(info=True))

    # TODO(Toni): Save RPE computation results rather than the statistics
    # you can compute statistics later...
    evt.print_purple("Calculating RPE translation part for plotting")
    rpe_metric_trans = metrics.RPE(metrics.PoseRelation.translation_part, 1.0,
                                   metrics.Unit.frames, 0.0, False)
    rpe_metric_trans.process_data(data)
    rpe_stats_trans = rpe_metric_trans.get_all_statistics()
    log.info("mean: %f" % rpe_stats_trans["mean"])

    evt.print_purple("Calculating RPE rotation angle for plotting")
    rpe_metric_rot = metrics.RPE(metrics.PoseRelation.rotation_angle_deg, 1.0,
                                 metrics.Unit.frames, 1.0, False)
    rpe_metric_rot.process_data(data)
    rpe_stats_rot = rpe_metric_rot.get_all_statistics()
    log.info("mean: %f" % rpe_stats_rot["mean"])

    results["relative_errors"] = dict()
    # Read segments file
    for segment in segments:
        results["relative_errors"][segment] = dict()
        evt.print_purple("RPE analysis of segment: %d" % segment)
        evt.print_lightpurple("Calculating RPE segment translation part")
        rpe_segment_metric_trans = metrics.RPE(
            metrics.PoseRelation.translation_part, float(segment),
            metrics.Unit.meters, 0.01, True)
        rpe_segment_metric_trans.process_data(data)
        rpe_segment_stats_trans = rpe_segment_metric_trans.get_all_statistics()
        results["relative_errors"][segment][
            "rpe_trans"] = rpe_segment_stats_trans

        evt.print_lightpurple("Calculating RPE segment rotation angle")
        rpe_segment_metric_rot = metrics.RPE(
            metrics.PoseRelation.rotation_angle_deg, float(segment),
            metrics.Unit.meters, 0.01, True)
        rpe_segment_metric_rot.process_data(data)
        rpe_segment_stats_rot = rpe_segment_metric_rot.get_all_statistics()
        results["relative_errors"][segment]["rpe_rot"] = rpe_segment_stats_rot

    if save_results:
        # Save results file
        results_file = os.path.join(save_folder, 'results.yaml')
        evt.print_green("Saving analysis results to: %s" % results_file)
        with open(results_file, 'w') as outfile:
            if confirm_overwrite:
                if evt.user.check_and_confirm_overwrite(results_file):
                    outfile.write(yaml.dump(results, default_flow_style=False))
                else:
                    log.info("Not overwritting results.")
            else:
                outfile.write(yaml.dump(results, default_flow_style=False))

    # For each segment in segments file
    # Calculate rpe with delta = segment in meters with all-pairs set to True
    # Calculate max, min, rmse, mean, median etc

    # Plot boxplot, or those cumulative figures you see in evo (like demographic plots)
    if display_plot or save_plots:
        evt.print_green("Plotting:")
        log.info(dataset_name)
        plot_collection = plot.PlotCollection("Example")
        # metric values
        fig_1 = plt.figure(figsize=(8, 8))
        ymax = -1
        # BUG FIX: compare strings with '!=' instead of 'is not' — identity
        # comparison against a literal is implementation-dependent and emits
        # a SyntaxWarning on modern CPython. (Fixed in three places below.)
        if dataset_name != "" and FIX_MAX_Y:
            ymax = Y_MAX_APE_TRANS[dataset_name]

        ape_statistics = ape_metric.get_all_statistics()
        plot.error_array(
            fig_1,
            ape_metric.error,
            statistics=ape_statistics,
            name="APE translation",
            title="",  # str(ape_metric)
            xlabel="Keyframe index [-]",
            ylabel="APE translation [m]",
            y_min=0.0,
            y_max=ymax)
        plot_collection.add_figure("APE_translation", fig_1)

        # trajectory colormapped with error
        fig_2 = plt.figure(figsize=(8, 8))
        plot_mode = plot.PlotMode.xy
        ax = plot.prepare_axis(fig_2, plot_mode)
        plot.traj(ax, plot_mode, traj_ref, '--', 'gray', 'reference')
        plot.traj_colormap(ax,
                           traj_est,
                           ape_metric.error,
                           plot_mode,
                           min_map=0.0,
                           max_map=math.ceil(ape_statistics['max'] * 10) / 10,
                           title="ATE mapped onto trajectory [m]")
        plot_collection.add_figure("APE_translation_trajectory_error", fig_2)

        # RPE
        ## Trans
        ### metric values
        fig_3 = plt.figure(figsize=(8, 8))
        if dataset_name != "" and FIX_MAX_Y:
            ymax = Y_MAX_RPE_TRANS[dataset_name]
        plot.error_array(
            fig_3,
            rpe_metric_trans.error,
            statistics=rpe_stats_trans,
            name="RPE translation",
            title="",  # str(rpe_metric_trans)
            xlabel="Keyframe index [-]",
            ylabel="RPE translation [m]",
            y_max=ymax)
        plot_collection.add_figure("RPE_translation", fig_3)

        ### trajectory colormapped with error
        # RPE is defined between pose pairs, so restrict both trajectories to
        # the ids the metric actually used before plotting.
        fig_4 = plt.figure(figsize=(8, 8))
        plot_mode = plot.PlotMode.xy
        ax = plot.prepare_axis(fig_4, plot_mode)
        traj_ref_trans = copy.deepcopy(traj_ref)
        traj_ref_trans.reduce_to_ids(rpe_metric_trans.delta_ids)
        traj_est_trans = copy.deepcopy(traj_est)
        traj_est_trans.reduce_to_ids(rpe_metric_trans.delta_ids)
        plot.traj(ax, plot_mode, traj_ref_trans, '--', 'gray', 'Reference')
        plot.traj_colormap(
            ax,
            traj_est_trans,
            rpe_metric_trans.error,
            plot_mode,
            min_map=0.0,
            max_map=math.ceil(rpe_stats_trans['max'] * 10) / 10,
            title="RPE translation error mapped onto trajectory [m]")
        plot_collection.add_figure("RPE_translation_trajectory_error", fig_4)

        ## Rot
        ### metric values
        fig_5 = plt.figure(figsize=(8, 8))
        if dataset_name != "" and FIX_MAX_Y:
            ymax = Y_MAX_RPE_ROT[dataset_name]
        plot.error_array(
            fig_5,
            rpe_metric_rot.error,
            statistics=rpe_stats_rot,
            name="RPE rotation error",
            title="",  # str(rpe_metric_rot)
            xlabel="Keyframe index [-]",
            ylabel="RPE rotation [deg]",
            y_max=ymax)
        plot_collection.add_figure("RPE_rotation", fig_5)

        ### trajectory colormapped with error
        fig_6 = plt.figure(figsize=(8, 8))
        plot_mode = plot.PlotMode.xy
        ax = plot.prepare_axis(fig_6, plot_mode)
        traj_ref_rot = copy.deepcopy(traj_ref)
        traj_ref_rot.reduce_to_ids(rpe_metric_rot.delta_ids)
        traj_est_rot = copy.deepcopy(traj_est)
        traj_est_rot.reduce_to_ids(rpe_metric_rot.delta_ids)
        plot.traj(ax, plot_mode, traj_ref_rot, '--', 'gray', 'Reference')
        plot.traj_colormap(
            ax,
            traj_est_rot,
            rpe_metric_rot.error,
            plot_mode,
            min_map=0.0,
            max_map=math.ceil(rpe_stats_rot['max'] * 10) / 10,
            title="RPE rotation error mapped onto trajectory [deg]")
        plot_collection.add_figure("RPE_rotation_trajectory_error", fig_6)

        if display_plot:
            evt.print_green("Displaying plots.")
            plot_collection.show()

        if save_plots:
            evt.print_green("Saving plots to: ")
            log.info(save_folder)
            # Config output format (pdf, eps, ...) using evo_config...
            plot_collection.export(os.path.join(save_folder, "plots.eps"),
                                   False)
            plot_collection.export(os.path.join(save_folder, "plots.pdf"),
                                   False)