# Example 1
def draw_timing_plot(filename,
                     keyframe_ids,
                     pipelines_times,
                     ylabel='Optimization time [s]',
                     display_plot=False,
                     display_x_label=True,
                     latexify=True,
                     fig_width=6,
                     fig_height=3):
    """ Plots timing information for each pipeline contained in the list of dicts
    pipelines_times, and saves the figure to `filename`.

    Args:
        filename: where to save the figure (intermediate dirs are created).
        keyframe_ids: x-axis values; must have the same length as each
            pipeline's 'times' list.
        pipelines_times: list of dicts of the form:
            [{
                'pipeline_name': pipeline_name,
                'line_color': np.random.rand(3),
                'line_style': '-',
                'times': update_times
            }, ... ]
        ylabel: Y label used for the plot.
        display_plot: whether to display the plot or not.
        display_x_label: whether to display the x label of the plot or not.
        latexify: whether to use latex for the generation of the plot.
        fig_width: figure width in inches.
        fig_height: figure height in inches.
    """
    if latexify:
        pt.latexify(fig_width, fig_height)
    plt.figure(figsize=[fig_width, fig_height], dpi=1000)
    for pipeline_time in pipelines_times:
        # Each timing series must align one-to-one with the x ticks.
        assert len(keyframe_ids) == len(pipeline_time['times'])
        plt.plot(keyframe_ids,
                 pipeline_time['times'],
                 linestyle=pipeline_time['line_style'],
                 color=pipeline_time['line_color'],
                 linewidth=0.5,
                 label="$t_{" + pipeline_time['pipeline_name'] + "}^{opt}$")
    plt.ylabel(ylabel)
    if display_x_label:
        plt.xlabel('Keyframe Index [-]')
    plt.xlim(min(keyframe_ids), max(keyframe_ids))
    plt.ylim(bottom=0)
    plt.grid(axis='both', linestyle='--')
    plt.legend()
    # Create path to filename if it does not exist.
    evt.create_full_path_if_not_exists(filename)
    plt.savefig(filename, bbox_inches='tight', transparent=True, dpi=1000)
    if display_plot:
        plt.show()
    def __init__(self, experiment_params, args, extra_flagfile_path=''):
        """Configure the evaluation runner from an experiment description.

        Args:
            experiment_params: dict with keys 'vocabulary_path', 'results_dir',
                'params_dir', 'dataset_dir', 'executable_path' (all expanded
                for environment variables) and 'datasets_to_run'.
            args: parsed CLI arguments; only `verbose_sparkvio` is read here.
            extra_flagfile_path: optional extra gflags file to forward to runs.
        """
        expand = os.path.expandvars
        self.vocabulary_path = expand(experiment_params['vocabulary_path'])
        self.results_dir = expand(experiment_params['results_dir'])
        self.params_dir = expand(experiment_params['params_dir'])
        self.dataset_dir = expand(experiment_params['dataset_dir'])
        self.executable_path = expand(experiment_params['executable_path'])
        self.datasets_to_run = experiment_params['datasets_to_run']
        self.verbose_vio = args.verbose_sparkvio
        self.extra_flagfile_path = extra_flagfile_path

        # Scratch directory where the pipeline drops its raw output files.
        self.pipeline_output_dir = os.path.join(self.results_dir,
                                                "tmp_output/output/")
        evt.create_full_path_if_not_exists(self.pipeline_output_dir)
    def save_results_to_file(self, results, title,
                             dataset_pipeline_result_dir):
        """ Writes a result dictionary to file as a yaml file.

            Args:
                results: a dictionary containing ape, rpe rotation and rpe translation results and
                    statistics.
                title: a string representing the filename without the '.yaml' extension.
                dataset_pipeline_result_dir: a string representing the filepath for the location to
                    save the results file.
        """
        results_file = os.path.join(dataset_pipeline_result_dir,
                                    title + '.yaml')
        evt.print_green("Saving analysis results to: %s" % results_file)
        # Make sure the destination directory exists before opening the file.
        evt.create_full_path_if_not_exists(results_file)
        serialized = yaml.dump(results, default_flow_style=False)
        with open(results_file, 'w') as outfile:
            outfile.write(serialized)
def write_vio_results_summary(results, vio_results_summary_path):
    """Writes a one-row CSV summary of the VIO run (ATE mean and RMSE).

    Args:
        results: dict whose 'absolute_errors' entry exposes a `.stats` mapping
            with 'mean' and 'rmse' keys (APE statistics).
        vio_results_summary_path: destination CSV path; intermediate
            directories are created if missing.
    """
    # Get APE.
    ATE_mean = results['absolute_errors'].stats['mean']
    ATE_rmse = results['absolute_errors'].stats['rmse']
    # TODO: also export RPE for the smallest segment once re-enabled:
    #   RPE_mean = results['relative_errors'][0]['mean']
    #   RPE_rmse = results['relative_errors'][0]['rmse']
    # Generate path to summary if it does not exist.
    evt.create_full_path_if_not_exists(vio_results_summary_path)
    # Write to CSV file.
    with open(vio_results_summary_path, 'w') as vio_results_summary_file:
        # Lazy %-style args: the message is only formatted if the record
        # actually gets emitted.
        log.info('Writing VIO summary results to: %s', vio_results_summary_path)
        performance_metrics = ['ATE_mean', 'ATE_rmse']
        writer = csv.DictWriter(vio_results_summary_file,
                                fieldnames=performance_metrics)
        writer.writeheader()
        writer.writerow({'ATE_mean': ATE_mean, 'ATE_rmse': ATE_rmse})
def run_dataset(results_dir,
                params_dir,
                dataset_dir,
                dataset_properties,
                executable_path,
                run_pipeline,
                analyse_vio,
                plot,
                save_results,
                save_plots,
                save_boxplots,
                pipelines_to_run_list,
                initial_k,
                final_k,
                discard_n_start_poses=0,
                discard_n_end_poses=0,
                extra_flagfile_path='',
                verbose_sparkvio=False):
    """ Evaluates pipeline using Structureless(S), Structureless(S) + Projection(P), \
            and Structureless(S) + Projection(P) + Regular(R) factors \
            and then compiles a list of results.

    Returns:
        True if every requested pipeline ran (and was analysed) successfully,
        False if any of them failed.
    """
    dataset_name = dataset_properties['name']
    dataset_segments = dataset_properties['segments']

    ################### RUN PIPELINE ################################
    pipeline_output_dir = os.path.join(results_dir, "tmp_output/output/")
    evt.create_full_path_if_not_exists(pipeline_output_dir)
    output_file = os.path.join(pipeline_output_dir, "output_posesVIO.csv")
    has_a_pipeline_failed = False
    if len(pipelines_to_run_list) == 0:
        log.warning("Not running pipeline...")
    for pipeline_type in pipelines_to_run_list:
        # BUGFIX: accumulate failures instead of overwriting the flag each
        # iteration — previously a later successful pipeline masked an
        # earlier failure.
        if not process_vio(
                executable_path, dataset_dir, dataset_name, results_dir,
                params_dir, pipeline_output_dir, pipeline_type,
                dataset_segments, save_results, plot, save_plots, output_file,
                run_pipeline, analyse_vio, discard_n_start_poses,
                discard_n_end_poses, initial_k, final_k, extra_flagfile_path,
                verbose_sparkvio):
            has_a_pipeline_failed = True

    # Save boxplots
    if save_boxplots:
        # TODO(Toni) is this really saving the boxplots?
        if not has_a_pipeline_failed:
            stats = dict()
            for pipeline_type in pipelines_to_run_list:
                results_dataset_dir = os.path.join(results_dir, dataset_name)
                results = os.path.join(results_dataset_dir, pipeline_type,
                                       "results.yaml")
                if not os.path.exists(results):
                    raise Exception(
                        "\033[91mCannot plot boxplots: missing results for %s pipeline \
                                    and dataset: %s" %
                        (pipeline_type, dataset_name) + "\033[99m \n \
                                    Expected results here: %s" % results)

                try:
                    stats[pipeline_type] = yaml.load(open(results, 'r'),
                                                     Loader=yaml.Loader)
                except yaml.YAMLError as e:
                    raise Exception("Error in results file: ", e)

                log.info("Check stats %s in %s" % (pipeline_type, results))
                check_stats(stats[pipeline_type])

            log.info("Drawing boxplots.")
            evt.draw_rpe_boxplots(results_dataset_dir, stats,
                                  len(dataset_segments))
        else:
            log.warning(
                "A pipeline run has failed... skipping boxplot drawing.")

    if not has_a_pipeline_failed:
        evt.print_green("All pipeline runs were successful.")
    else:
        log.error("A pipeline has failed!")
    evt.print_green("Finished evaluation for dataset: " + dataset_name)
    return not has_a_pipeline_failed
def process_vio(executable_path,
                dataset_dir,
                dataset_name,
                results_dir,
                params_dir,
                pipeline_output_dir,
                pipeline_type,
                SEGMENTS,
                save_results,
                plot,
                save_plots,
                output_file,
                run_pipeline,
                analyse_vio,
                discard_n_start_poses,
                discard_n_end_poses,
                initial_k,
                final_k,
                extra_flagfile_path='',
                verbose_sparkvio=False):
    """ Runs (optionally) and analyses (optionally) one pipeline on one dataset.

    Args:
        - executable_path: path to the pipeline executable (i.e. `./build/spark_vio`).
        - dataset_dir: directory of the dataset, must contain traj_gt.csv (the ground truth trajectory for analysis to work).
        - dataset_name: specific dataset to run.
        - results_dir: directory where the results of the run will reside:
        -   used as results_dir/dataset_name/S, results_dir/dataset_name/SP, results_dir/dataset_name/SPR
        -   where each directory have traj_est.csv (the estimated trajectory), and plots if requested.
        - params_dir: directory where the parameters for each pipeline reside:
        -   used as params_dir/S, params_dir/SP, params_dir/SPR.
        - pipeline_output_dir: where to store all output_* files produced by the pipeline.
        - pipeline_type: type of pipeline to process (1: S, 2: SP, 3: SPR).
        - SEGMENTS: segments for RPE boxplots.
        - save_results: saves APE, and RPE per segment results of the run.
        - plot: whether to plot the APE/RPE results or not.
        - save_plots: saves plots of APE/RPE.
        - output_file: the name of the trajectory estimate output of the vio which will then be copied as traj_est.csv.
        - run_pipeline: whether to run the VIO to generate a new traj_est.csv.
        - analyse_vio: whether to analyse traj_est.csv or not.
        - discard_n_start_poses: number of poses to drop at the start of the trajectory for analysis.
        - discard_n_end_poses: number of poses to drop at the end of the trajectory for analysis.
        - initial_k / final_k: first and last frame indices handed to the VIO executable.
        - extra_flagfile_path: to be used in order to override other flags or add new ones.
            Useful for regression tests when the param to be regressed is a gflag.
        - verbose_sparkvio: whether to print the SparkVIO messages or not.
            This is useful for debugging, but too verbose when you want to see APE/RPE results.

    Returns:
        True on success (or when nothing was requested), False if the
        pipeline run itself failed.
    """
    dataset_results_dir = os.path.join(results_dir, dataset_name)
    dataset_pipeline_result_dir = os.path.join(dataset_results_dir,
                                               pipeline_type)
    traj_ref_path = os.path.join(dataset_dir, dataset_name,
                                 "mav0/state_groundtruth_estimate0/data.csv"
                                 )  # TODO make it not specific to EUROC
    traj_es = os.path.join(dataset_results_dir, pipeline_type, "traj_es.csv")
    evt.create_full_path_if_not_exists(traj_es)
    if run_pipeline:
        evt.print_green("Run pipeline: %s" % pipeline_type)
        # The override flags are used by the regression tests.
        if run_vio(executable_path, dataset_dir, dataset_name, params_dir,
                   pipeline_output_dir, pipeline_type, initial_k, final_k,
                   extra_flagfile_path, verbose_sparkvio) == 0:
            evt.print_green("Successful pipeline run.")
            log.debug(
                "\033[1mCopying output file: \033[0m \n %s \n \033[1m to results file:\033[0m\n %s"
                % (output_file, traj_es))
            copyfile(output_file, traj_es)
            output_destination_dir = os.path.join(dataset_pipeline_result_dir,
                                                  "output")
            log.debug(
                "\033[1mMoving output dir:\033[0m \n %s \n \033[1m to destination:\033[0m \n %s"
                % (pipeline_output_dir, output_destination_dir))
            try:
                evt.move_output_from_to(pipeline_output_dir,
                                        output_destination_dir)
            # BUGFIX: a bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; catch only real errors (best-effort move,
            # so we log and continue rather than re-raise).
            except Exception:
                log.fatal(
                    "\033[1mFailed copying output dir: \033[0m\n %s \n \033[1m to destination: %s \033[0m\n"
                    % (pipeline_output_dir, output_destination_dir))
        else:
            log.error("Pipeline failed on dataset: " + dataset_name)
            # Avoid writting results.yaml with analysis if the pipeline failed.
            log.info("Not writting results.yaml")
            return False

    if analyse_vio:
        log.debug(
            "\033[1mAnalysing dataset:\033[0m \n %s \n \033[1m for pipeline \033[0m %s."
            % (dataset_results_dir, pipeline_type))
        evt.print_green("Starting analysis of pipeline: %s" % pipeline_type)
        run_analysis(traj_ref_path, traj_es, SEGMENTS, save_results, plot,
                     save_plots, dataset_pipeline_result_dir, False,
                     dataset_name, discard_n_start_poses, discard_n_end_poses)
    return True