Code example #1
File: main_res.py Project: zaalsabb/evo
import typing

import pandas as pd

from evo.core.result import merge_results
from evo.tools import file_interface, pandas_bridge


def load_results_as_dataframe(result_files: typing.Iterable[str],
                              use_filenames: bool = False,
                              merge: bool = False) -> pd.DataFrame:
    if merge:
        results = [file_interface.load_res_file(f) for f in result_files]
        return pandas_bridge.result_to_df(merge_results(results))

    df = pd.DataFrame()
    for result_file in result_files:
        result = file_interface.load_res_file(result_file)
        name = result_file if use_filenames else None
        df = pd.concat([df, pandas_bridge.result_to_df(result, name)],
                       axis="columns")
    return df
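
A minimal usage sketch for the helper above; the two result-file paths are hypothetical:

# hypothetical .zip result files produced by evo's metrics apps
df = load_results_as_dataframe(["run1_ape.zip", "run2_ape.zip"],
                               use_filenames=True)
print(df.loc["stats"])  # one statistics column per result file
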
Code example #2
File: stats.py Project: kostaskonkk/evo
def load_results_as_dataframe(result_files, use_filenames=False, merge=False):
    import pandas as pd
    from evo.tools import pandas_bridge
    from evo.tools import file_interface

    if merge:
        from evo.core.result import merge_results
        results = [file_interface.load_res_file(f) for f in result_files]
        return pandas_bridge.result_to_df(merge_results(results))

    df = pd.DataFrame()
    for result_file in result_files:
        result = file_interface.load_res_file(result_file)
        name = result_file if use_filenames else None
        df = pd.concat([df, pandas_bridge.result_to_df(result, name)],
                       axis="columns")
    return df
Code example #3
File: test_file_interface.py Project: zxw610/evo
def test_write_read_integrity(self):
    result_out = Result()
    result_out.add_np_array("test-array", np.ones(1000))
    result_out.add_info({"name": "test", "number": 666})
    result_out.add_trajectory("traj", helpers.fake_trajectory(1000, 0.1))
    file_interface.save_res_file(self.mock_file, result_out)
    result_in = file_interface.load_res_file(self.mock_file,
                                             load_trajectories=True)
    self.assertEqual(result_in, result_out)
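
The round-trip test above relies on Result being saved and loaded symmetrically. A standalone sketch of the same idea, assuming Result lives in evo.core.result as in example #2; the output path is hypothetical:

import numpy as np

from evo.core.result import Result
from evo.tools import file_interface

result = Result()
result.add_np_array("error_array", np.random.rand(100))
result.add_info({"title": "APE", "est_name": "demo"})
file_interface.save_res_file("/tmp/demo_result.zip", result)
loaded = file_interface.load_res_file("/tmp/demo_result.zip")
assert loaded == result  # Result supports equality, as the test asserts
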
Code example #4
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

from evo.tools import file_interface
# load FB (Forward-Backend) trajectory error data
FB_ate_MH_01_error = file_interface.load_res_file(
    "/home/cc/output/FB_MH01_ape.zip")
FB_ate_MH_01 = FB_ate_MH_01_error.np_arrays["error_array"]

# load OF (optical flow, Lucas-Kanade) trajectory error data
of_ate_MH_01_error = file_interface.load_res_file(
    "/home/cc/output/of_MH01_ape.zip")
of_ate_MH_01 = of_ate_MH_01_error.np_arrays["error_array"]

FB_Frame = np.linspace(1, len(FB_ate_MH_01), num=len(FB_ate_MH_01))
of_Frame = np.linspace(1, len(of_ate_MH_01), num=len(of_ate_MH_01))

#print(np.mean(FB_process_time))
#print(np.mean(of_process_time))
#plt.plot(FB_Frame, FB_ate_MH_01, marker=".",label='Forward-Backend', linewidth=1)
#plt.plot(of_Frame, of_ate_MH_01, marker=".",label='L-K', linewidth=1)
plt.plot(FB_Frame, FB_ate_MH_01, label='Forward-backend', linewidth=1)
plt.plot(of_Frame, of_ate_MH_01, label='Lucas-Kanade', linewidth=1)

plt.xlim(0, max(len(FB_Frame), len(of_Frame)))
#plt.ylim(0, max(of_ate_MH_01))
plt.xlabel("Keyframe")
plt.ylabel("ATE(m)")
Code example #5
File: main_res.py Project: zhengguoxian123/evo
def run(args):
    import os
    import sys
    import logging

    import pandas as pd
    import numpy as np
    from natsort import natsorted

    from evo.tools import file_interface, user, settings
    from evo.tools.settings import SETTINGS

    settings.configure_logging(args.verbose, args.silent, args.debug)
    if args.debug:
        import pprint
        logging.debug(
            "main_parser config:\n" +
            pprint.pformat({arg: getattr(args, arg)
                            for arg in vars(args)}) + "\n")

    # store data in Pandas data frames for easier analysis
    raw_df = pd.DataFrame()
    stat_df = pd.DataFrame()
    info_df = pd.DataFrame()
    use_seconds = False

    for result_file in args.result_files:
        logging.debug(SEP)
        result_obj = file_interface.load_res_file(result_file, True)
        short_est_name = os.path.splitext(
            os.path.basename(result_obj.info["est_name"]))[0]
        error_array = result_obj.np_arrays["error_array"]
        if "seconds_from_start" in result_obj.np_arrays:
            seconds_from_start = result_obj.np_arrays["seconds_from_start"]
        else:
            seconds_from_start = None

        if not args.no_warnings and (short_est_name in info_df.columns):
            logging.warning("double entry detected: " + short_est_name)
            if not user.confirm(
                    "ignore? enter 'y' to go on or any other key to quit"):
                sys.exit()

        if SETTINGS.plot_usetex:
            short_est_name = short_est_name.replace("_", "\\_")

        if args.use_abs_time:
            if "traj_est" in result_obj.trajectories:
                traj_est = result_obj.trajectories["traj_est"]
                index = traj_est.timestamps
                use_seconds = True
            else:
                msg = "no 'traj_est' trajectory found in " + result_file \
                      + " but --use_abs_time requires the trajectory in the result file - " \
                      + "to let the metrics app include them run: evo_config set save_traj_in_zip"
                raise RuntimeError(msg)
        elif seconds_from_start is not None:
            index = seconds_from_start.tolist()
            use_seconds = True
        else:
            index = np.arange(0, error_array.shape[0])

        result_obj.info["traj. backup?"] = \
            all(k in result_obj.trajectories for k in ("traj_ref", "traj_est"))
        result_obj.info["res_file"] = result_file
        new_raw_df = pd.DataFrame({short_est_name: error_array.tolist()},
                                  index=index)
        new_info_df = pd.DataFrame({short_est_name: result_obj.info})
        new_stat_df = pd.DataFrame({short_est_name: result_obj.stats})
        # natural sort num strings "10" "100" "20" -> "10" "20" "100"
        new_stat_df = new_stat_df.reindex(index=natsorted(new_stat_df.index))
        # column-wise concatenation
        raw_df = pd.concat([raw_df, new_raw_df], axis=1)
        info_df = pd.concat([info_df, new_info_df], axis=1)
        stat_df = pd.concat([stat_df, new_stat_df], axis=1)
        # if verbose: log infos of the current data
        logging.debug(
            "\n" + result_obj.pretty_str(title=True, stats=False, info=True))

    logging.debug(SEP)
    logging.info("\nstatistics overview:\n" +
                 stat_df.T.to_string(line_width=80) + "\n")

    # check titles
    # DataFrame.ix was removed in pandas 1.0; the equivalent .loc/.iloc
    # calls below replace the original .ix accesses in this example
    first_title = info_df.loc["title"].iloc[0]
    first_res_file = info_df.loc["res_file"].iloc[0]
    if args.save_table or args.plot or args.save_plot:
        for short_est_name, column in info_df.items():
            if column.loc["title"] != first_title and not args.no_warnings:
                logging.info(SEP)
                logging.warning(
                    "mismatching titles, you probably use data from different metrics"
                )
                logging.warning("conflict:\n" + "<" * 7 + " " +
                                first_res_file + "\n" + first_title + "\n" +
                                "=" * 7 + "\n" + column.ix["title"] + "\n" +
                                ">" * 7 + " " + column.ix["res_file"])
                logging.warning(
                    "only the first one will be used as the title!")
                if not user.confirm(
                        "plot/save anyway? - enter 'y' or any other key to exit"
                ):
                    sys.exit()

    if args.save_table:
        logging.debug(SEP)
        if args.no_warnings or user.check_and_confirm_overwrite(
                args.save_table):
            table_fmt = SETTINGS.table_export_format
            if SETTINGS.table_export_transpose:
                getattr(stat_df.T, "to_" + table_fmt)(args.save_table)
            else:
                getattr(stat_df, "to_" + table_fmt)(args.save_table)
            logging.debug(table_fmt + " table saved to: " + args.save_table)

    if args.plot or args.save_plot or args.serialize_plot:
        # check if data has NaN "holes" due to different indices
        inconsistent = raw_df.isnull().values.any()
        if inconsistent and not args.no_warnings:
            logging.debug(SEP)
            logging.warning(
                "data lengths/indices are not consistent, plotting probably makes no sense"
            )
            if not user.confirm(
                    "plot anyway? - enter 'y' or any other key to exit"):
                sys.exit()

        from evo.tools import plot
        import matplotlib.pyplot as plt
        import seaborn as sns
        import math
        from scipy import stats

        # use default plot settings
        figsize = (SETTINGS.plot_figsize[0], SETTINGS.plot_figsize[1])
        use_cmap = SETTINGS.plot_multi_cmap.lower() != "none"
        colormap = SETTINGS.plot_multi_cmap if use_cmap else None
        linestyles = ["-o" for x in args.result_files
                      ] if args.plot_markers else None

        # labels according to first dataset
        title = first_title
        if "xlabel" in info_df.ix[:, 0].index:
            index_label = info_df.ix["xlabel", 0]
        else:
            index_label = "$t$ (s)" if use_seconds else "index"
        metric_label = info_df.loc["label"].iloc[0]

        plot_collection = plot.PlotCollection(title)
        # raw value plot
        fig_raw = plt.figure(figsize=figsize)
        # handle NaNs from concat() above
        raw_df.interpolate(method="index").plot(ax=fig_raw.gca(),
                                                colormap=colormap,
                                                style=linestyles,
                                                title=first_title)
        plt.xlabel(index_label)
        plt.ylabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("raw", fig_raw)

        # statistics plot
        fig_stats = plt.figure(figsize=figsize)
        exclude = stat_df.index.isin(["sse"])  # don't plot sse
        stat_df[~exclude].plot(kind="barh",
                               ax=fig_stats.gca(),
                               colormap=colormap,
                               stacked=False)
        plt.xlabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("stats", fig_stats)

        # grid of distribution plots
        raw_tidy = pd.melt(raw_df,
                           value_vars=list(raw_df.columns.values),
                           var_name="estimate",
                           value_name=metric_label)
        col_wrap = 2 if len(args.result_files) <= 2 else math.ceil(
            len(args.result_files) / 2.0)
        dist_grid = sns.FacetGrid(raw_tidy, col="estimate", col_wrap=col_wrap)
        # note: sns.distplot was removed in seaborn 0.14; newer versions
        # use sns.histplot or sns.displot instead
        dist_grid.map(sns.distplot, metric_label)  # fits=stats.gamma
        plot_collection.add_figure("histogram", dist_grid.fig)

        # box plot
        fig_box = plt.figure(figsize=figsize)
        ax = sns.boxplot(x=raw_tidy["estimate"],
                         y=raw_tidy[metric_label],
                         ax=fig_box.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("box_plot", fig_box)

        # violin plot
        fig_violin = plt.figure(figsize=figsize)
        ax = sns.violinplot(x=raw_tidy["estimate"],
                            y=raw_tidy[metric_label],
                            ax=fig_violin.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("violin_histogram", fig_violin)

        if args.plot:
            plot_collection.show()
        if args.save_plot:
            logging.debug(SEP)
            plot_collection.export(args.save_plot,
                                   confirm_overwrite=not args.no_warnings)
        if args.serialize_plot:
            logging.debug(SEP)
            plot_collection.serialize(args.serialize_plot,
                                      confirm_overwrite=not args.no_warnings)
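
run() expects an argparse-style namespace; every attribute below is read somewhere in the body above. A hedged sketch of a minimal programmatic invocation, with hypothetical result-file paths:

import argparse

args = argparse.Namespace(
    result_files=["ape_run1.zip", "ape_run2.zip"],  # hypothetical
    verbose=True, silent=False, debug=False,
    no_warnings=True, use_abs_time=False,
    save_table=None, plot=True, save_plot=None,
    serialize_plot=None, plot_markers=False)
run(args)
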
Code example #6
File: main_res.py Project: yinyanpeng/evo
def run(args):
    import sys
    import logging

    import pandas as pd

    from evo.tools import file_interface, user, settings, pandas_bridge
    from evo.tools.settings import SETTINGS

    pd.options.display.width = 80
    pd.options.display.max_colwidth = 20

    settings.configure_logging(args.verbose, args.silent, args.debug)
    if args.debug:
        import pprint
        arg_dict = {arg: getattr(args, arg) for arg in vars(args)}
        logging.debug("main_parser config:\n{}\n".format(
            pprint.pformat(arg_dict)))

    df = pd.DataFrame()
    for result_file in args.result_files:
        result = file_interface.load_res_file(result_file)
        name = result_file if args.use_filenames else None
        df = pd.concat([df, pandas_bridge.result_to_df(result, name)],
                       axis="columns")

    keys = df.columns.values.tolist()
    if SETTINGS.plot_usetex:
        keys = [key.replace("_", "\\_") for key in keys]
        df.columns = keys
    duplicates = [x for x in keys if keys.count(x) > 1]
    if duplicates:
        logging.error("Values of 'est_name' must be unique - duplicates: {}\n"
                      "Try using the --use_filenames option to use filenames "
                      "for labeling instead.".format(", ".join(duplicates)))
        sys.exit(1)

    # derive a common index type if possible - preferably timestamps
    common_index = None
    time_indices = ["timestamps", "seconds_from_start", "sec_from_start"]
    if args.use_rel_time:
        del time_indices[0]
    for idx in time_indices:
        if idx not in df.loc["np_arrays"].index:
            continue
        if df.loc["np_arrays", idx].isnull().values.any():
            continue
        else:
            common_index = idx
            break

    # build error_df (raw values) according to common_index
    if common_index is None:
        # use a non-timestamp index
        error_df = pd.DataFrame(df.loc["np_arrays", "error_array"].tolist(),
                                index=keys).T
    else:
        error_df = pd.DataFrame()
        for key in keys:
            new_error_df = pd.DataFrame(
                {key: df.loc["np_arrays", "error_array"][key]},
                index=df.loc["np_arrays", common_index][key])
            duplicates = new_error_df.index.duplicated(keep="first")
            if any(duplicates):
                logging.warning(
                    "duplicate indices in error array of {} - "
                    "keeping only first occurrence of duplicates".format(key))
                new_error_df = new_error_df[~duplicates]
            error_df = pd.concat([error_df, new_error_df], axis=1)

    # check titles
    first_title = df.loc["info", "title"][0]
    first_file = args.result_files[0]
    if not args.no_warnings:
        checks = df.loc["info", "title"] != first_title
        for i, differs in enumerate(checks):
            if not differs:
                continue
            else:
                mismatching_title = df.loc["info", "title"][i]
                mismatching_file = args.result_files[i]
                logging.debug(SEP)
                logging.warning(
                    CONFLICT_TEMPLATE.format(first_file, first_title,
                                             mismatching_title,
                                             mismatching_file))
                if not user.confirm(
                        "Go on anyway? - enter 'y' or any other key to exit"):
                    sys.exit()

    if logging.getLogger().isEnabledFor(logging.DEBUG):
        logging.debug(SEP)
        logging.debug("Aggregated dataframe:\n{}".format(
            df.to_string(line_width=80)))

    # show a statistics overview
    logging.debug(SEP)
    logging.info("\n{}\n\n{}\n".format(
        first_title, df.loc["stats"].T.to_string(line_width=80)))

    if args.save_table:
        logging.debug(SEP)
        if args.no_warnings or user.check_and_confirm_overwrite(
                args.save_table):
            if SETTINGS.table_export_data.lower() == "error_array":
                data = error_df
            elif SETTINGS.table_export_data.lower() in ("info", "stats"):
                data = df.loc[SETTINGS.table_export_data.lower()]
            else:
                raise ValueError(
                    "unsupported export data specifier: {}".format(
                        SETTINGS.table_export_data))
            if SETTINGS.table_export_transpose:
                data = data.T

            if SETTINGS.table_export_format == "excel":
                # ExcelWriter.save() was removed in pandas 2.0; the
                # context manager saves and closes the file on exit
                with pd.ExcelWriter(args.save_table) as writer:
                    data.to_excel(writer)
            else:
                getattr(data,
                        "to_" + SETTINGS.table_export_format)(args.save_table)
            logging.debug("{} table saved to: {}".format(
                SETTINGS.table_export_format, args.save_table))

    if args.plot or args.save_plot or args.serialize_plot:
        # check if data has NaN "holes" due to different indices
        inconsistent = error_df.isnull().values.any()
        if inconsistent and common_index != "timestamps" and not args.no_warnings:
            logging.debug(SEP)
            logging.warning("Data lengths/indices are not consistent, "
                            "raw value plot might not be correctly aligned")

        from evo.tools import plot
        import matplotlib.pyplot as plt
        import seaborn as sns
        import math

        # use default plot settings
        figsize = (SETTINGS.plot_figsize[0], SETTINGS.plot_figsize[1])
        use_cmap = SETTINGS.plot_multi_cmap.lower() != "none"
        colormap = SETTINGS.plot_multi_cmap if use_cmap else None
        linestyles = ["-o" for x in args.result_files
                      ] if args.plot_markers else None

        # labels according to first dataset
        title = first_title
        if "xlabel" in df.loc["info"].index and not df.loc[
                "info", "xlabel"].isnull().values.any():
            index_label = df.loc["info", "xlabel"][0]
        else:
            index_label = "$t$ (s)" if common_index else "index"
        metric_label = df.loc["info", "label"][0]

        plot_collection = plot.PlotCollection(title)
        # raw value plot
        fig_raw = plt.figure(figsize=figsize)
        # handle NaNs from concat() above
        error_df.interpolate(method="index").plot(ax=fig_raw.gca(),
                                                  colormap=colormap,
                                                  style=linestyles,
                                                  title=first_title)
        plt.xlabel(index_label)
        plt.ylabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("raw", fig_raw)

        # statistics plot
        fig_stats = plt.figure(figsize=figsize)
        exclude = df.loc["stats"].index.isin(["sse"])  # don't plot sse
        df.loc["stats"][~exclude].plot(kind="barh",
                                       ax=fig_stats.gca(),
                                       colormap=colormap,
                                       stacked=False)
        plt.xlabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("stats", fig_stats)

        # grid of distribution plots
        raw_tidy = pd.melt(error_df,
                           value_vars=list(error_df.columns.values),
                           var_name="estimate",
                           value_name=metric_label)
        col_wrap = 2 if len(args.result_files) <= 2 else math.ceil(
            len(args.result_files) / 2.0)
        dist_grid = sns.FacetGrid(raw_tidy, col="estimate", col_wrap=col_wrap)
        # note: sns.distplot was removed in seaborn 0.14; newer versions
        # use sns.histplot or sns.displot instead
        dist_grid.map(sns.distplot, metric_label)  # fits=stats.gamma
        plot_collection.add_figure("histogram", dist_grid.fig)

        # box plot
        fig_box = plt.figure(figsize=figsize)
        ax = sns.boxplot(x=raw_tidy["estimate"],
                         y=raw_tidy[metric_label],
                         ax=fig_box.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("box_plot", fig_box)

        # violin plot
        fig_violin = plt.figure(figsize=figsize)
        ax = sns.violinplot(x=raw_tidy["estimate"],
                            y=raw_tidy[metric_label],
                            ax=fig_violin.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("violin_histogram", fig_violin)

        if args.plot:
            plot_collection.show()
        if args.save_plot:
            logging.debug(SEP)
            plot_collection.export(args.save_plot,
                                   confirm_overwrite=not args.no_warnings)
        if args.serialize_plot:
            logging.debug(SEP)
            plot_collection.serialize(args.serialize_plot,
                                      confirm_overwrite=not args.no_warnings)
Code example #7
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#import seaborn as sns

from evo.tools import file_interface
# load SPVI-SLAM trajectory error data: MH_01, MH_02, MH_03, MH_04, MH_05, V1_01, V1_02, V1_03, V2_01, V2_02, V2_03
SPVI_rpe_MH_01_error = file_interface.load_res_file(
    "/home/cc/output/SPVI_trajectory_190429/rpe_MH_01.zip")
SPVI_rpe_MH_01 = SPVI_rpe_MH_01_error.np_arrays["error_array"]
SPVI_rpe_MH_02_error = file_interface.load_res_file(
    "/home/cc/output/SPVI_trajectory_190429/rpe_MH_02.zip")
SPVI_rpe_MH_02 = SPVI_rpe_MH_02_error.np_arrays["error_array"]
SPVI_rpe_MH_03_error = file_interface.load_res_file(
    "/home/cc/output/SPVI_trajectory_190429/rpe_MH_03.zip")
SPVI_rpe_MH_03 = SPVI_rpe_MH_03_error.np_arrays["error_array"]
SPVI_rpe_MH_04_error = file_interface.load_res_file(
    "/home/cc/output/SPVI_trajectory_190429/rpe_MH_04.zip")
SPVI_rpe_MH_04 = SPVI_rpe_MH_04_error.np_arrays["error_array"]
SPVI_rpe_MH_05_error = file_interface.load_res_file(
    "/home/cc/output/SPVI_trajectory_190429/rpe_MH_05.zip")
SPVI_rpe_MH_05 = SPVI_rpe_MH_05_error.np_arrays["error_array"]
SPVI_rpe_V1_01_error = file_interface.load_res_file(
    "/home/cc/output/SPVI_trajectory_190429/rpe_V1_01.zip")
SPVI_rpe_V1_01 = SPVI_rpe_V1_01_error.np_arrays["error_array"]
SPVI_rpe_V1_02_error = file_interface.load_res_file(
    "/home/cc/output/SPVI_trajectory_190429/rpe_V1_02.zip")
SPVI_rpe_V1_02 = SPVI_rpe_V1_02_error.np_arrays["error_array"]
SPVI_rpe_V1_03_error = file_interface.load_res_file(
    "/home/cc/output/SPVI_trajectory_190429/rpe_V1_03.zip")
Code example #8
File: evo_cons.py Project: muetimueti/ORBextractor
                         skip_footer=0,
                         names=['mean', 'median', 'rmse'],
                         usecols=(2, 3, 5))
    return data


if __name__ == "__main__":
    prs = parser()
    args = prs.parse_args()
    csv_data = [load_from_csv(f) for f in args.csv_tables]
    title = 'RPE'
    ref_name = 'groundtruth.txt'

    for i in range(len(csv_data)):
        est_name = args.csv_tables[i][-10:]
        res = evof.load_res_file('/home/ralph/SLAM/evo_dummy_result.zip')
        res.info["title"] = title
        res.info["est_name"] = est_name

        err_arr = []
        ts_arr = []

        for j in range(len(csv_data[i])):
            err_arr.append(csv_data[i][j][0])
            ts_arr.append(j)

        res.add_np_array("error_array", err_arr)
        res.add_np_array("timestamps", ts_arr)

        rmse_arr = []
        for j in range(len(csv_data[i])):
Code example #9
from evo.tools import file_interface


def main(res_file, new_name):
    result = file_interface.load_res_file(res_file)
    result.info["est_name"] = new_name
    file_interface.save_res_file(res_file, result)
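
A usage sketch for the renaming helper above; both arguments are hypothetical:

# rewrites the result file in place with a new 'est_name' label
main("/tmp/demo_result.zip", "my_renamed_estimate")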