Example #1
def main():
    import os
    import logging
    import argparse
    import argcomplete
    basic_desc = "experimental tool for opening a serialized PlotCollection (pickle format)"
    lic = "(c) [email protected]"
    main_parser = argparse.ArgumentParser(description="%s %s" % (basic_desc, lic))
    main_parser.add_argument("in_file", help="path to a serialized plot_collection")
    main_parser.add_argument("-t", "--title", help="custom title (default: file name)")
    main_parser.add_argument("--save_plot", help="path to save plot", default=None)
    main_parser.add_argument("--serialize_plot", help="path to re-serialize PlotCollection",
                             default=None)
    main_parser.add_argument("--to_html", help="convert to html (requires mpld3 library)",
                             action="store_true")
    main_parser.add_argument("--no_warnings", help="no warnings requiring user confirmation",
                             action="store_true")
    argcomplete.autocomplete(main_parser)
    args = main_parser.parse_args()

    from evo.tools import plot, settings, user
    settings.configure_logging(verbose=True)
    if not args.title:
        title = os.path.basename(args.in_file)
    else:
        title = args.title
    if not args.no_warnings:
        logging.warning("Please note that this tool is experimental and not guranteed to work.\n"
                        "Only works if the same matplotlib backend is used as for serialization.\n"
                        "If not, try: evo_config set plot_backend <backend_name>\n" + SEP)
    
    plot_collection = plot.PlotCollection(title, deserialize=args.in_file)
    logging.debug("deserialized PlotCollection: " + str(plot_collection))
    plot_collection.show()

    if args.serialize_plot:
        logging.debug(SEP)
        plot_collection.serialize(args.serialize_plot, confirm_overwrite=not args.no_warnings)
    if args.save_plot:
        logging.debug(SEP)
        plot_collection.export(args.save_plot, confirm_overwrite=not args.no_warnings)
    if args.to_html:
        import mpld3
        logging.debug(SEP + "\nhtml export\n")
        for name, fig in plot_collection.figures.items():
            html = mpld3.fig_to_html(fig)
            out = name + ".html"
            with open(out, 'w') as f:
                logging.debug(out)
                f.write(html)
    if not args.no_warnings:
        logging.debug(SEP)
        if user.confirm("save changes & overwrite original file "
                        + args.in_file + "? (y/n)"):
            plot_collection.serialize(args.in_file, confirm_overwrite=False)
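
For context, here is a minimal sketch (not part of the example above, only an assumption about typical usage) of how a serialized PlotCollection file could be produced in the first place, using just the PlotCollection methods that appear in these examples (add_figure, serialize); the figure content is a placeholder.

import matplotlib.pyplot as plt
from evo.tools import plot

fig = plt.figure()
fig.gca().plot([0, 1, 2], [0, 1, 4])  # placeholder figure content
pc = plot.PlotCollection("demo")
pc.add_figure("demo_fig", fig)
# writes a pickle file that the tool above can open as its in_file argument
pc.serialize("demo.pickle", confirm_overwrite=False)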
Example #2
File: main_evo.py Project: yinyanpeng/evo
def main():
    import os
    import sys
    import argparse
    import argcomplete
    from evo.tools import settings
    main_parser = argparse.ArgumentParser()
    shared_parser = argparse.ArgumentParser(add_help=False)
    sub_parsers = main_parser.add_subparsers(dest="subcommand")
    sub_parsers.required = True
    pkg_parser = sub_parsers.add_parser("pkg", description="show infos of the package",
                                         parents=[shared_parser])
    pkg_parser.add_argument("--info", help="show the package description", action="store_true")
    pkg_parser.add_argument("--version", help="print the package version", action="store_true")
    pkg_parser.add_argument("--pyversion", help="print the Python version", action="store_true")
    pkg_parser.add_argument("--license", help="print the package license", action="store_true")
    pkg_parser.add_argument("--location", help="print the package path", action="store_true")
    pkg_parser.add_argument("--logfile", help="print the logfile path", action="store_true")
    pkg_parser.add_argument("--open_log", help="open the package logfile", action="store_true")
    pkg_parser.add_argument("--clear_log", help="clear package logfile", action="store_true")
    cat_parser = sub_parsers.add_parser("cat_log", description="pipe stdin to evo logfile"
                                        " or print logfile to stdout (if no stdin)",
                                        parents=[shared_parser])
    cat_parser.add_argument("-l", "--loglevel", help="loglevel of the message", default="info",
                            choices=["error", "warning", "info", "debug"])
    cat_parser.add_argument("-m", "--message", help="explicit message instead of pipe")
    cat_parser.add_argument("-s", "--source", help="source name to use for the log message")
    cat_parser.add_argument("--mark_entry", help="include the default log entry header", 
                            action="store_true", default=False)
    cat_parser.add_argument("--clear_log", help="clear logfile before exiting", action="store_true")
    argcomplete.autocomplete(main_parser)
    if len(sys.argv[1:]) == 0:
        sys.argv.extend(["pkg", "--info"])  # cheap trick because YOLO
    args = main_parser.parse_args()
    line_end = "\n" if sys.stdout.isatty() else ""

    if args.subcommand == "pkg":
        if not len(sys.argv) > 2:
            pkg_parser.print_help()
            sys.exit(1)
        if args.license:
            print(open(os.path.join(settings.PACKAGE_BASE_PATH, "LICENSE")).read())
        if args.info:
            main_parser.print_usage()
            print(DESC)
        if args.version:
            here = os.path.dirname(os.path.abspath(__file__))
            version_path = os.path.join(here, "version")
            print(" ".join(open(version_path).read().splitlines()), end=line_end)
        if args.pyversion:
            import platform as pf
            print(pf.python_version(), end=line_end)
        if args.location:
            print(settings.PACKAGE_BASE_PATH, end=line_end)
        if args.logfile or args.open_log:
            if not os.path.exists(settings.DEFAULT_LOGFILE_PATH):
                print("no logfile found - run: evo_config set logfile_enabled", end=line_end)
                sys.exit(1)
            print(settings.DEFAULT_LOGFILE_PATH, end=line_end)
            if args.open_log:
                import webbrowser
                webbrowser.open(settings.DEFAULT_LOGFILE_PATH)
        if args.clear_log:
            from evo.tools import user
            if user.confirm("clear logfile? (y/n)"):
                open(settings.DEFAULT_LOGFILE_PATH, mode='w').close()  # truncate the logfile

    elif args.subcommand == "cat_log":
        if os.name == "nt":
            print("cat_log feature not available on Windows")
            sys.exit(1)
        if not args.message and sys.stdin.isatty():
            if not os.path.exists(settings.DEFAULT_LOGFILE_PATH):
                print("no logfile found - run: evo_config set logfile_enabled", end=line_end)
            else:
                print(open(settings.DEFAULT_LOGFILE_PATH).read(), end="")
        elif not settings.SETTINGS.logfile_enabled:
            print("logfile disabled", end=line_end)
            sys.exit(1)
        else:
            import logging
            dbg_fmt = None
            if args.source:
                dbg_fmt = "[%(levelname)s][%(asctime)s][" + args.source + "]\n%(message)s"
            settings.configure_logging(silent=True, dbg_fmt=dbg_fmt, mark_entry=args.mark_entry)
            if not args.message:
                msg = sys.stdin.read()
            else:
                msg = args.message
            getattr(logging, args.loglevel)(msg)
        if args.clear_log:
            open(settings.DEFAULT_LOGFILE_PATH, mode='w').close()  # truncate the logfile
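
The getattr(logging, args.loglevel)(msg) call above dispatches to a logging function by name; a minimal standalone sketch of that pattern, with made-up level and message values:

import logging

logging.basicConfig(level=logging.DEBUG)
loglevel = "warning"             # e.g. the value of --loglevel
msg = "piped or explicit message"
getattr(logging, loglevel)(msg)  # equivalent to logging.warning(msg)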
Example #3
def run(args):
    import os
    import sys
    import logging

    import pandas as pd
    import numpy as np
    from natsort import natsorted

    from evo.tools import file_interface, user, settings
    from evo.tools.settings import SETTINGS

    settings.configure_logging(args.verbose, args.silent, args.debug)
    if args.debug:
        import pprint
        logging.debug(
            "main_parser config:\n" +
            pprint.pformat({arg: getattr(args, arg)
                            for arg in vars(args)}) + "\n")

    # store data in Pandas data frames for easier analysis
    raw_df = pd.DataFrame()
    stat_df = pd.DataFrame()
    info_df = pd.DataFrame()
    use_seconds = False

    for result_file in args.result_files:
        logging.debug(SEP)
        result_obj = file_interface.load_res_file(result_file, True)
        short_est_name = os.path.splitext(
            os.path.basename(result_obj.info["est_name"]))[0]
        error_array = result_obj.np_arrays["error_array"]
        if "seconds_from_start" in result_obj.np_arrays:
            seconds_from_start = result_obj.np_arrays["seconds_from_start"]
        else:
            seconds_from_start = None

        if not args.no_warnings and (short_est_name in info_df.columns):
            logging.warning("double entry detected: " + short_est_name)
            if not user.confirm(
                    "ignore? enter 'y' to go on or any other key to quit"):
                sys.exit()

        if SETTINGS.plot_usetex:
            short_est_name = short_est_name.replace("_", "\\_")

        if args.use_abs_time:
            if "traj_est" in result_obj.trajectories:
                traj_est = result_obj.trajectories["traj_est"]
                index = traj_est.timestamps
                use_seconds = True
            else:
                msg = "no 'traj_est' trajectory found in " + result_file \
                      + " but --use_abs_time requires the trajectory in the result file - " \
                      + "to let the metrics app include them run: evo_config set save_traj_in_zip"
                raise RuntimeError(msg)
        elif seconds_from_start is not None:
            index = seconds_from_start.tolist()
            use_seconds = True
        else:
            index = np.arange(0, error_array.shape[0])

        result_obj.info["traj. backup?"] = \
            all(k in result_obj.trajectories for k in ("traj_ref", "traj_est"))
        result_obj.info["res_file"] = result_file
        new_raw_df = pd.DataFrame({short_est_name: error_array.tolist()},
                                  index=index)
        new_info_df = pd.DataFrame({short_est_name: result_obj.info})
        new_stat_df = pd.DataFrame({short_est_name: result_obj.stats})
        # natural sort num strings "10" "100" "20" -> "10" "20" "100"
        new_stat_df = new_stat_df.reindex(index=natsorted(new_stat_df.index))
        # column-wise concatenation
        raw_df = pd.concat([raw_df, new_raw_df], axis=1)
        info_df = pd.concat([info_df, new_info_df], axis=1)
        stat_df = pd.concat([stat_df, new_stat_df], axis=1)
        # if verbose: log infos of the current data
        logging.debug(
            "\n" + result_obj.pretty_str(title=True, stats=False, info=True))

    logging.debug(SEP)
    logging.info("\nstatistics overview:\n" +
                 stat_df.T.to_string(line_width=80) + "\n")

    # check titles
    first_title = info_df.loc["title"].iloc[0]
    first_res_file = info_df.loc["res_file"].iloc[0]
    if args.save_table or args.plot or args.save_plot:
        for short_est_name, column in info_df.items():
            if column.loc["title"] != first_title and not args.no_warnings:
                logging.info(SEP)
                logging.warning(
                    "mismatching titles, you probably use data from different metrics"
                )
                logging.warning("conflict:\n" + "<" * 7 + " " +
                                first_res_file + "\n" + first_title + "\n" +
                                "=" * 7 + "\n" + column.ix["title"] + "\n" +
                                ">" * 7 + " " + column.ix["res_file"])
                logging.warning(
                    "only the first one will be used as the title!")
                if not user.confirm(
                        "plot/save anyway? - enter 'y' or any other key to exit"
                ):
                    sys.exit()

    if args.save_table:
        logging.debug(SEP)
        if args.no_warnings or user.check_and_confirm_overwrite(
                args.save_table):
            table_fmt = SETTINGS.table_export_format
            if SETTINGS.table_export_transpose:
                getattr(stat_df.T, "to_" + table_fmt)(args.save_table)
            else:
                getattr(stat_df, "to_" + table_fmt)(args.save_table)
            logging.debug(table_fmt + " table saved to: " + args.save_table)

    if args.plot or args.save_plot or args.serialize_plot:
        # check if data has NaN "holes" due to different indices
        inconsistent = raw_df.isnull().values.any()
        if inconsistent and not args.no_warnings:
            logging.debug(SEP)
            logging.warning(
                "data lengths/indices are not consistent, plotting probably makes no sense"
            )
            if not user.confirm(
                    "plot anyway? - enter 'y' or any other key to exit"):
                sys.exit()

        from evo.tools import plot
        import matplotlib.pyplot as plt
        import seaborn as sns
        import math
        from scipy import stats

        # use default plot settings
        figsize = (SETTINGS.plot_figsize[0], SETTINGS.plot_figsize[1])
        use_cmap = SETTINGS.plot_multi_cmap.lower() != "none"
        colormap = SETTINGS.plot_multi_cmap if use_cmap else None
        linestyles = ["-o" for x in args.result_files
                      ] if args.plot_markers else None

        # labels according to first dataset
        title = first_title
        if "xlabel" in info_df.ix[:, 0].index:
            index_label = info_df.ix["xlabel", 0]
        else:
            index_label = "$t$ (s)" if use_seconds else "index"
        metric_label = info_df.loc["label"].iloc[0]

        plot_collection = plot.PlotCollection(title)
        # raw value plot
        fig_raw = plt.figure(figsize=figsize)
        # handle NaNs from concat() above
        raw_df.interpolate(method="index").plot(ax=fig_raw.gca(),
                                                colormap=colormap,
                                                style=linestyles,
                                                title=first_title)
        plt.xlabel(index_label)
        plt.ylabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("raw", fig_raw)

        # statistics plot
        fig_stats = plt.figure(figsize=figsize)
        exclude = stat_df.index.isin(["sse"])  # don't plot sse
        stat_df[~exclude].plot(kind="barh",
                               ax=fig_stats.gca(),
                               colormap=colormap,
                               stacked=False)
        plt.xlabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("stats", fig_stats)

        # grid of distribution plots
        raw_tidy = pd.melt(raw_df,
                           value_vars=list(raw_df.columns.values),
                           var_name="estimate",
                           value_name=metric_label)
        col_wrap = 2 if len(args.result_files) <= 2 else math.ceil(
            len(args.result_files) / 2.0)
        dist_grid = sns.FacetGrid(raw_tidy, col="estimate", col_wrap=col_wrap)
        dist_grid.map(sns.distplot, metric_label)  # fits=stats.gamma
        plot_collection.add_figure("histogram", dist_grid.fig)

        # box plot
        fig_box = plt.figure(figsize=figsize)
        ax = sns.boxplot(x=raw_tidy["estimate"],
                         y=raw_tidy[metric_label],
                         ax=fig_box.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("box_plot", fig_box)

        # violin plot
        fig_violin = plt.figure(figsize=figsize)
        ax = sns.violinplot(x=raw_tidy["estimate"],
                            y=raw_tidy[metric_label],
                            ax=fig_violin.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("violin_histogram", fig_violin)

        if args.plot:
            plot_collection.show()
        if args.save_plot:
            logging.debug(SEP)
            plot_collection.export(args.save_plot,
                                   confirm_overwrite=not args.no_warnings)
        if args.serialize_plot:
            logging.debug(SEP)
            plot_collection.serialize(args.serialize_plot,
                                      confirm_overwrite=not args.no_warnings)
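
The plotting section above first concatenates the per-estimate error columns (pd.concat with axis=1) and then reshapes them into the long/tidy layout that seaborn expects (pd.melt); a small self-contained sketch of that reshaping, with made-up values:

import pandas as pd

# one column per estimate, as produced by the column-wise pd.concat above
raw_df = pd.concat([pd.DataFrame({"est_a": [0.1, 0.2, 0.3]}),
                    pd.DataFrame({"est_b": [0.2, 0.1, 0.4]})], axis=1)
# wide -> long ("tidy") format: one row per (estimate, value) pair
raw_tidy = pd.melt(raw_df, value_vars=list(raw_df.columns.values),
                   var_name="estimate", value_name="error")
print(raw_tidy)  # columns: estimate, error - six rows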
Example #4
File: main_traj.py Project: skylook/evo
def run(args):
    import os
    import sys
    import logging
    import evo.algorithms.lie_algebra as lie
    from evo.algorithms import trajectory
    from evo.tools import file_interface, settings
    from evo.tools.settings import SETTINGS

    settings.configure_logging(verbose=True,
                               silent=args.silent,
                               debug=args.debug)
    if args.debug:
        import pprint
        logging.debug(
            "main_parser config:\n" +
            pprint.pformat({arg: getattr(args, arg)
                            for arg in vars(args)}) + "\n")
    logging.debug(SEP)

    trajectories = []
    ref_traj = None
    if args.subcommand == "tum":
        for traj_file in args.traj_files:
            if traj_file != args.ref:
                trajectories.append(
                    (traj_file,
                     file_interface.read_tum_trajectory_file(traj_file)))
        if args.ref:
            ref_traj = file_interface.read_tum_trajectory_file(args.ref)
    elif args.subcommand == "kitti":
        for pose_file in args.pose_files:
            if pose_file != args.ref:
                trajectories.append(
                    (pose_file,
                     file_interface.read_kitti_poses_file(pose_file)))
        if args.ref:
            ref_traj = file_interface.read_kitti_poses_file(args.ref)
    elif args.subcommand == "euroc":
        for csv_file in args.state_gt_csv:
            if csv_file != args.ref:
                trajectories.append(
                    (csv_file,
                     file_interface.read_euroc_csv_trajectory(csv_file)))
        if args.ref:
            ref_traj = file_interface.read_euroc_csv_trajectory(args.ref)
    elif args.subcommand == "bag":
        import rosbag
        bag = rosbag.Bag(args.bag)
        try:
            if args.all_topics:
                topic_info = bag.get_type_and_topic_info()
                topics = sorted([
                    t for t in topic_info[1].keys()
                    if topic_info[1][t][0] == "geometry_msgs/PoseStamped"
                    and t != args.ref
                ])
                if len(topics) == 0:
                    logging.error("no geometry_msgs/PoseStamped topics found!")
                    sys.exit(1)
            else:
                topics = args.topics
                if not topics:
                    logging.warning(
                        "no topics used - specify topics or use the --all_topics flag"
                    )
                    sys.exit(1)
            for topic in topics:
                trajectories.append(
                    (topic, file_interface.read_bag_trajectory(bag, topic)))
            if args.ref:
                ref_traj = file_interface.read_bag_trajectory(bag, args.ref)
        finally:
            bag.close()
    else:
        raise RuntimeError("unsupported subcommand: " + args.subcommand)

    if args.transform_left or args.transform_right:
        tf_path = args.transform_left if args.transform_left else args.transform_right
        t, xyz, quat = file_interface.load_transform_json(tf_path)
        logging.debug(SEP)
        logging.debug("applying transformation to the trajectories:\n" +
                      str(t))
        if args.invert_transform:
            t = lie.se3_inverse(t)
        for name, traj in trajectories:
            traj.transform(t, right_mul=args.transform_right)

    if args.align or args.correct_scale:
        if args.ref:
            if args.subcommand == "kitti":
                traj_tmp, ref_traj_tmp = trajectories, [
                    ref_traj for n, t in trajectories
                ]
            else:
                traj_tmp, ref_traj_tmp = [], []
                from evo.algorithms import sync
                for name, traj in trajectories:
                    logging.debug(SEP)
                    ref_assoc, traj_assoc = sync.associate_trajectories(
                        ref_traj, traj, first_name="ref", snd_name=name)
                    ref_traj_tmp.append(ref_assoc)
                    traj_tmp.append((name, traj_assoc))
                trajectories = traj_tmp
            correct_only_scale = args.correct_scale and not args.align
            trajectories_new = []
            for nt, ref_assoc in zip(trajectories, ref_traj_tmp):
                logging.debug(SEP)
                logging.debug("aligning " + nt[0] + " to " + args.ref + "...")
                trajectories_new.append(
                    (nt[0],
                     trajectory.align_trajectory(nt[1], ref_assoc,
                                                 args.correct_scale,
                                                 correct_only_scale,
                                                 args.n_to_align)))
            trajectories = trajectories_new

    for name, traj in trajectories:
        if args.t_offset and traj.timestamps.shape[0] != 0:
            logging.debug(SEP)
            logging.info("adding time offset to " + name + ": " +
                         str(args.t_offset) + " (s)")
            traj.timestamps += args.t_offset
        print_traj_info(name, traj, args.full_check)
    if (args.align or args.correct_scale) and not args.ref:
        logging.debug(SEP)
        logging.warning("can't align without a reference! (--ref)  *grunt*")
    if args.ref:
        print_traj_info(args.ref, ref_traj, args.full_check)

    if args.plot or args.save_plot or args.serialize_plot:
        from evo.tools.plot import PlotMode
        plot_mode = PlotMode.xyz if not args.plot_mode else PlotMode[
            args.plot_mode]
        import numpy as np
        from evo.tools import plot
        import matplotlib.pyplot as plt
        import matplotlib.cm as cm
        plot_collection = plot.PlotCollection("evo_traj - trajectory plot")
        fig_xyz, axarr_xyz = plt.subplots(3,
                                          sharex="col",
                                          figsize=tuple(SETTINGS.plot_figsize))
        fig_traj = plt.figure(figsize=tuple(SETTINGS.plot_figsize))
        if (args.align or args.correct_scale) and not args.ref:
            plt.xkcd(scale=2, randomness=4)
            fig_traj.suptitle("what if --ref?")
            fig_xyz.suptitle("what if --ref?")
        ax_traj = plot.prepare_axis(fig_traj, plot_mode)
        if args.ref:
            short_traj_name = os.path.splitext(os.path.basename(args.ref))[0]
            if SETTINGS.plot_usetex:
                short_traj_name = short_traj_name.replace("_", "\\_")
            plot.traj(ax_traj,
                      plot_mode,
                      ref_traj,
                      '--',
                      'grey',
                      short_traj_name,
                      alpha=0 if SETTINGS.plot_hideref else 1)
            plot.traj_xyz(axarr_xyz,
                          ref_traj,
                          '--',
                          'grey',
                          short_traj_name,
                          alpha=0 if SETTINGS.plot_hideref else 1)
        cmap_colors = None
        if SETTINGS.plot_multi_cmap.lower() != "none":
            cmap = getattr(cm, SETTINGS.plot_multi_cmap)
            cmap_colors = iter(cmap(np.linspace(0, 1, len(trajectories))))
        for name, traj in trajectories:
            if cmap_colors is None:
                color = next(ax_traj._get_lines.prop_cycler)['color']
            else:
                color = next(cmap_colors)
            short_traj_name = os.path.splitext(os.path.basename(name))[0]
            if SETTINGS.plot_usetex:
                short_traj_name = short_traj_name.replace("_", "\\_")
            plot.traj(ax_traj, plot_mode, traj, '-', color, short_traj_name)
            if args.ref and isinstance(ref_traj, trajectory.PoseTrajectory3D):
                start_time = ref_traj.timestamps[0]
            else:
                start_time = None
            plot.traj_xyz(axarr_xyz,
                          traj,
                          '-',
                          color,
                          short_traj_name,
                          start_timestamp=start_time)
        plt.tight_layout()
        plot_collection.add_figure("trajectories", fig_traj)
        plot_collection.add_figure("xyz_view", fig_xyz)
        if args.plot:
            plot_collection.show()
        if args.save_plot:
            logging.debug(SEP)
            plot_collection.export(args.save_plot,
                                   confirm_overwrite=not args.no_warnings)
        if args.serialize_plot:
            logging.debug(SEP)
            plot_collection.serialize(args.serialize_plot,
                                      confirm_overwrite=not args.no_warnings)

    if args.save_as_tum:
        logging.debug(SEP)
        for name, traj in trajectories:
            dest = os.path.splitext(os.path.basename(name))[0] + ".tum"
            file_interface.write_tum_trajectory_file(
                dest, traj, confirm_overwrite=not args.no_warnings)
        if args.ref:
            dest = os.path.splitext(os.path.basename(args.ref))[0] + ".tum"
            file_interface.write_tum_trajectory_file(
                dest, ref_traj, confirm_overwrite=not args.no_warnings)
    if args.save_as_kitti:
        logging.debug(SEP)
        for name, traj in trajectories:
            dest = os.path.splitext(os.path.basename(name))[0] + ".kitti"
            file_interface.write_kitti_poses_file(
                dest, traj, confirm_overwrite=not args.no_warnings)
        if args.ref:
            dest = os.path.splitext(os.path.basename(args.ref))[0] + ".kitti"
            file_interface.write_kitti_poses_file(
                dest, ref_traj, confirm_overwrite=not args.no_warnings)
    if args.save_as_bag:
        logging.debug(SEP)
        import datetime
        import rosbag
        dest_bag_path = str(
            datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S.%f')) + ".bag"
        logging.debug("saving trajectories to " + dest_bag_path + "...")
        bag = rosbag.Bag(dest_bag_path, 'w')
        try:
            for name, traj in trajectories:
                dest_topic = os.path.splitext(os.path.basename(name))[0]
                file_interface.write_bag_trajectory(bag, traj, dest_topic)
            if args.ref:
                dest_topic = os.path.splitext(os.path.basename(args.ref))[0]
                file_interface.write_bag_trajectory(bag, ref_traj, dest_topic)
        finally:
            bag.close()
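
The per-trajectory coloring above either draws colors from the axes' property cycle or samples them evenly from a colormap; a minimal sketch of the colormap branch using plain matplotlib and numpy (the colormap name is an arbitrary stand-in for SETTINGS.plot_multi_cmap):

import numpy as np
import matplotlib.cm as cm

n_trajectories = 4
cmap = getattr(cm, "viridis")  # stand-in for SETTINGS.plot_multi_cmap
cmap_colors = iter(cmap(np.linspace(0, 1, n_trajectories)))
for _ in range(n_trajectories):
    color = next(cmap_colors)  # one RGBA value per trajectory
    print(color)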
Example #5
File: main_ape.py Project: skylook/evo
def run(args):
    import logging
    from evo.algorithms import metrics
    from evo.tools import file_interface, settings
    settings.configure_logging(args.verbose, args.silent, args.debug)
    if args.debug:
        import pprint
        logging.debug(
            "main_parser config:\n" +
            pprint.pformat({arg: getattr(args, arg)
                            for arg in vars(args)}) + "\n")
    logging.debug(SEP)

    pose_relation = None
    if args.pose_relation == "full":
        pose_relation = metrics.PoseRelation.full_transformation
    elif args.pose_relation == "rot_part":
        pose_relation = metrics.PoseRelation.rotation_part
    elif args.pose_relation == "trans_part":
        pose_relation = metrics.PoseRelation.translation_part
    elif args.pose_relation == "angle_deg":
        pose_relation = metrics.PoseRelation.rotation_angle_deg
    elif args.pose_relation == "angle_rad":
        pose_relation = metrics.PoseRelation.rotation_angle_rad

    traj_ref, traj_est, stamps_est = None, None, None
    ref_name, est_name = "", ""
    plot_mode = None  # no plot imports unless really needed (slow)
    if args.subcommand == "tum":
        traj_ref, traj_est = file_interface.load_assoc_tum_trajectories(
            args.ref_file,
            args.est_file,
            args.t_max_diff,
            args.t_offset,
        )
        ref_name, est_name = args.ref_file, args.est_file
        if args.plot or args.save_plot:
            from evo.tools.plot import PlotMode
            plot_mode = PlotMode.xyz if not args.plot_mode else PlotMode[
                args.plot_mode]
    elif args.subcommand == "kitti":
        traj_ref = file_interface.read_kitti_poses_file(args.ref_file)
        traj_est = file_interface.read_kitti_poses_file(args.est_file)
        ref_name, est_name = args.ref_file, args.est_file
        if args.plot or args.save_plot:
            from evo.tools.plot import PlotMode
            plot_mode = PlotMode.xz if not args.plot_mode else PlotMode[
                args.plot_mode]
    elif args.subcommand == "euroc":
        args.align = True
        logging.info(
            "forcing trajectory alignment implicitly (EuRoC ground truth is in IMU frame)"
        )
        logging.debug(SEP)
        traj_ref, traj_est = file_interface.load_assoc_euroc_trajectories(
            args.state_gt_csv,
            args.est_file,
            args.t_max_diff,
            args.t_offset,
        )
        ref_name, est_name = args.state_gt_csv, args.est_file
        if args.plot or args.save_plot:
            from evo.tools.plot import PlotMode
            plot_mode = PlotMode.xyz if not args.plot_mode else PlotMode[
                args.plot_mode]
    elif args.subcommand == "bag":
        import rosbag
        logging.debug("opening bag file " + args.bag)
        bag = rosbag.Bag(args.bag, 'r')
        try:
            traj_ref, traj_est = file_interface.load_assoc_bag_trajectories(
                bag,
                args.ref_topic,
                args.est_topic,
                args.t_max_diff,
                args.t_offset,
            )
        finally:
            bag.close()
        ref_name, est_name = args.ref_topic, args.est_topic
        if args.plot or args.save_plot:
            from evo.tools.plot import PlotMode
            plot_mode = PlotMode.xy if not args.plot_mode else PlotMode[
                args.plot_mode]

    main_ape(traj_ref,
             traj_est,
             pose_relation,
             args.align,
             args.correct_scale,
             ref_name,
             est_name,
             args.plot,
             args.save_plot,
             plot_mode,
             args.save_results,
             args.no_warnings,
             serialize_plot=args.serialize_plot)
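
The if/elif chain that maps the --pose_relation string to a metrics.PoseRelation member could equally be written as a dictionary lookup; a sketch using the same member names and import path as above (the literal string stands in for args.pose_relation):

from evo.algorithms import metrics

POSE_RELATIONS = {
    "full": metrics.PoseRelation.full_transformation,
    "rot_part": metrics.PoseRelation.rotation_part,
    "trans_part": metrics.PoseRelation.translation_part,
    "angle_deg": metrics.PoseRelation.rotation_angle_deg,
    "angle_rad": metrics.PoseRelation.rotation_angle_rad,
}
pose_relation = POSE_RELATIONS.get("trans_part")  # None for unknown values, like the chain above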
Example #6
File: main_res.py Project: yinyanpeng/evo
def run(args):
    import sys
    import logging

    import pandas as pd

    from evo.tools import file_interface, user, settings, pandas_bridge
    from evo.tools.settings import SETTINGS

    pd.options.display.width = 80
    pd.options.display.max_colwidth = 20

    settings.configure_logging(args.verbose, args.silent, args.debug)
    if args.debug:
        import pprint
        arg_dict = {arg: getattr(args, arg) for arg in vars(args)}
        logging.debug("main_parser config:\n{}\n".format(
            pprint.pformat(arg_dict)))

    df = pd.DataFrame()
    for result_file in args.result_files:
        result = file_interface.load_res_file(result_file)
        name = result_file if args.use_filenames else None
        df = pd.concat([df, pandas_bridge.result_to_df(result, name)],
                       axis="columns")

    keys = df.columns.values.tolist()
    if SETTINGS.plot_usetex:
        keys = [key.replace("_", "\\_") for key in keys]
        df.columns = keys
    duplicates = [x for x in keys if keys.count(x) > 1]
    if duplicates:
        logging.error("Values of 'est_name' must be unique - duplicates: {}\n"
                      "Try using the --use_filenames option to use filenames "
                      "for labeling instead.".format(", ".join(duplicates)))
        sys.exit(1)

    # derive a common index type if possible - preferably timestamps
    common_index = None
    time_indices = ["timestamps", "seconds_from_start", "sec_from_start"]
    if args.use_rel_time:
        del time_indices[0]
    for idx in time_indices:
        if idx not in df.loc["np_arrays"].index:
            continue
        if df.loc["np_arrays", idx].isnull().values.any():
            continue
        else:
            common_index = idx
            break

    # build error_df (raw values) according to common_index
    if common_index is None:
        # use a non-timestamp index
        error_df = pd.DataFrame(df.loc["np_arrays", "error_array"].tolist(),
                                index=keys).T
    else:
        error_df = pd.DataFrame()
        for key in keys:
            new_error_df = pd.DataFrame(
                {key: df.loc["np_arrays", "error_array"][key]},
                index=df.loc["np_arrays", common_index][key])
            duplicates = new_error_df.index.duplicated(keep="first")
            if any(duplicates):
                logging.warning(
                    "duplicate indices in error array of {} - "
                    "keeping only first occurrence of duplicates".format(key))
                new_error_df = new_error_df[~duplicates]
            error_df = pd.concat([error_df, new_error_df], axis=1)

    # check titles
    first_title = df.loc["info", "title"][0]
    first_file = args.result_files[0]
    if not args.no_warnings:
        checks = df.loc["info", "title"] != first_title
        for i, differs in enumerate(checks):
            if not differs:
                continue
            else:
                mismatching_title = df.loc["info", "title"][i]
                mismatching_file = args.result_files[i]
                logging.debug(SEP)
                logging.warning(
                    CONFLICT_TEMPLATE.format(first_file, first_title,
                                             mismatching_title,
                                             mismatching_file))
                if not user.confirm(
                        "Go on anyway? - enter 'y' or any other key to exit"):
                    sys.exit()

    if logging.getLogger().isEnabledFor(logging.DEBUG):
        logging.debug(SEP)
        logging.debug("Aggregated dataframe:\n{}".format(
            df.to_string(line_width=80)))

    # show a statistics overview
    logging.debug(SEP)
    logging.info("\n{}\n\n{}\n".format(
        first_title, df.loc["stats"].T.to_string(line_width=80)))

    if args.save_table:
        logging.debug(SEP)
        if args.no_warnings or user.check_and_confirm_overwrite(
                args.save_table):
            if SETTINGS.table_export_data.lower() == "error_array":
                data = error_df
            elif SETTINGS.table_export_data.lower() in ("info", "stats"):
                data = df.loc[SETTINGS.table_export_data.lower()]
            else:
                raise ValueError(
                    "unsupported export data specifier: {}".format(
                        SETTINGS.table_export_data))
            if SETTINGS.table_export_transpose:
                data = data.T

            if SETTINGS.table_export_format == "excel":
                writer = pd.ExcelWriter(args.save_table)
                data.to_excel(writer)
                writer.save()
                writer.close()
            else:
                getattr(data,
                        "to_" + SETTINGS.table_export_format)(args.save_table)
            logging.debug("{} table saved to: {}".format(
                SETTINGS.table_export_format, args.save_table))

    if args.plot or args.save_plot or args.serialize_plot:
        # check if data has NaN "holes" due to different indices
        inconsistent = error_df.isnull().values.any()
        if inconsistent and common_index != "timestamps" and not args.no_warnings:
            logging.debug(SEP)
            logging.warning("Data lengths/indices are not consistent, "
                            "raw value plot might not be correctly aligned")

        from evo.tools import plot
        import matplotlib.pyplot as plt
        import seaborn as sns
        import math

        # use default plot settings
        figsize = (SETTINGS.plot_figsize[0], SETTINGS.plot_figsize[1])
        use_cmap = SETTINGS.plot_multi_cmap.lower() != "none"
        colormap = SETTINGS.plot_multi_cmap if use_cmap else None
        linestyles = ["-o" for x in args.result_files
                      ] if args.plot_markers else None

        # labels according to first dataset
        title = first_title
        if "xlabel" in df.loc["info"].index and not df.loc[
                "info", "xlabel"].isnull().values.any():
            index_label = df.loc["info", "xlabel"][0]
        else:
            index_label = "$t$ (s)" if common_index else "index"
        metric_label = df.loc["info", "label"][0]

        plot_collection = plot.PlotCollection(title)
        # raw value plot
        fig_raw = plt.figure(figsize=figsize)
        # handle NaNs from concat() above
        error_df.interpolate(method="index").plot(ax=fig_raw.gca(),
                                                  colormap=colormap,
                                                  style=linestyles,
                                                  title=first_title)
        plt.xlabel(index_label)
        plt.ylabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("raw", fig_raw)

        # statistics plot
        fig_stats = plt.figure(figsize=figsize)
        exclude = df.loc["stats"].index.isin(["sse"])  # don't plot sse
        df.loc["stats"][~exclude].plot(kind="barh",
                                       ax=fig_stats.gca(),
                                       colormap=colormap,
                                       stacked=False)
        plt.xlabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("stats", fig_stats)

        # grid of distribution plots
        raw_tidy = pd.melt(error_df,
                           value_vars=list(error_df.columns.values),
                           var_name="estimate",
                           value_name=metric_label)
        col_wrap = 2 if len(args.result_files) <= 2 else math.ceil(
            len(args.result_files) / 2.0)
        dist_grid = sns.FacetGrid(raw_tidy, col="estimate", col_wrap=col_wrap)
        dist_grid.map(sns.distplot, metric_label)  # fits=stats.gamma
        plot_collection.add_figure("histogram", dist_grid.fig)

        # box plot
        fig_box = plt.figure(figsize=figsize)
        ax = sns.boxplot(x=raw_tidy["estimate"],
                         y=raw_tidy[metric_label],
                         ax=fig_box.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("box_plot", fig_box)

        # violin plot
        fig_violin = plt.figure(figsize=figsize)
        ax = sns.violinplot(x=raw_tidy["estimate"],
                            y=raw_tidy[metric_label],
                            ax=fig_violin.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("violin_histogram", fig_violin)

        if args.plot:
            plot_collection.show()
        if args.save_plot:
            logging.debug(SEP)
            plot_collection.export(args.save_plot,
                                   confirm_overwrite=not args.no_warnings)
        if args.serialize_plot:
            logging.debug(SEP)
            plot_collection.serialize(args.serialize_plot,
                                      confirm_overwrite=not args.no_warnings)
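
The duplicate-index handling above relies on pandas' Index.duplicated to keep only the first occurrence of a repeated index value; a small standalone sketch with made-up values:

import pandas as pd

new_error_df = pd.DataFrame({"est_a": [0.1, 0.2, 0.3]}, index=[1.0, 1.0, 2.0])
duplicates = new_error_df.index.duplicated(keep="first")  # [False, True, False]
if any(duplicates):
    new_error_df = new_error_df[~duplicates]  # keep only the first occurrence
print(new_error_df)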
Example #7
def main():
    import os
    import sys
    import json
    import logging
    import argparse
    import argcomplete
    import colorama
    from colorama import Style
    from evo.tools import settings, user
    basic_desc = "crappy configuration tool"
    lic = "(c) [email protected]"
    shared_parser = argparse.ArgumentParser(add_help=False)
    shared_parser.add_argument("--no_color", help="don't color output", action="store_true")
    main_parser = argparse.ArgumentParser(description="%s %s" % (basic_desc, lic))
    sub_parsers = main_parser.add_subparsers(dest="subcommand")
    sub_parsers.required = True

    show_parser = sub_parsers.add_parser("show", description="show configuration - %s" % lic,
                                         parents=[shared_parser])
    show_parser.add_argument("config",
                             help="optional config file to display (default: package settings)",
                             nargs='?')
    show_parser.add_argument("--brief", help="show only the .json data",
                             action="store_true")

    set_parser = sub_parsers.add_parser("set", description=SET_HELP, parents=[shared_parser],
                                        formatter_class=argparse.RawTextHelpFormatter)
    set_parser.add_argument("params", choices=list(DEFAULT_SETTINGS_DICT.keys()),
                            nargs=argparse.REMAINDER, help="parameters to set")
    set_parser.add_argument("-c", "--config",
                            help="optional config file (default: package settings)", default=None)
    set_parser.add_argument("-m", "--merge",
                            help="other config file to merge in (priority)", default=None)
    set_parser.add_argument("--soft", help="do a soft-merge (no overwriting)", action="store_true")

    gen_parser = sub_parsers.add_parser("generate", description=GENERATE_HELP,
                                        parents=[shared_parser],
                                        formatter_class=argparse.RawTextHelpFormatter)
    gen_parser.add_argument("-o", "--out", help="path for config file to generate")

    reset_parser = sub_parsers.add_parser("reset", description="reset package settings - %s" % lic,
                                          parents=[shared_parser])
    reset_parser.add_argument("-y", help="acknowledge automatically", action="store_true")

    argcomplete.autocomplete(main_parser)
    if len(sys.argv) > 1 and sys.argv[1] == "set":
        args, other_args = main_parser.parse_known_args()
        other_args = [arg for arg in sys.argv[2:] if not arg.startswith('-')]
    else:
        args, other_args = main_parser.parse_known_args()
    settings.configure_logging()
    colorama.init()

    config = settings.DEFAULT_PATH
    if hasattr(args, "config"):
        if args.config:
            config = args.config

    if args.subcommand == "show":
        if not args.brief and not args.config:
            style = Style.BRIGHT if not args.no_color else Style.NORMAL
            doc_str = "\n".join("{0}{1}{2}:\n{3}\n".format(style, k, Style.RESET_ALL, v[1])
                                for k, v in sorted(DEFAULT_SETTINGS_DICT_DOC.items()))
            logging.info(doc_str)
            logging.info("{0}\n{1}\n{0}".format(SEP, config))
        show(config, colored=not args.no_color)
        if config == settings.DEFAULT_PATH and not args.brief:
            logging.info(SEP + "\nSee text above for parameter descriptions.")

    elif args.subcommand == "set":
        if not os.access(config, os.W_OK):
            logging.info("No permission to modify " + config)
            sys.exit()
        if other_args or args.merge:
            logging.info("{0}\nOld configuration:\n{0}".format(SEP))
            show(config, colored=not args.no_color)
            try:
                set_cfg(config, other_args)
            except ConfigError as e:
                logging.error(e)
                sys.exit(1)
            if args.merge:
                merge_json_union(config, args.merge, args.soft)
            logging.info(SEP + "\nNew configuration:\n" + SEP)
            show(config, colored=not args.no_color)
        else:
            logging.info("No configuration parameters given (see --help).")

    elif args.subcommand == "generate":
        if other_args:
            logging.info("{0}\nParsed by argparse:\n{1}\n"
                         "{0}\nWARNING:\n"
                         "Make sure you use the 'long-style' -- options (e.g. --plot) if possible\n"
                         "and no combined short '-' flags, (e.g. -vp)\n{0}".format(SEP, other_args))
            data = generate(other_args)
            log_info_dict_json(data, colored=not args.no_color)
            if args.out and user.check_and_confirm_overwrite(args.out):
                with open(args.out, 'w') as out:
                    out.write(json.dumps(data, indent=4, sort_keys=True))
            elif not args.out:
                logging.info("{0}\n(-o | --out) not specified - saving nothing\n{0}".format(SEP))
        else:
            logging.info("No command line arguments given (see --help)")

    elif args.subcommand == "reset":
        if not os.access(config, os.W_OK):
            logging.info("No permission to modify" + config)
            sys.exit()
        if args.y or user.confirm("Reset the package settings to the default settings? (y/n)"):
            settings.reset()
            logging.info("{0}\nPackage settings after reset:\n{0}".format(SEP))
            show(settings.DEFAULT_PATH, colored=not args.no_color)
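
The "set" and "generate" subcommands above rely on parse_known_args, which returns the recognized namespace plus a list of leftover tokens (other_args); a minimal standalone sketch of that behavior, with made-up flag names:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--known", action="store_true")
args, other_args = parser.parse_known_args(["--known", "--plot_backend", "Agg"])
print(args.known)   # True
print(other_args)   # ['--plot_backend', 'Agg']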
Example #8
def main():
    import os
    import sys
    import json
    import logging
    import argparse
    import argcomplete
    import colorama
    from evo.tools import settings, user
    basic_desc = "crappy configuration tool"
    lic = "(c) [email protected]"
    shared_parser = argparse.ArgumentParser(add_help=False)
    shared_parser.add_argument("--no_color",
                               help="don't color output",
                               action="store_false",
                               default=True)
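    # note: despite the name, args.no_color is True when no --no_color flag is given
    # (colored output) and False otherwise, since it is later passed as colored=args.no_color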
    main_parser = argparse.ArgumentParser(description="%s %s" %
                                          (basic_desc, lic))
    sub_parsers = main_parser.add_subparsers(dest="subcommand")
    sub_parsers.required = True

    show_parser = sub_parsers.add_parser(
        "show",
        description="show configuration - %s" % lic,
        parents=[shared_parser])
    show_parser.add_argument(
        "config",
        help="optional config file to display (default: package settings)",
        nargs='?')
    show_parser.add_argument("--brief",
                             help="show only the .json data",
                             action="store_true")

    set_parser = sub_parsers.add_parser(
        "set",
        description=SET_HELP,
        parents=[shared_parser],
        formatter_class=argparse.RawTextHelpFormatter)
    set_parser.add_argument(
        "-c",
        "--config",
        help="optional config file (default: package settings)",
        default=None)
    set_parser.add_argument("-m",
                            "--merge",
                            help="other config file to merge in (priority)",
                            default=None)

    gen_parser = sub_parsers.add_parser(
        "generate",
        description=GENERATE_HELP,
        parents=[shared_parser],
        formatter_class=argparse.RawTextHelpFormatter)
    gen_parser.add_argument("-o",
                            "--out",
                            help="path for config file to generate")

    reset_parser = sub_parsers.add_parser(
        "reset",
        description="reset package settings - %s" % lic,
        parents=[shared_parser])

    argcomplete.autocomplete(main_parser)
    args, other_args = main_parser.parse_known_args()
    settings.configure_logging()
    colorama.init()

    config = settings.DEFAULT_PATH
    if hasattr(args, "config"):
        if args.config:
            config = args.config

    if args.subcommand == "show":
        if not args.brief and not args.config:
            logging.info(settings.DEFAULT_SETTINGS_HELP)
            logging.info(SEP + "\n" + config + "\n" + SEP)
        show(config, colored=args.no_color)
        if config == settings.DEFAULT_PATH and not args.brief:
            logging.info(SEP + "\nsee text above for parameter descriptions")

    elif args.subcommand == "set":
        if not os.access(config, os.W_OK):
            logging.info("no permission to modify " + config)
            sys.exit()
        if other_args or args.merge:
            logging.info(SEP + "\nold configuration:\n" + SEP)
            show(config, colored=args.no_color)
            try:
                set_cfg(config, other_args)
            except ConfigError as e:
                logging.error(e)
                sys.exit(1)
            if args.merge:
                merge_json_union(config, args.merge)
            logging.info(SEP + "\nnew configuration:\n" + SEP)
            show(config, colored=args.no_color)
        else:
            logging.info("no configuration parameters given (see --help)")

    elif args.subcommand == "generate":
        if other_args:
            logging.info(SEP + "\nparsed by argparse:\n" + str(other_args))
            logging.info(
                SEP + "\nWARNING:\n" +
                "make sure you use the 'long-style' -- options (e.g. --plot) if possible\n"
                + "and no combined 'short' - flags, (e.g. -avp)\n" + SEP)
            data = generate(other_args)
            log_info_dict_json(data, colored=args.no_color)
            if args.out and user.check_and_confirm_overwrite(args.out):
                with open(args.out, 'w') as out:
                    out.write(json.dumps(data, indent=4, sort_keys=True))
            elif not args.out:
                logging.info(
                    SEP +
                    "\nno output file specified (-o / --out) - doing nothing\n"
                    + SEP)
        else:
            logging.info("no command line arguments given (see --help)")

    elif args.subcommand == "reset":
        if not os.access(config, os.W_OK):
            logging.info("no permission to modify" + config)
            sys.exit()
        if user.confirm(
                "reset the package settings to the default settings? (y/n)"):
            reset_pkg_settings(settings.DEFAULT_PATH)
            logging.info(SEP + "\npackage settings after reset:\n" + SEP)
            show(settings.DEFAULT_PATH, colored=args.no_color)
Example #9
def run(args):
    import sys
    import logging
    from evo.algorithms import metrics
    from evo.tools import file_interface, settings

    # manually check bins and tols arguments to allow them to be in config files
    if not args.bins or not args.tols:
        logging.error(
            "the following arguments are required: -b/--bins, -t/--tols")
        sys.exit(1)

    settings.configure_logging(args.verbose, args.silent, args.debug)
    if args.debug:
        import pprint
        logging.debug(
            "main_parser config:\n" +
            pprint.pformat({arg: getattr(args, arg)
                            for arg in vars(args)}) + "\n")
    logging.debug(SEP)

    pose_relation = None
    if args.pose_relation == "trans_part":
        pose_relation = metrics.PoseRelation.translation_part
    elif args.pose_relation == "angle_deg":
        pose_relation = metrics.PoseRelation.rotation_angle_deg
    elif args.pose_relation == "angle_rad":
        pose_relation = metrics.PoseRelation.rotation_angle_rad

    traj_ref, traj_est, stamps_est = None, None, None
    ref_name, est_name = "", ""
    if args.subcommand == "tum":
        traj_ref, traj_est = file_interface.load_assoc_tum_trajectories(
            args.ref_file,
            args.est_file,
            args.t_max_diff,
            args.t_offset,
        )
        ref_name, est_name = args.ref_file, args.est_file
    elif args.subcommand == "kitti":
        traj_ref = file_interface.read_kitti_poses_file(args.ref_file)
        traj_est = file_interface.read_kitti_poses_file(args.est_file)
        ref_name, est_name = args.ref_file, args.est_file
    elif args.subcommand == "euroc":
        args.align = True
        logging.info(
            "forcing trajectory alignment implicitly (EuRoC ground truth is in IMU frame)"
        )
        logging.debug(SEP)
        traj_ref, traj_est = file_interface.load_assoc_euroc_trajectories(
            args.state_gt_csv,
            args.est_file,
            args.t_max_diff,
            args.t_offset,
        )
        ref_name, est_name = args.state_gt_csv, args.est_file
    elif args.subcommand == "bag":
        import rosbag
        logging.debug("opening bag file " + args.bag)
        bag = rosbag.Bag(args.bag, 'r')
        try:
            traj_ref, traj_est = file_interface.load_assoc_bag_trajectories(
                bag,
                args.ref_topic,
                args.est_topic,
                args.t_max_diff,
                args.t_offset,
            )
        finally:
            bag.close()
        ref_name, est_name = args.ref_topic, args.est_topic

    main_rpe_for_each(traj_ref,
                      traj_est,
                      pose_relation,
                      args.mode,
                      args.bins,
                      args.tols,
                      args.align,
                      args.correct_scale,
                      ref_name,
                      est_name,
                      args.plot,
                      args.save_plot,
                      args.save_results,
                      args.no_warnings,
                      serialize_plot=args.serialize_plot)