def launch(main_module, parser):
    """Parse CLI arguments and run an evo module, logging unhandled crashes.

    :param main_module: evo app module providing run(args) and __name__
    :param parser: the argparse.ArgumentParser of that module
    """
    args = parser.parse_args()
    # merge an optional --config file into the parsed arguments
    if hasattr(args, "config"):
        args = merge_config(args)
    import sys
    from evo.tools import settings
    try:
        main_module.run(args)
    except SystemExit as e:
        # propagate the module's own exit code unchanged
        sys.exit(e.code)
    except Exception:
        # was a bare 'except:', which also swallowed KeyboardInterrupt;
        # catching Exception keeps Ctrl-C working while logging real crashes
        logger.exception("Unhandled error in " + main_module.__name__)
        print("")
        err_msg = "evo module " + main_module.__name__ + " crashed"
        if settings.SETTINGS.logfile_enabled:
            err_msg += " - see " + settings.DEFAULT_LOGFILE_PATH
        else:
            err_msg += " - no logfile written (disabled)"
        logger.error(err_msg)
        from evo.tools import user
        if not args.no_warnings:
            if settings.SETTINGS.logfile_enabled and user.confirm(
                    "Open logfile? (y/n)"):
                import webbrowser
                webbrowser.open(settings.DEFAULT_LOGFILE_PATH)
        sys.exit(1)
def launch(main_module, parser):
    """Run an evo module with parsed CLI args; convert crashes to exit code 1.

    :param main_module: evo app module providing run(args) and __name__
    :param parser: the argparse.ArgumentParser of that module
    """
    args = parser.parse_args()
    # optional --config file overrides the parsed arguments
    if hasattr(args, "config"):
        args = merge_config(args)
    import sys
    from evo.tools import settings
    try:
        main_module.run(args)
    except SystemExit as e:
        # keep the module's chosen exit code
        sys.exit(e.code)
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt is not swallowed
        logger.exception("Unhandled error in " + main_module.__name__)
        print("")
        err_msg = "evo module " + main_module.__name__ + " crashed"
        if settings.SETTINGS.logfile_enabled:
            err_msg += " - see " + settings.DEFAULT_LOGFILE_PATH
        else:
            err_msg += " - no logfile written (disabled)"
        logger.error(err_msg)
        from evo.tools import user
        if not args.no_warnings:
            if settings.SETTINGS.logfile_enabled and user.confirm("Open logfile? (y/n)"):
                import webbrowser
                webbrowser.open(settings.DEFAULT_LOGFILE_PATH)
        sys.exit(1)
def launch(ground_truth_file, result_file):
    """Run evo's trajectory app (main_traj) with a fixed argument preset.

    :param ground_truth_file: reference trajectory file (TUM format)
    :param result_file: estimated trajectory file to evaluate/plot
    """
    # hard-coded Namespace replaces the usual parser.parse_args() call
    args = Namespace(
        align=False, config=None, correct_scale=False, debug=False,
        full_check=False, invert_transform=False, merge=False, n_to_align=-1,
        no_warnings=False, plot=True, plot_mode='xyz', ref=ground_truth_file,
        save_as_bag=False, save_as_kitti=False, save_as_tum=False,
        save_plot=None, serialize_plot=None, silent=False, subcommand='tum',
        t_max_diff=0.01, t_offset=0.0,
        # traj_files=['logs/result_ceres.txt', 'logs/result_g2o.txt'],
        traj_files=[result_file], transform_left=None, transform_right=None,
        verbose=True)
    # args = parser.parse_args()
    if hasattr(args, "config"):
        args = merge_config(args)
    import sys
    from evo.tools import settings
    try:
        main_traj.run(args)
    except SystemExit as e:
        sys.exit(e.code)
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt still aborts
        logger.exception("Unhandled error in " + main_traj.__name__)
        print("")
        err_msg = "evo module " + main_traj.__name__ + " crashed"
        if settings.SETTINGS.logfile_enabled:
            err_msg += " - see " + settings.DEFAULT_LOGFILE_PATH
        else:
            err_msg += " - no logfile written (disabled)"
        logger.error(err_msg)
        from evo.tools import user
        if not args.no_warnings:
            if settings.SETTINGS.logfile_enabled and user.confirm(
                    "Open logfile? (y/n)"):
                import webbrowser
                webbrowser.open(settings.DEFAULT_LOGFILE_PATH)
        sys.exit(1)

# def start():
#     parser = main_traj.parser()
#     argcomplete.autocomplete(parser)
#     launch(main_traj, parser)
def main():
    """CLI entry point: open, show and optionally re-export a serialized
    (pickled) PlotCollection file."""
    import argparse
    import argcomplete
    basic_desc = "experimental tool for opening a serialized PlotCollection (pickle format)"
    lic = "(c) [email protected]"
    main_parser = argparse.ArgumentParser(description="%s %s" % (basic_desc, lic))
    main_parser.add_argument("in_file", help="path to a serialized plot_collection")
    main_parser.add_argument("-t", "--title", help="custom title (default: file name)")
    main_parser.add_argument("--save_plot", help="path to save plot", default=None)
    main_parser.add_argument("--serialize_plot",
                             help="path to re-serialize PlotCollection",
                             default=None)
    main_parser.add_argument("--to_html",
                             help="convert to html (requires mpld3 library)",
                             action="store_true")
    main_parser.add_argument("--no_warnings",
                             help="no warnings requiring user confirmation",
                             action="store_true")
    argcomplete.autocomplete(main_parser)
    args = main_parser.parse_args()
    from evo.tools import plot, settings, user
    settings.configure_logging(verbose=True)
    # fall back to the file name if no custom title was given
    if not args.title:
        title = os.path.basename(args.in_file)
    else:
        title = args.title
    if not args.no_warnings:
        # fixed typo: "guranteed" -> "guaranteed"
        logging.warning(
            "Please note that this tool is experimental and not guaranteed to work.\n"
            "Only works if the same matplotlib backend is used as for serialization.\n"
            "If not, try: evo_config set plot_backend <backend_name>\n" + SEP)
    plot_collection = plot.PlotCollection(title, deserialize=args.in_file)
    logging.debug("deserialized PlotCollection: " + str(plot_collection))
    plot_collection.show()
    if args.serialize_plot:
        logging.debug(SEP)
        plot_collection.serialize(args.serialize_plot,
                                  confirm_overwrite=not args.no_warnings)
    if args.save_plot:
        logging.debug(SEP)
        plot_collection.export(args.save_plot,
                               confirm_overwrite=not args.no_warnings)
    if args.to_html:
        import mpld3
        logging.debug(SEP + "\nhtml export\n")
        # one .html file per figure in the collection
        for name, fig in plot_collection.figures.items():
            html = mpld3.fig_to_html(fig)
            out = name + ".html"
            with open(out, 'w') as f:
                logging.debug(out)
                f.write(html)
    if not args.no_warnings:
        logging.debug(SEP)
        if user.confirm("save changes & overwrite original file "
                        + args.in_file + "? (y/n)"):
            plot_collection.serialize(args.in_file, confirm_overwrite=False)
def launch(ground_truth_file, result_file):
    """Run evo's RPE app (main_rpe) with a fixed argument preset.

    :param ground_truth_file: reference trajectory file (TUM format)
    :param result_file: estimated trajectory file to evaluate
    """
    # hard-coded Namespace replaces the usual parser.parse_args() call
    args = Namespace(
        align=False, all_pairs=False, config=None, correct_scale=False,
        debug=False, delta=1, delta_tol=0.1, delta_unit='f',
        est_file=result_file, no_warnings=False, plot=True,
        plot_colormap_max=None, plot_colormap_max_percentile=None,
        plot_colormap_min=None, plot_mode='xyz', pose_relation='trans_part',
        ref_file=ground_truth_file, save_plot=None,
        save_results='logs/evo_rpe.zip', serialize_plot=None, silent=False,
        subcommand='tum', t_max_diff=0.01, t_offset=0.0, verbose=True)
    # args = parser.parse_args()
    if hasattr(args, "config"):
        args = merge_config(args)
    import sys
    from evo.tools import settings
    try:
        main_rpe.run(args)
    except SystemExit as e:
        sys.exit(e.code)
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt still aborts
        logger.exception("Unhandled error in " + main_rpe.__name__)
        print("")
        err_msg = "evo module " + main_rpe.__name__ + " crashed"
        if settings.SETTINGS.logfile_enabled:
            err_msg += " - see " + settings.DEFAULT_LOGFILE_PATH
        else:
            err_msg += " - no logfile written (disabled)"
        logger.error(err_msg)
        from evo.tools import user
        if not args.no_warnings:
            if settings.SETTINGS.logfile_enabled and user.confirm(
                    "Open logfile? (y/n)"):
                import webbrowser
                webbrowser.open(settings.DEFAULT_LOGFILE_PATH)
        sys.exit(1)
def launch(main_module, parser):
    """Parse CLI arguments and run an evo module with layered error handling.

    Handler order matters: KeyboardInterrupt and SystemExit are passed
    through first, known errors (EvoException, FileNotFoundError) are
    reported briefly, and only truly unexpected exceptions get the full
    traceback/logfile treatment.
    """
    args = parser.parse_args()
    # merge an optional --config file into the parsed arguments
    if hasattr(args, "config"):
        args = merge_config(args)
    import sys
    from evo.tools import settings
    from evo import EvoException, NullHandler
    try:
        main_module.run(args)
    except KeyboardInterrupt:
        sys.exit(1)
    except SystemExit as e:
        # propagate the module's own exit code unchanged
        sys.exit(e.code)
    except (EvoException, FileNotFoundError) as e:
        # expected/user-facing errors: short message, no traceback
        logger.error(str(e))
        sys.exit(1)
    except Exception:
        base_logger = logging.getLogger("evo")
        if len(base_logger.handlers) == 0 or isinstance(
                base_logger.handlers[0], NullHandler):
            # In case logging couldn't be configured, print & exit directly.
            import traceback
            traceback.print_exc()
            sys.exit(1)
        logger.exception("Unhandled error in " + main_module.__name__)
        print("")
        err_msg = "evo module " + main_module.__name__ + " crashed"
        if settings.SETTINGS.global_logfile_enabled:
            err_msg += " - see " + settings.GLOBAL_LOGFILE_PATH
        else:
            err_msg += " - no logfile written (disabled)"
        logger.error(err_msg)
        from evo.tools import user
        if not args.no_warnings:
            # offer to open the logfile for inspection after a crash
            if settings.SETTINGS.global_logfile_enabled and user.confirm(
                    "Open logfile? (y/n)"):
                import webbrowser
                webbrowser.open(settings.GLOBAL_LOGFILE_PATH)
        sys.exit(1)
def main():
    """CLI entry point of the 'evo' package tool: show package infos ('pkg')
    or pipe/print the package logfile ('cat_log')."""
    import sys
    import argparse
    import argcomplete
    main_parser = argparse.ArgumentParser()
    shared_parser = argparse.ArgumentParser(add_help=False)
    sub_parsers = main_parser.add_subparsers(dest="subcommand")
    sub_parsers.required = True
    pkg_parser = sub_parsers.add_parser("pkg",
                                        description="show infos of the package",
                                        parents=[shared_parser])
    pkg_parser.add_argument("--info", help="show the package description",
                            action="store_true")
    pkg_parser.add_argument("--version", help="print the package version",
                            action="store_true")
    pkg_parser.add_argument("--pyversion", help="print the Python version",
                            action="store_true")
    pkg_parser.add_argument("--license", help="print the package license",
                            action="store_true")
    pkg_parser.add_argument("--location", help="print the package path",
                            action="store_true")
    pkg_parser.add_argument("--logfile", help="print the logfile path",
                            action="store_true")
    pkg_parser.add_argument("--open_log", help="open the package logfile",
                            action="store_true")
    pkg_parser.add_argument("--clear_log", help="clear package logfile",
                            action="store_true")
    cat_parser = sub_parsers.add_parser(
        "cat_log", description="pipe stdin to evo logfile"
        " or print logfile to stdout (if no stdin)", parents=[shared_parser])
    cat_parser.add_argument("-l", "--loglevel",
                            help="loglevel of the message", default="info",
                            choices=["error", "warning", "info", "debug"])
    cat_parser.add_argument("-m", "--message",
                            help="explicit message instead of pipe")
    cat_parser.add_argument("-s", "--source",
                            help="source name to use for the log message")
    cat_parser.add_argument("--mark_entry",
                            help="include the default log entry header",
                            action="store_true", default=False)
    cat_parser.add_argument("--clear_log", help="clear logfile before exiting",
                            action="store_true")
    argcomplete.autocomplete(main_parser)
    if len(sys.argv[1:]) == 0:
        sys.argv.extend(["pkg", "--info"])  # cheap trick because YOLO
    args = main_parser.parse_args()
    # only append a newline when writing to an interactive terminal
    line_end = "\n" if sys.stdout.isatty() else ""
    if args.subcommand == "pkg":
        if not len(sys.argv) > 2:
            pkg_parser.print_help()
            sys.exit(1)
        if args.license:
            # use a context manager - previously the handle was leaked
            with open(os.path.join(settings.PACKAGE_BASE_PATH, "LICENSE")) as f:
                print(f.read())
        if args.info:
            main_parser.print_usage()
            print(DESC)
        if args.version:
            here = os.path.dirname(os.path.abspath(__file__))
            version_path = os.path.join(here, "version")
            with open(version_path) as f:
                print(" ".join(f.read().splitlines()), end=line_end)
        if args.pyversion:
            import platform as pf
            print(pf.python_version(), end=line_end)
        if args.location:
            print(settings.PACKAGE_BASE_PATH, end=line_end)
        if args.logfile or args.open_log:
            if not os.path.exists(settings.DEFAULT_LOGFILE_PATH):
                print("no logfile found - run: evo_config set logfile_enabled",
                      end=line_end)
                sys.exit(1)
            print(settings.DEFAULT_LOGFILE_PATH, end=line_end)
            if args.open_log:
                import webbrowser
                webbrowser.open(settings.DEFAULT_LOGFILE_PATH)
        if args.clear_log:
            from evo.tools import user
            if user.confirm("clear logfile? (y/n)"):
                # truncate the file and close the handle (was left open before)
                with open(settings.DEFAULT_LOGFILE_PATH, mode='w'):
                    pass
    elif args.subcommand == "cat_log":
        if os.name == "nt":
            print("cat_log feature not available on Windows")
            sys.exit(1)
        if not args.message and sys.stdin.isatty():
            # no pipe and no -m: dump the logfile instead of writing to it
            if not os.path.exists(settings.DEFAULT_LOGFILE_PATH):
                print("no logfile found - run: evo_config set logfile_enabled",
                      end=line_end)
            else:
                with open(settings.DEFAULT_LOGFILE_PATH) as f:
                    print(f.read(), end="")
        elif not settings.SETTINGS.logfile_enabled:
            print("logfile disabled", end=line_end)
            sys.exit(1)
        else:
            import logging
            dbg_fmt = None
            if args.source:
                dbg_fmt = "[%(levelname)s][%(asctime)s][" + args.source \
                    + "]\n%(message)s"
            settings.configure_logging(silent=True, dbg_fmt=dbg_fmt,
                                       mark_entry=args.mark_entry)
            # message comes either from stdin (pipe) or from -m/--message
            if not args.message:
                msg = sys.stdin.read()
            else:
                msg = args.message
            getattr(logging, args.loglevel)(msg)
        if args.clear_log:
            # truncate the file and close the handle (was left open before)
            with open(settings.DEFAULT_LOGFILE_PATH, mode='w'):
                pass
def run(args):
    """Aggregate evo metric result files into DataFrames, print statistics
    and optionally save tables / create comparison plots.

    :param args: parsed CLI namespace (result_files, plot/save options, ...)
    """
    import os
    import sys
    import logging
    import pandas as pd
    import numpy as np
    from natsort import natsorted
    from evo.tools import file_interface, user, settings
    from evo.tools.settings import SETTINGS
    settings.configure_logging(args.verbose, args.silent, args.debug)
    if args.debug:
        import pprint
        logging.debug(
            "main_parser config:\n" + pprint.pformat(
                {arg: getattr(args, arg) for arg in vars(args)}) + "\n")
    # store data in Pandas data frames for easier analysis
    raw_df = pd.DataFrame()
    stat_df = pd.DataFrame()
    info_df = pd.DataFrame()
    use_seconds = False
    for result_file in args.result_files:
        logging.debug(SEP)
        result_obj = file_interface.load_res_file(result_file, True)
        short_est_name = os.path.splitext(
            os.path.basename(result_obj.info["est_name"]))[0]
        error_array = result_obj.np_arrays["error_array"]
        if "seconds_from_start" in result_obj.np_arrays:
            seconds_from_start = result_obj.np_arrays["seconds_from_start"]
        else:
            seconds_from_start = None
        if not args.no_warnings and (short_est_name in info_df.columns):
            logging.warning("double entry detected: " + short_est_name)
            if not user.confirm(
                    "ignore? enter 'y' to go on or any other key to quit"):
                sys.exit()
        if SETTINGS.plot_usetex:
            short_est_name = short_est_name.replace("_", "\\_")
        # choose the x-axis index: absolute timestamps, relative seconds
        # or a plain integer index
        if args.use_abs_time:
            if "traj_est" in result_obj.trajectories:
                traj_est = result_obj.trajectories["traj_est"]
                index = traj_est.timestamps
                use_seconds = True
            else:
                msg = "no 'traj_est' trajectory found in " + result_file \
                    + " but --use_abs_time requires the trajectory in the result file - " \
                    + "to let the metrics app include them run: evo_config set save_traj_in_zip"
                raise RuntimeError(msg)
        elif seconds_from_start is not None:
            index = seconds_from_start.tolist()
            use_seconds = True
        else:
            index = np.arange(0, error_array.shape[0])
        result_obj.info["traj. backup?"] = \
            all(k in result_obj.trajectories for k in ("traj_ref", "traj_est"))
        result_obj.info["res_file"] = result_file
        new_raw_df = pd.DataFrame({short_est_name: error_array.tolist()},
                                  index=index)
        new_info_df = pd.DataFrame({short_est_name: result_obj.info})
        new_stat_df = pd.DataFrame({short_est_name: result_obj.stats})
        # natural sort num strings "10" "100" "20" -> "10" "20" "100"
        new_stat_df = new_stat_df.reindex(index=natsorted(new_stat_df.index))
        # column-wise concatenation
        raw_df = pd.concat([raw_df, new_raw_df], axis=1)
        info_df = pd.concat([info_df, new_info_df], axis=1)
        stat_df = pd.concat([stat_df, new_stat_df], axis=1)
        # if verbose: log infos of the current data
        logging.debug(
            "\n" + result_obj.pretty_str(title=True, stats=False, info=True))
    logging.debug(SEP)
    logging.info("\nstatistics overview:\n"
                 + stat_df.T.to_string(line_width=80) + "\n")
    # check titles
    # NOTE: .ix was removed in pandas 1.0 - use .loc/.iloc instead
    first_title = info_df.loc["title"].iloc[0]
    first_res_file = info_df.loc["res_file"].iloc[0]
    if args.save_table or args.plot or args.save_plot:
        # .iteritems() was removed in pandas 2.0 - use .items()
        for short_est_name, column in info_df.items():
            if column.loc["title"] != first_title and not args.no_warnings:
                logging.info(SEP)
                logging.warning(
                    "mismatching titles, you probably use data from different metrics"
                )
                logging.warning("conflict:\n" + "<" * 7 + " " + first_res_file
                                + "\n" + first_title + "\n" + "=" * 7 + "\n"
                                + column.loc["title"] + "\n" + ">" * 7 + " "
                                + column.loc["res_file"])
                logging.warning(
                    "only the first one will be used as the title!")
                if not user.confirm(
                        "plot/save anyway? - enter 'y' or any other key to exit"
                ):
                    sys.exit()
    if args.save_table:
        logging.debug(SEP)
        if args.no_warnings or user.check_and_confirm_overwrite(
                args.save_table):
            table_fmt = SETTINGS.table_export_format
            if SETTINGS.table_export_transpose:
                getattr(stat_df.T, "to_" + table_fmt)(args.save_table)
            else:
                getattr(stat_df, "to_" + table_fmt)(args.save_table)
            logging.debug(table_fmt + " table saved to: " + args.save_table)
    if args.plot or args.save_plot or args.serialize_plot:
        # check if data has NaN "holes" due to different indices
        inconsistent = raw_df.isnull().values.any()
        if inconsistent and not args.no_warnings:
            logging.debug(SEP)
            logging.warning(
                "data lengths/indices are not consistent, plotting probably makes no sense"
            )
            if not user.confirm(
                    "plot anyway? - enter 'y' or any other key to exit"):
                sys.exit()
        from evo.tools import plot
        import matplotlib.pyplot as plt
        import seaborn as sns
        import math
        from scipy import stats
        # use default plot settings
        figsize = (SETTINGS.plot_figsize[0], SETTINGS.plot_figsize[1])
        use_cmap = SETTINGS.plot_multi_cmap.lower() != "none"
        colormap = SETTINGS.plot_multi_cmap if use_cmap else None
        linestyles = ["-o" for x in args.result_files
                      ] if args.plot_markers else None
        # labels according to first dataset
        title = first_title
        if "xlabel" in info_df.iloc[:, 0].index:
            index_label = info_df.loc["xlabel"].iloc[0]
        else:
            index_label = "$t$ (s)" if use_seconds else "index"
        metric_label = info_df.loc["label"].iloc[0]
        plot_collection = plot.PlotCollection(title)
        # raw value plot
        fig_raw = plt.figure(figsize=figsize)
        # handle NaNs from concat() above
        raw_df.interpolate(method="index").plot(ax=fig_raw.gca(),
                                                colormap=colormap,
                                                style=linestyles,
                                                title=first_title)
        plt.xlabel(index_label)
        plt.ylabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("raw", fig_raw)
        # statistics plot
        fig_stats = plt.figure(figsize=figsize)
        exclude = stat_df.index.isin(["sse"])  # don't plot sse
        stat_df[~exclude].plot(kind="barh", ax=fig_stats.gca(),
                               colormap=colormap, stacked=False)
        plt.xlabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("stats", fig_stats)
        # grid of distribution plots
        raw_tidy = pd.melt(raw_df, value_vars=list(raw_df.columns.values),
                           var_name="estimate", value_name=metric_label)
        col_wrap = 2 if len(args.result_files) <= 2 else math.ceil(
            len(args.result_files) / 2.0)
        dist_grid = sns.FacetGrid(raw_tidy, col="estimate", col_wrap=col_wrap)
        dist_grid.map(sns.distplot, metric_label)  # fits=stats.gamma
        plot_collection.add_figure("histogram", dist_grid.fig)
        # box plot
        fig_box = plt.figure(figsize=figsize)
        ax = sns.boxplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
                         ax=fig_box.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("box_plot", fig_box)
        # violin plot
        fig_violin = plt.figure(figsize=figsize)
        ax = sns.violinplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
                            ax=fig_violin.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("violin_histogram", fig_violin)
        if args.plot:
            plot_collection.show()
        if args.save_plot:
            logging.debug(SEP)
            plot_collection.export(args.save_plot,
                                   confirm_overwrite=not args.no_warnings)
        if args.serialize_plot:
            logging.debug(SEP)
            plot_collection.serialize(args.serialize_plot,
                                      confirm_overwrite=not args.no_warnings)
def main():
    """CLI entry point of the evo configuration tool: show / set / generate /
    reset package settings."""
    import argcomplete
    basic_desc = "crappy configuration tool"
    lic = "(c) [email protected]"
    shared_parser = argparse.ArgumentParser(add_help=False)
    shared_parser.add_argument("--no_color", help="don't color output",
                               action="store_true")
    main_parser = argparse.ArgumentParser(description="%s %s" %
                                          (basic_desc, lic))
    sub_parsers = main_parser.add_subparsers(dest="subcommand")
    sub_parsers.required = True
    show_parser = sub_parsers.add_parser(
        "show", description="show configuration - %s" % lic,
        parents=[shared_parser])
    show_parser.add_argument(
        "config",
        help="optional config file to display (default: package settings)",
        nargs='?')
    show_parser.add_argument("--brief", help="show only the .json data",
                             action="store_true")
    set_parser = sub_parsers.add_parser(
        "set", description=SET_HELP, parents=[shared_parser],
        formatter_class=argparse.RawTextHelpFormatter)
    set_parser.add_argument("params",
                            choices=list(DEFAULT_SETTINGS_DICT.keys()),
                            nargs=argparse.REMAINDER,
                            help="parameters to set")
    set_parser.add_argument(
        "-c", "--config",
        help="optional config file (default: package settings)", default=None)
    set_parser.add_argument("-m", "--merge",
                            help="other config file to merge in (priority)",
                            default=None)
    set_parser.add_argument("--soft", help="do a soft-merge (no overwriting)",
                            action="store_true")
    gen_parser = sub_parsers.add_parser(
        "generate", description=GENERATE_HELP, parents=[shared_parser],
        formatter_class=argparse.RawTextHelpFormatter)
    gen_parser.add_argument("-o", "--out",
                            help="path for config file to generate")
    reset_parser = sub_parsers.add_parser(
        "reset", description="reset package settings - %s" % lic,
        parents=[shared_parser])
    reset_parser.add_argument("-y", help="acknowledge automatically",
                              action="store_true")
    argcomplete.autocomplete(main_parser)
    if len(sys.argv) > 1 and sys.argv[1] == "set":
        args, other_args = main_parser.parse_known_args()
        # re-collect the non-flag arguments for 'set' from the raw argv
        other_args = [arg for arg in sys.argv[2:] if not arg.startswith('-')]
    else:
        args, other_args = main_parser.parse_known_args()
    log.configure_logging()
    colorama.init()
    config = settings.DEFAULT_PATH
    if hasattr(args, "config"):
        if args.config:
            config = args.config
    if args.subcommand == "show":
        if not args.brief and not args.config:
            style = Style.BRIGHT if not args.no_color else Style.NORMAL
            doc_str = "\n".join(
                "{0}{1}{2}:\n{3}\n".format(style, k, Style.RESET_ALL, v[1])
                for k, v in sorted(DEFAULT_SETTINGS_DICT_DOC.items()))
            logger.info(doc_str)
            logger.info("{0}\n{1}\n{0}".format(SEP, config))
        show(config, colored=not args.no_color)
        if config == settings.DEFAULT_PATH and not args.brief:
            logger.info(SEP + "\nSee text above for parameter descriptions.")
    elif args.subcommand == "set":
        if not os.access(config, os.W_OK):
            logger.error("No permission to modify " + config)
            # exit non-zero: this is an error (bare sys.exit() returned 0)
            sys.exit(1)
        if other_args or args.merge:
            logger.info("{0}\nOld configuration:\n{0}".format(SEP))
            show(config, colored=not args.no_color)
            try:
                set_cfg(config, other_args)
            except ConfigError as e:
                logger.error(e)
                sys.exit(1)
            if args.merge:
                merge_json_union(config, args.merge, args.soft)
            logger.info(SEP + "\nNew configuration:\n" + SEP)
            show(config, colored=not args.no_color)
        else:
            logger.error("No configuration parameters given (see --help).")
    elif args.subcommand == "generate":
        if other_args:
            logger.info(
                "{0}\nParsed by argparse:\n{1}\n"
                "{0}\nWARNING:\nMake sure you use the 'long-style' -- options "
                "(e.g. --plot) if possible\nand no combined short '-' flags, "
                "(e.g. -vp)\n{0}".format(SEP, other_args))
            data = generate(other_args)
            log_info_dict_json(data, colored=not args.no_color)
            if args.out and user.check_and_confirm_overwrite(args.out):
                with open(args.out, 'w') as out:
                    out.write(json.dumps(data, indent=4, sort_keys=True))
            elif not args.out:
                logger.warning("\n(-o | --out) not specified - saving nothing")
        else:
            logger.error("No command line arguments given (see --help)")
    elif args.subcommand == "reset":
        if not os.access(config, os.W_OK):
            # fixed missing space after "modify" (was "modify" + config)
            logger.error("No permission to modify " + config)
            # exit non-zero: this is an error (bare sys.exit() returned 0)
            sys.exit(1)
        if args.y or user.confirm(
                "Reset the package settings to the default settings? (y/n)"):
            settings.reset()
            logger.info("{0}\nPackage settings after reset:\n{0}".format(SEP))
            show(settings.DEFAULT_PATH, colored=not args.no_color)
def run(args):
    """Aggregate evo result files into a single DataFrame, print statistics
    and optionally save a table / create comparison plots.

    :param args: parsed CLI namespace (result_files, plot/save options, ...)
    """
    import sys
    import logging
    import pandas as pd
    from evo.tools import file_interface, user, settings, pandas_bridge
    from evo.tools.settings import SETTINGS
    pd.options.display.width = 80
    pd.options.display.max_colwidth = 20
    settings.configure_logging(args.verbose, args.silent, args.debug)
    if args.debug:
        import pprint
        arg_dict = {arg: getattr(args, arg) for arg in vars(args)}
        logging.debug("main_parser config:\n{}\n".format(
            pprint.pformat(arg_dict)))
    df = pd.DataFrame()
    for result_file in args.result_files:
        result = file_interface.load_res_file(result_file)
        name = result_file if args.use_filenames else None
        df = pd.concat([df, pandas_bridge.result_to_df(result, name)],
                       axis="columns")
    keys = df.columns.values.tolist()
    if SETTINGS.plot_usetex:
        keys = [key.replace("_", "\\_") for key in keys]
        df.columns = keys
    duplicates = [x for x in keys if keys.count(x) > 1]
    if duplicates:
        logging.error("Values of 'est_name' must be unique - duplicates: {}\n"
                      "Try using the --use_filenames option to use filenames "
                      "for labeling instead.".format(", ".join(duplicates)))
        sys.exit(1)
    # derive a common index type if possible - preferably timestamps
    common_index = None
    time_indices = ["timestamps", "seconds_from_start", "sec_from_start"]
    if args.use_rel_time:
        del time_indices[0]
    for idx in time_indices:
        if idx not in df.loc["np_arrays"].index:
            continue
        if df.loc["np_arrays", idx].isnull().values.any():
            continue
        else:
            common_index = idx
            break
    # build error_df (raw values) according to common_index
    if common_index is None:
        # use a non-timestamp index
        error_df = pd.DataFrame(df.loc["np_arrays", "error_array"].tolist(),
                                index=keys).T
    else:
        error_df = pd.DataFrame()
        for key in keys:
            new_error_df = pd.DataFrame(
                {key: df.loc["np_arrays", "error_array"][key]},
                index=df.loc["np_arrays", common_index][key])
            duplicates = new_error_df.index.duplicated(keep="first")
            if any(duplicates):
                logging.warning(
                    "duplicate indices in error array of {} - "
                    "keeping only first occurrence of duplicates".format(key))
                new_error_df = new_error_df[~duplicates]
            error_df = pd.concat([error_df, new_error_df], axis=1)
    # check titles
    first_title = df.loc["info", "title"][0]
    first_file = args.result_files[0]
    if not args.no_warnings:
        checks = df.loc["info", "title"] != first_title
        for i, differs in enumerate(checks):
            if not differs:
                continue
            else:
                mismatching_title = df.loc["info", "title"][i]
                mismatching_file = args.result_files[i]
                logging.debug(SEP)
                logging.warning(
                    CONFLICT_TEMPLATE.format(first_file, first_title,
                                             mismatching_title,
                                             mismatching_file))
                if not user.confirm(
                        "Go on anyway? - enter 'y' or any other key to exit"):
                    sys.exit()
    if logging.getLogger().isEnabledFor(logging.DEBUG):
        logging.debug(SEP)
        logging.debug("Aggregated dataframe:\n{}".format(
            df.to_string(line_width=80)))
    # show a statistics overview
    logging.debug(SEP)
    logging.info("\n{}\n\n{}\n".format(
        first_title, df.loc["stats"].T.to_string(line_width=80)))
    if args.save_table:
        logging.debug(SEP)
        if args.no_warnings or user.check_and_confirm_overwrite(
                args.save_table):
            if SETTINGS.table_export_data.lower() == "error_array":
                data = error_df
            elif SETTINGS.table_export_data.lower() in ("info", "stats"):
                data = df.loc[SETTINGS.table_export_data.lower()]
            else:
                raise ValueError(
                    "unsupported export data specifier: {}".format(
                        SETTINGS.table_export_data))
            if SETTINGS.table_export_transpose:
                data = data.T
            if SETTINGS.table_export_format == "excel":
                # ExcelWriter.save() is deprecated/removed - the context
                # manager closes (and thereby saves) the file reliably
                with pd.ExcelWriter(args.save_table) as writer:
                    data.to_excel(writer)
            else:
                getattr(data,
                        "to_" + SETTINGS.table_export_format)(args.save_table)
            logging.debug("{} table saved to: {}".format(
                SETTINGS.table_export_format, args.save_table))
    if args.plot or args.save_plot or args.serialize_plot:
        # check if data has NaN "holes" due to different indices
        inconsistent = error_df.isnull().values.any()
        if inconsistent and common_index != "timestamps" and not args.no_warnings:
            logging.debug(SEP)
            logging.warning("Data lengths/indices are not consistent, "
                            "raw value plot might not be correctly aligned")
        from evo.tools import plot
        import matplotlib.pyplot as plt
        import seaborn as sns
        import math
        # use default plot settings
        figsize = (SETTINGS.plot_figsize[0], SETTINGS.plot_figsize[1])
        use_cmap = SETTINGS.plot_multi_cmap.lower() != "none"
        colormap = SETTINGS.plot_multi_cmap if use_cmap else None
        linestyles = ["-o" for x in args.result_files
                      ] if args.plot_markers else None
        # labels according to first dataset
        title = first_title
        if "xlabel" in df.loc["info"].index and not df.loc[
                "info", "xlabel"].isnull().values.any():
            index_label = df.loc["info", "xlabel"][0]
        else:
            index_label = "$t$ (s)" if common_index else "index"
        metric_label = df.loc["info", "label"][0]
        plot_collection = plot.PlotCollection(title)
        # raw value plot
        fig_raw = plt.figure(figsize=figsize)
        # handle NaNs from concat() above
        error_df.interpolate(method="index").plot(ax=fig_raw.gca(),
                                                  colormap=colormap,
                                                  style=linestyles,
                                                  title=first_title)
        plt.xlabel(index_label)
        plt.ylabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("raw", fig_raw)
        # statistics plot
        fig_stats = plt.figure(figsize=figsize)
        exclude = df.loc["stats"].index.isin(["sse"])  # don't plot sse
        df.loc["stats"][~exclude].plot(kind="barh", ax=fig_stats.gca(),
                                       colormap=colormap, stacked=False)
        plt.xlabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("stats", fig_stats)
        # grid of distribution plots
        raw_tidy = pd.melt(error_df, value_vars=list(error_df.columns.values),
                           var_name="estimate", value_name=metric_label)
        col_wrap = 2 if len(args.result_files) <= 2 else math.ceil(
            len(args.result_files) / 2.0)
        dist_grid = sns.FacetGrid(raw_tidy, col="estimate", col_wrap=col_wrap)
        dist_grid.map(sns.distplot, metric_label)  # fits=stats.gamma
        plot_collection.add_figure("histogram", dist_grid.fig)
        # box plot
        fig_box = plt.figure(figsize=figsize)
        ax = sns.boxplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
                         ax=fig_box.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("box_plot", fig_box)
        # violin plot
        fig_violin = plt.figure(figsize=figsize)
        ax = sns.violinplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
                            ax=fig_violin.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("violin_histogram", fig_violin)
        if args.plot:
            plot_collection.show()
        if args.save_plot:
            logging.debug(SEP)
            plot_collection.export(args.save_plot,
                                   confirm_overwrite=not args.no_warnings)
        if args.serialize_plot:
            logging.debug(SEP)
            plot_collection.serialize(args.serialize_plot,
                                      confirm_overwrite=not args.no_warnings)
def run(args):
    """Aggregate multiple evo result files into one DataFrame, print/export
    statistics tables and draw raw/stat/histogram/box/violin plots.

    NOTE(review): indentation below is reconstructed from a collapsed
    single-line source — confirm nesting against upstream evo main_res.
    NOTE(review): this variant hardcodes output paths under
    /home/kostas/report/... and the stem name "test"; not portable.

    :param args: argparse.Namespace from the evo_res CLI (result_files,
                 use_filenames, merge, plot/save_plot/serialize_plot, ...)
    """
    import sys
    import pandas as pd
    from evo.tools import log, user, settings
    from evo.tools.settings import SETTINGS
    # wide console output for the statistics tables printed below
    pd.options.display.width = 80
    pd.options.display.max_colwidth = 20
    log.configure_logging(args.verbose, args.silent, args.debug,
                          local_logfile=args.logfile)
    if args.debug:
        import pprint
        arg_dict = {arg: getattr(args, arg) for arg in vars(args)}
        # logger.debug("main_parser config:\n{}\n".format(
        #     pprint.pformat(arg_dict)))
    # one column per result file; rows are the multi-index
    # ("info" | "stats" | "np_arrays", field)
    df = load_results_as_dataframe(args.result_files, args.use_filenames,
                                   args.merge)
    keys = df.columns.values.tolist()
    if SETTINGS.plot_usetex:
        # escape underscores so TeX rendering does not treat them as subscripts
        keys = [key.replace("_", "\\_") for key in keys]
        df.columns = keys
    duplicates = [x for x in keys if keys.count(x) > 1]
    if duplicates:
        # NOTE(review): error message is commented out — exits silently
        # logger.error("Values of 'est_name' must be unique - duplicates: {}\n"
        #              "Try using the --use_filenames option to use filenames "
        #              "for labeling instead.".format(", ".join(duplicates)))
        sys.exit(1)
    # derive a common index type if possible - preferably timestamps
    common_index = None
    time_indices = ["timestamps", "seconds_from_start", "sec_from_start"]
    if args.use_rel_time:
        # drop absolute timestamps so a relative-time index is preferred
        del time_indices[0]
    for idx in time_indices:
        if idx not in df.loc["np_arrays"].index:
            continue
        if df.loc["np_arrays", idx].isnull().values.any():
            continue
        else:
            common_index = idx
            break
    # build error_df (raw values) according to common_index
    if common_index is None:
        # use a non-timestamp index
        error_df = pd.DataFrame(df.loc["np_arrays", "error_array"].tolist(),
                                index=keys).T
    else:
        error_df = pd.DataFrame()
        for key in keys:
            new_error_df = pd.DataFrame(
                {key: df.loc["np_arrays", "error_array"][key]},
                index=df.loc["np_arrays", common_index][key])
            duplicates = new_error_df.index.duplicated(keep="first")
            if any(duplicates):
                # NOTE(review): warning is commented out — duplicates are
                # dropped silently
                # logger.warning(
                #     "duplicate indices in error array of {} - "
                #     "keeping only first occurrence of duplicates".format(key))
                new_error_df = new_error_df[~duplicates]
            error_df = pd.concat([error_df, new_error_df], axis=1)
    # check titles
    first_title = df.loc["info",
                         "title"][0] if not args.ignore_title else ""
    first_file = args.result_files[0]
    if not args.no_warnings and not args.ignore_title:
        # warn (interactively) when result files carry different metric titles
        checks = df.loc["info", "title"] != first_title
        for i, differs in enumerate(checks):
            if not differs:
                continue
            else:
                mismatching_title = df.loc["info", "title"][i]
                mismatching_file = args.result_files[i]
                # logger.debug(SEP)
                # logger.warning(
                #     CONFLICT_TEMPLATE.format(first_file, first_title,
                #                              mismatching_title,
                #                              mismatching_file))
                if not user.confirm(
                        "You can use --ignore_title to just aggregate data.\n"
                        "Go on anyway? - enter 'y' or any other key to exit"):
                    sys.exit()
    # logger.debug(SEP)
    # logger.debug("Aggregated dataframe:\n{}".format(
    #     df.to_string(line_width=80)))
    # show a statistics overview
    # logger.debug(SEP)
    # if not args.ignore_title:
    #     logger.info("\n" + first_title + "\n\n")
    # logger.info(df.loc["stats"].T.to_string(line_width=80) + "\n")
    if args.save_table:
        # export either the raw error arrays or the info/stats sub-frame
        # logger.debug(SEP)
        if args.no_warnings or user.check_and_confirm_overwrite(
                args.save_table):
            if SETTINGS.table_export_data.lower() == "error_array":
                data = error_df
            elif SETTINGS.table_export_data.lower() in ("info", "stats"):
                data = df.loc[SETTINGS.table_export_data.lower()]
            else:
                raise ValueError(
                    "unsupported export data specifier: {}".format(
                        SETTINGS.table_export_data))
            if SETTINGS.table_export_transpose:
                data = data.T
            if SETTINGS.table_export_format == "excel":
                # NOTE(review): writer.save() then writer.close() — save()
                # is deprecated/removed in newer pandas ExcelWriter versions
                writer = pd.ExcelWriter(args.save_table)
                data.to_excel(writer)
                writer.save()
                writer.close()
            else:
                # dispatch to DataFrame.to_<fmt> (e.g. to_csv, to_json)
                getattr(data,
                        "to_" + SETTINGS.table_export_format)(args.save_table)
            # logger.debug("{} table saved to: {}".format(
            #     SETTINGS.table_export_format, args.save_table))
    if args.plot or args.save_plot or args.serialize_plot:
        # check if data has NaN "holes" due to different indices
        inconsistent = error_df.isnull().values.any()
        # if inconsistent and common_index != "timestamps" and not args.no_warnings:
        #     logger.debug(SEP)
        #     logger.warning("Data lengths/indices are not consistent, "
        #                    "raw value plot might not be correctly aligned")
        from evo.tools import plot
        import matplotlib.pyplot as plt
        import seaborn as sns
        import math
        # use default plot settings
        figsize = (SETTINGS.plot_figsize[0], SETTINGS.plot_figsize[1])
        use_cmap = SETTINGS.plot_multi_cmap.lower() != "none"
        colormap = SETTINGS.plot_multi_cmap if use_cmap else None
        linestyles = ["-o" for x in args.result_files
                      ] if args.plot_markers else None
        # labels according to first dataset
        if "xlabel" in df.loc["info"].index and not df.loc[
                "info", "xlabel"].isnull().values.any():
            index_label = df.loc["info", "xlabel"][0]
        else:
            index_label = "$t$ (s)" if common_index else "index"
        metric_label = df.loc["info", "label"][0]
        plot_collection = plot.PlotCollection(first_title)
        # raw value plot
        fig_raw = plt.figure(figsize=figsize)
        # handle NaNs from concat() above
        error_df.interpolate(method="index").plot(
            ax=fig_raw.gca(), colormap=colormap, style=linestyles,
            title=first_title, alpha=SETTINGS.plot_trajectory_alpha)
        plt.xlabel(index_label)
        plt.ylabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("raw", fig_raw)
        # NOTE(review): hardcoded output stem and user-specific path below
        name = "test"
        plt.savefig("/home/kostas/report/figures/appendix_stats/" + name +
                    "_raw.png", dpi=300, format='png', bbox_inches='tight')
        # statistics plot
        if SETTINGS.plot_statistics:
            fig_stats = plt.figure(figsize=figsize)
            include = df.loc["stats"].index.isin(SETTINGS.plot_statistics)
            if any(include):
                df.loc["stats"][include].plot(kind="barh", ax=fig_stats.gca(),
                                              colormap=colormap, stacked=False)
                plt.xlabel(metric_label)
                plt.legend(frameon=True)
            # df.loc["stats"][include].plot(kind="barh", ax=axarr[0,1],
            #     colormap=colormap, stacked=False)
            plt.savefig("/home/kostas/report/figures/appendix_stats/" + name +
                        "_statistics.png", dpi=300, format='png',
                        bbox_inches='tight')
            plot_collection.add_figure("stats", fig_stats)
        # grid of distribution plots
        raw_tidy = pd.melt(error_df, value_vars=list(error_df.columns.values),
                           var_name="estimate", value_name=metric_label)
        col_wrap = 2 if len(args.result_files) <= 2 else math.ceil(
            len(args.result_files) / 2.0)
        dist_grid = sns.FacetGrid(raw_tidy, col="estimate", col_wrap=col_wrap)
        # TODO: see issue #98
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            dist_grid.map(sns.distplot, metric_label)  # fits=stats.gamma
        plt.savefig("/home/kostas/report/figures/appendix_stats/" + name +
                    "_distributions.png", dpi=300, format='png',
                    bbox_inches='tight')
        plot_collection.add_figure("histogram", dist_grid.fig)
        # box plot
        fig_box = plt.figure(figsize=figsize)
        ax = sns.boxplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
                         ax=fig_box.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plt.savefig("/home/kostas/report/figures/appendix_stats/" + name +
                    "_boxes.png", dpi=300, format='png', bbox_inches='tight')
        plot_collection.add_figure("box_plot", fig_box)
        # violin plot
        fig_violin = plt.figure(figsize=figsize)
        ax = sns.violinplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
                            ax=fig_violin.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("violin_histogram", fig_violin)
        if args.plot:
            plot_collection.show()
        if args.save_plot:
            # logger.debug(SEP)
            plot_collection.export(args.save_plot,
                                   confirm_overwrite=not args.no_warnings)
        if args.serialize_plot:
            # logger.debug(SEP)
            plot_collection.serialize(args.serialize_plot,
                                      confirm_overwrite=not args.no_warnings)
    if args.latex_plot:
        # Experimental 2x2 report figure variant of the plotting above.
        # NOTE(review): `plot_collection` is NEVER assigned in this branch
        # (its creation is commented out below), yet plot_collection.show()/
        # export()/serialize() are called at the end — NameError unless the
        # args.plot/save_plot/serialize_plot branch above ran first.
        # check if data has NaN "holes" due to different indices
        inconsistent = error_df.isnull().values.any()
        # if inconsistent and common_index != "timestamps" and not args.no_warnings:
        #     logger.debug(SEP)
        #     logger.warning("Data lengths/indices are not consistent, "
        #                    "raw value plot might not be correctly aligned")
        from evo.tools import plot
        import matplotlib.pyplot as plt
        import seaborn as sns
        import math
        # use default plot settings
        # figsize = (SETTINGS.plot_figsize[0], SETTINGS.plot_figsize[1])
        use_cmap = SETTINGS.plot_multi_cmap.lower() != "none"
        colormap = SETTINGS.plot_multi_cmap if use_cmap else None
        linestyles = ["-o" for x in args.result_files
                      ] if args.plot_markers else None
        # labels according to first dataset
        if "xlabel" in df.loc["info"].index and not df.loc[
                "info", "xlabel"].isnull().values.any():
            index_label = df.loc["info", "xlabel"][0]
        else:
            index_label = "$t$ (s)" if common_index else "index"
        metric_label = df.loc["info", "label"][0]
        plt.style.use('seaborn-whitegrid')
        # 2x2 grid sized for a LaTeX report column
        lfig, axes = plt.subplots(2, 2, figsize=(6.125, 4))
        # plot_collection = plot.PlotCollection(first_title)
        # raw value plot
        # fig_raw = plt.figure(figsize=figsize)
        # handle NaNs from concat() above
        # error_df.interpolate(method="index").plot(
        #     ax=fig_raw.gca(), colormap=colormap, style=linestyles,
        #     title=first_title, alpha=SETTINGS.plot_trajectory_alpha)
        error_df.interpolate(method="index").plot(
            ax=axes[0, 0], colormap=colormap, style=linestyles,
            title="Absolute Position Error",
            alpha=SETTINGS.plot_trajectory_alpha, legend=False)
        plt.xlabel(index_label)
        plt.ylabel(metric_label)
        # plt.legend(frameon=True)
        # plot_collection.add_figure("raw", fig_raw)
        name = "test"
        # plt.savefig("/home/kostas/report/figures/appendix_stats/"+name+"_raw.png",
        #             dpi = 300, format='png', bbox_inches='tight')
        # statistics plot
        if SETTINGS.plot_statistics:
            # fig_stats = plt.figure(figsize=figsize)
            include = df.loc["stats"].index.isin(SETTINGS.plot_statistics)
            if any(include):
                df.loc["stats"][include].plot(kind="barh", ax=axes[0, 1],
                                              colormap=colormap,
                                              stacked=False, legend=False)
                plt.xlabel(metric_label)
                # plt.legend(frameon=True)
            # df.loc["stats"][include].plot(kind="barh", ax=axarr[0,1],
            #     colormap=colormap, stacked=False)
            # plot_collection.add_figure("stats", fig_stats)
        # grid of distribution plots
        raw_tidy = pd.melt(error_df, value_vars=list(error_df.columns.values),
                           var_name="estimate", value_name=metric_label)
        col_wrap = 2 if len(args.result_files) <= 2 else math.ceil(
            len(args.result_files) / 2.0)
        # axes[1,0] = sns.FacetGrid(raw_tidy, col="estimate", col_wrap=col_wrap)
        # # TODO: see issue #98
        # import warnings
        # with warnings.catch_warnings():
        #     warnings.simplefilter("ignore")
        #     dist_grid.map(sns.distplot, metric_label)  # fits=stats.gamma
        # plot_collection.add_figure("histogram", dist_grid.fig)
        # box plot
        # fig_box = plt.figure(figsize=figsize)
        # ax = sns.boxplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
        #     ax=fig_box.gca())
        # NOTE(review): assigning into axes[1, 1] replaces the Axes reference
        # in the local array only; boxplot draws on the current axes
        axes[1, 1] = sns.boxplot(x=raw_tidy["estimate"],
                                 y=raw_tidy[metric_label])
        # plt.waitforbuttonpress()
        # NOTE(review): hardcoded user-specific output path
        plt.savefig("/home/kostas/results/test.png", dpi=300, format='png',
                    bbox_inches='tight')
        # plt.savefig("/home/kostas/report/figures/appendix_stats/test.png",
        #             dpi = 300, format='png', bbox_inches='tight')
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        # plt.savefig("/home/kostas/report/figures/appendix_stats/"+name+"_boxes.png",
        #             dpi = 300, format='png', bbox_inches='tight')
        # plot_collection.add_figure("box_plot", fig_box)
        # violin plot
        # fig_violin = plt.figure(figsize=figsize)
        # ax = sns.violinplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
        #     ax=fig_violin.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        # plot_collection.add_figure("violin_histogram", fig_violin)
        if args.plot:
            plot_collection.show()
        if args.save_plot:
            # logger.debug(SEP)
            plot_collection.export(args.save_plot,
                                   confirm_overwrite=not args.no_warnings)
        if args.serialize_plot:
            # logger.debug(SEP)
            plot_collection.serialize(args.serialize_plot,
                                      confirm_overwrite=not args.no_warnings)
def main():
    """CLI entry point of the evo configuration tool (legacy `logging`-based
    variant): subcommands show / set / generate / reset.

    NOTE(review): indentation reconstructed from a collapsed single-line
    source — confirm branch nesting against upstream evo main_config.
    """
    import argcomplete
    basic_desc = "crappy configuration tool"
    lic = "(c) [email protected]"
    shared_parser = argparse.ArgumentParser(add_help=False)
    # NOTE(review): store_false with default=True means passing --no_color
    # sets no_color to False — inverted flag semantics; verify intent
    shared_parser.add_argument("--no_color", help="don't color output",
                               action="store_false", default=True)
    main_parser = argparse.ArgumentParser(description="%s %s" %
                                          (basic_desc, lic))
    sub_parsers = main_parser.add_subparsers(dest="subcommand")
    sub_parsers.required = True
    show_parser = sub_parsers.add_parser(
        "show", description="show configuration - %s" % lic,
        parents=[shared_parser])
    show_parser.add_argument(
        "config",
        help="optional config file to display (default: package settings)",
        nargs='?')
    show_parser.add_argument("--brief", help="show only the .json data",
                             action="store_true")
    set_parser = sub_parsers.add_parser(
        "set", description=SET_HELP, parents=[shared_parser],
        formatter_class=argparse.RawTextHelpFormatter)
    set_parser.add_argument(
        "-c", "--config",
        help="optional config file (default: package settings)", default=None)
    set_parser.add_argument("-m", "--merge",
                            help="other config file to merge in (priority)",
                            default=None)
    gen_parser = sub_parsers.add_parser(
        "generate", description=GENERATE_HELP, parents=[shared_parser],
        formatter_class=argparse.RawTextHelpFormatter)
    gen_parser.add_argument("-o", "--out",
                            help="path for config file to generate")
    reset_parser = sub_parsers.add_parser(
        "reset", description="reset package settings - %s" % lic,
        parents=[shared_parser])
    argcomplete.autocomplete(main_parser)
    # unknown args are collected and forwarded to set/generate as parameters
    args, other_args = main_parser.parse_known_args()
    settings.configure_logging()
    colorama.init()
    config = settings.DEFAULT_PATH
    if hasattr(args, "config"):
        if args.config:
            config = args.config
    if args.subcommand == "show":
        if not args.brief and not args.config:
            logging.info(settings.DEFAULT_SETTINGS_HELP)
        logging.info(SEP + "\n" + config + "\n" + SEP)
        show(config, colored=args.no_color)
        if config == settings.DEFAULT_PATH and not args.brief:
            logging.info(SEP + "\nsee text above for parameter descriptions")
    elif args.subcommand == "set":
        if not os.access(config, os.W_OK):
            logging.info("no permission to modify " + config)
            sys.exit()
        if other_args or args.merge:
            logging.info(SEP + "\nold configuration:\n" + SEP)
            show(config, colored=args.no_color)
            try:
                set_cfg(config, other_args)
            except ConfigError as e:
                logging.error(e)
                sys.exit(1)
            if args.merge:
                merge_json_union(config, args.merge)
            logging.info(SEP + "\nnew configuration:\n" + SEP)
            show(config, colored=args.no_color)
        else:
            logging.info("no configuration parameters given (see --help)")
    elif args.subcommand == "generate":
        if other_args:
            logging.info(SEP + "\nparsed by argparse:\n" + str(other_args))
            logging.info(
                SEP + "\nWARNING:\n" +
                "make sure you use the 'long-style' -- options (e.g. --plot) if possible\n"
                + "and no combined 'short' - flags, (e.g. -avp)\n" + SEP)
            data = generate(other_args)
            log_info_dict_json(data, colored=args.no_color)
            if args.out and user.check_and_confirm_overwrite(args.out):
                with open(args.out, 'w') as out:
                    out.write(json.dumps(data, indent=4, sort_keys=True))
            elif not args.out:
                logging.info(
                    SEP +
                    "\nno output file specified (-o / --out) - doing nothing\n"
                    + SEP)
        else:
            logging.info("no command line arguments given (see --help)")
    elif args.subcommand == "reset":
        if not os.access(config, os.W_OK):
            # NOTE(review): missing space before the path:
            # "no permission to modify" + config (cf. the set branch above)
            logging.info("no permission to modify" + config)
            sys.exit()
        if user.confirm(
                "reset the package settings to the default settings? (y/n)"):
            reset_pkg_settings(settings.DEFAULT_PATH)
            logging.info(SEP + "\npackage settings after reset:\n" + SEP)
            show(settings.DEFAULT_PATH, colored=args.no_color)
def run(args):
    """Aggregate evo result files, log a statistics overview, optionally
    export a table, and draw raw/stat/histogram/box/violin plots (logger-based
    variant using pandas_bridge for table export).

    NOTE(review): indentation reconstructed from a collapsed single-line
    source — confirm nesting against upstream evo main_res.

    :param args: argparse.Namespace from the evo_res CLI
    """
    import sys
    import pandas as pd
    from evo.tools import log, user, settings, pandas_bridge
    from evo.tools.settings import SETTINGS
    # wide console output for the statistics tables printed below
    pd.options.display.width = 80
    pd.options.display.max_colwidth = 20
    log.configure_logging(args.verbose, args.silent, args.debug,
                          local_logfile=args.logfile)
    if args.debug:
        import pprint
        arg_dict = {arg: getattr(args, arg) for arg in vars(args)}
        logger.debug("main_parser config:\n{}\n".format(
            pprint.pformat(arg_dict)))
    # one column per result file; rows are the multi-index
    # ("info" | "stats" | "np_arrays", field)
    df = load_results_as_dataframe(args.result_files, args.use_filenames,
                                   args.merge)
    keys = df.columns.values.tolist()
    if SETTINGS.plot_usetex:
        # escape underscores so TeX rendering does not treat them as subscripts
        keys = [key.replace("_", "\\_") for key in keys]
        df.columns = keys
    duplicates = [x for x in keys if keys.count(x) > 1]
    if duplicates:
        logger.error("Values of 'est_name' must be unique - duplicates: {}\n"
                     "Try using the --use_filenames option to use filenames "
                     "for labeling instead.".format(", ".join(duplicates)))
        sys.exit(1)
    # derive a common index type if possible - preferably timestamps
    common_index = None
    time_indices = ["timestamps", "seconds_from_start", "sec_from_start"]
    if args.use_rel_time:
        # drop absolute timestamps so a relative-time index is preferred
        del time_indices[0]
    for idx in time_indices:
        if idx not in df.loc["np_arrays"].index:
            continue
        if df.loc["np_arrays", idx].isnull().values.any():
            continue
        else:
            common_index = idx
            break
    # build error_df (raw values) according to common_index
    if common_index is None:
        # use a non-timestamp index
        error_df = pd.DataFrame(df.loc["np_arrays", "error_array"].tolist(),
                                index=keys).T
    else:
        error_df = pd.DataFrame()
        for key in keys:
            new_error_df = pd.DataFrame(
                {key: df.loc["np_arrays", "error_array"][key]},
                index=df.loc["np_arrays", common_index][key])
            duplicates = new_error_df.index.duplicated(keep="first")
            if any(duplicates):
                logger.warning(
                    "duplicate indices in error array of {} - "
                    "keeping only first occurrence of duplicates".format(key))
                new_error_df = new_error_df[~duplicates]
            error_df = pd.concat([error_df, new_error_df], axis=1)
    # check titles
    first_title = df.loc["info",
                         "title"][0] if not args.ignore_title else ""
    first_file = args.result_files[0]
    if not args.no_warnings and not args.ignore_title:
        # warn (interactively) when result files carry different metric titles
        checks = df.loc["info", "title"] != first_title
        for i, differs in enumerate(checks):
            if not differs:
                continue
            else:
                mismatching_title = df.loc["info", "title"][i]
                mismatching_file = args.result_files[i]
                logger.debug(SEP)
                logger.warning(
                    CONFLICT_TEMPLATE.format(first_file, first_title,
                                             mismatching_title,
                                             mismatching_file))
                if not user.confirm(
                        "You can use --ignore_title to just aggregate data.\n"
                        "Go on anyway? - enter 'y' or any other key to exit"):
                    sys.exit()
    logger.debug(SEP)
    logger.debug("Aggregated dataframe:\n{}".format(
        df.to_string(line_width=80)))
    # show a statistics overview
    logger.debug(SEP)
    if not args.ignore_title:
        logger.info("\n" + first_title + "\n\n")
    logger.info(df.loc["stats"].T.to_string(line_width=80) + "\n")
    if args.save_table:
        logger.debug(SEP)
        if SETTINGS.table_export_data.lower() == "error_array":
            data = error_df
        elif SETTINGS.table_export_data.lower() in ("info", "stats"):
            data = df.loc[SETTINGS.table_export_data.lower()]
        else:
            raise ValueError("unsupported export data specifier: {}".format(
                SETTINGS.table_export_data))
        pandas_bridge.save_df_as_table(data, args.save_table,
                                       confirm_overwrite=not args.no_warnings)
    if args.plot or args.save_plot or args.serialize_plot:
        # check if data has NaN "holes" due to different indices
        inconsistent = error_df.isnull().values.any()
        if inconsistent and common_index != "timestamps" and not args.no_warnings:
            logger.debug(SEP)
            logger.warning("Data lengths/indices are not consistent, "
                           "raw value plot might not be correctly aligned")
        from evo.tools import plot
        import matplotlib.pyplot as plt
        import seaborn as sns
        import math
        # use default plot settings
        figsize = (SETTINGS.plot_figsize[0], SETTINGS.plot_figsize[1])
        use_cmap = SETTINGS.plot_multi_cmap.lower() != "none"
        colormap = SETTINGS.plot_multi_cmap if use_cmap else None
        linestyles = ["-o" for x in args.result_files
                      ] if args.plot_markers else None
        # labels according to first dataset
        if "xlabel" in df.loc["info"].index and not df.loc[
                "info", "xlabel"].isnull().values.any():
            index_label = df.loc["info", "xlabel"][0]
        else:
            index_label = "$t$ (s)" if common_index else "index"
        metric_label = df.loc["info", "label"][0]
        # NOTE(review): stray bare `print` expression — evaluates the builtin
        # and discards it (no-op in Python 3); leftover debug artifact
        print
        plot_collection = plot.PlotCollection(first_title)
        # raw value plot
        fig_raw = plt.figure(figsize=figsize)
        # handle NaNs from concat() above
        error_df.interpolate(method="index", limit_area="inside").plot(
            ax=fig_raw.gca(), colormap=colormap, style=linestyles,
            title=first_title, alpha=SETTINGS.plot_trajectory_alpha)
        plt.xlabel(index_label)
        plt.ylabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("raw", fig_raw)
        # statistics plot
        if SETTINGS.plot_statistics:
            fig_stats = plt.figure(figsize=figsize)
            include = df.loc["stats"].index.isin(SETTINGS.plot_statistics)
            if any(include):
                df.loc["stats"][include].plot(kind="barh", ax=fig_stats.gca(),
                                              colormap=colormap, stacked=False)
                plt.xlabel(metric_label)
                plt.legend(frameon=True)
            plot_collection.add_figure("stats", fig_stats)
        # grid of distribution plots
        raw_tidy = pd.melt(error_df, value_vars=list(error_df.columns.values),
                           var_name="estimate", value_name=metric_label)
        col_wrap = 2 if len(args.result_files) <= 2 else math.ceil(
            len(args.result_files) / 2.0)
        dist_grid = sns.FacetGrid(raw_tidy, col="estimate", col_wrap=col_wrap)
        # TODO: see issue #98
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            dist_grid.map(sns.distplot, metric_label)  # fits=stats.gamma
        plot_collection.add_figure("histogram", dist_grid.fig)
        ################### box plot ##############################
        fig_box = plt.figure(figsize=figsize)
        # NOTE(review): `algorithm` is computed but never used below
        algorithm = str(raw_tidy["estimate"])
        # hardcoded per-platform/per-algorithm seaborn palettes — experiment-
        # specific keys like "tx2/...", "nx/...", "xavier/..."
        mono_pal = {
            "tx2/vins-mono": "lightpink",
            "tx2/alvio": "coral",
            "tx2/rovio": "lightskyblue",
            "../d1": "white",
            "nx/vins-mono": "lightpink",
            "nx/alvio": "coral",
            "nx/rovio": "lightskyblue",
            "../d2": "white",
            "xavier/vins-mono": "lightpink",
            "xavier/alvio": "coral",
            "xavier/rovio": "lightskyblue"
        }
        tmp_pal = {"tx2/vins-fusion-gpu": "mediumpurple"}
        stereo_pal = {
            "tx2/orb2": "mediumseagreen",
            "tx2/vins-fusion-gpu": "mediumpurple",
            "tx2/msckf-vio": "khaki",
            "tx2/kimera": "indianred",
            "../d1": "white",
            "nx/vins-fusion": "plum",
            "nx/orb2": "mediumseagreen",
            "nx/vins-fusion-imu": "hotpink",
            "nx/vins-fusion-gpu": "mediumpurple",
            "nx/msckf-vio": "khaki",
            "nx/kimera": "indianred",
            "../d2": "white",
            "xavier/vins-fusion": "plum",
            "xavier/orb2": "mediumseagreen",
            "xavier/vins-fusion-imu": "hotpink",
            "xavier/vins-fusion-gpu": "mediumpurple",
            "xavier/msckf-vio": "khaki",
            "xavier/kimera": "indianred"
        }
        # print(algorithm)
        # NOTE(review): leftover debug print
        print("called")
        # sym='' hides outlier fliers
        ax = sns.boxplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
                         ax=fig_box.gca(), palette=stereo_pal, sym='')
        #color="blue")
        ax.set_xticklabels(
            labels=[item.get_text() for item in ax.get_xticklabels()],
            rotation=30)
        plot_collection.add_figure("box_plot", fig_box)
        ################### box plot ##############################
        # violin plot
        fig_violin = plt.figure(figsize=figsize)
        ax = sns.violinplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
                            ax=fig_violin.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("violin_histogram", fig_violin)
        if args.plot:
            plot_collection.show()
        if args.save_plot:
            logger.debug(SEP)
            plot_collection.export(args.save_plot,
                                   confirm_overwrite=not args.no_warnings)
        if args.serialize_plot:
            logger.debug(SEP)
            plot_collection.serialize(args.serialize_plot,
                                      confirm_overwrite=not args.no_warnings)
def main():
    """CLI entry point of the evo configuration tool (newer `logger`-based
    variant): subcommands show / set / generate / reset.

    NOTE(review): indentation reconstructed from a collapsed single-line
    source — confirm branch nesting against upstream evo main_config.
    """
    import argcomplete
    basic_desc = "crappy configuration tool"
    lic = "(c) [email protected]"
    shared_parser = argparse.ArgumentParser(add_help=False)
    shared_parser.add_argument("--no_color", help="don't color output",
                               action="store_true")
    main_parser = argparse.ArgumentParser(description="%s %s" %
                                          (basic_desc, lic))
    sub_parsers = main_parser.add_subparsers(dest="subcommand")
    sub_parsers.required = True
    show_parser = sub_parsers.add_parser(
        "show", description="show configuration - %s" % lic,
        parents=[shared_parser])
    show_parser.add_argument(
        "config",
        help="optional config file to display (default: package settings)",
        nargs='?')
    show_parser.add_argument("--brief", help="show only the .json data",
                             action="store_true")
    set_parser = sub_parsers.add_parser(
        "set", description=SET_HELP, parents=[shared_parser],
        formatter_class=argparse.RawTextHelpFormatter)
    # REMAINDER collects the "param value param value ..." tail for set_cfg
    set_parser.add_argument("params",
                            choices=list(DEFAULT_SETTINGS_DICT.keys()),
                            nargs=argparse.REMAINDER,
                            help="parameters to set")
    set_parser.add_argument(
        "-c", "--config",
        help="optional config file (default: package settings)", default=None)
    set_parser.add_argument("-m", "--merge",
                            help="other config file to merge in (priority)",
                            default=None)
    set_parser.add_argument("--soft", help="do a soft-merge (no overwriting)",
                            action="store_true")
    gen_parser = sub_parsers.add_parser(
        "generate", description=GENERATE_HELP, parents=[shared_parser],
        formatter_class=argparse.RawTextHelpFormatter)
    gen_parser.add_argument("-o", "--out",
                            help="path for config file to generate")
    reset_parser = sub_parsers.add_parser(
        "reset", description="reset package settings - %s" % lic,
        parents=[shared_parser])
    reset_parser.add_argument("-y", help="acknowledge automatically",
                              action="store_true")
    argcomplete.autocomplete(main_parser)
    if len(sys.argv) > 1 and sys.argv[1] == "set":
        args, other_args = main_parser.parse_known_args()
        # for `set`, re-derive the parameter names directly from argv,
        # dropping anything that looks like an option flag
        other_args = [arg for arg in sys.argv[2:] if not arg.startswith('-')]
    else:
        args, other_args = main_parser.parse_known_args()
    log.configure_logging()
    colorama.init()
    config = settings.DEFAULT_PATH
    if hasattr(args, "config"):
        if args.config:
            config = args.config
    if args.subcommand == "show":
        if not args.brief and not args.config:
            # colorized per-parameter documentation from the defaults dict
            style = Style.BRIGHT if not args.no_color else Style.NORMAL
            doc_str = "\n".join(
                "{0}{1}{2}:\n{3}\n".format(style, k, Style.RESET_ALL, v[1])
                for k, v in sorted(DEFAULT_SETTINGS_DICT_DOC.items()))
            logger.info(doc_str)
            logger.info("{0}\n{1}\n{0}".format(SEP, config))
        show(config, colored=not args.no_color)
        if config == settings.DEFAULT_PATH and not args.brief:
            logger.info(SEP + "\nSee text above for parameter descriptions.")
    elif args.subcommand == "set":
        if not os.access(config, os.W_OK):
            logger.error("No permission to modify " + config)
            sys.exit()
        if other_args or args.merge:
            logger.info("{0}\nOld configuration:\n{0}".format(SEP))
            show(config, colored=not args.no_color)
            try:
                set_cfg(config, other_args)
            except ConfigError as e:
                logger.error(e)
                sys.exit(1)
            if args.merge:
                merge_json_union(config, args.merge, args.soft)
            logger.info(SEP + "\nNew configuration:\n" + SEP)
            show(config, colored=not args.no_color)
        else:
            logger.error("No configuration parameters given (see --help).")
    elif args.subcommand == "generate":
        if other_args:
            logger.info("{0}\nParsed by argparse:\n{1}\n"
                        "{0}\nWARNING:\n"
                        "Make sure you use the 'long-style' -- options (e.g. --plot) if possible\n"
                        "and no combined short '-' flags, (e.g. -vp)\n{0}".format(SEP, other_args))
            data = generate(other_args)
            log_info_dict_json(data, colored=not args.no_color)
            if args.out and user.check_and_confirm_overwrite(args.out):
                with open(args.out, 'w') as out:
                    out.write(json.dumps(data, indent=4, sort_keys=True))
            elif not args.out:
                logger.warning("\n(-o | --out) not specified - saving nothing")
        else:
            logger.error("No command line arguments given (see --help)")
    elif args.subcommand == "reset":
        if not os.access(config, os.W_OK):
            # NOTE(review): missing space before the path:
            # "No permission to modify" + config (cf. the set branch above)
            logger.error("No permission to modify" + config)
            sys.exit()
        if args.y or user.confirm(
                "Reset the package settings to the default settings? (y/n)"):
            settings.reset()
            logger.info("{0}\nPackage settings after reset:\n{0}".format(SEP))
            show(settings.DEFAULT_PATH, colored=not args.no_color)
# NOTE(review): this fragment appears to be the body of a launch()-style
# crash-handling wrapper around main_plot's run() (cf. the launch() variants
# earlier in this file) whose `def` header is not visible here — confirm the
# enclosing scope before relying on this formatting. `args`, `run`, `logger`
# and `merge_config` must come from that enclosing scope.
args = merge_config(args)
import sys
from evo.tools import settings
from evo import EvoException
try:
    run(args)
except SystemExit as e:
    # propagate the child's exit code unchanged
    sys.exit(e.code)
except EvoException as e:
    # expected, user-facing errors: message only, no traceback
    logger.error(e.message)
    sys.exit(1)
except:
    # NOTE(review): bare except — catches everything (incl. KeyboardInterrupt)
    # to log the crash and point the user at the logfile
    logger.exception("Unhandled error in main_plot")
    print("")
    err_msg = "evo module main_plot crashed"
    if settings.SETTINGS.logfile_enabled:
        err_msg += " - see " + settings.DEFAULT_LOGFILE_PATH
    else:
        err_msg += " - no logfile written (disabled)"
    logger.error(err_msg)
    from evo.tools import user
    if not args.no_warnings:
        if settings.SETTINGS.logfile_enabled and user.confirm(
                "Open logfile? (y/n)"):
            import webbrowser
            webbrowser.open(settings.DEFAULT_LOGFILE_PATH)
    sys.exit(1)
def main():
    """CLI entry point of the evo package tool: `pkg` prints package metadata
    (version, license, paths, logfile) and `cat_log` pipes stdin / a message
    into the global evo logfile or dumps the logfile to stdout.

    NOTE(review): indentation reconstructed from a collapsed single-line
    source — confirm branch nesting against upstream evo main.
    """
    import sys
    import argparse
    import argcomplete
    main_parser = argparse.ArgumentParser()
    shared_parser = argparse.ArgumentParser(add_help=False)
    sub_parsers = main_parser.add_subparsers(dest="subcommand")
    sub_parsers.required = True
    pkg_parser = sub_parsers.add_parser(
        "pkg", description="show infos of the package",
        parents=[shared_parser])
    pkg_parser.add_argument("--info", help="show the package description",
                            action="store_true")
    pkg_parser.add_argument("--version", help="print the package version",
                            action="store_true")
    pkg_parser.add_argument("--pyversion", help="print the Python version",
                            action="store_true")
    pkg_parser.add_argument("--license", help="print the package license",
                            action="store_true")
    pkg_parser.add_argument("--location", help="print the package path",
                            action="store_true")
    pkg_parser.add_argument("--logfile", help="print the logfile path",
                            action="store_true")
    pkg_parser.add_argument("--open_log", help="open the package logfile",
                            action="store_true")
    pkg_parser.add_argument("--clear_log", help="clear package logfile",
                            action="store_true")
    cat_parser = sub_parsers.add_parser(
        "cat_log", description="pipe stdin to global evo logfile"
        " or print logfile to stdout (if no stdin)", parents=[shared_parser])
    cat_parser.add_argument("-l", "--loglevel",
                            help="loglevel of the message", default="info",
                            choices=["error", "warning", "info", "debug"])
    cat_parser.add_argument("-m", "--message",
                            help="explicit message instead of pipe")
    cat_parser.add_argument("-s", "--source",
                            help="source name to use for the log message")
    cat_parser.add_argument("--clear_log",
                            help="clear logfile before exiting",
                            action="store_true")
    argcomplete.autocomplete(main_parser)
    if len(sys.argv[1:]) == 0:
        sys.argv.extend(["pkg", "--info"])  # cheap trick because YOLO
    args = main_parser.parse_args()
    # suppress the trailing newline when output is piped (machine-readable)
    line_end = "\n" if sys.stdout.isatty() else ""
    if args.subcommand == "pkg":
        if not len(sys.argv) > 2:
            # subcommand given but no flag: show help and fail
            pkg_parser.print_help()
            sys.exit(1)
        if args.license:
            # NOTE(review): file handle from open() is never closed explicitly
            print(open(os.path.join(PACKAGE_BASE_PATH, "LICENSE")).read())
        if args.info:
            main_parser.print_usage()
            print(DESC)
        if args.version:
            print(__version__, end=line_end)
        if args.pyversion:
            import platform as pf
            print(pf.python_version(), end=line_end)
        if args.location:
            print(PACKAGE_BASE_PATH, end=line_end)
        if args.logfile or args.open_log:
            print(settings.GLOBAL_LOGFILE_PATH, end=line_end)
            if not os.path.exists(settings.GLOBAL_LOGFILE_PATH):
                print(
                    "no logfile found - run: "
                    "evo_config set global_logfile_enabled", end=line_end)
                sys.exit(1)
            if args.open_log:
                import webbrowser
                webbrowser.open(settings.GLOBAL_LOGFILE_PATH)
        if args.clear_log:
            from evo.tools import user
            if user.confirm("clear logfile? (y/n)"):
                # opening with mode='w' truncates the file; handle is not
                # closed explicitly
                open(settings.GLOBAL_LOGFILE_PATH, mode='w')
    elif args.subcommand == "cat_log":
        if os.name == "nt":
            print("cat_log feature not available on Windows")
            sys.exit(1)
        if not args.message and sys.stdin.isatty():
            # no piped input and no -m: dump the existing logfile instead
            if not os.path.exists(settings.GLOBAL_LOGFILE_PATH):
                print(
                    "no logfile found - run: "
                    "evo_config set global_logfile_enabled", end=line_end)
            else:
                print(open(settings.GLOBAL_LOGFILE_PATH).read(), end="")
        elif not settings.SETTINGS.global_logfile_enabled:
            print("logfile disabled", end=line_end)
            sys.exit(1)
        else:
            import logging
            logger = logging.getLogger(__name__)
            from evo.tools import log
            file_fmt = log.DEFAULT_LONG_FMT
            if args.source:
                # replace the module/function/line part of the log format
                # with the user-supplied source tag
                file_fmt = file_fmt.replace(
                    "%(module)s.%(funcName)s():%(lineno)s", args.source)
            log.configure_logging(silent=True, file_fmt=file_fmt)
            if not args.message:
                msg = sys.stdin.read()
            else:
                msg = args.message
            # dispatch to logger.error/warning/info/debug by name
            getattr(logger, args.loglevel)(msg)
        if args.clear_log:
            # truncate the logfile on the way out (handle not closed)
            open(settings.GLOBAL_LOGFILE_PATH, mode='w')