def load_results_as_dataframe(result_files: typing.Iterable[str],
                              use_filenames: bool = False,
                              merge: bool = False) -> pd.DataFrame:
    """Load evo result files into a single pandas DataFrame.

    :param result_files: paths of the result files to load
    :param use_filenames: label each result's columns with its file path
        instead of the result's own name
    :param merge: merge all results into a single result before conversion
    :return: DataFrame with one column block per result
        (or a single block for the merged result)
    """
    if merge:
        results = [file_interface.load_res_file(f) for f in result_files]
        return pandas_bridge.result_to_df(merge_results(results))
    # Collect the per-file frames first and concatenate once - the original
    # pd.concat inside the loop copied the growing frame on every iteration
    # (quadratic). Behavior is identical, including the empty-input case.
    frames = []
    for result_file in result_files:
        result = file_interface.load_res_file(result_file)
        name = result_file if use_filenames else None
        frames.append(pandas_bridge.result_to_df(result, name))
    if not frames:
        # pd.concat([]) raises; the original returned an empty DataFrame here.
        return pd.DataFrame()
    return pd.concat(frames, axis="columns")
def load_results_as_dataframe(result_files, use_filenames=False, merge=False):
    """Load evo result files into a single pandas DataFrame.

    NOTE(review): this redefines ``load_results_as_dataframe`` - if both
    definitions live in the same module, this one shadows the earlier,
    type-annotated variant. Consider keeping only one.

    :param result_files: paths of the result files to load
    :param use_filenames: label each result's columns with its file path
        instead of the result's own name
    :param merge: merge all results into a single result before conversion
    :return: DataFrame with one column block per result
        (or a single block for the merged result)
    """
    import pandas as pd
    from evo.tools import pandas_bridge
    from evo.tools import file_interface
    if merge:
        from evo.core.result import merge_results
        results = [file_interface.load_res_file(f) for f in result_files]
        return pandas_bridge.result_to_df(merge_results(results))
    # Build all frames first and concatenate once instead of repeatedly
    # concatenating onto a growing DataFrame (quadratic copying).
    frames = []
    for result_file in result_files:
        result = file_interface.load_res_file(result_file)
        name = result_file if use_filenames else None
        frames.append(pandas_bridge.result_to_df(result, name))
    # Preserve the original empty-input behavior (empty DataFrame).
    return pd.concat(frames, axis="columns") if frames else pd.DataFrame()
def run(args):
    """Entry point of the result-comparison command.

    Loads the result files given in ``args.result_files`` into one
    DataFrame, prints a statistics overview, optionally exports a table
    (``args.save_table``) and optionally creates raw/statistics/histogram/
    box/violin plots (``args.plot`` / ``args.save_plot`` /
    ``args.serialize_plot``).

    :param args: argparse namespace from the main parser; fields used here:
        result_files, use_filenames, use_rel_time, no_warnings, save_table,
        plot, save_plot, serialize_plot, plot_markers, verbose, silent, debug
    """
    import sys
    import logging
    import pandas as pd
    from evo.tools import file_interface, user, settings, pandas_bridge
    from evo.tools.settings import SETTINGS

    pd.options.display.width = 80
    pd.options.display.max_colwidth = 20

    settings.configure_logging(args.verbose, args.silent, args.debug)
    if args.debug:
        import pprint
        arg_dict = {arg: getattr(args, arg) for arg in vars(args)}
        logging.debug("main_parser config:\n{}\n".format(
            pprint.pformat(arg_dict)))

    # aggregate all results column-wise into one DataFrame,
    # labeled either by the result's est_name or by the file path
    df = pd.DataFrame()
    for result_file in args.result_files:
        result = file_interface.load_res_file(result_file)
        name = result_file if args.use_filenames else None
        df = pd.concat([df, pandas_bridge.result_to_df(result, name)],
                       axis="columns")

    keys = df.columns.values.tolist()
    if SETTINGS.plot_usetex:
        # escape underscores so LaTeX rendering doesn't treat them as
        # subscript markers
        keys = [key.replace("_", "\\_") for key in keys]
        df.columns = keys
    duplicates = [x for x in keys if keys.count(x) > 1]
    if duplicates:
        logging.error("Values of 'est_name' must be unique - duplicates: {}\n"
                      "Try using the --use_filenames option to use filenames "
                      "for labeling instead.".format(", ".join(duplicates)))
        sys.exit(1)

    # derive a common index type if possible - preferably timestamps
    common_index = None
    time_indices = ["timestamps", "seconds_from_start", "sec_from_start"]
    if args.use_rel_time:
        # drop absolute timestamps so a relative time index is preferred
        del time_indices[0]
    for idx in time_indices:
        if idx not in df.loc["np_arrays"].index:
            continue
        if df.loc["np_arrays", idx].isnull().values.any():
            # at least one result lacks this index type - can't use it
            continue
        else:
            common_index = idx
            break

    # build error_df (raw values) according to common_index
    if common_index is None:
        # use a non-timestamp index
        error_df = pd.DataFrame(df.loc["np_arrays", "error_array"].tolist(),
                                index=keys).T
    else:
        # one column per result, indexed by its own time array;
        # concat aligns them on the union of indices (may introduce NaNs)
        error_df = pd.DataFrame()
        for key in keys:
            new_error_df = pd.DataFrame(
                {key: df.loc["np_arrays", "error_array"][key]},
                index=df.loc["np_arrays", common_index][key])
            duplicates = new_error_df.index.duplicated(keep="first")
            if any(duplicates):
                logging.warning(
                    "duplicate indices in error array of {} - "
                    "keeping only first occurrence of duplicates".format(key))
                new_error_df = new_error_df[~duplicates]
            error_df = pd.concat([error_df, new_error_df], axis=1)

    # check titles - warn if results were created with different settings
    first_title = df.loc["info", "title"][0]
    first_file = args.result_files[0]
    if not args.no_warnings:
        checks = df.loc["info", "title"] != first_title
        for i, differs in enumerate(checks):
            if not differs:
                continue
            else:
                mismatching_title = df.loc["info", "title"][i]
                mismatching_file = args.result_files[i]
                logging.debug(SEP)
                logging.warning(
                    CONFLICT_TEMPLATE.format(first_file, first_title,
                                             mismatching_title,
                                             mismatching_file))
                if not user.confirm(
                        "Go on anyway? - enter 'y' or any other key to exit"):
                    sys.exit()

    if logging.getLogger().isEnabledFor(logging.DEBUG):
        logging.debug(SEP)
        logging.debug("Aggregated dataframe:\n{}".format(
            df.to_string(line_width=80)))

    # show a statistics overview
    logging.debug(SEP)
    logging.info("\n{}\n\n{}\n".format(
        first_title, df.loc["stats"].T.to_string(line_width=80)))

    if args.save_table:
        logging.debug(SEP)
        if args.no_warnings or user.check_and_confirm_overwrite(
                args.save_table):
            # choose which data to export, as configured in SETTINGS
            if SETTINGS.table_export_data.lower() == "error_array":
                data = error_df
            elif SETTINGS.table_export_data.lower() in ("info", "stats"):
                data = df.loc[SETTINGS.table_export_data.lower()]
            else:
                raise ValueError(
                    "unsupported export data specifier: {}".format(
                        SETTINGS.table_export_data))
            if SETTINGS.table_export_transpose:
                data = data.T
            if SETTINGS.table_export_format == "excel":
                # Excel needs an explicit writer; other formats map directly
                # to a DataFrame.to_<format>() method below
                writer = pd.ExcelWriter(args.save_table)
                data.to_excel(writer)
                writer.save()
                writer.close()
            else:
                getattr(data,
                        "to_" + SETTINGS.table_export_format)(args.save_table)
            logging.debug("{} table saved to: {}".format(
                SETTINGS.table_export_format, args.save_table))

    if args.plot or args.save_plot or args.serialize_plot:
        # check if data has NaN "holes" due to different indices
        inconsistent = error_df.isnull().values.any()
        if inconsistent and common_index != "timestamps" \
                and not args.no_warnings:
            logging.debug(SEP)
            logging.warning("Data lengths/indices are not consistent, "
                            "raw value plot might not be correctly aligned")

        from evo.tools import plot
        import matplotlib.pyplot as plt
        import seaborn as sns
        import math

        # use default plot settings
        figsize = (SETTINGS.plot_figsize[0], SETTINGS.plot_figsize[1])
        use_cmap = SETTINGS.plot_multi_cmap.lower() != "none"
        colormap = SETTINGS.plot_multi_cmap if use_cmap else None
        linestyles = ["-o" for x in args.result_files
                      ] if args.plot_markers else None

        # labels according to first dataset
        title = first_title
        if "xlabel" in df.loc["info"].index and not df.loc[
                "info", "xlabel"].isnull().values.any():
            index_label = df.loc["info", "xlabel"][0]
        else:
            index_label = "$t$ (s)" if common_index else "index"
        metric_label = df.loc["info", "label"][0]

        plot_collection = plot.PlotCollection(title)
        # raw value plot
        fig_raw = plt.figure(figsize=figsize)
        # handle NaNs from concat() above
        error_df.interpolate(method="index").plot(ax=fig_raw.gca(),
                                                  colormap=colormap,
                                                  style=linestyles,
                                                  title=first_title)
        plt.xlabel(index_label)
        plt.ylabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("raw", fig_raw)

        # statistics plot
        fig_stats = plt.figure(figsize=figsize)
        exclude = df.loc["stats"].index.isin(["sse"])  # don't plot sse
        df.loc["stats"][~exclude].plot(kind="barh", ax=fig_stats.gca(),
                                       colormap=colormap, stacked=False)
        plt.xlabel(metric_label)
        plt.legend(frameon=True)
        plot_collection.add_figure("stats", fig_stats)

        # grid of distribution plots
        raw_tidy = pd.melt(error_df, value_vars=list(error_df.columns.values),
                           var_name="estimate", value_name=metric_label)
        col_wrap = 2 if len(args.result_files) <= 2 else math.ceil(
            len(args.result_files) / 2.0)
        dist_grid = sns.FacetGrid(raw_tidy, col="estimate", col_wrap=col_wrap)
        dist_grid.map(sns.distplot, metric_label)  # fits=stats.gamma
        plot_collection.add_figure("histogram", dist_grid.fig)

        # box plot
        fig_box = plt.figure(figsize=figsize)
        ax = sns.boxplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
                         ax=fig_box.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in
        #                            ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("box_plot", fig_box)

        # violin plot
        fig_violin = plt.figure(figsize=figsize)
        ax = sns.violinplot(x=raw_tidy["estimate"], y=raw_tidy[metric_label],
                            ax=fig_violin.gca())
        # ax.set_xticklabels(labels=[item.get_text() for item in
        #                            ax.get_xticklabels()], rotation=30)
        plot_collection.add_figure("violin_histogram", fig_violin)

        if args.plot:
            plot_collection.show()
        if args.save_plot:
            logging.debug(SEP)
            plot_collection.export(args.save_plot,
                                   confirm_overwrite=not args.no_warnings)
        if args.serialize_plot:
            logging.debug(SEP)
            plot_collection.serialize(args.serialize_plot,
                                      confirm_overwrite=not args.no_warnings)
def stats(apes_x, apes_y, apes_vx, apes_vy, apes_psi, apes_omega, apes_length,
          apes_width, rpes_x, rpes_y, rpes_length, rpes_width, filename):
    """Aggregate absolute/relative error results, print RMSE overviews and
    save a 4x2 grid of horizontal bar plots of the error statistics as a
    .pgf figure.

    :param apes_x..apes_width: iterables of evo result objects with the
        absolute errors of each state component
    :param rpes_x..rpes_width: iterables of evo result objects with the
        relative errors
    :param filename: basename of the exported figure
        (saved to /home/kostas/report/figures/<filename>_stats.pgf)
    """
    import pandas as pd
    from evo.tools import pandas_bridge, plot
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches
    import seaborn as sns

    current_palette = sns.color_palette()
    sns.set_color_codes("dark")

    def _to_df(results):
        # One column block per result; concatenate once instead of growing
        # a DataFrame inside the loop (quadratic copying in the original).
        frames = [pandas_bridge.result_to_df(r, None) for r in results]
        return pd.concat(frames, axis="columns") if frames else pd.DataFrame()

    df_x = _to_df(apes_x)
    df_y = _to_df(apes_y)
    df_vx = _to_df(apes_vx)
    df_vy = _to_df(apes_vy)
    df_psi = _to_df(apes_psi)
    df_omega = _to_df(apes_omega)
    df_length = _to_df(apes_length)
    df_width = _to_df(apes_width)
    df_rx = _to_df(rpes_x)
    df_ry = _to_df(rpes_y)
    df_rlength = _to_df(rpes_length)
    df_rwidth = _to_df(rpes_width)

    # LaTeX/pgf output for inclusion in the report
    mpl.use('pgf')
    mpl.rcParams.update({
        "text.usetex": True,
        "pgf.texsystem": "pdflatex",
    })

    # print an RMSE overview for the absolute errors
    setting = ["RMSE"]
    include = df_x.loc["stats"].index.isin(setting)
    print("ape_x", df_x.loc["stats"][include])
    print("ape_y", df_y.loc["stats"][include])
    print("ape_vx", df_vx.loc["stats"][include])
    print("ape_vy", df_vy.loc["stats"][include])
    print("ape_psi", df_psi.loc["stats"][include])
    print("ape_omega", df_omega.loc["stats"][include])
    print("ape_length", df_length.loc["stats"][include])
    print("ape_width", df_width.loc["stats"][include])

    fig_stats, axarr = plt.subplots(4, 2, figsize=(6.125, 8.6))

    def _ordered_stats(frame):
        # Fixed display order for the bar plots; SSE is dropped since its
        # scale would dwarf the other statistics.
        stats_df = frame.xs('stats').reindex(
            ["Max", "Min", "STD", "RMSE", "SSE", "Median", "Mean"])
        stats_df.drop(index="SSE", inplace=True)
        return stats_df

    vx_stats = _ordered_stats(df_vx)
    vy_stats = _ordered_stats(df_vy)
    psi_stats = _ordered_stats(df_psi)
    omega_stats = _ordered_stats(df_omega)
    rx_stats = _ordered_stats(df_rx)
    # BUG FIX: the original reindexed dfx_stats for y_stats and dfy_stats
    # for ry_stats (copy-paste errors); each frame now uses its own stats.
    ry_stats = _ordered_stats(df_ry)
    rlength_stats = _ordered_stats(df_rlength)
    rwidth_stats = _ordered_stats(df_rwidth)

    rx_stats.plot(kind="barh", ax=axarr[0, 0], legend=None)
    axarr[0, 0].set_xlabel("Relative error $x$")
    ry_stats.plot(kind="barh", ax=axarr[0, 1], legend=None)
    axarr[0, 1].set_xlabel("Relative error $y$")
    vx_stats.plot(kind="barh", ax=axarr[1, 0], legend=None)
    axarr[1, 0].set_xlabel("Absolute error $v_x$ (m/s)")
    vy_stats.plot(kind="barh", ax=axarr[1, 1], legend=None)
    axarr[1, 1].set_xlabel("Absolute error $v_y$ (m/s)")
    psi_stats.plot(kind="barh", width=0.3, ax=axarr[2, 0], legend=None,
                   color='indianred')
    # raw strings: "\p"/"\o" are invalid escape sequences in plain literals
    axarr[2, 0].set_xlabel(r"Absolute error $\psi$ (degrees)")
    omega_stats.plot(kind="barh", width=0.3, ax=axarr[2, 1], legend=None,
                     color='indianred')
    axarr[2, 1].set_xlabel(r"Absolute error $\omega$ (degrees/s)")
    rlength_stats.plot(kind="barh", width=0.3, ax=axarr[3, 0], legend=None,
                       color='indianred')
    axarr[3, 0].set_xlabel("Relative error Length")
    rwidth_stats.plot(kind="barh", width=0.3, ax=axarr[3, 1], legend=None,
                      color='indianred')
    axarr[3, 1].set_xlabel("Relative error Width")

    # shared legend for all subplots; patch order/colors kept as before
    # NOTE(review): the 'green'/'blue' names are swapped w.r.t. their
    # colors ('b'/'g') in the original - kept to preserve output.
    current_palette = sns.color_palette()
    sns.set_color_codes()
    red = mpatches.Patch(color='indianred', label='Shape KF')
    gray = mpatches.Patch(color='gray', label='Reference')
    green = mpatches.Patch(color='b', label='KF')
    blue = mpatches.Patch(color='g', label='UKF')
    lgd = fig_stats.legend(handles=[green, blue, red, gray],
                           loc='lower center', ncol=4, borderpad=0.7,
                           bbox_to_anchor=(0.54, 0), columnspacing=0.8)
    fig_stats.tight_layout()
    fig_stats.subplots_adjust(bottom=0.12)
    fig_stats.savefig("/home/kostas/report/figures/" + filename +
                      "_stats.pgf")