def get_best_performing_dataframe_separately_packetreceived():
    """Selects the runs corresponding to the most received packets, and returns
    them in three separate dataframes for the three configurations."""
    # filter expressions for packetReceived:count for the three configurations
    filter_expression_bmac = """type =~ scalar AND isfield =~ false AND runattr:configname =~ "StatisticBMac" AND module =~ "*.server.app[0]" AND name =~ packetReceived:count"""
    filter_expression_xmac = """type =~ scalar AND isfield =~ false AND runattr:configname =~ "StatisticXMac" AND module =~ "*.server.app[0]" AND name =~ packetReceived:count"""
    filter_expression_lmac = """type =~ scalar AND isfield =~ false AND runattr:configname =~ "StatisticLMac" AND module =~ "*.server.app[0]" AND name =~ packetReceived:count"""

    # dataframes for packetReceived:count
    df_b = results.get_scalars(filter_expression_bmac, include_fields=False,
                               include_attrs=True, include_runattrs=True,
                               include_itervars=True)
    df_x = results.get_scalars(filter_expression_xmac, include_fields=False,
                               include_attrs=True, include_runattrs=True,
                               include_itervars=True)
    df_l = results.get_scalars(filter_expression_lmac, include_fields=False,
                               include_attrs=True, include_runattrs=True,
                               include_itervars=True)

    # average repetitions
    df_b = df_b.groupby(['name', 'configname', 'module', 'iterationvars', 'inifile'],
                        as_index=False).mean()
    if debug:
        print("\ndf_b after averaging:\n-------------------", df_b)
    df_x = df_x.groupby(['name', 'configname', 'module', 'iterationvars', 'inifile'],
                        as_index=False).mean()
    if debug:
        print("\ndf_x after averaging:\n-------------------", df_x)
    df_l = df_l.groupby(['name', 'configname', 'module', 'iterationvars', 'inifile'],
                        as_index=False).mean()
    if debug:
        print("\ndf_l after averaging:\n-------------------", df_l)

    # get index of the run with the most received packets
    df_b_max_index = df_b[['value']].idxmax()
    df_x_max_index = df_x[['value']].idxmax()
    df_l_max_index = df_l[['value']].idxmax()

    # create dataframes containing only the run with the most received packets
    df_b = df_b.iloc[df_b_max_index]
    df_x = df_x.iloc[df_x_max_index]
    df_l = df_l.iloc[df_l_max_index]

    if debug:
        print("----\nget_best_performing_dataframe_separately_packetreceived()\n----")
        print("returning ", df_b, "\n", df_x, "\n", df_l, "\n")
    return df_b, df_x, df_l

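# Hedged usage sketch (not part of the original scripts): assuming the module-level
# `results` provider and `debug` flag used above are available, the three single-row
# dataframes returned by the function can be compared directly on their 'value' column.
def _example_compare_best_runs():
    df_b, df_x, df_l = get_best_performing_dataframe_separately_packetreceived()
    for label, df in (("B-MAC", df_b), ("X-MAC", df_x), ("L-MAC", df_l)):
        # each dataframe holds exactly one row: the best-performing run
        print(label, "max received packets:", float(df["value"].iloc[0]))
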
def importing_5():
    df = results.get_scalars("name =~ rxBytes:sum OR name =~ txBytes:sum",
                             include_runattrs=True)
    print(chart.extract_label_columns(df))
    df = pd.pivot_table(df, columns=["name", "replication"], index=["module"])
    print(df)
    chart.plot_scalars(df)

def get_power_for_each_module_dataframes():
    """Returns the per-module residualEnergyCapacity values for the three
    configurations in three separate dataframes."""
    filter_expression = """* AND name =~ residualEnergyCapacity:last"""
    filter_expression_bmac = """type =~ scalar AND isfield =~ false AND runattr:configname =~ "StatisticBMac" AND """ + filter_expression
    filter_expression_xmac = """type =~ scalar AND isfield =~ false AND runattr:configname =~ "StatisticXMac" AND """ + filter_expression
    filter_expression_lmac = """type =~ scalar AND isfield =~ false AND runattr:configname =~ "StatisticLMac" AND """ + filter_expression

    df_b = results.get_scalars(filter_expression_bmac, include_fields=False,
                               include_attrs=True, include_runattrs=True,
                               include_itervars=True)
    df_x = results.get_scalars(filter_expression_xmac, include_fields=False,
                               include_attrs=True, include_runattrs=True,
                               include_itervars=True)
    df_l = results.get_scalars(filter_expression_lmac, include_fields=False,
                               include_attrs=True, include_runattrs=True,
                               include_itervars=True)

    # average repetitions
    df_b = df_b.groupby(['name', 'configname', 'module', 'iterationvars', 'inifile'],
                        as_index=False).mean()
    # print("\ndf_b after averaging:\n-------------------", df_b)
    df_x = df_x.groupby(['name', 'configname', 'module', 'iterationvars', 'inifile'],
                        as_index=False).mean()
    # print("\ndf_x after averaging:\n-------------------", df_x)
    df_l = df_l.groupby(['name', 'configname', 'module', 'iterationvars', 'inifile'],
                        as_index=False).mean()
    # print("\ndf_l after averaging:\n-------------------", df_l)

    print("get_power_dataframes\nreturning:")
    print("df_b:\n", df_b)
    print("df_x:\n", df_x)
    print("df_l:\n", df_l)
    return df_b, df_x, df_l

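# Hedged plotting sketch (illustrative only, not from the original scripts): turns the
# per-module residual energy dataframes returned above into a simple matplotlib plot.
# The column names ('module', 'value') come from results.get_scalars() as used throughout
# these scripts; matplotlib is assumed to be available.
import matplotlib.pyplot as plt

def _example_plot_residual_energy():
    df_b, df_x, df_l = get_power_for_each_module_dataframes()
    fig, ax = plt.subplots()
    for label, df in (("B-MAC", df_b), ("X-MAC", df_x), ("L-MAC", df_l)):
        # one line per configuration, one point per module
        ax.plot(df["module"], df["value"], marker="o", label=label)
    ax.set_xlabel("module")
    ax.set_ylabel("residualEnergyCapacity:last")
    ax.tick_params(axis="x", rotation=90)
    ax.legend()
    plt.show()
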
def test_scalars_with_all():
    df = results.get_scalars(r, include_attrs=True, include_itervars=True,
                             include_runattrs=True, include_param_assignments=True,
                             include_config_entries=True)
    _assert_sequential_index(df)
    _assert(sanitize_and_compare_csv(df, "scalars_with_all.csv"), "content mismatch")

def chart_function_bars():
    df = results.get_scalars("name =~ rxBytes:sum OR name =~ txBytes:sum",
                             include_runattrs=True)
    names = utils.get_names_for_title(df, dict())
    df = pd.pivot_table(df, index="name", columns="module", values='value')
    utils.preconfigure_plot(dict())
    utils.plot_bars(df, dict(), names)
    utils.postconfigure_plot(dict())

def get_data(filter):
    sc = results.get_scalars(filter, include_attrs=True)
    iv = results.get_itervars(filter)
    ra = results.get_runattrs(filter)
    df = pd.concat([sc, iv, ra])
    # print(df)
    df["value"] = pd.to_numeric(df["value"], errors="ignore")
    df = pd.pivot_table(df, columns="name", index="runID", dropna=False, aggfunc=aggfunc)
    # print(df)
    return df

def get_data(filter):
    try:
        sc = results.get_scalars(filter, include_attrs=True)
        iv = results.get_itervars(filter)
        ra = results.get_runattrs(filter)
    except ValueError as e:
        raise chart.ChartScriptError("Error while querying results: " + str(e))
    df = pd.concat([sc, iv, ra])
    df["value"] = pd.to_numeric(df["value"], errors="ignore")
    df = pd.pivot_table(df, columns="name", index="runID", dropna=False, aggfunc=aggfunc)
    return df

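# Hedged usage sketch: get_data() relies on a module-level `aggfunc` for the pivot and
# takes a filter expression in the scave filter syntax. The aggfunc value and the filter
# string below are assumptions for illustration; the filter is borrowed from the other
# scripts in this collection.
aggfunc = "first"  # assumed module-level setting; keeps one entry per run/name cell
pivoted = get_data("name =~ rxBytes:sum OR name =~ txBytes:sum")
print(pivoted)
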
filter_expression = params["filter"]
groups = params["groups"]
bars = params["bars"]

if not groups:
    groups = "module,experiment"

if not bars:
    bars = "name,measurement"

# TODO: make sure no column is present in both lists

# The data is returned as a Pandas DataFrame
df = results.get_scalars(filter_expression, include_attrs=True,
                         include_itervars=True, include_runattrs=True)
print(df)

# You can perform any transformations on the data here
df = pd.pivot_table(df, values="value", index=groups.split(","),
                    columns=bars.split(","))

# Finally, the results are plotted
chart.plot_scalars(df)
chart.copy_properties()

import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from omnetpp.scave import results, chart

params = chart.get_configured_properties()

if not (params["x_attr"] and params["y_attr"]):
    raise Exception("Please select axis attributes in the properties dialog!")

df = results.get_scalars(params["scalar_filter"], include_attrs=True,
                         include_itervars=True, include_runattrs=True)

df[params["x_attr"]] = pd.to_numeric(df[params["x_attr"]])
df[params["y_attr"]] = pd.to_numeric(df[params["y_attr"]])
# print(df)

title_col, legend_cols = chart.extract_label_columns(df)
print(legend_cols)
title = str(list(df[title_col])[0]) if title_col else None

df = pd.pivot_table(df, columns=[params["x_attr"]], index=params["y_attr"],
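
# Hedged continuation sketch (NOT the original script's ending, which is truncated at the
# pivot_table call above): one way a scalar value pivoted over two numeric attributes could
# be rendered as a 3D surface, matching the Axes3D / cm imports at the top of this script.
# The 'value' column and the use of plot_surface are assumptions for illustration.
def _example_surface_plot(df, x_attr, y_attr):
    pivoted = pd.pivot_table(df, columns=[x_attr], index=y_attr, values="value")
    xs, ys = np.meshgrid(pivoted.columns, pivoted.index)
    fig = plt.figure()
    ax = fig.add_subplot(projection="3d")
    ax.plot_surface(xs, ys, pivoted.values, cmap=cm.viridis)
    ax.set_xlabel(x_attr)
    ax.set_ylabel(y_attr)
    plt.show()
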
def test_scalars_with_config_entries():
    df = results.get_scalars(r, include_config_entries=True)
    _assert_sequential_index(df)
    _assert(sanitize_and_compare_csv(df, "scalars_with_config_entries.csv"), "content mismatch")

def test_scalars():
    df = results.get_scalars(r)
    _assert_sequential_index(df)
    _assert(sanitize_and_compare_csv(df, "scalars.csv"), "content mismatch")

from omnetpp.scave import results, chart

params = chart.get_properties()

# This expression selects the results (you might be able to logically simplify it)
filter_expression = params["filter"]

# The data is returned as a Pandas DataFrame
df = results.get_scalars(filter_expression)

# You can perform any transformations on the data here

# Finally, the results are plotted
chart.plot_scalars(df)