def get_statistics(filter_expression, include_attrs, include_runattrs, include_itervars, include_param_assignments, include_config_entries):
    df = _read_result_files(filter_expression, 't')
    df = _pivot_results(df, include_attrs, include_runattrs, include_itervars, include_param_assignments, include_config_entries)
    _ensure_columns_exist(df, _STATISTIC_COLUMN_NAMES)
    return df
def get_parameters(filter_expression, include_attrs, include_runattrs, include_itervars, include_param_assignments, include_config_entries):
    df = _read_result_files(filter_expression, 'p')
    df = _pivot_results(df, include_attrs, include_runattrs, include_itervars, include_param_assignments, include_config_entries)
    _ensure_columns_exist(df, _PARAMETER_COLUMN_NAMES)
    return df
def get_scalars(filter_expression, include_attrs, include_fields, include_runattrs, include_itervars, include_param_assignments, include_config_entries):
    args = []
    if include_fields:
        args.append("--add-fields-as-scalars")
    # TODO filter row types based on include_ args, as optimization
    df = _read_result_files(filter_expression, 's', *args)
    df = _pivot_results(df, include_attrs, include_runattrs, include_itervars, include_param_assignments, include_config_entries)
    df["value"] = pd.to_numeric(df["value"], errors="raise")
    return df
def get_vectors(filter_expression, include_attrs, include_runattrs, include_itervars, include_param_assignments, include_config_entries, start_time, end_time):
    df = _read_result_files(filter_expression, 'v', '--start-time', str(start_time), '--end-time', str(end_time))
    df = _pivot_results(df, include_attrs, include_runattrs, include_itervars, include_param_assignments, include_config_entries)
    _ensure_columns_exist(df, _VECTOR_COLUMN_NAMES)
    return df
def get_scalars(filter_or_dataframe="", include_attrs=False, include_fields=False, include_runattrs=False, include_itervars=False, include_param_assignments=False, include_config_entries=False): """ Returns a filtered list of scalar results. Parameters: - `filter_or_dataframe` (string): The filter expression to select the desired scalars, or a dataframe in the "raw" format. Example: `name =~ "channelUtilization*" AND runattr:replication =~ "#0"` - `include_attrs` (bool): Optional. When set to `True`, result attributes (like `unit` or `source` for example) are appended to the DataFrame, pivoted into columns. - `include_fields` (bool): Optional. If `True`, the fields of statistics and histograms (`:min`, `:mean`, etc.) are also returned as synthetic scalars. - `include_runattrs`, `include_itervars`, `include_param_assignments`, `include_config_entries` (bool): Optional. When set to `True`, additional pieces of metadata about the run is appended to the DataFrame, pivoted into columns. See the "Metadata columns" section of the module documentation for details. Columns of the returned DataFrame: - `runID` (string): Identifies the simulation run - `module` (string): Hierarchical name (a.k.a. full path) of the module that recorded the result item - `name` (string): The name of the scalar - `value` (double): The value of the scalar - Additional metadata items (result attributes, run attributes, iteration variables, etc.), as requested """ if type(filter_or_dataframe) is str: filter_expression = filter_or_dataframe del filter_or_dataframe return impl.get_scalars(**locals()) else: if include_fields: raise ValueError( "include_fields is not supported when filter_or_dataframe is a dataframe" ) df = filter_or_dataframe row_types = ["scalar", "itervar", "runattr", "config", "attr"] df = df[df["type"].isin(row_types)] df = _pivot_results(df, include_attrs, include_runattrs, include_itervars, include_param_assignments, include_config_entries) df.dropna(axis='columns', how='all', inplace=True) if "value" in df: # it might be empty df["value"] = pd.to_numeric(df["value"], errors="raise") return df
def get_parameters(filter_or_dataframe="", include_attrs=False, include_runattrs=False, include_itervars=False, include_param_assignments=False, include_config_entries=False): """ Returns a filtered list of parameters - actually computed values of individual `cPar` instances in the fully built network. Parameters are considered "pseudo-results", similar to scalars - except their values are strings. Even though they act mostly as input to the actual simulation run, the actually assigned value of individual `cPar` instances is valuable information, as it is the result of the network setup process. For example, even if a parameter is set up as an expression like `normal(3, 0.4)` from `omnetpp.ini`, the returned DataFrame will contain the single concrete value picked for every instance of the parameter. Parameters: - `filter_or_dataframe` (string): The filter expression to select the desired parameters, or a dataframe in the "raw" format. Example: `name =~ "x" AND module =~ Aloha.server` - `include_attrs` (bool): Optional. When set to `True`, result attributes (like `unit` for example) are appended to the DataFrame, pivoted into columns. - `include_runattrs`, `include_itervars`, `include_param_assignments`, `include_config_entries` (bool): Optional. When set to `True`, additional pieces of metadata about the run is appended to the DataFrame, pivoted into columns. See the "Metadata columns" section of the module documentation for details. Columns of the returned DataFrame: - `runID` (string): Identifies the simulation run - `module` (string): Hierarchical name (a.k.a. full path) of the module that recorded the result item - `name` (string): The name of the parameter - `value` (string): The value of the parameter. - Additional metadata items (result attributes, run attributes, iteration variables, etc.), as requested """ if type(filter_or_dataframe) is str: filter_expression = filter_or_dataframe del filter_or_dataframe return impl.get_parameters(**locals()) else: df = filter_or_dataframe row_types = ["param", "itervar", "runattr", "config", "attr"] df = df[df["type"].isin(row_types)] df = _pivot_results(df, include_attrs, include_runattrs, include_itervars, include_param_assignments, include_config_entries) df.dropna(axis='columns', how='all', inplace=True) return df
def get_histograms(filter_or_dataframe="", include_attrs=False, include_runattrs=False, include_itervars=False, include_param_assignments=False, include_config_entries=False): """ Returns a filtered list of histogram results. Parameters: - `filter_or_dataframe` (string): The filter expression to select the desired histograms, or a dataframe in the "raw" format. Example: `name =~ "collisionMultiplicity:histogram" AND itervar:iaMean =~ "2"` - `include_attrs` (bool): Optional. When set to `True`, result attributes (like `unit` or `source` for example) are appended to the DataFrame, pivoted into columns. - `include_runattrs`, `include_itervars`, `include_param_assignments`, `include_config_entries` (bool): Optional. When set to `True`, additional pieces of metadata about the run is appended to the DataFrame, pivoted into columns. See the "Metadata columns" section of the module documentation for details. Columns of the returned DataFrame: - `runID` (string): Identifies the simulation run - `module` (string): Hierarchical name (a.k.a. full path) of the module that recorded the result item - `name` (string): The name of the vector - `count`, `sumweights`, `mean`, `stddev`, `min`, `max` (double): The characteristic mathematical properties of the histogram - `binedges`, `binvalues` (np.array): The histogram edge locations and the weighted sum of the collected samples in each bin. `len(binedges) == len(binvalues) + 1` - `underflows`, `overflows` (double): The weighted sum of the samples that fell outside of the histogram bin range in the two directions - Additional metadata items (result attributes, run attributes, iteration variables, etc.), as requested """ if type(filter_or_dataframe) is str: filter_expression = filter_or_dataframe del filter_or_dataframe return impl.get_histograms(**locals()) else: df = filter_or_dataframe row_types = ["histogram", "itervar", "runattr", "config", "attr"] df = df[df["type"].isin(row_types)] df = _pivot_results(df, include_attrs, include_runattrs, include_itervars, include_param_assignments, include_config_entries) df.dropna(axis='columns', how='all', inplace=True) return df
def get_vectors(filter_or_dataframe="", include_attrs=False, include_runattrs=False, include_itervars=False, include_param_assignments=False, include_config_entries=False, start_time=-inf, end_time=inf): """ Returns a filtered list of vector results. Parameters: - `filter_or_dataframe` (string): The filter expression to select the desired vectors, or a dataframe in the "raw" format. Example: `name =~ "radioState*" AND runattr:replication =~ "#0"` - `include_attrs` (bool): Optional. When set to `True`, result attributes (like `unit` or `source` for example) are appended to the DataFrame, pivoted into columns. - `include_runattrs`, `include_itervars`, `include_param_assignments`, `include_config_entries` (bool): Optional. When set to `True`, additional pieces of metadata about the run is appended to the DataFrame, pivoted into columns. See the "Metadata columns" section of the module documentation for details. - `start_time`, `end_time` (double): Optional time limits to trim the data of vector type results. The unit is seconds, both the `vectime` and `vecvalue` arrays will be affected, the interval is left-closed, right-open. Columns of the returned DataFrame: - `runID` (string): Identifies the simulation run - `module` (string): Hierarchical name (a.k.a. full path) of the module that recorded the result item - `name` (string): The name of the vector - `vectime`, `vecvalue` (np.array): The simulation times and the corresponding values in the vector - Additional metadata items (result attributes, run attributes, iteration variables, etc.), as requested """ if type(filter_or_dataframe) is str: filter_expression = filter_or_dataframe del filter_or_dataframe return impl.get_vectors(**locals()) else: df = filter_or_dataframe row_types = ["vector", "itervar", "runattr", "config", "attr"] df = df[df["type"].isin(row_types)] df = _pivot_results(df, include_attrs, include_runattrs, include_itervars, include_param_assignments, include_config_entries) df.dropna(axis='columns', how='all', inplace=True) if start_time != -inf or end_time != inf: def crop(row): t = row['vectime'] v = row['vecvalue'] from_index = np.searchsorted(t, start_time, 'left') to_index = np.searchsorted(t, end_time, 'left') row['vectime'] = t[from_index:to_index] row['vecvalue'] = v[from_index:to_index] return row df = df.transform(crop, axis='columns') return df
def get_parameters(filter_expression, include_attrs, include_runattrs, include_itervars, include_param_assignments, include_config_entries):
    df = _read_result_files(filter_expression, 'p')
    df = _pivot_results(df, include_attrs, include_runattrs, include_itervars, include_param_assignments, include_config_entries)
    return df