def get_single_profile_info(pcs, minor_version, profile_source):
    """Function for loading single performance profile info

    Searches the tracked profiles of the given minor version first and the
    untracked profiles second; the first profile whose source matches
    ``profile_source`` is loaded, converted to a record dictionary and
    formatted for the response.

    :param PCS pcs: object with performance control system wrapper
    :param str minor_version: commit to which the profiles belongs
    :param str profile_source: name of the performance profile
    :return: pair (formatted profile info, JSON string with that info),
        or an error response when no profile matches or loading fails
    """
    try:
        # The two passes differ only in the profile list and in whether the
        # profile on disk is raw — deduplicated here instead of repeating
        # the whole loading/formatting sequence twice.
        search_passes = [
            (commands.get_minor_version_profiles(pcs, minor_version), False),
            (commands.get_untracked_profiles(pcs), True),
        ]
        for profiles_objs, is_raw in search_passes:
            for profile_obj in profiles_objs:
                if profile_obj.source != profile_source:
                    continue
                perf_profile = profile.load_profile_from_file(
                    profile_obj.realpath, is_raw_profile=is_raw)
                options = list(query.all_resource_fields_of(perf_profile))
                numerical = list(query.all_numerical_resource_fields_of(perf_profile))
                dataframe = convert.resources_to_pandas_dataframe(perf_profile)
                # Drop rows that have a missing value in any resource field
                for option in options:
                    dataframe = dataframe[pandas.notnull(dataframe[option])]
                dataframe = dataframe.astype(str)
                resource_values = dataframe.to_dict(orient='records')
                formatted = formatter.format_single_profile_info(
                    profile_obj, minor_version, options, numerical, resource_values)
                return formatted, json.dumps({'profile': formatted})
        return create_response('Something went wrong', 404)
    except Exception as e:
        # NOTE(review): broad catch kept — this looks like a web-handler
        # boundary that must always produce a response; confirm callers
        # expect the exception object itself inside create_response.
        eprint(e)
        return create_response(e, 404)
def generate_plot_data_slices(profile):
    """ Generates data slices for plotting resources and models. The resources
    are split by unique uids, models are sliced into parts by uid and interval.

    :param dict profile: loaded perun profile
    :returns generator: generator: resources and models slices of unique uid as
        pair (data_slice(pandas.DataFrame), uid_models(list))
    """
    # Scatter-plot points come from the resource table, curves from the models
    resource_table = convert.resources_to_pandas_dataframe(profile)
    models = [model for _, model in query.all_models_of(profile)]

    # Every unique uid (and optionally interval) gets its own graph
    uids = (convert.flatten(uid_value)
            for uid_value in query.unique_resource_values_of(profile, 'uid'))

    for data_slice, slice_models in slice_resources_by_uid(resource_table, models, uids):
        if not slice_models:
            # No models for this uid — emit the resource slice alone
            yield data_slice, []
            continue
        # Plot each interval separately — it improves readability
        for interval_models in slice_models_by_interval(slice_models):
            yield data_slice, interval_models
def get_averages(profile):
    """Retrieves the averages of all amounts grouped by the uid

    :param dict profile: dictionary representation of profile
    :returns: dictionary with averages for all uids
    """
    resources = convert.resources_to_pandas_dataframe(profile)
    mean_per_uid = resources.groupby('uid').mean()
    # Series.to_dict() yields {uid: mean amount}
    return mean_per_uid['amount'].to_dict()
def create_from_params(profile, func, of_key, through_key, by_key, stacked, accumulate, x_axis_label, y_axis_label, graph_title, graph_width=800):
    """Creates Flow graph according to the given parameters.

    The profile is first converted to a pandas.DataFrame; the data are then
    grouped by 'by_key' and, within each group, aggregated over 'through_key'
    using the given aggregation function. The result is rendered as an area
    chart with one area (and its points) per 'by_key' value.

    Arguments:
        profile(dict): dictionary with measured data
        func(str): function that will be used for aggregation of the data
        of_key(str): key that specifies which fields of the resource entry will be used as data
        through_key(str): key that specifies fields of the resource that will be on the x axis
        by_key(str): key that specifies values for which graphs will be outputed
        stacked(bool): true if the values of the graphs should be stacked on
            each other -> this shows the overall values
        accumulate(bool): true if the values from previous x values should be accumulated
        x_axis_label(str): label on the x axis
        y_axis_label(str): label on the y axis
        graph_title(str): name of the graph
        graph_width(int): width of the created bokeh graph
    Returns:
        charts.Area: flow graph according to the params
    """
    # Convert profile to pandas data grid and aggregate it into the chart source
    resource_frame = convert.resources_to_pandas_dataframe(profile)
    chart_source = construct_data_source_from(
        resource_frame, func, of_key, by_key, through_key, accumulate)

    # Colours for the individual 'by_key' groups, sorted in reverse
    group_colours = bokeh_helpers.get_unique_colours_for_(
        resource_frame, by_key, sort_color_style=bokeh_helpers.ColourSort.Reverse)

    # Build the area chart and apply common + area-specific configuration
    flow_chart = charts.Area(chart_source, stack=stacked, color=group_colours)
    bokeh_helpers.configure_graph(
        flow_chart, profile, func, graph_title, x_axis_label, y_axis_label, graph_width)
    configure_area_chart(flow_chart, resource_frame, chart_source, through_key, stacked)
    return flow_chart
def create_from_params(profile, func, of_key, per_key, by_key, cummulation_type, x_axis_label, y_axis_label, graph_title, graph_width=800):
    """Creates Bar graph according to the given parameters.

    Takes the input profile, convert it to pandas.DataFrame. Then the data
    according to 'of_key' parameter are used as values and are output by
    aggregation function of 'func' depending on values of 'per_key'.
    Values are further stacked by 'by_key' key and cummulated according to the
    type.

    Arguments:
        profile(dict): dictionary with measured data
        func(str): function that will be used for aggregation of the data
        of_key(str): key that specifies which fields of the resource entry will be used as data
        per_key(str): key that specifies fields of the resource that will be on the x axis
        by_key(str): key that specifies grouping or stacking of the resources
        cummulation_type(str): type of the cummulation of the data (either stacked or grouped)
        x_axis_label(str): label on the x axis
        y_axis_label(str): label on the y axis
        graph_title(str): name of the graph
        graph_width(int): width of the created bokeh graph
    Returns:
        charts.Bar: bar graph according to the params, or None for an unknown
            cummulation type (after the error has been reported)
    """
    # Convert profile to pandas data grid
    data_frame = convert.resources_to_pandas_dataframe(profile)

    # Create basic graph:
    if cummulation_type == 'stacked':
        bar_graph = create_stacked_bar_graph(data_frame, func, of_key, per_key, by_key)
    elif cummulation_type == 'grouped':
        bar_graph = create_grouped_bar_graph(data_frame, func, of_key, per_key, by_key)
    else:
        log.error("unknown cummulation type '{}'".format(cummulation_type))
        # Fix: the original fell through to configure_graph with 'bar_graph'
        # unbound, raising UnboundLocalError. log.error presumably aborts in
        # this project — TODO confirm — but return explicitly in case it does not.
        return None

    # Call basic configuration of the graph
    bokeh_helpers.configure_graph(
        bar_graph, profile, func, graph_title, x_axis_label, y_axis_label, graph_width)
    return bar_graph