def fetch_rrd_data_for_graph(graph_recipe, graph_data_range):
    needed_rrd_data = get_needed_sources(graph_recipe["metrics"])
    by_service = group_needed_rrd_data_by_service(needed_rrd_data)
    # TODO: The Unions below are horrible! Fix this by making this a NewType/class.
    rrd_data = {
    }  # type: Dict[Union[str, Tuple[Any, Any, Any, Any, Any, Any]], Union[Tuple[float, float, float], TimeSeries]]
    for (site, host_name, service_description), entries in by_service.items():
        try:
            for (perfvar, cf, scale), data in \
                    fetch_rrd_data(site, host_name, service_description, entries, graph_recipe,
                                   graph_data_range):
                rrd_data[(site, host_name, service_description, perfvar, cf,
                          scale)] = TimeSeries(data)
        except livestatus.MKLivestatusNotFoundError:
            pass

    start_time, end_time, step = align_and_resample_rrds(rrd_data,
                                                         graph_recipe["consolidation_function"])
    if start_time is None:  # Empty graph
        start_time, end_time = graph_data_range["time_range"]
        step = 60
    elif chop_last_empty_step(graph_data_range, step, rrd_data):
        end_time -= step

    rrd_data['__range'] = (start_time, end_time, step)

    return rrd_data

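# Illustrative sketch only (not part of the module): the mapping returned by
# fetch_rrd_data_for_graph() above keys each fetched curve by the 6-tuple
# (site, host_name, service_description, perfvar, cf, scale) and additionally
# stores the resolved time window under the special '__range' key. The literal
# values below are made up for illustration:
#
#   rrd_data = {
#       ("mysite", "myhost", "CPU load", "load1", "max", 1.0): TimeSeries([...]),
#       '__range': (1600000000, 1600003600, 60),  # (start_time, end_time, step)
#   }
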
def time_series_math(
    operator_id: Literal["+", "*", "-", "/", "MAX", "MIN", "AVERAGE", "MERGE"],
    operands_evaluated: List[TimeSeries],
) -> Optional[TimeSeries]:
    operators = time_series_operators()
    if operator_id not in operators:
        raise MKGeneralException(
            _("Undefined operator '%s' in graph expression") %
            escaping.escape_attribute(operator_id))

    # Test for correct arity on FOUND[evaluated] data
    if any((
            operator_id in ["-", "/"] and len(operands_evaluated) != 2,
            len(operands_evaluated) < 1,
    )):
        # raise MKGeneralException(_("Incorrect amount of data to correctly evaluate expression"))
        # Silently return so to get an empty graph slot
        return None

    _op_title, op_func = operators[operator_id]
    twindow = operands_evaluated[0].twindow

    return TimeSeries([op_func_wrapper(op_func, tsp) for tsp in zip(*operands_evaluated)], twindow)

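# A minimal usage sketch of the arity guard above, assuming two already evaluated
# curves `ts_a` and `ts_b` (TimeSeries with the same twindow); the names are
# hypothetical. Binary operators "-" and "/" require exactly two operands,
# everything else at least one; otherwise None is returned so the caller can
# render an empty graph slot instead of failing:
#
#   time_series_math("/", [ts_a, ts_b])   # op_func applied per time step -> TimeSeries
#   time_series_math("/", [ts_a])         # wrong arity -> None (empty slot)
#   time_series_math("%", [ts_a, ts_b])   # unknown operator -> MKGeneralException
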
def merge_multicol(row: Dict, rrdcols: List[ColumnName], params: Dict) -> TimeSeries:
    """Establish a single time series for the desired metric

    If the Livestatus query is performed in bulk over all possible metric names
    that translate to the desired one, it results in many empty columns per row.
    Non-empty values fall into 3 cases:

    1. They correspond to the desired metric
    2. They correspond to an old metric that translates into the desired metric
    3. Name collision: a metric of a different service translates to the desired
       metric, yet the same metric name also exists in the current service

    Thus filter out case 3 first, then pick both cases 1 & 2. Finalize by merging
    the at most 2 remaining time series into a single one.
    """
    relevant_ts = []
    desired_metric = params["metric"]
    check_command = row["service_check_command"]
    translations = check_metrics.get(check_command, {})

    for rrdcol in rrdcols:
        if not rrdcol.startswith("rrddata"):
            continue
        if row[rrdcol] is None:
            raise MKGeneralException(_("Cannot retrieve historic data with Nagios core"))

        current_metric = rrdcol.split(":")[1]
        if translations.get(current_metric, {}).get("name", desired_metric) == desired_metric:
            if len(row[rrdcol]) > 3:
                relevant_ts.append(row[rrdcol])

    if not relevant_ts:
        return TimeSeries([0, 0, 0])

    _op_title, op_func = ts.time_series_operators()["MERGE"]
    single_value_series = [ts.op_func_wrapper(op_func, tsp) for tsp in zip(*relevant_ts)]

    return TimeSeries(single_value_series)

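# Sketch of the docstring's filtering rule, with made-up names. Assume the desired
# metric is "mem_used" and check_metrics holds a translation table for the current
# check command along the lines of
#
#   {"memused": {"name": "mem_used"}, "swapused": {"name": "swap_used"}}
#
# For a column "rrddata:mem_used:..." there is no translation entry, so the
# .get(..., desired_metric) default keeps it (case 1). "rrddata:memused:..."
# translates to "mem_used" and is kept as well (case 2). "rrddata:swapused:..."
# translates to "swap_used" != "mem_used" and is skipped (case 3), which is the
# name-collision protection described in the docstring.
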
def expression_rrd(parameters: ExpressionParams, rrd_data: RRDData) -> Sequence[TimeSeries]:
    key = (parameters[0], parameters[1], parameters[2], parameters[3], parameters[4],
           parameters[5])
    if key in rrd_data:
        return [rrd_data[key]]
    num_points, twindow = _derive_num_points_twindow(rrd_data)
    return [TimeSeries([None] * num_points, twindow)]

def time_series_math(operator_id, operands_evaluated) -> TimeSeries:
    operators = time_series_operators()
    if operator_id not in operators:
        raise MKGeneralException(
            _("Undefined operator '%s' in graph expression") %
            escaping.escape_attribute(operator_id))

    _op_title, op_func = operators[operator_id]
    twindow = operands_evaluated[0].twindow

    return TimeSeries([op_func_wrapper(op_func, tsp) for tsp in zip(*operands_evaluated)], twindow)

def test_halfstep_interpolation() -> None:
    assert artwork.halfstep_interpolation(TimeSeries([5.0, 7.0, None], (123, 234, 10))) == [
        5.0,
        5.0,
        5.0,
        6.0,
        7.0,
        7.0,
        None,
    ]

def evaluate_time_series_expression(expression, rrd_data) -> List[TimeSeries]:
    if rrd_data:
        sample_data = next(iter(rrd_data.values()))
        num_points = len(sample_data)
        twindow = sample_data.twindow
    else:
        # no data, default clean graph, use for pure scalars on custom graphs
        num_points = 1
        twindow = (0, 60, 60)

    if expression[0] == "operator":
        operator_id, operands = expression[1:]
        operands_evaluated_l = [evaluate_time_series_expression(a, rrd_data) for a in operands]
        operands_evaluated = [item for lists in operands_evaluated_l for item in lists]
        return [time_series_math(operator_id, operands_evaluated)]

    if expression[0] == "transformation":
        (transform, conf), operands = expression[1:]
        operands_evaluated = evaluate_time_series_expression(operands[0], rrd_data)
        return evaluate_timeseries_transformation(transform, conf, operands_evaluated)

    if expression[0] == "rrd":
        key = tuple(expression[1:])
        if key in rrd_data:
            return [rrd_data[key]]
        return [TimeSeries([None] * num_points, twindow)]

    if expression[0] == "constant":
        return [TimeSeries([expression[1]] * num_points, twindow)]

    if expression[0] == "combined":
        metrics = resolve_combined_single_metric_spec(expression[1])
        curves = []
        for m in metrics:
            for curve in evaluate_time_series_expression(m['expression'], rrd_data):
                curve.metadata = {k: m[k] for k in m if k in ['line_type', 'title']}
                curves.append(curve)
        return curves

    raise NotImplementedError()

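# Sketch of the expression tuples handled above (concrete values are made up);
# the shapes follow directly from the unpacking in each branch:
#
#   ("rrd", site, host_name, service_description, perfvar, cf, scale)
#       -> looked up as a key in rrd_data
#   ("constant", 42.0)
#       -> a flat line with num_points samples
#   ("operator", "+", [expr_1, expr_2, ...])
#       -> operands evaluated recursively, then combined via time_series_math()
#   ("transformation", (transform, conf), [expr])
#       -> single operand evaluated, then passed to evaluate_timeseries_transformation()
#   ("combined", single_metric_spec)
#       -> resolved via resolve_combined_single_metric_spec(), one curve per metric
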
def evaluate_time_series_expression(expression, rrd_data):
    if rrd_data:
        sample_data = next(iter(rrd_data.values()))
        num_points = len(sample_data)
        twindow = sample_data.twindow
    else:
        # no data, default clean graph, use for pure scalars on custom graphs
        num_points = 1
        twindow = (0, 60, 60)

    if expression[0] == "operator":
        operator_id, operands = expression[1:]
        operands_evaluated = [evaluate_time_series_expression(a, rrd_data) for a in operands]
        return time_series_math(operator_id, operands_evaluated)

    if expression[0] == "transformation":
        (transform, conf), operands = expression[1:]
        operands_evaluated = evaluate_time_series_expression(operands[0], rrd_data)
        return evaluate_timeseries_transformation(transform, conf, operands_evaluated)

    if expression[0] == "rrd":
        key = tuple(expression[1:])
        if key in rrd_data:
            return rrd_data[key]
        return TimeSeries([None] * num_points, twindow)

    if expression[0] == "constant":
        return TimeSeries([expression[1]] * num_points, twindow)

    if expression[0] == "combined":
        metrics = resolve_combined_single_metric_spec(expression[1])
        return [(m["line_type"], m["color"], m['title'],
                 evaluate_time_series_expression(m['expression'], rrd_data)) for m in metrics]

    raise NotImplementedError()

def fetch_rrd_data_for_graph(graph_recipe, graph_data_range):
    needed_rrd_data = get_needed_sources(graph_recipe["metrics"])
    by_service = group_needed_rrd_data_by_service(needed_rrd_data)
    rrd_data: Dict[Tuple[str, str, str, str, str, str], TimeSeries] = {}
    for (site, host_name, service_description), entries in by_service.items():
        try:
            for (perfvar, cf, scale), data in \
                    fetch_rrd_data(site, host_name, service_description, entries, graph_recipe,
                                   graph_data_range):
                rrd_data[(site, host_name, service_description, perfvar, cf,
                          scale)] = TimeSeries(data)
        except livestatus.MKLivestatusNotFoundError:
            pass
    align_and_resample_rrds(rrd_data, graph_recipe["consolidation_function"])
    chop_last_empty_step(graph_data_range, rrd_data)
    return rrd_data

def evaluate_time_series_expression(expression, rrd_data):
    if rrd_data:
        num_points = len(list(rrd_data.values())[0])
    else:
        num_points = 1

    if expression[0] == "operator":
        operator_id, operands = expression[1:]
        operands_evaluated = [evaluate_time_series_expression(a, rrd_data) for a in operands]
        return time_series_math(operator_id, operands_evaluated)

    if expression[0] == "transformation":
        (transform, conf), operands = expression[1:]
        operands_evaluated = evaluate_time_series_expression(operands[0], rrd_data)

        if transform == 'percentile':
            return time_series_operator_perc(operands_evaluated, conf)

        if transform == 'forecast':
            if cmk_version.is_raw_edition():
                raise MKGeneralException(
                    _("Forecast calculations are only available with the "
                      "Checkmk Enterprise Editions"))
            # Suppression is needed to silence pylint in CRE environment
            from cmk.gui.cee.plugins.metrics.forecasts import time_series_transform_forecast  # pylint: disable=no-name-in-module
            return time_series_transform_forecast(
                TimeSeries(operands_evaluated, rrd_data['__range']), conf)

    if expression[0] == "rrd":
        key = tuple(expression[1:])
        if key in rrd_data:
            return rrd_data[key]
        return [None] * num_points

    if expression[0] == "constant":
        return [expression[1]] * num_points

    raise NotImplementedError()

def fetch_rrd_data_for_graph(graph_recipe, graph_data_range):
    needed_rrd_data = get_needed_sources(graph_recipe["metrics"])
    by_service = group_needed_rrd_data_by_service(needed_rrd_data)
    rrd_data = {}
    for (site, host_name, service_description), entries in by_service.items():
        try:
            for (perfvar, cf, scale), data in \
                    fetch_rrd_data(site, host_name, service_description, entries, graph_recipe,
                                   graph_data_range):
                rrd_data[(site, host_name, service_description, perfvar, cf,
                          scale)] = TimeSeries(data)
        except livestatus.MKLivestatusNotFoundError:
            pass

    start_time, end_time, step = align_and_resample_rrds(rrd_data,
                                                         graph_recipe["consolidation_function"])
    if start_time is None:  # Empty graph
        start_time, end_time = graph_data_range["time_range"]
        step = 60
    elif chop_last_empty_step(graph_data_range, step, rrd_data):
        end_time -= step

    rrd_data['__range'] = (start_time, end_time, step)

    return rrd_data

def evaluate_time_series_expression(expression, rrd_data):
    if rrd_data:
        num_points = len(next(iter(rrd_data.values())))
    else:
        num_points = 1

    if expression[0] == "operator":
        operator_id, operands = expression[1:]
        operands_evaluated = [evaluate_time_series_expression(a, rrd_data) for a in operands]
        return time_series_math(operator_id, operands_evaluated)

    if expression[0] == "transformation":
        (transform, conf), operands = expression[1:]
        operands_evaluated = evaluate_time_series_expression(operands[0], rrd_data)

        if transform == 'percentile':
            return time_series_operator_perc(operands_evaluated, conf)

        if transform == 'filter_top':
            if isinstance(operands_evaluated, TimeSeries):
                return operands_evaluated
            return operands_evaluated[:conf["amount"]]

        if transform == 'value_sort':
            if isinstance(operands_evaluated, TimeSeries):
                return operands_evaluated

            aggr_func = {
                "min": lambda x: min(x or [0]),
                "max": lambda x: max(x or [0]),
                "average": lambda x: sum(x) / float(len(x) or 1),
            }[conf['aggregation']]

            orderlist = sorted(operands_evaluated,
                               key=lambda metric: aggr_func(clean_time_series_point(metric[3])),
                               reverse=conf["reverse"])

            # fix multi-line stack line styling
            if orderlist[0][0] == 'stack':
                line_types = ['area'] + ['stack'] * (len(orderlist) - 1)
                orderlist = [(lt,) + metric[1:] for lt, metric in zip(line_types, orderlist)]

            return orderlist

        if transform == 'forecast':
            if cmk_version.is_raw_edition():
                raise MKGeneralException(
                    _("Forecast calculations are only available with the "
                      "Checkmk Enterprise Editions"))
            # Suppression is needed to silence pylint in CRE environment
            from cmk.gui.cee.plugins.metrics.forecasts import time_series_transform_forecast  # pylint: disable=no-name-in-module
            return time_series_transform_forecast(
                TimeSeries(operands_evaluated, rrd_data['__range']), conf)

    if expression[0] == "rrd":
        key = tuple(expression[1:])
        if key in rrd_data:
            return rrd_data[key]
        return [None] * num_points

    if expression[0] == "constant":
        return [expression[1]] * num_points

    if expression[0] == "combined" and not cmk_version.is_raw_edition():
        # Suppression is needed to silence pylint in CRE environment
        from cmk.gui.cee.plugins.metrics.graphs import resolve_combined_single_metric_spec  # pylint: disable=no-name-in-module
        metrics = resolve_combined_single_metric_spec(expression[1])
        return [(m["line_type"], m["color"], m['title'],
                 evaluate_time_series_expression(m['expression'], rrd_data)) for m in metrics]

    raise NotImplementedError()

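# Sketch of the transformation configs consumed above (keys taken from the code,
# example values made up):
#
#   ('filter_top', {"amount": 5})
#       -> keep only the first 5 curves of the operand list
#   ('value_sort', {"aggregation": "max", "reverse": True})
#       -> order curves by the chosen aggregate ("min", "max" or "average") of
#          their cleaned data points, descending when "reverse" is True
#   ('percentile', conf) and ('forecast', conf)
#       -> delegated to time_series_operator_perc() / the Enterprise forecast code
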
        operands_evaluated = list(
            chain.from_iterable(evaluate_time_series_expression(a, rrd_data) for a in operands))

        if result := time_series_math(operator_id, operands_evaluated):
            return [result]
        return []

    if expression[0] == "transformation":
        (transform, conf), operands = expression[1:]
        operands_evaluated = evaluate_time_series_expression(operands[0], rrd_data)
        return evaluate_timeseries_transformation(transform, conf, operands_evaluated)

    if expression[0] == "rrd":
        key = tuple(expression[1:])
        if key in rrd_data:
            return [rrd_data[key]]
        return [TimeSeries([None] * num_points, twindow)]

    if expression[0] == "constant":
        return [TimeSeries([expression[1]] * num_points, twindow)]

    if expression[0] == "combined":
        metrics = resolve_combined_single_metric_spec(expression[1])
        curves = []
        for m in metrics:
            for curve in evaluate_time_series_expression(m['expression'], rrd_data):
                curve.metadata = {k: m[k] for k in m if k in ['line_type', 'title']}
                curves.append(curve)
        return curves

    raise NotImplementedError()

def expression_constant(parameters: ExpressionParams, rrd_data: RRDData) -> Sequence[TimeSeries]:
    num_points, twindow = _derive_num_points_twindow(rrd_data)
    return [TimeSeries([parameters[0]] * num_points, twindow)]