def test_rpn_stack(expression, result):
    """An RPN metric expression resolves to the expected graph recipe expression."""
    perf_data = '/=163651.992188;;;; fs_size=477500.03125;;;; growth=-1280.489081;;;;'
    translated = metrics.translate_perf_data(perf_data, "check_mk-df")
    row = {"site": "", "host_name": "", "service_description": ""}
    recipe_expression = gt.metric_expression_to_graph_recipe_expression(
        expression, translated, row, None)
    assert recipe_expression == result
def test_horizontal_rules_from_thresholds(perf_string, result):
    """Plain, titled, and RPN-scaled threshold specs yield the expected horizontal rules."""
    threshold_specs = [
        "one:warn",
        ("power:crit", "Critical power"),
        ("output:warn,-1,*", "Warning output"),
    ]
    translated = metrics.translate_perf_data(perf_string)
    assert utils.horizontal_rules_from_thresholds(threshold_specs, translated) == result
def test_horizontal_rules_from_thresholds(perf_string, result):
    """Threshold specs (plain, titled, RPN-scaled) produce the expected rules."""
    assert gt._horizontal_rules_from_thresholds(
        [
            "one:warn",
            ("power:crit", "Critical power"),
            ("output:warn,-1,*", "Warning output"),
        ],
        metrics.translate_perf_data(perf_string),
    ) == result
def create_data_for_single_metric(cls, properties, context):
    """Collect historic and live data points for the metric chosen in *properties*.

    Returns a (data, used_metrics) pair, where used_metrics holds one
    (row id, translated metric, row dict) triple per matching service row.
    """
    columns, data_rows = cls._get_data(properties, context)

    points = []
    selected_metrics = []
    for index, raw_row in enumerate(data_rows):
        row = dict(zip(columns, raw_row))
        translated = translate_perf_data(row["service_perf_data"],
                                         row["service_check_command"])
        metric = translated.get(properties['metric'])
        if metric is None:
            continue  # service does not provide the configured metric

        series = merge_multicol(row, columns, properties)
        site = row['site']
        host = row["host_name"]
        svc_url = makeuri(
            request,
            [("view_name", "service"), ("site", site), ("host", host),
             ("service", row['service_description'])],
            filename="view.py",
        )
        row_id = "row_%d" % index

        # Historic samples from the time series (falsy samples are dropped)
        points.extend({
            "tag": row_id,
            "timestamp": ts,
            "value": sample,
            "label": host,
        } for ts, sample in series.time_data_pairs() if sample)

        # Current (live) value, rendered with the metric's own unit
        points.append({
            "tag": row_id,
            "timestamp": int(time.time()),
            "value": metric['value'],
            "formatted_value": metric['unit']['render'](metric['value']),
            "url": svc_url,
            "label": host,
        })
        selected_metrics.append((row_id, metric, row))

    return points, selected_metrics
def show_without_timeseries(self):
    """Render the dashlet's single-value view (no historic time series)."""

    @site_query
    def query(cls, properties, context):
        # Livestatus columns fetched for the selected service.
        return [
            "host_name", "service_check_command", "service_description", "service_perf_data",
            "service_state"
        ]

    col_names, data = query(  # pylint: disable=unbalanced-tuple-unpacking
        self, json.dumps(self.vs_parameters().value_to_json(self._dashlet_spec)),
        self._dashlet_spec["context"])

    if not data:
        raise MKUserError(None, _("There are no metrics meeting your context filters."))

    # Only the first matching service row is displayed.
    row = dict(zip(col_names, data[0]))

    site = row["site"]
    host = row["host_name"]
    service = row["service_description"]
    metric = self._dashlet_spec.get("metric", "")

    # Resolve the configured metric name against the service's perf data.
    t_metrics = translate_perf_data(row["service_perf_data"], row["service_check_command"])
    chosen_metric = t_metrics.get(metric)
    if chosen_metric is None:
        raise MKUserError(
            None,
            _("The configured metric \"%s\" could not be found. For the "
              "selected service \"%s\" you can choose from the following metrics: %s") %
            (metric, service, ", ".join([m["title"] for m in t_metrics.values()])))

    # URLs / links to the underlying site, host and service views.
    svc_url = "view.py?view_name=service&site=%s&host=%s&service=%s" % (
        html.urlencode(site), html.urlencode(host), html.urlencode(service))
    links = {
        "site": html.render_a(site,
                              "view.py?view_name=sitehosts&site=%s" % (html.urlencode(site))),
        "host": html.render_a(
            host, "view.py?view_name=host&site=%s&host=%s" %
            (html.urlencode(site), html.urlencode(host))),
        "service": html.render_a(service, svc_url)
    }
    render_options = self._dashlet_spec["render_options"]

    svc_state = row["service_state"]

    html.open_div(class_="metric")
    metric_spec = {
        "site": site,
        "host": host,
        "service": service,
        "metric": chosen_metric.get("title", metric)
    }
    titles = self._get_titles(metric_spec, links, render_options)
    self._render_metric_content(chosen_metric, render_options, titles, svc_state, svc_url)
    html.close_div()
def create_data_for_single_metric(
    properties,
    context: VisualContext,
    column_generator: Callable[[Any, VisualContext], List[str]],
) -> Tuple[List[Dict[str, Any]], List[Tuple[str, TranslatedMetric, Dict[str, LivestatusColumn]]]]:
    """Collect historic and live data points for the metric chosen in *properties*.

    Returns a (data, used_metrics) pair; used_metrics holds one
    (row id, translated metric, row dict) triple per matching service row.
    """
    # TODO: should return live value and historic values as two different elements, for better typing support.
    columns, data_rows = service_table_query(properties, context, column_generator)
    data = []
    used_metrics = []
    for idx, row in enumerate(data_rows):
        d_row = dict(zip(columns, row))
        # Resolve the configured metric against the service's perf data.
        translated_metrics = translate_perf_data(
            d_row["service_perf_data"], d_row["service_check_command"]
        )
        metric = translated_metrics.get(properties["metric"])
        if metric is None:
            # Service does not provide the configured metric: skip the row.
            continue
        series = merge_multicol(d_row, columns, properties)
        host = d_row["host_name"]
        row_id = "row_%d" % idx
        # Historic values (falsy samples are dropped)
        for ts, elem in series.time_data_pairs():
            if elem:
                data.append(
                    {
                        "tag": row_id,
                        "timestamp": ts,
                        "value": elem,
                        "label": host,
                    }
                )
        # Live value ("last_value" marks it as the current sample)
        data.append(
            {
                "tag": row_id,
                "last_value": True,
                "timestamp": int(time.time()),
                "value": metric["value"],
                "label": host,
                "url": create_service_view_url(d_row),
            }
        )
        used_metrics.append((row_id, metric, d_row))
    return data, used_metrics
def test_metric_unit_color(expression, perf_string, check_command, result_color):
    """metric_unit_color() reports the expected color plus the metric's unit id."""
    metrics.fixup_unit_info()
    translated = metrics.translate_perf_data(perf_string, check_command)
    unit_id = translated.get(expression, {}).get('unit', {}).get('id', '')
    expected = {"color": result_color, "unit": unit_id}
    assert gt.metric_unit_color(expression, translated, ['test']) == expected
def test_metric_unit_color(expression, perf_string, check_command, result_color):
    """metric_unit_color() returns the parametrized color with the metric's unit id."""
    metrics.fixup_unit_info()
    translated = metrics.translate_perf_data(perf_string, check_command)

    entry = translated.get(expression)
    assert entry is not None
    unit_info = entry.get("unit")
    assert unit_info is not None

    expected = {
        "color": result_color,
        "unit": unit_info.get("id"),
    }
    assert gt.metric_unit_color(expression, translated, ["test"]) == expected
def create_data_for_single_metric(properties, context, column_generator):
    """Collect historic and live data points for the metric chosen in *properties*.

    Returns a (data, used_metrics) pair; used_metrics holds one
    (row id, translated metric, row dict) triple per matching service row.
    """
    columns, data_rows = service_table_query(properties, context, column_generator)

    points = []
    matched = []
    for index, raw_row in enumerate(data_rows):
        row = dict(zip(columns, raw_row))
        translated = translate_perf_data(row["service_perf_data"],
                                         row["service_check_command"])
        metric = translated.get(properties['metric'])
        if metric is None:
            continue  # service does not provide the configured metric

        series = merge_multicol(row, columns, properties)
        host = row["host_name"]
        row_id = "row_%d" % index

        # Historic samples from the time series (falsy samples are dropped)
        points.extend({
            "tag": row_id,
            "timestamp": ts,
            "value": sample,
            "label": host,
        } for ts, sample in series.time_data_pairs() if sample)

        # Current value ("last_value" marks it as the live sample)
        points.append({
            "tag": row_id,
            "last_value": True,
            "timestamp": int(time.time()),
            "value": metric['value'],
            "label": host,
            "url": create_service_view_url(row),
        })
        matched.append((row_id, metric, row))

    return points, matched
def test_create_graph_recipe_from_template():
    """A df-style graph template expands into the expected concrete graph recipe."""
    metrics.fixup_unit_info()
    graph_template = {
        "metrics": [
            ("fs_used", "area"),
            ("fs_size,fs_used,-#e3fff9", "stack", "Free space"),
            ("fs_size", "line"),
        ],
        "scalars": [
            "fs_used:warn",
            "fs_used:crit",
        ],
        "range": (0, "fs_used:max"),
        "conflicting_metrics": ["fs_free"],
    }
    translated_metrics = metrics.translate_perf_data(
        '/=163651.992188;;;; fs_size=477500.03125;;;; growth=-1280.489081;;;;', "check_mk-df")
    lq_row = {"site": "", "host_name": "", "service_description": ""}
    # NOTE(review): the "_" RRD metric name below presumably stems from translating
    # the "/" perf variable — confirm against the df metric translation rules.
    assert gt.create_graph_recipe_from_template(
        graph_template, translated_metrics, lq_row) == {
            'title': 'Used filesystem space',
            'metrics': [{
                'unit': 'bytes',
                'color': '#00ffc6',
                'title': 'Used filesystem space',
                'line_type': 'area',
                'expression': ('rrd', '', '', '', '_', 'max', 1048576)
            }, {
                'unit': 'bytes',
                'color': '#e3fff9',
                'title': 'Free space',
                'line_type': 'stack',
                'expression': ('operator', '-',
                               [('rrd', '', '', '', 'fs_size', 'max', 1048576),
                                ('rrd', '', '', '', '_', 'max', 1048576)])
            }, {
                'unit': 'bytes',
                'color': '#006040',
                'title': 'Filesystem size',
                'line_type': 'line',
                'expression': ('rrd', '', '', '', 'fs_size', 'max', 1048576)
            }],
            'unit': 'bytes',
            'explicit_vertical_range': (0.0, None),
            'horizontal_rules': [],
            'omit_zero_metrics': False,
            'consolidation_function': 'max'
        }
def test_metric_unit_color_exception(metric, perf_string, check_command):
    """An unresolvable metric makes metric_unit_color() raise MKGeneralException."""
    translated = metrics.translate_perf_data(perf_string, check_command)
    with pytest.raises(gt.MKGeneralException):
        gt.metric_unit_color(metric, translated, ['test'])
def test_metric_unit_color_skip(expression, perf_string, check_command):
    """Expressions that should be skipped make metric_unit_color() return None."""
    translated = metrics.translate_perf_data(perf_string, check_command)
    result = gt.metric_unit_color(expression, translated, ['test'])
    assert result is None
def test_create_graph_recipe_from_template():
    """A df-style graph template expands into the expected concrete graph recipe."""
    metrics.fixup_unit_info()
    graph_template = {
        "metrics": [
            ("fs_used", "area"),
            ("fs_size,fs_used,-#e3fff9", "stack", "Free space"),
            ("fs_size", "line"),
        ],
        "scalars": [
            "fs_used:warn",
            "fs_used:crit",
        ],
        "range": (0, "fs_used:max"),
        "conflicting_metrics": ["fs_free"],
    }
    translated_metrics = metrics.translate_perf_data(
        "/=163651.992188;;;; fs_size=477500.03125;;;; growth=-1280.489081;;;;", "check_mk-df")
    lq_row = {"site": "", "host_name": "", "service_description": ""}
    # NOTE(review): the "_" RRD metric name below presumably stems from translating
    # the "/" perf variable — confirm against the df metric translation rules.
    assert gt.create_graph_recipe_from_template(
        graph_template, translated_metrics, lq_row) == {
            "title": "Used filesystem space",
            "metrics": [
                {
                    "unit": "bytes",
                    "color": "#00ffc6",
                    "title": "Used filesystem space",
                    "line_type": "area",
                    "expression": ("rrd", "", "", "", "_", "max", 1048576),
                },
                {
                    "unit": "bytes",
                    "color": "#e3fff9",
                    "title": "Free space",
                    "line_type": "stack",
                    "expression": (
                        "operator",
                        "-",
                        [
                            ("rrd", "", "", "", "fs_size", "max", 1048576),
                            ("rrd", "", "", "", "_", "max", 1048576),
                        ],
                    ),
                },
                {
                    "unit": "bytes",
                    "color": "#006040",
                    "title": "Filesystem size",
                    "line_type": "line",
                    "expression": ("rrd", "", "", "", "fs_size", "max", 1048576),
                },
            ],
            "unit": "bytes",
            "explicit_vertical_range": (0.0, None),
            "horizontal_rules": [],
            "omit_zero_metrics": False,
            "consolidation_function": "max",
        }