def test_evaluate():
    """Check RPN metric expression evaluation against translated perf data."""
    perfdata: Perfdata = [(name, len(name), u'', 120, 240, 0, 24) for name in ('in', 'out')]
    translated = utils.translate_metrics(perfdata, 'check_mk-openvpn_clients')
    assert utils.evaluate("if_in_octets,8,*@bits/s",
                          translated) == (16.0, utils.unit_info['bits/s'], '#00e060')

    perfdata = [(name, len(name), u'', None, None, None, None) for name in ('/', 'fs_size')]
    translated = utils.translate_metrics(perfdata, 'check_mk-df')
    assert utils.evaluate("fs_size,fs_used,-#e3fff9",
                          translated) == (6291456, utils.unit_info['bytes'], '#e3fff9')

    # This is a terrible metric from Nagios plugins. Test is for survival instead of correctness
    # The unit "percent" is lost on the way. Fixing this would imply also figuring out how to represent
    # graphs for active-icmp check when host has multiple addresses.
    icmp_metrics = utils.translate_metrics(
        utils.parse_perf_data('127.0.0.1pl=5%;80;100;;')[0], "check_mk_active-icmp")
    assert utils.evaluate('127.0.0.1pl', icmp_metrics) == (5, utils.unit_info[""], '#cc00ff')
def test_evaluate():
    """Check RPN metric expression evaluation against translated perf data."""
    perfdata = [(name, len(name), u'', 120, 240, 0, 24) for name in ('in', 'out')]  # type: List[Tuple]
    translated = utils.translate_metrics(perfdata, 'check_mk-openvpn_clients')
    assert utils.evaluate("if_in_octets,8,*@bits/s",
                          translated) == (16.0, utils.unit_info['bits/s'], '#00e060')

    perfdata = [(name, len(name), u'', None, None, None, None) for name in ('/', 'fs_size')]
    translated = utils.translate_metrics(perfdata, 'check_mk-df')
    assert utils.evaluate("fs_size,fs_used,-#e3fff9",
                          translated) == (6291456, utils.unit_info['bytes'], '#e3fff9')
def _get_translated_metrics_from_perf_data(self, row):
    """Parse a livestatus row's perf data string and translate it into metrics.

    Stores the parsed perf data and check command on the instance as a side
    effect. Returns None when the row carries no perf data.
    """
    raw_perf_data = row["service_perf_data"].decode("utf-8").strip()
    if not raw_perf_data:
        return None
    self._perf_data, self._check_command = parse_perf_data(raw_perf_data, row["check_command"])
    return translate_metrics(self._perf_data, self._check_command)
def test_replace_expression():
    """Expression placeholders in display strings are replaced by metric values."""
    perfdata: Perfdata = [(name, len(name), "", 120, 240, 0, 25) for name in ("load1",)]
    translated = utils.translate_metrics(perfdata, "check_mk-cpu.loads")
    rendered = utils.replace_expressions("CPU Load - %(load1:max@count) CPU Cores", translated)
    assert rendered == "CPU Load - 25 CPU Cores"
def test_get_graph_templates(load_plugins, metric_names, check_command, graph_ids):
    """The graph templates matching the translated metrics have the expected ids."""
    perfdata: List[Tuple] = [(name, 0, u'', None, None, None, None) for name in metric_names]
    translated = utils.translate_metrics(perfdata, check_command)
    template_ids = {template['id'] for template in utils.get_graph_templates(translated)}
    assert set(graph_ids) == template_ids
def test_replace_expression():
    """Expression placeholders in display strings are replaced by metric values."""
    perfdata: Perfdata = [(name, len(name), u'', 120, 240, 0, 25) for name in ('load1',)]
    translated = utils.translate_metrics(perfdata, 'check_mk-cpu.loads')
    rendered = utils.replace_expressions("CPU Load - %(load1:max@count) CPU Cores", translated)
    assert rendered == 'CPU Load - 25 CPU Cores'
def test_evaluate():
    """Check RPN metric expression evaluation against translated perf data."""
    perfdata: Perfdata = [(name, len(name), "", 120, 240, 0, 24) for name in ("in", "out")]
    translated = utils.translate_metrics(perfdata, "check_mk-openvpn_clients")
    expected = (16.0, utils.unit_info["bits/s"], "#00e060")
    assert utils.evaluate("if_in_octets,8,*@bits/s", translated) == expected

    perfdata = [(name, len(name), "", None, None, None, None) for name in ("/", "fs_size")]
    translated = utils.translate_metrics(perfdata, "check_mk-df")
    expected = (6291456, utils.unit_info["bytes"], "#e3fff9")
    assert utils.evaluate("fs_size,fs_used,-#e3fff9", translated) == expected

    # This is a terrible metric from Nagios plugins. Test is for survival instead of correctness
    # The unit "percent" is lost on the way. Fixing this would imply also figuring out how to represent
    # graphs for active-icmp check when host has multiple addresses.
    icmp_metrics = utils.translate_metrics(
        utils.parse_perf_data("127.0.0.1pl=5%;80;100;;")[0], "check_mk_active-icmp")
    assert utils.evaluate("127.0.0.1pl", icmp_metrics) == (5, utils.unit_info[""], "#cc00ff")

    # Here the user has a metrics that represent subnets, but the values look like floats
    # Test that evaluation recognizes the metric from the perf data
    subnet_metrics = utils.translate_metrics(
        utils.parse_perf_data("10.172=6")[0], "check_mk-local")
    assert utils.evaluate("10.172", subnet_metrics) == (6, utils.unit_info[""], "#cc00ff")
def page_pnp_template():
    """Render PNP graph template code for the template id given in the request.

    The request variable "id" has the form "<check_command>:<perf_string>".
    Writes the rendered graph definitions to the response; on any error a
    plain-text traceback is written instead so the caller sees the failure.
    """
    try:
        template_id = html.request.var("id")
        check_command, perf_string = template_id.split(":", 1)

        # TODO: pnp-templates/default.php still returns a default value of
        # 1 for the value and "" for the unit.
        perf_data, _ = parse_perf_data(perf_string)
        translated_metrics = translate_metrics(perf_data, check_command)
        if not translated_metrics:
            return  # check not supported

        # Collect output in a string first so that an exception while rendering
        # does not emit partial definitions. join avoids quadratic += growth.
        output = "".join(
            render_graph_pnp(graph_template, translated_metrics)
            for graph_template in get_graph_templates(translated_metrics))
        html.write(output)
    except Exception:
        # Fix: the message previously misspelled "occurred".
        html.write("An error occurred:\n%s\n" % traceback.format_exc())
def test_get_graph_templates(metric_names, check_command, graph_ids):
    """The graph templates matching the translated metrics have the expected ids."""
    perfdata: Perfdata = [(name, 0, "", None, None, None, None) for name in metric_names]
    translated = utils.translate_metrics(perfdata, check_command)
    template_ids = {template["id"] for template in utils.get_graph_templates(translated)}
    assert set(graph_ids) == template_ids
def translate_perf_data(perf_data_string, check_command=None):
    """Parse a raw perf data string and translate it into metric objects."""
    parsed, resolved_command = parse_perf_data(perf_data_string, check_command)
    return translate_metrics(parsed, resolved_command)
def translate_perf_data(perf_data_string, check_command=None):
    # type: (str, Optional[str]) -> TranslatedMetrics
    """Parse a raw perf data string and translate it into metric objects."""
    parsed, resolved_command = parse_perf_data(perf_data_string, check_command)
    return translate_metrics(parsed, resolved_command)
def translate_perf_data(perf_data_string: str,
                        check_command: Optional[str] = None) -> TranslatedMetrics:
    """Parse a raw perf data string and translate it into metric objects."""
    parsed, resolved_command = parse_perf_data(perf_data_string, check_command)
    return translate_metrics(parsed, resolved_command)