示例#1
0
def test_evaluate():
    """Evaluate RPN metric expressions against translated perf data."""
    perfdata: Perfdata = [(name, len(name), u'', 120, 240, 0, 24)
                          for name in ('in', 'out')]
    translated_metrics = utils.translate_metrics(perfdata,
                                                 'check_mk-openvpn_clients')
    result = utils.evaluate("if_in_octets,8,*@bits/s", translated_metrics)
    assert result == (16.0, utils.unit_info['bits/s'], '#00e060')

    perfdata = [(name, len(name), u'', None, None, None, None)
                for name in ('/', 'fs_size')]
    translated_metrics = utils.translate_metrics(perfdata, 'check_mk-df')
    result = utils.evaluate("fs_size,fs_used,-#e3fff9", translated_metrics)
    assert result == (6291456, utils.unit_info['bytes'], '#e3fff9')

    # This is a terrible metric from Nagios plugins.  The test is for survival
    # instead of correctness.  The unit "percent" is lost on the way; fixing
    # this would imply also figuring out how to represent graphs for the
    # active-icmp check when a host has multiple addresses.
    translated_metrics = utils.translate_metrics(
        utils.parse_perf_data('127.0.0.1pl=5%;80;100;;')[0],
        "check_mk_active-icmp")
    assert utils.evaluate('127.0.0.1pl',
                          translated_metrics) == (5, utils.unit_info[""],
                                                  '#cc00ff')
示例#2
0
def test_evaluate():
    """Evaluate metric expressions and check value, unit and color."""
    perfdata = [(name, len(name), u'', 120, 240, 0, 24)
                for name in ('in', 'out')]  # type: List[Tuple]
    translated_metrics = utils.translate_metrics(perfdata, 'check_mk-openvpn_clients')
    result = utils.evaluate("if_in_octets,8,*@bits/s", translated_metrics)
    assert result == (16.0, utils.unit_info['bits/s'], '#00e060')
    perfdata = [(name, len(name), u'', None, None, None, None)
                for name in ('/', 'fs_size')]
    translated_metrics = utils.translate_metrics(perfdata, 'check_mk-df')
    result = utils.evaluate("fs_size,fs_used,-#e3fff9", translated_metrics)
    assert result == (6291456, utils.unit_info['bytes'], '#e3fff9')
示例#3
0
 def _get_translated_metrics_from_perf_data(self, row):
     """Parse and translate the service perf data carried by *row*.

     Stores the parsed perf data and check command on the instance and
     returns the translated metrics, or None when the row has no perf data.
     """
     raw = row["service_perf_data"].decode("utf-8").strip()
     if not raw:
         return
     self._perf_data, self._check_command = parse_perf_data(
         raw, row["check_command"])
     return translate_metrics(self._perf_data, self._check_command)
示例#4
0
def test_replace_expression():
    """%(name:max@unit) placeholders are expanded from translated metrics."""
    perfdata: Perfdata = [(name, len(name), "", 120, 240, 0, 25)
                          for name in ("load1",)]
    translated_metrics = utils.translate_metrics(perfdata,
                                                 "check_mk-cpu.loads")
    rendered = utils.replace_expressions(
        "CPU Load - %(load1:max@count) CPU Cores", translated_metrics)
    assert rendered == "CPU Load - 25  CPU Cores"
示例#5
0
def test_get_graph_templates(load_plugins, metric_names, check_command,
                             graph_ids):
    """The template ids matching the metrics are exactly *graph_ids*."""
    perfdata: List[Tuple] = [(name, 0, u'', None, None, None, None)
                             for name in metric_names]
    translated_metrics = utils.translate_metrics(perfdata, check_command)
    found_ids = {template['id']
                 for template in utils.get_graph_templates(translated_metrics)}
    assert found_ids == set(graph_ids)
示例#6
0
def test_replace_expression():
    """%(name:max@unit) placeholders are expanded from translated metrics."""
    perfdata: Perfdata = [(name, len(name), u'', 120, 240, 0, 25)
                          for name in ('load1',)]
    translated_metrics = utils.translate_metrics(perfdata,
                                                 'check_mk-cpu.loads')
    rendered = utils.replace_expressions(
        "CPU Load - %(load1:max@count) CPU Cores", translated_metrics)
    assert rendered == 'CPU Load - 25  CPU Cores'
示例#7
0
def test_evaluate():
    """Evaluate RPN metric expressions against translated perf data."""
    perfdata: Perfdata = [(name, len(name), "", 120, 240, 0, 24)
                          for name in ("in", "out")]
    translated_metrics = utils.translate_metrics(perfdata,
                                                 "check_mk-openvpn_clients")
    result = utils.evaluate("if_in_octets,8,*@bits/s", translated_metrics)
    assert result == (16.0, utils.unit_info["bits/s"], "#00e060")

    perfdata = [(name, len(name), "", None, None, None, None)
                for name in ("/", "fs_size")]
    translated_metrics = utils.translate_metrics(perfdata, "check_mk-df")
    result = utils.evaluate("fs_size,fs_used,-#e3fff9", translated_metrics)
    assert result == (6291456, utils.unit_info["bytes"], "#e3fff9")

    # This is a terrible metric from Nagios plugins.  The test is for survival
    # instead of correctness.  The unit "percent" is lost on the way; fixing
    # this would imply also figuring out how to represent graphs for the
    # active-icmp check when a host has multiple addresses.
    translated_metrics = utils.translate_metrics(
        utils.parse_perf_data("127.0.0.1pl=5%;80;100;;")[0],
        "check_mk_active-icmp")
    assert utils.evaluate("127.0.0.1pl",
                          translated_metrics) == (5, utils.unit_info[""],
                                                  "#cc00ff")

    # Here the user has metrics that represent subnets, but the values look
    # like floats.  Test that evaluation recognizes the metric from the perf
    # data.
    translated_metrics = utils.translate_metrics(
        utils.parse_perf_data("10.172=6")[0], "check_mk-local")
    assert utils.evaluate("10.172",
                          translated_metrics) == (6, utils.unit_info[""],
                                                  "#cc00ff")
示例#8
0
def page_pnp_template():
    """Render PNP graph definitions for the template id given in the request.

    The request variable "id" has the form "<check_command>:<perf_string>".
    Writes the rendered graph definitions to the page; on any error the
    traceback is written instead, so no partial definitions are emitted.
    """
    try:
        template_id = html.request.var("id")

        check_command, perf_string = template_id.split(":", 1)

        # TODO: pnp-templates/default.php still returns a default value of
        # 1 for the value and "" for the unit.
        perf_data, _ = parse_perf_data(perf_string)
        translated_metrics = translate_metrics(perf_data, check_command)
        if not translated_metrics:
            return  # check not supported

        # Collect the whole output in one string before writing, so that an
        # exception during rendering does not output partial definitions.
        # "".join avoids the quadratic += string build of the old loop.
        output = "".join(
            render_graph_pnp(graph_template, translated_metrics)
            for graph_template in get_graph_templates(translated_metrics))

        html.write(output)

    except Exception:
        # Fixed typo in the user-facing message: "occured" -> "occurred".
        html.write("An error occurred:\n%s\n" % traceback.format_exc())
示例#9
0
def test_get_graph_templates(metric_names, check_command, graph_ids):
    """The template ids matching the metrics are exactly *graph_ids*."""
    perfdata: Perfdata = [(name, 0, "", None, None, None, None)
                          for name in metric_names]
    translated_metrics = utils.translate_metrics(perfdata, check_command)
    found_ids = {template["id"]
                 for template in utils.get_graph_templates(translated_metrics)}
    assert found_ids == set(graph_ids)
示例#10
0
def translate_perf_data(perf_data_string, check_command=None):
    """Parse a perf data string and return the translated metrics."""
    return translate_metrics(*parse_perf_data(perf_data_string, check_command))
示例#11
0
def translate_perf_data(perf_data_string, check_command=None):
    # type: (str, Optional[str]) -> TranslatedMetrics
    """Parse a perf data string and return the translated metrics."""
    parsed, command = parse_perf_data(perf_data_string, check_command)
    return translate_metrics(parsed, command)
示例#12
0
def translate_perf_data(
        perf_data_string: str,
        check_command: Optional[str] = None) -> TranslatedMetrics:
    """Parse a perf data string and return the translated metrics."""
    return translate_metrics(*parse_perf_data(perf_data_string, check_command))