Example 1
def test_evaluate():
    perfdata: Perfdata = [(n, len(n), u'', 120, 240, 0, 24)
                          for n in ['in', 'out']]
    translated_metrics = utils.translate_metrics(perfdata,
                                                 'check_mk-openvpn_clients')
    assert utils.evaluate("if_in_octets,8,*@bits/s",
                          translated_metrics) == (16.0,
                                                  utils.unit_info['bits/s'],
                                                  '#00e060')
    perfdata = [(n, len(n), u'', None, None, None, None)
                for n in ['/', 'fs_size']]
    translated_metrics = utils.translate_metrics(perfdata, 'check_mk-df')
    assert utils.evaluate("fs_size,fs_used,-#e3fff9",
                          translated_metrics) == (6291456,
                                                  utils.unit_info['bytes'],
                                                  '#e3fff9')

    # This is a terrible metric from the Nagios plugins. The test checks survival rather than
    # correctness. The unit "percent" is lost along the way; fixing this would also mean figuring
    # out how to represent graphs for the active-icmp check when the host has multiple addresses.
    assert utils.evaluate(
        '127.0.0.1pl',
        utils.translate_metrics(
            utils.parse_perf_data('127.0.0.1pl=5%;80;100;;')[0],
            "check_mk_active-icmp")) == (5, utils.unit_info[""], '#cc00ff')
Example 2
def _get_translated_metrics_from_perf_data(self, row):
    # Empty perf data means there is nothing to translate.
    perf_data_string = row["service_perf_data"].decode("utf-8").strip()
    if not perf_data_string:
        return
    self._perf_data, self._check_command = parse_perf_data(perf_data_string,
                                                           row["check_command"])
    return translate_metrics(self._perf_data, self._check_command)
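
A minimal sketch of the row this method expects: a livestatus-style mapping whose service_perf_data value is raw bytes (hence the decode above) and which carries the check command alongside it. The values here are illustrative, not taken from a real fixture:

    row = {
        "service_perf_data": b"in=2;120;240;0;24 out=3;120;240;0;24",
        "check_command": "check_mk-openvpn_clients",
    }
    # self._get_translated_metrics_from_perf_data(row) would then parse the
    # string and return the translated metrics, or None for empty perf data.
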
Example 3
def test_evaluate():
    perfdata: Perfdata = [(n, len(n), "", 120, 240, 0, 24)
                          for n in ["in", "out"]]
    translated_metrics = utils.translate_metrics(perfdata,
                                                 "check_mk-openvpn_clients")
    assert utils.evaluate("if_in_octets,8,*@bits/s", translated_metrics) == (
        16.0,
        utils.unit_info["bits/s"],
        "#00e060",
    )
    perfdata = [(n, len(n), "", None, None, None, None)
                for n in ["/", "fs_size"]]
    translated_metrics = utils.translate_metrics(perfdata, "check_mk-df")
    assert utils.evaluate("fs_size,fs_used,-#e3fff9", translated_metrics) == (
        6291456,
        utils.unit_info["bytes"],
        "#e3fff9",
    )

    # This is a terrible metric from the Nagios plugins. The test checks survival rather than
    # correctness. The unit "percent" is lost along the way; fixing this would also mean figuring
    # out how to represent graphs for the active-icmp check when the host has multiple addresses.
    assert (utils.evaluate(
        "127.0.0.1pl",
        utils.translate_metrics(
            utils.parse_perf_data("127.0.0.1pl=5%;80;100;;")[0],
            "check_mk_active-icmp"),
    ) == (5, utils.unit_info[""], "#cc00ff"))

    # Here the user has metrics that represent subnets, but the values look like floats.
    # Test that evaluation recognizes the metric from the perf data.
    assert (utils.evaluate(
        "10.172",
        utils.translate_metrics(
            utils.parse_perf_data("10.172=6")[0], "check_mk-local"),
    ) == (6, utils.unit_info[""], "#cc00ff"))
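
The strings fed to utils.evaluate above are RPN expressions: "if_in_octets,8,*" multiplies the metric by 8, a trailing "@bits/s" forces the unit, and a trailing "#e3fff9" forces the color. A toy sketch of the arithmetic part only, to make the notation concrete (this is not Checkmk's actual evaluator):

    def rpn_eval(expression, values):
        # Strip the optional "@unit" and "#color" suffixes first.
        for sep in ("@", "#"):
            expression = expression.split(sep)[0]
        ops = {"+": lambda a, b: a + b, "-": lambda a, b: a - b,
               "*": lambda a, b: a * b, "/": lambda a, b: a / b}
        stack = []
        for token in expression.split(","):
            if token in ops:
                b, a = stack.pop(), stack.pop()
                stack.append(ops[token](a, b))
            elif token in values:
                # Known metric names win over numeric parsing, which is why
                # the subnet-style name "10.172" above resolves as a metric.
                stack.append(values[token])
            else:
                stack.append(float(token))
        return stack[0]

    assert rpn_eval("if_in_octets,8,*@bits/s", {"if_in_octets": 2}) == 16.0
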
Example 4
def page_pnp_template():
    try:
        template_id = html.request.var("id")

        check_command, perf_string = template_id.split(":", 1)

        # TODO: pnp-templates/default.php still returns a default value of
        # 1 for the value and "" for the unit.
        perf_data, _ = parse_perf_data(perf_string)
        translated_metrics = translate_metrics(perf_data, check_command)
        if not translated_metrics:
            return  # check not supported

        # Collect the output in a string so that, in case of an exception,
        # no definitions are output at all.
        output = ""
        for graph_template in get_graph_templates(translated_metrics):
            graph_code = render_graph_pnp(graph_template, translated_metrics)
            output += graph_code

        html.write(output)

    except Exception:
        html.write("An error occured:\n%s\n" % traceback.format_exc())
Example 5
def test_parse_perf_data2(request_context, monkeypatch):
    # Enable debug mode before the raises block; the setattr itself never
    # raises, so it does not belong inside pytest.raises.
    monkeypatch.setattr(config, "debug", True)
    with pytest.raises(ValueError):
        utils.parse_perf_data("hi ho", None)
Example 6
def test_parse_perf_data(request_context, perf_str, check_command, result):
    assert utils.parse_perf_data(perf_str, check_command) == result
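
This test body is driven by a pytest.mark.parametrize decorator that the snippet omits. A hypothetical sketch of the shape it would take, with expected results left out because they depend on the parser's exact output format:

    import pytest

    @pytest.mark.parametrize("perf_str, check_command", [
        # Mirrors the seven-field entries used elsewhere on this page.
        ("in=2;120;240;0;24 out=3;120;240;0;24", "check_mk-openvpn_clients"),
    ])
    def test_parse_perf_data_shape(request_context, perf_str, check_command):
        perf_data, _ = utils.parse_perf_data(perf_str, check_command)
        assert all(len(entry) == 7 for entry in perf_data)
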
Example 7
def translate_perf_data(perf_data_string, check_command=None):
    perf_data, check_command = parse_perf_data(perf_data_string, check_command)
    return translate_metrics(perf_data, check_command)
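
A quick usage sketch of this helper, reusing the icmp perf string from example 3 above:

    translated = translate_perf_data("127.0.0.1pl=5%;80;100;;",
                                     check_command="check_mk_active-icmp")
    # 'translated' maps metric names to their value/unit/color information,
    # ready to be passed to evaluate() or get_graph_templates().
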
Example 8
def translate_perf_data(perf_data_string, check_command=None):
    # type: (str, Optional[str]) -> TranslatedMetrics
    perf_data, check_command = parse_perf_data(perf_data_string, check_command)
    return translate_metrics(perf_data, check_command)
Example 9
def translate_perf_data(
        perf_data_string: str,
        check_command: Optional[str] = None) -> TranslatedMetrics:
    perf_data, check_command = parse_perf_data(perf_data_string, check_command)
    return translate_metrics(perf_data, check_command)
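
Examples 7 through 9 show the same helper at three stages of typing: untyped, a Python 2 style type comment, and inline annotations. The annotated variant also needs the typing import; the TranslatedMetrics alias itself is not shown on this page, but from how translated metrics are used above it is presumably a name-to-info-dict mapping:

    from typing import Any, Dict, Optional

    # Assumed shape of the alias, inferred from usage on this page; the real
    # definition lives in the metrics module.
    TranslatedMetrics = Dict[str, Dict[str, Any]]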