Example 1
def test_cpu_loads_predictive(mocker):
    # make sure cpu_load check can handle predictive values
    mocker.patch("cmk.base.check_api._prediction.get_levels",
                 return_value=(None, (2.2, 4.2, None, None)))
    # TODO: don't mock this. Use the context managers.
    mocker.patch("cmk.base.plugin_contexts._hostname", value="unittest")
    mocker.patch("cmk.base.plugin_contexts._service_description",
                 value="unittest-sd")
    params = {
        'levels': {
            'period': 'minute',
            'horizon': 1,
            'levels_upper': ('absolute', (2.0, 4.0))
        }
    }
    section = Section(load=Load(0.5, 1.0, 1.5), num_cpus=4, num_threads=123)
    result = set(check_cpu_load(params, section))

    assert result == set((
        Result(state=State.OK,
               summary='15 min load: 1.50 (no reference for prediction yet)'),
        Result(state=State.OK, summary='15 min load per core: 0.38 (4 cores)'),
        Metric('load1', 0.5, boundaries=(0, 4.0)),
        Metric('load5', 1.0, boundaries=(0, 4.0)),
        Metric('load15', 1.5,
               levels=(2.2, 4.2)),  # those are the predicted values
    ))
Example 2
def test_cpu_threads_max_threads():
    section = Section(load=Load(0.1, 0.1, 0.1), num_cpus=4, num_threads=1234, max_threads=2468)
    params: Dict[str, Any] = {}
    result = set(check_cpu_threads(params, section))
    assert result == {
        Metric("thread_usage", 50.0),
        Metric("threads", 1234.0),
        Result(state=State.OK, summary="1234"),
        Result(state=State.OK, summary="Usage: 50.00%"),
    }
Example 3
def test_metric():
    metric1 = Metric('reproduction_rate', 1.0, levels=(2.4, 3.0), boundaries=(0, None))
    metric2 = Metric('reproduction_rate', 2.0, levels=(2.4, 3.0), boundaries=(0, None))
    assert metric1.name == 'reproduction_rate'
    assert metric1.value == 1.0
    assert metric1.levels == (2.4, 3.0)
    assert metric1.boundaries == (0., None)

    assert metric1 == metric1  # pylint: disable=comparison-with-itself
    assert metric1 != metric2
Example 4
def test_cpu_loads_fixed_levels(mocker):
    section = Section(load=Load(0.5, 1.0, 1.5), num_cpus=4, num_threads=123)
    params = {'levels': (2.0, 4.0)}
    result = set(check_cpu_load(params, section))
    assert result == set((
        Result(state=State.OK, summary='15 min load: 1.50'),
        Result(state=State.OK, summary='15 min load per core: 0.38 (4 cores)'),
        Metric('load1', 0.5, boundaries=(0, 4.0)),
        Metric('load5', 1.0, boundaries=(0, 4.0)),
        Metric('load15', 1.5, levels=(8.0, 16.0)),  # levels multiplied by num_cpus
    ))
Example 5
def check_levels(
    value: float,
    *,
    levels_upper: Optional[Tuple[float, float]] = None,
    levels_lower: Optional[Tuple[float, float]] = None,
    metric_name: Optional[str] = None,
    render_func: Optional[Callable[[float], str]] = None,
    label: Optional[str] = None,
    boundaries: Optional[Tuple[Optional[float], Optional[float]]] = None,
    notice_only: bool = False,
) -> Generator[Union[Result, Metric], None, None]:
    """Generic function for checking a value against levels.

    Args:

        value:        The currently measured value
        levels_upper: A pair of upper thresholds. If value is larger than these, the
                      service goes to **WARN** or **CRIT**, respectively.
        levels_lower: A pair of lower thresholds. If value is smaller than these, the
                      service goes to **WARN** or **CRIT**, respectively.
        metric_name:  The name of the datasource in the RRD that corresponds to this value
                      or None in order not to generate a metric.
        render_func:  A single argument function to convert the value from float into a
                      human readable string.
        label:        The label to prepend to the output.
        boundaries:   Minimum and maximum to add to the metric.
        notice_only:  Only show up in service output if not OK (otherwise in details).
                      See `notice` keyword of `Result` class.

    Example:

        >>> result, = check_levels(
        ...     23.0,
        ...     levels_upper=(12., 42.),
        ...     label="Fridge",
        ...     render_func=lambda v: "%.1f°" % v,
        ... )
        >>> print(result.summary)
        Fridge: 23.0° (warn/crit at 12.0°/42.0°)

    """
    if render_func is None:
        render_func = lambda f: "%.2f" % f

    info_text = str(render_func(value))  # forgive wrong output type
    if label:
        info_text = "%s: %s" % (label, info_text)

    value_state, levels_text = _do_check_levels(value, levels_upper,
                                                levels_lower, render_func)

    if notice_only:
        yield Result(state=value_state, notice=info_text + levels_text)
    else:
        yield Result(state=value_state, summary=info_text + levels_text)
    if metric_name:
        yield Metric(metric_name,
                     value,
                     levels=levels_upper,
                     boundaries=boundaries)
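
A minimal usage sketch for check_levels as defined above; the metric name, thresholds and label are illustrative, not taken from the source:

for element in check_levels(
        85.0,
        levels_upper=(80.0, 90.0),
        metric_name="util",  # hypothetical metric name
        label="Utilization",
        boundaries=(0.0, 100.0),
):
    print(element)
# With these inputs the generator should yield a WARN Result
# ("Utilization: 85.00 (warn/crit at 80.00/90.00)") followed by
# Metric("util", 85.0, levels=(80.0, 90.0), boundaries=(0.0, 100.0)).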
Example 6
def _create_new_result(
        is_details: bool,
        legacy_state: int,
        legacy_text: str,
        legacy_metrics: Union[Tuple, List] = (),
) -> Generator[Union[Metric, Result], None, bool]:
    result_state = state(legacy_state)

    if legacy_state or legacy_text:  # skip "Null"-Result
        if is_details:
            summary: Optional[str] = None
            details: Optional[str] = legacy_text
        else:
            is_details = "\n" in legacy_text
            summary, details = legacy_text.split("\n", 1) if is_details else (legacy_text, None)
        yield Result(
            state=result_state,
            summary=summary or None,
            details=details or None,
        )

    for metric in legacy_metrics:
        # fill up with None:
        name, value, warn, crit, min_, max_ = (
            v for v, _ in itertools.zip_longest(metric, range(6)))
        yield Metric(name, value, levels=(warn, crit), boundaries=(min_, max_))

    return is_details
Example 7
def test_node_returns_metric():
    node_results = _check_function_node((_OK_RESULT, Metric("panic", 42)))
    result = aggregate_node_details("test_node", node_results)
    assert result is not None
    assert result.state is state.OK
    assert result.summary == ""
    assert result.details == "[test_node]: I am fine"
Example 8
def _create_new_result(
    is_details: bool,
    legacy_state: int,
    legacy_text: str,
    legacy_metrics: Union[Tuple, List] = (),
) -> Generator[Union[Metric, Result], None, bool]:

    if legacy_state or legacy_text:  # skip "Null"-Result
        # Bypass the validation of the Result class:
        # Legacy plugins may rely on the fact that once a newline
        # has been in the output, *all* following output is sent to
        # the details. That means we have to create Results with
        # details only, which is prohibited by the original Result
        # class.
        yield Result(state=State(legacy_state), summary="Fake")._replace(
            summary="" if is_details else legacy_text.split("\n", 1)[0],
            details=legacy_text.strip(),
        )

    for metric in legacy_metrics:
        if len(metric) < 2:
            continue
        name = str(metric[0])
        value = _get_float(metric[1])
        if value is None:  # skip bogus metrics
            continue
        # fill up with None:
        warn, crit, min_, max_ = (
            _get_float(v)
            for v, _ in itertools.zip_longest(metric[2:], range(4)))
        yield Metric(name, value, levels=(warn, crit), boundaries=(min_, max_))

    return ("\n" in legacy_text) or is_details
Example 9
def test_cpu_threads():
    section = Section(load=Load(0.1, 0.1, 0.1), num_cpus=4, num_threads=1234)
    params: Dict[str, Any] = {}
    result = set(check_cpu_threads(params, section))
    assert result == {
        Metric('threads', 1234.0),
        Result(state=State.OK, summary='1234'),
    }
Example 10
def test_cluster_check_best_yield_best_nodes_metrics() -> None:

    check_best = _get_cluster_check_function(_simple_check, mode="best")

    assert list(m for m in check_best(section={
        "Nodett": [0, 23],
        "Nodebert": [1, 42],
    }, ) if isinstance(m, Metric))[0] == Metric("n", 23)  # Nodett's value
Example 11
def test_check():
    item = "mysql:reddb"
    params = {"levels": (None, None)}
    section = {"mysql": {"reddb": 42}}

    assert list(mysql_capacity.check_capacity(item, params, section)) == [
        Result(state=State.OK, summary="Size: 42 B"),
        Metric("database_size", 42.0),
    ]
Example 12
def test_cluster_check_worst_yield_selected_nodes_metrics() -> None:

    check_worst = _get_cluster_check_function(
        _simple_check,
        mode="worst",
        clusterization_parameters={"metrics_node": "Nodett"})

    assert list(m for m in check_worst(section={
        "Nodett": [0, 23],
        "Nodebert": [1, 42],
    }, ) if isinstance(m, Metric))[0] == Metric("n", 23)  # Nodett's value
Example 13
def test_cpu_threads():
    section = Section(
        load=Load(0.1, 0.1, 0.1),
        num_cpus=4,
        threads=Threads(count=1234),
    )
    params: Dict[str, Any] = {}
    result = set(check_cpu_threads(params, section))
    assert result == {
        Metric("threads", 1234.0),
        Result(state=State.OK, summary="1234"),
    }
Example 14
def _simple_check(section: Iterable[int]) -> CheckResult:
    """just a simple way to create test check results"""
    for value in section:
        try:
            yield Result(state=State(value), summary="Hi")
        except ValueError:
            if value == -1:
                yield IgnoreResults("yielded")
            elif value == -2:
                raise IgnoreResultsError("raised")
            else:
                yield Metric("n", value)
Example 15
def check_levels(
    value: float,
    *,
    levels_upper=None,  # type: Optional[Tuple[float, float]]
    levels_lower=None,  # type: Optional[Tuple[float, float]]
    metric_name: Optional[str] = None,
    render_func: Optional[Callable[[float], str]] = None,
    label: Optional[str] = None,
    boundaries: Optional[Tuple[Optional[float], Optional[float]]] = None,
) -> Generator[Union[Result, Metric], None, None]:
    """Generic function for checking a value against levels.

    :param value:        Currently measured value
    :param levels_upper: Pair of upper thresholds. If value is larger than these, the
                         service goes to **WARN** or **CRIT**, respectively.
    :param levels_lower: Pair of lower thresholds. If value is smaller than these, the
                         service goes to **WARN** or **CRIT**, respectively.
    :param metric_name:  Name of the datasource in the RRD that corresponds to this value
                         or None in order to skip perfdata
    :param render_func:  Single argument function to convert the value from float into a
                         human readable string.
    :param label:        Label to prepend to the output.
    :param boundaries:   Minimum and maximum to add to the metric.
    """
    if render_func is None:
        render_func = lambda f: "%.2f" % f

    info_text = str(render_func(value))  # forgive wrong output type
    if label:
        info_text = "%s: %s" % (label, info_text)

    value_state, levels_text = _do_check_levels(value, levels_upper, levels_lower, render_func)

    yield Result(state=value_state, summary=info_text + levels_text)
    if metric_name:
        yield Metric(metric_name, value, levels=levels_upper, boundaries=boundaries)
Example 16
        ([], checking.ITEM_NOT_FOUND),
        (
            [
                Result(state=state.OK, notice="details"),
            ],
            (0, "Everything looks OK - 1 detail available\ndetails", []),
        ),
        (
            [
                Result(state=state.OK,
                       summary="summary1",
                       details="detailed info1"),
                Result(state=state.WARN,
                       summary="summary2",
                       details="detailed info2"),
            ],
            (1, "summary1, summary2(!)\ndetailed info1\ndetailed info2(!)",
             []),
        ),
        (
            [
                Result(state=state.OK, summary="summary"),
                Metric(name="name", value=42),
            ],
            (0, "summary\nsummary", [("name", 42.0, None, None, None, None)]),
        ),
    ],
)
def test_aggregate_result(subresults, aggregated_results):
    assert checking._aggregate_results(subresults) == aggregated_results
Example 17
def test_node_returns_metric():
    node_results = _check_function_node((_OK_RESULT, Metric("panic", 42)))
    assert list(make_node_notice_results("test_node", node_results)) == [
        Result(state=State.OK, notice="[test_node]: I am fine"),
    ]
Example 18
def check_levels_predictive(
    value: float,
    *,
    levels: Dict[str, Any],
    metric_name: str,
    render_func: Optional[Callable[[float], str]] = None,
    label: Optional[str] = None,
    boundaries: Optional[Tuple[Optional[float], Optional[float]]] = None,
) -> Generator[Union[Result, Metric], None, None]:
    """Generic function for checking a value against levels.

    Args:

        value:        Currently measured value
        levels:       Predictive levels. These are used automatically.
                      Lower levels are imposed if the passed dictionary contains "lower"
                      as key, upper levels are imposed if it contains "upper" or
                      "levels_upper_min" as key.
                      If value is lower/higher than these, the service goes to **WARN**
                      or **CRIT**, respectively.
        metric_name:  Name of the datasource in the RRD that corresponds to this value
        render_func:  Single argument function to convert the value from float into a
                      human readable string.
        label:        Label to prepend to the output.
        boundaries:   Minimum and maximum to add to the metric.

    """
    if render_func is None:
        render_func = "{:.2f}".format

    # Validate the metric name before we get the levels.
    _ = Metric(metric_name, value)

    try:
        ref_value, levels_tuple = cmk.base.prediction.get_levels(
            plugin_contexts.host_name(),
            plugin_contexts.service_description(),
            metric_name,
            levels,
            "MAX",
        )
        if ref_value:
            predictive_levels_msg = " (predicted reference: %s)" % render_func(ref_value)
        else:
            predictive_levels_msg = " (no reference for prediction yet)"

    except MKGeneralException as e:
        ref_value = None
        levels_tuple = (None, None, None, None)
        predictive_levels_msg = " (no reference for prediction: %s)" % e

    except Exception as e:
        if cmk.utils.debug.enabled():
            raise
        yield Result(state=State.UNKNOWN, summary="%s" % e)
        return

    levels_upper = (None if levels_tuple[0] is None or levels_tuple[1] is None else
                    (levels_tuple[0], levels_tuple[1]))

    levels_lower = (None if levels_tuple[2] is None or levels_tuple[3] is None else
                    (levels_tuple[2], levels_tuple[3]))

    value_state, levels_text = _do_check_levels(value, levels_upper, levels_lower, render_func)

    if label:
        info_text = "%s: %s%s" % (label, render_func(value), predictive_levels_msg)
    else:
        info_text = "%s%s" % (render_func(value), predictive_levels_msg)

    yield Result(state=value_state, summary=info_text + levels_text)
    yield Metric(metric_name, value, levels=levels_upper, boundaries=boundaries)
    if ref_value:
        yield Metric("predict_%s" % metric_name, ref_value)
Example 19
def test_metric_invalid(name, value, levels, boundaries):
    with pytest.raises(TypeError):
        _ = Metric(name, value, levels=levels, boundaries=boundaries)
Example 20
def test_metric_kwarg():
    with pytest.raises(TypeError):
        _ = Metric("universe", 42, (23, 23))  # type: ignore[misc] # pylint: disable=too-many-function-args
Example 21
        (1, (3, 6), (1, 0), int, (State.OK, "")),
        (0, (3, 6), (1, 0), int, (State.WARN, " (warn/crit below 1/0)")),
        (-1, (3, 6), (1, 0), int, (State.CRIT, " (warn/crit below 1/0)")),
    ])
def test_boundaries(value, levels_upper, levels_lower, render_func, result):
    assert utils._do_check_levels(value, levels_upper, levels_lower,
                                  render_func) == result


@pytest.mark.parametrize("value, kwargs, result", [
    (5, {
        "metric_name": "battery",
        "render_func": render.percent,
    }, [
        Result(state=State.OK, summary="5.00%"),
        Metric("battery", 5.0),
    ]),
    (6, {
        "metric_name": "disk",
        "levels_upper": (4, 8),
        "render_func": lambda x: "%.2f years" % x,
        "label": "Disk Age",
    }, [
        Result(
            state=State.WARN,
            summary="Disk Age: 6.00 years (warn/crit at 4.00 years/8.00 years)"
        ),
        Metric("disk", 6.0, levels=(4., 8.)),
    ]),
    (5e-7, {
        "metric_name": "H_concentration",
Example 22
    result = set(check_cpu_threads(params, section))
    assert result == {
        Metric("thread_usage", 50.0),
        Metric("threads", 1234.0),
        Result(state=State.OK, summary="1234"),
        Result(state=State.OK, summary="Usage: 50.00%"),
    }


@pytest.mark.parametrize(
    "info, check_result",
    [
        (
            [["0.88", "0.83", "0.87", "2/2148", "21050", "8"]],
            {
                Metric("threads", 2148.0, levels=(2000.0, 4000.0)),
                Result(state=State.WARN, summary="2148 (warn/crit at 2000/4000)"),
            },
        ),
        (
            [["0.88", "0.83", "0.87", "2/1748", "21050", "8"], ["124069"]],
            {
                Metric("threads", 1748.0, levels=(2000.0, 4000.0)),
                Result(state=State.OK, summary="1748"),
                Metric("thread_usage", 1.408893438328672),
                Result(state=State.OK, summary="Usage: 1.41%"),
            },
        ),
    ],
)
def test_cpu_threads_regression(info, check_result):
Example 23
                      num_cpus=4,
                      num_threads=1234,
                      max_threads=2468)
    params: Dict[str, Any] = {}
    result = set(check_cpu_threads(params, section))
    assert result == {
        Metric('thread_usage', 50.0),
        Metric('threads', 1234.0),
        Result(state=State.OK, summary='1234'),
        Result(state=State.OK, summary='Usage: 50.00%')
    }


@pytest.mark.parametrize('info, check_result', [
    ([[u'0.88', u'0.83', u'0.87', u'2/2148', u'21050', u'8']], {
        Metric('threads', 2148.0, levels=(2000.0, 4000.0)),
        Result(state=State.WARN, summary='2148 (warn/crit at 2000/4000)'),
    }),
    ([[u'0.88', u'0.83', u'0.87', u'2/1748', u'21050', u'8'], [u'124069']], {
        Metric('threads', 1748.0, levels=(2000.0, 4000.0)),
        Result(state=State.OK, summary='1748'),
        Metric('thread_usage', 1.408893438328672),
        Result(state=State.OK, summary='Usage: 1.41%')
    }),
])
def test_cpu_threads_regression(info, check_result):
    section = parse_cpu(info)
    assert section is not None
    params = {'levels': (2000, 4000)}
    assert list(discover_cpu_threads(section)) == [Service()]
    assert set(check_cpu_threads(params, section)) == check_result


def test_node_returns_metric():
    node_results = _check_function_node((_OK_RESULT, Metric("panic", 42)))
    state, text = aggregate_node_details("test_node", node_results)
    assert state is State.OK
    assert text == "[test_node]: I am fine"