Example #1
def check_levels(
    value: float,
    *,
    levels_upper: Optional[Tuple[float, float]] = None,
    levels_lower: Optional[Tuple[float, float]] = None,
    metric_name: Optional[str] = None,
    render_func: Optional[Callable[[float], str]] = None,
    label: Optional[str] = None,
    boundaries: Optional[Tuple[Optional[float], Optional[float]]] = None,
    notice_only: bool = False,
) -> Generator[Union[Result, Metric], None, None]:
    """Generic function for checking a value against levels.

    Args:

        value:        The currently measured value
        levels_upper: A pair of upper thresholds. If value is larger than these, the
                      service goes to **WARN** or **CRIT**, respectively.
        levels_lower: A pair of lower thresholds. If value is smaller than these, the
                      service goes to **WARN** or **CRIT**, respectively.
        metric_name:  The name of the datasource in the RRD that corresponds to this value
                      or None in order not to generate a metric.
        render_func:  A single argument function to convert the value from float into a
                      human readable string.
        label:        The label to prepend to the output.
        boundaries:   Minimum and maximum to add to the metric.
        notice_only:  Only show up in service output if not OK (otherwise in details).
                      See `notice` keyword of `Result` class.

    Example:

        >>> result, = check_levels(
        ...     23.0,
        ...     levels_upper=(12., 42.),
        ...     label="Fridge",
        ...     render_func=lambda v: "%.1f°" % v,
        ... )
        >>> print(result.summary)
        Fridge: 23.0° (warn/crit at 12.0°/42.0°)

    """
    if render_func is None:
        render_func = lambda f: "%.2f" % f

    info_text = str(render_func(value))  # forgive wrong output type
    if label:
        info_text = "%s: %s" % (label, info_text)

    value_state, levels_text = _do_check_levels(value, levels_upper,
                                                levels_lower, render_func)

    if notice_only:
        yield Result(state=value_state, notice=info_text + levels_text)
    else:
        yield Result(state=value_state, summary=info_text + levels_text)
    if metric_name:
        yield Metric(metric_name,
                     value,
                     levels=levels_upper,
                     boundaries=boundaries)
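For orientation, here is a minimal usage sketch of the function above. The plugin name, thresholds and metric name are invented for illustration; only the call pattern mirrors the docstring example:

def check_fridge_temperature(section: float) -> Generator[Union[Result, Metric], None, None]:
    # Hypothetical check function: delegate state computation, output rendering
    # and metric creation to check_levels() as defined above.
    yield from check_levels(
        section,
        levels_upper=(12.0, 42.0),          # WARN above 12.0, CRIT above 42.0
        metric_name="temp",                 # also emit Metric("temp", ...)
        render_func=lambda v: "%.1f°" % v,  # human readable rendering
        label="Fridge",
    )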
Example #2
def test_cpu_loads_predictive(mocker):
    # make sure cpu_load check can handle predictive values
    mocker.patch("cmk.base.check_api._prediction.get_levels",
                 return_value=(None, (2.2, 4.2, None, None)))
    # TODO: don't mock this. Use the context managers.
    mocker.patch("cmk.base.plugin_contexts._hostname", value="unittest")
    mocker.patch("cmk.base.plugin_contexts._service_description",
                 value="unittest-sd")
    params = {
        'levels': {
            'period': 'minute',
            'horizon': 1,
            'levels_upper': ('absolute', (2.0, 4.0))
        }
    }
    section = Section(load=Load(0.5, 1.0, 1.5), num_cpus=4, num_threads=123)
    result = set(check_cpu_load(params, section))

    assert result == set((
        Result(state=State.OK,
               summary='15 min load: 1.50 (no reference for prediction yet)'),
        Result(state=State.OK, summary='15 min load per core: 0.38 (4 cores)'),
        Metric('load1', 0.5, boundaries=(0, 4.0)),
        Metric('load5', 1.0, boundaries=(0, 4.0)),
        Metric('load15', 1.5,
               levels=(2.2, 4.2)),  # those are the predicted values
    ))
Example #3
def test_result(state_, summary, notice, details, expected_triple):
    result = Result(state=state_,
                    summary=summary,
                    notice=notice,
                    details=details)
    assert (result.state, result.summary, result.details) == expected_triple
    assert result != Result(state=state.OK,
                            summary="a total different summary")
Example #4
def primary_results(self) -> Iterable[Result]:
    if self._preferred is None or self._preferred == self._pivoting:
        yield Result(state=State.OK, summary=f"{self._label}: [{self._pivoting}]")
    else:
        yield Result(
            state=self._unpreferred_node_state,
            summary=f"{self._label}: [{self._pivoting}]",
            details=f"{self._label}: [{self._pivoting}], Preferred node is [{self._preferred}]",
        )
    yield from self._node_results.results[self._pivoting]
Example #5
def test_cpu_threads_max_threads():
    section = Section(load=Load(0.1, 0.1, 0.1), num_cpus=4, num_threads=1234, max_threads=2468)
    params: Dict[str, Any] = {}
    result = set(check_cpu_threads(params, section))
    assert result == {
        Metric("thread_usage", 50.0),
        Metric("threads", 1234.0),
        Result(state=State.OK, summary="1234"),
        Result(state=State.OK, summary="Usage: 50.00%"),
    }
Example #6
def test_cpu_loads_fixed_levels(mocker):
    section = Section(load=Load(0.5, 1.0, 1.5), num_cpus=4, num_threads=123)
    params = {'levels': (2.0, 4.0)}
    result = set(check_cpu_load(params, section))
    assert result == set((
        Result(state=State.OK, summary='15 min load: 1.50'),
        Result(state=State.OK, summary='15 min load per core: 0.38 (4 cores)'),
        Metric('load1', 0.5, boundaries=(0, 4.0)),
        Metric('load5', 1.0, boundaries=(0, 4.0)),
        Metric('load15', 1.5, levels=(8.0, 16.0)),  # levels multiplied by num_cpus
    ))
Example #7
def test_cluster_check_best_others_are_notice_only() -> None:
    check_best = _get_cluster_check_function(_simple_check, mode="best")

    assert list(check_best(section={
        "Nodett": [2],
        "Nomo": [1],
    }, )) == [
        Result(state=State.OK, summary="Best: [Nomo]"),
        Result(state=State.WARN, summary="Hi", details="[Nomo]: Hi"),
        Result(state=State.OK, summary="Additional results from: [Nodett]"),
        Result(state=State.OK, notice="[Nodett]: Hi(!!)"),
    ]
Example #8
def test_result(
    state_: state,
    summary: Optional[str],
    notice: Optional[str],
    details: Optional[str],
    expected_triple: Tuple[state, str, str],
) -> None:
    result = Result(
        state=state_,
        summary=summary,
        notice=notice,
        details=details,
    )  # type: ignore[call-overload]
    assert (result.state, result.summary, result.details) == expected_triple
    assert result != Result(state=state_, summary="a different summary")
Example #9
def test_node_mutliline():
    node_results = (Result(state=State.WARN,
                           notice="These\nare\nfour\nlines"), )
    assert list(
        make_node_notice_results(
            "test_node", _check_function_node(node_results))) == [
                Result(
                    state=State.WARN,
                    summary="[test_node]: These, are, four, lines",
                    details=("[test_node]: These\n"
                             "[test_node]: are\n"
                             "[test_node]: four\n"
                             "[test_node]: lines"),
                ),
            ]
Example #10
def test_node_returns_details_only():
    node_results = _check_function_node((Result(state=state.OK, details="This is detailed"),))
    result = aggregate_node_details("test_node", node_results)
    assert result is not None
    assert result.state is state.OK
    assert result.summary == ""
    assert result.details == "[test_node]: This is detailed"
Example #11
def _create_new_result(
        is_details: bool,
        legacy_state: int,
        legacy_text: str,
        legacy_metrics: Union[Tuple, List] = (),
) -> Generator[Union[Metric, Result], None, bool]:
    result_state = state(legacy_state)

    if legacy_state or legacy_text:  # skip "Null"-Result
        if is_details:
            summary: Optional[str] = None
            details: Optional[str] = legacy_text
        else:
            is_details = "\n" in legacy_text
            summary, details = legacy_text.split("\n", 1) if is_details else (legacy_text, None)
        yield Result(
            state=result_state,
            summary=summary or None,
            details=details or None,
        )

    for metric in legacy_metrics:
        # fill up with None:
        name, value, warn, crit, min_, max_ = (
            v for v, _ in itertools.zip_longest(metric, range(6)))
        yield Metric(name, value, levels=(warn, crit), boundaries=(min_, max_))

    return is_details
Example #12
def _add_node_name(result: Result, node_name: str) -> Result:
    return Result(
        state=result.state,
        summary=result.summary,
        details="\n".join(f"[{node_name}]: {line}"
                          for line in result.details.splitlines()),
    )
Example #13
def _create_new_result(
    is_details: bool,
    legacy_state: int,
    legacy_text: str,
    legacy_metrics: Union[Tuple, List] = (),
) -> Generator[Union[Metric, Result], None, bool]:

    if legacy_state or legacy_text:  # skip "Null"-Result
        # Bypass the validation of the Result class:
        # Legacy plugins may rely on the fact that once a newline
        # has been in the output, *all* following output is sent to
        # the details. That means we have to create Results with
        # details only, which is prohibited by the original Result
        # class.
        yield Result(state=State(legacy_state), summary="Fake")._replace(
            summary="" if is_details else legacy_text.split("\n", 1)[0],
            details=legacy_text.strip(),
        )

    for metric in legacy_metrics:
        if len(metric) < 2:
            continue
        name = str(metric[0])
        value = _get_float(metric[1])
        if value is None:  # skip bogus metrics
            continue
        # fill up with None:
        warn, crit, min_, max_ = (
            _get_float(v)
            for v, _ in itertools.zip_longest(metric[2:], range(4)))
        yield Metric(name, value, levels=(warn, crit), boundaries=(min_, max_))

    return ("\n" in legacy_text) or is_details
Example #14
def _unfit_for_clustering(**_kw) -> CheckResult:
    """A cluster_check_function that displays a generic warning"""
    yield Result(
        state=State.UNKNOWN,
        summary=("This service does not implement a native cluster mode. Please change your "
                 "configuration using the rule 'Aggregation options for clustered services', "
                 "and select one of the other available aggregation modes."),
    )
Example #15
def test_result_invalid(state_, summary, notice, details):
    with pytest.raises((TypeError, ValueError)):
        _ = Result(
            state=state_,
            summary=summary,
            notice=notice,
            details=details,
        )  # type: ignore[call-overload]
Example #16
def test_cpu_threads():
    section = Section(load=Load(0.1, 0.1, 0.1), num_cpus=4, num_threads=1234)
    params: Dict[str, Any] = {}
    result = set(check_cpu_threads(params, section))
    assert result == {
        Metric('threads', 1234.0),
        Result(state=State.OK, summary='1234'),
    }
Example #17
    def secondary_results(
        self,
        *,
        levels_additional_nodes_count: Tuple[float, float],
    ) -> Iterable[Result]:
        secondary_nodes = sorted(n for n in self._node_results.results if n != self._pivoting)
        if not secondary_nodes:
            return

        yield Result(
            state=self._secondary_nodes_state(secondary_nodes, levels_additional_nodes_count),
            summary=f"Additional results from: {', '.join(f'[{n}]' for n in secondary_nodes)}",
        )
        yield from (Result(
            state=State.OK,
            notice=r.summary,
            details=f"{r.details}{state_markers[int(r.state)]}",
        ) for node in secondary_nodes for r in self._node_results.results[node])
Example #18
def test_check():
    item = "mysql:reddb"
    params = {"levels": (None, None)}
    section = {"mysql": {"reddb": 42}}

    assert list(mysql_capacity.check_capacity(item, params, section)) == [
        Result(state=State.OK, summary="Size: 42 B"),
        Metric("database_size", 42.0),
    ]
Example #19
def test_cluster_check_failover_others_are_notice_only() -> None:
    check_failover = _get_cluster_check_function(_simple_check,
                                                 mode="failover")

    assert list(check_failover(section={
        "Nodett": [2],
        "Nomo": [1],
    }, ))[3:] == [
        Result(state=State.OK, notice="[Nomo]: Hi(!)"),
    ]
Example #20
def test_node_mutliline():
    node_results = (Result(state=state.WARN, details="These\nare\nfour\nlines"),)
    result = aggregate_node_details("test_node", _check_function_node(node_results))
    assert result is not None
    assert result.state is state.WARN
    assert result.summary == ""
    assert result.details == ("[test_node]: These\n"
                              "[test_node]: are\n"
                              "[test_node]: four\n"
                              "[test_node]: lines(!)")
Example #21
def test_node_mutliline():
    node_results = (Result(state=State.WARN,
                           notice="These\nare\nfour\nlines"), )
    state, text = aggregate_node_details("test_node",
                                         _check_function_node(node_results))
    assert state is State.WARN
    assert text == ("[test_node]: These\n"
                    "[test_node]: are\n"
                    "[test_node]: four\n"
                    "[test_node]: lines(!)")
Example #22
def _simple_check(section: Iterable[int]) -> CheckResult:
    """just a simple way to create test check results"""
    for value in section:
        try:
            yield Result(state=State(value), summary="Hi")
        except ValueError:
            if value == -1:
                yield IgnoreResults("yielded")
            elif value == -2:
                raise IgnoreResultsError("raised")
            else:
                yield Metric("n", value)
Example #23
def test_cpu_threads():
    section = Section(
        load=Load(0.1, 0.1, 0.1),
        num_cpus=4,
        threads=Threads(count=1234),
    )
    params: Dict[str, Any] = {}
    result = set(check_cpu_threads(params, section))
    assert result == {
        Metric("threads", 1234.0),
        Result(state=State.OK, summary="1234"),
    }
Example #24
def cluster_legacy_mode_from_hell(*args, **kwargs):
    # This function will *almost* never be called:
    #
    # If legacy plugins are executed on clusters, the original check function is called,
    # as it is impossible to recreate the "complex" behavior of the legacy API using the new API.
    # We maintain an extra code path in cmk/base/checking.py for those cases.
    #
    # Unfortunately, when discovering cluster hosts, this function will still be called, as
    # part of the code designed for the new API is used.
    # Since fixing this issue would dramatically worsen the code in cmk/base/checking.py,
    # we simply issue a message here, similar to the preview for counter based checks:
    yield Result(
        state=State.OK,
        summary="Service preview for legacy plugins on clusters not available.",
    )
Example #25
def check_levels(
    value: float,
    *,
    levels_upper=None,  # type: Optional[Tuple[float, float]]
    levels_lower=None,  # type: Optional[Tuple[float, float]]
    metric_name: str = None,
    render_func: Callable[[float], str] = None,
    label: str = None,
    boundaries: Optional[Tuple[Optional[float], Optional[float]]] = None,
) -> Generator[Union[Result, Metric], None, None]:
    """Generic function for checking a value against levels.

    :param value:        Currently measured value
    :param levels_upper: Pair of upper thresholds. If value is larger than these, the
                         service goes to **WARN** or **CRIT**, respectively.
    :param levels_lower: Pair of lower thresholds. If value is smaller than these, the
                         service goes to **WARN** or **CRIT**, respectively.
    :param metric_name:  Name of the datasource in the RRD that corresponds to this value
                         or None in order to skip perfdata
    :param render_func:  Single argument function to convert the value from float into a
                         human readable string.
    :param label:        Label to prepend to the output.
    :param boundaries:   Minimum and maximum to add to the metric.
    """
    if render_func is None:
        render_func = lambda f: "%.2f" % f

    info_text = str(render_func(value))  # forgive wrong output type
    if label:
        info_text = "%s: %s" % (label, info_text)

    value_state, levels_text = _do_check_levels(value, levels_upper, levels_lower, render_func)

    yield Result(state=value_state, summary=info_text + levels_text)
    if metric_name:
        yield Metric(metric_name, value, levels=levels_upper, boundaries=boundaries)
Example #26
def unfit_for_clustering(*args, **kwargs):
    yield Result(
        state=State.UNKNOWN,
        summary=("This service is not ready to handle clustered data. "
                 "Please change your configuration."),
    )
Example #27
    resolved_check_params = checking.time_resolved_check_parameters(rules)
    assert expected_result == resolved_check_params


def _check_timeperiod(timeperiod, active_timeperiods):
    return timeperiod in active_timeperiods


@pytest.mark.parametrize(
    "subresults, aggregated_results",
    [
        ([], checking.ITEM_NOT_FOUND),
        (
            [
                Result(state=state.OK, notice="details"),
            ],
            (0, "Everything looks OK - 1 detail available\ndetails", []),
        ),
        (
            [
                Result(state=state.OK,
                       summary="summary1",
                       details="detailed info1"),
                Result(state=state.WARN,
                       summary="summary2",
                       details="detailed info2"),
            ],
            (1, "summary1, summary2(!)\ndetailed info1\ndetailed info2(!)",
             []),
        ),
Example #28
    @staticmethod
    def _secondary_nodes_state(
        secondary_nodes: Sequence[str],
        levels: Tuple[float, float],
    ) -> State:
        count = len(secondary_nodes)
        return State.CRIT if count >= levels[1] else State(count >= levels[0])

    def metrics(self, node_name: Optional[str]) -> CheckResult:
        used_node = node_name or self._pivoting
        if not (metrics := self._node_results.metrics.get(used_node, ())):
            return
        yield Result(
            state=State.OK,
            notice=
            f"[{used_node}] Metrics: {', '.join(m.name for m in metrics)}",
        )
        yield from metrics


class NodeCheckExecutor:
    def __init__(self, *, service_id: ServiceID,
                 persist_value_store_changes: bool) -> None:
        self._service_id = service_id
        self._persist_value_store_changes = persist_value_store_changes

    def __call__(
        self,
        check_function: Callable[..., CheckResult],
        cluster_kwargs: _Kwargs,
Example #29
def check_levels_predictive(
    value: float,
    *,
    levels: Dict[str, Any],
    metric_name: str,
    render_func: Optional[Callable[[float], str]] = None,
    label: Optional[str] = None,
    boundaries: Optional[Tuple[Optional[float], Optional[float]]] = None,
) -> Generator[Union[Result, Metric], None, None]:
    """Generic function for checking a value against levels.

    Args:

        value:        Currently measured value
        levels:       Predictive levels. These are used automatically.
                      Lower levels are imposed if the passed dictionary contains "lower"
                      as key, upper levels are imposed if it contains "upper" or
                      "levels_upper_min" as key.
                      If value is lower/higher than these, the service goes to **WARN**
                      or **CRIT**, respectively.
        metric_name:  Name of the datasource in the RRD that corresponds to this value
        render_func:  Single argument function to convert the value from float into a
                      human readable string.
        label:        Label to prepend to the output.
        boundaries:   Minimum and maximum to add to the metric.

    """
    if render_func is None:
        render_func = "{:.2f}".format

    # validate the metric name, before we can get the levels.
    _ = Metric(metric_name, value)

    try:
        ref_value, levels_tuple = cmk.base.prediction.get_levels(
            plugin_contexts.host_name(),
            plugin_contexts.service_description(),
            metric_name,
            levels,
            "MAX",
        )
        if ref_value:
            predictive_levels_msg = " (predicted reference: %s)" % render_func(ref_value)
        else:
            predictive_levels_msg = " (no reference for prediction yet)"

    except MKGeneralException as e:
        ref_value = None
        levels_tuple = (None, None, None, None)
        predictive_levels_msg = " (no reference for prediction: %s)" % e

    except Exception as e:
        if cmk.utils.debug.enabled():
            raise
        yield Result(state=State.UNKNOWN, summary="%s" % e)
        return

    levels_upper = (None if levels_tuple[0] is None or levels_tuple[1] is None else
                    (levels_tuple[0], levels_tuple[1]))

    levels_lower = (None if levels_tuple[2] is None or levels_tuple[3] is None else
                    (levels_tuple[2], levels_tuple[3]))

    value_state, levels_text = _do_check_levels(value, levels_upper, levels_lower, render_func)

    if label:
        info_text = "%s: %s%s" % (label, render_func(value), predictive_levels_msg)
    else:
        info_text = "%s%s" % (render_func(value), predictive_levels_msg)

    yield Result(state=value_state, summary=info_text + levels_text)
    yield Metric(metric_name, value, levels=levels_upper, boundaries=boundaries)
    if ref_value:
        yield Metric("predict_%s" % metric_name, ref_value)
Example #30
def test_result_invalid(state_, summary, notice, details):
    with pytest.raises((TypeError, ValueError)):
        _ = Result(state=state_,
                   summary=summary,
                   notice=notice,
                   details=details)