def test_metric():
    metric1 = Metric('reproduction_rate', 1.0, levels=(2.4, 3.0), boundaries=(0, None))
    metric2 = Metric('reproduction_rate', 2.0, levels=(2.4, 3.0), boundaries=(0, None))

    assert metric1.name == 'reproduction_rate'
    assert metric1.value == 1.0
    assert metric1.levels == (2.4, 3.0)
    assert metric1.boundaries == (0., None)

    assert metric1 == metric1  # pylint: disable=comparison-with-itself
    assert metric1 != metric2

def _create_new_result(
        is_details,  # type: bool
        legacy_state,  # type: int
        legacy_text,  # type: str
        legacy_metrics=(),  # type: Union[Tuple, List]
):
    # type: (...) -> Generator[Union[Metric, Result], None, bool]
    result_state = state(legacy_state)

    if legacy_state or legacy_text:  # skip "Null"-Result
        if is_details:
            summary = None  # type: Optional[str]
            details = legacy_text  # type: Optional[str]
        else:
            is_details = "\n" in legacy_text
            summary, details = legacy_text.split("\n", 1) if is_details else (legacy_text, None)
        yield Result(state=result_state, summary=summary, details=details)

    for metric in legacy_metrics:
        # fill up with None:
        name, value, warn, crit, min_, max_ = (
            v for v, _ in itertools.zip_longest(metric, range(6)))
        yield Metric(name, value, levels=(warn, crit), boundaries=(min_, max_))

    return is_details

def test_metric():
    metric = Metric('reproduction_rate', 2.0, levels=(2.4, 3.0), boundaries=(0, None))
    assert metric.name == 'reproduction_rate'
    assert metric.value == 2.0
    assert metric.levels == (2.4, 3.0)
    assert metric.boundaries == (0., None)

def check_levels(
        value,  # type: float
        _sentinel=_SENTINEL,  # type: Any # enforce keyword usage, remove with CMK-3983
        # *,
        levels_upper=None,  # type: Optional[Tuple[float, float]]
        levels_lower=None,  # type: Optional[Tuple[float, float]]
        metric_name=None,  # type: str
        render_func=None,  # type: Callable[[float], str]
        label=None,  # type: str
        boundaries=None,  # type: Optional[Tuple[Optional[float], Optional[float]]]
):
    # type: (...) -> Generator[Union[Result, Metric], None, None]
    """Generic function for checking a value against levels

    :param value:        Currently measured value
    :param levels_upper: Pair of upper thresholds. If value is larger than these, the service
                         goes to **WARN** or **CRIT**, respectively.
    :param levels_lower: Pair of lower thresholds. If value is smaller than these, the service
                         goes to **WARN** or **CRIT**, respectively.
    :param metric_name:  Name of the datasource in the RRD that corresponds to this value,
                         or None in order to skip perfdata
    :param render_func:  Single argument function to convert the value from float into a
                         human readable string.
    :param label:        Label to prepend to the output.
    :param boundaries:   Minimum and maximum to add to the metric.
    """
    # TODO (mo): unhack this CMK-3983
    if _sentinel is not _SENTINEL:
        raise TypeError("check_levels only accepts one positional argument")

    if render_func is None:
        render_func = lambda f: "%.2f" % f

    info_text = str(render_func(value))  # forgive wrong output type
    if label:
        info_text = "%s: %s" % (label, info_text)

    value_state, levels_text = _do_check_levels(value, levels_upper, levels_lower, render_func)

    yield Result(state=value_state, summary=info_text + levels_text)
    if metric_name:
        yield Metric(metric_name, value, levels=levels_upper, boundaries=boundaries)

def check_levels(
    value: float,
    *,
    levels_upper: Optional[Tuple[float, float]] = None,
    levels_lower: Optional[Tuple[float, float]] = None,
    metric_name: Optional[str] = None,
    render_func: Optional[Callable[[float], str]] = None,
    label: Optional[str] = None,
    boundaries: Optional[Tuple[Optional[float], Optional[float]]] = None,
) -> Generator[Union[Result, Metric], None, None]:
    """Generic function for checking a value against levels.

    :param value:        Currently measured value
    :param levels_upper: Pair of upper thresholds. If value is larger than these, the service
                         goes to **WARN** or **CRIT**, respectively.
    :param levels_lower: Pair of lower thresholds. If value is smaller than these, the service
                         goes to **WARN** or **CRIT**, respectively.
    :param metric_name:  Name of the datasource in the RRD that corresponds to this value,
                         or None in order to skip perfdata
    :param render_func:  Single argument function to convert the value from float into a
                         human readable string.
    :param label:        Label to prepend to the output.
    :param boundaries:   Minimum and maximum to add to the metric.
    """
    if render_func is None:
        render_func = lambda f: "%.2f" % f

    info_text = str(render_func(value))  # forgive wrong output type
    if label:
        info_text = "%s: %s" % (label, info_text)

    value_state, levels_text = _do_check_levels(value, levels_upper, levels_lower, render_func)

    yield Result(state=value_state, summary=info_text + levels_text)
    if metric_name:
        yield Metric(metric_name, value, levels=levels_upper, boundaries=boundaries)

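# Hedged usage sketch (not part of the source above): how a check function might call
# check_levels. The section layout, the "cpu_temp" metric name and the threshold values
# are made-up assumptions for illustration only.
def check_cpu_temp_example(section):
    yield from check_levels(
        section["temperature"],        # currently measured value
        levels_upper=(70.0, 80.0),     # WARN at 70, CRIT at 80
        metric_name="cpu_temp",        # also emit perfdata under this name
        render_func=lambda v: "%.1f °C" % v,
        label="CPU temperature",
        boundaries=(0.0, None),
    )
    # For a value of 75.0 this is expected to yield roughly:
    #   Result(state=state.WARN,
    #          summary="CPU temperature: 75.0 °C (warn/crit at 70.0 °C/80.0 °C)")
    #   Metric("cpu_temp", 75.0, levels=(70.0, 80.0), boundaries=(0.0, None))
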
def _create_new_result(
        implicit_newline,  # type: str
        legacy_state,  # type: int
        legacy_text,  # type: str
        legacy_metrics=(),  # type: Union[Tuple, List]
):
    # type: (...) -> Generator[Union[Metric, Result], None, str]
    kwargs = {"state": state(legacy_state)}  # type: Dict[str, Any]
    components = (implicit_newline + legacy_text).split("\n", 1)
    kwargs["summary"] = components[0]
    if len(components) > 1:
        # make sure it is longer than the summary
        kwargs["details"] = ("%%-%ds" % (len(components[0]) + 1)) % components[1]
    yield Result(**kwargs)

    for metric in legacy_metrics:
        # fill up with None:
        name, value, warn, crit, min_, max_ = (
            v for v, _ in itertools.zip_longest(metric, range(6)))
        yield Metric(name, value, levels=(warn, crit), boundaries=(min_, max_))

    return "\n" if len(components) > 1 else ""

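# Hedged usage sketch (not part of the source above; the concrete values are made up):
# translating one legacy subresult with no carried-over implicit newline. The short
# metric tuple is padded with None for the missing min/max fields by the
# itertools.zip_longest trick above.
example_subresults = list(
    _create_new_result("", 1, "level too high", [("level", 23.0, 20.0, 30.0)]))
# expected, roughly:
#   [Result(state=state.WARN, summary="level too high"),
#    Metric("level", 23.0, levels=(20.0, 30.0), boundaries=(None, None))]
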
def test_metric_invalid(name, value, levels, boundaries):
    with pytest.raises(TypeError):
        _ = Metric(name, value, levels=levels, boundaries=boundaries)

def test_metric_kwarg():
    with pytest.raises(TypeError):
        _ = Metric("universe", 42, (23, 23))  # pylint: disable=too-many-function-args

    (1, (3, 6), (1, 0), int, (state.OK, "")),
    (0, (3, 6), (1, 0), int, (state.WARN, " (warn/crit below 1/0)")),
    (-1, (3, 6), (1, 0), int, (state.CRIT, " (warn/crit below 1/0)")),
])
def test_boundaries(value, levels_upper, levels_lower, render_func, result):
    assert utils._do_check_levels(value, levels_upper, levels_lower, render_func) == result


@pytest.mark.parametrize("value, kwargs, result", [
    (5, {
        "metric_name": "battery",
        "render_func": render.percent,
    }, [
        Result(state=state.OK, summary="5.00%"),
        Metric("battery", 5.0),
    ]),
    (6, {
        "metric_name": "disk",
        "levels_upper": (4, 8),
        "render_func": lambda x: "%.2f years" % x,
        "label": "Disk Age",
    }, [
        Result(state=state.WARN,
               summary="Disk Age: 6.00 years (warn/crit at 4.00 years/8.00 years)"),
        Metric("disk", 6.0, levels=(4., 8.)),
    ]),
    (5e-7, {
        "metric_name": "H_concentration",
def check_levels_predictive(
    value: float,
    *,
    levels: Dict[str, Any],
    metric_name: str,
    render_func: Optional[Callable[[float], str]] = None,
    label: Optional[str] = None,
    boundaries: Optional[Tuple[Optional[float], Optional[float]]] = None,
) -> Generator[Union[Result, Metric], None, None]:
    """Generic function for checking a value against levels.

    :param value:       Currently measured value
    :param levels:      Predictive levels. These are used automatically. Lower levels are
                        imposed if the passed dictionary contains "lower" as key, upper levels
                        are imposed if it contains "upper" or "levels_upper_min" as key.
                        If value is lower/higher than these, the service goes to **WARN**
                        or **CRIT**, respectively.
    :param metric_name: Name of the datasource in the RRD that corresponds to this value
    :param render_func: Single argument function to convert the value from float into a
                        human readable string.
    :param label:       Label to prepend to the output.
    :param boundaries:  Minimum and maximum to add to the metric.
    """
    if render_func is None:
        render_func = lambda f: "%.2f" % f

    # validate the metric name, before we can get the levels.
    Metric.validate_name(metric_name)

    try:
        ref_value, levels_tuple = cmk.base.prediction.get_levels(
            check_api_utils.host_name(),
            check_api_utils.service_description(),
            metric_name,
            levels,
            "MAX",
        )
        if ref_value:
            predictive_levels_msg = " (predicted reference: %s)" % render_func(ref_value)
        else:
            predictive_levels_msg = " (no reference for prediction yet)"

    except MKGeneralException as e:
        ref_value = None
        levels_tuple = (None, None, None, None)
        predictive_levels_msg = " (no reference for prediction: %s)" % e

    except Exception as e:
        if cmk.utils.debug.enabled():
            raise
        yield Result(state=state.UNKNOWN, summary="%s" % e)
        return

    levels_upper = (None if levels_tuple[0] is None or levels_tuple[1] is None else
                    (levels_tuple[0], levels_tuple[1]))

    levels_lower = (None if levels_tuple[2] is None or levels_tuple[3] is None else
                    (levels_tuple[2], levels_tuple[3]))

    value_state, levels_text = _do_check_levels(value, levels_upper, levels_lower, render_func)

    if label:
        info_text = "%s: %s%s" % (label, render_func(value), predictive_levels_msg)
    else:
        info_text = "%s%s" % (render_func(value), predictive_levels_msg)

    yield Result(state=value_state, summary=info_text + levels_text)
    yield Metric(metric_name, value, levels=levels_upper, boundaries=boundaries)
    if ref_value:
        yield Metric("predict_%s" % metric_name, ref_value)

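# Hedged usage sketch (not part of the source above): inside a check function,
# check_levels_predictive is typically fed the predictive-levels dictionary coming from
# the rule set parameters. The check name, parameter key and metric name below are
# made-up assumptions; actually evaluating the levels requires the prediction backend
# (cmk.base.prediction) and a configured site, so this only illustrates the call shape.
def check_cpu_load_example(params, section):
    yield from check_levels_predictive(
        section["load15"],
        levels=params["levels"],   # predictive levels config, e.g. with "upper"/"lower" keys
        metric_name="load15",      # also used to look up the prediction reference
        render_func=lambda v: "%.2f" % v,
        label="15 min load",
    )
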
                        lambda tp: _check_timeperiod(tp, active_timeperiods))

    determined_check_params = cmk.base.checking.legacy_determine_check_params(rules)
    assert expected_result == determined_check_params, (
        "Determine params: Expected '%s' but got '%s'" %
        (expected_result, determined_check_params))


def _check_timeperiod(timeperiod, active_timeperiods):
    return timeperiod in active_timeperiods


@pytest.mark.parametrize("subresults, aggregated_results", [
    ([], cmk.base.checking.ITEM_NOT_FOUND),
    ([
        Result(state=state.OK, details="details"),
    ], (0, "Everything looks OK - 1 detail available\ndetails", [])),
    ([
        Result(state=state.OK, summary="summary1", details="detailed info1"),
        Result(state=state.WARN, summary="summary2", details="detailed info2"),
    ], (1, "summary1, summary2(!)\ndetailed info1\ndetailed info2(!)", [])),
    ([
        Result(state=state.OK, summary="summary"),
        Metric(name="name", value=42),
    ], (0, "summary\nsummary", [("name", 42.0, None, None, None, None)])),
])
def test_aggregate_result(subresults, aggregated_results):
    assert cmk.base.checking._aggregate_results(subresults) == aggregated_results