Example 1
def test_get_effective_service_level(monkeypatch):
    ts = Scenario()
    ts.add_host("testhost1")
    ts.add_host("testhost2")
    ts.add_host("testhost3")
    ts.set_ruleset(
        "host_service_levels",
        [
            (10, [], ["testhost2"], {}),
            (2, [], ["testhost2"], {}),
        ],
    )
    ts.set_ruleset(
        "service_service_levels",
        [
            (33, [], ["testhost1"], ["CPU load$"], {}),
        ],
    )
    ts.apply(monkeypatch)

    with plugin_contexts.current_service(CheckPluginName("cpu_loads"), "CPU load"):

        with plugin_contexts.current_host("testhost1"):
            assert check_api.get_effective_service_level() == 33

        with plugin_contexts.current_host("testhost2"):
            assert check_api.get_effective_service_level() == 10

        with plugin_contexts.current_host("testhost3"):
            assert check_api.get_effective_service_level() == 0
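These examples rely on `plugin_contexts.current_host` and `plugin_contexts.current_service` to make the "current" host and service available to code that does not receive them as arguments (e.g. `check_api.get_effective_service_level()`). A rough, hypothetical stand-in showing the general shape of such a context manager; this is not the actual `cmk.base.plugin_contexts` implementation:

from contextlib import contextmanager
from typing import Iterator, Optional

_current_host: Optional[str] = None  # hypothetical module-level state


@contextmanager
def current_host_sketch(hostname: str) -> Iterator[None]:
    # Expose the given host name for the duration of the block and
    # restore the previous value afterwards, even on exceptions.
    global _current_host
    previous, _current_host = _current_host, hostname
    try:
        yield
    finally:
        _current_host = previous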
Example 2
def test_get_effective_service_level(monkeypatch):
    ts = Scenario().add_host("testhost1")
    ts.add_host("testhost2")
    ts.add_host("testhost3")
    ts.set_ruleset(
        "host_service_levels",
        [
            (10, [], ["testhost2"], {}),
            (2, [], ["testhost2"], {}),
        ],
    )
    ts.set_ruleset(
        "service_service_levels",
        [
            (33, [], ["testhost1"], ["CPU load$"], {}),
        ],
    )
    ts.apply(monkeypatch)

    with plugin_contexts.current_service(
            Service(
                item=None,
                check_plugin_name=CheckPluginName("cpu_loads"),
                description="CPU load",
                parameters={},
            )):

        with plugin_contexts.current_host("testhost1"):
            assert check_api.get_effective_service_level() == 33

        with plugin_contexts.current_host("testhost2"):
            assert check_api.get_effective_service_level() == 10

        with plugin_contexts.current_host("testhost3"):
            assert check_api.get_effective_service_level() == 0
Example 3
def run(check_info, dataset, write=False):
    """Run all possible tests on 'dataset'"""
    print("START: %r" % (dataset,))
    checklist = checkhandler.get_applicables(dataset.checkname, check_info)
    assert checklist, "Found no check plugin for %r" % (dataset.checkname,)

    immu = Immutables()

    with optional_freeze_time(dataset):

        parsed = run_test_on_parse(dataset, immu)

        # LOOP OVER ALL (SUB)CHECKS
        for sname in checklist:
            subcheck = (sname + ".").split(".")[1]
            check = Check(sname)

            info_arg = get_info_argument(dataset, subcheck, parsed)
            immu.test(" after get_info_argument ")
            immu.register(info_arg, "info_arg")

            mock_is, mock_hec, mock_hecm = get_mock_values(dataset, subcheck)

            with current_host("non-existent-testhost"), mock_item_state(mock_is), MockHostExtraConf(
                check, mock_hec
            ), MockHostExtraConf(check, mock_hecm, "host_extra_conf_merged"):

                run_test_on_discovery(check, subcheck, dataset, info_arg, immu, write)

                run_test_on_checks(check, subcheck, dataset, info_arg, immu, write)

        immu.test(" at end of subcheck loop %r " % (subcheck,))
Example 4
def test_cpu_loads_predictive(mocker: Mock) -> None:
    # make sure cpu_load check can handle predictive values
    mocker.patch(
        "cmk.base.check_api._prediction.get_levels",
        return_value=(None, (2.2, 4.2, None, None)),
    )
    with current_host("unittest"), current_service(
            CheckPluginName("cpu_loads"), "item"):
        assert list(
            check_cpu_load(
                {
                    "levels": {
                        "period": "minute",
                        "horizon": 1,
                        "levels_upper": ("absolute", (2.0, 4.0)),
                    }
                },
                Section(
                    load=Load(0.5, 1.0, 1.5),
                    num_cpus=4,
                    threads=Threads(count=123),
                ),
            )
        ) == [
            Result(
                state=State.OK,
                summary="15 min load: 1.50 (no reference for prediction yet)"),
            Metric("load15", 1.5,
                   levels=(2.2, 4.2)),  # those are the predicted values
            Result(state=State.OK,
                   summary="15 min load per core: 0.38 (4 cores)"),
            Metric("load1", 0.5, boundaries=(0, 4.0)),
            Metric("load5", 1.0, boundaries=(0, 4.0)),
        ]
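Several of these examples patch `cmk.base.check_api._prediction.get_levels` so that the predictive-levels code path becomes deterministic. Judging from the assertions above, the mocked return value is a pair of (reference value, levels tuple): a `None` reference produces the "no reference for prediction yet" summary, while the first two entries of the levels tuple show up as the metric's warn/crit levels. A minimal sketch of the pattern, assuming a Checkmk source tree and pytest-mock's `mocker` fixture; the test name and assertions are hypothetical:

def test_predictive_levels_are_deterministic(mocker):
    patched = mocker.patch(
        "cmk.base.check_api._prediction.get_levels",
        # presumably (reference, (warn_upper, crit_upper, warn_lower, crit_lower))
        return_value=(None, (2.2, 4.2, None, None)),
    )
    reference, levels = patched.return_value
    assert reference is None          # rendered as "no reference for prediction yet"
    assert levels[:2] == (2.2, 4.2)   # picked up as the metric's warn/crit levels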
Example 5
def test_mem_win(
    mocker: MockerFixture,
    fix_register: FixRegister,
    params: Mapping[str, Any],
    expected_result: CheckResult,
) -> None:
    mocker.patch(
        "cmk.base.check_api._prediction.get_levels",
        return_value=(100000, (90000, 110000, None, None)),
    )
    with current_host("unittest-hn"), current_service(
        Service(
            CheckPluginName("unittest_sd"),
            parameters={},
            item=None,
            description="unittest_sd_description",
        )
    ):
        assert (
            list(
                fix_register.check_plugins[CheckPluginName("mem_win")].check_function(
                    item=None,
                    params=params,
                    section=_SECTION,
                )
            )
            == expected_result
        )
Example 6
def _do_all_checks_on_host(
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    parsed_sections_broker: ParsedSectionsBroker,
    *,
    services: List[Service],
    dry_run: bool,
    show_perfdata: bool,
) -> Tuple[int, List[CheckPluginName]]:
    num_success = 0
    plugins_missing_data: Set[CheckPluginName] = set()

    with plugin_contexts.current_host(host_config.hostname):
        with item_state.load_host_value_store(host_config.hostname,
                                              store_changes=not dry_run) as value_store_manager:
            for service in services:
                success = execute_check(
                    parsed_sections_broker,
                    host_config,
                    ipaddress,
                    service,
                    dry_run=dry_run,
                    show_perfdata=show_perfdata,
                    value_store_manager=value_store_manager,
                )
                if success:
                    num_success += 1
                else:
                    plugins_missing_data.add(service.check_plugin_name)

    return num_success, sorted(plugins_missing_data)
Example 7
def test_check_plugins_do_not_discover_upon_empty_snmp_input(
        monkeypatch, fix_register):
    """
    In Checkmk < 1.6 the parse function has not been called for empty table data,
    unless "handle_empty_info" has been set.

    From version 2.0 on, the parse function will always be called.
    In case no further processing is desired, the parse functions should return `None`.

    (Returning something falsy usually means nothing will be discovered!)

    Since this was the behaviour for *almost* every plugin, we maintain this test
    with a list of known exceptions, to ensure the old behaviour is not changed.

    However: There is nothing wrong with not returning None, in principle.
    If you wish to do that (see one of the listed exceptions for examples),
    just add an exception below. If maintaining this test becomes too tedious,
    we can probably just remove it.
    """
    Scenario().apply(monkeypatch)  # host_extra_conf needs the ruleset_matcher

    plugins_expected_to_discover_upon_empty = {
        "printer_alerts",
        "liebert_system_events",
        "apc_inrow_system_events",
    }

    plugins_discovering_upon_empty = set()
    for plugin in fix_register.check_plugins.values():
        for sections in _section_permutations(plugin.sections):
            kwargs = {
                str(section.name): _get_empty_parsed_result(section)
                for section in sections
            }
            if all(v is None for v in kwargs.values()):
                continue

            if len(kwargs) > 1:
                kwargs = {f"section_{k}": v for k, v in kwargs.items()}
            else:
                kwargs = {"section": v for v in kwargs.values()}

            if plugin.discovery_default_parameters is not None:
                kwargs["params"] = plugin.discovery_default_parameters if (
                    plugin.discovery_ruleset_type
                    == "merged") else [plugin.discovery_default_parameters]

            with current_host(
                    'testhost'):  # host_extra_conf needs a host_name()
                if list(plugin.discovery_function(**kwargs)):
                    plugins_discovering_upon_empty.add(str(plugin.name))

    assert plugins_discovering_upon_empty == plugins_expected_to_discover_upon_empty
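The docstring above describes the convention that a parse function should return `None` for empty table data when nothing is to be discovered. A minimal, hypothetical parse function (not taken from the Checkmk source) following that convention:

from typing import Mapping, Optional, Sequence


def parse_my_plugin(string_table: Sequence[Sequence[str]]) -> Optional[Mapping[str, str]]:
    # Empty input: return None so discovery yields nothing for this section.
    if not string_table:
        return None
    # Otherwise build the parsed section; here a trivial name -> value mapping.
    return {row[0]: row[1] for row in string_table if len(row) >= 2}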
Example 8
def test_check_levels_predictive_default_render_func(mocker):
    mocker.patch("cmk.base.check_api._prediction.get_levels",
                 return_value=(None, (2.2, 4.2, None, None)))

    with current_host("unittest"), current_service(
            CheckPluginName("test_check"), "unittest-service-description"):
        result = next(
            utils.check_levels_predictive(42.42,
                                          metric_name="metric_name",
                                          levels={}))

    assert isinstance(result, Result)
    assert result.summary.startswith("42.42")
Example 9
def fixture_results(checkplugin, section, params, mocker: MockerFixture):
    params = {k: params for k in checkplugin.metrics}
    mocker.patch("cmk.base.check_api._prediction.get_levels",
                 return_value=(None, (2.2, 4.2, None, None)))

    with current_host("unittest"), current_service(
            CheckPluginName("test_check"), "unittest-service-description"):
        results = list(
            checkplugin.function(
                item=ITEM,
                params=params,
                section_gcp_service_filestore=section,
                section_gcp_assets=None,
            ))
    return results, checkplugin
Example 10
def _discover_services(
    *,
    host_name: HostName,
    ipaddress: Optional[HostAddress],
    parsed_sections_broker: ParsedSectionsBroker,
    run_plugin_names: Container[CheckPluginName],
    on_error: OnError,
) -> List[Service]:
    # find out which plugins we need to discover
    plugin_candidates = _find_candidates(parsed_sections_broker,
                                         run_plugin_names)
    section.section_step("Executing discovery plugins (%d)" %
                         len(plugin_candidates))
    console.vverbose("  Trying discovery with: %s\n" %
                     ", ".join(str(n) for n in plugin_candidates))
    # The host name must be set for the host_name() calls commonly used to determine the
    # host name for host_extra_conf{_merged,} calls in the legacy checks.

    service_table: CheckTable = {}
    try:
        with plugin_contexts.current_host(host_name):
            for check_plugin_name in plugin_candidates:
                try:
                    service_table.update({
                        service.id(): service
                        for service in _discover_plugins_services(
                            check_plugin_name=check_plugin_name,
                            host_name=host_name,
                            ipaddress=ipaddress,
                            parsed_sections_broker=parsed_sections_broker,
                            on_error=on_error,
                        )
                    })
                except (KeyboardInterrupt, MKTimeout):
                    raise
                except Exception as e:
                    if on_error is OnError.RAISE:
                        raise
                    if on_error is OnError.WARN:
                        console.error(
                            f"Discovery of '{check_plugin_name}' failed: {e}\n"
                        )

            return list(service_table.values())

    except KeyboardInterrupt:
        raise MKGeneralException("Interrupted by Ctrl-C.")
Example 11
def test_check_levels_predictive_default_render_func(mocker):
    mocker.patch(
        "cmk.base.check_api._prediction.get_levels", return_value=(None, (2.2, 4.2, None, None))
    )

    irrelevant_test_parameters: LegacyCheckParameters = {}
    service = Service(
        item=None,
        check_plugin_name=CheckPluginName("test_check"),
        description="unittest-service-description",
        parameters=irrelevant_test_parameters,
    )
    with current_host("unittest"), current_service(service):
        result = next(utils.check_levels_predictive(42.42, metric_name="metric_name", levels={}))

    assert isinstance(result, Result)
    assert result.summary.startswith("42.42")
Example 12
def check_host_services(
    *,
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    parsed_sections_broker: ParsedSectionsBroker,
    services: Sequence[Service],
    run_plugin_names: Container[CheckPluginName],
    dry_run: bool,
    show_perfdata: bool,
) -> Tuple[int, List[CheckPluginName]]:
    """Compute service state results for all given services on node or cluster

     * Loops over all services,
     * calls the check
     * examines the result and sends it to the core (unless `dry_run` is True).
    """
    num_success = 0
    plugins_missing_data: Set[CheckPluginName] = set()

    with plugin_contexts.current_host(host_config.hostname):
        with value_store.load_host_value_store(
                host_config.hostname,
                store_changes=not dry_run) as value_store_manager:
            for service in _filter_services_to_check(
                    services=services,
                    run_plugin_names=run_plugin_names,
                    config_cache=config_cache,
                    host_name=host_config.hostname,
            ):
                success = _execute_check(
                    parsed_sections_broker,
                    host_config,
                    ipaddress,
                    service,
                    dry_run=dry_run,
                    show_perfdata=show_perfdata,
                    value_store_manager=value_store_manager,
                )
                if success:
                    num_success += 1
                else:
                    plugins_missing_data.add(service.check_plugin_name)

    return num_success, sorted(plugins_missing_data)
Example 13
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_classes.CheckPlugin],
    timespecific_parameters: Union[LegacyCheckParameters,
                                   TimespecificParameters],
    *,
    value_store_manager: value_store.ValueStoreManager,
    persist_value_store_changes: bool,
) -> AggregatedResult:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.
    """
    if plugin is None:
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=ServiceCheckResult.check_not_implemented(),
            cache_info=None,
        )

    config_cache = config.get_config_cache()
    check_function = (_cluster_modes.get_cluster_check_function(
        *config_cache.get_clustered_service_configuration(
            host_config.hostname,
            service.description,
        ),
        plugin=plugin,
        service_id=service.id(),
        persist_value_store_changes=persist_value_store_changes,
    ) if host_config.is_cluster else plugin.check_function)

    section_kws, error_result = _get_monitoring_data_kwargs_handle_pre20_services(
        parsed_sections_broker,
        host_config,
        config_cache,
        ipaddress,
        service,
        plugin.sections,
    )
    if not section_kws:  # no data found
        return AggregatedResult(
            submit=False,
            data_received=False,
            result=error_result,
            cache_info=None,
        )

    item_kw = {} if service.item is None else {"item": service.item}
    params_kw = ({} if plugin.check_default_parameters is None else {
        "params":
        _final_read_only_check_parameters(timespecific_parameters)
    })

    try:
        with plugin_contexts.current_host(
                host_config.hostname), plugin_contexts.current_service(
                    service), value_store_manager.namespace(service.id()):
            result = _aggregate_results(
                check_function(
                    **item_kw,
                    **params_kw,
                    **section_kws,
                ))

    except (item_state.MKCounterWrapped,
            checking_classes.IgnoreResultsError) as e:
        msg = str(e) or "No service summary available"
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=ServiceCheckResult(output=msg),
            cache_info=None,
        )
    except MKTimeout:
        raise
    except Exception:
        if cmk.utils.debug.enabled():
            raise
        table = check_table.get_check_table(host_config.hostname,
                                            skip_autochecks=True)
        result = ServiceCheckResult(
            3,
            cmk.base.crash_reporting.create_check_crash_dump(
                host_name=host_config.hostname,
                service_name=service.description,
                plugin_name=service.check_plugin_name,
                plugin_kwargs={
                    **item_kw,
                    **params_kw,
                    **section_kws
                },
                is_manual=service.id() in table,
            ),
        )

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=parsed_sections_broker.get_cache_info(plugin.sections),
    )
Example 14
def test_parse_diskstat_predictive(mocker: MockerFixture):
    # SUP-5924
    DATA = [
        ["1617784511"],
        [
            "259",
            "0",
            "nvme0n1",
            "131855",
            "42275",
            "8019303",
            "34515",
            "386089",
            "166344",
            "13331634",
            "138121",
            "0",
            "185784",
            "177210",
            "0",
            "0",
            "0",
            "0",
            "41445",
            "4574",
        ],
        [
            "53",
            "0",
            "dm-0",
            "172574",
            "0",
            "7980626",
            "74812",
            "548159",
            "0",
            "12443656",
            "706944",
            "0",
            "189576",
            "781756",
            "0",
            "0",
            "0",
            "0",
            "0",
            "0",
        ],
        [
            "53",
            "1",
            "dm-1",
            "171320",
            "0",
            "7710074",
            "74172",
            "546564",
            "0",
            "12514416",
            "674352",
            "0",
            "186452",
            "748524",
            "0",
            "0",
            "0",
            "0",
            "0",
            "0",
        ],
        [
            "53",
            "2",
            "dm-2",
            "194",
            "0",
            "8616",
            "68",
            "0",
            "0",
            "0",
            "0",
            "0",
            "72",
            "68",
            "0",
            "0",
            "0",
            "0",
            "0",
            "0",
        ],
        ["[dmsetup_info]"],
        ["vme0n1p3_crypt", "253:0"],
        ["buntu--vg-swap_1", "253:2", "ubuntu-vg", "swap_1"],
        ["buntu--vg-root", "253:1", "ubuntu-vg", "root"],
    ]

    PARAMS = {
        "average": 300,
        "latency": (80.0, 160.0),
        "read": {
            "horizon": 90,
            "levels_lower": ("absolute", (2.0, 4.0)),
            "levels_upper": ("relative", (10.0, 20.0)),
            "levels_upper_min": (10.0, 15.0),
            "period": "wday",
        },
        "read_ios": (400.0, 600.0),
        "read_latency": (80.0, 160.0),
        "read_wait": (30.0, 50.0),
        "utilization": (80.0, 90.0),
        "write": (50.0, 100.0),
        "write_ios": (300.0, 400.0),
        "write_latency": (80.0, 160.0),
        "write_wait": (30.0, 50.0),
    }

    mocker.patch(
        "cmk.base.check_api._prediction.get_levels", return_value=(None, (2.1, 4.1, None, None))
    )
    dummy_service: Service[LegacyCheckParameters] = Service(
        CheckPluginName("unittest_sd"),
        parameters={},
        item="item-nvme0n1",
        description="unittest_sd_description",
    )
    with plugin_contexts.current_host("unittest-hn"), plugin_contexts.current_service(
        dummy_service
    ):

        with pytest.raises(IgnoreResultsError):
            list(diskstat.check_diskstat("nvme0n1", PARAMS, diskstat.parse_diskstat(DATA), None))
        DATA[0][0] = "1617784512"
        assert list(
            diskstat.check_diskstat(
                "nvme0n1",
                PARAMS,
                diskstat.parse_diskstat(DATA),
                None,
            )
        ) == [
            Result(state=state.OK, notice="All values averaged over 5 minutes 0 seconds"),
            Result(state=state.OK, notice="Utilization: 0%"),
            Metric("disk_utilization", 0.0, levels=(0.8, 0.9)),
            Result(state=state.OK, summary="Read: 0.00 B/s (no reference for prediction yet)"),
            Metric("disk_read_throughput", 0.0, levels=(2.1, 4.1)),  # fake levels are quite low
            Result(state=state.OK, summary="Write: 0.00 B/s"),
            Metric("disk_write_throughput", 0.0, levels=(50000000.0, 100000000.0)),
            Result(state=state.OK, notice="Average wait: 0 seconds"),
            Metric("disk_average_wait", 0.0),
            Result(state=state.OK, notice="Average read wait: 0 seconds"),
            Metric("disk_average_read_wait", 0.0, levels=(0.03, 0.05)),
            Result(state=state.OK, notice="Average write wait: 0 seconds"),
            Metric("disk_average_write_wait", 0.0, levels=(0.03, 0.05)),
            Result(state=state.OK, notice="Average queue length: 0.00"),
            Metric("disk_queue_length", 0.0),
            Result(state=state.OK, notice="Read operations: 0.00/s"),
            Metric("disk_read_ios", 0.0, levels=(400.0, 600.0)),
            Result(state=state.OK, notice="Write operations: 0.00/s"),
            Metric("disk_write_ios", 0.0, levels=(300.0, 400.0)),
            Result(state=state.OK, summary="Latency: 0 seconds"),
            Metric("disk_latency", 0.0, levels=(0.08, 0.16)),
            Metric("disk_average_read_request_size", 0.0),
            Metric("disk_average_request_size", 0.0),
            Metric("disk_average_write_request_size", 0.0),
        ]
Example 15
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_classes.CheckPlugin],
    params_function: Callable[[], Parameters],
    *,
    value_store_manager: item_state.ValueStoreManager,
) -> AggregatedResult:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.
    """
    if plugin is None:
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=CHECK_NOT_IMPLEMENTED,
            cache_info=None,
        )

    check_function = (plugin.cluster_check_function
                      if host_config.is_cluster else plugin.check_function)

    source_type = (SourceType.MANAGEMENT
                   if service.check_plugin_name.is_management_name() else SourceType.HOST)

    config_cache = config.get_config_cache()

    kwargs = {}
    try:
        kwargs = parsed_sections_broker.get_section_cluster_kwargs(
            config_cache.get_clustered_service_node_keys(
                host_config.hostname,
                source_type,
                service.description,
            ) or [],
            plugin.sections,
        ) if host_config.is_cluster else parsed_sections_broker.get_section_kwargs(
            HostKey(host_config.hostname, ipaddress, source_type),
            plugin.sections,
        )

        if not kwargs and not service.check_plugin_name.is_management_name():
            # in 1.6 some plugins were discovered for management boards, but with
            # the regular host plugin's name. In this case retry with the source type
            # forced to MANAGEMENT:
            kwargs = parsed_sections_broker.get_section_cluster_kwargs(
                config_cache.get_clustered_service_node_keys(
                    host_config.hostname,
                    SourceType.MANAGEMENT,
                    service.description,
                ) or [],
                plugin.sections,
            ) if host_config.is_cluster else parsed_sections_broker.get_section_kwargs(
                HostKey(host_config.hostname, ipaddress, SourceType.MANAGEMENT),
                plugin.sections,
            )

        if not kwargs:  # no data found
            return AggregatedResult(
                submit=False,
                data_received=False,
                result=RECEIVED_NO_DATA,
                cache_info=None,
            )

        if service.item is not None:
            kwargs["item"] = service.item

        if plugin.check_default_parameters is not None:
            kwargs["params"] = params_function()

        with plugin_contexts.current_host(host_config.hostname), \
            plugin_contexts.current_service(service), \
            value_store_manager.namespace(service.id()):
            result = _aggregate_results(check_function(**kwargs))

    except (item_state.MKCounterWrapped, checking_classes.IgnoreResultsError) as e:
        msg = str(e) or "No service summary available"
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=(0, msg, []),
            cache_info=None,
        )

    except MKTimeout:
        raise

    except Exception:
        if cmk.utils.debug.enabled():
            raise
        table = check_table.get_check_table(host_config.hostname, skip_autochecks=True)
        result = 3, cmk.base.crash_reporting.create_check_crash_dump(
            host_name=host_config.hostname,
            service_name=service.description,
            plugin_name=service.check_plugin_name,
            plugin_kwargs=kwargs,
            is_manual=service.id() in table,
        ), []

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=parsed_sections_broker.get_cache_info(plugin.sections),
    )
Example 16
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_classes.CheckPlugin],
    # misleading: these are params that *may* be *partially* time-specific
    timespecific_parameters: LegacyCheckParameters,
    *,
    value_store_manager: value_store.ValueStoreManager,
    persist_value_store_changes: bool,
) -> AggregatedResult:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.
    """
    if plugin is None:
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=CHECK_NOT_IMPLEMENTED,
            cache_info=None,
        )

    config_cache = config.get_config_cache()
    check_function = (
        _cluster_modes.get_cluster_check_function(
            *config_cache.get_clustered_service_configuration(
                host_config.hostname,
                service.description,
            ),
            plugin=plugin,
            service_id=service.id(),
            persist_value_store_changes=persist_value_store_changes,
        )
        if host_config.is_cluster
        else plugin.check_function
    )
    source_type = (
        SourceType.MANAGEMENT if service.check_plugin_name.is_management_name() else SourceType.HOST
    )
    try:
        kwargs = (
            get_section_cluster_kwargs(
                parsed_sections_broker,
                config_cache.get_clustered_service_node_keys(
                    host_config.hostname,
                    source_type,
                    service.description,
                )
                or [],
                plugin.sections,
            )
            if host_config.is_cluster
            else get_section_kwargs(
                parsed_sections_broker,
                HostKey(host_config.hostname, ipaddress, source_type),
                plugin.sections,
            )
        )
        if not kwargs and not service.check_plugin_name.is_management_name():
            # in 1.6 some plugins were discovered for management boards, but with
            # the regular host plugin's name. In this case retry with the source type
            # forced to MANAGEMENT:
            kwargs = (
                get_section_cluster_kwargs(
                    parsed_sections_broker,
                    config_cache.get_clustered_service_node_keys(
                        host_config.hostname,
                        SourceType.MANAGEMENT,
                        service.description,
                    )
                    or [],
                    plugin.sections,
                )
                if host_config.is_cluster
                else get_section_kwargs(
                    parsed_sections_broker,
                    HostKey(host_config.hostname, ipaddress, SourceType.MANAGEMENT),
                    plugin.sections,
                )
            )
        if not kwargs:  # no data found
            return AggregatedResult(
                submit=False,
                data_received=False,
                result=RECEIVED_NO_DATA,
                cache_info=None,
            )

        kwargs = {
            **kwargs,
            **({} if service.item is None else {"item": service.item}),
            **(
                {}
                if plugin.check_default_parameters is None
                else {"params": _final_read_only_check_parameters(timespecific_parameters)}
            ),
        }
        with plugin_contexts.current_host(host_config.hostname), plugin_contexts.current_service(
            service
        ), value_store_manager.namespace(service.id()):
            result = _aggregate_results(check_function(**kwargs))

    except (item_state.MKCounterWrapped, checking_classes.IgnoreResultsError) as e:
        msg = str(e) or "No service summary available"
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=(0, msg, []),
            cache_info=None,
        )
    except MKTimeout:
        raise
    except Exception:
        if cmk.utils.debug.enabled():
            raise
        table = check_table.get_check_table(host_config.hostname, skip_autochecks=True)
        result = (
            3,
            cmk.base.crash_reporting.create_check_crash_dump(
                host_name=host_config.hostname,
                service_name=service.description,
                plugin_name=service.check_plugin_name,
                plugin_kwargs=globals().get("kwargs", {}),
                is_manual=service.id() in table,
            ),
            [],
        )

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=parsed_sections_broker.get_cache_info(plugin.sections),
    )
Example 17
def test_parse_diskstat_predictive(value_store: MutableMapping[str, Any],
                                   mocker: MockerFixture):
    # SUP-5924
    DATA = [
        ['1617784511'],
        [
            '259', '0', 'nvme0n1', '131855', '42275', '8019303', '34515',
            '386089', '166344', '13331634', '138121', '0', '185784', '177210',
            '0', '0', '0', '0', '41445', '4574'
        ],
        [
            '53', '0', 'dm-0', '172574', '0', '7980626', '74812', '548159',
            '0', '12443656', '706944', '0', '189576', '781756', '0', '0', '0',
            '0', '0', '0'
        ],
        [
            '53', '1', 'dm-1', '171320', '0', '7710074', '74172', '546564',
            '0', '12514416', '674352', '0', '186452', '748524', '0', '0', '0',
            '0', '0', '0'
        ],
        [
            '53', '2', 'dm-2', '194', '0', '8616', '68', '0', '0', '0', '0',
            '0', '72', '68', '0', '0', '0', '0', '0', '0'
        ],
        ['[dmsetup_info]'],
        ['vme0n1p3_crypt', '253:0'],
        ['buntu--vg-swap_1', '253:2', 'ubuntu-vg', 'swap_1'],
        ['buntu--vg-root', '253:1', 'ubuntu-vg', 'root'],
    ]

    PARAMS = {
        'average': 300,
        'latency': (80.0, 160.0),
        'read': {
            'horizon': 90,
            'levels_lower': ('absolute', (2.0, 4.0)),
            'levels_upper': ('relative', (10.0, 20.0)),
            'levels_upper_min': (10.0, 15.0),
            'period': 'wday'
        },
        'read_ios': (400.0, 600.0),
        'read_latency': (80.0, 160.0),
        'read_wait': (30.0, 50.0),
        'utilization': (80.0, 90.0),
        'write': (50.0, 100.0),
        'write_ios': (300.0, 400.0),
        'write_latency': (80.0, 160.0),
        'write_wait': (30.0, 50.0)
    }

    mocker.patch("cmk.base.check_api._prediction.get_levels",
                 return_value=(None, (2.1, 4.1, None, None)))
    dummy_service = Service(
        CheckPluginName("unittest_sd"),
        parameters={},
        item="item-nvme0n1",
        description="unittest_sd_description",
    )
    with plugin_contexts.current_host(
            "unittest-hn"), plugin_contexts.current_service(dummy_service):

        with pytest.raises(IgnoreResultsError):
            list(
                diskstat.check_diskstat("nvme0n1", PARAMS,
                                        diskstat.parse_diskstat(DATA), None))
        DATA[0][0] = '1617784512'
        assert list(
            diskstat.check_diskstat(
                "nvme0n1",
                PARAMS,
                diskstat.parse_diskstat(DATA),
                None,
            )
        ) == [
            Result(state=state.OK,
                   notice='All values averaged over 5 minutes 0 seconds'),
            Result(state=state.OK, notice='Utilization: 0%'),
            Metric('disk_utilization', 0.0, levels=(0.8, 0.9)),
            Result(state=state.OK,
                   summary='Read: 0.00 B/s (no reference for prediction yet)'),
            Metric('disk_read_throughput', 0.0,
                   levels=(2.1, 4.1)),  # fake levels are quite low
            Result(state=state.OK, summary='Write: 0.00 B/s'),
            Metric('disk_write_throughput',
                   0.0,
                   levels=(50000000.0, 100000000.0)),
            Result(state=state.OK, notice='Average wait: 0 seconds'),
            Metric('disk_average_wait', 0.0),
            Result(state=state.OK, notice='Average read wait: 0 seconds'),
            Metric('disk_average_read_wait', 0.0, levels=(0.03, 0.05)),
            Result(state=state.OK, notice='Average write wait: 0 seconds'),
            Metric('disk_average_write_wait', 0.0, levels=(0.03, 0.05)),
            Result(state=state.OK, notice='Average queue length: 0.00'),
            Metric('disk_queue_length', 0.0),
            Result(state=state.OK, notice='Read operations: 0.00/s'),
            Metric('disk_read_ios', 0.0, levels=(400.0, 600.0)),
            Result(state=state.OK, notice='Write operations: 0.00/s'),
            Metric('disk_write_ios', 0.0, levels=(300.0, 400.0)),
            Result(state=state.OK, summary='Latency: 0 seconds'),
            Metric('disk_latency', 0.0, levels=(0.08, 0.16)),
            Metric('disk_average_read_request_size', 0.0),
            Metric('disk_average_request_size', 0.0),
            Metric('disk_average_write_request_size', 0.0),
        ]