def test_get_section_cluster_kwargs(patch_register, required_sections,
                                    expected_result):

    node1_section_content = {
        SectionName("one"): NODE_1,
        SectionName("two"): NODE_1,
        SectionName("three"): NODE_1
    }

    node2_section_content = {
        SectionName("two"): NODE_2,
        SectionName("three"): NODE_2,
    }

    parsed_sections_broker = ParsedSectionsBroker()
    parsed_sections_broker.setdefault(
        HostKey("node1", "127.0.0.1", SourceType.HOST),
        AgentHostSections(sections=node1_section_content),
    )
    parsed_sections_broker.setdefault(
        HostKey("node2", "127.0.0.1", SourceType.HOST),
        AgentHostSections(sections=node2_section_content),
    )

    kwargs = parsed_sections_broker.get_section_cluster_kwargs(
        [
            HostKey("node1", "127.0.0.1", SourceType.HOST),
            HostKey("node2", "127.0.0.1", SourceType.HOST),
        ],
        [ParsedSectionName(n) for n in required_sections],
    )

    assert expected_result == kwargs,\
           "Section content: Expected '%s' but got '%s'" % (expected_result, kwargs)
def test_get_section_content(hostname, host_entries, cluster_node_keys,
                             expected_result):

    parsed_sections_broker = ParsedSectionsBroker()
    for nodename, node_section_content in host_entries:
        parsed_sections_broker.setdefault(
            HostKey(nodename, "127.0.0.1", SourceType.HOST),
            AgentHostSections(
                sections={
                    SectionName("section_plugin_name"): node_section_content
                }),
        )

    mhs = MultiHostSections(parsed_sections_broker)

    section_content = mhs.get_section_content(
        HostKey(hostname, "127.0.0.1", SourceType.HOST),
        check_api_utils.HOST_ONLY,
        "section_plugin_name",
        False,
        cluster_node_keys=cluster_node_keys,
        # only for parse_function lookup, not needed in this test
        check_legacy_info={},
    )
    assert expected_result == section_content

    section_content = mhs.get_section_content(
        HostKey(hostname, "127.0.0.1", SourceType.HOST),
        check_api_utils.HOST_PRECEDENCE,
        "section_plugin_name",
        False,
        cluster_node_keys=cluster_node_keys,
        # only for parse_function lookup, not needed in this test
        check_legacy_info={},
    )
    assert expected_result == section_content

    section_content = mhs.get_section_content(
        HostKey(hostname, "127.0.0.1", SourceType.MANAGEMENT),
        check_api_utils.MGMT_ONLY,
        "section_plugin_name",
        False,
        cluster_node_keys=None if cluster_node_keys is None else [
            HostKey(hn, ip, SourceType.MANAGEMENT)
            for (hn, ip, _st) in cluster_node_keys
        ],
        # only for parse_function lookup, not needed in this test
        check_legacy_info={},
    )
    assert section_content is None
    def test_no_sources(self, hostname, ipaddress, mode, config_cache,
                        host_config):
        broker = ParsedSectionsBroker()
        update_host_sections(
            broker,
            make_nodes(
                config_cache,
                host_config,
                ipaddress,
                mode=mode,
                sources=(),
            ),
            max_cachefile_age=0,
            host_config=host_config,
            fetcher_messages=(),
            selected_sections=NO_SELECTION,
        )
        # The length is not zero because the function always sets at least
        # a piggyback section.
        assert len(broker) == 1

        key = HostKey(hostname, ipaddress, SourceType.HOST)
        assert key in broker

        section = broker[key]

        # Public attributes from HostSections:
        assert not section.sections
        assert not section.cache_info
        assert not section.piggybacked_raw_data
def test_get_parsed_section(patch_register, node_section_content,
                            expected_result):

    parsed_sections_broker = ParsedSectionsBroker()
    parsed_sections_broker.setdefault(
        HostKey("node1", "127.0.0.1", SourceType.HOST),
        AgentHostSections(sections=node_section_content),
    )

    content = parsed_sections_broker.get_parsed_section(
        HostKey("node1", "127.0.0.1", SourceType.HOST),
        ParsedSectionName("parsed"),
    )

    assert expected_result == content,\
           "Section content: Expected '%s' but got '%s'" % (expected_result, content)
def _discover_plugins_services(
    *,
    check_plugin_name: CheckPluginName,
    host_name: HostName,
    ipaddress: Optional[HostAddress],
    parsed_sections_broker: ParsedSectionsBroker,
    discovery_parameters: DiscoveryParameters,
) -> Iterator[Service]:
    # Skip this check type if it is ignored for that host
    if config.service_ignored(host_name, check_plugin_name, None):
        console.vverbose("  Skip ignored check plugin name '%s'\n" %
                         check_plugin_name)
        return

    check_plugin = agent_based_register.get_check_plugin(check_plugin_name)
    if check_plugin is None:
        console.warning("  Missing check plugin: '%s'\n" % check_plugin_name)
        return

    host_key = HostKey(
        host_name,
        ipaddress,
        SourceType.MANAGEMENT
        if check_plugin.name.is_management_name() else SourceType.HOST,
    )

    try:
        kwargs = parsed_sections_broker.get_section_kwargs(
            host_key, check_plugin.sections)
    except Exception as exc:
        if cmk.utils.debug.enabled(
        ) or discovery_parameters.on_error == "raise":
            raise
        if discovery_parameters.on_error == "warn":
            console.warning("  Exception while parsing agent section: %s\n" %
                            exc)
        return
    if not kwargs:
        return

    disco_params = config.get_discovery_parameters(host_name, check_plugin)
    if disco_params is not None:
        kwargs["params"] = disco_params

    try:
        plugins_services = check_plugin.discovery_function(**kwargs)
        yield from _enriched_discovered_services(host_name, check_plugin.name,
                                                 plugins_services)
    except Exception as e:
        if discovery_parameters.on_error == "warn":
            console.warning(
                "  Exception in discovery function of check plugin '%s': %s" %
                (check_plugin.name, e))
        elif discovery_parameters.on_error == "raise":
            raise
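
# Hedged usage sketch: one way the helper above might be driven for every
# registered check plugin.  The construction of DiscoveryParameters is assumed
# and not shown in these snippets; _example_discover_all is illustrative only.
def _example_discover_all(host_name, ipaddress, broker, discovery_parameters):
    for check_plugin in agent_based_register.iter_all_check_plugins():
        yield from _discover_plugins_services(
            check_plugin_name=check_plugin.name,
            host_name=host_name,
            ipaddress=ipaddress,
            parsed_sections_broker=broker,
            discovery_parameters=discovery_parameters,
        )
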
    def test_multiple_sources_from_the_same_host(
        self,
        hostname,
        ipaddress,
        mode,
        config_cache,
        host_config,
    ):
        sources = [
            ProgramSource.ds(
                hostname,
                ipaddress,
                mode=mode,
                template="",
            ),
            TCPSource(
                hostname,
                ipaddress,
                mode=mode,
            ),
        ]

        broker = ParsedSectionsBroker()
        update_host_sections(
            broker,
            make_nodes(
                config_cache,
                host_config,
                ipaddress,
                mode=mode,
                sources=sources,
            ),
            max_cachefile_age=0,
            host_config=host_config,
            fetcher_messages=[
                FetcherMessage.from_raw_data(
                    result.OK(source.default_raw_data),
                    Snapshot.null(),
                    source.fetcher_type,
                ) for source in sources
            ],
            selected_sections=NO_SELECTION,
        )
        assert len(broker) == 1

        key = HostKey(hostname, ipaddress, SourceType.HOST)
        assert key in broker

        section = broker[key]

        assert len(section.sections) == 1
        # yapf: disable
        assert (section.sections[SectionName("section_name_%s" % hostname)]
                == len(sources) * [["section_content"]])
    def test_no_sources(self, cluster, nodes, config_cache, host_config, mode):
        made_nodes = make_nodes(
            config_cache,
            host_config,
            None,
            mode=mode,
            sources=(),
        )

        broker = ParsedSectionsBroker()
        update_host_sections(
            broker,
            made_nodes,
            max_cachefile_age=0,
            host_config=host_config,
            fetcher_messages=[
                # We do not pass sources explicitly but still append Piggyback.
                FetcherMessage.from_raw_data(
                    result.OK(AgentRawData(b"")),
                    Snapshot.null(),
                    FetcherType.PIGGYBACK,
                ) for _n in made_nodes
            ],
            selected_sections=NO_SELECTION,
        )
        assert len(broker) == len(nodes)

        key_clu = HostKey(cluster, None, SourceType.HOST)
        assert key_clu not in broker

        for hostname, addr in nodes.items():
            key = HostKey(hostname, addr, SourceType.HOST)
            assert key in broker

            section = broker[key]
            # yapf: disable
            assert (section.sections[SectionName("section_name_%s" % hostname)]
                    == [["section_content_%s" % hostname]])
            assert not section.cache_info
            assert not section.piggybacked_raw_data
def _get_host_section_for_parse_sections_test():
    node_section_content = {
        SectionName("one"): NODE_1,
        SectionName("four"): NODE_1,
    }

    host_key = HostKey("node1", "127.0.0.1", SourceType.HOST)

    broker = ParsedSectionsBroker()
    broker.setdefault(
        host_key,
        AgentHostSections(sections=node_section_content),
    )
    return host_key, broker
def _discover_host_labels(
    *,
    host_name: HostName,
    ipaddress: Optional[HostAddress],
    parsed_sections_broker: ParsedSectionsBroker,
    discovery_parameters: DiscoveryParameters,
) -> Sequence[HostLabel]:

    section.section_step("Discover host labels of section plugins")

    # make names unique
    labels_by_name = {
        **_discover_host_labels_for_source_type(
            host_key=HostKey(host_name, ipaddress, SourceType.HOST),
            parsed_sections_broker=parsed_sections_broker,
            discovery_parameters=discovery_parameters,
        ),
        **_discover_host_labels_for_source_type(
            host_key=HostKey(host_name, ipaddress, SourceType.MANAGEMENT),
            parsed_sections_broker=parsed_sections_broker,
            discovery_parameters=discovery_parameters,
        ),
    }
    return list(labels_by_name.values())
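
# Note on the merge above: both calls return a mapping keyed by label name, so
# when HOST and MANAGEMENT discover a label of the same name, the MANAGEMENT
# entry (merged second) wins.  A minimal sketch of that precedence with plain
# dicts and made-up label names:
_host_side = {"cmk/os_family": "linux"}
_mgmt_side = {"cmk/os_family": "snmp"}
assert {**_host_side, **_mgmt_side}["cmk/os_family"] == "snmp"
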
def _do_inv_for_realhost(
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    *,
    parsed_sections_broker: ParsedSectionsBroker,
    run_only_plugin_names: Optional[Set[InventoryPluginName]],
) -> InventoryTrees:
    tree_aggregator = _TreeAggregator()
    _set_cluster_property(tree_aggregator.trees.inventory, host_config)

    section.section_step("Executing inventory plugins")
    for inventory_plugin in agent_based_register.iter_all_inventory_plugins():
        if run_only_plugin_names and inventory_plugin.name not in run_only_plugin_names:
            continue

        kwargs = parsed_sections_broker.get_section_kwargs(
            HostKey(host_config.hostname, ipaddress, SourceType.HOST),
            inventory_plugin.sections,
        )
        if not kwargs:
            console.vverbose(" %s%s%s%s: skipped (no data)\n", tty.yellow,
                             tty.bold, inventory_plugin.name, tty.normal)
            continue

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        if inventory_plugin.inventory_ruleset_name is not None:
            kwargs["params"] = host_config.inventory_parameters(
                inventory_plugin.inventory_ruleset_name)

        exception = tree_aggregator.aggregate_results(
            inventory_plugin.inventory_function(**kwargs), )
        if exception:
            console.warning(" %s%s%s%s: failed: %s", tty.red, tty.bold,
                            inventory_plugin.name, tty.normal, exception)
        else:
            console.verbose(" %s%s%s%s", tty.green, tty.bold,
                            inventory_plugin.name, tty.normal)
            console.vverbose(": ok\n")
    console.verbose("\n")

    tree_aggregator.trees.inventory.normalize_nodes()
    tree_aggregator.trees.status_data.normalize_nodes()
    return tree_aggregator.trees
    def test_one_snmp_source(self, hostname, ipaddress, mode, config_cache,
                             host_config):
        broker = ParsedSectionsBroker()
        update_host_sections(
            broker,
            make_nodes(
                config_cache,
                host_config,
                ipaddress,
                mode=mode,
                sources=[
                    SNMPSource.snmp(
                        hostname,
                        ipaddress,
                        mode=mode,
                        selected_sections=NO_SELECTION,
                        on_scan_error="raise",
                    ),
                ],
            ),
            max_cachefile_age=0,
            host_config=host_config,
            fetcher_messages=[
                FetcherMessage.from_raw_data(
                    result.OK({}),
                    Snapshot.null(),
                    FetcherType.SNMP,
                ),
            ],
            selected_sections=NO_SELECTION,
        )
        assert len(broker) == 1

        key = HostKey(hostname, ipaddress, SourceType.HOST)
        assert key in broker

        section = broker[key]

        assert len(section.sections) == 1
        assert section.sections[SectionName("section_name_%s" %
                                            hostname)] == [["section_content"]]
    def test_one_nonsnmp_source(self, hostname, ipaddress, mode, config_cache,
                                host_config, source):
        source = source(hostname, ipaddress, mode=mode)
        assert source.source_type is SourceType.HOST

        broker = ParsedSectionsBroker()
        update_host_sections(
            broker,
            make_nodes(
                config_cache,
                host_config,
                ipaddress,
                mode=mode,
                sources=[source],
            ),
            max_cachefile_age=0,
            host_config=host_config,
            fetcher_messages=[
                FetcherMessage.from_raw_data(
                    result.OK(source.default_raw_data),
                    Snapshot.null(),
                    source.fetcher_type,
                ),
            ],
            selected_sections=NO_SELECTION,
        )
        assert len(broker) == 1

        key = HostKey(hostname, ipaddress, source.source_type)
        assert key in broker

        section = broker[key]

        assert len(section.sections) == 1
        assert section.sections[SectionName("section_name_%s" %
                                            hostname)] == [["section_content"]]

@pytest.mark.parametrize(
    "hostname,host_entries,cluster_node_keys,expected_result",
    [
        # No clusters
        ("heute", [
            ('heute', NODE_1),
        ], None, NODE_1),
        # Clusters: host_of_clustered_service returns the cluster name. That means
        # that the service is assigned to the cluster.
        ("cluster", [
            ('node1', NODE_1),
            ('node2', NODE_2),
        ], [
            HostKey("node1", "127.0.0.1", SourceType.HOST),
            HostKey("node2", "127.0.0.1", SourceType.HOST),
        ], NODE_1 + NODE_2),
        # host_of_clustered_service returns either the cluster or the node name.
        # That means the service is assigned to the cluster or to the node, respectively.
        ("cluster", [
            ('node1', NODE_1),
            ('node2', NODE_2),
        ], [
            HostKey("node2", "127.0.0.1", SourceType.HOST),
        ], NODE_2),
    ])
def test_get_section_content(hostname, host_entries, cluster_node_keys,
                             expected_result):

    parsed_sections_broker = ParsedSectionsBroker()
def test__find_candidates():
    broker = ParsedSectionsBroker()

    broker._data = {
        # We only care about the keys here; the content is set to arbitrary values that can be parsed.
        # The section names have been chosen arbitrarily.
        HostKey("test_node", "1.2.3.4", SourceType.HOST):
        AgentHostSections({
            SectionName("kernel"): [],  # host only
            SectionName("uptime"): [['123']],  # host & mgmt
        }),
        HostKey("test_node", "1.2.3.4", SourceType.MANAGEMENT):
        SNMPHostSections({
            # host & mgmt:
            SectionName("uptime"): [['123']],  # type: ignore[dict-item]
            # mgmt only:
            SectionName("liebert_fans"):
            [[['Fan', '67', 'umin']]],  # type: ignore[dict-item]
            # is already mgmt_ prefixed:
            SectionName("mgmt_snmp_info"):
            [[['a', 'b', 'c', 'd']]],  # type: ignore[dict-item]
        }),
    }

    preliminary_candidates = list(
        agent_based_register.iter_all_check_plugins())
    parsed_sections_of_interest = {
        parsed_section_name
        for plugin in preliminary_candidates
        for parsed_section_name in plugin.sections
    }

    assert discovery._discovered_services._find_host_candidates(
        broker,
        preliminary_candidates,
        parsed_sections_of_interest,
    ) == {
        CheckPluginName('docker_container_status_uptime'),
        CheckPluginName("kernel"),
        CheckPluginName('kernel_performance'),
        CheckPluginName('kernel_util'),
        CheckPluginName("uptime"),
    }

    assert discovery._discovered_services._find_mgmt_candidates(
        broker,
        preliminary_candidates,
        parsed_sections_of_interest,
    ) == {
        CheckPluginName('mgmt_docker_container_status_uptime'),
        CheckPluginName("mgmt_liebert_fans"),
        CheckPluginName("mgmt_uptime"),
    }

    assert discovery._discovered_services._find_candidates(
        broker,
        run_only_plugin_names=None,
    ) == {
        CheckPluginName('docker_container_status_uptime'),
        CheckPluginName("kernel"),
        CheckPluginName('kernel_performance'),
        CheckPluginName('kernel_util'),
        CheckPluginName('mgmt_docker_container_status_uptime'),
        CheckPluginName("mgmt_liebert_fans"),
        CheckPluginName("mgmt_uptime"),
        CheckPluginName("uptime"),
    }
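
# What the expected sets above encode, as far as these assertions show: the
# host candidates keep the plain plugin names ("kernel", "uptime", ...), the
# management candidates are the "mgmt_"-prefixed variants ("mgmt_uptime",
# "mgmt_liebert_fans"), and _find_candidates() yields the union of both sets.
# A tiny illustration of that prefix relationship (assuming CheckPluginName
# round-trips through str(), as its usage elsewhere in cmk suggests):
assert str(CheckPluginName("mgmt_uptime")) == "mgmt_" + str(CheckPluginName("uptime"))
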
def _cluster_scenario(monkeypatch):
    hostname = "test-clusterhost"
    ipaddress = "1.2.3.4"
    node1_hostname = 'test-node1'
    node2_hostname = 'test-node2'

    def fake_lookup_ip_address(host_config, *, family, for_mgmt_board=True):
        return ipaddress

    monkeypatch.setattr(ip_lookup, "lookup_ip_address", fake_lookup_ip_address)

    ts = Scenario()
    ts.add_host(node1_hostname)
    ts.add_host(node2_hostname)
    ts.add_cluster(hostname, nodes=[node1_hostname, node2_hostname])
    ts.set_ruleset("inventory_df_rules", [{
        'value': {
            'ignore_fs_types': ['tmpfs', 'nfs', 'smbfs', 'cifs', 'iso9660'],
            'never_ignore_mountpoints': ['~.*/omd/sites/[^/]+/tmp$']
        },
        'condition': {
            'host_labels': {
                'cmk/check_mk_server': 'yes'
            }
        }
    }])
    ts.set_ruleset("clustered_services", [([], [node1_hostname], ['fs_'])])
    host_config = ts.apply(monkeypatch).get_host_config(hostname)

    DiscoveredHostLabelsStore(node1_hostname).save({
        'node1_existing_label': {
            'plugin_name': 'node1_plugin',
            'value': 'true',
        }
    })

    DiscoveredHostLabelsStore(hostname).save({
        'existing_label': {
            'plugin_name': 'foo',
            'value': 'bar',
        },
        'another_label': {
            'plugin_name': 'labels',
            'value': 'true',
        }
    })

    broker = ParsedSectionsBroker()
    broker.update({
        HostKey(hostname=node1_hostname,
                ipaddress=ipaddress,
                source_type=SourceType.HOST):
        AgentHostSections(
            sections={
                SectionName("labels"): [
                    [
                        '{"cmk/check_mk_server":"yes"}',
                    ],
                ],
                SectionName("df"): [
                    [
                        '/dev/sda1',
                        'vfat',
                        '523248',
                        '3668',
                        '519580',
                        '1%',
                        '/boot/test-efi',
                    ],
                    [
                        'tmpfs',
                        'tmpfs',
                        '8152916',
                        '244',
                        '8152672',
                        '1%',
                        '/opt/omd/sites/test-heute/tmp',
                    ],
                ],
            }),
        HostKey(hostname=node2_hostname,
                ipaddress=ipaddress,
                source_type=SourceType.HOST):
        AgentHostSections(
            sections={
                SectionName("labels"): [
                    [
                        '{"node2_live_label":"true"}',
                    ],
                ],
                SectionName("df"): [
                    [
                        '/dev/sda1',
                        'vfat',
                        '523248',
                        '3668',
                        '519580',
                        '1%',
                        '/boot/test-efi',
                    ],
                    [
                        'tmpfs',
                        'tmpfs',
                        '8152916',
                        '244',
                        '8152672',
                        '1%',
                        '/opt/omd/sites/test-heute2/tmp',
                    ],
                ],
            }),
    })

    return ClusterScenario(
        host_config,
        ipaddress,
        broker,
        node1_hostname,
        node2_hostname,
    )
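
# Hedged usage sketch for the fixture above, assuming ClusterScenario is a
# NamedTuple whose fields follow the positional arguments of the return
# statement; the unpacking below is illustrative only.
def _example_use_cluster_scenario(monkeypatch):
    host_config, ipaddress, broker, node1, node2 = _cluster_scenario(monkeypatch)
    assert HostKey(node1, ipaddress, SourceType.HOST) in broker
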
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_classes.CheckPlugin],
    params_function: Callable[[], Parameters],
) -> AggregatedResult:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.
    """
    if plugin is None:
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=CHECK_NOT_IMPLEMENTED,
            cache_info=None,
        )

    check_function = (plugin.cluster_check_function
                      if host_config.is_cluster else plugin.check_function)

    source_type = (SourceType.MANAGEMENT
                   if service.check_plugin_name.is_management_name() else SourceType.HOST)

    config_cache = config.get_config_cache()

    kwargs = {}
    try:
        kwargs = parsed_sections_broker.get_section_cluster_kwargs(
            config_cache.get_clustered_service_node_keys(
                host_config.hostname,
                source_type,
                service.description,
                ip_lookup.lookup_ip_address,
            ) or [],
            plugin.sections,
        ) if host_config.is_cluster else parsed_sections_broker.get_section_kwargs(
            HostKey(host_config.hostname, ipaddress, source_type),
            plugin.sections,
        )

        if not kwargs and not service.check_plugin_name.is_management_name():
            # In 1.6 some plugins were discovered for management boards, but with
            # the regular host plugin's name. In this case retry with the source type
            # forced to MANAGEMENT:
            kwargs = parsed_sections_broker.get_section_cluster_kwargs(
                config_cache.get_clustered_service_node_keys(
                    host_config.hostname,
                    SourceType.MANAGEMENT,
                    service.description,
                    ip_lookup.lookup_ip_address,
                ) or [],
                plugin.sections,
            ) if host_config.is_cluster else parsed_sections_broker.get_section_kwargs(
                HostKey(host_config.hostname, ipaddress, SourceType.MANAGEMENT),
                plugin.sections,
            )

        if not kwargs:  # no data found
            return AggregatedResult(
                submit=False,
                data_received=False,
                result=RECEIVED_NO_DATA,
                cache_info=None,
            )

        if service.item is not None:
            kwargs["item"] = service.item

        if plugin.check_default_parameters is not None:
            kwargs["params"] = params_function()

        with _service_context(service):
            result = _aggregate_results(check_function(**kwargs))

    except (item_state.MKCounterWrapped, checking_classes.IgnoreResultsError) as e:
        msg = str(e) or "No service summary available"
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=(0, msg, []),
            cache_info=None,
        )

    except MKTimeout:
        raise

    except Exception:
        if cmk.utils.debug.enabled():
            raise
        table = check_table.get_check_table(host_config.hostname, skip_autochecks=True)
        result = 3, cmk.base.crash_reporting.create_check_crash_dump(
            host_name=host_config.hostname,
            service_name=service.description,
            plugin_name=service.check_plugin_name,
            plugin_kwargs=kwargs,
            is_manual=service.id() in table,
        ), []

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=parsed_sections_broker.get_cache_info(plugin.sections),
    )
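
# Hedged usage sketch: how a caller might consume the AggregatedResult built
# above (the field names and the (state, summary, perfdata) shape are taken
# from the construction sites in this function; _example_handle itself is not
# part of cmk).
def _example_handle(aggregated: AggregatedResult) -> None:
    if not aggregated.data_received:
        return  # no data at all for this plugin
    if aggregated.submit:
        state, summary, perfdata = aggregated.result
        # ... hand (state, summary, perfdata) and aggregated.cache_info to the core
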
def _execute_check_legacy_mode(
    multi_host_sections: MultiHostSections,
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    service: Service,
    *,
    submit_to_core: bool,
    show_perfdata: bool,
) -> bool:
    legacy_check_plugin_name = config.legacy_check_plugin_names.get(
        service.check_plugin_name)
    if legacy_check_plugin_name is None:
        _submit_check_result(
            hostname,
            service.description,
            CHECK_NOT_IMPLEMENTED,
            None,
            submit_to_core=submit_to_core,
            show_perfdata=show_perfdata,
        )
        return True

    check_function = config.check_info[legacy_check_plugin_name].get(
        "check_function")
    if check_function is None:
        _submit_check_result(
            hostname,
            service.description,
            CHECK_NOT_IMPLEMENTED,
            None,
            submit_to_core=submit_to_core,
            show_perfdata=show_perfdata,
        )
        return True

    section_name = legacy_check_plugin_name.split('.')[0]

    section_content = None
    mgmt_board_info = config.get_management_board_precedence(
        section_name, config.check_info)
    source_type = SourceType.MANAGEMENT if mgmt_board_info == LEGACY_MGMT_ONLY else SourceType.HOST
    try:
        section_content = multi_host_sections.get_section_content(
            HostKey(hostname, ipaddress, source_type),
            mgmt_board_info,
            section_name,
            for_discovery=False,
            cluster_node_keys=config.get_config_cache().
            get_clustered_service_node_keys(
                hostname,
                source_type,
                service.description,
                ip_lookup.lookup_ip_address,
            ),
            check_legacy_info=config.check_info,
        )

        if section_content is None:  # No data for this check type
            return False

        # Call the actual check function
        item_state.reset_wrapped_counters()

        used_params = legacy_determine_check_params(service.parameters)
        raw_result = check_function(service.item, used_params, section_content)
        result = sanitize_check_result(raw_result)
        item_state.raise_counter_wrap()

    except item_state.MKCounterWrapped as e:
        # Handle check implementations that do not yet deal with wrapped
        # counters via this exception on their own.
        # Do not submit any check result in that case:
        console.verbose("%-20s PEND - Cannot compute check result: %s\n",
                        ensure_str(service.description), e)
        # Don't submit to core - we're done.
        return True

    except MKTimeout:
        raise

    except Exception:
        if cmk.utils.debug.enabled():
            raise
        result = 3, cmk.base.crash_reporting.create_check_crash_dump(
            hostname,
            service.check_plugin_name,
            {
                "item": service.item,
                "params": used_params,
                "section_content": section_content
            },
            is_manual_check(hostname, service.id()),
            service.description,
        ), []

    _submit_check_result(
        hostname,
        service.description,
        result,
        multi_host_sections.legacy_determine_cache_info(
            SectionName(section_name)),
        submit_to_core=submit_to_core,
        show_perfdata=show_perfdata,
    )
    return True
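
# As far as the code above shows, the boolean return value means "this service
# was handled" (a result was submitted, or the check was deliberately skipped,
# e.g. on a wrapped counter); False is only returned when no section content
# was available for the check type at all.
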
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_classes.CheckPlugin],
    params_function: Callable[[], Parameters],
) -> Tuple[bool, bool, ServiceCheckResult]:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.

    Returns a triple:
       bool: should the result be submitted to the core
       bool: did we receive data for the plugin
       ServiceCheckResult: The aggregated result as returned by the plugin, or a fallback

    """
    if plugin is None:
        return False, True, CHECK_NOT_IMPLEMENTED

    check_function = (plugin.cluster_check_function
                      if host_config.is_cluster else plugin.check_function)

    source_type = (SourceType.MANAGEMENT
                   if service.check_plugin_name.is_management_name() else
                   SourceType.HOST)

    config_cache = config.get_config_cache()

    kwargs = {}
    try:
        kwargs = parsed_sections_broker.get_section_cluster_kwargs(
            config_cache.get_clustered_service_node_keys(
                host_config.hostname,
                source_type,
                service.description,
                ip_lookup.lookup_ip_address,
            ) or [],
            plugin.sections,
        ) if host_config.is_cluster else parsed_sections_broker.get_section_kwargs(
            HostKey(host_config.hostname, ipaddress, source_type),
            plugin.sections,
        )

        if not kwargs and not service.check_plugin_name.is_management_name():
            # In 1.6 some plugins were discovered for management boards, but with
            # the regular host plugin's name. In this case retry with the source type
            # forced to MANAGEMENT:
            kwargs = parsed_sections_broker.get_section_cluster_kwargs(
                config_cache.get_clustered_service_node_keys(
                    host_config.hostname,
                    SourceType.MANAGEMENT,
                    service.description,
                    ip_lookup.lookup_ip_address,
                ) or [],
                plugin.sections,
            ) if host_config.is_cluster else parsed_sections_broker.get_section_kwargs(
                HostKey(host_config.hostname, ipaddress,
                        SourceType.MANAGEMENT),
                plugin.sections,
            )

        if not kwargs:  # no data found
            return False, False, RECEIVED_NO_DATA

        if service.item is not None:
            kwargs["item"] = service.item

        if plugin.check_default_parameters is not None:
            kwargs["params"] = params_function()

        with _service_context(service):
            result = _aggregate_results(check_function(**kwargs))

    except (item_state.MKCounterWrapped,
            checking_classes.IgnoreResultsError) as e:
        msg = str(e) or "No service summary available"
        return False, True, (0, msg, [])

    except MKTimeout:
        raise

    except Exception:
        if cmk.utils.debug.enabled():
            raise
        result = 3, cmk.base.crash_reporting.create_check_crash_dump(
            host_config.hostname,
            service.check_plugin_name,
            kwargs,
            is_manual_check(host_config.hostname, service.id()),
            service.description,
        ), []

    return True, True, result
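
# Hedged sketch of consuming the triple documented in the docstring above; the
# arguments are assumed to be prepared by the caller exactly as for the
# AggregatedResult-based variant earlier in these examples.
def _example_call(broker, host_config, ipaddress, service, plugin, params_function):
    submit, data_received, check_result = get_aggregated_result(
        broker, host_config, ipaddress, service, plugin, params_function)
    if submit:
        state, summary, perfdata = check_result
    return data_received
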
def test_get_host_sections_cluster(mode, monkeypatch, mocker):
    hostname = "testhost"
    hosts = {
        "host0": "10.0.0.0",
        "host1": "10.0.0.1",
        "host2": "10.0.0.2",
    }
    address = "1.2.3.4"
    tags = {"agent": "no-agent"}
    section_name = SectionName("test_section")
    config_cache = make_scenario(hostname, tags).apply(monkeypatch)
    host_config = config.HostConfig.make_host_config(hostname)

    def lookup_ip_address(host_config, family=None, for_mgmt_board=False):
        return hosts[host_config.hostname]

    def check(_, *args, **kwargs):
        return result.OK(AgentHostSections(sections={section_name: [[str(section_name)]]}))

    monkeypatch.setattr(
        ip_lookup,
        "lookup_ip_address",
        lookup_ip_address,
    )
    monkeypatch.setattr(
        Source,
        "parse",
        check,
    )
    mocker.patch.object(
        cmk.utils.piggyback,
        "remove_source_status_file",
        autospec=True,
    )
    mocker.patch.object(
        cmk.utils.piggyback,
        "_store_status_file_of",
        autospec=True,
    )

    # Create a cluster
    host_config.nodes = list(hosts.keys())

    nodes = make_nodes(
        config_cache,
        host_config,
        address,
        mode=mode,
        sources=make_sources(host_config, address, mode=mode)
    )

    broker = ParsedSectionsBroker()
    update_host_sections(
        broker,
        nodes,
        max_cachefile_age=host_config.max_cachefile_age,
        host_config=host_config,
        fetcher_messages=[
            FetcherMessage.from_raw_data(
                result.OK(source.default_raw_data),
                Snapshot.null(),
                source.fetcher_type,
            ) for _h, _i, sources in nodes for source in sources
        ],
        selected_sections=NO_SELECTION,
    )
    assert len(broker) == len(hosts) == 3
    cmk.utils.piggyback._store_status_file_of.assert_not_called()  # type: ignore[attr-defined]
    assert cmk.utils.piggyback.remove_source_status_file.call_count == 3  # type: ignore[attr-defined]

    for host, addr in hosts.items():
        remove_source_status_file = cmk.utils.piggyback.remove_source_status_file
        remove_source_status_file.assert_any_call(host)  # type: ignore[attr-defined]
        key = HostKey(host, addr, SourceType.HOST)
        assert key in broker
        section = broker[key]
        assert len(section.sections) == 1
        assert next(iter(section.sections)) == section_name
        assert not section.cache_info
        assert not section.piggybacked_raw_data
def get_aggregated_result(
    multi_host_sections: MultiHostSections,
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    service: Service,
    *,
    used_params: LegacyCheckParameters,
) -> AggregatedResult:
    legacy_check_plugin_name = config.legacy_check_plugin_names.get(service.check_plugin_name)
    if legacy_check_plugin_name is None:
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=CHECK_NOT_IMPLEMENTED,
            cache_info=None,
        )

    check_function = config.check_info[legacy_check_plugin_name].get("check_function")
    if check_function is None:
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=CHECK_NOT_IMPLEMENTED,
            cache_info=None,
        )

    section_name = legacy_check_plugin_name.split('.')[0]

    section_content = None
    mgmt_board_info = config.get_management_board_precedence(section_name, config.check_info)
    source_type = SourceType.MANAGEMENT if mgmt_board_info == LEGACY_MGMT_ONLY else SourceType.HOST
    try:
        section_content = multi_host_sections.get_section_content(
            HostKey(hostname, ipaddress, source_type),
            mgmt_board_info,
            section_name,
            for_discovery=False,
            cluster_node_keys=config.get_config_cache().get_clustered_service_node_keys(
                hostname,
                source_type,
                service.description,
                ip_lookup.lookup_ip_address,
            ),
            check_legacy_info=config.check_info,
        )

        if section_content is None:  # No data for this check type
            return AggregatedResult(
                submit=False,
                data_received=False,
                result=RECEIVED_NO_DATA,
                cache_info=None,
            )

        # Call the actual check function
        item_state.reset_wrapped_counters()

        raw_result = check_function(service.item, used_params, section_content)
        result = _sanitize_check_result(raw_result)
        item_state.raise_counter_wrap()

    except item_state.MKCounterWrapped as exc:
        # Handle check implementations that do not yet deal with wrapped
        # counters via this exception on their own.
        # Do not submit any check result in that case:
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=(0, f"Cannot compute check result: {exc}\n", []),
            cache_info=None,
        )

    except MKTimeout:
        raise

    except Exception:
        if cmk.utils.debug.enabled():
            raise
        result = 3, cmk.base.crash_reporting.create_check_crash_dump(
            host_name=hostname,
            service_name=service.description,
            plugin_name=service.check_plugin_name,
            plugin_kwargs={
                "item": service.item,
                "params": used_params,
                "section_content": section_content
            },
            is_manual=service.id() in check_table.get_check_table(hostname, skip_autochecks=True),
        ), []

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=multi_host_sections.legacy_determine_cache_info(SectionName(section_name)),
    )