Example #1
def make_broker(
    *,
    config_cache: ConfigCache,
    host_config: HostConfig,
    ip_address: Optional[HostAddress],
    mode: Mode,
    selected_sections: SectionNameCollection,
    file_cache_max_age: cache.MaxAge,
    fetcher_messages: Sequence[FetcherMessage],
    force_snmp_cache_refresh: bool,
    on_scan_error: OnError,
) -> Tuple[ParsedSectionsBroker, SourceResults]:
    sources = (make_sources(
        host_config,
        ip_address,
        selected_sections=selected_sections,
        force_snmp_cache_refresh=force_snmp_cache_refresh,
        on_scan_error=on_scan_error,
    ) if host_config.nodes is None else make_cluster_sources(
        config_cache,
        host_config,
    ))

    if not fetcher_messages:
        # Note: *Not* calling `fetch_all(sources)` here is probably buggy.
        # Note: `fetch_all(sources)` is almost always called in similar
        #       code in discovery and inventory.  The only two exceptions
        #       are `cmk.base.agent_based.checking.active_check_checking(...)` and
        #       `cmk.base.agent_based.discovery.active_check_discovery(...)`.
        #       This does not seem right.
        fetcher_messages = list(
            fetch_all(
                sources=sources,
                file_cache_max_age=file_cache_max_age,
                mode=mode,
            ))

    collected_host_sections, results = _collect_host_sections(
        sources=sources,
        file_cache_max_age=file_cache_max_age,
        fetcher_messages=fetcher_messages,
        selected_sections=selected_sections,
    )
    return (
        ParsedSectionsBroker({
            host_key: (
                ParsedSectionsResolver(section_plugins=[
                    agent_based_register.get_section_plugin(section_name)
                    for section_name in host_sections.sections
                ], ),
                SectionsParser(host_sections=host_sections),
            )
            for host_key, host_sections in collected_host_sections.items()
        }),
        results,
    )
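
A minimal call sketch for the `make_broker` variant above, assuming the usual config lookup helpers; the host name and IP address are illustrative assumptions, everything else follows the signature.

# Hedged usage sketch for the make_broker() defined above.  The host name and
# IP address are assumptions; the remaining arguments mirror the signature.
config_cache = config.get_config_cache()
host_config = config_cache.get_host_config("myhost")
broker, source_results = make_broker(
    config_cache=config_cache,
    host_config=host_config,
    ip_address="127.0.0.1",
    mode=Mode.CHECKING,
    selected_sections=NO_SELECTION,
    file_cache_max_age=host_config.max_cachefile_age,
    fetcher_messages=(),      # empty: make_broker() falls back to fetch_all()
    force_snmp_cache_refresh=False,
    on_scan_error=OnError.RAISE,
)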
Example #2
def test_host_config_creates_passing_source_sources(
    monkeypatch,
    hostname,
    tags,
    sources,
):
    ts = make_scenario(hostname, tags)
    ts.apply(monkeypatch)

    host_config = config.HostConfig.make_host_config(hostname)
    ipaddress = "127.0.0.1"

    assert [type(c) for c in make_sources(host_config, ipaddress)] == sources
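
The `hostname`, `tags` and `sources` parameters of this test are expected to come from a `pytest.mark.parametrize` decorator; a hypothetical parametrization could look like the sketch below (the tag values and source class names are assumptions, not taken from the test above).

import pytest

@pytest.mark.parametrize("hostname, tags, sources", [
    # hypothetical case: a plain agent host is expected to yield these
    # source classes (TCPSource / PiggybackSource are assumed names)
    ("agent-host", {"agent": "cmk-agent"}, [TCPSource, PiggybackSource]),
])
def test_host_config_creates_passing_source_sources(monkeypatch, hostname, tags, sources):
    ...  # body as shown above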
Example #3
def make_broker(
    *,
    config_cache: 'ConfigCache',
    host_config: 'HostConfig',
    ip_address: Optional[HostAddress],
    mode: 'Mode',
    selected_sections: 'SectionNameCollection',
    file_cache_max_age: int,
    fetcher_messages: Sequence['FetcherMessage'],
    force_snmp_cache_refresh: bool,
    on_scan_error: str,
) -> Tuple[ParsedSectionsBroker, Sequence[Tuple['Source', result.Result[HostSections, Exception]]]]:
    nodes = make_nodes(
        config_cache,
        host_config,
        ip_address,
        mode,
        make_sources(
            host_config,
            ip_address,
            mode=mode,
            selected_sections=selected_sections,
            force_snmp_cache_refresh=force_snmp_cache_refresh,
            on_scan_error=on_scan_error,
        ),
    )

    if not fetcher_messages:
        # Note: *Not* calling `fetch_all(sources)` here is probably buggy.
        # Note: `fetch_all(sources)` is almost always called in similar
        #       code in discovery and inventory.  The only two exceptions
        #       are `cmk.base.agent_based.checking.do_check(...)` and
        #       `cmk.base.agent_based.discovery.check_discovery(...)`.
        #       This does not seem right.
        fetcher_messages = list(fetch_all(
            nodes=nodes,
            file_cache_max_age=file_cache_max_age,
        ))

    collected_host_sections, results = _collect_host_sections(
        nodes=nodes,
        file_cache_max_age=file_cache_max_age,
        fetcher_messages=fetcher_messages,
        selected_sections=selected_sections,
    )
    return ParsedSectionsBroker({
        host_key: SectionsParser(host_sections=host_sections)
        for host_key, host_sections in collected_host_sections.items()
    }), results
Example #4
def _inventorize_host(
    *,
    host_config: config.HostConfig,
    run_plugin_names: Container[InventoryPluginName],
    selected_sections: SectionNameCollection,
    retentions_tracker: RetentionsTracker,
) -> ActiveInventoryResult:
    if host_config.is_cluster:
        return ActiveInventoryResult(
            trees=_do_inv_for_cluster(host_config),
            source_results=(),
            parsing_errors=(),
            processing_failed=False,
        )

    ipaddress = config.lookup_ip_address(host_config)
    config_cache = config.get_config_cache()

    fetched = fetch_all(
        sources=make_sources(
            config_cache,
            host_config,
            ipaddress,
            selected_sections=selected_sections,
            force_snmp_cache_refresh=False,
            on_scan_error=OnError.RAISE,
        ),
        file_cache_max_age=host_config.max_cachefile_age,
        mode=(Mode.INVENTORY
              if selected_sections is NO_SELECTION else Mode.FORCE_SECTIONS),
    )
    broker, results = make_broker(
        fetched=fetched,
        selected_sections=selected_sections,
        file_cache_max_age=host_config.max_cachefile_age,
    )

    parsing_errors = broker.parsing_errors()
    return ActiveInventoryResult(
        trees=_do_inv_for_realhost(
            host_config,
            parsed_sections_broker=broker,
            run_plugin_names=run_plugin_names,
            retentions_tracker=retentions_tracker,
        ),
        source_results=results,
        parsing_errors=parsing_errors,
        processing_failed=(_sources_failed(results) or bool(parsing_errors)),
    )
Example #5
def _fetch_parsed_sections_broker_for_inv(
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    selected_sections: SectionNameCollection,
) -> Tuple[ParsedSectionsBroker, Sequence[Tuple[Source, result.Result[
        HostSections, Exception]]]]:
    if host_config.is_cluster:
        return ParsedSectionsBroker(), []

    mode = (Mode.INVENTORY
            if selected_sections is NO_SELECTION else Mode.FORCE_SECTIONS)

    nodes = sources.make_nodes(
        config_cache,
        host_config,
        ipaddress,
        mode,
        sources.make_sources(
            host_config,
            ipaddress,
            mode=mode,
            selected_sections=selected_sections,
        ),
    )
    parsed_sections_broker = ParsedSectionsBroker()
    results = sources.update_host_sections(
        parsed_sections_broker,
        nodes,
        max_cachefile_age=host_config.max_cachefile_age,
        host_config=host_config,
        fetcher_messages=list(
            sources.fetch_all(
                nodes,
                max_cachefile_age=host_config.max_cachefile_age,
                host_config=host_config,
            )),
        selected_sections=selected_sections,
    )

    return parsed_sections_broker, results
Example #6
def commandline_checking(
    host_name: HostName,
    ipaddress: Optional[HostAddress],
    *,
    run_plugin_names: Container[CheckPluginName] = EVERYTHING,
    selected_sections: SectionNameCollection = NO_SELECTION,
    dry_run: bool = False,
    show_perfdata: bool = False,
) -> ActiveCheckResult:
    console.vverbose("Checkmk version %s\n", cmk_version.__version__)
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(host_name)
    # In case of keepalive we always have an ipaddress (can be 0.0.0.0 or :: when
    # the address is unknown). When called without keepalive, ipaddress may be None
    # or already an address (2nd argument).
    if ipaddress is None and not host_config.is_cluster:
        ipaddress = config.lookup_ip_address(host_config)

    fetched = fetch_all(
        sources=make_sources(
            config_cache,
            host_config,
            ipaddress,
            selected_sections=selected_sections,
            force_snmp_cache_refresh=False,
            on_scan_error=OnError.RAISE,
        ),
        file_cache_max_age=host_config.max_cachefile_age,
        mode=Mode.CHECKING
        if selected_sections is NO_SELECTION else Mode.FORCE_SECTIONS,
    )
    return _execute_checkmk_checks(
        hostname=host_name,
        ipaddress=ipaddress,
        fetched=fetched,
        run_plugin_names=run_plugin_names,
        selected_sections=selected_sections,
        dry_run=dry_run,
        show_perfdata=show_perfdata,
    )
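
A hedged call sketch for `commandline_checking` as defined above; the host name is an assumption and the IP address is left to the function's own lookup.

active_result = commandline_checking(
    "myhost",             # assumed host name
    None,                 # ipaddress: looked up by the function for non-cluster hosts
    run_plugin_names=EVERYTHING,
    selected_sections=NO_SELECTION,
    dry_run=False,
    show_perfdata=False,
)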
Example #7
def dump_host(hostname: HostName) -> None:
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    out.output("\n")
    if host_config.is_cluster:
        nodes = host_config.nodes
        if nodes is None:
            raise RuntimeError()
        color = tty.bgmagenta
        add_txt = " (cluster of " + (", ".join(nodes)) + ")"
    else:
        color = tty.bgblue
        add_txt = ""
    out.output("%s%s%s%-78s %s\n" %
               (color, tty.bold, tty.white, hostname + add_txt, tty.normal))

    ipaddress = _ip_address_for_dump_host(
        host_config, family=host_config.default_address_family)

    addresses: Optional[str] = ""
    if not host_config.is_ipv4v6_host:
        addresses = ipaddress
    else:
        try:
            secondary = _ip_address_for_dump_host(
                host_config,
                family=socket.AF_INET
                if host_config.is_ipv6_primary else socket.AF_INET6,
            )
        except Exception:
            secondary = "X.X.X.X"

        addresses = "%s, %s" % (ipaddress, secondary)
        if host_config.is_ipv6_primary:
            addresses += " (Primary: IPv6)"
        else:
            addresses += " (Primary: IPv4)"

    out.output(tty.yellow + "Addresses:              " + tty.normal +
               (addresses if addresses is not None else "No IP") + "\n")

    tag_template = tty.bold + "[" + tty.normal + "%s" + tty.bold + "]" + tty.normal
    tags = [(tag_template % ":".join(t))
            for t in sorted(host_config.tag_groups.items())]
    out.output(tty.yellow + "Tags:                   " + tty.normal +
               ", ".join(tags) + "\n")

    labels = [
        tag_template % ":".join(l) for l in sorted(host_config.labels.items())
    ]
    out.output(tty.yellow + "Labels:                 " + tty.normal +
               ", ".join(labels) + "\n")

    # TODO: Clean this up once cluster parent handling has been moved to HostConfig
    if host_config.is_cluster:
        parents_list = host_config.nodes
        if parents_list is None:
            raise RuntimeError()
    else:
        parents_list = host_config.parents
    if len(parents_list) > 0:
        out.output(tty.yellow + "Parents:                " + tty.normal +
                   ", ".join(parents_list) + "\n")
    out.output(tty.yellow + "Host groups:            " + tty.normal +
               ", ".join(host_config.hostgroups) + "\n")
    out.output(tty.yellow + "Contact groups:         " + tty.normal +
               ", ".join(host_config.contactgroups) + "\n")

    agenttypes = [
        source.description
        for source in sources.make_sources(host_config, ipaddress)
    ]

    if host_config.is_ping_host:
        agenttypes.append("PING only")

    out.output(tty.yellow + "Agent mode:             " + tty.normal)
    out.output(host_config.agent_description + "\n")

    out.output(tty.yellow + "Type of agent:          " + tty.normal)
    if len(agenttypes) == 1:
        out.output(agenttypes[0] + "\n")
    else:
        out.output("\n  ")
        out.output("\n  ".join(agenttypes) + "\n")

    out.output(tty.yellow + "Services:" + tty.normal + "\n")

    headers = ["checktype", "item", "params", "description", "groups"]
    colors = [tty.normal, tty.blue, tty.normal, tty.green, tty.normal]

    table_data = []
    for service in sorted(check_table.get_check_table(hostname).values(),
                          key=lambda s: s.description):
        table_data.append([
            str(service.check_plugin_name),
            str(service.item),
            _evaluate_params(service.parameters),
            service.description,
            ",".join(
                config_cache.servicegroups_of_service(hostname,
                                                      service.description)),
        ])

    tty.print_table(headers, colors, table_data, "  ")
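
A one-line call sketch for `dump_host`; the host name is an assumption.

dump_host("myhost")   # prints the address, tag, label, group and service overview shown above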
Example #8
def test_get_host_sections_cluster(monkeypatch, mocker):
    hostname = "testhost"
    hosts = {
        "host0": "10.0.0.0",
        "host1": "10.0.0.1",
        "host2": "10.0.0.2",
    }
    address = "1.2.3.4"
    tags = {"agent": "no-agent"}
    section_name = SectionName("test_section")
    config_cache = make_scenario(hostname, tags).apply(monkeypatch)
    host_config = config.HostConfig.make_host_config(hostname)

    def fake_lookup_ip_address(host_config, family=None):
        return hosts[host_config.hostname]

    def check(_, *args, **kwargs):
        return result.OK(AgentHostSections(sections={section_name: [[str(section_name)]]}))

    monkeypatch.setattr(
        config,
        "lookup_ip_address",
        fake_lookup_ip_address,
    )
    monkeypatch.setattr(
        Source,
        "parse",
        check,
    )
    mocker.patch.object(
        cmk.utils.piggyback,
        "remove_source_status_file",
        autospec=True,
    )
    mocker.patch.object(
        cmk.utils.piggyback,
        "_store_status_file_of",
        autospec=True,
    )

    # Create a cluster
    host_config.nodes = list(hosts.keys())

    nodes = make_nodes(
        config_cache,
        host_config,
        address,
        sources=make_sources(host_config, address),
    )

    host_sections = _collect_host_sections(
        nodes=nodes,
        file_cache_max_age=host_config.max_cachefile_age,
        fetcher_messages=[
            FetcherMessage.from_raw_data(
                result.OK(source.default_raw_data),
                Snapshot.null(),
                source.fetcher_type,
            )
            for _h, _i, sources in nodes for source in sources
        ],
        selected_sections=NO_SELECTION,
    )[0]
    assert len(host_sections) == len(hosts) == 3
    cmk.utils.piggyback._store_status_file_of.assert_not_called()  # type: ignore[attr-defined]
    assert cmk.utils.piggyback.remove_source_status_file.call_count == 3  # type: ignore[attr-defined]

    for host, addr in hosts.items():
        remove_source_status_file = cmk.utils.piggyback.remove_source_status_file
        remove_source_status_file.assert_any_call(host)  # type: ignore[attr-defined]
        key = HostKey(host, addr, SourceType.HOST)
        assert key in host_sections
        section = host_sections[key]
        assert len(section.sections) == 1
        assert next(iter(section.sections)) == section_name
        assert not section.cache_info
        assert not section.piggybacked_raw_data
Example #9
def do_check(
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    *,
    # The following arguments *must* remain optional for Nagios and the `DiscoCheckExecutor`.
    #   See Also: `cmk.base.discovery.check_discovery()`
    fetcher_messages: Sequence[FetcherMessage] = (),
    run_only_plugin_names: Optional[Set[CheckPluginName]] = None,
    selected_sections: SectionNameCollection = NO_SELECTION,
    submit_to_core: bool = True,
    show_perfdata: bool = False,
) -> Tuple[int, List[ServiceDetails], List[ServiceAdditionalDetails],
           List[str]]:
    console.verbose("Checkmk version %s\n", cmk_version.__version__)

    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    exit_spec = host_config.exit_code_spec()

    mode = Mode.CHECKING if selected_sections is NO_SELECTION else Mode.FORCE_SECTIONS

    status: ServiceState = 0
    infotexts: List[ServiceDetails] = []
    long_infotexts: List[ServiceAdditionalDetails] = []
    perfdata: List[str] = []
    try:
        license_usage.try_history_update()

        # In case of keepalive we always have an ipaddress (can be 0.0.0.0 or :: when
        # the address is unknown). When called without keepalive, ipaddress may be None
        # or already an address (2nd argument).
        if ipaddress is None and not host_config.is_cluster:
            ipaddress = ip_lookup.lookup_ip_address(host_config)

        # When monitoring Checkmk clusters, the cluster nodes are responsible for fetching all
        # information from the monitored host and cache the result for the cluster checks to be
        # performed on the cached information.
        #
        # This means that in case of SNMP nodes, they need to take the clustered services of the
        # node into account, fetch the needed sections and cache them for the cluster host.
        #
        # But later, when checking the node services, the node has to only deal with the unclustered
        # services.
        #
        # TODO: clean this up. The fetched sections are computed in the checkers
        #       _make_configured_snmp_sections now.
        #
        belongs_to_cluster = len(config_cache.clusters_of(hostname)) > 0

        services_to_fetch = _get_services_to_fetch(
            host_name=hostname,
            belongs_to_cluster=belongs_to_cluster,
            config_cache=config_cache,
        )

        services_to_check = _filter_clustered_services(
            config_cache=config_cache,
            host_name=hostname,
            belongs_to_cluster=belongs_to_cluster,
            services=services_to_fetch,
            run_only_plugin_names=run_only_plugin_names,
        )

        nodes = sources.make_nodes(
            config_cache,
            host_config,
            ipaddress,
            mode,
            sources.make_sources(
                host_config,
                ipaddress,
                mode=mode,
                selected_sections=selected_sections,
            ),
        )

        if not fetcher_messages:
            # Note: `fetch_all(sources)` is almost always called in similar
            #       code in discovery and inventory.  The only other exception
            #       is `cmk.base.discovery.check_discovery(...)`.  This does
            #       not seem right.
            fetcher_messages = list(
                sources.fetch_all(
                    nodes,
                    max_cachefile_age=host_config.max_cachefile_age,
                    host_config=host_config,
                ))

        with CPUTracker() as tracker:
            broker = ParsedSectionsBroker()
            result = sources.update_host_sections(
                broker,
                nodes,
                max_cachefile_age=host_config.max_cachefile_age,
                host_config=host_config,
                fetcher_messages=fetcher_messages,
                selected_sections=selected_sections,
            )

            num_success, plugins_missing_data = _do_all_checks_on_host(
                config_cache,
                host_config,
                ipaddress,
                parsed_sections_broker=broker,
                services=services_to_check,
                submit_to_core=submit_to_core,
                show_perfdata=show_perfdata,
            )

            if run_only_plugin_names is None:
                inventory.do_inventory_actions_during_checking_for(
                    config_cache,
                    host_config,
                    ipaddress,
                    parsed_sections_broker=broker,
                )

            for source, host_sections in result:
                source_state, source_output, source_perfdata = source.summarize(
                    host_sections)
                if source_output != "":
                    status = max(status, source_state)
                    infotexts.append("[%s] %s" % (source.id, source_output))
                    perfdata.extend(
                        [_convert_perf_data(p) for p in source_perfdata])

            if plugins_missing_data:
                missing_data_status, missing_data_infotext = _check_plugins_missing_data(
                    plugins_missing_data,
                    exit_spec,
                    bool(num_success),
                )
                status = max(status, missing_data_status)
                infotexts.append(missing_data_infotext)

        total_times = tracker.duration
        for msg in fetcher_messages:
            total_times += msg.stats.duration

        infotexts.append("execution time %.1f sec" %
                         total_times.process.elapsed)
        if config.check_mk_perfdata_with_times:
            perfdata += [
                "execution_time=%.3f" % total_times.process.elapsed,
                "user_time=%.3f" % total_times.process.user,
                "system_time=%.3f" % total_times.process.system,
                "children_user_time=%.3f" % total_times.process.children_user,
                "children_system_time=%.3f" %
                total_times.process.children_system,
            ]
            summary: DefaultDict[str, Snapshot] = defaultdict(Snapshot.null)
            for msg in fetcher_messages if fetcher_messages else ():
                if msg.fetcher_type in (
                        FetcherType.PIGGYBACK,
                        FetcherType.PROGRAM,
                        FetcherType.SNMP,
                        FetcherType.TCP,
                ):
                    summary[{
                        FetcherType.PIGGYBACK: "agent",
                        FetcherType.PROGRAM: "ds",
                        FetcherType.SNMP: "snmp",
                        FetcherType.TCP: "agent",
                    }[msg.fetcher_type]] += msg.stats.duration
            for phase, duration in summary.items():
                perfdata.append("cmk_time_%s=%.3f" % (phase, duration.idle))
        else:
            perfdata.append("execution_time=%.3f" %
                            total_times.process.elapsed)

        return status, infotexts, long_infotexts, perfdata
    finally:
        if _checkresult_file_fd is not None:
            _close_checkresult_file()
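
A hedged sketch of calling `do_check` directly; only the host name is an assumption, the remaining arguments mirror the defaults of the signature above, and the empty `fetcher_messages` make the function call `sources.fetch_all()` itself.

status, infotexts, long_infotexts, perfdata = do_check(
    "myhost",                    # assumed host name
    None,                        # ipaddress is looked up for non-cluster hosts
    fetcher_messages=(),
    run_only_plugin_names=None,
    selected_sections=NO_SELECTION,
)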
Example #10
def check_discovery(
    host_name: HostName,
    ipaddress: Optional[HostAddress],
    *,
    # The next argument *must* remain optional for the DiscoCheckExecutor.
    #   See Also: `cmk.base.checking.do_check()`.
    fetcher_messages: Sequence[FetcherMessage] = (),
) -> Tuple[int, List[str], List[str], List[Tuple]]:

    # Note: '--cache' is set in core_cmc, nagios template or even on CL and means:
    # 1. use caches as default:
    #    - Set FileCacheFactory.maybe = True (set max_cachefile_age, else 0)
    #    - Set FileCacheFactory.use_outdated = True
    # 2. Then these settings are used to read cache file or not

    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(host_name)
    discovery_parameters = DiscoveryParameters(
        on_error="raise",
        load_labels=True,
        save_labels=False,
        only_host_labels=False,
    )

    params = host_config.discovery_check_parameters
    if params is None:
        params = host_config.default_discovery_check_parameters()

    # In case of keepalive discovery we always have an ipaddress. When called without
    # keepalive, ipaddress is always None.
    if ipaddress is None and not host_config.is_cluster:
        ipaddress = config.lookup_ip_address(host_config)

    nodes = sources.make_nodes(
        config_cache,
        host_config,
        ipaddress,
        Mode.DISCOVERY,
        sources.make_sources(host_config, ipaddress, mode=Mode.DISCOVERY),
    )
    use_caches = cmk.core_helpers.cache.FileCacheFactory.maybe
    max_cachefile_age = config.discovery_max_cachefile_age() if use_caches else 0
    if not fetcher_messages:
        # Note: *Not* calling `fetch_all(sources)` here is probably buggy.
        #       Also See: `cmk.base.checking.do_check()`
        fetcher_messages = list(
            sources.fetch_all(
                nodes,
                max_cachefile_age=max_cachefile_age,
                host_config=host_config,
            ))

    parsed_sections_broker = ParsedSectionsBroker()
    result = sources.update_host_sections(
        parsed_sections_broker,
        nodes,
        max_cachefile_age=max_cachefile_age,
        host_config=host_config,
        fetcher_messages=fetcher_messages,
        selected_sections=NO_SELECTION,
    )

    services, host_label_discovery_result = _get_host_services(
        host_config,
        ipaddress,
        parsed_sections_broker,
        discovery_parameters,
    )

    status, infotexts, long_infotexts, perfdata, need_rediscovery = _aggregate_subresults(
        _check_service_lists(host_name, services, params),
        _check_host_labels(host_label_discovery_result, params),
        _check_data_sources(result),
    )

    if need_rediscovery:
        if host_config.is_cluster and host_config.nodes:
            for nodename in host_config.nodes:
                _set_rediscovery_flag(nodename)
        else:
            _set_rediscovery_flag(host_name)
        infotexts.append(u"rediscovery scheduled")

    return status, infotexts, long_infotexts, perfdata
Example #11
def discover_on_host(
    *,
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    mode: str,
    service_filters: Optional[_ServiceFilters],
    on_error: str,
    use_cached_snmp_data: bool,
    max_cachefile_age: int,
) -> DiscoveryResult:

    console.verbose("  Doing discovery with mode '%s'...\n" % mode)

    host_name = host_config.hostname
    result = DiscoveryResult()
    discovery_parameters = DiscoveryParameters(
        on_error=on_error,
        load_labels=(mode != "remove"),
        save_labels=(mode != "remove"),
        only_host_labels=(mode == "only-host-labels"),
    )

    if host_name not in config_cache.all_active_hosts():
        result.error_text = ""
        return result

    _set_cache_opts_of_checkers(use_cached_snmp_data=use_cached_snmp_data)

    try:
        # in "refresh" mode we first need to remove all previously discovered
        # checks of the host, so that _get_host_services() does show us the
        # new discovered check parameters.
        if mode == "refresh":
            result.self_removed += host_config.remove_autochecks()  # this is cluster-aware!

        if host_config.is_cluster:
            ipaddress = None
        else:
            ipaddress = config.lookup_ip_address(host_config)

        nodes = sources.make_nodes(
            config_cache,
            host_config,
            ipaddress,
            Mode.DISCOVERY,
            sources.make_sources(
                host_config,
                ipaddress,
                mode=Mode.DISCOVERY,
                on_scan_error=on_error,
            ),
        )

        parsed_sections_broker = ParsedSectionsBroker()
        sources.update_host_sections(
            parsed_sections_broker,
            nodes,
            max_cachefile_age=max_cachefile_age,
            host_config=host_config,
            fetcher_messages=list(
                sources.fetch_all(
                    nodes,
                    max_cachefile_age=max_cachefile_age,
                    host_config=host_config,
                )),
            selected_sections=NO_SELECTION,
        )

        # Compute current state of new and existing checks
        services, host_labels = _get_host_services(
            host_config,
            ipaddress,
            parsed_sections_broker,
            discovery_parameters,
        )

        old_services = services.get("old", [])

        # Create new list of checks
        new_services = _get_post_discovery_services(host_name, services, service_filters or
                                                    _ServiceFilters.accept_all(), result, mode)
        host_config.set_autochecks(new_services)

        result.diff_text = make_object_diff(
            _make_services_audit_log_object([x.service for x in old_services]),
            _make_services_audit_log_object([x.service for x in new_services]))

    except MKTimeout:
        raise  # let general timeout through

    except Exception as e:
        if cmk.utils.debug.enabled():
            raise
        result.error_text = str(e)

    else:
        if mode != "remove":
            result.self_new_host_labels = len(host_labels.new)
            result.self_total_host_labels = len(host_labels.present)

    result.self_total = result.self_new + result.self_kept
    return result
Example #12
def do_discovery(
    arg_hostnames: Set[HostName],
    *,
    selected_sections: SectionNameCollection,
    run_only_plugin_names: Optional[Set[CheckPluginName]],
    arg_only_new: bool,
    only_host_labels: bool = False,
) -> None:
    config_cache = config.get_config_cache()
    use_caches = not arg_hostnames or cmk.core_helpers.cache.FileCacheFactory.maybe
    on_error = "raise" if cmk.utils.debug.enabled() else "warn"

    discovery_parameters = DiscoveryParameters(
        on_error=on_error,
        load_labels=arg_only_new,
        save_labels=True,
        only_host_labels=only_host_labels,
    )

    host_names = _preprocess_hostnames(arg_hostnames, config_cache, only_host_labels)

    mode = Mode.DISCOVERY if selected_sections is NO_SELECTION else Mode.FORCE_SECTIONS

    # Now loop through all hosts
    for host_name in sorted(host_names):
        host_config = config_cache.get_host_config(host_name)
        section.section_begin(host_name)
        try:
            ipaddress = config.lookup_ip_address(host_config)
            nodes = sources.make_nodes(
                config_cache,
                host_config,
                ipaddress,
                mode,
                sources.make_sources(
                    host_config,
                    ipaddress,
                    mode=mode,
                    selected_sections=selected_sections,
                    on_scan_error=on_error,
                ),
            )
            max_cachefile_age = config.discovery_max_cachefile_age() if use_caches else 0

            parsed_sections_broker = ParsedSectionsBroker()
            sources.update_host_sections(
                parsed_sections_broker,
                nodes,
                max_cachefile_age=max_cachefile_age,
                host_config=host_config,
                fetcher_messages=list(
                    sources.fetch_all(
                        nodes,
                        max_cachefile_age=max_cachefile_age,
                        host_config=host_config,
                    )),
                selected_sections=selected_sections,
            )
            _do_discovery_for(
                host_name,
                ipaddress,
                parsed_sections_broker,
                run_only_plugin_names,
                arg_only_new,
                discovery_parameters,
            )

        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            section.section_error("%s" % e)
        finally:
            cmk.utils.cleanup.cleanup_globals()
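
A sketch of how `do_discovery` might be invoked for a single host; the host name is an assumption and the remaining arguments mirror the signature above.

# Hypothetical discovery run for one host ("myhost" is assumed).
do_discovery(
    {"myhost"},
    selected_sections=NO_SELECTION,
    run_only_plugin_names=None,
    arg_only_new=False,
    only_host_labels=False,
)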
Example #13
def get_check_preview(
    *,
    host_name: HostName,
    max_cachefile_age: int,
    use_cached_snmp_data: bool,
    on_error: str,
) -> Tuple[CheckPreviewTable, DiscoveredHostLabels]:
    """Get the list of service of a host or cluster and guess the current state of
    all services if possible"""
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(host_name)

    ip_address = None if host_config.is_cluster else config.lookup_ip_address(host_config)
    discovery_parameters = DiscoveryParameters(
        on_error=on_error,
        load_labels=False,
        save_labels=False,
        only_host_labels=False,
    )

    _set_cache_opts_of_checkers(use_cached_snmp_data=use_cached_snmp_data)
    nodes = sources.make_nodes(
        config_cache, host_config, ip_address, Mode.DISCOVERY,
        sources.make_sources(
            host_config,
            ip_address,
            mode=Mode.DISCOVERY,
            on_scan_error=on_error,
        ))

    parsed_sections_broker = ParsedSectionsBroker()
    sources.update_host_sections(
        parsed_sections_broker,
        nodes,
        max_cachefile_age=max_cachefile_age,
        host_config=host_config,
        fetcher_messages=list(
            sources.fetch_all(
                nodes,
                max_cachefile_age=max_cachefile_age,
                host_config=host_config,
            )),
        selected_sections=NO_SELECTION,
    )

    grouped_services, host_label_result = _get_host_services(
        host_config,
        ip_address,
        parsed_sections_broker,
        discovery_parameters,
    )

    table: CheckPreviewTable = []
    for check_source, services_with_nodes in grouped_services.items():
        for service, found_on_nodes in services_with_nodes:
            plugin = agent_based_register.get_check_plugin(service.check_plugin_name)
            params = _preview_params(host_name, service, plugin, check_source)

            if check_source in ['legacy', 'active', 'custom']:
                exitcode = None
                output = u"WAITING - %s check, cannot be done offline" % check_source.title()
                ruleset_name: Optional[RulesetName] = None
            else:

                ruleset_name = (str(plugin.check_ruleset_name)
                                if plugin and plugin.check_ruleset_name else None)
                wrapped_params = (Parameters(wrap_parameters(params)) if plugin and
                                  plugin.check_default_parameters is not None else None)

                exitcode, output, _perfdata = checking.get_aggregated_result(
                    parsed_sections_broker,
                    host_config,
                    ip_address,
                    service,
                    plugin,
                    lambda p=wrapped_params: p,  # type: ignore[misc]  # "type of lambda"
                ).result

            # Service discovery never uses the perfdata in the check table. That entry
            # is constantly discarded, yet passed around (back and forth) as part of the
            # discovery result in the request elements. Some perfdata VALUES are not
            # parsable by ast.literal_eval, such as "inf", which leads to ValueErrors.
            # Thus keep perfdata empty.
            perfdata: List[MetricTuple] = []
            table.append((
                _preview_check_source(host_name, service, check_source),
                str(service.check_plugin_name),
                ruleset_name,
                service.item,
                service.parameters,
                params,
                service.description,
                exitcode,
                output,
                perfdata,
                service.service_labels.to_dict(),
                found_on_nodes,
            ))

    return table, DiscoveredHostLabels(
        *{
            # TODO (mo): According to unit tests, this is what was done prior to refactoring.
            # I'm not sure this is desired. If it is, it should be explained.
            **{l.name: l for l in host_label_result.vanished},
            **{l.name: l for l in host_label_result.present},
        }.values())
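
A hedged call sketch for `get_check_preview`; the host name and the cache-age value are assumptions.

# Hypothetical preview call, e.g. on behalf of the service discovery GUI.
table, discovered_host_labels = get_check_preview(
    host_name="myhost",            # assumed host name
    max_cachefile_age=0,           # assumed: do not accept cached agent data
    use_cached_snmp_data=False,
    on_error="raise",
)
for row in table:
    check_source, plugin_name = row[0], row[1]
    print(check_source, plugin_name)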