Example #1
def do_inv(hostnames):
    # type: (List[HostName]) -> None
    store.makedirs(cmk.utils.paths.inventory_output_dir)
    store.makedirs(cmk.utils.paths.inventory_archive_dir)

    for hostname in hostnames:
        console.section_begin(hostname)
        try:
            config_cache = config.get_config_cache()
            host_config = config_cache.get_host_config(hostname)

            if host_config.is_cluster:
                ipaddress = None
            else:
                ipaddress = ip_lookup.lookup_ip_address(hostname)

            sources = data_sources.DataSources(hostname, ipaddress)
            inventory_tree, status_data_tree = _do_inv_for(
                sources,
                multi_host_sections=None,
                host_config=host_config,
                ipaddress=ipaddress,
            )
            _run_inventory_export_hooks(host_config, inventory_tree)
            _show_inventory_results_on_console(inventory_tree,
                                               status_data_tree)

        except Exception as e:
            if cmk.utils.debug.enabled():
                raise

            console.section_error("%s" % e)
        finally:
            cmk.base.cleanup.cleanup_globals()
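A minimal usage sketch for do_inv, assuming a Checkmk environment where the function above is importable; the module path cmk.base.inventory and the host names are assumptions for illustration:

from cmk.base.inventory import do_inv  # assumed module path

# Inventorize two hosts; trees are written and export hooks are run per host.
do_inv(["myhost01", "myhost02"])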
Example #2
def _get_needed_check_plugin_names(host_config):
    import cmk.base.check_table as check_table
    needed_check_plugin_names = set([])

    # If the host is monitored via a special agent, the check plugin for that
    # special agent needs to be loaded
    sources = data_sources.DataSources(host_config.hostname, ipaddress=None)
    for source in sources.get_data_sources():
        if isinstance(source, data_sources.programs.SpecialAgentDataSource):
            needed_check_plugin_names.add(
                source.special_agent_plugin_file_name)

    # Collect the needed check plugin names using the host check table
    for check_plugin_name in check_table.get_needed_check_names(
            host_config.hostname,
            filter_mode="include_clustered",
            skip_ignored=False):
        if config.check_info[check_plugin_name].get("extra_sections"):
            for section_name in config.check_info[check_plugin_name][
                    "extra_sections"]:
                if section_name in config.check_info:
                    needed_check_plugin_names.add(section_name)

        needed_check_plugin_names.add(check_plugin_name)

    # Also include the check plugins of the cluster nodes to be able to load
    # the autochecks of the nodes
    if host_config.is_cluster:
        for node in host_config.nodes:
            needed_check_plugin_names.update(
                check_table.get_needed_check_names(node, skip_ignored=False))

    return needed_check_plugin_names
Example #3
def _get_needed_check_plugin_names(host_config):
    # type: (config.HostConfig) -> Set[CheckPluginName]
    import cmk.base.check_table as check_table  # pylint: disable=import-outside-toplevel
    needed_check_plugin_names = set([])

    # If the host is monitored via a special agent, the check plugin for that
    # special agent needs to be loaded
    sources = data_sources.DataSources(host_config.hostname, ipaddress=None)
    for source in sources.get_data_sources():
        if isinstance(source, data_sources.programs.SpecialAgentDataSource):
            needed_check_plugin_names.add(source.special_agent_plugin_file_name)

    # Collect the needed check plugin names using the host check table
    for check_plugin_name in check_table.get_needed_check_names(host_config.hostname,
                                                                filter_mode="include_clustered",
                                                                skip_ignored=False):
        if config.check_info[check_plugin_name].get("extra_sections"):
            for section_name in config.check_info[check_plugin_name]["extra_sections"]:
                if section_name in config.check_info:
                    needed_check_plugin_names.add(section_name)

        needed_check_plugin_names.add(check_plugin_name)

    # Also include the check plugins of the cluster nodes to be able to load
    # the autochecks of the nodes
    if host_config.is_cluster:
        nodes = host_config.nodes
        if nodes is None:
            raise MKGeneralException("Invalid cluster configuration")
        for node in nodes:
            needed_check_plugin_names.update(
                check_table.get_needed_check_names(node, skip_ignored=False))

    return needed_check_plugin_names
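A rough sketch of inspecting the result, assuming it runs inside the module that defines _get_needed_check_plugin_names above (the host name is illustrative):

config_cache = config.get_config_cache()
host_config = config_cache.get_host_config("myhost01")  # hypothetical host name

plugin_names = _get_needed_check_plugin_names(host_config)
print("Loading %d check plugins: %s" % (len(plugin_names), ", ".join(sorted(plugin_names))))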
Example #4
def do_inv_check(hostname, options):
    # type: (HostName, Dict[str, int]) -> Tuple[ServiceState, List[ServiceDetails], List[ServiceAdditionalDetails], Metric]
    _inv_hw_changes = options.get("hw-changes", 0)
    _inv_sw_changes = options.get("sw-changes", 0)
    _inv_sw_missing = options.get("sw-missing", 0)

    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)  # type: config.HostConfig

    if host_config.is_cluster:
        ipaddress = None
    else:
        ipaddress = ip_lookup.lookup_ip_address(hostname)

    status = 0
    infotexts = []  # type: List[Text]
    long_infotexts = []  # type: List[Text]
    perfdata = []  # type: List[Tuple]

    sources = data_sources.DataSources(hostname, ipaddress)
    inventory_tree, status_data_tree = _do_inv_for(
        sources,
        multi_host_sections=None,
        host_config=host_config,
        ipaddress=ipaddress,
    )

    # TODO: add cluster if and only if all sources do not fail?
    if _all_sources_fail(host_config, sources):
        old_tree, sources_state = None, 1
        status = max(status, sources_state)
        infotexts.append("Cannot update tree%s" %
                         check_api_utils.state_markers[sources_state])
    else:
        old_tree = _save_inventory_tree(hostname, inventory_tree)

    _run_inventory_export_hooks(host_config, inventory_tree)

    if inventory_tree.is_empty() and status_data_tree.is_empty():
        infotexts.append("Found no data")

    else:
        infotexts.append("Found %d inventory entries" %
                         inventory_tree.count_entries())

        # Node 'software' is always there because _do_inv_for creates this node for cluster info
        if not inventory_tree.get_sub_container(['software']).has_edge('packages')\
           and _inv_sw_missing:
            infotexts.append("software packages information is missing" +
                             check_api_utils.state_markers[_inv_sw_missing])
            status = max(status, _inv_sw_missing)

        if old_tree is not None:
            if not old_tree.is_equal(inventory_tree, edges=["software"]):
                infotext = "software changes"
                if _inv_sw_changes:
                    status = max(status, _inv_sw_changes)
                    infotext += check_api_utils.state_markers[_inv_sw_changes]
                infotexts.append(infotext)

            if not old_tree.is_equal(inventory_tree, edges=["hardware"]):
                infotext = "hardware changes"
                if _inv_hw_changes:
                    status = max(status, _inv_hw_changes)
                    infotext += check_api_utils.state_markers[_inv_hw_changes]

                infotexts.append(infotext)

        if not status_data_tree.is_empty():
            infotexts.append("Found %s status entries" %
                             status_data_tree.count_entries())

    for source in sources.get_data_sources():
        source_state, source_output, _source_perfdata = \
            source.get_summary_result_for_inventory()
        # Do not output informational (state = 0) results; this information is
        # shown by the "Check_MK" service
        if source_state != 0:
            status = max(source_state, status)
            infotexts.append("[%s] %s" % (source.id(), source_output))

    return status, infotexts, long_infotexts, perfdata
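The tuple returned by do_inv_check can be rendered as standard monitoring plugin output. A hedged sketch, assuming it runs in the module that defines do_inv_check above; the severity values in options (1 = WARN, 2 = CRIT) and the host name are illustrative:

import sys

# The option keys mirror the ones read at the top of do_inv_check above.
options = {"hw-changes": 1, "sw-changes": 1, "sw-missing": 2}
status, infotexts, long_infotexts, perfdata = do_inv_check("myhost01", options)

print(", ".join(infotexts))
for text in long_infotexts:
    print(text)
sys.exit(status)  # 0=OK, 1=WARN, 2=CRIT, 3=UNKNOWN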
Example #5
def do_check(hostname, ipaddress, only_check_plugin_names=None):
    # type: (HostName, Optional[HostAddress], Optional[List[CheckPluginName]]) -> Tuple[int, List[ServiceDetails], List[ServiceAdditionalDetails], List[Text]]
    cpu_tracking.start("busy")
    console.verbose("Check_MK version %s\n", six.ensure_str(cmk.__version__))

    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    exit_spec = host_config.exit_code_spec()

    status = 0  # type: ServiceState
    infotexts = []  # type: List[ServiceDetails]
    long_infotexts = []  # type: List[ServiceAdditionalDetails]
    perfdata = []  # type: List[Text]
    try:
        # In case of keepalive we always have an ipaddress (can be 0.0.0.0 or :: when
        # the address is unknown). When called without keepalive, ipaddress may be None
        # or already be an address (2nd argument)
        if ipaddress is None and not host_config.is_cluster:
            ipaddress = ip_lookup.lookup_ip_address(hostname)

        item_state.load(hostname)

        sources = data_sources.DataSources(hostname, ipaddress)

        num_success, missing_sections = \
            _do_all_checks_on_host(sources, host_config, ipaddress, only_check_plugin_names)

        if _submit_to_core:
            item_state.save(hostname)

        for source in sources.get_data_sources():
            source_state, source_output, source_perfdata = \
                source.get_summary_result_for_checking()
            if source_output != "":
                status = max(status, source_state)
                infotexts.append("[%s] %s" % (source.id(), source_output))
                perfdata.extend(
                    [_convert_perf_data(p) for p in source_perfdata])

        if missing_sections and num_success > 0:
            missing_sections_status, missing_sections_infotext = \
                _check_missing_sections(missing_sections, exit_spec)
            status = max(status, missing_sections_status)
            infotexts.append(missing_sections_infotext)

        elif missing_sections:
            infotexts.append("Got no information from host")
            status = max(status, cast(int, exit_spec.get("empty_output", 2)))

        cpu_tracking.end()
        phase_times = cpu_tracking.get_times()
        total_times = phase_times["TOTAL"]
        run_time = total_times[4]

        infotexts.append("execution time %.1f sec" % run_time)
        if config.check_mk_perfdata_with_times:
            perfdata += [
                "execution_time=%.3f" % run_time,
                "user_time=%.3f" % total_times[0],
                "system_time=%.3f" % total_times[1],
                "children_user_time=%.3f" % total_times[2],
                "children_system_time=%.3f" % total_times[3],
            ]

            for phase, times in phase_times.items():
                if phase in ["agent", "snmp", "ds"]:
                    t = times[4] - sum(times[:4])  # real time - CPU time
                    perfdata.append("cmk_time_%s=%.3f" % (phase, t))
        else:
            perfdata.append("execution_time=%.3f" % run_time)

        return status, infotexts, long_infotexts, perfdata
    finally:
        if _checkresult_file_fd is not None:
            _close_checkresult_file()

        # "ipaddress is not None": At least when working with a cluster host it seems the ipaddress
        # may be None.  This needs to be understood in detail and cleaned up. As the InlineSNMP
        # stats feature is a very rarely used debugging feature, the analyzation and fix is
        # postponed now.
        if config.record_inline_snmp_stats \
           and ipaddress is not None \
           and host_config.snmp_config(ipaddress).is_inline_snmp_host:
            inline_snmp.save_snmp_stats()
Example #6
def do_check(
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    only_check_plugin_names: Optional[Set[CheckPluginName]] = None
) -> Tuple[int, List[ServiceDetails], List[ServiceAdditionalDetails], List[str]]:
    cpu_tracking.start("busy")
    console.verbose("Check_MK version %s\n", cmk_version.__version__)

    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    exit_spec = host_config.exit_code_spec()

    status: ServiceState = 0
    infotexts: List[ServiceDetails] = []
    long_infotexts: List[ServiceAdditionalDetails] = []
    perfdata: List[str] = []
    try:
        # In case of keepalive we always have an ipaddress (can be 0.0.0.0 or :: when
        # the address is unknown). When called without keepalive, ipaddress may be None
        # or already be an address (2nd argument)
        if ipaddress is None and not host_config.is_cluster:
            ipaddress = ip_lookup.lookup_ip_address(hostname)

        item_state.load(hostname)

        services = _get_filtered_services(
            host_name=hostname,
            belongs_to_cluster=len(config_cache.clusters_of(hostname)) > 0,
            config_cache=config_cache,
            only_check_plugins=only_check_plugin_names,
        )

        # see which raw sections we may need
        selected_raw_sections = config.get_relevant_raw_sections(
            CheckPluginName(maincheckify(s.check_plugin_name)) for s in services)

        sources = data_sources.DataSources(
            hostname,
            ipaddress,
            sources=data_sources.make_sources(
                host_config,
                ipaddress,
                selected_raw_sections=selected_raw_sections,
            ),
        )
        num_success, plugins_missing_data = _do_all_checks_on_host(
            services,
            sources,
            host_config,
            ipaddress,
            only_check_plugin_names,
        )

        if _submit_to_core:
            item_state.save(hostname)

        for source in sources:
            source_state, source_output, source_perfdata = source.get_summary_result_for_checking()
            if source_output != "":
                status = max(status, source_state)
                infotexts.append("[%s] %s" % (source.id(), source_output))
                perfdata.extend([_convert_perf_data(p) for p in source_perfdata])

        if plugins_missing_data:
            missing_data_status, missing_data_infotext = _check_plugins_missing_data(
                plugins_missing_data,
                exit_spec,
                bool(num_success),
            )
            status = max(status, missing_data_status)
            infotexts.append(missing_data_infotext)

        cpu_tracking.end()
        phase_times = cpu_tracking.get_times()
        total_times = phase_times["TOTAL"]
        run_time = total_times[4]

        infotexts.append("execution time %.1f sec" % run_time)
        if config.check_mk_perfdata_with_times:
            perfdata += [
                "execution_time=%.3f" % run_time,
                "user_time=%.3f" % total_times[0],
                "system_time=%.3f" % total_times[1],
                "children_user_time=%.3f" % total_times[2],
                "children_system_time=%.3f" % total_times[3],
            ]

            for phase, times in phase_times.items():
                if phase in ["agent", "snmp", "ds"]:
                    t = times[4] - sum(times[:4])  # real time - CPU time
                    perfdata.append("cmk_time_%s=%.3f" % (phase, t))
        else:
            perfdata.append("execution_time=%.3f" % run_time)

        return status, infotexts, long_infotexts, perfdata
    finally:
        if _checkresult_file_fd is not None:
            _close_checkresult_file()

        # "ipaddress is not None": At least when working with a cluster host it seems the ipaddress
        # may be None.  This needs to be understood in detail and cleaned up. As the InlineSNMP
        # stats feature is a very rarely used debugging feature, the analyzation and fix is
        # postponed now.
        if config.record_inline_snmp_stats \
           and ipaddress is not None \
           and host_config.snmp_config(ipaddress).is_inline_snmp_host:
            inline.snmp_stats_save()
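A hedged sketch of turning the do_check result into a Nagios-compatible output line, assuming the context of the module that defines do_check above (the host name is illustrative; this is not the project's own output code):

import sys

status, infotexts, long_infotexts, perfdata = do_check("myhost01", None)

line = ", ".join(infotexts)
if perfdata:
    line += " | " + " ".join(perfdata)  # entries are already "name=value" strings
print(line)
for text in long_infotexts:
    print(text)
sys.exit(status)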
Example #7
def dump_host(hostname):
    # type: (HostName) -> None
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    out.output("\n")
    if host_config.is_cluster:
        nodes = host_config.nodes
        if nodes is None:
            raise RuntimeError()
        color = tty.bgmagenta
        add_txt = " (cluster of " + (", ".join(nodes)) + ")"
    else:
        color = tty.bgblue
        add_txt = ""
    out.output("%s%s%s%-78s %s\n" %
               (color, tty.bold, tty.white, hostname + add_txt, tty.normal))

    ipaddress = _ip_address_for_dump_host(host_config)

    addresses = ""  # type: Optional[str]
    if not host_config.is_ipv4v6_host:
        addresses = ipaddress
    else:
        try:
            if host_config.is_ipv6_primary:
                secondary = _ip_address_for_dump_host(host_config, 4)
            else:
                secondary = _ip_address_for_dump_host(host_config, 6)
        except Exception:
            secondary = "X.X.X.X"

        addresses = "%s, %s" % (ipaddress, secondary)
        if host_config.is_ipv6_primary:
            addresses += " (Primary: IPv6)"
        else:
            addresses += " (Primary: IPv4)"

    out.output(tty.yellow + "Addresses:              " + tty.normal +
               (addresses if addresses is not None else "No IP") + "\n")

    tag_template = tty.bold + "[" + tty.normal + "%s" + tty.bold + "]" + tty.normal
    tags = [(tag_template % ":".join(t))
            for t in sorted(host_config.tag_groups.items())]
    out.output(tty.yellow + "Tags:                   " + tty.normal +
               ", ".join(tags) + "\n")

    labels = [
        tag_template % ":".join(l) for l in sorted(host_config.labels.items())
    ]
    out.output(tty.yellow + "Labels:                 " + tty.normal +
               ", ".join(labels) + "\n")

    # TODO: Clean this up once cluster parent handling has been moved to HostConfig
    if host_config.is_cluster:
        parents_list = host_config.nodes
        if parents_list is None:
            raise RuntimeError()
    else:
        parents_list = host_config.parents
    if len(parents_list) > 0:
        out.output(tty.yellow + "Parents:                " + tty.normal +
                   ", ".join(parents_list) + "\n")
    out.output(tty.yellow + "Host groups:            " + tty.normal +
               ", ".join(host_config.hostgroups) + "\n")
    out.output(tty.yellow + "Contact groups:         " + tty.normal +
               ", ".join(host_config.contactgroups) + "\n")

    agenttypes = []
    sources = data_sources.DataSources(hostname, ipaddress)
    for source in sources.get_data_sources():
        agenttypes.append(source.describe())

    if host_config.is_ping_host:
        agenttypes.append('PING only')

    out.output(tty.yellow + "Agent mode:             " + tty.normal)
    out.output(sources.describe_data_sources() + "\n")

    out.output(tty.yellow + "Type of agent:          " + tty.normal)
    if len(agenttypes) == 1:
        out.output(agenttypes[0] + "\n")
    else:
        out.output("\n  ")
        out.output("\n  ".join(agenttypes) + "\n")

    out.output(tty.yellow + "Services:" + tty.normal + "\n")

    headers = ["checktype", "item", "params", "description", "groups"]
    colors = [tty.normal, tty.blue, tty.normal, tty.green, tty.normal]

    table_data = []
    for service in sorted(check_table.get_check_table(hostname).values(),
                          key=lambda s: s.description):
        table_data.append([
            service.check_plugin_name,
            six.ensure_str("None" if service.item is None else service.item),
            _evaluate_params(service.parameters),
            six.ensure_str(service.description), ",".join(
                config_cache.servicegroups_of_service(hostname,
                                                      service.description))
        ])

    tty.print_table(headers, colors, table_data, "  ")
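dump_host only prints to the console, so a usage sketch is just a loop over host names, assuming the context of the module that defines dump_host above (the host names are illustrative):

for hostname in ["myhost01", "mycluster01"]:
    dump_host(hostname)  # prints addresses, tags, labels, parents, groups and the service table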