Example #1
    def __init__(
        self,
        hostname: HostName,
        ipaddress: Optional[HostAddress],
        *,
        mode: Mode,
    ):
        super().__init__(
            hostname,
            ipaddress,
            mode=mode,
            source_type=SourceType.MANAGEMENT,
            fetcher_type=FetcherType.IPMI,
            description=IPMIConfigurator._make_description(
                ipaddress,
                cast(
                    IPMICredentials,
                    HostConfig.make_host_config(
                        hostname).management_credentials),
            ),
            id_="mgmt_ipmi",
            cpu_tracking_id="mgmt_ipmi",
            main_data_source=False,
        )
        self.credentials: Final[IPMICredentials] = cast(
            IPMICredentials,
            HostConfig.make_host_config(hostname).management_credentials)
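
Note on Example #1: `typing.cast` here only narrows `management_credentials` for the type checker; it performs no runtime conversion or check. A minimal, self-contained sketch of that narrowing, using hypothetical type aliases rather than Checkmk's real types:

from typing import Dict, Optional, Union, cast

# Hypothetical aliases that only mirror the shapes involved; the real Checkmk
# types differ.
IPMICredentials = Dict[str, str]
SNMPCredentials = str
ManagementCredentials = Optional[Union[IPMICredentials, SNMPCredentials]]


def ipmi_credentials(management_credentials: ManagementCredentials) -> IPMICredentials:
    # cast() only informs the type checker; it does no runtime validation,
    # so the caller must already know the host is configured with IPMI
    # management credentials.
    return cast(IPMICredentials, management_credentials)


print(ipmi_credentials({"username": "admin", "password": "secret"}))
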
Example #2
    def __init__(self, hostname: HostName, ipaddress: Optional[HostAddress]) -> None:
        super().__init__(
            hostname,
            ipaddress,
            source_type=SourceType.MANAGEMENT,
            fetcher_type=FetcherType.IPMI,
            description=IPMISource._make_description(
                ipaddress,
                cast(IPMICredentials,
                     HostConfig.make_host_config(hostname).management_credentials),
            ),
            id_="mgmt_ipmi",
            main_data_source=False,
        )
        self.credentials: Final[IPMICredentials] = self.get_ipmi_credentials(
            HostConfig.make_host_config(hostname))
Example #3
    def __init__(
        self,
        hostname: HostName,
        ipaddress: Optional[HostAddress],
        *,
        source_type: SourceType,
        fetcher_type: FetcherType,
        description: str,
        default_raw_data: TRawData,
        default_host_sections: THostSections,
        id_: str,
        cache_dir: Optional[Path] = None,
        persisted_section_dir: Optional[Path] = None,
    ) -> None:
        self.hostname: Final[HostName] = hostname
        self.ipaddress: Final[Optional[str]] = ipaddress
        self.source_type: Final[SourceType] = source_type
        self.fetcher_type: Final[FetcherType] = fetcher_type
        self.description: Final[str] = description
        self.default_raw_data: Final = default_raw_data
        self.default_host_sections: Final[THostSections] = default_host_sections
        self.id: Final[str] = id_
        if not cache_dir:
            cache_dir = Path(cmk.utils.paths.data_source_cache_dir) / self.id
        if not persisted_section_dir:
            persisted_section_dir = Path(cmk.utils.paths.var_dir) / "persisted_sections" / self.id

        self.file_cache_base_path: Final[Path] = cache_dir
        self.file_cache_max_age: int = 0
        self.persisted_sections_file_path: Final[Path] = persisted_section_dir / self.hostname

        self.host_config: Final[HostConfig] = HostConfig.make_host_config(hostname)
        self._logger: Final[logging.Logger] = logging.getLogger("cmk.base.data_source.%s" % id_)

        self.exit_spec = self.host_config.exit_code_spec(id_)
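
Note on Example #3: when no `cache_dir` or `persisted_section_dir` is passed, the constructor derives them from the site's standard directories and then appends the source id and hostname. A self-contained sketch of that defaulting pattern; `_DATA_SOURCE_CACHE_DIR` and `_VAR_DIR` are illustrative stand-ins for `cmk.utils.paths`, not the real module:

from pathlib import Path
from typing import Final, Optional

# Hypothetical stand-ins for cmk.utils.paths.data_source_cache_dir and var_dir.
_DATA_SOURCE_CACHE_DIR = "/tmp/check_mk/data_source_cache"
_VAR_DIR = "/tmp/check_mk/var"


class SketchSource:
    def __init__(
        self,
        hostname: str,
        id_: str,
        cache_dir: Optional[Path] = None,
        persisted_section_dir: Optional[Path] = None,
    ) -> None:
        self.id: Final[str] = id_
        # Fall back to the conventional per-source directories when none are given.
        if not cache_dir:
            cache_dir = Path(_DATA_SOURCE_CACHE_DIR) / self.id
        if not persisted_section_dir:
            persisted_section_dir = Path(_VAR_DIR) / "persisted_sections" / self.id

        self.file_cache_base_path: Final[Path] = cache_dir
        self.persisted_sections_file_path: Final[Path] = persisted_section_dir / hostname


src = SketchSource("myhost", id_="agent")
print(src.persisted_sections_file_path)  # /tmp/check_mk/var/persisted_sections/agent/myhost
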
Example #4
def make_cluster_sources(
    config_cache: config.ConfigCache,
    host_config: HostConfig,
) -> Sequence[Source]:
    """Abstract clusters/nodes/hosts"""
    assert host_config.nodes is not None

    return [
        source for host_name in host_config.nodes for source in make_sources(
            HostConfig.make_host_config(host_name),
            config.lookup_ip_address(config_cache.get_host_config(host_name)),
            force_snmp_cache_refresh=False,
        )
    ]
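
Note on Example #4: the nested comprehension flattens the per-node source lists of a cluster into one sequence. An equivalent loop form, with a hypothetical `_make_sources_for` standing in for `make_sources(...)`:

from typing import List, Sequence


def _make_sources_for(node: str) -> Sequence[str]:
    # Hypothetical stand-in for make_sources(HostConfig.make_host_config(node), ...).
    return [f"{node}:agent", f"{node}:snmp"]


def make_cluster_sources_loop(nodes: Sequence[str]) -> Sequence[str]:
    # Equivalent to:
    #   [source for host_name in nodes for source in _make_sources_for(host_name)]
    sources: List[str] = []
    for host_name in nodes:
        sources.extend(_make_sources_for(host_name))
    return sources


print(make_cluster_sources_loop(["node1", "node2"]))
# ['node1:agent', 'node1:snmp', 'node2:agent', 'node2:snmp']
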
Example #5
def get_cmk_passive_service_attributes(
        config_cache: ConfigCache, host_config: HostConfig, service: Service,
        check_mk_attrs: ObjectAttributes) -> ObjectAttributes:
    attrs = get_service_attributes(host_config.hostname, service.description,
                                   config_cache, service.check_plugin_name,
                                   service.parameters)

    value = host_config.snmp_check_interval(
        config_cache.section_name_of(service.check_plugin_name))
    if value is not None:
        attrs["check_interval"] = value
    else:
        attrs["check_interval"] = check_mk_attrs["check_interval"]

    return attrs
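
Note on Example #5: the check interval is taken from the SNMP-section-specific setting when one is configured, and otherwise inherited from the Check_MK service attributes. A compact sketch of that fallback; the function and parameter names are illustrative, not Checkmk's API:

from typing import Mapping, Optional


def resolve_check_interval(
    snmp_section_interval: Optional[float],
    check_mk_attrs: Mapping[str, float],
) -> float:
    # Prefer the section-specific SNMP check interval when one is configured,
    # otherwise inherit the interval of the Check_MK service.
    if snmp_section_interval is not None:
        return snmp_section_interval
    return check_mk_attrs["check_interval"]


print(resolve_check_interval(None, {"check_interval": 1.0}))  # 1.0
print(resolve_check_interval(4.0, {"check_interval": 1.0}))   # 4.0
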
Example #6
    def __init__(
        self,
        hostname: HostName,
        ipaddress: Optional[HostAddress],
        *,
        mode: Mode,
        source_type: SourceType,
        fetcher_type: FetcherType,
        description: str,
        default_raw_data: TRawData,
        default_host_sections: THostSections,
        id_: str,
        cpu_tracking_id: str,
        cache_dir: Optional[Path] = None,
        persisted_section_dir: Optional[Path] = None,
    ) -> None:
        self.hostname: Final[str] = hostname
        self.ipaddress: Final[Optional[str]] = ipaddress
        self.mode: Final[Mode] = mode
        self.source_type: Final[SourceType] = source_type
        self.fetcher_type: Final[FetcherType] = fetcher_type
        self.description: Final[str] = description
        self.default_raw_data: Final = default_raw_data
        self.default_host_sections: Final[
            THostSections] = default_host_sections
        self.id: Final[str] = id_
        self.cpu_tracking_id: Final[str] = cpu_tracking_id
        if not cache_dir:
            cache_dir = Path(cmk.utils.paths.data_source_cache_dir) / self.id
        if not persisted_section_dir:
            persisted_section_dir = Path(
                cmk.utils.paths.var_dir) / "persisted_sections" / self.id

        self.file_cache = FileCacheConfigurator(
            cache_dir / self.hostname,
            self.fetcher_type,
            simulation=config.simulation_mode,
        )
        self.persisted_sections_file_path: Final[
            Path] = persisted_section_dir / self.hostname
        self.selected_raw_sections: Optional[SelectedRawSections] = None

        self.host_config: Final[HostConfig] = HostConfig.make_host_config(
            hostname)
        self._logger: Final[logging.Logger] = logging.getLogger(
            "cmk.base.data_source.%s" % id_)

        self.exit_code_spec = self.host_config.exit_code_spec(id_)
Example #7
def _make_piggyback_nodes(
        config_cache: config.ConfigCache,
        host_config: HostConfig) -> Iterable[Tuple[HostName, Optional[HostAddress], DataSources]]:
    """Abstract clusters/nodes/hosts"""
    assert host_config.nodes is not None

    nodes = []
    for hostname in host_config.nodes:
        node_config = config_cache.get_host_config(hostname)
        ipaddress = ip_lookup.lookup_ip_address(node_config)
        sources = make_sources(
            HostConfig.make_host_config(hostname),
            ipaddress,
        )
        nodes.append((hostname, ipaddress, sources))
    return nodes
Example #8
def _do_inv_for_realhost(
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    *,
    multi_host_sections: MultiHostSections,
    run_only_plugin_names: Optional[Set[InventoryPluginName]],
) -> InventoryTrees:
    tree_aggregator = _TreeAggregator()
    _set_cluster_property(tree_aggregator.trees.inventory, host_config)

    section.section_step("Executing inventory plugins")
    for inventory_plugin in agent_based_register.iter_all_inventory_plugins():
        if run_only_plugin_names and inventory_plugin.name not in run_only_plugin_names:
            continue

        kwargs = multi_host_sections.get_section_kwargs(
            HostKey(host_config.hostname, ipaddress, SourceType.HOST),
            inventory_plugin.sections,
        )
        if not kwargs:
            console.vverbose(" %s%s%s%s: skipped (no data)\n", tty.yellow,
                             tty.bold, inventory_plugin.name, tty.normal)
            continue

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        if inventory_plugin.inventory_ruleset_name is not None:
            kwargs["params"] = host_config.inventory_parameters(
                str(inventory_plugin.inventory_ruleset_name)
            )  # TODO (mo): keep type!

        exception = tree_aggregator.aggregate_results(
            inventory_plugin.inventory_function(**kwargs),
            inventory_plugin.name,
        )
        if exception:
            console.warning(" %s%s%s%s: failed: %s", tty.red, tty.bold,
                            inventory_plugin.name, tty.normal, exception)
        else:
            console.verbose(" %s%s%s%s", tty.green, tty.bold,
                            inventory_plugin.name, tty.normal)
            console.vverbose(": ok\n")
    console.verbose("\n")

    tree_aggregator.trees.inventory.normalize_nodes()
    tree_aggregator.trees.status_data.normalize_nodes()
    return tree_aggregator.trees
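
Note on Example #8 (and its later variants): each inventory plugin is skipped when no section data is available for it, and optional parameters are injected under the `params` keyword when the plugin declares a ruleset. A stripped-down sketch of that dispatch loop with dummy plugin objects; all names here are illustrative:

from typing import Callable, Dict, Iterable, NamedTuple, Optional


class DummyPlugin(NamedTuple):
    # Illustrative stand-in for a registered inventory plugin.
    name: str
    inventory_ruleset_name: Optional[str]
    inventory_function: Callable[..., Iterable[str]]


def run_inventory_plugins(
    plugins: Iterable[DummyPlugin],
    section_kwargs_by_plugin: Dict[str, dict],
) -> None:
    for plugin in plugins:
        kwargs = section_kwargs_by_plugin.get(plugin.name, {})
        if not kwargs:
            continue  # skipped (no data), as in Example #8
        if plugin.inventory_ruleset_name is not None:
            # Parameters come from rule sets, much like check parameters.
            kwargs = {**kwargs, "params": {"ruleset": plugin.inventory_ruleset_name}}
        for entry in plugin.inventory_function(**kwargs):
            print(entry)


run_inventory_plugins(
    [DummyPlugin("demo", "demo_rules", lambda section, params=None: [f"{section} {params}"])],
    {"demo": {"section": "row"}},
)
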
Example #9
def _do_inv_for_realhost(
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    *,
    parsed_sections_broker: ParsedSectionsBroker,
    run_plugin_names: Container[InventoryPluginName],
) -> InventoryTrees:
    tree_aggregator = TreeAggregator()
    _set_cluster_property(tree_aggregator.trees.inventory, host_config)

    section.section_step("Executing inventory plugins")
    for inventory_plugin in agent_based_register.iter_all_inventory_plugins():
        if inventory_plugin.name not in run_plugin_names:
            continue

        for source_type in (SourceType.HOST, SourceType.MANAGEMENT):
            kwargs = get_section_kwargs(
                parsed_sections_broker,
                HostKey(host_config.hostname, ipaddress, source_type),
                inventory_plugin.sections,
            )
            if not kwargs:
                console.vverbose(" %s%s%s%s: skipped (no data)\n", tty.yellow,
                                 tty.bold, inventory_plugin.name, tty.normal)
                continue

            # Inventory functions can optionally have a second argument: parameters.
            # These are configured via rule sets (much like check parameters).
            if inventory_plugin.inventory_ruleset_name is not None:
                kwargs["params"] = host_config.inventory_parameters(
                    inventory_plugin.inventory_ruleset_name)

            exception = tree_aggregator.aggregate_results(
                inventory_plugin.inventory_function(**kwargs), )
            if exception:
                console.warning(" %s%s%s%s: failed: %s", tty.red, tty.bold,
                                inventory_plugin.name, tty.normal, exception)
            else:
                console.verbose(" %s%s%s%s", tty.green, tty.bold,
                                inventory_plugin.name, tty.normal)
                console.vverbose(": ok\n")
    console.verbose("\n")

    tree_aggregator.trees.inventory.normalize_nodes()
    tree_aggregator.trees.status_data.normalize_nodes()
    return tree_aggregator.trees
Example #10
def _make_cluster_nodes(
    config_cache: config.ConfigCache,
    host_config: HostConfig,
) -> Sequence[Tuple[HostName, Optional[HostAddress], Sequence[Source]]]:
    """Abstract clusters/nodes/hosts"""
    assert host_config.nodes is not None

    nodes = []
    for hostname in host_config.nodes:
        node_config = config_cache.get_host_config(hostname)
        ipaddress = config.lookup_ip_address(node_config)
        sources = make_sources(
            HostConfig.make_host_config(hostname),
            ipaddress,
            force_snmp_cache_refresh=False,
        )
        nodes.append((hostname, ipaddress, sources))
    return nodes
Example #11
    def __init__(
        self,
        hostname: HostName,
        ipaddress: Optional[HostAddress],
        *,
        mode: Mode,
        source_type: SourceType,
        fetcher_type: FetcherType,
        description: str,
        default_raw_data: TRawData,
        default_host_sections: THostSections,
        preselected_sections: PreselectedSectionNames,
        id_: str,
        cache_dir: Optional[Path] = None,
        persisted_section_dir: Optional[Path] = None,
    ) -> None:
        self.hostname: Final[str] = hostname
        self.ipaddress: Final[Optional[str]] = ipaddress
        self.mode: Final[Mode] = mode
        self.source_type: Final[SourceType] = source_type
        self.fetcher_type: Final[FetcherType] = fetcher_type
        self.description: Final[str] = description
        self.default_raw_data: Final = default_raw_data
        self.default_host_sections: Final[THostSections] = default_host_sections
        # If preselected sections are given, we assume that we are interested in these
        # and only these sections, so we may omit others and in the SNMP case (TODO (mo))
        # must try to fetch them (regardless of detection).
        self.preselected_sections: Final[PreselectedSectionNames] = preselected_sections

        self.id: Final[str] = id_
        if not cache_dir:
            cache_dir = Path(cmk.utils.paths.data_source_cache_dir) / self.id
        if not persisted_section_dir:
            persisted_section_dir = Path(cmk.utils.paths.var_dir) / "persisted_sections" / self.id

        self.file_cache_path: Final[Path] = cache_dir / self.hostname
        self.file_cache_max_age: int = 0
        self.persisted_sections_file_path: Final[Path] = persisted_section_dir / self.hostname

        self.host_config: Final[HostConfig] = HostConfig.make_host_config(hostname)
        self._logger: Final[logging.Logger] = logging.getLogger("cmk.base.data_source.%s" % id_)

        self.exit_spec = self.host_config.exit_code_spec(id_)
Example #12
def _make_piggyback_nodes(
    mode: Mode,
    config_cache: config.ConfigCache,
    host_config: HostConfig,
) -> Sequence[Tuple[HostName, Optional[HostAddress], Sequence[Source]]]:
    """Abstract clusters/nodes/hosts"""
    assert host_config.nodes is not None

    nodes = []
    for hostname in host_config.nodes:
        node_config = config_cache.get_host_config(hostname)
        ipaddress = ip_lookup.lookup_ip_address(
            node_config,
            family=node_config.default_address_family,
        )
        sources = make_sources(
            HostConfig.make_host_config(hostname),
            ipaddress,
            mode=mode,
        )
        nodes.append((hostname, ipaddress, sources))
    return nodes
Example #13
    def _extract_persisted_sections(
        raw_data: SNMPRawData,
        host_config: config.HostConfig,
    ) -> SNMPPersistedSections:
        """Extract the sections to be persisted from the raw_data and return it

        Gather the check types to be persisted, extract the related data from
        the raw data, calculate the times and store the persisted info for
        later use.
        """
        persisted_sections: SNMPPersistedSections = {}

        for section_name, section_content in raw_data.items():
            fetch_interval = host_config.snmp_fetch_interval(section_name)
            if fetch_interval is None:
                continue

            cached_at = int(time.time())
            until = cached_at + (fetch_interval * 60)
            persisted_sections[section_name] = (cached_at, until, section_content)

        return persisted_sections
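
Note on Example #13: each persisted section gets a validity window computed as `cached_at = now` and `until = cached_at + fetch_interval * 60`, since the fetch interval is configured in minutes. A small self-contained sketch of that bookkeeping, with plain dicts standing in for the Checkmk types:

import time
from typing import Dict, List, Optional, Tuple

# (cached_at, until, section_content) per section name, as in Examples #13/#14.
PersistedSections = Dict[str, Tuple[int, int, List[List[str]]]]


def extract_persisted_sections(
    raw_data: Dict[str, List[List[str]]],
    fetch_intervals: Dict[str, Optional[int]],  # minutes; None means "do not persist"
) -> PersistedSections:
    persisted: PersistedSections = {}
    for section_name, section_content in raw_data.items():
        fetch_interval = fetch_intervals.get(section_name)
        if fetch_interval is None:
            continue
        cached_at = int(time.time())
        until = cached_at + fetch_interval * 60  # interval is in minutes
        persisted[section_name] = (cached_at, until, section_content)
    return persisted


print(extract_persisted_sections({"if": [["eth0", "up"]]}, {"if": 5}))
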
Example #14
    def _extract_persisted_sections(
        raw_data: SNMPRawData,
        host_config: config.HostConfig,
    ) -> PersistedSections[SNMPSectionContent]:
        """Extract the sections to be persisted from the raw_data and return it

        Gather the check types to be persisted, extract the related data from
        the raw data, calculate the times and store the persisted info for
        later use.
        """
        persisted_sections = PersistedSections[SNMPSectionContent]({})

        for section_name, section_content in raw_data.items():
            fetch_interval = host_config.snmp_fetch_interval(section_name)
            if fetch_interval is None:
                continue

            cached_at = int(time.time())
            until = cached_at + (fetch_interval * 60)
            # pylint does not seem to understand `NewType`... leave the checking up to mypy.
            persisted_sections[section_name] = (  # false positive: pylint: disable=E1137
                (cached_at, until, section_content))

        return persisted_sections
Example #15
def _do_inv_for_realhost(
    host_config: config.HostConfig,
    multi_host_sections: MultiHostSections,
    ipaddress: Optional[HostAddress],
    inventory_tree: StructuredDataTree,
    status_data_tree: StructuredDataTree,
):
    section.section_step("Executing inventory plugins")
    console.verbose("Plugins:")
    for inventory_plugin in agent_based_register.iter_all_inventory_plugins():

        kwargs = multi_host_sections.get_section_kwargs(
            HostKey(host_config.hostname, ipaddress, SourceType.HOST),
            inventory_plugin.sections,
        )
        if not kwargs:
            continue

        console.verbose(
            " %s%s%s%s" %
            (tty.green, tty.bold, inventory_plugin.name, tty.normal))

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        if inventory_plugin.inventory_ruleset_name is not None:
            kwargs["params"] = host_config.inventory_parameters(
                str(inventory_plugin.inventory_ruleset_name)
            )  # TODO (mo): keep type!

        _aggregate_inventory_results(
            inventory_plugin.inventory_function(**kwargs),
            inventory_tree,
            status_data_tree,
        )

    console.verbose("\n")
Example #16
def automation_discovery(
    *,
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    mode: DiscoveryMode,
    service_filters: Optional[_ServiceFilters],
    on_error: OnError,
    use_cached_snmp_data: bool,
    max_cachefile_age: cmk.core_helpers.cache.MaxAge,
) -> DiscoveryResult:

    console.verbose("  Doing discovery with mode '%s'...\n" % mode)

    host_name = host_config.hostname
    result = DiscoveryResult()

    if host_name not in config_cache.all_active_hosts():
        result.error_text = ""
        return result

    cmk.core_helpers.cache.FileCacheFactory.use_outdated = True
    cmk.core_helpers.cache.FileCacheFactory.maybe = use_cached_snmp_data

    try:
        # in "refresh" mode we first need to remove all previously discovered
        # checks of the host, so that _get_host_services() does show us the
        # newly discovered check parameters.
        if mode is DiscoveryMode.REFRESH:
            result.self_removed += host_config.remove_autochecks(
            )  # this is cluster-aware!

        if host_config.is_cluster:
            ipaddress = None
        else:
            ipaddress = config.lookup_ip_address(host_config)

        parsed_sections_broker, _source_results = make_broker(
            config_cache=config_cache,
            host_config=host_config,
            ip_address=ipaddress,
            mode=Mode.DISCOVERY,
            selected_sections=NO_SELECTION,
            file_cache_max_age=max_cachefile_age,
            fetcher_messages=(),
            force_snmp_cache_refresh=not use_cached_snmp_data,
            on_scan_error=on_error,
        )

        if mode is not DiscoveryMode.REMOVE:
            host_labels = analyse_host_labels(
                host_config=host_config,
                ipaddress=ipaddress,
                parsed_sections_broker=parsed_sections_broker,
                load_labels=True,
                save_labels=True,
                on_error=on_error,
            )
            result.self_new_host_labels = len(host_labels.new)
            result.self_total_host_labels = len(host_labels.present)

        if mode is DiscoveryMode.ONLY_HOST_LABELS:
            # This is the result of a refactoring, and the following code was added
            # to ensure a compatible behaviour. I don't think it is particularly
            # sensible. We used to only compare service descriptions of old and new
            # services, so `make_object_diff` was always comparing two identical objects
            # if the mode was DiscoveryMode.ONLY_HOST_LABELS.
            # We brainlessly mimic that behaviour, for now.
            result.diff_text = make_object_diff(set(), set())
            return result

        # Compute current state of new and existing checks
        services = _get_host_services(
            host_config,
            ipaddress,
            parsed_sections_broker,
            on_error=on_error,
        )

        old_services = services.get("old", [])

        # Create new list of checks
        new_services = _get_post_discovery_services(
            host_name, services, service_filters
            or _ServiceFilters.accept_all(), result, mode)
        host_config.set_autochecks(new_services)

        # If old_services == new_services, make_object_diff will return
        # something along the lines of "nothing changed".
        # I guess this was written before discovered host labels were invented.
        result.diff_text = make_object_diff(
            {x.service.description
             for x in old_services},
            {x.service.description
             for x in new_services},
        )

    except MKTimeout:
        raise  # let general timeout through

    except Exception as e:
        if cmk.utils.debug.enabled():
            raise
        result.error_text = str(e)

    result.self_total = result.self_new + result.self_kept
    return result
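
Note on Example #16: the discovery result diff compares only the sets of old and new service descriptions, and in ONLY_HOST_LABELS mode two empty sets are compared, so the diff always reports no changes. A simplified sketch of that comparison; the enum subset and the diff formatting are illustrative stand-ins for `DiscoveryMode` and `make_object_diff`:

import enum
from typing import Optional, Set


class DiscoveryMode(enum.Enum):
    # Illustrative subset of the modes appearing in Example #16.
    REMOVE = "remove"
    REFRESH = "refresh"
    ONLY_HOST_LABELS = "only-host-labels"


def describe_service_changes(
    mode: DiscoveryMode,
    old_descriptions: Set[str],
    new_descriptions: Set[str],
) -> Optional[str]:
    if mode is DiscoveryMode.ONLY_HOST_LABELS:
        # Example #16 compares two empty sets here, i.e. it always reports
        # "no changes" for this mode.
        old_descriptions, new_descriptions = set(), set()
    added = new_descriptions - old_descriptions
    removed = old_descriptions - new_descriptions
    if not added and not removed:
        return None
    return f"added: {sorted(added)}, removed: {sorted(removed)}"


print(describe_service_changes(DiscoveryMode.REFRESH, {"CPU load"}, {"CPU load", "Memory"}))
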
Example #17
def discover_on_host(
    *,
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    mode: DiscoveryMode,
    service_filters: Optional[_ServiceFilters],
    on_error: str,
    use_cached_snmp_data: bool,
    max_cachefile_age: int,
) -> DiscoveryResult:

    console.verbose("  Doing discovery with mode '%s'...\n" % mode)

    host_name = host_config.hostname
    result = DiscoveryResult()
    discovery_parameters = DiscoveryParameters(
        on_error=on_error,
        load_labels=(mode is not DiscoveryMode.REMOVE),
        save_labels=(mode is not DiscoveryMode.REMOVE),
        only_host_labels=(mode is DiscoveryMode.ONLY_HOST_LABELS),
    )

    if host_name not in config_cache.all_active_hosts():
        result.error_text = ""
        return result

    _set_cache_opts_of_checkers(use_cached_snmp_data=use_cached_snmp_data)

    try:
        # in "refresh" mode we first need to remove all previously discovered
        # checks of the host, so that _get_host_services() does show us the
        # newly discovered check parameters.
        if mode is DiscoveryMode.REFRESH:
            result.self_removed += host_config.remove_autochecks()  # this is cluster-aware!

        if host_config.is_cluster:
            ipaddress = None
        else:
            ipaddress = config.lookup_ip_address(host_config)

        parsed_sections_broker, _source_results = make_broker(
            config_cache=config_cache,
            host_config=host_config,
            ip_address=ipaddress,
            mode=Mode.DISCOVERY,
            selected_sections=NO_SELECTION,
            file_cache_max_age=max_cachefile_age,
            fetcher_messages=(),
            force_snmp_cache_refresh=not use_cached_snmp_data,
            on_scan_error=on_error,
        )

        # Compute current state of new and existing checks
        services, host_labels = _get_host_services(
            host_config,
            ipaddress,
            parsed_sections_broker,
            discovery_parameters,
        )

        old_services = services.get("old", [])

        # Create new list of checks
        new_services = _get_post_discovery_services(host_name, services, service_filters or
                                                    _ServiceFilters.accept_all(), result, mode)
        host_config.set_autochecks(new_services)

        result.diff_text = make_object_diff(
            _make_services_audit_log_object([x.service for x in old_services]),
            _make_services_audit_log_object([x.service for x in new_services]))

    except MKTimeout:
        raise  # let general timeout through

    except Exception as e:
        if cmk.utils.debug.enabled():
            raise
        result.error_text = str(e)

    else:
        if mode is not DiscoveryMode.REMOVE:
            result.self_new_host_labels = len(host_labels.new)
            result.self_total_host_labels = len(host_labels.present)

    result.self_total = result.self_new + result.self_kept
    return result
Example #18
    def _make_description(hostname: HostName,
                          ipaddress: Optional[HostAddress]) -> str:
        return "TCP: %s:%d" % (
            ipaddress,
            HostConfig.make_host_config(hostname).agent_port,
        )
Example #19
def _do_inv_for_realhost(
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    sources: data_sources.DataSources,
    multi_host_sections: Optional[MultiHostSections],
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    inventory_tree: StructuredDataTree,
    status_data_tree: StructuredDataTree,
) -> None:
    for source in sources:
        if isinstance(source, data_sources.snmp.SNMPDataSource):
            source.detector.on_error = "raise"  # default
            source.detector.do_snmp_scan = True
            data_sources.snmp.SNMPDataSource.disable_data_source_cache()
            source.set_use_snmpwalk_cache(False)
            source.set_ignore_check_interval(True)
            if multi_host_sections is not None:
                # Status data inventory already provides filled multi_host_sections object.
                # SNMP data source: If 'do_status_data_inv' is enabled there may be
                # sections for inventory plugins which were not fetched yet.
                host_sections = multi_host_sections.setdefault(
                    HostKey(hostname, ipaddress, source.source_type),
                    SNMPHostSections(),
                )
                source.set_fetched_raw_section_names(
                    set(host_sections.sections))
                host_sections.update(source.run(selected_raw_sections=None))

    if multi_host_sections is None:
        multi_host_sections = data_sources.make_host_sections(
            config_cache,
            host_config,
            ipaddress,
            sources,
            max_cachefile_age=host_config.max_cachefile_age,
            selected_raw_sections=None,
        )

    section.section_step("Executing inventory plugins")
    import cmk.base.inventory_plugins as inventory_plugins  # pylint: disable=import-outside-toplevel
    console.verbose("Plugins:")
    for section_name, plugin in inventory_plugins.sorted_inventory_plugins():
        section_content = multi_host_sections.get_section_content(
            HostKey(hostname, ipaddress, SourceType.HOST),
            check_api_utils.HOST_PRECEDENCE,
            section_name,
            for_discovery=False,
        )
        if not section_content:  # section not present (None or [])
            # Note: this also excludes existing sections without info.
            continue

        if all([x in [[], {}, None] for x in section_content]):
            # Inventory plugins which get parsed info from related
            # check plugin may have more than one return value, eg
            # parse function of oracle_tablespaces returns ({}, {})
            continue

        console.verbose(" %s%s%s%s" %
                        (tty.green, tty.bold, section_name, tty.normal))

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        inv_function = plugin["inv_function"]
        kwargs = cmk.utils.misc.make_kwargs_for(
            inv_function,
            inventory_tree=inventory_tree,
            status_data_tree=status_data_tree)
        non_kwargs = set(
            cmk.utils.misc.getfuncargs(inv_function)) - set(kwargs)
        args = [section_content]
        if len(non_kwargs) == 2:
            args += [host_config.inventory_parameters(section_name)]
        inv_function(*args, **kwargs)
    console.verbose("\n")
Example #20
def _do_inv_for_realhost(
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    sources: Sequence[ABCSource],
    multi_host_sections: Optional[MultiHostSections],
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    inventory_tree: StructuredDataTree,
    status_data_tree: StructuredDataTree,
) -> Sequence[Tuple[ABCSource, Result[ABCHostSections, Exception]]]:
    results: List[Tuple[ABCSource, Result[ABCHostSections, Exception]]] = []
    for source in sources:
        if isinstance(source, checkers.snmp.SNMPSource):
            # TODO(ml): This modifies the SNMP fetcher config dynamically.
            source.on_snmp_scan_error = "raise"  # default
            checkers.FileCacheConfigurer.snmp_disabled = True
            source.use_snmpwalk_cache = False
            source.ignore_check_interval = True
            if multi_host_sections is not None:
                # Status data inventory already provides filled multi_host_sections object.
                # SNMP data source: If 'do_status_data_inv' is enabled there may be
                # sections for inventory plugins which were not fetched yet.
                host_sections = multi_host_sections.setdefault(
                    # TODO(ml): are
                    #    hostname == source.hostname
                    #    ipaddress == source.ipaddress
                    # ?
                    HostKey(hostname, ipaddress, source.source_type),
                    SNMPHostSections(),
                )
                # TODO(ml): This modifies the SNMP fetcher config dynamically.
                #           Can the fetcher handle that on its own?
                source.prefetched_sections = host_sections.sections

                # When executing the structured status inventory, we are in the Mode.CHECKING
                assert source.mode is Mode.INVENTORY or source.mode is Mode.CHECKING

                host_section = source.parse(source.fetch())
                results.append((source, host_section))
                if host_section.is_ok():
                    assert host_section.ok is not None
                    host_sections.update(host_section.ok)

    if multi_host_sections is None:
        multi_host_sections = MultiHostSections()
        hs = checkers.update_host_sections(
            multi_host_sections,
            checkers.make_nodes(
                config_cache,
                host_config,
                ipaddress,
                checkers.Mode.INVENTORY,
                sources,
            ),
            max_cachefile_age=host_config.max_cachefile_age,
            selected_raw_sections=None,
            host_config=host_config,
        )
        results.extend(hs)

    section.section_step("Executing inventory plugins")
    console.verbose("Plugins:")
    for inventory_plugin in agent_based_register.iter_all_inventory_plugins():

        kwargs = multi_host_sections.get_section_kwargs(
            HostKey(hostname, ipaddress, SourceType.HOST),
            inventory_plugin.sections,
        )
        if not kwargs:
            continue

        console.verbose(
            " %s%s%s%s" %
            (tty.green, tty.bold, inventory_plugin.name, tty.normal))

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        if inventory_plugin.inventory_ruleset_name is not None:
            kwargs["params"] = host_config.inventory_parameters(
                str(inventory_plugin.inventory_ruleset_name)
            )  # TODO (mo): keep type!

        _aggregate_inventory_results(
            inventory_plugin.inventory_function(**kwargs),
            inventory_tree,
            status_data_tree,
        )

    console.verbose("\n")
    return results
Example #21
def _do_inv_for_realhost(
    host_config: config.HostConfig,
    *,
    parsed_sections_broker: ParsedSectionsBroker,
    run_plugin_names: Container[InventoryPluginName],
    retentions_tracker: RetentionsTracker,
) -> InventoryTrees:
    tree_aggregator = TreeAggregator()

    _set_cluster_property(tree_aggregator.trees.inventory, host_config)

    section.section_step("Executing inventory plugins")
    for inventory_plugin in agent_based_register.iter_all_inventory_plugins():
        if inventory_plugin.name not in run_plugin_names:
            continue

        for host_key in (host_config.host_key, host_config.host_key_mgmt):
            kwargs = get_section_kwargs(
                parsed_sections_broker,
                host_key,
                inventory_plugin.sections,
            )
            if not kwargs:
                console.vverbose(
                    " %s%s%s%s: skipped (no data)\n",
                    tty.yellow,
                    tty.bold,
                    inventory_plugin.name,
                    tty.normal,
                )
                continue

            # Inventory functions can optionally have a second argument: parameters.
            # These are configured via rule sets (much like check parameters).
            if inventory_plugin.inventory_ruleset_name is not None:
                kwargs = {
                    **kwargs,
                    "params":
                    host_config.inventory_parameters(
                        inventory_plugin.inventory_ruleset_name),
                }

            exception = tree_aggregator.aggregate_results(
                inventory_generator=inventory_plugin.inventory_function(
                    **kwargs),
                retentions_tracker=retentions_tracker,
                raw_cache_info=parsed_sections_broker.get_cache_info(
                    inventory_plugin.sections),
                is_legacy_plugin=inventory_plugin.module is None,
            )

            if exception:
                console.warning(
                    " %s%s%s%s: failed: %s",
                    tty.red,
                    tty.bold,
                    inventory_plugin.name,
                    tty.normal,
                    exception,
                )
            else:
                console.verbose(" %s%s%s%s", tty.green, tty.bold,
                                inventory_plugin.name, tty.normal)
                console.vverbose(": ok\n")

    console.verbose("\n")
    return tree_aggregator.trees
Example #22
def _do_inv_for_realhost(
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    sources: data_sources.DataSources,
    multi_host_sections: Optional[MultiHostSections],
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    inventory_tree: StructuredDataTree,
    status_data_tree: StructuredDataTree,
) -> None:
    for source in sources:
        if isinstance(source, data_sources.snmp.SNMPDataSource):
            # TODO(ml): This modifies the SNMP fetcher config dynamically.
            configurator = cast(data_sources.snmp.SNMPConfigurator, source.configurator)
            configurator.on_snmp_scan_error = "raise"  # default
            configurator.do_snmp_scan = True
            data_sources.FileCacheConfigurator.snmp_disabled = True
            configurator.use_snmpwalk_cache = False
            configurator.ignore_check_interval = True
            if multi_host_sections is not None:
                # Status data inventory already provides filled multi_host_sections object.
                # SNMP data source: If 'do_status_data_inv' is enabled there may be
                # sections for inventory plugins which were not fetched yet.
                host_sections = multi_host_sections.setdefault(
                    # TODO(ml): are
                    #    hostname == source.hostname
                    #    ipaddress == source.ipaddress
                    # ?
                    HostKey(hostname, ipaddress, source.configurator.source_type),
                    SNMPHostSections(),
                )
                # TODO(ml): This modifies the SNMP fetcher config dynamically.
                #           Can the fetcher handle that on its own?
                configurator.prefetched_sections = host_sections.sections
                host_sections.update(source.run(selected_raw_sections=None))

    if multi_host_sections is None:
        multi_host_sections = data_sources.make_host_sections(
            config_cache,
            host_config,
            ipaddress,
            data_sources.Mode.INVENTORY,
            sources,
            max_cachefile_age=host_config.max_cachefile_age,
            selected_raw_sections=None,
        )

    section.section_step("Executing inventory plugins")
    console.verbose("Plugins:")
    for inventory_plugin in agent_based_register.iter_all_inventory_plugins():

        kwargs = multi_host_sections.get_section_kwargs(
            HostKey(hostname, ipaddress, SourceType.HOST),
            inventory_plugin.sections,
        )
        if not kwargs:
            continue

        console.verbose(" %s%s%s%s" % (tty.green, tty.bold, inventory_plugin.name, tty.normal))

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        if inventory_plugin.inventory_ruleset_name is not None:
            kwargs["params"] = host_config.inventory_parameters(
                str(inventory_plugin.inventory_ruleset_name))  # TODO (mo): keep type!

        _aggregate_inventory_results(
            inventory_plugin.inventory_function(**kwargs),
            inventory_tree,
            status_data_tree,
        )

    console.verbose("\n")