def _do_inv_for_realhost(host_config, sources, multi_host_sections, hostname, ipaddress, inventory_tree, status_data_tree):
    # type: (config.HostConfig, data_sources.DataSources, Optional[data_sources.MultiHostSections], HostName, Optional[HostAddress], StructuredDataTree, StructuredDataTree) -> None
    """Run the HW/SW inventory plugins for a single real (non-cluster) host.

    Reconfigures all SNMP data sources for a full scan, fetches any sections
    that are still missing from ``multi_host_sections``, and then feeds each
    section's content to the matching legacy inventory plugin, filling
    ``inventory_tree`` and ``status_data_tree`` in place.
    """
    # Force every SNMP source into "full inventory" mode: no caches, full
    # scan, and the inventory-specific check plugin name filter.
    for source in sources.get_data_sources():
        if isinstance(source, data_sources.SNMPDataSource):
            source.set_on_error("raise")
            source.set_do_snmp_scan(True)
            data_sources.SNMPDataSource.disable_data_source_cache()
            source.set_use_snmpwalk_cache(False)
            source.set_ignore_check_interval(True)
            source.set_check_plugin_name_filter(_gather_snmp_check_plugin_names_inventory)
            if multi_host_sections is not None:
                # Status data inventory already provides filled multi_host_sections object.
                # SNMP data source: If 'do_status_data_inv' is enabled there may be
                # sections for inventory plugins which were not fetched yet.
                source.enforce_check_plugin_names(None)
                host_sections = multi_host_sections.add_or_get_host_sections(
                    hostname, ipaddress, deflt=SNMPHostSections())
                # Tell the source which sections are already there so it only
                # fetches the missing ones, then merge the fresh result in.
                source.set_fetched_check_plugin_names(set(host_sections.sections))
                host_sections_from_source = source.run()
                host_sections.update(host_sections_from_source)

    # No pre-filled sections were handed in: fetch everything now.
    if multi_host_sections is None:
        multi_host_sections = sources.get_host_sections()

    section.section_step("Executing inventory plugins")
    import cmk.base.inventory_plugins as inventory_plugins  # pylint: disable=import-outside-toplevel
    console.verbose("Plugins:")
    for section_name, plugin in inventory_plugins.sorted_inventory_plugins():
        section_content = multi_host_sections.get_section_content(
            hostname, ipaddress, section_name, for_discovery=False)
        if not section_content:  # section not present (None or [])
            # Note: this also excludes existing sections without info..
            continue

        if all([x in [[], {}, None] for x in section_content]):
            # Inventory plugins which get parsed info from related
            # check plugin may have more than one return value, eg
            # parse function of oracle_tablespaces returns ({}, {})
            continue

        console.verbose(" %s%s%s%s" % (tty.green, tty.bold, section_name, tty.normal))

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        inv_function = plugin["inv_function"]
        # Pass the trees only if the function's signature asks for them.
        kwargs = cmk.utils.misc.make_kwargs_for(
            inv_function, inventory_tree=inventory_tree, status_data_tree=status_data_tree)
        non_kwargs = set(cmk.utils.misc.getfuncargs(inv_function)) - set(kwargs)
        args = [section_content]
        # Two positional parameters left over means the function also takes
        # the rule-set configured inventory parameters.
        if len(non_kwargs) == 2:
            args += [host_config.inventory_parameters(section_name)]
        inv_function(*args, **kwargs)
    console.verbose("\n")
def create_diagnostics_dump(parameters: Optional[DiagnosticsOptionalParameters]) -> None:
    """Create a diagnostics dump and report the resulting tarfile (or its absence)."""
    diagnostics_dump = DiagnosticsDump(parameters)
    diagnostics_dump.create()

    section.section_step("Creating diagnostics dump", verbose=False)
    if not diagnostics_dump.tarfile_created:
        console.info("%s%s\n", _GAP, "No dump")
    else:
        console.info("%s\n", _format_filepath(diagnostics_dump.tarfile_path))
def _commandline_discovery_on_host(
    host_name: HostName,
    ipaddress: Optional[HostAddress],
    parsed_sections_broker: ParsedSectionsBroker,
    run_plugin_names: Container[CheckPluginName],
    only_new: bool,
    *,
    load_labels: bool,
    only_host_labels: bool,
    on_error: OnError,
) -> None:
    """Discover host labels and services for one host and persist the results."""
    section.section_step("Analyse discovered host labels")

    host_labels = analyse_node_labels(
        host_name=host_name,
        ipaddress=ipaddress,
        parsed_sections_broker=parsed_sections_broker,
        load_labels=load_labels,
        save_labels=True,
        on_error=on_error,
    )

    if host_labels.new:
        label_count = len(host_labels.new)
    elif only_new:
        label_count = "no new"
    else:
        label_count = "no"
    section.section_success(f"Found {label_count} host labels")

    if only_host_labels:
        return

    section.section_step("Analyse discovered services")

    service_result = analyse_discovered_services(
        host_name=host_name,
        ipaddress=ipaddress,
        parsed_sections_broker=parsed_sections_broker,
        run_plugin_names=run_plugin_names,
        only_new=only_new,
        on_error=on_error,
    )

    # TODO (mo): for the labels the corresponding code is in _host_labels.
    # We should put the persisting in one place.
    autochecks.save_autochecks_file(host_name, service_result.present)

    new_per_plugin = Counter(s.check_plugin_name for s in service_result.new)
    for plugin_name, num_new in sorted(new_per_plugin.items()):
        console.verbose("%s%3d%s %s\n" % (tty.green + tty.bold, num_new, tty.normal, plugin_name))

    if service_result.new:
        service_count = len(service_result.new)
    elif only_new:
        service_count = "no new"
    else:
        service_count = "no"
    section.section_success(f"Found {service_count} services")

    for detail in check_parsing_errors(parsed_sections_broker.parsing_errors()).details:
        console.warning(detail)
def test_section_step(caplog, capsys):
    """section_step() upper-cases its message and writes it to stdout only."""
    caplog.set_level(console.VERBOSE, logger="cmk.base")

    section.section_step("hello")

    captured = capsys.readouterr()  # no `stream` arg
    out = captured.out
    assert "hello" not in out
    assert "HELLO" in out
    assert out.endswith("\n")
    assert not captured.err
def _analyse_host_labels(
    *,
    host_name: HostName,
    discovered_host_labels: Sequence[HostLabel],
    existing_host_labels: Sequence[HostLabel],
    discovery_parameters: DiscoveryParameters,
) -> QualifiedDiscovery[HostLabel]:
    """Classify discovered vs. existing host labels and optionally persist them."""
    section.section_step("Analyse discovered host labels")

    host_labels = QualifiedDiscovery(
        preexisting=existing_host_labels,
        current=discovered_host_labels,
        key=lambda hl: hl.label,
    )

    if discovery_parameters.save_labels:
        # TODO (mo): Im not sure this is desired. If it is, it should be explained.
        # Vanished labels go in first, so a label present in both sets keeps
        # the "present" version.
        labels_to_save = {l.name: l.to_dict() for l in host_labels.vanished}
        labels_to_save.update({l.name: l.to_dict() for l in host_labels.present})
        DiscoveredHostLabelsStore(host_name).save(labels_to_save)

    if host_labels.new:
        # Some check plugins like 'df' may discover services based on host labels.
        # A rule may look like:
        # [{
        #     'value': {
        #         'ignore_fs_types': ['tmpfs', 'nfs', 'smbfs', 'cifs', 'iso9660'],
        #         'never_ignore_mountpoints': ['~.*/omd/sites/[^/]+/tmp$']
        #     },
        #     'condition': {
        #         'host_labels': {
        #             'cmk/check_mk_server': 'yes'
        #         }
        #     }
        # }]
        # In the first step '_discover_host_labels' the ruleset optimizer caches the
        # result of the evaluation of these rules. Contemporary we may find new host
        # labels which are not yet taken into account by the ruleset optimizer.
        # In the next step '_discover_services' we want to discover new services
        # based on these new host labels but we only got the cached result.
        # If we found new host labels, we have to evaluate these rules again in order
        # to find new services, eg. in 'inventory_df'. Thus we have to clear these caches.
        config.get_config_cache().ruleset_matcher.ruleset_optimizer.clear_caches()

    return host_labels
def _cleanup_dump_folder(self) -> None:
    """Delete stale dump files, keeping only the newest ``self._keep_num_dumps``."""
    if not self.tarfile_created:
        # Remove empty tarfile path
        self._remove_file(self.tarfile_path)

    # All dumps sorted by modification time, oldest first; the trailing slice
    # excludes the newest ``_keep_num_dumps`` entries from deletion.
    # NOTE(review): if ``_keep_num_dumps`` were 0, ``[:-0]`` is the empty
    # slice and nothing would be deleted — presumably the value is always
    # positive; confirm against the class definition.
    dumps = sorted(
        [(dump.stat().st_mtime, dump) for dump in self.dump_folder.glob("*%s" % SUFFIX)],
        key=lambda t: t[0])[:-self._keep_num_dumps]

    section.section_step("Cleanup dump folder",
                         add_info="keep last %d dumps" % self._keep_num_dumps)
    for _mtime, filepath in dumps:
        console.verbose("%s\n", _format_filepath(filepath))
        self._remove_file(filepath)
def _do_inv_for_realhost(
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    *,
    multi_host_sections: MultiHostSections,
    run_only_plugin_names: Optional[Set[InventoryPluginName]],
) -> InventoryTrees:
    """Execute the inventory plugins for one real host and return the trees."""
    tree_aggregator = _TreeAggregator()
    _set_cluster_property(tree_aggregator.trees.inventory, host_config)

    section.section_step("Executing inventory plugins")
    host_key = HostKey(host_config.hostname, ipaddress, SourceType.HOST)
    for plugin in agent_based_register.iter_all_inventory_plugins():
        if run_only_plugin_names and plugin.name not in run_only_plugin_names:
            continue

        kwargs = multi_host_sections.get_section_kwargs(host_key, plugin.sections)
        if not kwargs:
            console.vverbose(" %s%s%s%s: skipped (no data)\n", tty.yellow, tty.bold,
                             plugin.name, tty.normal)
            continue

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        if plugin.inventory_ruleset_name is not None:
            kwargs["params"] = host_config.inventory_parameters(
                str(plugin.inventory_ruleset_name))  # TODO (mo): keep type!

        exception = tree_aggregator.aggregate_results(
            plugin.inventory_function(**kwargs),
            plugin.name,
        )
        if not exception:
            console.verbose(" %s%s%s%s", tty.green, tty.bold, plugin.name, tty.normal)
            console.vverbose(": ok\n")
        else:
            console.warning(" %s%s%s%s: failed: %s", tty.red, tty.bold, plugin.name,
                            tty.normal, exception)
    console.verbose("\n")

    tree_aggregator.trees.inventory.normalize_nodes()
    tree_aggregator.trees.status_data.normalize_nodes()
    return tree_aggregator.trees
def _discover_services(
    *,
    host_name: HostName,
    ipaddress: Optional[HostAddress],
    parsed_sections_broker: ParsedSectionsBroker,
    run_plugin_names: Container[CheckPluginName],
    on_error: OnError,
) -> List[Service]:
    """Run all applicable discovery plugins and collect the discovered services."""
    # Determine which plugins are worth trying for the available sections.
    candidates = _find_candidates(parsed_sections_broker, run_plugin_names)
    section.section_step("Executing discovery plugins (%d)" % len(candidates))
    console.vverbose(" Trying discovery with: %s\n" % ", ".join(str(n) for n in candidates))

    # The host name must be set for the host_name() calls commonly used to determine the
    # host name for host_extra_conf{_merged,} calls in the legacy checks.
    discovered: CheckTable = {}
    try:
        with plugin_contexts.current_host(host_name):
            for plugin_name in candidates:
                try:
                    # Build the per-plugin result first so a failing plugin
                    # contributes nothing at all to the table.
                    found = {
                        service.id(): service for service in _discover_plugins_services(
                            check_plugin_name=plugin_name,
                            host_name=host_name,
                            ipaddress=ipaddress,
                            parsed_sections_broker=parsed_sections_broker,
                            on_error=on_error,
                        )
                    }
                    discovered.update(found)
                except (KeyboardInterrupt, MKTimeout):
                    raise
                except Exception as e:
                    if on_error is OnError.RAISE:
                        raise
                    if on_error is OnError.WARN:
                        console.error(f"Discovery of '{plugin_name}' failed: {e}\n")
            return list(discovered.values())
    except KeyboardInterrupt:
        raise MKGeneralException("Interrupted by Ctrl-C.")
def _discover_services(
    *,
    host_name: HostName,
    ipaddress: Optional[HostAddress],
    parsed_sections_broker: ParsedSectionsBroker,
    discovery_parameters: DiscoveryParameters,
    run_only_plugin_names: Optional[Set[CheckPluginName]],
) -> List[Service]:
    """Run all applicable discovery plugins and collect the discovered services."""
    # Determine which plugins are worth trying for the available sections.
    candidates = _find_candidates(parsed_sections_broker, run_only_plugin_names)
    section.section_step("Executing discovery plugins (%d)" % len(candidates))
    console.vverbose(" Trying discovery with: %s\n" % ", ".join(str(n) for n in candidates))

    # The host name must be set for the host_name() calls commonly used to determine the
    # host name for host_extra_conf{_merged,} calls in the legacy checks.
    check_api_utils.set_hostname(host_name)

    discovered: cmk.base.check_utils.CheckTable = {}
    try:
        for plugin_name in candidates:
            try:
                # Build the per-plugin result first so a failing plugin
                # contributes nothing at all to the table.
                found = {
                    service.id(): service for service in _discover_plugins_services(
                        check_plugin_name=plugin_name,
                        host_name=host_name,
                        ipaddress=ipaddress,
                        parsed_sections_broker=parsed_sections_broker,
                        discovery_parameters=discovery_parameters,
                    )
                }
                discovered.update(found)
            except (KeyboardInterrupt, MKTimeout):
                raise
            except Exception as e:
                if discovery_parameters.on_error == "raise":
                    raise
                if discovery_parameters.on_error == "warn":
                    console.error("Discovery of '%s' failed: %s\n" % (plugin_name, e))
        return list(discovered.values())
    except KeyboardInterrupt:
        raise MKGeneralException("Interrupted by Ctrl-C.")
def _do_inv_for_realhost(
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    *,
    parsed_sections_broker: ParsedSectionsBroker,
    run_plugin_names: Container[InventoryPluginName],
) -> InventoryTrees:
    """Execute the selected inventory plugins for one real host and return the trees."""
    tree_aggregator = TreeAggregator()
    _set_cluster_property(tree_aggregator.trees.inventory, host_config)

    section.section_step("Executing inventory plugins")
    for plugin in agent_based_register.iter_all_inventory_plugins():
        if plugin.name not in run_plugin_names:
            continue

        # Try both the host itself and its management board.
        for source_type in (SourceType.HOST, SourceType.MANAGEMENT):
            kwargs = get_section_kwargs(
                parsed_sections_broker,
                HostKey(host_config.hostname, ipaddress, source_type),
                plugin.sections,
            )
            if not kwargs:
                console.vverbose(" %s%s%s%s: skipped (no data)\n", tty.yellow, tty.bold,
                                 plugin.name, tty.normal)
                continue

            # Inventory functions can optionally have a second argument: parameters.
            # These are configured via rule sets (much like check parameters).
            if plugin.inventory_ruleset_name is not None:
                kwargs["params"] = host_config.inventory_parameters(
                    plugin.inventory_ruleset_name)

            exception = tree_aggregator.aggregate_results(
                plugin.inventory_function(**kwargs),
            )
            if not exception:
                console.verbose(" %s%s%s%s", tty.green, tty.bold, plugin.name, tty.normal)
                console.vverbose(": ok\n")
            else:
                console.warning(" %s%s%s%s: failed: %s", tty.red, tty.bold, plugin.name,
                                tty.normal, exception)
    console.verbose("\n")

    tree_aggregator.trees.inventory.normalize_nodes()
    tree_aggregator.trees.status_data.normalize_nodes()
    return tree_aggregator.trees
def _run_inventory_export_hooks(host_config: config.HostConfig,
                                inventory_tree: StructuredDataTree) -> None:
    """Run every configured inventory export hook for the host.

    Each hook receives the host name, its configured parameters and the raw
    inventory tree.

    Raises:
        MKGeneralException: when a hook fails (unless debug mode is enabled,
            in which case the original exception propagates unchanged).
    """
    import cmk.base.inventory_plugins as inventory_plugins  # pylint: disable=import-outside-toplevel
    hooks = host_config.inventory_export_hooks

    if not hooks:
        return

    section.section_step("Execute inventory export hooks")
    for hookname, params in hooks:
        console.verbose("Execute export hook: %s%s%s%s" %
                        (tty.blue, tty.bold, hookname, tty.normal))
        try:
            func = inventory_plugins.inv_export[hookname]["export_function"]
            func(host_config.hostname, params, inventory_tree.get_raw_tree())
        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            # Chain the original exception explicitly so the root cause is
            # preserved in the traceback (PEP 3134) instead of being shown
            # as an unrelated "during handling" context.
            raise MKGeneralException("Failed to execute export hook %s: %s" %
                                     (hookname, e)) from e
def _get_filepaths(self, tmp_dump_folder: Path) -> List[Path]:
    """Collect the files of all diagnostics elements, skipping elements that fail."""
    section.section_step("Collect diagnostics information", verbose=False)

    collectors = Collectors()
    collected: List[Path] = []
    for element in self.elements:
        console.info("%s\n", _format_title(element.title))
        console.info("%s\n", _format_description(element.description))
        try:
            # add_or_get_files yields lazily; files produced before a failure
            # are kept.
            collected.extend(element.add_or_get_files(tmp_dump_folder, collectors))
        except DiagnosticsElementError as e:
            console.info("%s\n", _format_error(str(e)))
        except Exception:
            console.info("%s\n", _format_error(traceback.format_exc()))
    return collected
def _commandline_inventory_on_host(
    *,
    host_config: config.HostConfig,
    run_plugin_names: Container[InventoryPluginName],
    selected_sections: SectionNameCollection,
) -> None:
    """Run HW/SW inventory for one host and report the results on the console."""
    section.section_step("Inventorizing")
    inv_result = _inventorize_host(
        host_config=host_config,
        selected_sections=selected_sections,
        run_plugin_names=run_plugin_names,
        retentions_tracker=RetentionsTracker([]),
    )

    for subresult in check_parsing_errors(errors=inv_result.parsing_errors):
        for line in subresult.details:
            console.warning(line)

    # TODO: inv_results.source_results is completely ignored here.
    # We should process the results to make errors visible on the console
    count_i = inv_result.trees.inventory.count_entries()
    count_s = inv_result.trees.status_data.count_entries()
    section.section_success(f"Found {count_i} inventory entries")
    section.section_success(f"Found {count_s} status entries")

    if not host_config.inventory_export_hooks:
        return

    section.section_step("Execute inventory export hooks")
    _run_inventory_export_hooks(host_config, inv_result.trees.inventory)
    count = len(host_config.inventory_export_hooks)
    # Fix: user-visible message previously read "Sucessfully".
    section.section_success(f"Successfully ran {count} export hooks")
def _discover_host_labels(
    *,
    host_name: HostName,
    ipaddress: Optional[HostAddress],
    parsed_sections_broker: ParsedSectionsBroker,
    discovery_parameters: DiscoveryParameters,
) -> Sequence[HostLabel]:
    """Discover host labels from section plugins of both source types."""
    section.section_step("Discover host labels of section plugins")

    # Merge both source types into one mapping keyed by label name so names
    # are unique; management-board labels win on clashes.
    labels_by_name = {}
    for source_type in (SourceType.HOST, SourceType.MANAGEMENT):
        labels_by_name.update(
            _discover_host_labels_for_source_type(
                host_key=HostKey(host_name, ipaddress, source_type),
                parsed_sections_broker=parsed_sections_broker,
                discovery_parameters=discovery_parameters,
            ))
    return list(labels_by_name.values())
def _do_inv_for_realhost(
    host_config: config.HostConfig,
    multi_host_sections: MultiHostSections,
    ipaddress: Optional[HostAddress],
    inventory_tree: StructuredDataTree,
    status_data_tree: StructuredDataTree,
):
    """Feed every inventory plugin that has data into the inventory/status trees."""
    section.section_step("Executing inventory plugins")
    console.verbose("Plugins:")
    host_key = HostKey(host_config.hostname, ipaddress, SourceType.HOST)
    for plugin in agent_based_register.iter_all_inventory_plugins():
        kwargs = multi_host_sections.get_section_kwargs(host_key, plugin.sections)
        if not kwargs:
            continue

        console.verbose(" %s%s%s%s" % (tty.green, tty.bold, plugin.name, tty.normal))

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        if plugin.inventory_ruleset_name is not None:
            kwargs["params"] = host_config.inventory_parameters(
                str(plugin.inventory_ruleset_name))  # TODO (mo): keep type!

        _aggregate_inventory_results(
            plugin.inventory_function(**kwargs),
            inventory_tree,
            status_data_tree,
        )
    console.verbose("\n")
def _create_dump_folder(self) -> None:
    """Make sure the folder receiving diagnostics dumps exists."""
    section.section_step("Create dump folder")
    folder = self.dump_folder
    console.verbose("%s\n", _format_filepath(folder))
    folder.mkdir(parents=True, exist_ok=True)
def _do_inv_for_realhost(
    host_config: config.HostConfig,
    *,
    parsed_sections_broker: ParsedSectionsBroker,
    run_plugin_names: Container[InventoryPluginName],
    retentions_tracker: RetentionsTracker,
) -> InventoryTrees:
    """Execute the selected inventory plugins for one real host.

    Tries each plugin against both the host's and the management board's
    sections, aggregates the results (including retention bookkeeping) into
    inventory and status-data trees, and returns those trees.
    """
    tree_aggregator = TreeAggregator()
    _set_cluster_property(tree_aggregator.trees.inventory, host_config)

    section.section_step("Executing inventory plugins")
    for inventory_plugin in agent_based_register.iter_all_inventory_plugins():
        if inventory_plugin.name not in run_plugin_names:
            continue
        # A plugin may get its data from the host itself or from its
        # management board.
        for host_key in (host_config.host_key, host_config.host_key_mgmt):
            kwargs = get_section_kwargs(
                parsed_sections_broker,
                host_key,
                inventory_plugin.sections,
            )
            if not kwargs:
                console.vverbose(
                    " %s%s%s%s: skipped (no data)\n",
                    tty.yellow,
                    tty.bold,
                    inventory_plugin.name,
                    tty.normal,
                )
                continue
            # Inventory functions can optionally have a second argument: parameters.
            # These are configured via rule sets (much like check parameters).
            if inventory_plugin.inventory_ruleset_name is not None:
                # Copy kwargs instead of mutating, so the "params" key does
                # not leak into the next source-type iteration.
                kwargs = {
                    **kwargs,
                    "params": host_config.inventory_parameters(
                        inventory_plugin.inventory_ruleset_name),
                }
            exception = tree_aggregator.aggregate_results(
                inventory_generator=inventory_plugin.inventory_function(**kwargs),
                retentions_tracker=retentions_tracker,
                raw_cache_info=parsed_sections_broker.get_cache_info(
                    inventory_plugin.sections),
                is_legacy_plugin=inventory_plugin.module is None,
            )
            if exception:
                console.warning(
                    " %s%s%s%s: failed: %s",
                    tty.red,
                    tty.bold,
                    inventory_plugin.name,
                    tty.normal,
                    exception,
                )
            else:
                console.verbose(" %s%s%s%s", tty.green, tty.bold, inventory_plugin.name,
                                tty.normal)
                console.vverbose(": ok\n")
    console.verbose("\n")
    return tree_aggregator.trees
def _do_inv_for_realhost(
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    sources: data_sources.DataSources,
    multi_host_sections: Optional[MultiHostSections],
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    inventory_tree: StructuredDataTree,
    status_data_tree: StructuredDataTree,
) -> None:
    """Run the HW/SW inventory plugins for a single real (non-cluster) host.

    Reconfigures SNMP sources for a full, uncached scan, fetches any sections
    still missing from ``multi_host_sections`` and then feeds each section's
    content to the matching legacy inventory plugin, filling
    ``inventory_tree`` and ``status_data_tree`` in place.
    """
    # Force every SNMP source into "full inventory" mode: full scan, no caches.
    for source in sources:
        if isinstance(source, data_sources.snmp.SNMPDataSource):
            source.detector.on_error = "raise"  # default
            source.detector.do_snmp_scan = True
            data_sources.snmp.SNMPDataSource.disable_data_source_cache()
            source.set_use_snmpwalk_cache(False)
            source.set_ignore_check_interval(True)
            if multi_host_sections is not None:
                # Status data inventory already provides filled multi_host_sections object.
                # SNMP data source: If 'do_status_data_inv' is enabled there may be
                # sections for inventory plugins which were not fetched yet.
                host_sections = multi_host_sections.setdefault(
                    HostKey(hostname, ipaddress, source.source_type),
                    SNMPHostSections(),
                )
                # Tell the source which raw sections are already present so it
                # only fetches the missing ones, then merge the result in.
                source.set_fetched_raw_section_names(set(host_sections.sections))
                host_sections.update(source.run(selected_raw_sections=None))

    # No pre-filled sections were handed in: fetch everything now.
    if multi_host_sections is None:
        multi_host_sections = data_sources.make_host_sections(
            config_cache,
            host_config,
            ipaddress,
            sources,
            max_cachefile_age=host_config.max_cachefile_age,
            selected_raw_sections=None,
        )

    section.section_step("Executing inventory plugins")
    import cmk.base.inventory_plugins as inventory_plugins  # pylint: disable=import-outside-toplevel
    console.verbose("Plugins:")
    for section_name, plugin in inventory_plugins.sorted_inventory_plugins():
        section_content = multi_host_sections.get_section_content(
            HostKey(hostname, ipaddress, SourceType.HOST),
            check_api_utils.HOST_PRECEDENCE,
            section_name,
            for_discovery=False,
        )
        if not section_content:  # section not present (None or [])
            # Note: this also excludes existing sections without info..
            continue

        if all([x in [[], {}, None] for x in section_content]):
            # Inventory plugins which get parsed info from related
            # check plugin may have more than one return value, eg
            # parse function of oracle_tablespaces returns ({}, {})
            continue

        console.verbose(" %s%s%s%s" % (tty.green, tty.bold, section_name, tty.normal))

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        inv_function = plugin["inv_function"]
        # Pass the trees only if the function's signature asks for them.
        kwargs = cmk.utils.misc.make_kwargs_for(
            inv_function, inventory_tree=inventory_tree, status_data_tree=status_data_tree)
        non_kwargs = set(cmk.utils.misc.getfuncargs(inv_function)) - set(kwargs)
        args = [section_content]
        # Two positional parameters left over means the function also takes
        # the rule-set configured inventory parameters.
        if len(non_kwargs) == 2:
            args += [host_config.inventory_parameters(section_name)]
        inv_function(*args, **kwargs)
    console.verbose("\n")
def _do_inv_for_realhost(
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    sources: data_sources.DataSources,
    multi_host_sections: Optional[MultiHostSections],
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    inventory_tree: StructuredDataTree,
    status_data_tree: StructuredDataTree,
) -> None:
    """Run the HW/SW inventory plugins for a single real (non-cluster) host.

    Reconfigures SNMP sources for a full, uncached scan, fetches any sections
    still missing from ``multi_host_sections`` and then feeds the section
    kwargs of each registered inventory plugin into the trees in place.
    """
    # Force every SNMP source into "full inventory" mode: full scan, no caches.
    for source in sources:
        if isinstance(source, data_sources.snmp.SNMPDataSource):
            # TODO(ml): This modifies the SNMP fetcher config dynamically.
            configurator = cast(data_sources.snmp.SNMPConfigurator, source.configurator)
            configurator.on_snmp_scan_error = "raise"  # default
            configurator.do_snmp_scan = True
            data_sources.FileCacheConfigurator.snmp_disabled = True
            configurator.use_snmpwalk_cache = False
            configurator.ignore_check_interval = True
            if multi_host_sections is not None:
                # Status data inventory already provides filled multi_host_sections object.
                # SNMP data source: If 'do_status_data_inv' is enabled there may be
                # sections for inventory plugins which were not fetched yet.
                host_sections = multi_host_sections.setdefault(
                    # TODO(ml): are
                    #    hostname == source.hostname
                    #    ipaddress == source.ipaddress
                    # ?
                    HostKey(hostname, ipaddress, source.configurator.source_type),
                    SNMPHostSections(),
                )
                # TODO(ml): This modifies the SNMP fetcher config dynamically.
                #           Can the fetcher handle that on its own?
                # Mark already-present sections as prefetched so only the
                # missing ones are fetched, then merge the result in.
                configurator.prefetched_sections = host_sections.sections
                host_sections.update(source.run(selected_raw_sections=None))

    # No pre-filled sections were handed in: fetch everything now.
    if multi_host_sections is None:
        multi_host_sections = data_sources.make_host_sections(
            config_cache,
            host_config,
            ipaddress,
            data_sources.Mode.INVENTORY,
            sources,
            max_cachefile_age=host_config.max_cachefile_age,
            selected_raw_sections=None,
        )

    section.section_step("Executing inventory plugins")
    console.verbose("Plugins:")
    for inventory_plugin in agent_based_register.iter_all_inventory_plugins():
        kwargs = multi_host_sections.get_section_kwargs(
            HostKey(hostname, ipaddress, SourceType.HOST),
            inventory_plugin.sections,
        )
        if not kwargs:
            # No data for any of the plugin's sections.
            continue
        console.verbose(" %s%s%s%s" % (tty.green, tty.bold, inventory_plugin.name, tty.normal))
        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        if inventory_plugin.inventory_ruleset_name is not None:
            kwargs["params"] = host_config.inventory_parameters(
                str(inventory_plugin.inventory_ruleset_name))  # TODO (mo): keep type!
        _aggregate_inventory_results(
            inventory_plugin.inventory_function(**kwargs),
            inventory_tree,
            status_data_tree,
        )
    console.verbose("\n")
def _do_inv_for_realhost(
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    sources: Sequence[ABCSource],
    multi_host_sections: Optional[MultiHostSections],
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    inventory_tree: StructuredDataTree,
    status_data_tree: StructuredDataTree,
) -> Sequence[Tuple[ABCSource, Result[ABCHostSections, Exception]]]:
    """Run the HW/SW inventory plugins for a single real (non-cluster) host.

    Reconfigures SNMP sources for a full, uncached scan, fetches any sections
    still missing from ``multi_host_sections``, then feeds the section kwargs
    of each registered inventory plugin into the trees in place.

    Returns the per-source fetch/parse results so callers can surface errors.
    """
    results: List[Tuple[ABCSource, Result[ABCHostSections, Exception]]] = []
    # Force every SNMP source into "full inventory" mode: no caches.
    for source in sources:
        if isinstance(source, checkers.snmp.SNMPSource):
            # TODO(ml): This modifies the SNMP fetcher config dynamically.
            source.on_snmp_scan_error = "raise"  # default
            checkers.FileCacheConfigurer.snmp_disabled = True
            source.use_snmpwalk_cache = False
            source.ignore_check_interval = True
            if multi_host_sections is not None:
                # Status data inventory already provides filled multi_host_sections object.
                # SNMP data source: If 'do_status_data_inv' is enabled there may be
                # sections for inventory plugins which were not fetched yet.
                host_sections = multi_host_sections.setdefault(
                    # TODO(ml): are
                    #    hostname == source.hostname
                    #    ipaddress == source.ipaddress
                    # ?
                    HostKey(hostname, ipaddress, source.source_type),
                    SNMPHostSections(),
                )
                # TODO(ml): This modifies the SNMP fetcher config dynamically.
                #           Can the fetcher handle that on its own?
                source.prefetched_sections = host_sections.sections

                # When executing the structured status inventory, we are in the Mode.CHECKING
                assert source.mode is Mode.INVENTORY or source.mode is Mode.CHECKING

                host_section = source.parse(source.fetch())
                results.append((source, host_section))
                # Only merge successfully parsed sections into the collection.
                if host_section.is_ok():
                    assert host_section.ok is not None
                    host_sections.update(host_section.ok)

    # No pre-filled sections were handed in: fetch everything now.
    if multi_host_sections is None:
        multi_host_sections = MultiHostSections()
        hs = checkers.update_host_sections(
            multi_host_sections,
            checkers.make_nodes(
                config_cache,
                host_config,
                ipaddress,
                checkers.Mode.INVENTORY,
                sources,
            ),
            max_cachefile_age=host_config.max_cachefile_age,
            selected_raw_sections=None,
            host_config=host_config,
        )
        results.extend(hs)

    section.section_step("Executing inventory plugins")
    console.verbose("Plugins:")
    for inventory_plugin in agent_based_register.iter_all_inventory_plugins():
        kwargs = multi_host_sections.get_section_kwargs(
            HostKey(hostname, ipaddress, SourceType.HOST),
            inventory_plugin.sections,
        )
        if not kwargs:
            # No data for any of the plugin's sections.
            continue

        console.verbose(" %s%s%s%s" % (tty.green, tty.bold, inventory_plugin.name, tty.normal))

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        if inventory_plugin.inventory_ruleset_name is not None:
            kwargs["params"] = host_config.inventory_parameters(
                str(inventory_plugin.inventory_ruleset_name))  # TODO (mo): keep type!

        _aggregate_inventory_results(
            inventory_plugin.inventory_function(**kwargs),
            inventory_tree,
            status_data_tree,
        )
    console.verbose("\n")
    return results