def test__find_candidates():
    mhs = MultiHostSections()
    mhs._data = {
        # we just care about the keys here, content set to [] for simplicity
        # section names are chosen arbitrarily.
        # any HostSections type is fine.
        HostKey("test_node", "1.2.3.4", SourceType.HOST): AgentHostSections({
            SectionName("kernel"): [],  # host only
            SectionName("uptime"): [['123']],  # host & mgmt
        }),
        HostKey("test_node", "1.2.3.4", SourceType.MANAGEMENT): AgentHostSections({
            SectionName("uptime"): [['123']],  # host & mgmt
            SectionName("liebert_fans"): [[]],  # mgmt only
            SectionName("mgmt_snmp_info"): [[]],  # is already mgmt_ prefixed
        }),
    }

    preliminary_candidates = list(agent_based_register.iter_all_check_plugins())
    parsed_sections_of_interest = {
        parsed_section_name for plugin in preliminary_candidates
        for parsed_section_name in plugin.sections
    }

    assert discovery._find_host_candidates(
        mhs,
        preliminary_candidates,
        parsed_sections_of_interest,
    ) == {
        CheckPluginName('docker_container_status_uptime'),
        CheckPluginName("kernel"),
        CheckPluginName('kernel_performance'),
        CheckPluginName('kernel_util'),
        CheckPluginName("uptime"),
    }

    assert discovery._find_mgmt_candidates(
        mhs,
        preliminary_candidates,
        parsed_sections_of_interest,
    ) == {
        CheckPluginName('mgmt_docker_container_status_uptime'),
        CheckPluginName("mgmt_liebert_fans"),
        CheckPluginName("mgmt_uptime"),
        CheckPluginName("mgmt_snmp_info"),  # not mgmt_mgmt_...
    }

    assert discovery._find_candidates(
        mhs,
        selected_check_plugins=None,
    ) == {
        CheckPluginName('docker_container_status_uptime'),
        CheckPluginName("kernel"),
        CheckPluginName('kernel_performance'),
        CheckPluginName('kernel_util'),
        CheckPluginName('mgmt_docker_container_status_uptime'),
        CheckPluginName("mgmt_liebert_fans"),
        CheckPluginName("mgmt_uptime"),
        CheckPluginName("mgmt_snmp_info"),  # not mgmt_mgmt_...
        CheckPluginName("uptime"),
    }
def test_get_section_kwargs(monkeypatch, required_sections, expected_result):
    _set_up(monkeypatch, "node1", None, {})

    node_section_content = {
        SectionName("one"): NODE_1,
        # TODO (mo): CMK-4232
        # SectionName("two"): NODE_1,
        SectionName("three"): NODE_1,
    }

    host_key = HostKey("node1", "127.0.0.1", SourceType.HOST)

    multi_host_sections = MultiHostSections()
    multi_host_sections.setdefault(
        host_key,
        AgentHostSections(sections=node_section_content),
    )

    kwargs = multi_host_sections.get_section_kwargs(
        host_key,
        [ParsedSectionName(n) for n in required_sections],
    )

    assert expected_result == kwargs, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, kwargs)
def test_get_section_content(monkeypatch, hostname, nodes, host_entries, cluster_mapping,
                             service_descr, expected_result):
    ts = Scenario()

    if nodes is None:
        ts.add_host(hostname)
    else:
        ts.add_cluster(hostname, nodes=nodes)

    for node in nodes or []:
        ts.add_host(node)

    config_cache = ts.apply(monkeypatch)

    def host_of_clustered_service(hostname, service_description):
        return cluster_mapping[hostname]

    multi_host_sections = MultiHostSections()
    for nodename, node_section_content in host_entries:
        multi_host_sections.add_or_get_host_sections(
            nodename, "127.0.0.1",
            AgentHostSections(sections={"check_plugin_name": node_section_content}))

    monkeypatch.setattr(ip_lookup, "lookup_ip_address", lambda h: "127.0.0.1")
    monkeypatch.setattr(config_cache, "host_of_clustered_service", host_of_clustered_service)

    section_content = multi_host_sections.get_section_content(
        hostname, "127.0.0.1", "check_plugin_name", False, service_description=service_descr)

    assert expected_result == section_content, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, section_content)
def get_aggregated_result(
    multi_host_sections: MultiHostSections,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_types.CheckPlugin],
    params_function: Callable[[], checking_types.Parameters],
) -> Tuple[bool, bool, ServiceCheckResult]:
    if plugin is None:
        return False, True, CHECK_NOT_IMPLEMENTED

    check_function = (plugin.cluster_check_function
                      if host_config.is_cluster else plugin.check_function)

    source_type = (SourceType.MANAGEMENT
                   if service.check_plugin_name.startswith('mgmt_') else SourceType.HOST)

    kwargs = {}
    try:
        kwargs = multi_host_sections.get_section_cluster_kwargs(
            HostKey(host_config.hostname, None, source_type),
            plugin.sections,
            service.description,
        ) if host_config.is_cluster else multi_host_sections.get_section_kwargs(
            HostKey(host_config.hostname, ipaddress, source_type),
            plugin.sections,
        )

        if not kwargs:
            return False, False, RECEIVED_NO_DATA

        if service.item is not None:
            kwargs["item"] = service.item
        if plugin.check_ruleset_name:
            kwargs["params"] = params_function()

        with value_store.context(plugin.name, service.item):
            result = _aggregate_results(check_function(**kwargs))

    except (item_state.MKCounterWrapped, checking_types.IgnoreResultsError) as e:
        msg = str(e) or "No service summary available"
        return False, True, (0, msg, [])

    except MKTimeout:
        raise

    except Exception:
        if cmk.utils.debug.enabled():
            raise
        result = 3, cmk.base.crash_reporting.create_check_crash_dump(
            host_config.hostname,
            service.check_plugin_name,
            kwargs,
            is_manual_check(host_config.hostname, service.check_plugin_name, service.item),
            service.description,
        ), []

    return True, True, result
def _get_host_section_for_parse_sections_test():
    node_section_content = {
        SectionName("one"): NODE_1,
        SectionName("four"): NODE_1,
    }

    host_key = HostKey("node1", "127.0.0.1", SourceType.HOST)

    mhs = MultiHostSections()
    mhs.setdefault(
        host_key,
        AgentHostSections(sections=node_section_content),
    )
    return host_key, mhs
def test_get_parsed_section(monkeypatch, node_section_content, expected_result):
    _set_up(monkeypatch, "node1", None, {})

    multi_host_sections = MultiHostSections()
    multi_host_sections.add_or_get_host_sections(
        "node1", "127.0.0.1", AgentHostSections(sections=node_section_content))

    content = multi_host_sections.get_parsed_section("node1", "127.0.0.1", PluginName("parsed"))

    assert expected_result == content, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, content)
def test_no_sources(self, hostname, ipaddress, mode, config_cache, host_config):
    mhs = MultiHostSections()
    update_host_sections(
        mhs,
        make_nodes(
            config_cache,
            host_config,
            ipaddress,
            mode=mode,
            sources=[],
        ),
        max_cachefile_age=0,
        selected_raw_sections=None,
        host_config=host_config,
    )
    # The length is not zero because the function always sets,
    # at least, a piggyback section.
    assert len(mhs) == 1

    key = HostKey(hostname, ipaddress, SourceType.HOST)
    assert key in mhs

    section = mhs[key]
    assert isinstance(section, AgentHostSections)

    # Public attributes from ABCHostSections:
    assert not section.sections
    assert not section.cache_info
    assert not section.piggybacked_raw_data
    assert not section.persisted_sections
def test_one_snmp_source(self, hostname, ipaddress, mode, config_cache, host_config):
    mhs = MultiHostSections()
    update_host_sections(
        mhs,
        make_nodes(
            config_cache,
            host_config,
            ipaddress,
            mode=mode,
            sources=[
                SNMPConfigurator.snmp(
                    hostname,
                    ipaddress,
                    mode=mode,
                ).make_checker(),
            ],
        ),
        max_cachefile_age=0,
        selected_raw_sections=None,
        host_config=host_config,
    )
    assert len(mhs) == 1

    key = HostKey(hostname, ipaddress, SourceType.HOST)
    assert key in mhs

    section = mhs[key]
    assert isinstance(section, SNMPHostSections)
    assert len(section.sections) == 1
    assert section.sections[SectionName("section_name_%s" % hostname)] == [["section_content"]]
def test_one_nonsnmp_source(self, hostname, ipaddress, mode, config_cache, host_config, source):
    source = source(hostname, ipaddress, mode=mode)
    assert source.configurator.source_type is SourceType.HOST

    mhs = MultiHostSections()
    update_host_sections(
        mhs,
        make_nodes(
            config_cache,
            host_config,
            ipaddress,
            mode=mode,
            sources=[source],
        ),
        max_cachefile_age=0,
        selected_raw_sections=None,
        host_config=host_config,
    )
    assert len(mhs) == 1

    key = HostKey(hostname, ipaddress, source.configurator.source_type)
    assert key in mhs

    section = mhs[key]
    assert isinstance(section, AgentHostSections)
    assert len(section.sections) == 1
    assert section.sections[SectionName("section_name_%s" % hostname)] == [["section_content"]]
def test_no_sources(self, cluster, nodes, config_cache, host_config, mode):
    mhs = MultiHostSections()
    update_host_sections(
        mhs,
        make_nodes(
            config_cache,
            host_config,
            None,
            mode=mode,
            sources=[],
        ),
        max_cachefile_age=0,
        selected_raw_sections=None,
        host_config=host_config,
    )
    assert len(mhs) == len(nodes)

    key_clu = HostKey(cluster, None, SourceType.HOST)
    assert key_clu not in mhs

    for hostname, addr in nodes.items():
        key = HostKey(hostname, addr, SourceType.HOST)
        assert key in mhs

        section = mhs[key]
        # yapf: disable
        assert (section.sections[SectionName("section_name_%s" % hostname)]
                == [["section_content"]])
        assert not section.cache_info
        assert not section.piggybacked_raw_data
        assert not section.persisted_sections
def execute_check(multi_host_sections: MultiHostSections, host_config: config.HostConfig,
                  ipaddress: Optional[HostAddress], service: Service) -> bool:
    # TODO (mo): centralize maincheckify: CMK-4295
    plugin_name = CheckPluginName(maincheckify(service.check_plugin_name))
    plugin = config.get_registered_check_plugin(plugin_name)

    # check if we must use legacy mode. remove this block entirely one day
    if (plugin is not None and host_config.is_cluster and
            plugin.cluster_check_function.__name__ == CLUSTER_LEGACY_MODE_FROM_HELL):
        return _execute_check_legacy_mode(
            multi_host_sections,
            host_config.hostname,
            ipaddress,
            service,
        )

    submit, data_received, result = get_aggregated_result(
        multi_host_sections,
        host_config,
        ipaddress,
        service,
        plugin,
        lambda: determine_check_params(service.parameters),
    )

    if submit:
        _submit_check_result(
            host_config.hostname,
            service.description,
            result,
            multi_host_sections.get_cache_info(plugin.sections) if plugin else None,
        )
    elif data_received:
        console.verbose("%-20s PEND - %s\n", ensure_str(service.description), result[1])

    return data_received
def test_get_parsed_section(patch_register, node_section_content, expected_result):
    multi_host_sections = MultiHostSections()
    multi_host_sections.setdefault(
        HostKey("node1", "127.0.0.1", SourceType.HOST),
        AgentHostSections(sections=node_section_content),
    )

    content = multi_host_sections.get_parsed_section(
        HostKey("node1", "127.0.0.1", SourceType.HOST),
        ParsedSectionName("parsed"),
    )

    assert expected_result == content, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, content)
def test_get_section_cluster_kwargs(monkeypatch, required_sections, expected_result):
    _set_up(monkeypatch, "cluster", ["node2", "node1"], {"node1": "cluster", "node2": "cluster"})

    node1_section_content = {
        SectionName("one"): NODE_1,
        # TODO (mo): CMK-4232
        # SectionName("two"): NODE_1,
        SectionName("three"): NODE_1,
    }

    node2_section_content = {
        SectionName("two"): NODE_2,
        SectionName("three"): NODE_2,
    }

    multi_host_sections = MultiHostSections()
    multi_host_sections.setdefault(
        HostKey("node1", "127.0.0.1", SourceType.HOST),
        AgentHostSections(sections=node1_section_content),
    )
    multi_host_sections.setdefault(
        HostKey("node2", "127.0.0.1", SourceType.HOST),
        AgentHostSections(sections=node2_section_content),
    )

    kwargs = multi_host_sections.get_section_cluster_kwargs(
        HostKey("cluster", None, SourceType.HOST),
        [ParsedSectionName(n) for n in required_sections],
        "_service_description",
    )

    assert expected_result == kwargs, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, kwargs)
def test_get_section_cluster_kwargs(monkeypatch, required_sections, expected_result):
    _set_up(monkeypatch, "cluster", ["node2", "node1"], {
        "node1": "cluster",
        "node2": "cluster"
    })

    node1_section_content = {
        "one": NODE_1,
        # TODO (mo): CMK-4232
        # "two": NODE_1,
        "three": NODE_1
    }
    node2_section_content = {"two": NODE_2, "three": NODE_2}

    multi_host_sections = MultiHostSections()
    multi_host_sections.setdefault_host_sections(
        ("node1", "127.0.0.1"),
        AgentHostSections(sections=node1_section_content),
    )
    multi_host_sections.setdefault_host_sections(
        ("node2", "127.0.0.1"),
        AgentHostSections(sections=node2_section_content),
    )

    kwargs = multi_host_sections.get_section_cluster_kwargs(
        "cluster", [PluginName(n) for n in required_sections], "_service_description")

    assert expected_result == kwargs, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, kwargs)
def test_get_section_cluster_kwargs(patch_register, required_sections, expected_result):
    node1_section_content = {
        SectionName("one"): NODE_1,
        SectionName("two"): NODE_1,
        SectionName("three"): NODE_1
    }

    node2_section_content = {
        SectionName("two"): NODE_2,
        SectionName("three"): NODE_2,
    }

    multi_host_sections = MultiHostSections()
    multi_host_sections.setdefault(
        HostKey("node1", "127.0.0.1", SourceType.HOST),
        AgentHostSections(sections=node1_section_content),
    )
    multi_host_sections.setdefault(
        HostKey("node2", "127.0.0.1", SourceType.HOST),
        AgentHostSections(sections=node2_section_content),
    )

    kwargs = multi_host_sections.get_section_cluster_kwargs(
        [
            HostKey("node1", "127.0.0.1", SourceType.HOST),
            HostKey("node2", "127.0.0.1", SourceType.HOST),
        ],
        [ParsedSectionName(n) for n in required_sections],
    )

    assert expected_result == kwargs, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, kwargs)
def test_get_parsed_section(monkeypatch, node_section_content, expected_result):
    _set_up(monkeypatch, "node1", None, {})

    multi_host_sections = MultiHostSections()
    multi_host_sections.setdefault_host_sections(
        ("node1", "127.0.0.1", SourceType.HOST),
        AgentHostSections(sections=node_section_content),
    )

    content = multi_host_sections.get_parsed_section(
        ("node1", "127.0.0.1", SourceType.HOST),
        PluginName("parsed"),
    )

    assert expected_result == content, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, content)
def test_get_section_kwargs(monkeypatch, required_sections, expected_result):
    _set_up(monkeypatch, "node1", None, {})

    node_section_content = {
        "one": NODE_1,
        # TODO (mo): CMK-4232
        # "two": NODE_1,
        "three": NODE_1
    }

    multi_host_sections = MultiHostSections()
    multi_host_sections.add_or_get_host_sections(
        "node1", "127.0.0.1", AgentHostSections(sections=node_section_content))

    kwargs = multi_host_sections.get_section_kwargs(
        "node1", "127.0.0.1", [PluginName(n) for n in required_sections])

    assert expected_result == kwargs, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, kwargs)
def test__find_candidates():
    mhs = MultiHostSections()
    mhs._data = {
        # we just care about the keys here, content set to [] for simplicity
        # section names are chosen arbitrarily.
        # any HostSections type is fine.
        HostKey("test_node", "1.2.3.4", SourceType.HOST): AgentHostSections({
            SectionName("kernel"): [],  # host only
            SectionName("uptime"): [],  # host & mgmt
        }),
        HostKey("test_node", "1.2.3.4", SourceType.MANAGEMENT): AgentHostSections({
            SectionName("uptime"): [],  # host & mgmt
            SectionName("liebert_fans"): [],  # mgmt only
            SectionName("mgmt_snmp_info"): [],  # is already mgmt_ prefixed
        }),
    }

    assert discovery._find_candidates_by_source_type(mhs, SourceType.HOST) == {
        CheckPluginName("kernel"),
        CheckPluginName('kernel_performance'),
        CheckPluginName('kernel_util'),
        CheckPluginName("uptime"),
    }

    assert discovery._find_candidates_by_source_type(mhs, SourceType.MANAGEMENT) == {
        CheckPluginName("liebert_fans"),
        CheckPluginName("uptime"),
        CheckPluginName("mgmt_snmp_info"),
    }

    assert discovery._find_candidates(mhs) == {
        CheckPluginName("kernel"),
        CheckPluginName('kernel_performance'),
        CheckPluginName('kernel_util'),
        CheckPluginName("uptime"),
        CheckPluginName("mgmt_liebert_fans"),
        CheckPluginName("mgmt_uptime"),
        CheckPluginName("mgmt_snmp_info"),  # not mgmt_mgmt_...
    }
def test_get_section_content(monkeypatch, hostname, nodes, host_entries, cluster_mapping,
                             service_descr, expected_result):
    _set_up(monkeypatch, hostname, nodes, cluster_mapping)

    multi_host_sections = MultiHostSections()
    for nodename, node_section_content in host_entries:
        multi_host_sections.add_or_get_host_sections(
            nodename, "127.0.0.1",
            AgentHostSections(sections={"check_plugin_name": node_section_content}))

    section_content = multi_host_sections.get_section_content(
        hostname, "127.0.0.1", "check_plugin_name", False, service_description=service_descr)

    assert expected_result == section_content, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, section_content)
def test_multiple_sources_from_the_same_host(
    self,
    hostname,
    ipaddress,
    mode,
    config_cache,
    host_config,
):
    sources = [
        ProgramConfigurator.ds(
            hostname,
            ipaddress,
            mode=mode,
            template="",
        ).make_checker(),
        TCPConfigurator(
            hostname,
            ipaddress,
            mode=mode,
        ).make_checker(),
    ]
    mhs = MultiHostSections()
    update_host_sections(
        mhs,
        make_nodes(
            config_cache,
            host_config,
            ipaddress,
            mode=mode,
            sources=sources,
        ),
        max_cachefile_age=0,
        selected_raw_sections=None,
        host_config=host_config,
    )
    assert len(mhs) == 1

    key = HostKey(hostname, ipaddress, SourceType.HOST)
    assert key in mhs

    section = mhs[key]
    assert isinstance(section, AgentHostSections)
    assert len(section.sections) == 1
    # yapf: disable
    assert (section.sections[SectionName("section_name_%s" % hostname)]
            == len(sources) * [["section_content"]])
def _legacy_determine_cache_info(multi_host_sections: MultiHostSections,
                                 section_name: SectionName) -> Optional[Tuple[int, int]]:
    """Aggregate information about the age of the data in the agent sections

    This is in data_sources.g_agent_cache_info. For clusters we use the oldest
    of the timestamps, of course.
    """
    cached_ats: List[int] = []
    intervals: List[int] = []
    for host_sections in multi_host_sections.values():
        section_entries = host_sections.cache_info
        if section_name in section_entries:
            cached_at, cache_interval = section_entries[section_name]
            cached_ats.append(cached_at)
            intervals.append(cache_interval)

    return (min(cached_ats), max(intervals)) if cached_ats else None
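# A minimal standalone sketch (added for illustration, not part of the original
# source) of the aggregation rule implemented in _legacy_determine_cache_info()
# above: across all nodes that have the section, keep the oldest "cached_at"
# timestamp and the largest "cache_interval". The helper name and all values
# below are made up for the example.
def _aggregate_cache_info_sketch(per_node_cache_info, section_name):
    cached_ats = []
    intervals = []
    for cache_info in per_node_cache_info:
        if section_name in cache_info:
            cached_at, cache_interval = cache_info[section_name]
            cached_ats.append(cached_at)
            intervals.append(cache_interval)
    return (min(cached_ats), max(intervals)) if cached_ats else None


assert _aggregate_cache_info_sketch(
    [{"uptime": (1600000000, 60)}, {"uptime": (1600000030, 120)}],
    "uptime",
) == (1600000000, 120)  # oldest timestamp, largest interval
assert _aggregate_cache_info_sketch([{}], "uptime") is None  # no cached data at all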
def execute_check(multi_host_sections: MultiHostSections, host_config: config.HostConfig,
                  ipaddress: Optional[HostAddress], service: Service) -> bool:
    plugin = agent_based_register.get_check_plugin(service.check_plugin_name)

    # Make a bit of context information globally available, so that functions
    # called by checks know this context. set_service is needed for predictive levels!
    # TODO: This should be a context manager, similar to value_store (f.k.a. item_state)
    # This is used for both legacy and agent_based API.
    check_api_utils.set_service(str(service.check_plugin_name), service.description)

    # check if we must use legacy mode. remove this block entirely one day
    if (plugin is not None and host_config.is_cluster and
            plugin.cluster_check_function.__name__ == "cluster_legacy_mode_from_hell"):
        return _execute_check_legacy_mode(
            multi_host_sections,
            host_config.hostname,
            ipaddress,
            service,
        )

    submit, data_received, result = get_aggregated_result(
        multi_host_sections,
        host_config,
        ipaddress,
        service,
        plugin,
        lambda: determine_check_params(service.parameters),
    )

    if submit:
        _submit_check_result(
            host_config.hostname,
            service.description,
            result,
            multi_host_sections.get_cache_info(plugin.sections) if plugin else None,
        )
    elif data_received:
        console.verbose("%-20s PEND - %s\n", ensure_str(service.description), result[1])

    return data_received
def test_get_section_content(hostname, host_entries, cluster_node_keys, expected_result):
    multi_host_sections = MultiHostSections()
    for nodename, node_section_content in host_entries:
        multi_host_sections.setdefault(
            HostKey(nodename, "127.0.0.1", SourceType.HOST),
            AgentHostSections(
                sections={SectionName("section_plugin_name"): node_section_content}),
        )

    section_content = multi_host_sections.get_section_content(
        HostKey(hostname, "127.0.0.1", SourceType.HOST),
        check_api_utils.HOST_ONLY,
        "section_plugin_name",
        False,
        cluster_node_keys=cluster_node_keys,
        check_legacy_info={},  # only for parse_function lookup, not needed in this test
    )
    assert expected_result == section_content

    section_content = multi_host_sections.get_section_content(
        HostKey(hostname, "127.0.0.1", SourceType.HOST),
        check_api_utils.HOST_PRECEDENCE,
        "section_plugin_name",
        False,
        cluster_node_keys=cluster_node_keys,
        check_legacy_info={},  # only for parse_function lookup, not needed in this test
    )
    assert expected_result == section_content

    section_content = multi_host_sections.get_section_content(
        HostKey(hostname, "127.0.0.1", SourceType.MANAGEMENT),
        check_api_utils.MGMT_ONLY,
        "section_plugin_name",
        False,
        cluster_node_keys=None if cluster_node_keys is None else
        [HostKey(hn, ip, SourceType.MANAGEMENT) for (hn, ip, _st) in cluster_node_keys],
        check_legacy_info={},  # only for parse_function lookup, not needed in this test
    )
    assert section_content is None
def test_get_section_content(monkeypatch, hostname, nodes, host_entries, cluster_mapping,
                             service_descr, expected_result):
    _set_up(monkeypatch, hostname, nodes, cluster_mapping)

    multi_host_sections = MultiHostSections()
    for nodename, node_section_content in host_entries:
        multi_host_sections.setdefault_host_sections(
            (nodename, "127.0.0.1", SourceType.HOST),
            AgentHostSections(sections={"check_plugin_name": node_section_content}),
        )

    section_content = multi_host_sections.get_section_content(
        hostname,
        "127.0.0.1",
        check_api_utils.HOST_ONLY,
        "check_plugin_name",
        False,
        service_description=service_descr,
    )
    assert expected_result == section_content, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, section_content)

    section_content = multi_host_sections.get_section_content(
        hostname,
        "127.0.0.1",
        check_api_utils.HOST_PRECEDENCE,
        "check_plugin_name",
        False,
        service_description=service_descr,
    )
    assert expected_result == section_content, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, section_content)

    section_content = multi_host_sections.get_section_content(
        hostname,
        "127.0.0.1",
        check_api_utils.MGMT_ONLY,
        "check_plugin_name",
        False,
        service_description=service_descr,
    )
    assert section_content is None, \
        "Section content: Expected 'None' but got '%s'" % (section_content,)
def test_get_section_content(monkeypatch, hostname, nodes, host_entries, cluster_mapping,
                             service_descr, expected_result):
    _set_up(monkeypatch, hostname, nodes, cluster_mapping)

    multi_host_sections = MultiHostSections()
    for nodename, node_section_content in host_entries:
        multi_host_sections.setdefault(
            HostKey(nodename, "127.0.0.1", SourceType.HOST),
            AgentHostSections(
                sections={SectionName("section_plugin_name"): node_section_content}),
        )

    section_content = multi_host_sections.get_section_content(
        HostKey(hostname, "127.0.0.1", SourceType.HOST),
        check_api_utils.HOST_ONLY,
        "section_plugin_name",
        False,
        service_description=service_descr,
        check_info={},  # only for parse_function lookup, not needed in this test
    )
    assert expected_result == section_content, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, section_content)

    section_content = multi_host_sections.get_section_content(
        HostKey(hostname, "127.0.0.1", SourceType.HOST),
        check_api_utils.HOST_PRECEDENCE,
        "section_plugin_name",
        False,
        service_description=service_descr,
        check_info={},  # only for parse_function lookup, not needed in this test
    )
    assert expected_result == section_content, \
        "Section content: Expected '%s' but got '%s'" % (expected_result, section_content)

    section_content = multi_host_sections.get_section_content(
        HostKey(hostname, "127.0.0.1", SourceType.MANAGEMENT),
        check_api_utils.MGMT_ONLY,
        "section_plugin_name",
        False,
        service_description=service_descr,
        check_info={},  # only for parse_function lookup, not needed in this test
    )
    assert section_content is None, \
        "Section content: Expected 'None' but got '%s'" % (section_content,)
def _execute_check_legacy_mode(multi_host_sections: MultiHostSections, hostname: HostName,
                               ipaddress: Optional[HostAddress], service: Service) -> bool:
    check_function = config.check_info[service.check_plugin_name].get("check_function")
    if check_function is None:
        _submit_check_result(hostname, service.description, CHECK_NOT_IMPLEMENTED, None)
        return True

    # Make a bit of context information globally available, so that functions
    # called by checks know this context
    check_api_utils.set_service(service.check_plugin_name, service.description)
    item_state.set_item_state_prefix(service.check_plugin_name, service.item)

    section_name = section_name_of(service.check_plugin_name)

    section_content = None
    mgmt_board_info = config.get_management_board_precedence(section_name, config.check_info)
    try:
        # TODO: There is duplicate code with discovery._execute_discovery().
        section_content = multi_host_sections.get_section_content(
            HostKey(
                hostname,
                ipaddress,
                SourceType.MANAGEMENT
                if mgmt_board_info == LEGACY_MGMT_ONLY else SourceType.HOST,
            ),
            mgmt_board_info,
            section_name,
            for_discovery=False,
            service_description=service.description,
        )

        # TODO: Move this to a helper function
        if section_content is None:  # No data for this check type
            return False

        # Call the actual check function
        item_state.reset_wrapped_counters()

        used_params = legacy_determine_check_params(service.parameters)
        raw_result = check_function(service.item, used_params, section_content)
        result = sanitize_check_result(raw_result)
        item_state.raise_counter_wrap()

    except item_state.MKCounterWrapped as e:
        # handle check implementations that do not yet support the
        # handling of wrapped counters via exception on their own.
        # Do not submit any check result in that case:
        console.verbose("%-20s PEND - Cannot compute check result: %s\n",
                        ensure_str(service.description), e)
        # Don't submit to core - we're done.
        return True

    except MKTimeout:
        raise

    except Exception:
        if cmk.utils.debug.enabled():
            raise
        result = 3, cmk.base.crash_reporting.create_check_crash_dump(
            hostname,
            service.check_plugin_name,
            {
                "item": service.item,
                "params": used_params,
                "section_content": section_content
            },
            is_manual_check(hostname, service.check_plugin_name, service.item),
            service.description,
        ), []

    _submit_check_result(
        hostname,
        service.description,
        result,
        _legacy_determine_cache_info(multi_host_sections, SectionName(section_name)),
    )
    return True
def do_check(
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    only_check_plugin_names: Optional[Set[CheckPluginName]] = None,
    fetcher_messages: Optional[List[FetcherMessage]] = None
) -> Tuple[int, List[ServiceDetails], List[ServiceAdditionalDetails], List[str]]:
    cpu_tracking.start("busy")
    console.verbose("Checkmk version %s\n", cmk_version.__version__)

    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    exit_spec = host_config.exit_code_spec()

    status: ServiceState = 0
    infotexts: List[ServiceDetails] = []
    long_infotexts: List[ServiceAdditionalDetails] = []
    perfdata: List[str] = []
    try:
        # In case of keepalive we always have an ipaddress (can be 0.0.0.0 or :: when
        # address is unknown). When called as non keepalive ipaddress may be None or
        # is already an address (2nd argument)
        if ipaddress is None and not host_config.is_cluster:
            ipaddress = ip_lookup.lookup_ip_address(host_config)

        item_state.load(hostname)

        # When monitoring Checkmk clusters, the cluster nodes are responsible for fetching
        # all information from the monitored host and caching the result for the cluster
        # checks to be performed on the cached information.
        #
        # This means that in case of SNMP nodes, they need to take the clustered services
        # of the node into account, fetch the needed sections and cache them for the
        # cluster host.
        #
        # But later, when checking the node services, the node only has to deal with the
        # unclustered services.
        belongs_to_cluster = len(config_cache.clusters_of(hostname)) > 0

        services_to_fetch = _get_services_to_fetch(
            host_name=hostname,
            belongs_to_cluster=belongs_to_cluster,
            config_cache=config_cache,
            only_check_plugins=only_check_plugin_names,
        )

        services_to_check = _filter_clustered_services(
            config_cache=config_cache,
            host_name=hostname,
            belongs_to_cluster=belongs_to_cluster,
            services=services_to_fetch,
        )

        # see which raw sections we may need
        selected_raw_sections = _get_relevant_raw_sections(services_to_fetch, host_config)

        sources = data_sources.make_checkers(
            host_config,
            ipaddress,
            mode=data_sources.Mode.CHECKING,
        )
        mhs = MultiHostSections()
        result = data_sources.update_host_sections(
            mhs,
            data_sources.make_nodes(
                config_cache,
                host_config,
                ipaddress,
                data_sources.Mode.CHECKING,
                sources,
            ),
            selected_raw_sections=selected_raw_sections,
            max_cachefile_age=host_config.max_cachefile_age,
            host_config=host_config,
            fetcher_messages=fetcher_messages,
        )

        num_success, plugins_missing_data = _do_all_checks_on_host(
            config_cache,
            host_config,
            ipaddress,
            multi_host_sections=mhs,
            services=services_to_check,
            only_check_plugins=only_check_plugin_names,
        )
        inventory.do_inventory_actions_during_checking_for(
            config_cache,
            host_config,
            ipaddress,
            sources=sources,
            multi_host_sections=mhs,
        )

        if _submit_to_core:
            item_state.save(hostname)

        for configurator, host_sections in result:
            # TODO(ml): This implements the hidden protocol explicitly. This step
            #           is necessary before we get rid of it.
            checker = configurator.make_checker()
            checker.host_sections = host_sections
            source_state, source_output, source_perfdata = checker.get_summary_result()
            if source_output != "":
                status = max(status, source_state)
                infotexts.append("[%s] %s" % (configurator.id, source_output))
                perfdata.extend([_convert_perf_data(p) for p in source_perfdata])

        if plugins_missing_data:
            missing_data_status, missing_data_infotext = _check_plugins_missing_data(
                plugins_missing_data,
                exit_spec,
                bool(num_success),
            )
            status = max(status, missing_data_status)
            infotexts.append(missing_data_infotext)

        cpu_tracking.end()
        phase_times = cpu_tracking.get_times()
        total_times = phase_times["TOTAL"]
        run_time = total_times[4]

        infotexts.append("execution time %.1f sec" % run_time)
        if config.check_mk_perfdata_with_times:
            perfdata += [
                "execution_time=%.3f" % run_time,
                "user_time=%.3f" % total_times[0],
                "system_time=%.3f" % total_times[1],
                "children_user_time=%.3f" % total_times[2],
                "children_system_time=%.3f" % total_times[3],
            ]
            for phase, times in phase_times.items():
                if phase in ["agent", "snmp", "ds"]:
                    t = times[4] - sum(times[:4])  # real time - CPU time
                    perfdata.append("cmk_time_%s=%.3f" % (phase, t))
        else:
            perfdata.append("execution_time=%.3f" % run_time)

        return status, infotexts, long_infotexts, perfdata
    finally:
        if _checkresult_file_fd is not None:
            _close_checkresult_file()

        # "ipaddress is not None": At least when working with a cluster host it seems the
        # ipaddress may be None. This needs to be understood in detail and cleaned up. As
        # the InlineSNMP stats feature is a very rarely used debugging feature, the
        # analysis and fix are postponed for now.
        if config.record_inline_snmp_stats \
           and ipaddress is not None \
           and host_config.snmp_config(ipaddress).is_inline_snmp_host:
            inline.snmp_stats_save()
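# A small standalone sketch (added for illustration, not part of the original
# source) of the per-phase timing arithmetic in do_check() above. Each entry of
# phase_times is a 5-tuple (user, system, children_user, children_system, real),
# matching the perfdata labels emitted above; the "cmk_time_<phase>" metric is
# wall-clock time minus CPU time for that phase. The numbers are made up.
phase_times = {"agent": (0.2, 0.1, 0.0, 0.0, 1.5)}
for phase, times in phase_times.items():
    t = times[4] - sum(times[:4])  # real time - CPU time
    print("cmk_time_%s=%.3f" % (phase, t))  # prints: cmk_time_agent=1.200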
def test_get_host_sections_cluster(mode, monkeypatch, mocker):
    hostname = "testhost"
    hosts = {
        "host0": "10.0.0.0",
        "host1": "10.0.0.1",
        "host2": "10.0.0.2",
    }
    address = "1.2.3.4"
    tags = {"agent": "no-agent"}
    section_name = SectionName("test_section")
    config_cache = make_scenario(hostname, tags).apply(monkeypatch)
    host_config = config.HostConfig.make_host_config(hostname)

    def lookup_ip_address(host_config, family=None, for_mgmt_board=False):
        return hosts[host_config.hostname]

    def make_piggybacked_sections(hc):
        if hc.nodes == host_config.nodes:
            return {section_name: True}
        return {}

    def check(_, *args, **kwargs):
        return AgentHostSections(sections={section_name: [[str(section_name)]]})

    monkeypatch.setattr(
        ip_lookup,
        "lookup_ip_address",
        lookup_ip_address,
    )
    monkeypatch.setattr(
        _data_sources,
        "_make_piggybacked_sections",
        make_piggybacked_sections,
    )
    monkeypatch.setattr(
        ABCChecker,
        "check",
        check,
    )
    mocker.patch.object(
        cmk.utils.piggyback,
        "remove_source_status_file",
        autospec=True,
    )
    mocker.patch.object(
        cmk.utils.piggyback,
        "_store_status_file_of",
        autospec=True,
    )

    # Create a cluster
    host_config.nodes = list(hosts.keys())

    mhs = MultiHostSections()
    update_host_sections(
        mhs,
        make_nodes(
            config_cache,
            host_config,
            address,
            mode=mode,
            sources=make_checkers(host_config, address, mode=mode),
        ),
        max_cachefile_age=host_config.max_cachefile_age,
        selected_raw_sections=None,
        host_config=host_config,
    )
    assert len(mhs) == len(hosts) == 3
    cmk.utils.piggyback._store_status_file_of.assert_not_called()  # type: ignore[attr-defined]
    assert cmk.utils.piggyback.remove_source_status_file.call_count == 3  # type: ignore[attr-defined]

    for host, addr in hosts.items():
        remove_source_status_file = cmk.utils.piggyback.remove_source_status_file
        remove_source_status_file.assert_any_call(host)  # type: ignore[attr-defined]

        key = HostKey(host, addr, SourceType.HOST)
        assert key in mhs

        section = mhs[key]
        assert len(section.sections) == 1
        assert next(iter(section.sections)) == section_name
        assert not section.cache_info
        assert not section.piggybacked_raw_data
        assert not section.persisted_sections
def _do_inv_for_realhost(
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    sources: data_sources.Checkers,
    multi_host_sections: Optional[MultiHostSections],
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    inventory_tree: StructuredDataTree,
    status_data_tree: StructuredDataTree,
) -> Sequence[Tuple[ABCConfigurator, ABCHostSections]]:
    results: List[Tuple[ABCConfigurator, ABCHostSections]] = []
    for source in sources:
        if isinstance(source, data_sources.snmp.SNMPChecker):
            # TODO(ml): This modifies the SNMP fetcher config dynamically.
            configurator = cast(data_sources.snmp.SNMPConfigurator, source.configurator)
            configurator.on_snmp_scan_error = "raise"  # default
            data_sources.FileCacheConfigurator.snmp_disabled = True
            configurator.use_snmpwalk_cache = False
            configurator.ignore_check_interval = True

            if multi_host_sections is not None:
                # Status data inventory already provides a filled multi_host_sections object.
                # SNMP data source: If 'do_status_data_inv' is enabled there may be
                # sections for inventory plugins which were not fetched yet.
                host_sections = multi_host_sections.setdefault(
                    # TODO(ml): are
                    #    hostname == source.hostname
                    #    ipaddress == source.ipaddress
                    # ?
                    HostKey(hostname, ipaddress, source.configurator.source_type),
                    SNMPHostSections(),
                )
                # TODO(ml): This modifies the SNMP fetcher config dynamically.
                #           Can the fetcher handle that on its own?
                configurator.prefetched_sections = host_sections.sections
                raw_data = source.configurator.default_raw_data
                try:
                    with source.configurator.make_fetcher() as fetcher:
                        raw_data = fetcher.fetch(Mode.INVENTORY)
                except Exception as exc:
                    source.exception = exc
                hs = source.check(raw_data)
                results.append((source.configurator, hs))
                host_sections.update(hs)

    if multi_host_sections is None:
        multi_host_sections = MultiHostSections()
        hs = data_sources.update_host_sections(
            multi_host_sections,
            data_sources.make_nodes(
                config_cache,
                host_config,
                ipaddress,
                data_sources.Mode.INVENTORY,
                sources,
            ),
            max_cachefile_age=host_config.max_cachefile_age,
            selected_raw_sections=None,
            host_config=host_config,
        )
        results.extend(hs)

    section.section_step("Executing inventory plugins")
    console.verbose("Plugins:")
    for inventory_plugin in agent_based_register.iter_all_inventory_plugins():
        kwargs = multi_host_sections.get_section_kwargs(
            HostKey(hostname, ipaddress, SourceType.HOST),
            inventory_plugin.sections,
        )
        if not kwargs:
            continue

        console.verbose(" %s%s%s%s" % (tty.green, tty.bold, inventory_plugin.name, tty.normal))

        # Inventory functions can optionally have a second argument: parameters.
        # These are configured via rule sets (much like check parameters).
        if inventory_plugin.inventory_ruleset_name is not None:
            kwargs["params"] = host_config.inventory_parameters(
                str(inventory_plugin.inventory_ruleset_name))  # TODO (mo): keep type!

        _aggregate_inventory_results(
            inventory_plugin.inventory_function(**kwargs),
            inventory_tree,
            status_data_tree,
        )

    console.verbose("\n")
    return results
def get_aggregated_result(
    multi_host_sections: MultiHostSections,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[CheckPlugin],
    params_function: Callable[[], checking_classes.Parameters],
) -> Tuple[bool, bool, ServiceCheckResult]:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.

    Returns a triple:
       bool: should the result be submitted to the core
       bool: did we receive data for the plugin
       ServiceCheckResult: The aggregated result as returned by the plugin, or a fallback
    """
    if plugin is None:
        return False, True, CHECK_NOT_IMPLEMENTED

    check_function = (plugin.cluster_check_function
                      if host_config.is_cluster else plugin.check_function)

    source_type = (SourceType.MANAGEMENT
                   if is_management_name(service.check_plugin_name) else SourceType.HOST)

    kwargs = {}
    try:
        kwargs = multi_host_sections.get_section_cluster_kwargs(
            HostKey(host_config.hostname, None, source_type),
            plugin.sections,
            service.description,
        ) if host_config.is_cluster else multi_host_sections.get_section_kwargs(
            HostKey(host_config.hostname, ipaddress, source_type),
            plugin.sections,
        )

        if not kwargs and not is_management_name(service.check_plugin_name):
            # in 1.6 some plugins were discovered for management boards, but with
            # the regular host plugins name. In this case retry with the source type
            # forced to MANAGEMENT:
            kwargs = multi_host_sections.get_section_cluster_kwargs(
                HostKey(host_config.hostname, None, SourceType.MANAGEMENT),
                plugin.sections,
                service.description,
            ) if host_config.is_cluster else multi_host_sections.get_section_kwargs(
                HostKey(host_config.hostname, ipaddress, SourceType.MANAGEMENT),
                plugin.sections,
            )

        if not kwargs:  # no data found
            return False, False, RECEIVED_NO_DATA

        if service.item is not None:
            kwargs["item"] = service.item

        if plugin.check_ruleset_name:
            kwargs["params"] = params_function()

        with value_store.context(plugin.name, service.item):
            result = _aggregate_results(check_function(**kwargs))

    except (item_state.MKCounterWrapped, checking_classes.IgnoreResultsError) as e:
        msg = str(e) or "No service summary available"
        return False, True, (0, msg, [])

    except MKTimeout:
        raise

    except Exception:
        if cmk.utils.debug.enabled():
            raise
        result = 3, cmk.base.crash_reporting.create_check_crash_dump(
            host_config.hostname,
            service.check_plugin_name,
            kwargs,
            is_manual_check(host_config.hostname, service.id()),
            service.description,
        ), []

    return True, True, result
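# Illustration (added, not part of the original source): the early-return shapes
# of get_aggregated_result() above, as implemented in its body.
#
#   plugin is None                          -> (False, True,  CHECK_NOT_IMPLEMENTED)
#   no section data found                   -> (False, False, RECEIVED_NO_DATA)
#   MKCounterWrapped / IgnoreResultsError   -> (False, True,  (0, msg, []))
#
# In all other cases (including a crashed check, which yields a state-3 crash
# dump result) it returns (True, True, result), where result is a
# (state, infotext, perfdata) ServiceCheckResult triple.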