def _get_monitoring_data_kwargs_by_source(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    config_cache: config.ConfigCache,
    ipaddress: Optional[HostAddress],
    service: Service,
    sections: Sequence[ParsedSectionName],
    source_type: SourceType,
) -> Tuple[Mapping[str, object], ServiceCheckResult]:
    if host_config.is_cluster:
        nodes = config_cache.get_clustered_service_node_keys(
            host_config,
            source_type,
            service.description,
        )
        return (
            get_section_cluster_kwargs(
                parsed_sections_broker,
                nodes,
                sections,
            ),
            cluster_received_no_data(nodes),
        )

    return (
        get_section_kwargs(
            parsed_sections_broker,
            HostKey(host_config.hostname, ipaddress, source_type),
            sections,
        ),
        RECEIVED_NO_DATA,
    )
def test_get_section_cluster_kwargs(
    required_sections: Sequence[str], expected_result: Dict[str, Any]
) -> None:
    node1_sections = HostSections[AgentRawDataSection](
        sections={
            SectionName("one"): NODE_1,
            SectionName("two"): NODE_1,
            SectionName("three"): NODE_1,
        }
    )
    node2_sections = HostSections[AgentRawDataSection](
        sections={
            SectionName("two"): NODE_2,
            SectionName("three"): NODE_2,
        }
    )
    parsed_sections_broker = ParsedSectionsBroker(
        {
            HostKey(HostName("node1"), HostAddress("127.0.0.1"), SourceType.HOST): (
                ParsedSectionsResolver(
                    section_plugins=[SECTION_ONE, SECTION_TWO, SECTION_THREE, SECTION_FOUR],
                ),
                SectionsParser(host_sections=node1_sections, host_name=HostName("node1")),
            ),
            HostKey(HostName("node2"), HostAddress("127.0.0.1"), SourceType.HOST): (
                ParsedSectionsResolver(
                    section_plugins=[SECTION_ONE, SECTION_TWO, SECTION_THREE, SECTION_FOUR],
                ),
                SectionsParser(host_sections=node2_sections, host_name=HostName("node2")),
            ),
        }
    )

    kwargs = get_section_cluster_kwargs(
        parsed_sections_broker,
        [
            HostKey(HostName("node1"), HostAddress("127.0.0.1"), SourceType.HOST),
            HostKey(HostName("node2"), HostAddress("127.0.0.1"), SourceType.HOST),
        ],
        [ParsedSectionName(n) for n in required_sections],
    )

    assert expected_result == kwargs
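# A minimal sketch of the parametrization that presumably drives the test above
# (hypothetical: the real test module defines its own cases, and the expected
# kwargs depend on the SECTION_* parse functions and NODE_* payloads defined
# there; EXPECTED_ONE and EXPECTED_ONE_AND_TWO are stand-ins for those fixtures,
# each mapping a section-based keyword name to per-node parsed data, with None
# where a node delivered nothing):
#
#   @pytest.mark.parametrize(
#       "required_sections, expected_result",
#       [
#           (["one"], EXPECTED_ONE),                 # "one" is only delivered by node1
#           (["one", "two"], EXPECTED_ONE_AND_TWO),  # "two" is delivered by both nodes
#       ],
#   )
#   def test_get_section_cluster_kwargs(required_sections, expected_result): ...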
def _get_monitoring_data_kwargs(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    config_cache: config.ConfigCache,
    ipaddress: Optional[HostAddress],
    service: ConfiguredService,
    sections: Sequence[ParsedSectionName],
    source_type: Optional[SourceType] = None,
) -> Tuple[Mapping[str, object], ServiceCheckResult]:
    if source_type is None:
        source_type = (
            SourceType.MANAGEMENT
            if service.check_plugin_name.is_management_name()
            else SourceType.HOST
        )

    if host_config.is_cluster:
        nodes = config_cache.get_clustered_service_node_keys(
            host_config,
            source_type,
            service.description,
        )
        return (
            get_section_cluster_kwargs(
                parsed_sections_broker,
                nodes,
                sections,
            ),
            ServiceCheckResult.cluster_received_no_data(nodes),
        )

    return (
        get_section_kwargs(
            parsed_sections_broker,
            HostKey(host_config.hostname, ipaddress, source_type),
            sections,
        ),
        ServiceCheckResult.received_no_data(),
    )
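# A minimal usage sketch (hypothetical caller, not an actual call site): the
# helper returns the section kwargs together with a pre-built fallback result,
# so a caller can bail out with that result when no section produced data.
# `broker`, `host_config`, `config_cache`, `ipaddress`, `service` and `plugin`
# are assumed to be set up as in the surrounding checking code; item/params
# handling is omitted for brevity.
def _example_check_dispatch(broker, host_config, config_cache, ipaddress, service, plugin):
    kwargs, empty_result = _get_monitoring_data_kwargs(
        broker,
        host_config,
        config_cache,
        ipaddress,
        service,
        plugin.sections,
    )
    if not kwargs:
        # None of the required parsed sections yielded data: report the
        # prepared "received no data" result instead of running the check.
        return AggregatedResult(
            submit=False,
            data_received=False,
            result=empty_result,
            cache_info=None,
        )
    return AggregatedResult(
        submit=True,
        data_received=True,
        result=_aggregate_results(plugin.check_function(**kwargs)),
        cache_info=broker.get_cache_info(plugin.sections),
    )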
def test_get_section_cluster_kwargs(required_sections, expected_result):
    node1_sections = AgentHostSections(
        sections={
            SectionName("one"): NODE_1,
            SectionName("two"): NODE_1,
            SectionName("three"): NODE_1,
        }
    )
    node2_sections = AgentHostSections(
        sections={
            SectionName("two"): NODE_2,
            SectionName("three"): NODE_2,
        }
    )
    parsed_sections_broker = ParsedSectionsBroker(
        {
            HostKey("node1", "127.0.0.1", SourceType.HOST): (
                ParsedSectionsResolver(
                    section_plugins=[SECTION_ONE, SECTION_TWO, SECTION_THREE, SECTION_FOUR],
                ),
                SectionsParser(host_sections=node1_sections),
            ),
            HostKey("node2", "127.0.0.1", SourceType.HOST): (
                ParsedSectionsResolver(
                    section_plugins=[SECTION_ONE, SECTION_TWO, SECTION_THREE, SECTION_FOUR],
                ),
                SectionsParser(host_sections=node2_sections),
            ),
        }
    )

    kwargs = get_section_cluster_kwargs(
        parsed_sections_broker,
        [
            HostKey("node1", "127.0.0.1", SourceType.HOST),
            HostKey("node2", "127.0.0.1", SourceType.HOST),
        ],
        [ParsedSectionName(n) for n in required_sections],
    )

    assert expected_result == kwargs
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_classes.CheckPlugin],
    # misleading: these are parameters that *may* be *partially* time specific
    timespecific_parameters: LegacyCheckParameters,
    *,
    value_store_manager: value_store.ValueStoreManager,
    persist_value_store_changes: bool,
) -> AggregatedResult:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.
    """
    if plugin is None:
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=CHECK_NOT_IMPLEMENTED,
            cache_info=None,
        )

    config_cache = config.get_config_cache()

    check_function = (
        _cluster_modes.get_cluster_check_function(
            *config_cache.get_clustered_service_configuration(
                host_config.hostname,
                service.description,
            ),
            plugin=plugin,
            service_id=service.id(),
            persist_value_store_changes=persist_value_store_changes,
        )
        if host_config.is_cluster
        else plugin.check_function
    )

    source_type = (
        SourceType.MANAGEMENT
        if service.check_plugin_name.is_management_name()
        else SourceType.HOST
    )

    try:
        kwargs = (
            get_section_cluster_kwargs(
                parsed_sections_broker,
                config_cache.get_clustered_service_node_keys(
                    host_config.hostname,
                    source_type,
                    service.description,
                )
                or [],
                plugin.sections,
            )
            if host_config.is_cluster
            else get_section_kwargs(
                parsed_sections_broker,
                HostKey(host_config.hostname, ipaddress, source_type),
                plugin.sections,
            )
        )

        if not kwargs and not service.check_plugin_name.is_management_name():
            # In 1.6 some plugins were discovered for management boards, but with
            # the regular host plugin's name. In this case retry with the source type
            # forced to MANAGEMENT:
            kwargs = (
                get_section_cluster_kwargs(
                    parsed_sections_broker,
                    config_cache.get_clustered_service_node_keys(
                        host_config.hostname,
                        SourceType.MANAGEMENT,
                        service.description,
                    )
                    or [],
                    plugin.sections,
                )
                if host_config.is_cluster
                else get_section_kwargs(
                    parsed_sections_broker,
                    HostKey(host_config.hostname, ipaddress, SourceType.MANAGEMENT),
                    plugin.sections,
                )
            )

        if not kwargs:  # no data found
            return AggregatedResult(
                submit=False,
                data_received=False,
                result=RECEIVED_NO_DATA,
                cache_info=None,
            )

        kwargs = {
            **kwargs,
            **({} if service.item is None else {"item": service.item}),
            **(
                {}
                if plugin.check_default_parameters is None
                else {"params": _final_read_only_check_parameters(timespecific_parameters)}
            ),
        }

        with plugin_contexts.current_host(host_config.hostname), plugin_contexts.current_service(
            service
        ), value_store_manager.namespace(service.id()):
            result = _aggregate_results(check_function(**kwargs))

    except (item_state.MKCounterWrapped, checking_classes.IgnoreResultsError) as e:
        msg = str(e) or "No service summary available"
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=(0, msg, []),
            cache_info=None,
        )

    except MKTimeout:
        raise

    except Exception:
        if cmk.utils.debug.enabled():
            raise
        table = check_table.get_check_table(host_config.hostname, skip_autochecks=True)
        result = (
            3,
            cmk.base.crash_reporting.create_check_crash_dump(
                host_name=host_config.hostname,
                service_name=service.description,
                plugin_name=service.check_plugin_name,
                plugin_kwargs=globals().get("kwargs", {}),
                is_manual=service.id() in table,
            ),
            [],
        )

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=parsed_sections_broker.get_cache_info(plugin.sections),
    )
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_classes.CheckPlugin],
    params_function: Callable[[], Parameters],
    *,
    value_store_manager: value_store.ValueStoreManager,
) -> AggregatedResult:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.
    """
    if plugin is None:
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=CHECK_NOT_IMPLEMENTED,
            cache_info=None,
        )

    check_function = (plugin.cluster_check_function
                      if host_config.is_cluster else plugin.check_function)

    source_type = (SourceType.MANAGEMENT
                   if service.check_plugin_name.is_management_name() else SourceType.HOST)

    config_cache = config.get_config_cache()

    kwargs: MutableMapping[str, Any] = {}
    try:
        kwargs = get_section_cluster_kwargs(
            parsed_sections_broker,
            config_cache.get_clustered_service_node_keys(
                host_config.hostname,
                source_type,
                service.description,
            ) or [],
            plugin.sections,
        ) if host_config.is_cluster else get_section_kwargs(
            parsed_sections_broker,
            HostKey(host_config.hostname, ipaddress, source_type),
            plugin.sections,
        )

        if not kwargs and not service.check_plugin_name.is_management_name():
            # In 1.6 some plugins were discovered for management boards, but with
            # the regular host plugin's name. In this case retry with the source type
            # forced to MANAGEMENT:
            kwargs = get_section_cluster_kwargs(
                parsed_sections_broker,
                config_cache.get_clustered_service_node_keys(
                    host_config.hostname,
                    SourceType.MANAGEMENT,
                    service.description,
                ) or [],
                plugin.sections,
            ) if host_config.is_cluster else get_section_kwargs(
                parsed_sections_broker,
                HostKey(host_config.hostname, ipaddress, SourceType.MANAGEMENT),
                plugin.sections,
            )

        if not kwargs:  # no data found
            return AggregatedResult(
                submit=False,
                data_received=False,
                result=RECEIVED_NO_DATA,
                cache_info=None,
            )

        if service.item is not None:
            kwargs["item"] = service.item

        if plugin.check_default_parameters is not None:
            kwargs["params"] = params_function()

        with plugin_contexts.current_host(host_config.hostname), \
                plugin_contexts.current_service(service), \
                value_store_manager.namespace(service.id()):
            result = _aggregate_results(check_function(**kwargs))

    except (item_state.MKCounterWrapped, checking_classes.IgnoreResultsError) as e:
        msg = str(e) or "No service summary available"
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=(0, msg, []),
            cache_info=None,
        )

    except MKTimeout:
        raise

    except Exception:
        if cmk.utils.debug.enabled():
            raise
        table = check_table.get_check_table(host_config.hostname, skip_autochecks=True)
        result = 3, cmk.base.crash_reporting.create_check_crash_dump(
            host_name=host_config.hostname,
            service_name=service.description,
            plugin_name=service.check_plugin_name,
            plugin_kwargs=kwargs,
            is_manual=service.id() in table,
        ), []

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=parsed_sections_broker.get_cache_info(plugin.sections),
    )
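# A minimal invocation sketch (hypothetical; argument construction is elided
# and `broker`, `host_config`, `ipaddress`, `service`, `plugin`, `params` and
# `value_store_manager` are assumed to exist as in the surrounding code):
def _example_run_check(broker, host_config, ipaddress, service, plugin, params, value_store_manager):
    aggregated = get_aggregated_result(
        broker,
        host_config,
        ipaddress,
        service,
        plugin,
        # deferred: only evaluated when the plugin actually declares default parameters
        params_function=lambda: params,
        value_store_manager=value_store_manager,
    )
    if aggregated.data_received and aggregated.submit:
        # `result` is a (state, summary, perfdata) tuple, as built above
        state, summary, perfdata = aggregated.result
        return state, summary, perfdata
    return aggregated.result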