def test_get_check_table_of_mgmt_boards(monkeypatch, hostname, expected_result):
    """The check table must route IPMI services correctly.

    The management-board host ("no-agent" + management_protocol=ipmi) gets the
    mgmt_ipmi_sensors service, while the regular agent host gets the plain
    ipmi_sensors service.  ``hostname``/``expected_result`` come from the
    pytest parametrization (not visible in this chunk).
    """
    # Simulated discovery results, keyed by host name.
    autochecks = {
        "mgmt-board-ipmi": [
            Service("mgmt_ipmi_sensors", "TEMP X", "Management Interface: IPMI Sensor TEMP X",
                    {}),
        ],
        "ipmi-host": [Service("ipmi_sensors", "TEMP Y", "IPMI Sensor TEMP Y", {}),]
    }
    ts = Scenario().add_host("mgmt-board-ipmi", tags={
        'piggyback': 'auto-piggyback',
        'networking': 'lan',
        'address_family': 'no-ip',  # host itself has no IP; only the mgmt board is reachable
        'criticality': 'prod',
        'snmp_ds': 'no-snmp',
        'site': 'heute',
        'agent': 'no-agent'
    })
    ts.add_host("ipmi-host", tags={
        'piggyback': 'auto-piggyback',
        'networking': 'lan',
        'agent': 'cmk-agent',
        'criticality': 'prod',
        'snmp_ds': 'no-snmp',
        'site': 'heute',
        'address_family': 'ip-v4-only'
    })
    ts.set_option("management_protocol", {"mgmt-board-ipmi": "ipmi"})

    config_cache = ts.apply(monkeypatch)
    # Bypass the autochecks store and serve the fixture data directly.
    monkeypatch.setattr(config_cache, "get_autochecks_of", lambda h: autochecks.get(h, []))
    CheckManager().load(["mgmt_ipmi_sensors", "ipmi_sensors"])

    # NOTE(review): dict_keys == expected_result only holds if the
    # parametrized expected_result is a set/keys view — confirm in the
    # parametrization; a list would compare unequal.
    assert check_table.get_check_table(hostname).keys() == expected_result
def test_cluster_ignores_nodes_parameters(monkeypatch: MonkeyPatch) -> None:
    """A clustered service must not pick up the check parameters of its node."""
    node = HostName("node")
    cluster = HostName("cluster")
    service_id = CheckPluginName("smart_temp"), "auto-clustered"

    scenario = Scenario()
    scenario.add_host("node")
    scenario.add_cluster("cluster", nodes=["node"])
    scenario.set_ruleset(
        "clustered_services",
        [([], ["node"], ["Temperature SMART auto-clustered$"])],
    )
    scenario.set_autochecks("node", [AutocheckEntry(*service_id, {}, {})])
    scenario.apply(monkeypatch)

    # Pretend a parameter rule exists that matches the node (and only the node):
    def _fake_configured_parameters(host, plugin, item):
        if host == node:
            return TimespecificParameters(
                (TimespecificParameterSet.from_parameters({"levels_for_node": (1, 2)}),))
        return TimespecificParameters()

    monkeypatch.setattr(config, "_get_configured_parameters", _fake_configured_parameters)

    clustered_service = check_table.get_check_table(cluster)[service_id]
    # The cluster sees only the plugin defaults, not the node's rule.
    assert clustered_service.parameters.entries == (
        TimespecificParameterSet.from_parameters({"levels": (35, 40)}),
    )
def _get_services_to_check(
    *,
    config_cache: config.ConfigCache,
    host_name: HostName,
    run_plugin_names: Container[CheckPluginName],
) -> List[Service]:
    """Gather the list of services to check on this host.

    The check table is resolved for service dependencies and sorted by
    description.  Services outside their configured check period are always
    dropped; when ``run_plugin_names`` restricts the plugins (e.g. given on
    the command line), services of other plugins are dropped as well.
    """
    services = config.resolve_service_dependencies(
        host_name=host_name,
        services=sorted(
            check_table.get_check_table(host_name).values(),
            key=lambda service: service.description,
        ),
    )
    # The two original return branches differed only in the plugin-name
    # filter; fold them into one comprehension (EVERYTHING short-circuits
    # the membership test, so behavior is unchanged).
    return [
        service for service in services
        if (run_plugin_names is EVERYTHING or service.check_plugin_name in run_plugin_names)
        and not service_outside_check_period(config_cache, host_name, service.description)
    ]
def test_cluster_ignores_nodes_parameters(monkeypatch: MonkeyPatch) -> None:
    """A clustered service must not inherit check parameters from its node."""
    node = HostName("node")
    cluster = HostName("cluster")
    service_id = CheckPluginName("smart_temp"), "auto-clustered"

    ts = Scenario()
    ts.add_host("node")
    ts.add_cluster("cluster", nodes=["node"])
    ts.set_ruleset(
        "clustered_services",
        [([], ["node"], ["Temperature SMART auto-clustered$"])],
    )
    ts.set_autochecks("node", [Service(*service_id, "Temperature SMART auto-clustered", {})])
    ts.apply(monkeypatch)

    # a rule for the node:
    monkeypatch.setattr(
        config,
        "_update_with_configured_check_parameters",
        lambda host, plugin, item, params, configured_params: {"levels_for_node": (1, 2), **params}
        if host == node else params,
    )

    # The cluster must see only the plugin defaults, not the node's rule.
    clustered_service = check_table.get_check_table(cluster)[service_id]
    assert clustered_service.parameters == {"levels": (35, 40)}
def test_get_check_table__static_checks_win(monkeypatch: MonkeyPatch) -> None:
    """When a static check collides with an autocheck, the static check wins."""
    hostname_str = "df_host"
    hostname = HostName(hostname_str)
    plugin_name = CheckPluginName("df")
    item = "/snap/core/9066"

    ts = Scenario()
    ts.add_host(hostname)
    # One static check and one autocheck for the exact same (plugin, item):
    ts.set_option(
        "static_checks",
        {"filesystem": [((str(plugin_name), item, {"source": "static"}), [], [hostname_str])]},
    )
    ts.set_autochecks(hostname_str, [AutocheckEntry(plugin_name, item, {"source": "auto"}, {})])
    ts.apply(monkeypatch)

    chk_table = check_table.get_check_table(hostname)

    # Exactly one entry remains after the collision is resolved ...
    assert len(chk_table) == 1
    # ... and its parameters stem from the static check.
    effective_params = chk_table[(plugin_name, item)].parameters.evaluate(lambda _: True)
    assert effective_params["source"] == "static"  # type: ignore[index,call-overload]
def _manual_items(
    host_config: config.HostConfig
) -> Iterable[Tuple[ServiceID, ServicesTableEntry]]:
    """Yield the manually configured checks of the host.

    Manual checks can override discovered checks, hence the 'manual' tag.
    """
    hostname = host_config.hostname
    manual_table = check_table.get_check_table(hostname, skip_autochecks=True)
    for service in manual_table.values():
        yield service.id(), ('manual', service, [hostname])
def _merge_manual_services(
    host_config: config.HostConfig,
    services: ServicesTable,  # mutated in place, then regrouped
    discovery_parameters: DiscoveryParameters,  # currently unused in this body
) -> ServicesByTransition:
    """Add/replace manual and active checks and handle ignoration"""
    host_name = host_config.hostname

    # Find manual checks. These can override discovered checks -> "manual"
    manual_items = check_table.get_check_table(host_name, skip_autochecks=True)
    for service in manual_items.values():
        services[service.id()] = ('manual', service, [host_name])

    # Add custom checks -> "custom"
    for entry in host_config.custom_checks:
        services[(CheckPluginName('custom'), entry['service_description'])] = (
            'custom',
            Service(
                check_plugin_name=CheckPluginName('custom'),
                item=entry['service_description'],
                description=entry['service_description'],
                parameters=None,
            ),
            [host_name],
        )

    # Similar for 'active_checks', but here we have parameters
    for plugin_name, entries in host_config.active_checks:
        for params in entries:
            descr = config.active_check_service_description(host_name, plugin_name, params)
            services[(CheckPluginName(plugin_name), descr)] = (
                'active',
                Service(
                    check_plugin_name=CheckPluginName(plugin_name),
                    item=descr,
                    description=descr,
                    parameters=params,
                ),
                [host_name],
            )

    # Handle disabled services -> "ignored"
    for check_source, discovered_service, _found_on_nodes in services.values():
        if check_source in ["legacy", "active", "custom"]:
            # These are ignored later in get_check_preview
            # TODO: This needs to be cleaned up. The problem here is that service_description() can not
            # calculate the description of active checks and the active checks need to be put into
            # "[source]_ignored" instead of ignored.
            continue

        if config.service_ignored(host_name, discovered_service.check_plugin_name,
                                  discovered_service.description):
            services[discovered_service.id()] = ("ignored", discovered_service, [host_name])

    return _group_by_transition(services.values())
def test_get_check_table(monkeypatch, hostname, expected_result):
    """Check table composition from autochecks, static checks and clustering.

    ``hostname``/``expected_result`` come from the pytest parametrization
    (not visible in this chunk).
    """
    # Simulated discovery results, keyed by host name.
    autochecks = {
        "ping-host": [Service("smart.temp", "bla", u'Temperature SMART bla', {})],
        "autocheck-overwrite": [
            Service('smart.temp', '/dev/sda', u'Temperature SMART /dev/sda',
                    {"is_autocheck": True}),
            Service('smart.temp', '/dev/sdb', u'Temperature SMART /dev/sdb',
                    {"is_autocheck": True}),
        ],
        "ignore-not-existing-checks": [
            Service("bla.blub", "ITEM", u'Blub ITEM', {}),
        ],
        "node1": [
            Service("smart.temp", "auto-clustered", u"Temperature SMART auto-clustered", {}),
            Service("smart.temp", "auto-not-clustered", u'Temperature SMART auto-not-clustered', {})
        ],
    }
    ts = Scenario().add_host(hostname, tags={"criticality": "test"})
    ts.add_host("ping-host", tags={"agent": "no-agent"})
    ts.add_host("node1")
    ts.add_cluster("cluster1", nodes=["node1"])
    ts.set_option(
        "static_checks",
        {
            "temperature": [
                (('smart.temp', '/dev/sda', {}), [], ["no-autochecks", "autocheck-overwrite"]),
                (('blub.bla', 'ITEM', {}), [], ["ignore-not-existing-checks"]),
                # A disabled rule must not contribute any service:
                (('smart.temp', 'ITEM1', {}), [], ["ignore-disabled-rules"], {
                    "disabled": True
                }),
                (('smart.temp', 'ITEM2', {}), [], ["ignore-disabled-rules"]),
                # Two rules for the same service: the first one should win.
                (('smart.temp', '/dev/sda', {
                    "rule": 1
                }), [], ["static-check-overwrite"]),
                (('smart.temp', '/dev/sda', {
                    "rule": 2
                }), [], ["static-check-overwrite"]),
                (('smart.temp', 'static-node1', {}), [], ["node1"]),
                (('smart.temp', 'static-cluster', {}), [], ["cluster1"]),
            ]
        },
    )
    ts.set_ruleset("clustered_services", [
        ([], ['node1'], [u'Temperature SMART auto-clustered$']),
    ])

    config_cache = ts.apply(monkeypatch)
    # Bypass the autochecks store and serve the fixture data directly.
    monkeypatch.setattr(config_cache, "get_autochecks_of", lambda h: autochecks.get(h, []))
    CheckManager().load(["smart"])

    assert check_table.get_check_table(hostname) == expected_result
def test_get_check_table_of_mgmt_boards(
        monkeypatch: MonkeyPatch, hostname_str: str, expected_result: List[ServiceID]) -> None:
    """IPMI services must be routed to agent host vs. management board.

    ``hostname_str``/``expected_result`` come from the pytest parametrization
    (not visible in this chunk).
    """
    hostname = HostName(hostname_str)
    # Simulated discovery results, keyed by host name.
    autochecks: Mapping[str, Sequence[Service[LegacyCheckParameters]]] = {
        "mgmt-board-ipmi": [
            Service(
                CheckPluginName("mgmt_ipmi_sensors"),
                "TEMP X",
                "Management Interface: IPMI Sensor TEMP X",
                {},
            ),
        ],
        "ipmi-host": [
            Service(CheckPluginName("ipmi_sensors"), "TEMP Y", "IPMI Sensor TEMP Y", {}),
        ],
    }
    ts = Scenario().add_host(
        "mgmt-board-ipmi",
        tags={
            "piggyback": "auto-piggyback",
            "networking": "lan",
            "address_family": "no-ip",  # only the management board is reachable
            "criticality": "prod",
            "snmp_ds": "no-snmp",
            "site": "heute",
            "agent": "no-agent",
        },
    )
    ts.add_host(
        "ipmi-host",
        tags={
            "piggyback": "auto-piggyback",
            "networking": "lan",
            "agent": "cmk-agent",
            "criticality": "prod",
            "snmp_ds": "no-snmp",
            "site": "heute",
            "address_family": "ip-v4-only",
        },
    )
    ts.set_option("management_protocol", {"mgmt-board-ipmi": "ipmi"})

    config_cache = ts.apply(monkeypatch)
    # Bypass the autochecks store and serve the fixture data directly.
    monkeypatch.setattr(config_cache, "get_autochecks_of", lambda h: autochecks.get(h, []))

    assert list(check_table.get_check_table(hostname).keys()) == expected_result
def _make_checking_sections(self) -> Set[SectionName]:
    """Return the SNMP section names needed for checking this host."""
    if self.selected_sections is NO_SELECTION:
        # Derive the candidates from the check plugins of the host's check
        # table (clustered services included, ignored services skipped).
        needed_check_names = check_table.get_check_table(
            self.hostname,
            filter_mode=check_table.FilterMode.INCLUDE_CLUSTERED,
            skip_ignored=True,
        ).needed_check_names()
        candidates = set(
            agent_based_register.get_relevant_raw_sections(
                check_plugin_names=needed_check_names,
                inventory_plugin_names=(),
            ))
    else:
        candidates = self.selected_sections
    # Keep only sections that are actually SNMP sections.
    return candidates.intersection(
        section.name for section in agent_based_register.iter_all_snmp_sections())
def test_get_check_table_of_mgmt_boards(
        monkeypatch: MonkeyPatch, hostname_str: str, expected_result: List[ServiceID]) -> None:
    """IPMI services must be routed to agent host vs. management board.

    ``hostname_str``/``expected_result`` come from the pytest parametrization
    (not visible in this chunk).
    """
    hostname = HostName(hostname_str)
    ts = Scenario()
    ts.add_host(
        "mgmt-board-ipmi",
        tags={
            "piggyback": "auto-piggyback",
            "networking": "lan",
            "address_family": "no-ip",  # only the management board is reachable
            "criticality": "prod",
            "snmp_ds": "no-snmp",
            "site": "heute",
            "agent": "no-agent",
        },
    )
    ts.add_host(
        "ipmi-host",
        tags={
            "piggyback": "auto-piggyback",
            "networking": "lan",
            "agent": "cmk-agent",
            "criticality": "prod",
            "snmp_ds": "no-snmp",
            "site": "heute",
            "address_family": "ip-v4-only",
        },
    )
    ts.set_option("management_protocol", {"mgmt-board-ipmi": "ipmi"})
    ts.set_autochecks(
        "mgmt-board-ipmi",
        [AutocheckEntry(CheckPluginName("mgmt_ipmi_sensors"), "TEMP X", {}, {})],
    )
    ts.set_autochecks(
        "ipmi-host",
        [AutocheckEntry(CheckPluginName("ipmi_sensors"), "TEMP Y", {}, {})],
    )
    ts.apply(monkeypatch)

    assert list(check_table.get_check_table(hostname).keys()) == expected_result
def _get_services_to_fetch(
    host_name: HostName,
    belongs_to_cluster: bool,
    config_cache: config.ConfigCache,
) -> List[Service]:
    """Gather list of services to fetch the sections for

    Please note that explicitly includes the services that are assigned to cluster nodes. In SNMP
    clusters the nodes have to fetch the information for the checking phase of the clustered
    services.
    """
    table = check_table.get_check_table(
        host_name, filter_mode=check_table.FilterMode.INCLUDE_CLUSTERED)
    candidates = config.resolve_service_dependencies(
        host_name=host_name,
        services=sorted(table.values(), key=lambda svc: svc.description),
    )
    # Drop services that are currently outside their configured check period.
    return [
        svc for svc in candidates
        if not service_outside_check_period(config_cache, host_name, svc.description)
    ]
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_classes.CheckPlugin],
    timespecific_parameters: Union[LegacyCheckParameters, TimespecificParameters],
    *,
    value_store_manager: value_store.ValueStoreManager,
    persist_value_store_changes: bool,
) -> AggregatedResult:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.
    """
    if plugin is None:
        # Unknown plugin: report "not implemented" rather than crashing.
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=ServiceCheckResult.check_not_implemented(),
            cache_info=None,
        )

    config_cache = config.get_config_cache()

    # Clusters use a dedicated check function that aggregates the node results.
    check_function = (_cluster_modes.get_cluster_check_function(
        *config_cache.get_clustered_service_configuration(
            host_config.hostname,
            service.description,
        ),
        plugin=plugin,
        service_id=service.id(),
        persist_value_store_changes=persist_value_store_changes,
    ) if host_config.is_cluster else plugin.check_function)

    section_kws, error_result = _get_monitoring_data_kwargs_handle_pre20_services(
        parsed_sections_broker,
        host_config,
        config_cache,
        ipaddress,
        service,
        plugin.sections,
    )
    if not section_kws:  # no data found
        return AggregatedResult(
            submit=False,
            data_received=False,
            result=error_result,
            cache_info=None,
        )

    item_kw = {} if service.item is None else {"item": service.item}
    params_kw = ({} if plugin.check_default_parameters is None else {
        "params": _final_read_only_check_parameters(timespecific_parameters)
    })

    try:
        with plugin_contexts.current_host(host_config.hostname), plugin_contexts.current_service(
                service), value_store_manager.namespace(service.id()):
            result = _aggregate_results(
                check_function(
                    **item_kw,
                    **params_kw,
                    **section_kws,
                ))
    except (item_state.MKCounterWrapped, checking_classes.IgnoreResultsError) as e:
        # The check deliberately postponed its result ("counter wrapped" /
        # ignore-results): report OK-ish output without submitting state.
        msg = str(e) or "No service summary available"
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=ServiceCheckResult(output=msg),
            cache_info=None,
        )
    except MKTimeout:
        raise
    except Exception:
        # In debug mode let the exception escape; otherwise create a crash
        # report and return UNKNOWN (state 3).
        if cmk.utils.debug.enabled():
            raise
        table = check_table.get_check_table(host_config.hostname, skip_autochecks=True)
        result = ServiceCheckResult(
            3,
            cmk.base.crash_reporting.create_check_crash_dump(
                host_name=host_config.hostname,
                service_name=service.description,
                plugin_name=service.check_plugin_name,
                plugin_kwargs={
                    **item_kw,
                    **params_kw,
                    **section_kws
                },
                is_manual=service.id() in table,
            ),
        )

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=parsed_sections_broker.get_cache_info(plugin.sections),
    )
def test_get_check_table_of_static_check(monkeypatch, hostname, expected_result):
    """Static (enforced) checks must appear in the check table.

    ``hostname``/``expected_result`` come from the pytest parametrization
    (not visible in this chunk).
    """
    # Simulated discovered services matching the static checks below.
    static_checks = {
        "df_host": [
            Service(CheckPluginName('df'), '/snap/core/9066', u'Filesystem /snap/core/9066', [{
                'tp_values': [('24X7', {
                    'inodes_levels': None
                })],
                'tp_default_value': {}
            }, {
                'trend_range': 24,
                'show_levels': 'onmagic',
                'inodes_levels': (10.0, 5.0),
                'magic_normsize': 20,
                'show_inodes': 'onlow',
                'levels': (80.0, 90.0),
                'show_reserved': False,
                'levels_low': (50.0, 60.0),
                'trend_perfdata': True
            }]),
        ],
        "df_host_1": [
            Service(
                CheckPluginName('df'), '/snap/core/9067', u'Filesystem /snap/core/9067', {
                    'trend_range': 24,
                    'show_levels': 'onmagic',
                    'inodes_levels': (10.0, 5.0),
                    'magic_normsize': 20,
                    'show_inodes': 'onlow',
                    'levels': (80.0, 90.0),
                    'tp_default_value': {
                        'levels': (87.0, 90.0)
                    },
                    'show_reserved': False,
                    'tp_values': [('24X7', {
                        'inodes_levels': None
                    })],
                    'levels_low': (50.0, 60.0),
                    'trend_perfdata': True
                })
        ],
        "df_host_2": [
            Service(CheckPluginName('df'), '/snap/core/9068', u'Filesystem /snap/core/9068', None)
        ],
    }
    ts = Scenario().add_host(hostname, tags={"criticality": "test"})
    ts.add_host("df_host")
    ts.add_host("df_host_1")
    ts.add_host("df_host_2")
    ts.set_option(
        "static_checks",
        {
            "filesystem": [
                # (plugin, item, params), conditions, host list
                (('df', '/snap/core/9066', [{
                    'tp_values': [('24X7', {
                        'inodes_levels': None
                    })],
                    'tp_default_value': {}
                }, {
                    'trend_range': 24,
                    'show_levels': 'onmagic',
                    'inodes_levels': (10.0, 5.0),
                    'magic_normsize': 20,
                    'show_inodes': 'onlow',
                    'levels': (80.0, 90.0),
                    'show_reserved': False,
                    'levels_low': (50.0, 60.0),
                    'trend_perfdata': True
                }]), [], ["df_host"]),
                (('df', '/snap/core/9067', [{
                    'tp_values': [('24X7', {
                        'inodes_levels': None
                    })],
                    'tp_default_value': {}
                }, {
                    'trend_range': 24,
                    'show_levels': 'onmagic',
                    'inodes_levels': (10.0, 5.0),
                    'magic_normsize': 20,
                    'show_inodes': 'onlow',
                    'levels': (80.0, 90.0),
                    'show_reserved': False,
                    'levels_low': (50.0, 60.0),
                    'trend_perfdata': True
                }]), [], ["df_host_1"]),
                # Params may also be None:
                (('df', '/snap/core/9068', None), [], ["df_host_2"]),
            ],
        },
    )

    config_cache = ts.apply(monkeypatch)
    # Bypass the autochecks store and serve the fixture data directly.
    monkeypatch.setattr(config_cache, "get_autochecks_of", lambda h: static_checks.get(h, []))

    assert list(check_table.get_check_table(hostname).keys()) == expected_result
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_classes.CheckPlugin],
    # missleading. These are prams that *may* be *partially* time specific
    timespecific_parameters: LegacyCheckParameters,
    *,
    value_store_manager: value_store.ValueStoreManager,
    persist_value_store_changes: bool,
) -> AggregatedResult:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.
    """
    if plugin is None:
        # Unknown plugin: report "not implemented" rather than crashing.
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=CHECK_NOT_IMPLEMENTED,
            cache_info=None,
        )

    config_cache = config.get_config_cache()

    # Clusters use a dedicated check function that aggregates the node results.
    check_function = (
        _cluster_modes.get_cluster_check_function(
            *config_cache.get_clustered_service_configuration(
                host_config.hostname,
                service.description,
            ),
            plugin=plugin,
            service_id=service.id(),
            persist_value_store_changes=persist_value_store_changes,
        )
        if host_config.is_cluster
        else plugin.check_function
    )

    source_type = (
        SourceType.MANAGEMENT if service.check_plugin_name.is_management_name() else SourceType.HOST
    )

    try:
        kwargs = (
            get_section_cluster_kwargs(
                parsed_sections_broker,
                config_cache.get_clustered_service_node_keys(
                    host_config.hostname,
                    source_type,
                    service.description,
                )
                or [],
                plugin.sections,
            )
            if host_config.is_cluster
            else get_section_kwargs(
                parsed_sections_broker,
                HostKey(host_config.hostname, ipaddress, source_type),
                plugin.sections,
            )
        )

        if not kwargs and not service.check_plugin_name.is_management_name():
            # in 1.6 some plugins where discovered for management boards, but with
            # the regular host plugins name. In this case retry with the source type
            # forced to MANAGEMENT:
            kwargs = (
                get_section_cluster_kwargs(
                    parsed_sections_broker,
                    config_cache.get_clustered_service_node_keys(
                        host_config.hostname,
                        SourceType.MANAGEMENT,
                        service.description,
                    )
                    or [],
                    plugin.sections,
                )
                if host_config.is_cluster
                else get_section_kwargs(
                    parsed_sections_broker,
                    HostKey(host_config.hostname, ipaddress, SourceType.MANAGEMENT),
                    plugin.sections,
                )
            )

        if not kwargs:  # no data found
            return AggregatedResult(
                submit=False,
                data_received=False,
                result=RECEIVED_NO_DATA,
                cache_info=None,
            )

        kwargs = {
            **kwargs,
            **({} if service.item is None else {"item": service.item}),
            **(
                {}
                if plugin.check_default_parameters is None
                else {"params": _final_read_only_check_parameters(timespecific_parameters)}
            ),
        }

        with plugin_contexts.current_host(host_config.hostname), plugin_contexts.current_service(
            service
        ), value_store_manager.namespace(service.id()):
            result = _aggregate_results(check_function(**kwargs))

    except (item_state.MKCounterWrapped, checking_classes.IgnoreResultsError) as e:
        # The check deliberately postponed its result; do not submit state.
        msg = str(e) or "No service summary available"
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=(0, msg, []),
            cache_info=None,
        )
    except MKTimeout:
        raise
    except Exception:
        # In debug mode let the exception escape; otherwise create a crash
        # report and return UNKNOWN (state 3).
        if cmk.utils.debug.enabled():
            raise
        table = check_table.get_check_table(host_config.hostname, skip_autochecks=True)
        result = (
            3,
            cmk.base.crash_reporting.create_check_crash_dump(
                host_name=host_config.hostname,
                service_name=service.description,
                plugin_name=service.check_plugin_name,
                # NOTE(review): "kwargs" is a *local*; globals().get() will not
                # find it and always yields {} here — verify this is intended
                # (it guards against "kwargs" being unbound, but loses the
                # actual kwargs in the crash dump).
                plugin_kwargs=globals().get("kwargs", {}),
                is_manual=service.id() in table,
            ),
            [],
        )

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=parsed_sections_broker.get_cache_info(plugin.sections),
    )
def _execute_checkmk_checks(
    *,
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    fetcher_messages: Sequence[FetcherMessage] = (),
    run_plugin_names: Container[CheckPluginName],
    selected_sections: SectionNameCollection,
    dry_run: bool,
    show_perfdata: bool,
) -> ActiveCheckResult:
    """Fetch data, run all selected checks and aggregate the results.

    Returns the overall active-check result for the "Check_MK" service,
    including per-source, parsing-error and missing-data subresults.
    """
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)
    exit_spec = host_config.exit_code_spec()
    mode = Mode.CHECKING if selected_sections is NO_SELECTION else Mode.FORCE_SECTIONS
    try:
        license_usage.try_history_update()

        # In case of keepalive we always have an ipaddress (can be 0.0.0.0 or :: when
        # address is unknown). When called as non keepalive ipaddress may be None or
        # is already an address (2nd argument)
        if ipaddress is None and not host_config.is_cluster:
            ipaddress = config.lookup_ip_address(host_config)

        services = config.resolve_service_dependencies(
            host_name=hostname,
            services=sorted(
                check_table.get_check_table(hostname).values(),
                key=lambda service: service.description,
            ),
        )
        with CPUTracker() as tracker:
            broker, source_results = make_broker(
                config_cache=config_cache,
                host_config=host_config,
                ip_address=ipaddress,
                mode=mode,
                selected_sections=selected_sections,
                file_cache_max_age=host_config.max_cachefile_age,
                fetcher_messages=fetcher_messages,
                force_snmp_cache_refresh=False,
                on_scan_error=OnError.RAISE,
            )
            num_success, plugins_missing_data = check_host_services(
                config_cache=config_cache,
                host_config=host_config,
                ipaddress=ipaddress,
                parsed_sections_broker=broker,
                services=services,
                run_plugin_names=run_plugin_names,
                dry_run=dry_run,
                show_perfdata=show_perfdata,
            )
            # Inventory actions only run for a full (unrestricted) check cycle.
            if run_plugin_names is EVERYTHING:
                inventory.do_inventory_actions_during_checking_for(
                    config_cache,
                    host_config,
                    ipaddress,
                    parsed_sections_broker=broker,
                )
            timed_results = [
                *check_sources(
                    source_results=source_results,
                    mode=mode,
                    include_ok_results=True,
                ),
                *check_parsing_errors(errors=broker.parsing_errors(), ),
                *_check_plugins_missing_data(
                    plugins_missing_data,
                    exit_spec,
                    bool(num_success),
                ),
            ]
        return ActiveCheckResult.from_subresults(
            *timed_results,
            _timing_results(tracker, fetcher_messages),
        )
    finally:
        _submit_to_core.finalize()
def get_aggregated_result(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    plugin: Optional[checking_classes.CheckPlugin],
    params_function: Callable[[], Parameters],
    *,
    value_store_manager: item_state.ValueStoreManager,
) -> AggregatedResult:
    """Run the check function and aggregate the subresults

    This function is also called during discovery.
    """
    if plugin is None:
        # Unknown plugin: report "not implemented" rather than crashing.
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=CHECK_NOT_IMPLEMENTED,
            cache_info=None,
        )

    check_function = (plugin.cluster_check_function
                      if host_config.is_cluster else plugin.check_function)

    source_type = (SourceType.MANAGEMENT
                   if service.check_plugin_name.is_management_name() else SourceType.HOST)

    config_cache = config.get_config_cache()

    kwargs = {}
    try:
        kwargs = parsed_sections_broker.get_section_cluster_kwargs(
            config_cache.get_clustered_service_node_keys(
                host_config.hostname,
                source_type,
                service.description,
            ) or [],
            plugin.sections,
        ) if host_config.is_cluster else parsed_sections_broker.get_section_kwargs(
            HostKey(host_config.hostname, ipaddress, source_type),
            plugin.sections,
        )

        if not kwargs and not service.check_plugin_name.is_management_name():
            # in 1.6 some plugins where discovered for management boards, but with
            # the regular host plugins name. In this case retry with the source type
            # forced to MANAGEMENT:
            kwargs = parsed_sections_broker.get_section_cluster_kwargs(
                config_cache.get_clustered_service_node_keys(
                    host_config.hostname,
                    SourceType.MANAGEMENT,
                    service.description,
                ) or [],
                plugin.sections,
            ) if host_config.is_cluster else parsed_sections_broker.get_section_kwargs(
                HostKey(host_config.hostname, ipaddress, SourceType.MANAGEMENT),
                plugin.sections,
            )

        if not kwargs:  # no data found
            return AggregatedResult(
                submit=False,
                data_received=False,
                result=RECEIVED_NO_DATA,
                cache_info=None,
            )

        if service.item is not None:
            kwargs["item"] = service.item

        if plugin.check_default_parameters is not None:
            kwargs["params"] = params_function()

        with plugin_contexts.current_host(host_config.hostname), \
             plugin_contexts.current_service(service), \
             value_store_manager.namespace(service.id()):
            result = _aggregate_results(check_function(**kwargs))

    except (item_state.MKCounterWrapped, checking_classes.IgnoreResultsError) as e:
        # The check deliberately postponed its result; do not submit state.
        msg = str(e) or "No service summary available"
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=(0, msg, []),
            cache_info=None,
        )
    except MKTimeout:
        raise
    except Exception:
        # In debug mode let the exception escape; otherwise create a crash
        # report and return UNKNOWN (state 3).
        if cmk.utils.debug.enabled():
            raise
        table = check_table.get_check_table(host_config.hostname, skip_autochecks=True)
        result = 3, cmk.base.crash_reporting.create_check_crash_dump(
            host_name=host_config.hostname,
            service_name=service.description,
            plugin_name=service.check_plugin_name,
            plugin_kwargs=kwargs,
            is_manual=service.id() in table,
        ), []

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=parsed_sections_broker.get_cache_info(plugin.sections),
    )
def test_get_check_table_of_static_check(
    monkeypatch: MonkeyPatch, hostname_str: str, expected_result: List[ServiceID]
) -> None:
    """Static (enforced) checks must appear in the check table.

    ``hostname_str``/``expected_result`` come from the pytest parametrization
    (not visible in this chunk).
    """
    hostname = HostName(hostname_str)
    # Simulated discovered services matching the static checks below.
    static_checks = {
        "df_host": [
            Service(
                CheckPluginName("df"),
                "/snap/core/9066",
                "Filesystem /snap/core/9066",
                [
                    {"tp_values": [("24X7", {"inodes_levels": None})], "tp_default_value": {}},
                    {
                        "trend_range": 24,
                        "show_levels": "onmagic",
                        "inodes_levels": (10.0, 5.0),
                        "magic_normsize": 20,
                        "show_inodes": "onlow",
                        "levels": (80.0, 90.0),
                        "show_reserved": False,
                        "levels_low": (50.0, 60.0),
                        "trend_perfdata": True,
                    },
                ],
            ),
        ],
        "df_host_1": [
            Service(
                CheckPluginName("df"),
                "/snap/core/9067",
                "Filesystem /snap/core/9067",
                {
                    "trend_range": 24,
                    "show_levels": "onmagic",
                    "inodes_levels": (10.0, 5.0),
                    "magic_normsize": 20,
                    "show_inodes": "onlow",
                    "levels": (80.0, 90.0),
                    "tp_default_value": {"levels": (87.0, 90.0)},
                    "show_reserved": False,
                    "tp_values": [("24X7", {"inodes_levels": None})],
                    "levels_low": (50.0, 60.0),
                    "trend_perfdata": True,
                },
            )
        ],
        "df_host_2": [
            Service(CheckPluginName("df"), "/snap/core/9068", "Filesystem /snap/core/9068", None)
        ],
    }
    ts = Scenario().add_host(hostname, tags={"criticality": "test"})
    ts.add_host("df_host")
    ts.add_host("df_host_1")
    ts.add_host("df_host_2")
    ts.set_option(
        "static_checks",
        {
            "filesystem": [
                # (plugin, item, params), conditions, host list
                (
                    (
                        "df",
                        "/snap/core/9066",
                        [
                            {
                                "tp_values": [("24X7", {"inodes_levels": None})],
                                "tp_default_value": {},
                            },
                            {
                                "trend_range": 24,
                                "show_levels": "onmagic",
                                "inodes_levels": (10.0, 5.0),
                                "magic_normsize": 20,
                                "show_inodes": "onlow",
                                "levels": (80.0, 90.0),
                                "show_reserved": False,
                                "levels_low": (50.0, 60.0),
                                "trend_perfdata": True,
                            },
                        ],
                    ),
                    [],
                    ["df_host"],
                ),
                (
                    (
                        "df",
                        "/snap/core/9067",
                        [
                            {
                                "tp_values": [("24X7", {"inodes_levels": None})],
                                "tp_default_value": {},
                            },
                            {
                                "trend_range": 24,
                                "show_levels": "onmagic",
                                "inodes_levels": (10.0, 5.0),
                                "magic_normsize": 20,
                                "show_inodes": "onlow",
                                "levels": (80.0, 90.0),
                                "show_reserved": False,
                                "levels_low": (50.0, 60.0),
                                "trend_perfdata": True,
                            },
                        ],
                    ),
                    [],
                    ["df_host_1"],
                ),
                # Params may also be None:
                (("df", "/snap/core/9068", None), [], ["df_host_2"]),
            ],
        },
    )

    config_cache = ts.apply(monkeypatch)
    # Bypass the autochecks store and serve the fixture data directly.
    monkeypatch.setattr(config_cache, "get_autochecks_of", lambda h: static_checks.get(h, []))

    assert list(check_table.get_check_table(hostname).keys()) == expected_result
def is_manual_check(hostname: HostName, service_id: ServiceID) -> bool:
    """Return True if the service is a manually configured (static) check."""
    manual_table = check_table.get_check_table(
        hostname,
        skip_autochecks=True,
    )
    return service_id in manual_table
def is_manual_check(hostname: HostName, check_plugin_name: CheckPluginNameStr,
                    item: Item) -> bool:
    """Return True if (plugin, item) is a manually configured (static) check."""
    manual_table = check_table.get_check_table(
        hostname,
        remove_duplicates=True,
        skip_autochecks=True,
    )
    return (check_plugin_name, item) in manual_table
def dump_host(hostname: HostName) -> None:
    """Print a human-readable summary of the host's configuration and services
    to the console (addresses, tags, labels, parents, groups, agent types and
    the check table)."""
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    out.output("\n")
    if host_config.is_cluster:
        nodes = host_config.nodes
        if nodes is None:
            raise RuntimeError()
        color = tty.bgmagenta
        add_txt = " (cluster of " + (", ".join(nodes)) + ")"
    else:
        color = tty.bgblue
        add_txt = ""
    out.output("%s%s%s%-78s %s\n" %
               (color, tty.bold, tty.white, hostname + add_txt, tty.normal))

    ipaddress = _ip_address_for_dump_host(host_config, family=host_config.default_address_family)

    addresses: Optional[str] = ""
    if not host_config.is_ipv4v6_host:
        addresses = ipaddress
    else:
        # Dual-stack host: show both addresses and mark the primary family.
        try:
            secondary = _ip_address_for_dump_host(
                host_config,
                family=socket.AF_INET if host_config.is_ipv6_primary else socket.AF_INET6,
            )
        except Exception:
            # Lookup of the secondary address failed; show a placeholder.
            secondary = "X.X.X.X"

        addresses = "%s, %s" % (ipaddress, secondary)
        if host_config.is_ipv6_primary:
            addresses += " (Primary: IPv6)"
        else:
            addresses += " (Primary: IPv4)"
    out.output(tty.yellow + "Addresses: " + tty.normal +
               (addresses if addresses is not None else "No IP") + "\n")

    tag_template = tty.bold + "[" + tty.normal + "%s" + tty.bold + "]" + tty.normal
    tags = [(tag_template % ":".join(t)) for t in sorted(host_config.tag_groups.items())]
    out.output(tty.yellow + "Tags: " + tty.normal + ", ".join(tags) + "\n")

    labels = [tag_template % ":".join(l) for l in sorted(host_config.labels.items())]
    out.output(tty.yellow + "Labels: " + tty.normal + ", ".join(labels) + "\n")

    # TODO: Clean this up once cluster parent handling has been moved to HostConfig
    if host_config.is_cluster:
        parents_list = host_config.nodes
        if parents_list is None:
            raise RuntimeError()
    else:
        parents_list = host_config.parents
    if len(parents_list) > 0:
        out.output(tty.yellow + "Parents: " + tty.normal + ", ".join(parents_list) + "\n")
    out.output(tty.yellow + "Host groups: " + tty.normal +
               ", ".join(host_config.hostgroups) + "\n")
    out.output(tty.yellow + "Contact groups: " + tty.normal +
               ", ".join(host_config.contactgroups) + "\n")

    agenttypes = [source.description for source in sources.make_sources(host_config, ipaddress)]

    if host_config.is_ping_host:
        agenttypes.append("PING only")

    out.output(tty.yellow + "Agent mode: " + tty.normal)
    out.output(host_config.agent_description + "\n")

    out.output(tty.yellow + "Type of agent: " + tty.normal)
    if len(agenttypes) == 1:
        out.output(agenttypes[0] + "\n")
    else:
        out.output("\n ")
        out.output("\n ".join(agenttypes) + "\n")

    out.output(tty.yellow + "Services:" + tty.normal + "\n")

    headers = ["checktype", "item", "params", "description", "groups"]
    colors = [tty.normal, tty.blue, tty.normal, tty.green, tty.normal]

    table_data = []
    for service in sorted(check_table.get_check_table(hostname).values(),
                          key=lambda s: s.description):
        table_data.append([
            str(service.check_plugin_name),
            str(service.item),
            _evaluate_params(service.parameters),
            service.description,
            ",".join(config_cache.servicegroups_of_service(hostname, service.description)),
        ])

    tty.print_table(headers, colors, table_data, " ")
def _execute_checkmk_checks(
    *,
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    fetched: Sequence[Tuple[Source, FetcherMessage]],
    run_plugin_names: Container[CheckPluginName],
    selected_sections: SectionNameCollection,
    dry_run: bool,
    show_perfdata: bool,
) -> ActiveCheckResult:
    """Run all selected checks on already-fetched data and aggregate results.

    Returns the overall active-check result for the "Check_MK" service,
    including per-source, parsing-error and missing-data subresults.
    """
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)
    exit_spec = host_config.exit_code_spec()
    try:
        license_usage.try_history_update()

        services = config.resolve_service_dependencies(
            host_name=hostname,
            services=sorted(
                check_table.get_check_table(hostname).values(),
                key=lambda service: service.description,
            ),
        )
        broker, source_results = make_broker(
            fetched=fetched,
            selected_sections=selected_sections,
            file_cache_max_age=host_config.max_cachefile_age,
        )
        with CPUTracker() as tracker:
            num_success, plugins_missing_data = check_host_services(
                config_cache=config_cache,
                host_config=host_config,
                ipaddress=ipaddress,
                parsed_sections_broker=broker,
                services=services,
                run_plugin_names=run_plugin_names,
                dry_run=dry_run,
                show_perfdata=show_perfdata,
            )
            # Inventory actions only run for a full (unrestricted) check cycle.
            if run_plugin_names is EVERYTHING:
                inventory.do_inventory_actions_during_checking_for(
                    config_cache,
                    host_config,
                    parsed_sections_broker=broker,
                )
            timed_results = [
                *check_sources(
                    source_results=source_results,
                    include_ok_results=True,
                ),
                *check_parsing_errors(errors=broker.parsing_errors(), ),
                *_check_plugins_missing_data(
                    plugins_missing_data,
                    exit_spec,
                    bool(num_success),
                ),
            ]
        return ActiveCheckResult.from_subresults(
            *timed_results,
            _timing_results(tracker.duration, [fetched_entry[1] for fetched_entry in fetched]),
        )
    finally:
        _submit_to_core.finalize()
def test_get_check_table(monkeypatch: MonkeyPatch, hostname_str: str,
                         expected_result: HostCheckTable) -> None:
    """Verify the computed check table for one parametrized host.

    ``hostname_str``/``expected_result`` come from the test's
    parametrization (defined outside this view). The scenario covers:
    static checks overriding autochecks, disabled static-check rules,
    duplicate static-check rules (first match wins - TODO confirm), and
    services clustered away from their node.
    """
    hostname = HostName(hostname_str)

    ts = Scenario()
    ts.add_host(hostname, tags={"criticality": "test"})
    ts.add_host("ping-host", tags={"agent": "no-agent"})
    ts.add_host("node1")
    ts.add_cluster("cluster1", nodes=["node1"])
    # Enforced ("static") services. The optional trailing dict marks a
    # rule as disabled, which must keep it out of the check table.
    ts.set_option(
        "static_checks",
        {
            "temperature": [
                (("smart.temp", "/dev/sda", {}), [], ["no-autochecks", "autocheck-overwrite"]),
                (("blub.bla", "ITEM", {}), [], ["ignore-not-existing-checks"]),
                (("smart.temp", "ITEM1", {}), [], ["ignore-disabled-rules"], {
                    "disabled": True
                }),
                (("smart.temp", "ITEM2", {}), [], ["ignore-disabled-rules"]),
                (("smart.temp", "/dev/sda", {
                    "rule": 1
                }), [], ["static-check-overwrite"]),
                (("smart.temp", "/dev/sda", {
                    "rule": 2
                }), [], ["static-check-overwrite"]),
                (("smart.temp", "static-node1", {}), [], ["node1"]),
                (("smart.temp", "static-cluster", {}), [], ["cluster1"]),
            ]
        },
    )
    # Services matching this description pattern on node1 are assigned
    # to the cluster instead of the node.
    ts.set_ruleset(
        "clustered_services",
        [
            ([], ["node1"], ["Temperature SMART auto-clustered$"]),
        ],
    )
    ts.set_autochecks(
        "ping-host",
        [
            AutocheckEntry(CheckPluginName("smart_temp"), "bla", {}, {}),
        ],
    )
    ts.set_autochecks(
        "autocheck-overwrite",
        [
            AutocheckEntry(CheckPluginName("smart_temp"), "/dev/sda", {"is_autocheck": True}, {}),
            AutocheckEntry(CheckPluginName("smart_temp"), "/dev/sdb", {"is_autocheck": True}, {}),
        ],
    )
    ts.set_autochecks(
        "ignore-not-existing-checks",
        [
            AutocheckEntry(CheckPluginName("bla_blub"), "ITEM", {}, {}),
        ],
    )
    ts.set_autochecks(
        "node1",
        [
            AutocheckEntry(CheckPluginName("smart_temp"), "auto-clustered", {}, {}),
            AutocheckEntry(CheckPluginName("smart_temp"), "auto-not-clustered", {}, {}),
        ],
    )
    ts.apply(monkeypatch)

    # Compare the key sets first for a readable failure message, then
    # each entry individually.
    assert set(check_table.get_check_table(hostname)) == set(expected_result)
    for key, value in check_table.get_check_table(hostname).items():
        assert key in expected_result
        assert expected_result[key] == value
def _create_nagios_servicedefs(cfg, config_cache, hostname, host_attrs):
    # type: (NagiosConfig, ConfigCache, HostName, ObjectAttributes) -> None
    """Write all Nagios service objects of one host into the config.

    Emits, in order: passive Check_MK services from the check table, the
    active "Check_MK" service, legacy active checks, custom checks, the
    service discovery check, and PING fallback services. Duplicate
    service descriptions are reported as configuration warnings and the
    later occurrence is dropped.
    """
    import cmk.base.check_table as check_table  # pylint: disable=import-outside-toplevel
    host_config = config_cache.get_host_config(hostname)

    check_mk_attrs = core_config.get_service_attributes(hostname, "Check_MK", config_cache)

    #   _____
    #  |___ /
    #    |_ \
    #   ___) |
    #  |____/   3. Services

    def do_omit_service(hostname, description):
        # type: (HostName, ServiceName) -> bool
        # Skip services that are ignored by rule or that belong to a
        # cluster instead of this host.
        if config.service_ignored(hostname, None, description):
            return True
        if hostname != config_cache.host_of_clustered_service(hostname, description):
            return True
        return False

    def get_dependencies(hostname, servicedesc):
        # type: (HostName, ServiceName) -> str
        # Render one servicedependency object per configured dependency.
        result = ""
        for dep in config.service_depends_on(hostname, servicedesc):
            result += _format_nagios_object(
                "servicedependency", {
                    "use": config.service_dependency_template,
                    "host_name": hostname,
                    "service_description": dep,
                    "dependent_host_name": hostname,
                    "dependent_service_description": servicedesc,
                })
        return result

    services = check_table.get_check_table(hostname, remove_duplicates=True).values()
    have_at_least_one_service = False
    used_descriptions = {}  # type: Dict[ServiceName, Tuple[CheckPluginName, Item]]
    for service in sorted(services, key=lambda s: (s.check_plugin_name, s.item)):
        if service.check_plugin_name not in config.check_info:
            continue  # simply ignore missing checks

        # Make sure, the service description is unique on this host
        if service.description in used_descriptions:
            cn, it = used_descriptions[service.description]
            # TODO: Remove "type: ignore" once we are on python3
            core_config.warning(
                "ERROR: Duplicate service description '%s' for host '%s'!\n"  # type: ignore[arg-type]
                " - 1st occurrance: checktype = %s, item = %r\n"
                " - 2nd occurrance: checktype = %s, item = %r\n" %
                (service.description, hostname, cn, it, service.check_plugin_name, service.item))
            continue

        used_descriptions[service.description] = (service.check_plugin_name, service.item)
        if config.check_info[service.check_plugin_name].get("has_perfdata", False):
            template = config.passive_service_template_perf
        else:
            template = config.passive_service_template

        # Services Dependencies for autochecks
        cfg.write(get_dependencies(hostname, service.description))

        service_spec = {
            "use": template,
            "host_name": hostname,
            "service_description": service.description,
            "check_command": "check_mk-%s" % service.check_plugin_name,
        }
        service_spec.update(
            core_config.get_cmk_passive_service_attributes(config_cache, host_config, service,
                                                           check_mk_attrs))
        service_spec.update(
            _extra_service_conf_of(cfg, config_cache, hostname, service.description))
        cfg.write(_format_nagios_object("service", service_spec))
        cfg.checknames_to_define.add(service.check_plugin_name)
        have_at_least_one_service = True

    # Active check for check_mk
    if have_at_least_one_service:
        service_spec = {
            "use": config.active_service_template,
            "host_name": hostname,
            "service_description": "Check_MK",
        }
        service_spec.update(check_mk_attrs)
        service_spec.update(_extra_service_conf_of(cfg, config_cache, hostname, "Check_MK"))
        cfg.write(_format_nagios_object("service", service_spec))

    # legacy checks via active_checks
    actchecks = []
    for plugin_name, entries in host_config.active_checks:
        cfg.active_checks_to_define.add(plugin_name)
        act_info = config.active_check_info[plugin_name]
        for params in entries:
            actchecks.append((plugin_name, act_info, params))

    if actchecks:
        cfg.write("\n\n# Active checks\n")
        for acttype, act_info, params in actchecks:
            # Make hostname available as global variable in argument functions
            check_api_utils.set_hostname(hostname)

            has_perfdata = act_info.get('has_perfdata', False)
            description = config.active_check_service_description(hostname, acttype, params)

            if do_omit_service(hostname, description):
                continue

            # compute argument, and quote ! and \ for Nagios
            args = core_config.active_check_arguments(
                hostname, description, act_info["argument_function"](params)).replace(
                    "\\", "\\\\").replace("!", "\\!")

            if description in used_descriptions:
                cn, it = used_descriptions[description]
                # If we have the same active check again with the same description,
                # then we do not regard this as an error, but simply ignore the
                # second one. That way one can override a check with other settings.
                if cn == "active(%s)" % acttype:
                    continue

                # TODO: Remove "type: ignore" once we are on python3
                core_config.warning(
                    "ERROR: Duplicate service description (active check) '%s' for host '%s'!\n"  # type: ignore[arg-type]
                    " - 1st occurrance: checktype = %s, item = %r\n"
                    " - 2nd occurrance: checktype = active(%s), item = None\n" %
                    (description, hostname, cn, it, acttype))
                continue

            used_descriptions[description] = ("active(" + acttype + ")", description)

            template = "check_mk_perf," if has_perfdata else ""

            if host_attrs["address"] in ["0.0.0.0", "::"]:
                # IP lookup failed earlier: emit a command that always
                # reports CRIT instead of calling the real active check.
                command_name = "check-mk-custom"
                command = command_name + "!echo \"CRIT - Failed to lookup IP address and no explicit IP address configured\" && exit 2"
                cfg.custom_commands_to_define.add(command_name)
            else:
                command = "check_mk_active-%s!%s" % (acttype, args)

            service_spec = {
                "use": "%scheck_mk_default" % template,
                "host_name": hostname,
                "service_description": description,
                "check_command": _simulate_command(cfg, command),
                "active_checks_enabled": str(1),
            }
            service_spec.update(
                core_config.get_service_attributes(hostname, description, config_cache))
            service_spec.update(
                _extra_service_conf_of(cfg, config_cache, hostname, description))
            cfg.write(_format_nagios_object("service", service_spec))

            # write service dependencies for active checks
            cfg.write(get_dependencies(hostname, description))

    # Legacy checks via custom_checks
    custchecks = host_config.custom_checks
    if custchecks:
        cfg.write("\n\n# Custom checks\n")
        for entry in custchecks:
            # entries are dicts with the following keys:
            # "service_description"        Service description to use
            # "command_line"  (optional)   Unix command line for executing the check
            #                              If this is missing, we create a passive check
            # "command_name"  (optional)   Name of Monitoring command to define. If missing,
            #                              we use "check-mk-custom"
            # "has_perfdata"  (optional)   If present and True, we activate perf_data
            description = config.get_final_service_description(hostname,
                                                               entry["service_description"])
            has_perfdata = entry.get("has_perfdata", False)
            command_name = entry.get("command_name", "check-mk-custom")
            command_line = entry.get("command_line", "")

            if do_omit_service(hostname, description):
                continue

            if command_line:
                command_line = core_config.autodetect_plugin(command_line).replace(
                    "\\", "\\\\").replace("!", "\\!")

            if "freshness" in entry:
                # Freshness checking: the configured command line is
                # replaced by an echo of the configured stale output.
                freshness = {
                    "check_freshness": 1,
                    "freshness_threshold": 60 * entry["freshness"]["interval"],
                }
                command_line = "echo %s && exit %d" % (_quote_nagios_string(
                    entry["freshness"]["output"]), entry["freshness"]["state"])
            else:
                freshness = {}

            cfg.custom_commands_to_define.add(command_name)

            if description in used_descriptions:
                cn, it = used_descriptions[description]
                # If we have the same active check again with the same description,
                # then we do not regard this as an error, but simply ignore the
                # second one.
                if cn == "custom(%s)" % command_name:
                    continue
                # TODO: Remove "type: ignore" once we are on python3
                core_config.warning(
                    "ERROR: Duplicate service description (custom check) '%s' for host '%s'!\n"  # type: ignore[arg-type]
                    " - 1st occurrance: checktype = %s, item = %r\n"
                    " - 2nd occurrance: checktype = custom(%s), item = %r\n" %
                    (description, hostname, cn, it, command_name, description))
                continue

            used_descriptions[description] = ("custom(%s)" % command_name, description)

            template = "check_mk_perf," if has_perfdata else ""
            command = "%s!%s" % (command_name, command_line)

            service_spec = {
                "use": "%scheck_mk_default" % template,
                "host_name": hostname,
                "service_description": description,
                "check_command": _simulate_command(cfg, command),
                "active_checks_enabled": str(1 if (command_line and not freshness) else 0),
            }
            service_spec.update(freshness)
            service_spec.update(
                core_config.get_service_attributes(hostname, description, config_cache))
            service_spec.update(
                _extra_service_conf_of(cfg, config_cache, hostname, description))
            cfg.write(_format_nagios_object("service", service_spec))

            # write service dependencies for custom checks
            cfg.write(get_dependencies(hostname, description))

    service_discovery_name = config_cache.service_discovery_name()

    # Inventory checks - if user has configured them.
    params = host_config.discovery_check_parameters
    if host_config.add_service_discovery_check(params, service_discovery_name):
        service_spec = {
            "use": config.inventory_check_template,
            "host_name": hostname,
            "service_description": service_discovery_name,
        }
        service_spec.update(
            core_config.get_service_attributes(hostname, service_discovery_name, config_cache))
        service_spec.update(
            _extra_service_conf_of(cfg, config_cache, hostname, service_discovery_name))
        # NOTE(review): retry_interval deliberately mirrors check_interval
        # here - confirm this is intended and not a copy/paste slip.
        service_spec.update({
            "check_interval": params["check_interval"],
            "retry_interval": params["check_interval"],
        })
        cfg.write(_format_nagios_object("service", service_spec))

        if have_at_least_one_service:
            # The discovery check depends on the Check_MK service so it
            # does not fire while the agent itself is failing.
            cfg.write(
                _format_nagios_object(
                    "servicedependency", {
                        "use": config.service_dependency_template,
                        "host_name": hostname,
                        "service_description": "Check_MK",
                        "dependent_host_name": hostname,
                        "dependent_service_description": service_discovery_name,
                    }))

    # No check_mk service, no legacy service -> create PING service
    if not have_at_least_one_service and not actchecks and not custchecks:
        _add_ping_service(cfg, config_cache, host_config, host_attrs["address"],
                          host_config.is_ipv6_primary and 6 or 4, "PING",
                          host_attrs.get("_NODEIPS"))

    if host_config.is_ipv4v6_host:
        # Dual-stack host: additionally ping the secondary address family.
        if host_config.is_ipv6_primary:
            _add_ping_service(cfg, config_cache, host_config, host_attrs["_ADDRESS_4"], 4,
                              "PING IPv4", host_attrs.get("_NODEIPS_4"))
        else:
            _add_ping_service(cfg, config_cache, host_config, host_attrs["_ADDRESS_6"], 6,
                              "PING IPv6", host_attrs.get("_NODEIPS_6"))
def _get_aggregated_result(
    *,
    parsed_sections_broker: ParsedSectionsBroker,
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    service: Service,
    used_params: LegacyCheckParameters,
) -> AggregatedResult:
    """Execute one legacy check plugin and wrap its outcome.

    Returns an ``AggregatedResult`` signalling one of: plugin not
    implemented, no data received, a wrapped-counter "cannot compute"
    result, a crash-dump UNKNOWN result, or the sanitized check result.
    ``MKTimeout`` is always re-raised.
    """
    legacy_check_plugin_name = config.legacy_check_plugin_names.get(service.check_plugin_name)
    if legacy_check_plugin_name is None:
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=CHECK_NOT_IMPLEMENTED,
            cache_info=None,
        )

    check_function = config.check_info[legacy_check_plugin_name].get("check_function")
    if check_function is None:
        return AggregatedResult(
            submit=True,
            data_received=True,
            result=CHECK_NOT_IMPLEMENTED,
            cache_info=None,
        )

    # Sub-checks ("section.subcheck") share the section of their main check.
    section_name = legacy_check_plugin_name.split('.')[0]
    main_check_info = config.check_info.get(section_name, {})

    section_content = None
    multi_host_sections = _MultiHostSections(parsed_sections_broker)
    # Legacy management board handling: decide whether the section comes
    # from the management board or the host itself.
    mgmt_board_info = main_check_info.get("management_board") or LEGACY_HOST_PRECEDENCE
    source_type = SourceType.MANAGEMENT if mgmt_board_info == LEGACY_MGMT_ONLY else SourceType.HOST
    try:
        section_content = multi_host_sections.get_section_content(
            HostKey(hostname, ipaddress, source_type),
            mgmt_board_info,
            section_name,
            for_discovery=False,
            cluster_node_keys=config.get_config_cache().get_clustered_service_node_keys(
                hostname,
                source_type,
                service.description,
            ),
            check_legacy_info=config.check_info,
        )

        if section_content is None:  # No data for this check type
            return AggregatedResult(
                submit=False,
                data_received=False,
                result=RECEIVED_NO_DATA,
                cache_info=None,
            )

        # Call the actual check function
        item_state.reset_wrapped_counters()
        raw_result = check_function(service.item, used_params, section_content)
        result = _sanitize_check_result(raw_result)
        item_state.raise_counter_wrap()

    except item_state.MKCounterWrapped as exc:
        # handle check implementations that do not yet support the
        # handling of wrapped counters via exception on their own.
        # Do not submit any check result in that case:
        return AggregatedResult(
            submit=False,
            data_received=True,
            result=(0, f"Cannot compute check result: {exc}\n", []),
            cache_info=None,
        )

    except MKTimeout:
        raise

    except Exception:
        if cmk.utils.debug.enabled():
            raise
        # Any other failure becomes UNKNOWN with a crash dump attached.
        result = 3, cmk.base.crash_reporting.create_check_crash_dump(
            host_name=hostname,
            service_name=service.description,
            plugin_name=service.check_plugin_name,
            plugin_kwargs={
                "item": service.item,
                "params": used_params,
                "section_content": section_content
            },
            # "manual" means the service is enforced via static checks.
            is_manual=service.id() in check_table.get_check_table(hostname, skip_autochecks=True),
        ), []

    return AggregatedResult(
        submit=True,
        data_received=True,
        result=result,
        cache_info=multi_host_sections.legacy_determine_cache_info(SectionName(section_name)),
    )
def is_manual_check(hostname, check_plugin_name, item):
    # type: (HostName, CheckPluginName, Item) -> bool
    """Return True if the given service is an enforced ("manual") check.

    A service counts as manual when it appears in the host's check table
    computed without autochecks, i.e. it stems from a static check rule.
    """
    static_only_table = check_table.get_check_table(
        hostname,
        remove_duplicates=True,
        skip_autochecks=True,
    )
    return (check_plugin_name, item) in static_only_table
def test_get_check_table(
    monkeypatch: MonkeyPatch, hostname_str: str, expected_result: HostCheckTable
) -> None:
    """Verify the computed check table for one parametrized host.

    Older variant of this test: autochecks are injected by patching
    ``config_cache.get_autochecks_of`` instead of ``Scenario.set_autochecks``.
    ``hostname_str``/``expected_result`` come from the parametrization
    (defined outside this view).
    """
    hostname = HostName(hostname_str)
    # Per-host autocheck fixtures; hosts not listed get an empty list.
    autochecks = {
        "ping-host": [
            Service(
                CheckPluginName("smart_temp"),
                "bla",
                "Temperature SMART bla",
                {},
            )
        ],
        "autocheck-overwrite": [
            Service(
                CheckPluginName("smart_temp"),
                "/dev/sda",
                "Temperature SMART /dev/sda",
                {"is_autocheck": True},
            ),
            Service(
                CheckPluginName("smart_temp"),
                "/dev/sdb",
                "Temperature SMART /dev/sdb",
                {"is_autocheck": True},
            ),
        ],
        "ignore-not-existing-checks": [
            Service(
                CheckPluginName("bla_blub"),
                "ITEM",
                "Blub ITEM",
                {},
            ),
        ],
        "node1": [
            Service(
                CheckPluginName("smart_temp"),
                "auto-clustered",
                "Temperature SMART auto-clustered",
                {},
            ),
            Service(
                CheckPluginName("smart_temp"),
                "auto-not-clustered",
                "Temperature SMART auto-not-clustered",
                {},
            ),
        ],
    }
    ts = Scenario().add_host(hostname, tags={"criticality": "test"})
    ts.add_host("ping-host", tags={"agent": "no-agent"})
    ts.add_host("node1")
    ts.add_cluster("cluster1", nodes=["node1"])
    # Enforced ("static") services. The optional trailing dict marks a
    # rule as disabled, which must keep it out of the check table.
    ts.set_option(
        "static_checks",
        {
            "temperature": [
                (("smart.temp", "/dev/sda", {}), [], ["no-autochecks", "autocheck-overwrite"]),
                (("blub.bla", "ITEM", {}), [], ["ignore-not-existing-checks"]),
                (("smart.temp", "ITEM1", {}), [], ["ignore-disabled-rules"], {"disabled": True}),
                (("smart.temp", "ITEM2", {}), [], ["ignore-disabled-rules"]),
                (("smart.temp", "/dev/sda", {"rule": 1}), [], ["static-check-overwrite"]),
                (("smart.temp", "/dev/sda", {"rule": 2}), [], ["static-check-overwrite"]),
                (("smart.temp", "static-node1", {}), [], ["node1"]),
                (("smart.temp", "static-cluster", {}), [], ["cluster1"]),
            ]
        },
    )
    # Services matching this description pattern on node1 are assigned
    # to the cluster instead of the node.
    ts.set_ruleset(
        "clustered_services",
        [
            ([], ["node1"], ["Temperature SMART auto-clustered$"]),
        ],
    )
    config_cache = ts.apply(monkeypatch)
    # Inject the autocheck fixtures directly into the config cache.
    monkeypatch.setattr(config_cache, "get_autochecks_of", lambda h: autochecks.get(h, []))

    assert check_table.get_check_table(hostname) == expected_result
def do_check(
    hostname: HostName,
    ipaddress: Optional[HostAddress],
    *,
    # The following arguments *must* remain optional for Nagios and the `DiscoCheckExecutor`.
    #   See Also: `cmk.base.discovery.check_discovery()`
    fetcher_messages: Sequence[FetcherMessage] = (),
    run_plugin_names: Container[CheckPluginName] = EVERYTHING,
    selected_sections: SectionNameCollection = NO_SELECTION,
    dry_run: bool = False,
    show_perfdata: bool = False,
) -> Tuple[int, List[ServiceDetails], List[ServiceAdditionalDetails], List[str]]:
    """Run the "Check_MK" service for a host.

    Fetches/uses agent data (depending on whether ``fetcher_messages``
    were pre-fetched), runs all selected check plugins, optionally
    triggers inventory actions, and returns the Check_MK service result
    as ``(status, infotexts, long_infotexts, perfdata)``.

    Returns:
        Tuple of overall state (0-3), short info texts, long output
        lines, and perfdata strings.
    """
    console.vverbose("Checkmk version %s\n", cmk_version.__version__)

    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    exit_spec = host_config.exit_code_spec()

    mode = Mode.CHECKING if selected_sections is NO_SELECTION else Mode.FORCE_SECTIONS

    status: ServiceState = 0
    infotexts: List[ServiceDetails] = []
    long_infotexts: List[ServiceAdditionalDetails] = []
    perfdata: List[str] = []
    try:
        license_usage.try_history_update()

        # In case of keepalive we always have an ipaddress (can be 0.0.0.0 or :: when
        # address is unknown). When called as non keepalive ipaddress may be None or
        # is already an address (2nd argument)
        if ipaddress is None and not host_config.is_cluster:
            ipaddress = config.lookup_ip_address(host_config)

        # Check table sorted by description so dependency resolution
        # sees a deterministic order.
        services = config.resolve_service_dependencies(
            host_name=hostname,
            services=sorted(
                check_table.get_check_table(hostname).values(),
                key=lambda service: service.description,
            ),
        )
        with CPUTracker() as tracker:
            broker, source_results = make_broker(
                config_cache=config_cache,
                host_config=host_config,
                ip_address=ipaddress,
                mode=mode,
                selected_sections=selected_sections,
                file_cache_max_age=host_config.max_cachefile_age,
                fetcher_messages=fetcher_messages,
                force_snmp_cache_refresh=False,
                on_scan_error="raise",
            )
            num_success, plugins_missing_data = do_all_checks_on_host(
                config_cache=config_cache,
                host_config=host_config,
                ipaddress=ipaddress,
                parsed_sections_broker=broker,
                services=services,
                run_plugin_names=run_plugin_names,
                dry_run=dry_run,
                show_perfdata=show_perfdata,
            )

            if run_plugin_names is EVERYTHING:
                # Inventory only piggybacks on a full (unrestricted) run.
                inventory.do_inventory_actions_during_checking_for(
                    config_cache,
                    host_config,
                    ipaddress,
                    parsed_sections_broker=broker,
                )

            # Fold per-source summaries into the overall service state.
            for source, host_sections in source_results:
                source_state, source_output = source.summarize(host_sections)
                if source_output != "":
                    status = worst_service_state(status, source_state, default=3)
                    infotexts.append("[%s] %s" % (source.id, source_output))

            if plugins_missing_data:
                missing_data_status, missing_data_infotext = _check_plugins_missing_data(
                    plugins_missing_data,
                    exit_spec,
                    bool(num_success),
                )
                status = max(status, missing_data_status)
                infotexts.append(missing_data_infotext)

        # Total runtime = our own checking time plus the fetchers' time.
        total_times = tracker.duration
        for msg in fetcher_messages:
            total_times += msg.stats.duration

        infotexts.append("execution time %.1f sec" % total_times.process.elapsed)

        if config.check_mk_perfdata_with_times:
            perfdata += [
                "execution_time=%.3f" % total_times.process.elapsed,
                "user_time=%.3f" % total_times.process.user,
                "system_time=%.3f" % total_times.process.system,
                "children_user_time=%.3f" % total_times.process.children_user,
                "children_system_time=%.3f" % total_times.process.children_system,
            ]
            # Aggregate fetcher durations per data source phase; TCP and
            # piggyback both count towards "agent".
            summary: DefaultDict[str, Snapshot] = defaultdict(Snapshot.null)
            for msg in fetcher_messages if fetcher_messages else ():
                if msg.fetcher_type in (
                        FetcherType.PIGGYBACK,
                        FetcherType.PROGRAM,
                        FetcherType.SNMP,
                        FetcherType.TCP,
                ):
                    summary[{
                        FetcherType.PIGGYBACK: "agent",
                        FetcherType.PROGRAM: "ds",
                        FetcherType.SNMP: "snmp",
                        FetcherType.TCP: "agent",
                    }[msg.fetcher_type]] += msg.stats.duration
            for phase, duration in summary.items():
                perfdata.append("cmk_time_%s=%.3f" % (phase, duration.idle))
        else:
            perfdata.append("execution_time=%.3f" % total_times.process.elapsed)

        return status, infotexts, long_infotexts, perfdata
    finally:
        # Always flush pending submissions, even on error.
        _submit_to_core.finalize()
def dump_host(hostname):
    # type: (HostName) -> None
    """Print a human-readable summary of one host's configuration.

    Writes addresses, tags, labels, parents, groups, data sources and
    the full service table of ``hostname`` to the console (used by
    ``cmk -D``).
    """
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    console.output("\n")
    if host_config.is_cluster:
        nodes = host_config.nodes
        if nodes is None:
            # A cluster without nodes is a broken configuration.
            raise RuntimeError()
        color = tty.bgmagenta
        add_txt = " (cluster of " + (", ".join(nodes)) + ")"
    else:
        color = tty.bgblue
        add_txt = ""
    console.output("%s%s%s%-78s %s\n" %
                   (color, tty.bold, tty.white, hostname + add_txt, tty.normal))

    ipaddress = _ip_address_for_dump_host(host_config)

    addresses = ""  # type: Optional[str]
    if not host_config.is_ipv4v6_host:
        addresses = ipaddress
    else:
        # Dual-stack host: also show the secondary address family.
        try:
            if host_config.is_ipv6_primary:
                secondary = _ip_address_for_dump_host(host_config, 4)
            else:
                secondary = _ip_address_for_dump_host(host_config, 6)
        except Exception:
            secondary = "X.X.X.X"

        addresses = "%s, %s" % (ipaddress, secondary)
        if host_config.is_ipv6_primary:
            addresses += " (Primary: IPv6)"
        else:
            addresses += " (Primary: IPv4)"

    console.output(tty.yellow + "Addresses: " + tty.normal +
                   (addresses if addresses is not None else "No IP") + "\n")

    tag_template = tty.bold + "[" + tty.normal + "%s" + tty.bold + "]" + tty.normal
    tags = [(tag_template % ":".join(t)) for t in sorted(host_config.tag_groups.iteritems())]
    console.output(tty.yellow + "Tags: " + tty.normal + ", ".join(tags) + "\n")

    labels = [(tag_template % ":".join(l)).encode("utf-8")
              for l in sorted(host_config.labels.iteritems())]
    console.output(tty.yellow + "Labels: " + tty.normal + ", ".join(labels) + "\n")

    # TODO: Clean this up once cluster parent handling has been moved to HostConfig
    if host_config.is_cluster:
        parents_list = host_config.nodes
        if parents_list is None:
            raise RuntimeError()
    else:
        parents_list = host_config.parents
    if len(parents_list) > 0:
        console.output(tty.yellow + "Parents: " + tty.normal + ", ".join(parents_list) + "\n")
    console.output(tty.yellow + "Host groups: " + tty.normal +
                   make_utf8(", ".join(host_config.hostgroups)) + "\n")
    console.output(tty.yellow + "Contact groups: " + tty.normal +
                   make_utf8(", ".join(host_config.contactgroups)) + "\n")

    agenttypes = []
    sources = data_sources.DataSources(hostname, ipaddress)
    for source in sources.get_data_sources():
        agenttypes.append(source.describe())

    if host_config.is_ping_host:
        agenttypes.append('PING only')

    console.output(tty.yellow + "Agent mode: " + tty.normal)
    console.output(sources.describe_data_sources() + "\n")

    console.output(tty.yellow + "Type of agent: " + tty.normal)
    if len(agenttypes) == 1:
        console.output(agenttypes[0] + "\n")
    else:
        console.output("\n ")
        console.output("\n ".join(agenttypes) + "\n")

    console.output(tty.yellow + "Services:" + tty.normal + "\n")

    headers = ["checktype", "item", "params", "description", "groups"]
    colors = [tty.normal, tty.blue, tty.normal, tty.green, tty.normal]

    table_data = []  # type: tty.TableRows
    for service in sorted(check_table.get_check_table(hostname).values(),
                          key=lambda s: s.description):
        table_data.append(
            (service.check_plugin_name, make_utf8("%s" % service.item),
             _evaluate_params(service.parameters), make_utf8(service.description),
             make_utf8(",".join(
                 config_cache.servicegroups_of_service(hostname, service.description)))))

    tty.print_table(headers, colors, table_data, " ")