def test_get_registered_check_plugins(monkeypatch):
    """A registered plugin must be retrievable, and a 'mgmt_' twin is derived."""
    # Only the name and the service name matter for the lookup logic under
    # test; the remaining ten constructor fields are irrelevant here.
    dummy = CheckPlugin(
        CheckPluginName("check_unit_test"),
        [],
        "Unit Test",
        *[None] * 10,  # type: ignore # irrelevant for test
    )

    monkeypatch.setattr(
        agent_based_register._config,
        "registered_check_plugins",
        {dummy.name: dummy},
    )

    assert agent_based_register.is_registered_check_plugin(dummy.name)
    assert agent_based_register.get_check_plugin(dummy.name) is dummy

    # Unknown names yield None, even with a mgmt_ prefix.
    assert agent_based_register.get_check_plugin(
        CheckPluginName("mgmt_this_should_not_exists")) is None

    # For registered plugins a management-board variant is synthesized.
    mgmt_plugin = agent_based_register.get_check_plugin(
        CheckPluginName("mgmt_%s" % dummy.name))
    assert mgmt_plugin is not None
    assert str(mgmt_plugin.name).startswith("mgmt_")
    assert mgmt_plugin.service_name.startswith("Management Interface: ")
def _execute_check(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    *,
    dry_run: bool,
    show_perfdata: bool,
    value_store_manager: value_store.ValueStoreManager,
) -> bool:
    """Run one check, submit (or log) its result; return whether data arrived."""
    submittable = get_aggregated_result(
        parsed_sections_broker,
        host_config,
        ipaddress,
        service,
        agent_based_register.get_check_plugin(service.check_plugin_name),
        service.parameters,
        value_store_manager=value_store_manager,
        persist_value_store_changes=not dry_run,  # value stores stay untouched on dry runs
    )

    if not submittable.submit:
        # Nothing to hand to the core -- just log the pending state.
        console.verbose(
            f"{service.description:20} PEND - {submittable.result.output}\n")
        return submittable.data_received

    _submit_to_core.check_result(
        host_name=host_config.hostname,
        service_name=service.description,
        result=submittable.result,
        cache_info=submittable.cache_info,
        dry_run=dry_run,
        show_perfdata=show_perfdata,
    )
    return submittable.data_received
def test_inventory_sap_hana_instance_status(info, expected_result):
    """Discovery of sap_hana_instance_status must yield the expected services."""
    parse = register.get_section_plugin(
        SectionName("sap_hana_instance_status")).parse_function
    plugin = register.get_check_plugin(
        CheckPluginName("sap_hana_instance_status"))
    if plugin:
        assert list(plugin.discovery_function(parse(info))) == expected_result
def _keep_service(
    config_cache: config.ConfigCache,
    host_config: config.HostConfig,
    service: Service,
    filter_mode: Optional[Literal["only_clustered", "include_clustered"]],
    skip_ignored: bool,
) -> bool:
    """Decide whether *service* belongs on this host's service list.

    Unknown plugins and (optionally) ignored services are dropped; the rest
    of the decision depends on cluster membership and *filter_mode*.
    """
    hostname = host_config.hostname

    # Services of plugins we do not know cannot be monitored -> drop.
    if agent_based_register.get_check_plugin(service.check_plugin_name) is None:
        return False

    if skip_ignored and config.service_ignored(
            hostname, service.check_plugin_name, service.description):
        return False

    if filter_mode == "include_clustered":
        return True

    if not host_config.part_of_clusters:
        # Non-clustered host: keep everything unless the caller asked for
        # clustered services only.
        return filter_mode != "only_clustered"

    owner = config_cache.host_of_clustered_service(
        hostname,
        service.description,
        part_of_clusters=host_config.part_of_clusters,
    )
    is_mine = hostname == owner

    # filter_mode None keeps our own services; "only_clustered" keeps the rest.
    return is_mine if filter_mode is None else not is_mine
def test_check_sap_hana_db_status(item, info, expected_result):
    """Check output of sap_hana_db_status must match the expected results."""
    parse = register.get_section_plugin(
        SectionName("sap_hana_db_status")).parse_function
    plugin = register.get_check_plugin(CheckPluginName("sap_hana_db_status"))
    if plugin:
        assert list(plugin.check_function(item, parse(info))) == expected_result
def _keep_service(
    self,
    service: Service,
    filter_mode: Optional[str],
    skip_ignored: bool,
) -> bool:
    """Return True if *service* should stay on this host's service list."""
    hostname = self._host_config.hostname

    # Unknown plugins can never produce results -> drop.
    if agent_based_register.get_check_plugin(service.check_plugin_name) is None:
        return False

    if skip_ignored and config.service_ignored(
            hostname, service.check_plugin_name, service.description):
        return False

    # On cluster nodes a service may actually belong to the cluster host.
    svc_is_mine = True
    if self._host_config.part_of_clusters:
        svc_is_mine = hostname == self._config_cache.host_of_clustered_service(
            hostname,
            service.description,
            part_of_clusters=self._host_config.part_of_clusters,
        )

    if filter_mode is None:
        return svc_is_mine
    if filter_mode == "only_clustered":
        return not svc_is_mine
    # Any other mode (e.g. "include_clustered") keeps the service.
    return True
def keep(self, service: Service) -> bool:
    """Return True if *service* should be kept according to the filter mode."""
    # Services of unknown plugins are never kept.
    if agent_based_register.get_check_plugin(service.check_plugin_name) is None:
        return False

    if self._skip_ignored and config.service_ignored(
            self._host_name,
            service.check_plugin_name,
            service.description,
    ):
        return False

    if self._mode is FilterMode.INCLUDE_CLUSTERED:
        return True
    if not self._host_part_of_clusters:
        # Non-clustered host: everything stays unless only clustered
        # services were requested.
        return self._mode is not FilterMode.ONLY_CLUSTERED

    owner = self._config_cache.host_of_clustered_service(
        self._host_name,
        service.description,
        part_of_clusters=self._host_part_of_clusters,
    )
    mine = self._host_name == owner

    # NONE keeps our own services, ONLY_CLUSTERED keeps the others.
    return mine if self._mode is FilterMode.NONE else not mine
def _check_preview_table_row(
    *,
    host_config: config.HostConfig,
    ip_address: Optional[HostAddress],
    service: Service,
    check_source: str,
    parsed_sections_broker: ParsedSectionsBroker,
    found_on_nodes: List[HostName],
    value_store_manager: ValueStoreManager,
) -> CheckPreviewEntry:
    """Build one row of the discovery preview table for *service*."""
    plugin = agent_based_register.get_check_plugin(service.check_plugin_name)
    params = _preview_params(host_config.hostname, service, plugin, check_source)

    if check_source in ['legacy', 'active', 'custom']:
        # These sources cannot be executed offline -> no result computed.
        exitcode = None
        output = u"WAITING - %s check, cannot be done offline" % check_source.title()
        ruleset_name: Optional[RulesetName] = None
    else:
        ruleset_name = (str(plugin.check_ruleset_name)
                        if plugin and plugin.check_ruleset_name else None)
        wrapped_params = (Parameters(wrap_parameters(params))
                          if plugin and plugin.check_default_parameters is not None
                          else None)
        exitcode, output, _perfdata = checking.get_aggregated_result(
            parsed_sections_broker,
            host_config,
            ip_address,
            service,
            plugin,
            lambda p=wrapped_params: p,  # type: ignore[misc]  # "type of lambda"
            value_store_manager=value_store_manager,
            persist_value_store_changes=False,  # never during discovery
        ).result

    # Service discovery never uses the perfdata in the check table. That entry
    # is constantly discarded, yet passed around (back and forth) as part of
    # the discovery result in the request elements. Some perfdata VALUES are
    # not parsable by ast.literal_eval such as "inf", which led to
    # ValueErrors. Thus keep perfdata empty.
    perfdata: List[MetricTuple] = []

    return (
        _preview_check_source(host_config.hostname, service, check_source),
        str(service.check_plugin_name),
        ruleset_name,
        service.item,
        service.parameters,
        params,
        service.description,
        exitcode,
        output,
        perfdata,
        service.service_labels.to_dict(),
        found_on_nodes,
    )
def test_check_sap_hana_diskusage(value_store_patch, item, info, expected_result):
    """Check output of sap_hana_diskusage must match the expected results."""
    parse = register.get_section_plugin(
        SectionName("sap_hana_diskusage")).parse_function
    plugin = register.get_check_plugin(CheckPluginName("sap_hana_diskusage"))
    if plugin:
        # Empty params dict: the check runs with defaults here.
        assert list(plugin.check_function(item, {}, parse(info))) == expected_result
def test_cmciii_leakage_sensors(status, position, expected):
    """Leakage check results must match for the given sensor status/position."""
    section_plugin = agent_based_register.get_section_plugin(SectionName('cmciii'))
    assert section_plugin
    check = agent_based_register.get_check_plugin(CheckPluginName('cmciii_leakage'))
    assert check

    item, info = _leakage_info(status, position)
    parsed = section_plugin.parse_function(info)
    results = list(check.check_function(item=item, params={}, section=parsed))
    assert results == expected
def execute_check(
    multi_host_sections: MultiHostSections,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    *,
    submit_to_core: bool,
    show_perfdata: bool,
) -> bool:
    """Execute one check, submit its result, and return whether data arrived."""
    plugin = agent_based_register.get_check_plugin(service.check_plugin_name)

    # Make a bit of context information globally available, so that functions
    # called by checks know this context. set_service is needed for predictive
    # levels! Used for both the legacy and the agent_based API.
    # TODO: This should be a context manager, similar to value_store
    # (f.k.a. item_state).
    check_api_utils.set_service(str(service.check_plugin_name), service.description)

    # Legacy-mode detection -- remove this block entirely one day.
    use_legacy = (plugin is not None and host_config.is_cluster and
                  plugin.cluster_check_function.__name__ == "cluster_legacy_mode_from_hell")
    if use_legacy:
        with _service_context(service):
            return _execute_check_legacy_mode(
                multi_host_sections,
                host_config.hostname,
                ipaddress,
                service,
                submit_to_core=submit_to_core,
                show_perfdata=show_perfdata,
            )

    submit, data_received, result = get_aggregated_result(
        multi_host_sections,
        host_config,
        ipaddress,
        service,
        plugin,
        lambda: determine_check_params(service.parameters),
    )

    if submit:
        _submit_check_result(
            host_config.hostname,
            service.description,
            result,
            multi_host_sections.get_cache_info(plugin.sections) if plugin else None,
            submit_to_core=submit_to_core,
            show_perfdata=show_perfdata,
        )
    elif data_received:
        console.verbose("%-20s PEND - %s\n", ensure_str(service.description), result[1])

    return data_received
def _discover_plugins_services(
    *,
    check_plugin_name: CheckPluginName,
    host_key: HostKey,
    parsed_sections_broker: ParsedSectionsBroker,
    on_error: OnError,
) -> Iterator[AutocheckEntry]:
    """Run one plugin's discovery function and yield its autocheck entries."""
    # Skip this check type if is ignored for that host
    if config.service_ignored(host_key.hostname, check_plugin_name, None):
        console.vverbose(" Skip ignored check plugin name '%s'\n" % check_plugin_name)
        return

    check_plugin = agent_based_register.get_check_plugin(check_plugin_name)
    if check_plugin is None:
        console.warning(" Missing check plugin: '%s'\n" % check_plugin_name)
        return

    try:
        kwargs = get_section_kwargs(parsed_sections_broker, host_key,
                                    check_plugin.sections)
    except Exception as exc:
        if cmk.utils.debug.enabled() or on_error is OnError.RAISE:
            raise
        if on_error is OnError.WARN:
            console.warning(" Exception while parsing agent section: %s\n" % exc)
        return
    if not kwargs:
        # No section data at all -> nothing to discover.
        return

    disco_params = config.get_discovery_parameters(host_key.hostname, check_plugin)
    if disco_params is not None:
        kwargs = {**kwargs, "params": disco_params}

    try:
        for discovered in check_plugin.discovery_function(**kwargs):
            yield AutocheckEntry(
                check_plugin_name=check_plugin.name,
                item=discovered.item,
                parameters=unwrap_parameters(discovered.parameters),
                # Convert from APIs ServiceLabel to internal ServiceLabel
                service_labels={
                    label.name: label.value for label in discovered.labels
                },
            )
    except Exception as e:
        if on_error is OnError.RAISE:
            raise
        if on_error is OnError.WARN:
            console.warning(
                " Exception in discovery function of check plugin '%s': %s" %
                (check_plugin.name, e))
def __init__(self, name):
    """Wrap the legacy check *name*, resolving its info, context and migrated plugin.

    Raises:
        MissingCheckInfoError: if no legacy check with this name is configured.
    """
    import cmk.base.config as config  # pylint: disable=import-outside-toplevel
    from cmk.base.api.agent_based import register  # pylint: disable=import-outside-toplevel
    # Modernized: zero-argument super() instead of super(Check, self) --
    # equivalent in Python 3 and not sensitive to class renames.
    super().__init__(name)
    if self.name not in config.check_info:
        raise MissingCheckInfoError(self.name)
    self.info = config.check_info[self.name]
    self.context = config._check_contexts[self.name]
    # Legacy names use '.' as subcheck separator, migrated plugin names use '_'.
    self._migrated_plugin = register.get_check_plugin(
        config.CheckPluginName(self.name.replace('.', '_')))
def _discover_plugins_services(
    *,
    check_plugin_name: CheckPluginName,
    host_name: HostName,
    ipaddress: Optional[HostAddress],
    parsed_sections_broker: ParsedSectionsBroker,
    on_error: OnError,
) -> Iterator[Service]:
    """Yield the services discovered by one check plugin on one host."""
    # Skip this check type if is ignored for that host
    if config.service_ignored(host_name, check_plugin_name, None):
        console.vverbose(" Skip ignored check plugin name '%s'\n" % check_plugin_name)
        return

    check_plugin = agent_based_register.get_check_plugin(check_plugin_name)
    if check_plugin is None:
        console.warning(" Missing check plugin: '%s'\n" % check_plugin_name)
        return

    # Management-board plugins read their sections from the management source.
    source_type = (SourceType.MANAGEMENT
                   if check_plugin.name.is_management_name() else SourceType.HOST)
    host_key = HostKey(host_name, ipaddress, source_type)

    try:
        kwargs = parsed_sections_broker.get_section_kwargs(
            host_key, check_plugin.sections)
    except Exception as exc:
        if cmk.utils.debug.enabled() or on_error is OnError.RAISE:
            raise
        if on_error is OnError.WARN:
            console.warning(" Exception while parsing agent section: %s\n" % exc)
        return
    if not kwargs:
        return

    disco_params = config.get_discovery_parameters(host_name, check_plugin)
    if disco_params is not None:
        kwargs["params"] = disco_params

    try:
        yield from _enriched_discovered_services(
            host_name, check_plugin.name,
            check_plugin.discovery_function(**kwargs))
    except Exception as e:
        if on_error is OnError.RAISE:
            raise
        if on_error is OnError.WARN:
            console.warning(
                " Exception in discovery function of check plugin '%s': %s" %
                (check_plugin.name, e))
def _transformed_params(
    self,
    plugin_name: CheckPluginName,
    params: Any,
    all_rulesets: cmk.gui.watolib.rulesets.AllRulesets,
    hostname: str,
) -> Any:
    """Run *params* through the valuespec transform of the plugin's ruleset.

    Returns the transformed parameters, or None if the plugin or its ruleset
    is unknown or the transformation fails (with debug disabled).
    """
    check_plugin = register.get_check_plugin(plugin_name)
    if check_plugin is None:
        return None

    ruleset_name = "checkgroup_parameters:%s" % check_plugin.check_ruleset_name
    if ruleset_name not in all_rulesets.get_rulesets():
        return None

    debug_info = "host=%r, plugin=%r, ruleset=%r, params=%r" % (
        hostname, str(plugin_name), str(check_plugin.check_ruleset_name), params)

    try:
        ruleset = all_rulesets.get_rulesets()[ruleset_name]

        # TODO: in order to keep the original input parameters and to identify
        # misbehaving transform_values() implementations we check the passed
        # values for modifications. In that case we have to fix that
        # transform_values() before using it. This hack chould vanish as soon
        # as we know transform_values() works as expected.
        snapshot = copy.deepcopy(params)
        new_params = ruleset.valuespec().transform_value(snapshot) if params else {}
        if not snapshot == params:
            self._logger.warning(
                "transform_value() for ruleset '%s' altered input" %
                check_plugin.check_ruleset_name)

        assert new_params or not params, "non-empty params vanished"
        assert not isinstance(params, dict) or isinstance(new_params, dict), (
            "transformed params down-graded from dict: %r" % new_params)

        # TODO: in case of known exceptions we don't want the transformed
        # values be combined with old keys. As soon as we can remove the
        # workaround below we should not handle any ruleset differently.
        if str(check_plugin.check_ruleset_name) in {"if"}:
            return new_params

        # TODO: some transform_value() implementations (e.g. 'ps') return
        # parameter with missing keys - so for safety-reasons we keep keys
        # that don't exist in the transformed values. On the flipside this
        # can lead to problems with the check itself and should be vanished
        # as soon as we can be sure no keys are deleted accidentally.
        if isinstance(params, dict):
            return {**params, **new_params}
        return new_params

    except Exception as exc:
        msg = "Transform failed: %s, error=%r" % (debug_info, exc)
        if self._arguments.debug:
            raise RuntimeError(msg) from exc
        self._logger.error(msg)

    return None
def _execute_check(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    *,
    dry_run: bool,
    show_perfdata: bool,
    value_store_manager: value_store.ValueStoreManager,
) -> bool:
    """Execute one check, submit (or log) the result, report data reception."""
    plugin = agent_based_register.get_check_plugin(service.check_plugin_name)

    # Check whether we must take the legacy code path.
    # Remove this block entirely one day.
    legacy_cluster_mode = (
        plugin is not None and host_config.is_cluster and
        plugin.cluster_check_function.__name__ == "cluster_legacy_mode_from_hell")

    if legacy_cluster_mode:
        submittable = _legacy_mode.get_aggregated_result(
            parsed_sections_broker,
            host_config.hostname,
            ipaddress,
            service,
            used_params=(
                # time_resolved_check_parameters(service.parameters)
                # if isinstance(service.parameters, cmk.base.config.TimespecificParamList) else
                service.parameters),
            value_store_manager=value_store_manager,
        )
    else:
        # This is the new, shiny, 'normal' case.
        submittable = get_aggregated_result(
            parsed_sections_broker,
            host_config,
            ipaddress,
            service,
            plugin,
            lambda: _final_read_only_check_parameters(service.parameters),
            value_store_manager=value_store_manager,
        )

    if not submittable.submit:
        console.verbose(
            f"{service.description:20} PEND - {submittable.result[1]}\n")
        return submittable.data_received

    _submit_to_core.check_result(
        host_name=host_config.hostname,
        service_name=service.description,
        result=submittable.result,
        cache_info=submittable.cache_info,
        dry_run=dry_run,
        show_perfdata=show_perfdata,
    )
    return submittable.data_received
def execute_check(
    multi_host_sections: MultiHostSections,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: Service,
    *,
    submit_to_core: bool,
    show_perfdata: bool,
) -> bool:
    """Run one check, hand its result to the core; return data reception."""
    plugin = agent_based_register.get_check_plugin(service.check_plugin_name)

    # check if we must use legacy mode. remove this block entirely one day
    if (plugin is not None and host_config.is_cluster and
            plugin.cluster_check_function.__name__ == "cluster_legacy_mode_from_hell"):
        with _service_context(service):
            return _execute_check_legacy_mode(
                multi_host_sections,
                host_config.hostname,
                ipaddress,
                service,
                submit_to_core=submit_to_core,
                show_perfdata=show_perfdata,
            )

    submit, data_received, result = get_aggregated_result(
        multi_host_sections,
        host_config,
        ipaddress,
        service,
        plugin,
        lambda: determine_check_params(service.parameters),
    )

    if not submit:
        if data_received:
            console.verbose("%-20s PEND - %s\n", ensure_str(service.description),
                            result[1])
        return data_received

    cache_info = (multi_host_sections.get_cache_info(plugin.sections)
                  if plugin else None)
    _submit_check_result(
        host_config.hostname,
        service.description,
        result,
        cache_info,
        submit_to_core=submit_to_core,
        show_perfdata=show_perfdata,
    )
    return data_received
def _transformed_params(
    self,
    plugin_name: CheckPluginName,
    params,
    all_rulesets: cmk.gui.watolib.rulesets.AllRulesets,
) -> Any:
    """Transform *params* via the valuespec of the plugin's parameter ruleset.

    Returns None if the plugin or its ruleset is unknown; otherwise the
    transformed parameters (for dicts: merged over the original keys).
    """
    check_plugin = register.get_check_plugin(plugin_name)
    if check_plugin is None:
        return None

    ruleset_name = "checkgroup_parameters:%s" % check_plugin.check_ruleset_name
    if ruleset_name not in all_rulesets.get_rulesets():
        return None

    ruleset = all_rulesets.get_rulesets()[ruleset_name]
    new_params = {} if params is None else ruleset.valuespec().transform_value(params)
    if params and not new_params:
        # Fixed: the message previously lacked the closing parenthesis.
        self._logger.warning("Transforming %r returned empty (plugin=%s, ruleset=%r)" %
                             (params, plugin_name, ruleset_name))

    assert not isinstance(params, dict) or isinstance(new_params, dict), (
        "if params had been a dict transformed params should be a dict, too.")

    # Keep original keys the transform dropped (dict params only) -- some
    # transform_value() implementations return parameters with missing keys.
    return {**params, **new_params} if isinstance(params, dict) else new_params
def _execute_check(
    parsed_sections_broker: ParsedSectionsBroker,
    host_config: config.HostConfig,
    ipaddress: Optional[HostAddress],
    service: ConfiguredService,
    *,
    dry_run: bool,
    show_perfdata: bool,
    value_store_manager: value_store.ValueStoreManager,
) -> bool:
    """Run one configured service's check and submit or log its result."""
    submittable = get_aggregated_result(
        parsed_sections_broker,
        host_config,
        ipaddress,
        service,
        agent_based_register.get_check_plugin(service.check_plugin_name),
        value_store_manager=value_store_manager,
        persist_value_store_changes=not dry_run,  # never persist on dry runs
    )

    if not submittable.submit:
        console.verbose(
            f"{service.description:20} PEND - {submittable.result.output}\n")
        return submittable.data_received

    _submit_to_core.check_result(
        host_name=host_config.hostname,
        service_name=service.description,
        result=submittable.result,
        cache_info=submittable.cache_info,
        submitter=_submit_to_core.get_submitter(
            check_submission=config.check_submission,
            monitoring_core=config.monitoring_core,
            dry_run=dry_run,
            keepalive=get_keepalive(cmk_version.edition()),
        ),
        show_perfdata=show_perfdata,
        perfdata_format="pnp" if config.perfdata_format == "pnp" else "standard",
    )
    return submittable.data_received
def get_check_preview(
    *,
    host_name: HostName,
    max_cachefile_age: int,
    use_cached_snmp_data: bool,
    on_error: str,
) -> Tuple[CheckPreviewTable, QualifiedDiscovery[HostLabel]]:
    """Get the list of service of a host or cluster and guess the current state of
    all services if possible"""
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(host_name)

    # Clusters have no address of their own.
    ip_address = (None if host_config.is_cluster else
                  config.lookup_ip_address(host_config))

    discovery_parameters = DiscoveryParameters(
        on_error=on_error,
        load_labels=True,
        save_labels=False,
        only_host_labels=False,
    )

    _set_cache_opts_of_checkers(use_cached_snmp_data=use_cached_snmp_data)

    parsed_sections_broker, _source_results = make_broker(
        config_cache=config_cache,
        host_config=host_config,
        ip_address=ip_address,
        mode=Mode.DISCOVERY,
        file_cache_max_age=max_cachefile_age,
        selected_sections=NO_SELECTION,
        fetcher_messages=(),
        force_snmp_cache_refresh=not use_cached_snmp_data,
        on_scan_error=on_error,
    )

    grouped_services, host_label_result = _get_host_services(
        host_config,
        ip_address,
        parsed_sections_broker,
        discovery_parameters,
    )

    table: CheckPreviewTable = []
    for source, entries in grouped_services.items():
        for service, found_on_nodes in entries:
            plugin = agent_based_register.get_check_plugin(service.check_plugin_name)
            params = _preview_params(host_name, service, plugin, source)

            if source in ['legacy', 'active', 'custom']:
                # These sources cannot be executed offline.
                exitcode = None
                output = u"WAITING - %s check, cannot be done offline" % source.title()
                ruleset_name: Optional[RulesetName] = None
            else:
                ruleset_name = (str(plugin.check_ruleset_name)
                                if plugin and plugin.check_ruleset_name else None)
                wrapped_params = (Parameters(wrap_parameters(params))
                                  if plugin and plugin.check_default_parameters is not None
                                  else None)
                exitcode, output, _perfdata = checking.get_aggregated_result(
                    parsed_sections_broker,
                    host_config,
                    ip_address,
                    service,
                    plugin,
                    lambda p=wrapped_params: p,  # type: ignore[misc]  # "type of lambda"
                ).result

            # Service discovery never uses the perfdata in the check table.
            # That entry is constantly discarded, yet passed around (back and
            # forth) as part of the discovery result in the request elements.
            # Some perfdata VALUES are not parsable by ast.literal_eval such
            # as "inf"; it lead to ValueErrors. Thus keep perfdata empty.
            perfdata: List[MetricTuple] = []
            table.append((
                _preview_check_source(host_name, service, source),
                str(service.check_plugin_name),
                ruleset_name,
                service.item,
                service.parameters,
                params,
                service.description,
                exitcode,
                output,
                perfdata,
                service.service_labels.to_dict(),
                found_on_nodes,
            ))

    return table, host_label_result