def test_get_effective_service_level(monkeypatch):
    """Effective service level: service-level rules win over host-level
    rules, the first matching host rule wins, and hosts without any
    matching rule fall back to level 0."""
    scenario = Scenario()
    for host in ("testhost1", "testhost2", "testhost3"):
        scenario.add_host(host)
    scenario.set_ruleset(
        "host_service_levels",
        [
            (10, [], ["testhost2"], {}),
            (2, [], ["testhost2"], {}),
        ],
    )
    scenario.set_ruleset(
        "service_service_levels",
        [
            (33, [], ["testhost1"], ["CPU load$"], {}),
        ],
    )
    scenario.apply(monkeypatch)

    check_api_utils.set_service("cpu.loads", "CPU load")

    expectations = (
        ("testhost1", 33),  # service rule matches this host/service
        ("testhost2", 10),  # first matching host rule wins over the second
        ("testhost3", 0),  # no rule at all -> default level
    )
    for hostname, expected_level in expectations:
        check_api_utils.set_hostname(hostname)
        assert check_api.get_effective_service_level() == expected_level
def _do_all_checks_on_host(sources, host_config, ipaddress, only_check_plugin_names=None):
    # type: (data_sources.DataSources, config.HostConfig, Optional[str], Optional[List[str]]) -> Tuple[int, List[str]]
    """Execute all checks of the host's check table.

    Returns a tuple of the number of successfully executed checks and the
    sorted list of section names for which no data was available.
    """
    hostname = host_config.hostname
    config_cache = config.get_config_cache()

    num_success, missing_sections = 0, set()

    # Make the hostname globally available for the check functions
    check_api_utils.set_hostname(hostname)

    # Cluster nodes need the clustered services in their check table, because
    # the data for those services is fetched from the nodes.
    belongs_to_cluster = len(config_cache.clusters_of(hostname)) > 0
    filter_mode = "include_clustered" if belongs_to_cluster else None

    table = check_table.get_precompiled_check_table(hostname,
                                                    remove_duplicates=True,
                                                    filter_mode=filter_mode)

    # When check types are specified via command line, enforce them. Otherwise use the
    # list of checks defined by the check table.
    if only_check_plugin_names is None:
        only_check_plugins = {e[0] for e in table}
    else:
        only_check_plugins = set(only_check_plugin_names)

    sources.enforce_check_plugin_names(only_check_plugins)

    # Gather the data from the sources
    multi_host_sections = sources.get_host_sections()

    # Filter out check types which are not used on the node: a plugin is only
    # dropped when *every* one of its services belongs to another cluster node.
    if belongs_to_cluster:
        pos_match = set()
        neg_match = set()
        for check_plugin_name, item, params, description in table:
            if hostname != config_cache.host_of_clustered_service(hostname, description):
                pos_match.add(check_plugin_name)
            else:
                neg_match.add(check_plugin_name)
        only_check_plugins -= (pos_match - neg_match)

    for check_plugin_name, item, params, description in table:
        if check_plugin_name not in only_check_plugins:
            continue

        # Skip services that are assigned to another node of the cluster
        if belongs_to_cluster and hostname != config_cache.host_of_clustered_service(
                hostname, description):
            continue

        success = execute_check(multi_host_sections, hostname, ipaddress, check_plugin_name,
                                item, params, description)
        if success:
            num_success += 1
        elif success is None:
            # If the service is in any timeperiod we do not want to
            # - increase num_success or
            # - add to missing sections
            continue
        else:
            missing_sections.add(cmk_base.check_utils.section_name_of(check_plugin_name))

    import cmk_base.inventory as inventory
    inventory.do_inventory_actions_during_checking_for(sources, multi_host_sections,
                                                       host_config, ipaddress)

    return num_success, sorted(missing_sections)
def _create_nagios_servicedefs(cfg, config_cache, hostname, host_attrs):
    """Write all Nagios service definitions for one host to cfg.outfile.

    Emits, in order: the passive Check_MK services from the check table, the
    "Check_MK" active service, legacy active checks, custom checks, the
    discovery ("Check_MK inventory"/"Check_MK Discovery") service and - when
    no other service exists - PING fallback services.
    """
    outfile = cfg.outfile
    import cmk_base.check_table as check_table

    host_config = config_cache.get_host_config(hostname)

    check_mk_attrs = core_config.get_service_attributes(hostname, "Check_MK", config_cache)

    #   _____
    #  |___ /
    #    |_ \
    #   ___) |
    #  |____/   3. Services

    def do_omit_service(hostname, description):
        # True if the service is ignored by rule or belongs to another cluster node
        if config.service_ignored(hostname, None, description):
            return True
        if hostname != config_cache.host_of_clustered_service(hostname, description):
            return True
        return False

    def get_dependencies(hostname, servicedesc):
        # Render "servicedependency" objects for all configured dependencies
        # of the given service as one string
        result = ""
        for dep in config.service_depends_on(hostname, servicedesc):
            result += _format_nagios_object(
                "servicedependency", {
                    "use": config.service_dependency_template,
                    "host_name": hostname,
                    "service_description": dep,
                    "dependent_host_name": hostname,
                    "dependent_service_description": servicedesc,
                })

        return result

    services = check_table.get_check_table(hostname, remove_duplicates=True).values()
    have_at_least_one_service = False
    used_descriptions = {}  # description -> (checktype marker, item); guards uniqueness
    for service in sorted(services, key=lambda s: (s.check_plugin_name, s.item)):
        if service.check_plugin_name not in config.check_info:
            continue  # simply ignore missing checks

        # Make sure, the service description is unique on this host
        if service.description in used_descriptions:
            cn, it = used_descriptions[service.description]
            core_config.warning(
                "ERROR: Duplicate service description '%s' for host '%s'!\n"
                " - 1st occurrance: checktype = %s, item = %r\n"
                " - 2nd occurrance: checktype = %s, item = %r\n" %
                (service.description, hostname, cn, it, service.check_plugin_name, service.item))
            continue

        else:
            used_descriptions[service.description] = (service.check_plugin_name, service.item)
        if config.check_info[service.check_plugin_name].get("has_perfdata", False):
            template = config.passive_service_template_perf
        else:
            template = config.passive_service_template

        # Services Dependencies for autochecks
        outfile.write(get_dependencies(hostname, service.description).encode("utf-8"))

        service_spec = {
            "use": template,
            "host_name": hostname,
            "service_description": service.description,
            "check_command": "check_mk-%s" % service.check_plugin_name,
        }

        service_spec.update(
            core_config.get_cmk_passive_service_attributes(config_cache, host_config, service,
                                                           check_mk_attrs))
        service_spec.update(_extra_service_conf_of(cfg, config_cache, hostname,
                                                   service.description))

        outfile.write(_format_nagios_object("service", service_spec).encode("utf-8"))

        cfg.checknames_to_define.add(service.check_plugin_name)
        have_at_least_one_service = True

    # Active check for check_mk
    if have_at_least_one_service:
        service_spec = {
            "use": config.active_service_template,
            "host_name": hostname,
            "service_description": "Check_MK",
        }
        service_spec.update(check_mk_attrs)
        service_spec.update(_extra_service_conf_of(cfg, config_cache, hostname, "Check_MK"))
        outfile.write(_format_nagios_object("service", service_spec).encode("utf-8"))

    # legacy checks via active_checks
    actchecks = []
    for plugin_name, entries in host_config.active_checks:
        cfg.active_checks_to_define.add(plugin_name)
        act_info = config.active_check_info[plugin_name]
        for params in entries:
            actchecks.append((plugin_name, act_info, params))

    if actchecks:
        outfile.write("\n\n# Active checks\n")
        for acttype, act_info, params in actchecks:
            # Make hostname available as global variable in argument functions
            check_api_utils.set_hostname(hostname)

            has_perfdata = act_info.get('has_perfdata', False)
            description = config.active_check_service_description(hostname, acttype, params)

            if do_omit_service(hostname, description):
                continue

            # compute argument, and quote ! and \ for Nagios
            args = core_config.active_check_arguments(
                hostname, description,
                act_info["argument_function"](params)).replace("\\", "\\\\").replace("!", "\\!")

            if description in used_descriptions:
                cn, it = used_descriptions[description]
                # If we have the same active check again with the same description,
                # then we do not regard this as an error, but simply ignore the
                # second one. That way one can override a check with other settings.
                if cn == "active(%s)" % acttype:
                    continue

                core_config.warning(
                    "ERROR: Duplicate service description (active check) '%s' for host '%s'!\n"
                    " - 1st occurrance: checktype = %s, item = %r\n"
                    " - 2nd occurrance: checktype = active(%s), item = None\n" %
                    (description, hostname, cn, it, acttype))
                continue

            else:
                used_descriptions[description] = ("active(" + acttype + ")", description)

            template = "check_mk_perf," if has_perfdata else ""

            # Without a resolvable address the check command cannot work; emit a
            # custom command that always reports CRIT instead
            if host_attrs["address"] in ["0.0.0.0", "::"]:
                command_name = "check-mk-custom"
                command = command_name + "!echo \"CRIT - Failed to lookup IP address and no explicit IP address configured\" && exit 2"
                cfg.custom_commands_to_define.add(command_name)
            else:
                command = "check_mk_active-%s!%s" % (acttype, args)

            service_spec = {
                "use": "%scheck_mk_default" % template,
                "host_name": hostname,
                "service_description": description,
                "check_command": _simulate_command(cfg, command),
                "active_checks_enabled": 1,
            }
            service_spec.update(
                core_config.get_service_attributes(hostname, description, config_cache))
            service_spec.update(_extra_service_conf_of(cfg, config_cache, hostname, description))
            outfile.write(_format_nagios_object("service", service_spec).encode("utf-8"))

            # write service dependencies for active checks
            outfile.write(get_dependencies(hostname, description).encode("utf-8"))

    # Legacy checks via custom_checks
    custchecks = host_config.custom_checks
    if custchecks:
        outfile.write("\n\n# Custom checks\n")
        for entry in custchecks:
            # entries are dicts with the following keys:
            # "service_description"        Service description to use
            # "command_line"  (optional)   Unix command line for executing the check
            #                              If this is missing, we create a passive check
            # "command_name"  (optional)   Name of Monitoring command to define. If missing,
            #                              we use "check-mk-custom"
            # "has_perfdata"  (optional)   If present and True, we activate perf_data
            description = config.get_final_service_description(hostname,
                                                               entry["service_description"])
            has_perfdata = entry.get("has_perfdata", False)
            command_name = entry.get("command_name", "check-mk-custom")
            command_line = entry.get("command_line", "")

            if do_omit_service(hostname, description):
                continue

            if command_line:
                command_line = core_config.autodetect_plugin(command_line).replace(
                    "\\", "\\\\").replace("!", "\\!")

            # "freshness" turns the check passive: the command line is replaced by
            # an echo of the configured stale output
            if "freshness" in entry:
                freshness = {
                    "check_freshness": 1,
                    "freshness_threshold": 60 * entry["freshness"]["interval"],
                }
                command_line = "echo %s && exit %d" % (_quote_nagios_string(
                    entry["freshness"]["output"]), entry["freshness"]["state"])
            else:
                freshness = {}

            cfg.custom_commands_to_define.add(command_name)

            if description in used_descriptions:
                cn, it = used_descriptions[description]
                # If we have the same active check again with the same description,
                # then we do not regard this as an error, but simply ignore the
                # second one.
                if cn == "custom(%s)" % command_name:
                    continue
                core_config.warning(
                    "ERROR: Duplicate service description (custom check) '%s' for host '%s'!\n"
                    " - 1st occurrance: checktype = %s, item = %r\n"
                    " - 2nd occurrance: checktype = custom(%s), item = %r\n" %
                    (description, hostname, cn, it, command_name, description))
                continue
            else:
                used_descriptions[description] = ("custom(%s)" % command_name, description)

            template = "check_mk_perf," if has_perfdata else ""
            command = "%s!%s" % (command_name, command_line)

            service_spec = {
                "use": "%scheck_mk_default" % template,
                "host_name": hostname,
                "service_description": description,
                "check_command": _simulate_command(cfg, command),
                "active_checks_enabled": 1 if (command_line and not freshness) else 0,
            }
            service_spec.update(freshness)
            service_spec.update(
                core_config.get_service_attributes(hostname, description, config_cache))
            service_spec.update(_extra_service_conf_of(cfg, config_cache, hostname, description))
            outfile.write(_format_nagios_object("service", service_spec).encode("utf-8"))

            # write service dependencies for custom checks
            outfile.write(get_dependencies(hostname, description).encode("utf-8"))

    # FIXME: Remove old name one day
    service_discovery_name = 'Check_MK inventory'
    if 'cmk-inventory' in config.use_new_descriptions_for:
        service_discovery_name = 'Check_MK Discovery'

    # Inventory checks - if user has configured them.
    params = host_config.discovery_check_parameters
    if params and params["check_interval"] \
        and not config.service_ignored(hostname, None, service_discovery_name) \
        and not host_config.is_ping_host:
        service_spec = {
            "use": config.inventory_check_template,
            "host_name": hostname,
            "service_description": service_discovery_name,
        }
        service_spec.update(
            core_config.get_service_attributes(hostname, service_discovery_name, config_cache))

        service_spec.update(
            _extra_service_conf_of(cfg, config_cache, hostname, service_discovery_name))

        service_spec.update({
            "check_interval": params["check_interval"],
            "retry_interval": params["check_interval"],
        })

        outfile.write(_format_nagios_object("service", service_spec).encode("utf-8"))

        # The discovery service only makes sense after the Check_MK service ran
        if have_at_least_one_service:
            outfile.write(
                _format_nagios_object(
                    "servicedependency", {
                        "use": config.service_dependency_template,
                        "host_name": hostname,
                        "service_description": "Check_MK",
                        "dependent_host_name": hostname,
                        "dependent_service_description": service_discovery_name,
                    }).encode("utf-8"))

    # No check_mk service, no legacy service -> create PING service
    if not have_at_least_one_service and not actchecks and not custchecks:
        _add_ping_service(cfg, config_cache, host_config, host_attrs["address"],
                          host_config.is_ipv6_primary and 6 or 4, "PING",
                          host_attrs.get("_NODEIPS"))

    # Dual-stack hosts additionally get a PING for the secondary address family
    if host_config.is_ipv4v6_host:
        if host_config.is_ipv6_primary:
            _add_ping_service(cfg, config_cache, host_config, host_attrs["_ADDRESS_4"], 4,
                              "PING IPv4", host_attrs.get("_NODEIPS_4"))
        else:
            _add_ping_service(cfg, config_cache, host_config, host_attrs["_ADDRESS_6"], 6,
                              "PING IPv6", host_attrs.get("_NODEIPS_6"))
def _precompile_hostcheck(config_cache, hostname):
    """Generate the standalone "precompiled" host check script for one host.

    Writes <precompiled_hostchecks_dir>/<hostname>.py containing a
    self-contained Python (2) script that loads only the check plugins this
    host needs and runs checking.do_check() for it. Afterwards the script is
    either byte-compiled immediately or (with config.delay_precompile) a
    symlink is created so it compiles itself on first execution.
    """
    host_config = config_cache.get_host_config(hostname)

    console.verbose("%s%s%-16s%s:", tty.bold, tty.blue, hostname, tty.normal, stream=sys.stderr)

    check_api_utils.set_hostname(hostname)

    # Remove any stale artifacts of a previous run; ENOENT just means there
    # was nothing to clean up
    compiled_filename = cmk.utils.paths.precompiled_hostchecks_dir + "/" + hostname
    source_filename = compiled_filename + ".py"
    for fname in [compiled_filename, source_filename]:
        try:
            os.remove(fname)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    needed_check_plugin_names = _get_needed_check_plugin_names(host_config)
    if not needed_check_plugin_names:
        console.verbose("(no Check_MK checks)\n")
        return

    # Write to a ".new" file first so we can compare against an existing
    # script below and skip recompilation when nothing changed
    output = file(source_filename + ".new", "w")
    output.write("#!/usr/bin/env python\n")
    output.write("# encoding: utf-8\n\n")

    output.write("import logging\n")
    output.write("import sys\n\n")

    # The generated script must run with the site's own Python interpreter
    output.write("if not sys.executable.startswith('/omd'):\n")
    output.write("    sys.stdout.write(\"ERROR: Only executable with sites python\\n\")\n")
    output.write("    sys.exit(2)\n\n")

    # Remove precompiled directory from sys.path. Leaving it in the path
    # makes problems when host names (name of precompiled files) are equal
    # to python module names like "random"
    output.write("sys.path.pop(0)\n")

    output.write("import cmk.utils.log\n")
    output.write("import cmk.utils.debug\n")
    output.write("from cmk.utils.exceptions import MKTerminate\n")
    output.write("\n")
    output.write("import cmk_base.utils\n")
    output.write("import cmk_base.config as config\n")
    output.write("import cmk_base.console as console\n")
    output.write("import cmk_base.checking as checking\n")
    output.write("import cmk_base.check_api as check_api\n")
    output.write("import cmk_base.ip_lookup as ip_lookup\n")

    # Self-compile: replace symlink with precompiled python-code, if
    # we are run for the first time
    # NOTE(review): the generated snippet is Python 2 only (octal literal 0755)
    if config.delay_precompile:
        output.write("""
import os
if os.path.islink(%(dst)r):
    import py_compile
    os.remove(%(dst)r)
    py_compile.compile(%(src)r, %(dst)r, %(dst)r, True)
    os.chmod(%(dst)r, 0755)

""" % {
            "src": source_filename,
            "dst": compiled_filename
        })

    # Register default Check_MK signal handler
    output.write("cmk_base.utils.register_sigint_handler()\n")

    # initialize global variables
    output.write("""
# very simple commandline parsing: only -v (once or twice) and -d are supported

cmk.utils.log.setup_console_logging()
logger = logging.getLogger("cmk.base")

# TODO: This is not really good parsing, because it not cares about syntax like e.g. "-nv".
#       The later regular argument parsing is handling this correctly. Try to clean this up.
cmk.utils.log.logger.setLevel(cmk.utils.log.verbosity_to_log_level(len([ a for a in sys.argv if a in [ "-v", "--verbose"] ])))

if '-d' in sys.argv:
    cmk.utils.debug.enable()

""")

    # Only load the check files this host actually needs
    output.write("config.load_checks(check_api.get_check_api_context, %r)\n" %
                 _get_needed_check_file_names(needed_check_plugin_names))

    for check_plugin_name in sorted(needed_check_plugin_names):
        console.verbose(" %s%s%s", tty.green, check_plugin_name, tty.normal, stream=sys.stderr)

    output.write("config.load_packed_config()\n")

    # IP addresses are looked up now and baked into the generated script so the
    # precompiled check does not need DNS at runtime
    needed_ipaddresses, needed_ipv6addresses, = {}, {}
    if host_config.is_cluster:
        for node in host_config.nodes:
            node_config = config_cache.get_host_config(node)
            if node_config.is_ipv4_host:
                needed_ipaddresses[node] = ip_lookup.lookup_ipv4_address(node)

            if node_config.is_ipv6_host:
                needed_ipv6addresses[node] = ip_lookup.lookup_ipv6_address(node)

        # The cluster host itself may have no resolvable address; best effort
        try:
            if host_config.is_ipv4_host:
                needed_ipaddresses[hostname] = ip_lookup.lookup_ipv4_address(hostname)
        except Exception:
            pass

        try:
            if host_config.is_ipv6_host:
                needed_ipv6addresses[hostname] = ip_lookup.lookup_ipv6_address(hostname)
        except Exception:
            pass
    else:
        if host_config.is_ipv4_host:
            needed_ipaddresses[hostname] = ip_lookup.lookup_ipv4_address(hostname)

        if host_config.is_ipv6_host:
            needed_ipv6addresses[hostname] = ip_lookup.lookup_ipv6_address(hostname)

    output.write("config.ipaddresses = %r\n\n" % needed_ipaddresses)
    output.write("config.ipv6addresses = %r\n\n" % needed_ipv6addresses)

    # perform actual check with a general exception handler
    output.write("try:\n")
    output.write("    sys.exit(checking.do_check(%r, None))\n" % hostname)
    output.write("except MKTerminate:\n")
    output.write("    console.output('<Interrupted>\\n', stream=sys.stderr)\n")
    output.write("    sys.exit(1)\n")
    output.write("except SystemExit, e:\n")
    output.write("    sys.exit(e.code)\n")
    output.write("except Exception, e:\n")
    output.write("    import traceback, pprint\n")

    # status output message
    output.write(
        "    sys.stdout.write(\"UNKNOWN - Exception in precompiled check: %s (details in long output)\\n\" % e)\n"
    )

    # generate traceback for long output
    output.write("    sys.stdout.write(\"Traceback: %s\\n\" % traceback.format_exc())\n")

    output.write("\n")
    output.write("    sys.exit(3)\n")
    output.close()

    # compile python (either now or delayed), but only if the source
    # code has not changed. The Python compilation is the most costly
    # operation here.
    if os.path.exists(source_filename):
        if file(source_filename).read() == file(source_filename + ".new").read():
            console.verbose(" (%s is unchanged)\n", source_filename, stream=sys.stderr)
            os.remove(source_filename + ".new")
            return
        else:
            console.verbose(" (new content)", stream=sys.stderr)

    os.rename(source_filename + ".new", source_filename)
    if not config.delay_precompile:
        py_compile.compile(source_filename, compiled_filename, compiled_filename, True)
        os.chmod(compiled_filename, 0o755)
    else:
        # Delayed mode: symlink to the source; the script replaces the link
        # with its compiled form on first execution (see snippet above)
        if os.path.exists(compiled_filename) or os.path.islink(compiled_filename):
            os.remove(compiled_filename)
        os.symlink(hostname + ".py", compiled_filename)

    console.verbose(" ==> %s.\n", compiled_filename, stream=sys.stderr)
def _snmp_scan(host_config,
               on_error="ignore",
               for_inv=False,
               do_snmp_scan=True,
               for_mgmt_board=False):
    """Run the SNMP scan functions to find the check plugins usable on a host.

    Evaluates the scan function of every SNMP check plugin (or inventory
    plugin when for_inv=True); plugins without a scan function are taken as
    found by default. on_error controls how scan-function failures and
    invalid return types are handled ("ignore", "warn" or "raise"). Returns
    the sorted, management-board-filtered list of check plugin names.
    """
    import cmk_base.inventory_plugins as inventory_plugins

    # Make hostname globally available for scan functions.
    # This is rarely used, but e.g. the scan for if/if64 needs
    # this to evaluate if_disabled_if64_checks.
    check_api_utils.set_hostname(host_config.hostname)

    snmp.initialize_single_oid_cache(host_config)
    console.vverbose(" SNMP scan:\n")

    # Pre-fetch sysDescr/sysObjectID, which most scan functions consult, unless
    # the host is configured to not provide them
    if not config.get_config_cache().in_binary_hostlist(host_config.hostname,
                                                        config.snmp_without_sys_descr):
        for oid, name in [(".1.3.6.1.2.1.1.1.0", "system description"),
                          (".1.3.6.1.2.1.1.2.0", "system object")]:
            value = snmp.get_single_oid(host_config, oid, do_snmp_scan=do_snmp_scan)
            if value is None:
                raise MKSNMPError(
                    "Cannot fetch %s OID %s. This might be OK for some bogus devices. "
                    "In that case please configure the ruleset \"Hosts without system "
                    "description OID\" to tell Check_MK not to fetch the system "
                    "description and system object OIDs." % (name, oid))
    else:
        # Fake OID values to prevent issues with a lot of scan functions
        console.vverbose(" Skipping system description OID "
                         "(Set .1.3.6.1.2.1.1.1.0 and .1.3.6.1.2.1.1.2.0 to \"\")\n")
        snmp.set_single_oid_cache(host_config, ".1.3.6.1.2.1.1.1.0", "")
        snmp.set_single_oid_cache(host_config, ".1.3.6.1.2.1.1.2.0", "")

    found_check_plugin_names = []
    if for_inv:
        items = inventory_plugins.inv_info.items()
    else:
        items = config.check_info.items()

    positive_found = []  # plugins whose scan function returned a truthy result
    default_found = []  # plugins taken by default (no scan function available)
    for check_plugin_name, _unused_check in items:
        if config.service_ignored(host_config.hostname, check_plugin_name, None):
            continue
        else:
            # Restrict to SNMP-based plugins of the requested kind
            if for_inv and not inventory_plugins.is_snmp_plugin(check_plugin_name):
                continue
            elif not for_inv and not cmk_base.check_utils.is_snmp_check(check_plugin_name):
                continue

        section_name = cmk_base.check_utils.section_name_of(check_plugin_name)
        # The scan function should be assigned to the section_name, because
        # subchecks sharing the same SNMP info of course should have
        # an identical scan function. But some checks do not do this
        # correctly
        if check_plugin_name in config.snmp_scan_functions:
            scan_function = config.snmp_scan_functions[check_plugin_name]
        elif section_name in config.snmp_scan_functions:
            scan_function = config.snmp_scan_functions[section_name]
        elif section_name in inventory_plugins.inv_info:
            scan_function = inventory_plugins.inv_info[section_name].get("snmp_scan_function")
        else:
            scan_function = None

        if scan_function:
            try:
                # cp_name default binds the current plugin name at definition
                # time (avoids the late-binding closure pitfall)
                def oid_function(oid, default_value=None, cp_name=check_plugin_name):
                    value = snmp.get_single_oid(host_config,
                                                oid,
                                                cp_name,
                                                do_snmp_scan=do_snmp_scan)
                    return default_value if value is None else value

                result = scan_function(oid_function)
                if result is not None and not isinstance(result, (str, bool)):
                    # Scan functions must return a bool/str (or None)
                    if on_error == "warn":
                        console.warning(" SNMP scan function of %s returns invalid type %s." %
                                        (check_plugin_name, type(result)))
                    elif on_error == "raise":
                        raise MKGeneralException("SNMP Scan aborted.")
                elif result:
                    found_check_plugin_names.append(check_plugin_name)
                    positive_found.append(check_plugin_name)
            except MKGeneralException:
                # some error messages which we explicitly want to show to the user
                # should be raised through this
                raise
            except Exception:
                if on_error == "warn":
                    console.warning(" Exception in SNMP scan function of %s" %
                                    check_plugin_name)
                elif on_error == "raise":
                    raise
        else:
            found_check_plugin_names.append(check_plugin_name)
            default_found.append(check_plugin_name)

    _output_snmp_check_plugins("SNMP scan found", positive_found)
    if default_found:
        _output_snmp_check_plugins("SNMP without scan function", default_found)

    filtered = config.filter_by_management_board(host_config.hostname,
                                                 found_check_plugin_names,
                                                 for_mgmt_board,
                                                 for_discovery=True,
                                                 for_inventory=for_inv)

    _output_snmp_check_plugins("SNMP filtered check plugin names", filtered)
    snmp.write_single_oid_cache(host_config)
    return sorted(filtered)