def from_exception_and_context(cls, hostname, check_plugin_name, item, is_manual_check, params, description, info, text):
    """Create a crash report from the current exception, enriched with
    check- and host-specific context information.

    Depending on the check type either the SNMP info or the agent output
    is attached so the crash dump contains the data needed to reproduce
    the problem."""
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    # Evaluate the check type only once (was previously computed twice:
    # once for the data collection and once for the "uses_snmp" detail).
    is_snmp = cmk_base.check_utils.is_snmp_check(check_plugin_name)

    # Only one of the two is filled, depending on the data source type.
    snmp_info, agent_output = None, None
    if is_snmp:
        snmp_info = _read_snmp_info(hostname)
    else:
        agent_output = _read_agent_output(hostname)

    return cls.from_exception(
        details={
            "check_output": text,
            "host": hostname,
            "is_cluster": host_config.is_cluster,
            "description": description,
            "check_type": check_plugin_name,
            "item": item,
            "params": params,
            "uses_snmp": is_snmp,
            "inline_snmp": host_config.snmp_config(hostname).is_inline_snmp_host,
            "manual_check": is_manual_check,
        },
        type_specific_attributes={
            "snmp_info": snmp_info,
            "agent_output": agent_output,
        },
    )
def apply(self, monkeypatch):
    """Patch all scenario settings into the config module and return a
    freshly initialized config cache."""
    for name in self.config:
        monkeypatch.setattr(config, name, self.config[name])

    cache = config.get_config_cache()
    self.config_cache = cache
    cache.initialize()
    return cache
def _set_autochecks_of_cluster(host_config, new_items):
    # type: (config.HostConfig, List[DiscoveredService]) -> None
    """A Cluster does not have an autochecks file. All of its services are located
    in the nodes instead. For clusters we cycle through all nodes, remove all
    clustered services and add the ones we've got as input."""
    if not host_config.nodes:
        return

    config_cache = config.get_config_cache()

    for node in host_config.nodes:
        # BUG FIX: the accumulator must be reset for every node. Previously it
        # was created once before the loop, so each subsequent node's file also
        # received the kept services (and new_items) of all prior nodes.
        new_autochecks = []  # type: List[DiscoveredService]

        # Keep the node's existing services that are NOT clustered to this host
        for existing_service in parse_autochecks_file(node):
            if host_config.hostname != config_cache.host_of_clustered_service(
                    node, existing_service.description):
                new_autochecks.append(existing_service)

        # ...and add the freshly discovered services of the cluster
        new_autochecks.extend(new_items)

        # write new autochecks file for that node
        save_autochecks_file(node, new_autochecks)

    # Check whether or not the cluster host autocheck files are still existant.
    # Remove them. The autochecks are only stored in the nodes autochecks files
    # these days.
    remove_autochecks_file(host_config.hostname)
def do_inventory_actions_during_checking_for(sources, multi_host_sections, host_config, ipaddress):
    # type: (data_sources.DataSources, data_sources.MultiHostSections, config.HostConfig, Optional[str]) -> None
    """Run the status data inventory during checking if it is enabled for the host."""
    hostname = host_config.hostname

    do_status_data_inventory = not host_config.is_cluster and host_config.do_status_data_inventory
    if not do_status_data_inventory:
        # Inventory during checking is disabled for this host: remove stale
        # status data and stop. (The original tested the identical condition
        # twice in a row; merged into a single guard.)
        _cleanup_status_data(hostname)
        return  # nothing to do here

    # This is called during checking, but the inventory plugins are not loaded yet
    import cmk_base.inventory_plugins as inventory_plugins
    inventory_plugins.load_plugins(check_api.get_check_api_context, get_inventory_context)

    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    _do_inv_for(
        sources,
        multi_host_sections=multi_host_sections,
        host_config=host_config,
        ipaddress=ipaddress,
        do_status_data_inv=do_status_data_inventory,
    )
def ruleset_match_object_for_checkgroup_parameters(hostname, item, svc_desc):
    # type: (str, Text, Text) -> RulesetMatchObject
    """Construct the object that is needed to match checkgroup parameter rulesets"""
    _load_config()
    return config.get_config_cache().ruleset_match_object_for_checkgroup_parameters(
        hostname, item, svc_desc)
def do_inv(hostnames):
    """Run the HW/SW inventory for the given hosts and store the results."""
    cmk.utils.store.makedirs(cmk.utils.paths.inventory_output_dir)
    cmk.utils.store.makedirs(cmk.utils.paths.inventory_archive_dir)

    # Loop-invariant: fetch the config cache once instead of once per host.
    config_cache = config.get_config_cache()

    for hostname in hostnames:
        console.section_begin(hostname)
        try:
            host_config = config_cache.get_host_config(hostname)

            if host_config.is_cluster:
                ipaddress = None
            else:
                ipaddress = ip_lookup.lookup_ip_address(hostname)

            sources = data_sources.DataSources(hostname, ipaddress)
            _do_inv_for(
                sources,
                multi_host_sections=None,
                host_config=host_config,
                ipaddress=ipaddress,
                do_status_data_inv=host_config.do_status_data_inventory,
            )
        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            console.section_error("%s" % e)
        finally:
            cmk_base.cleanup.cleanup_globals()
def test_mode_discover_all_hosts(test_cfg, monkeypatch):
    """Discovery over all hosts must run the data sources once per host
    (two sources per real host, hence the factor of two)."""
    config_cache = config.get_config_cache()
    _patch_data_source_run(monkeypatch, _may_use_cache_file=True, _max_cachefile_age=120)

    cmk_base.modes.check_mk.mode_discover({"discover": 1}, [])

    expected_runs = len(config_cache.all_active_realhosts()) * 2
    assert _counter_run == expected_runs
def parse_hostname_list(self, args, with_clusters=True, with_foreign_hosts=False):
    """Expand a command line host specification into a list of host names.

    Plain arguments are taken verbatim when they name a known host.
    Arguments of the form "@tag1,tag2" (or unknown plain names) are treated
    as comma separated tag specifications matching all hosts carrying those
    tags. Raises MKBailOut when a specification matches no host."""
    config_cache = config.get_config_cache()
    if with_foreign_hosts:
        valid_hosts = config_cache.all_configured_realhosts()
    else:
        valid_hosts = config_cache.all_active_realhosts()

    if with_clusters:
        valid_hosts = valid_hosts.union(config_cache.all_active_clusters())

    hostlist = []
    for arg in args:
        # Direct host name match (only for arguments without the '@' prefix)
        if arg[0] != '@' and arg in valid_hosts:
            hostlist.append(arg)
            continue

        # Tag specification: strip an optional '@' prefix and split the tags
        if arg[0] == '@':
            arg = arg[1:]
        tagspec = arg.split(',')

        matched = [
            hostname for hostname in valid_hosts
            if config.hosttags_match_taglist(config_cache.tag_list_of_host(hostname), tagspec)
        ]
        if not matched:
            raise MKBailOut("Hostname or tag specification '%s' does "
                            "not match any host." % arg)
        hostlist.extend(matched)

    return hostlist
def reload_config():
    """Reload the configuration including all check plugins and return a
    freshly initialized config cache."""
    # Needs to be done together, even when the checks are not directly needed
    import cmk_base.check_api as check_api
    config.load_all_checks(check_api.get_check_api_context)
    config.load()

    cache = config.get_config_cache()
    cache.initialize()
    return cache
def cached_dns_lookup(hostname, family):
    # type: (str, int) -> Optional[str]
    """Resolve the IPv4/IPv6 address of a host using a two level cache.

    A per-process in-memory cache is consulted first, then the file based
    fall-back DNS cache. Returns None for hosts configured to have no IP
    address. Raises MKIPAddressLookupError when resolution fails and no
    cached address is available."""
    cache = cmk_base.config_cache.get_dict("cached_dns_lookup")
    cache_id = hostname, family

    # Address has already been resolved in prior call to this function?
    try:
        return cache[cache_id]
    except KeyError:
        pass

    # Prepare file based fall-back DNS cache in case resolution fails
    # TODO: Find a place where this only called once!
    ip_lookup_cache = _initialize_ip_lookup_cache()

    cached_ip = ip_lookup_cache.get(cache_id)
    if cached_ip and config.use_dns_cache:
        cache[cache_id] = cached_ip
        return cached_ip

    host_config = config.get_config_cache().get_host_config(hostname)

    if host_config.is_no_ip_host:
        cache[cache_id] = None
        return None

    # Now do the actual DNS lookup
    try:
        # Conditional expression instead of the fragile "cond and a or b"
        # idiom used before (which only worked because AF_INET is truthy).
        address_family = socket.AF_INET if family == 4 else socket.AF_INET6
        ipa = socket.getaddrinfo(hostname, None, address_family)[0][4][0]

        # Update our cached address if that has changed or was missing
        if ipa != cached_ip:
            console.verbose("Updating IPv%d DNS cache for %s: %s\n" % (family, hostname, ipa))
            _update_ip_lookup_cache(cache_id, ipa)

        cache[cache_id] = ipa  # Update in-memory-cache
        return ipa

    except (MKTerminate, MKTimeout):
        # We should be more specific with the exception handler below, then we
        # could drop this special handling here
        raise

    except Exception as e:
        # DNS failed. Use cached IP address if present, even if caching
        # is disabled.
        if cached_ip:
            cache[cache_id] = cached_ip
            return cached_ip
        else:
            cache[cache_id] = None
            raise MKIPAddressLookupError("Failed to lookup IPv%d address of %s via DNS: %s" %
                                         (family, hostname, e))
def wrapped_check_func(hostname, *args, **kwargs):
    """Run the wrapped check function for the given host and turn its outcome
    (or any raised exception) into a Nagios-style service check result.

    The check function is expected to return a tuple of
    (status, infotexts, long_infotexts, perfdata). Exceptions are mapped to
    states via the host's exit code specification. Returns the final status;
    the formatted output is printed or, in keepalive mode, queued.

    NOTE(review): check_func, check_plugin_name and description are taken
    from the enclosing scope (closure), not passed as arguments."""
    host_config = config.get_config_cache().get_host_config(hostname)
    exit_spec = host_config.exit_code_spec()

    # Defaults in case the check function raises before returning anything
    status, infotexts, long_infotexts, perfdata = 0, [], [], []
    try:
        status, infotexts, long_infotexts, perfdata = check_func(hostname, *args, **kwargs)
    except SystemExit:
        raise
    except MKTimeout:
        # In keepalive mode the timeout must propagate to the caller
        if _in_keepalive_mode():
            raise
        else:
            infotexts.append("Timed out")
            status = max(status, exit_spec.get("timeout", 2))
    except (MKAgentError, MKSNMPError, MKIPAddressLookupError) as e:
        infotexts.append("%s" % e)
        status = exit_spec.get("connection", 2)
    except MKGeneralException as e:
        infotexts.append("%s" % e)
        status = max(status, exit_spec.get("exception", 3))
    except Exception:
        if cmk.utils.debug.enabled():
            raise
        # Escape the newline so the crash dump stays on one output line
        crash_output = cmk_base.crash_reporting.create_crash_dump(
            hostname, check_plugin_name, None, False, None, description, [])
        infotexts.append(
            crash_output.replace("Crash dump:\n", "Crash dump:\\n"))
        status = max(status, exit_spec.get("exception", 3))

    # Produce the service check result output
    output_txt = "%s - %s" % (defines.short_service_state_name(status),
                              ", ".join(infotexts))
    if perfdata:
        output_txt += " | %s" % " ".join(perfdata)
    if long_infotexts:
        output_txt = "%s\n%s" % (output_txt, "\n".join(long_infotexts))
    output_txt += "\n"

    if _in_keepalive_mode():
        keepalive.add_keepalive_active_check_result(hostname, output_txt)
        console.verbose(output_txt.encode("utf-8"))
    else:
        console.output(output_txt.encode("utf-8"))

    return status
def create_snmp_host_config(hostname):
    # type: (str) -> snmp_utils.SNMPHostConfig
    """Build the SNMP configuration object of the given host, resolving its
    IP address first. Raises MKGeneralException when no address is found."""
    host_config = config.get_config_cache().get_host_config(hostname)

    # ip_lookup.lookup_ipv4_address() returns Optional[str] in general, but for
    # all cases that reach the code here we seem to have "str".
    address = ip_lookup.lookup_ip_address(hostname)
    if address is None:
        raise MKGeneralException("Failed to gather IP address of %s" % hostname)
    return host_config.snmp_config(address)
def get_check_table(hostname, remove_duplicates=False, use_cache=True, skip_autochecks=False, filter_mode=None, skip_ignored=True):
    # type: (str, bool, bool, bool, Optional[str], bool) -> CheckTable
    """Return the check table (the services to be monitored) of a host."""
    config_cache = config.get_config_cache()
    check_table = HostCheckTable(config_cache, config_cache.get_host_config(hostname))
    return check_table.get(remove_duplicates, use_cache, skip_autochecks, filter_mode,
                           skip_ignored)
def __init__(self, hostname, ipaddress):
    """Manage all data sources of a single host."""
    super(DataSources, self).__init__()
    self._hostname = hostname
    self._ipaddress = ipaddress

    cache = config.get_config_cache()
    self._config_cache = cache
    self._host_config = cache.get_host_config(hostname)

    self._initialize_data_sources()

    # Has currently no effect. The value possibly set during execution on the single data
    # sources is kept here in this object to return it later on
    self._enforced_check_plugin_names = None
def get_effective_service_level():
    """Get the service level that applies to the current service.
    This can only be used within check functions, not during discovery nor parsing."""
    config_cache = _config.get_config_cache()

    # Service specific level wins; fall back to the host's level.
    service_level = config_cache.service_level_of_service(host_name(), service_description())
    if service_level is None:
        service_level = config_cache.get_host_config(host_name()).service_level

    return 0 if service_level is None else service_level
def _get_dns_cache_lookup_hosts():
    # type: () -> List[IPLookupCacheId]
    """Return (hostname, family) pairs for every active host and each IP
    family the host is configured for."""
    config_cache = config.get_config_cache()

    entries = []
    for hostname in config_cache.all_active_hosts():
        host_config = config_cache.get_host_config(hostname)
        for family, wanted in ((4, host_config.is_ipv4_host), (6, host_config.is_ipv6_host)):
            if wanted:
                entries.append((hostname, family))
    return entries
def _remove_autochecks_of_host(hostname):
    # type: (str) -> int
    """Remove all services clustered to this host from its autochecks file
    and return the number of removed services."""
    config_cache = config.get_config_cache()

    kept = []  # type: List[DiscoveredService]
    removed = 0
    for service in parse_autochecks_file(hostname):
        if hostname == config_cache.host_of_clustered_service(hostname, service.description):
            removed += 1
        else:
            kept.append(service)

    save_autochecks_file(hostname, kept)
    return removed
def get_effective_service_level():
    """Get the service level that applies to the current service.
    This can only be used within check functions, not during discovery nor parsing."""
    config_cache = _config.get_config_cache()

    # Service specific rules win; host wide rules are the fallback.
    service_levels = config_cache.service_extra_conf(host_name(), service_description(),
                                                     _config.service_service_levels)
    if not service_levels:
        service_levels = config_cache.host_extra_conf(host_name(), _config.host_service_levels)

    return service_levels[0] if service_levels else 0
def __init__(self, site_id="unit"):
    """Initialize a minimal test scenario (sample tag config, no hosts)
    for the given site."""
    super(Scenario, self).__init__()

    self.site_id = site_id
    sample_config = cmk.utils.tags.sample_tag_config()
    self.tags = cmk.utils.tags.get_effective_tag_config(sample_config)

    self.config = {
        "tag_config": sample_config,
        "distributed_wato_site": site_id,
        "all_hosts": [],
        "host_paths": {},
        "host_tags": {},
        "clusters": {},
    }
    self.config_cache = config.get_config_cache()
def update_dns_cache():
    """Re-resolve the addresses of all active hosts, rebuilding the DNS
    cache file from scratch.

    Returns a tuple (number of updated addresses, list of hostnames whose
    lookup failed)."""
    failed = []
    updated = 0

    console.verbose("Cleaning up existing DNS cache...\n")
    try:
        os.unlink(_cache_path())
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

    config_cache = config.get_config_cache()

    console.verbose("Updating DNS cache...\n")
    for hostname in config_cache.all_active_hosts():
        host_config = config_cache.get_host_config(hostname)

        # Use intelligent logic. This prevents DNS lookups for hosts
        # with statically configured addresses, etc.
        for family, wanted in ((4, host_config.is_ipv4_host), (6, host_config.is_ipv6_host)):
            if not wanted:
                continue

            console.verbose("%s (IPv%d)..." % (hostname, family))
            try:
                ip = lookup_ipv4_address(hostname) if family == 4 \
                    else lookup_ipv6_address(hostname)
                console.verbose("%s\n" % ip)
                updated += 1
            except (MKTerminate, MKTimeout):
                # We should be more specific with the exception handler below, then we
                # could drop this special handling here
                raise
            except Exception as e:
                failed.append(hostname)
                console.verbose("lookup failed: %s\n" % e)
                if cmk.utils.debug.enabled():
                    raise

    # TODO: After calculation the cache needs to be written once
    return updated, failed
def main():
    """Entry point of the update script.

    Workflow: parse the command line, verify the df sources include flag,
    require the site to be stopped (unless --dry-run), ask the user to
    confirm a backup exists, rewrite the service info of all hosts and
    finally persist the new configuration."""
    args = parse_arguments()
    check_df_sources_include_flag()
    site = SiteContext(site_name())
    if not (site.is_stopped() or args.dry_run):
        raise RuntimeError('The site needs to be stopped to run this script')
    if not _ask_for_confirmation_backup(args):
        # User declined: abort without touching anything
        sys.exit(1)
    config_cache = config.get_config_cache()
    update_service_info(config_cache, get_hostnames(config_cache), args)
    if not args.dry_run:
        save_new_config()
def _agent_cache_file_age(hostname, check_plugin_name):
    """Return the age in seconds of the cache file the given check reads its
    data from, or None if no such file exists. Not valid for clusters."""
    host_config = _config.get_config_cache().get_host_config(hostname)
    if host_config.is_cluster:
        raise MKGeneralException("get_agent_data_time() not valid for cluster")

    import cmk_base.check_utils
    if cmk_base.check_utils.is_snmp_check(check_plugin_name):
        # SNMP checks use one cache file per check section
        cachefile = "%s/%s.%s" % (_paths.tcp_cache_dir, hostname,
                                  check_plugin_name.split(".")[0])
    elif cmk_base.check_utils.is_tcp_check(check_plugin_name):
        cachefile = "%s/%s" % (_paths.tcp_cache_dir, hostname)
    else:
        return None

    if os.path.exists(cachefile):
        return _utils.cachefile_age(cachefile)
    return None
def __init__(self, hostname, ipaddress):
    """Base initialization of a data source for a single host."""
    super(DataSource, self).__init__()
    self._hostname = hostname
    self._ipaddress = ipaddress
    self._max_cachefile_age = None
    self._enforced_check_plugin_names = None

    self._logger = console.logger.getChild("data_source.%s" % self.id())
    self._setup_logger()

    # Runtime data (managed by self.run()) - Meant for self.get_summary_result()
    self._exception = None
    self._host_sections = None
    self._persisted_sections = None

    cache = config.get_config_cache()
    self._config_cache = cache
    self._host_config = cache.get_host_config(hostname)
def precompile_hostchecks():
    """Precompile the host check files of all active hosts."""
    console.verbose("Creating precompiled host check config...\n")
    config.PackedConfig().save()

    if not os.path.exists(cmk.utils.paths.precompiled_hostchecks_dir):
        os.makedirs(cmk.utils.paths.precompiled_hostchecks_dir)

    config_cache = config.get_config_cache()

    console.verbose("Precompiling host checks...\n")
    for hostname in config_cache.all_active_hosts():
        try:
            _precompile_hostcheck(config_cache, hostname)
        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            console.error("Error precompiling checks for host %s: %s\n" % (hostname, e))
            sys.exit(5)
def create_config(outfile, hostnames):
    """Write the complete Nagios configuration for the given hosts to outfile.

    hostnames may be None, in which case all active hosts are used.
    Warns about the no longer supported notification period settings."""
    if config.host_notification_periods != []:
        core_config.warning(
            "host_notification_periods is not longer supported. Please use extra_host_conf['notification_period'] instead."
        )
    if config.service_notification_periods != []:
        core_config.warning(
            "service_notification_periods is not longer supported. Please use extra_service_conf['notification_period'] instead."
        )
    # Map service_period to _SERVICE_PERIOD. This field does not exist in Nagios.
    # The CMC has this field natively.
    if "service_period" in config.extra_host_conf:
        config.extra_host_conf["_SERVICE_PERIOD"] = config.extra_host_conf[
            "service_period"]
        del config.extra_host_conf["service_period"]
    if "service_period" in config.extra_service_conf:
        config.extra_service_conf[
            "_SERVICE_PERIOD"] = config.extra_service_conf["service_period"]
        del config.extra_service_conf["service_period"]
    config_cache = config.get_config_cache()
    if hostnames is None:
        hostnames = config_cache.all_active_hosts()
    cfg = NagiosConfig(outfile, hostnames)
    _output_conf_header(cfg)
    # Per-host definitions first, then the global object definitions
    for hostname in hostnames:
        _create_nagios_config_host(cfg, config_cache, hostname)
    _create_nagios_config_contacts(cfg, hostnames)
    _create_nagios_config_hostgroups(cfg)
    _create_nagios_config_servicegroups(cfg)
    _create_nagios_config_contactgroups(cfg)
    _create_nagios_config_commands(cfg)
    _create_nagios_config_timeperiods(cfg)
    # User supplied raw Nagios configuration is appended verbatim
    if config.extra_nagios_conf:
        outfile.write("\n# extra_nagios_conf\n\n")
        outfile.write(config.extra_nagios_conf)
def test_mode_inventory_caching(test_cfg, hosts, cache, force, monkeypatch):
    """Verify that "cmk -i" uses or ignores cached agent data depending on
    the combination of host arguments and the --cache/--no-cache/--force
    options (hosts/cache/force are parametrized (value, kwargs) pairs)."""
    kwargs = {}
    kwargs.update(hosts[1])
    kwargs.update(cache[1])
    kwargs.update(force[1])

    # When no explicit cache option is given, the cache file may only be
    # used if no explicit host list was specified.
    if cache[0] is None:
        kwargs["_may_use_cache_file"] = not hosts[0]

    _patch_data_source_run(monkeypatch, **kwargs)

    config_cache = config.get_config_cache()

    try:
        if cache[0] is True:
            cmk_base.modes.check_mk.option_cache()
        elif cache[0] is False:
            cmk_base.modes.check_mk.option_no_cache()  # --no-cache

        options = {}
        if force[0]:
            options["force"] = True

        assert _counter_run == 0
        cmk_base.modes.check_mk.mode_inventory(options, hosts[0])

        # run() has to be called once for each requested host
        if hosts[0] == []:
            valid_hosts = config_cache.all_active_realhosts()
            valid_hosts = valid_hosts.union(config_cache.all_active_clusters())
        else:
            valid_hosts = hosts[0]

        # Each non-cluster host is expected to trigger two source runs
        num_runs = len(
            [h for h in valid_hosts if not config_cache.get_host_config(h).is_cluster]) * 2

        assert _counter_run == num_runs
    finally:
        # TODO: Can't the mode clean this up on it's own?
        cmk_base.data_sources.abstract.DataSource.set_use_outdated_cache_file(False)
def update_dns_cache():
    """Re-resolve the addresses of all active hosts, rebuilding the DNS
    cache file from scratch.

    Returns a tuple (number of updated addresses, list of hostnames whose
    lookup failed)."""
    updated = 0
    failed = []

    console.verbose("Cleaning up existing DNS cache...\n")
    try:
        os.unlink(_cache_path())
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

    config_cache = config.get_config_cache()

    console.verbose("Updating DNS cache...\n")
    for hostname in config_cache.all_active_hosts():
        host_config = config_cache.get_host_config(hostname)

        # Use intelligent logic. This prevents DNS lookups for hosts
        # with statically configured addresses, etc.
        for family in [4, 6]:
            if (family == 4 and host_config.is_ipv4_host) \
               or (family == 6 and host_config.is_ipv6_host):
                console.verbose("%s (IPv%d)..." % (hostname, family))
                try:
                    if family == 4:
                        ip = lookup_ipv4_address(hostname)
                    else:
                        ip = lookup_ipv6_address(hostname)

                    console.verbose("%s\n" % ip)
                    updated += 1
                except (MKTerminate, MKTimeout):
                    # FIX: abort the whole update on termination/timeout instead
                    # of counting it as a failed lookup in the generic handler
                    # below (consistent with the sibling implementation).
                    raise
                except Exception as e:
                    failed.append(hostname)
                    console.verbose("lookup failed: %s\n" % e)
                    if cmk.utils.debug.enabled():
                        raise
                    continue

    return updated, failed
def _create_crash_dump_info_file(crash_dir, hostname, check_plugin_name, item, is_manual_check, params, description, info, text):
    """Write the crash.info file with check- and host-specific context
    information into the given crash directory."""
    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    crash_info = crash_reporting.create_crash_info(
        "check",
        details={
            "check_output": text,
            "host": hostname,
            "is_cluster": host_config.is_cluster,
            "description": description,
            "check_type": check_plugin_name,
            "item": item,
            "params": params,
            "uses_snmp": cmk_base.check_utils.is_snmp_check(check_plugin_name),
            "inline_snmp": host_config.snmp_config(hostname).is_inline_snmp_host,
            "manual_check": is_manual_check,
        })

    # Use a context manager so the file handle is closed deterministically
    # (the previous open(...).write(...) left closing to garbage collection).
    with open(crash_dir + "/crash.info", "w") as f:
        f.write(crash_reporting.crash_info_to_string(crash_info) + "\n")
def do_snmpget(*args):
    """Query a single OID from one or more hosts and print each result.

    When no host names are given, all active SNMP hosts are queried."""
    if not args[0]:
        raise MKBailOut("You need to specify an OID.")
    oid = args[0][0]

    config_cache = config.get_config_cache()

    hostnames = list(args[0][1:])
    if not hostnames:
        hostnames = [
            host for host in config_cache.all_active_realhosts()
            if config_cache.get_host_config(host).is_snmp_host
        ]

    for hostname in hostnames:
        #TODO what about SNMP management boards?
        snmp_config = create_snmp_host_config(hostname)
        value = get_single_oid(snmp_config, oid)
        console.output("%s (%s): %r\n" % (hostname, snmp_config.ipaddress, value))

    cmk_base.cleanup.cleanup_globals()
def lookup_ip_address(hostname, family=None):
    # type: (str, Optional[int]) -> Optional[str]
    """Determine the IP address to monitor for a host.

    Honors fake DNS, simulation mode / usewalk SNMP hosts, statically
    configured addresses and dyndns hosts before falling back to the
    cached DNS lookup."""
    # Quick hack, where all IP addresses are faked (--fake-dns)
    if _fake_dns:
        return _fake_dns
    if config.fake_dns:
        return config.fake_dns

    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    if family is None:  # choose primary family
        family = 6 if host_config.is_ipv6_primary else 4

    # Honor simulation mode und usewalk hosts. Never contact the network.
    if config.simulation_mode or _enforce_localhost or \
       (host_config.is_usewalk_host and host_config.is_snmp_host):
        return "127.0.0.1" if family == 4 else "::1"

    # Now check, if IP address is hard coded by the user
    static_addresses = config.ipaddresses if family == 4 else config.ipv6addresses
    ipa = static_addresses.get(hostname)
    if ipa:
        return ipa

    # Hosts listed in dyndns hosts always use dynamic DNS lookup.
    # The use their hostname as IP address at all places
    if config_cache.in_binary_hostlist(hostname, config.dyndns_hosts):
        return hostname

    return cached_dns_lookup(hostname, family)