def _ip_address_for_dump_host(host_config, family=None):
    # type: (config.HostConfig, Optional[int]) -> Optional[str]
    """Return the IP address to use when dumping host information.

    On lookup failure, clusters get an empty address (they have no address
    of their own) while real hosts fall back to the configured fallback IP.
    """
    try:
        return ip_lookup.lookup_ip_address(host_config.hostname, family)
    except Exception:
        # NOTE: was a bare "except:", which would also swallow
        # SystemExit/KeyboardInterrupt. Only application errors are
        # meant to be handled here.
        if host_config.is_cluster:
            return ""
        return core_config.fallback_ip_for(host_config, family)
def do_inv(hostnames):
    """Run the HW/SW inventory for each given host and store the results.

    Errors for one host are reported per section and do not abort the
    remaining hosts (unless debug mode is enabled, in which case they are
    re-raised).
    """
    # Make sure the inventory output/archive directories exist before writing.
    cmk.utils.store.makedirs(cmk.utils.paths.inventory_output_dir)
    cmk.utils.store.makedirs(cmk.utils.paths.inventory_archive_dir)

    for hostname in hostnames:
        console.section_begin(hostname)
        try:
            config_cache = config.get_config_cache()
            host_config = config_cache.get_host_config(hostname)

            if host_config.is_cluster:
                # Clusters have no address of their own; data comes from the nodes.
                ipaddress = None
            else:
                ipaddress = ip_lookup.lookup_ip_address(hostname)

            sources = data_sources.DataSources(hostname, ipaddress)
            _do_inv_for(
                sources,
                multi_host_sections=None,
                host_config=host_config,
                ipaddress=ipaddress,
                do_status_data_inv=host_config.do_status_data_inventory,
            )
        except Exception as e:
            # In debug mode fail loudly; otherwise report and continue.
            if cmk.utils.debug.enabled():
                raise
            console.section_error("%s" % e)
        finally:
            # Global state must not leak into the next host's run.
            cmk_base.cleanup.cleanup_globals()
def get_host_sections(self, max_cachefile_age=None):
    """Gather ALL host info data for any host (hosts, nodes, clusters) in Check_MK.

    Returns a dictionary object of already parsed HostSections() constructs for each related host.
    For single hosts it's just a single entry in the dictionary. For cluster hosts it contains one
    HostSections() entry for each related node.

    Communication errors are not raised through by this functions. All agent related errors are
    caught by the source.run() method and saved in it's _exception attribute. The caller should
    use source.get_summary_result() to get the state, output and perfdata of the agent excecution
    or source.exception() to get the exception object.
    """
    console.step("Fetching data")

    # First abstract clusters/nodes/hosts
    hosts = []
    nodes = self._host_config.nodes
    if nodes is not None:
        # Cluster: gather data from each node, restricted to the check
        # plugins the cluster actually needs from that node.
        for node_hostname in nodes:
            node_ipaddress = ip_lookup.lookup_ip_address(node_hostname)

            node_check_names = check_table.get_needed_check_names(node_hostname,
                                                                  remove_duplicates=True,
                                                                  filter_mode="only_clustered")

            node_data_sources = DataSources(node_hostname, node_ipaddress)
            node_data_sources.enforce_check_plugin_names(set(node_check_names))
            hosts.append((node_hostname, node_ipaddress, node_data_sources,
                          config.cluster_max_cachefile_age))
    else:
        # Real host: use this object's own sources and the regular cache age.
        hosts.append((self._hostname, self._ipaddress, self, config.check_max_cachefile_age))

    if nodes:
        # Allow cached agent data for cluster nodes to avoid hammering them.
        import cmk_base.data_sources.abstract as abstract
        abstract.DataSource.set_may_use_cache_file()

    # Special agents can produce data for the same check_plugin_name on the same host, in this case
    # the section lines need to be extended
    multi_host_sections = MultiHostSections()
    for this_hostname, this_ipaddress, these_sources, this_max_cachefile_age in hosts:
        # In case a max_cachefile_age is given with the function call, always use this one
        # instead of the host individual one. This is only used in discovery mode.
        if max_cachefile_age is not None:
            these_sources.set_max_cachefile_age(max_cachefile_age)
        else:
            these_sources.set_max_cachefile_age(this_max_cachefile_age)

        host_sections =\
            multi_host_sections.add_or_get_host_sections(this_hostname, this_ipaddress)
        for source in these_sources.get_data_sources():
            host_sections_from_source = source.run()
            host_sections.update(host_sections_from_source)

        # Store piggyback information received from all sources of this host. This
        # also implies a removal of piggyback files received during previous calls.
        cmk_base.piggyback.store_piggyback_raw_data(this_hostname,
                                                    host_sections.piggybacked_raw_data)

    return multi_host_sections
def _get_host_entries(self, hostname, ipaddress):
    """Return (hostname, ipaddress) pairs to process for this host.

    A real host yields a single pair with the address already known; a
    cluster yields one pair per node with a freshly looked-up address.
    """
    nodes = self._config_cache.get_host_config(hostname).nodes
    if nodes is None:
        return [(hostname, ipaddress)]

    entries = []
    for node in nodes:
        entries.append((node, ip_lookup.lookup_ip_address(node)))
    return entries
def create_snmp_host_config(hostname):
    # type: (str) -> snmp_utils.SNMPHostConfig
    """Build and return the SNMP configuration object for the given host."""
    cfg = config.get_config_cache().get_host_config(hostname)

    # ip_lookup.lookup_ip_address() returns Optional[str] in general, but for
    # all cases that reach the code here we seem to have "str".
    resolved_address = ip_lookup.lookup_ip_address(hostname)
    if resolved_address is None:
        raise MKGeneralException("Failed to gather IP address of %s" % hostname)

    return cfg.snmp_config(resolved_address)
def _management_board_ipaddress(self, hostname):
    """Return the management board address as an IP, or None.

    A configured literal IP address is returned unchanged; a configured
    hostname is resolved, with lookup failures mapped to None.
    """
    configured = self._host_config.management_address

    if configured is None:
        return None

    if self._is_ipaddress(configured):
        # Already a literal address - nothing to resolve.
        return configured

    try:
        return ip_lookup.lookup_ip_address(configured)
    except MKIPAddressLookupError:
        return None
def _ip_address_of(host_config, family=None):
    # type: (config.HostConfig, Optional[int]) -> Optional[str]
    """Look up the host's IP address, degrading gracefully on failure.

    Clusters yield "" when the lookup fails; real hosts record the failure,
    optionally emit a warning, and return the configured fallback address.
    """
    hostname = host_config.hostname
    try:
        return ip_lookup.lookup_ip_address(hostname, family)
    except Exception as e:
        if host_config.is_cluster:
            # Clusters have no address of their own.
            return ""

        _failed_ip_lookups.append(hostname)
        if not _ignore_ip_lookup_failures:
            warning("Cannot lookup IP address of '%s' (%s). "
                    "The host will not be monitored correctly." % (hostname, e))
        return fallback_ip_for(hostname, family)
def do_check(hostname, ipaddress, only_check_plugin_names=None):
    """Execute all checks on a host and build the "Check_MK" service result.

    Returns a (status, infotexts, long_infotexts, perfdata) tuple. Data
    source summaries, missing-section handling and execution-time perfdata
    are all folded into that result.
    """
    cpu_tracking.start("busy")
    console.verbose("Check_MK version %s\n" % cmk.__version__)

    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    exit_spec = host_config.exit_code_spec()

    status, infotexts, long_infotexts, perfdata = 0, [], [], []
    try:
        # In case of keepalive we always have an ipaddress (can be 0.0.0.0 or :: when
        # address is unknown). When called as non keepalive ipaddress may be None or
        # is already an address (2nd argument)
        if ipaddress is None and not host_config.is_cluster:
            ipaddress = ip_lookup.lookup_ip_address(hostname)

        item_state.load(hostname)
        sources = data_sources.DataSources(hostname, ipaddress)
        num_success, missing_sections = \
            _do_all_checks_on_host(sources, host_config, ipaddress, only_check_plugin_names)

        if _submit_to_core:
            item_state.save(hostname)

        # Fold each data source's summary state and output into the result.
        for source in sources.get_data_sources():
            source_state, source_output, source_perfdata = source.get_summary_result_for_checking()
            if source_output != "":
                status = max(status, source_state)
                infotexts.append("[%s] %s" % (source.id(), source_output))
                perfdata.extend(source_perfdata)

        if missing_sections and num_success > 0:
            missing_sections_status, missing_sections_infotext = \
                _check_missing_sections(missing_sections, exit_spec)
            status = max(status, missing_sections_status)
            infotexts.append(missing_sections_infotext)

        elif missing_sections:
            # No section at all arrived from any source.
            infotexts.append("Got no information from host")
            status = max(status, exit_spec.get("empty_output", 2))

        cpu_tracking.end()
        phase_times = cpu_tracking.get_times()
        total_times = phase_times["TOTAL"]
        run_time = total_times[4]

        infotexts.append("execution time %.1f sec" % run_time)
        if config.check_mk_perfdata_with_times:
            perfdata += [
                "execution_time=%.3f" % run_time,
                "user_time=%.3f" % total_times[0],
                "system_time=%.3f" % total_times[1],
                "children_user_time=%.3f" % total_times[2],
                "children_system_time=%.3f" % total_times[3],
            ]

            # Per-phase wall-clock time spent waiting (not computing).
            for phase, times in phase_times.items():
                if phase in ["agent", "snmp", "ds"]:
                    t = times[4] - sum(times[:4])  # real time - CPU time
                    perfdata.append("cmk_time_%s=%.3f" % (phase, t))
        else:
            perfdata.append("execution_time=%.3f" % run_time)

        return status, infotexts, long_infotexts, perfdata
    finally:
        if _checkresult_file_fd is not None:
            _close_checkresult_file()

        # Persist inline SNMP statistics when configured for this host.
        if config.record_inline_snmp_stats \
           and host_config.snmp_config(ipaddress).is_inline_snmp_host:
            inline_snmp.save_snmp_stats()
def do_inv_check(hostname, options):
    """Run the inventory as an active check and report changes.

    Compares the freshly created inventory tree against the archived tree
    (identified by the returned old_timestamp) and raises the service state
    according to the hw-/sw-changes and sw-missing options.
    """
    _inv_hw_changes = options.get("hw-changes", 0)
    _inv_sw_changes = options.get("sw-changes", 0)
    _inv_sw_missing = options.get("sw-missing", 0)
    _inv_fail_status = options.get("inv-fail-status", 1)  # State in case of an error (default: WARN)

    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    if host_config.is_cluster:
        # Clusters have no address of their own; data comes from the nodes.
        ipaddress = None
    else:
        ipaddress = ip_lookup.lookup_ip_address(hostname)

    status, infotexts, long_infotexts, perfdata = 0, [], [], []

    sources = data_sources.DataSources(hostname, ipaddress)
    old_timestamp, inventory_tree, status_data_tree = _do_inv_for(
        sources,
        multi_host_sections=None,
        host_config=host_config,
        ipaddress=ipaddress,
        do_status_data_inv=host_config.do_status_data_inventory,
    )

    if inventory_tree.is_empty() and status_data_tree.is_empty():
        infotexts.append("Found no data")
    else:
        infotexts.append("Found %d inventory entries" % inventory_tree.count_entries())

        # Node 'software' is always there because _do_inv_for creates this node for cluster info
        if not inventory_tree.get_sub_container(['software']).has_edge('packages')\
           and _inv_sw_missing:
            infotexts.append("software packages information is missing" +
                             check_api_utils.state_markers[_inv_sw_missing])
            status = max(status, _inv_sw_missing)

        if old_timestamp:
            # Load the previously archived tree and diff software/hardware edges.
            path = "%s/%s/%d" % (cmk.utils.paths.inventory_archive_dir, hostname, old_timestamp)
            old_tree = StructuredDataTree().load_from(path)

            if not old_tree.is_equal(inventory_tree, edges=["software"]):
                infotext = "software changes"
                if _inv_sw_changes:
                    status = max(status, _inv_sw_changes)
                    infotext += check_api_utils.state_markers[_inv_sw_changes]
                infotexts.append(infotext)

            if not old_tree.is_equal(inventory_tree, edges=["hardware"]):
                infotext = "hardware changes"
                if _inv_hw_changes:
                    status = max(status, _inv_hw_changes)
                    infotext += check_api_utils.state_markers[_inv_hw_changes]
                infotexts.append(infotext)

        if not status_data_tree.is_empty():
            infotexts.append("Found %s status entries" % status_data_tree.count_entries())

    for source in sources.get_data_sources():
        source_state, source_output, _source_perfdata = source.get_summary_result_for_inventory()
        # Do not output informational (state = 0) things. These information are shown by the "Check_MK" service
        if source_state != 0:
            status = max(source_state, status)
            infotexts.append("[%s] %s" % (source.id(), source_output))

    return status, infotexts, long_infotexts, perfdata
def do_inv_check(hostname, options):
    """Run the inventory as an active check and report changes.

    Saves the freshly created inventory tree (unless all sources failed),
    runs the export hooks, and compares against the previously saved tree,
    raising the service state per the hw-/sw-changes and sw-missing options.
    """
    _inv_hw_changes = options.get("hw-changes", 0)
    _inv_sw_changes = options.get("sw-changes", 0)
    _inv_sw_missing = options.get("sw-missing", 0)
    _inv_fail_status = options.get("inv-fail-status", 1)  # State in case of an error (default: WARN)

    config_cache = config.get_config_cache()
    host_config = config_cache.get_host_config(hostname)

    if host_config.is_cluster:
        # Clusters have no address of their own; data comes from the nodes.
        ipaddress = None
    else:
        ipaddress = ip_lookup.lookup_ip_address(hostname)

    status, infotexts, long_infotexts, perfdata = 0, [], [], []

    sources = data_sources.DataSources(hostname, ipaddress)
    inventory_tree, status_data_tree = _do_inv_for(
        sources,
        multi_host_sections=None,
        host_config=host_config,
        ipaddress=ipaddress,
    )

    #TODO add cluster if and only if all sources do not fail?
    if _all_sources_fail(host_config, sources):
        # Nothing reliable arrived: keep the old tree untouched and warn.
        old_tree, sources_state = None, 1
        status = max(status, sources_state)
        infotexts.append("Cannot update tree%s" % check_api_utils.state_markers[sources_state])
    else:
        old_tree = _save_inventory_tree(hostname, inventory_tree)

    _run_inventory_export_hooks(host_config, inventory_tree)

    if inventory_tree.is_empty() and status_data_tree.is_empty():
        infotexts.append("Found no data")
    else:
        infotexts.append("Found %d inventory entries" % inventory_tree.count_entries())

        # Node 'software' is always there because _do_inv_for creates this node for cluster info
        if not inventory_tree.get_sub_container(['software']).has_edge('packages')\
           and _inv_sw_missing:
            infotexts.append("software packages information is missing" +
                             check_api_utils.state_markers[_inv_sw_missing])
            status = max(status, _inv_sw_missing)

        if old_tree is not None:
            # Diff software/hardware edges against the previously saved tree.
            if not old_tree.is_equal(inventory_tree, edges=["software"]):
                infotext = "software changes"
                if _inv_sw_changes:
                    status = max(status, _inv_sw_changes)
                    infotext += check_api_utils.state_markers[_inv_sw_changes]
                infotexts.append(infotext)

            if not old_tree.is_equal(inventory_tree, edges=["hardware"]):
                infotext = "hardware changes"
                if _inv_hw_changes:
                    status = max(status, _inv_hw_changes)
                    infotext += check_api_utils.state_markers[_inv_hw_changes]
                infotexts.append(infotext)

        if not status_data_tree.is_empty():
            infotexts.append("Found %s status entries" % status_data_tree.count_entries())

    for source in sources.get_data_sources():
        source_state, source_output, _source_perfdata = source.get_summary_result_for_inventory()
        # Do not output informational (state = 0) things. These information are shown by the "Check_MK" service
        if source_state != 0:
            status = max(source_state, status)
            infotexts.append("[%s] %s" % (source.id(), source_output))

    return status, infotexts, long_infotexts, perfdata