def perform_snmpwalk(hostname, ip, check_type, base_oid, fetchoid):
    added_oids = set([])
    rowinfo = []
    if is_snmpv3_host(hostname):
        snmp_contexts = snmpv3_contexts_of(hostname, check_type)
    else:
        snmp_contexts = [None]

    for context_name in snmp_contexts:
        if is_inline_snmp_host(hostname):
            rows = inline_snmpwalk_on_suboid(hostname, check_type, fetchoid, base_oid,
                                             context_name=context_name, ipaddress=ip)
        else:
            rows = snmpwalk_on_suboid(hostname, ip, fetchoid, context_name=context_name)

        # I've seen a broken device (a Mikrotik router) that got confused after an
        # update to RouterOS v6.22. It would return the same OID nine times when
        # .1.3.6.1.2.1.1.1.0 was being walked. We try to detect such situations
        # by removing any duplicate OID information.
        if len(rows) > 1 and rows[0][0] == rows[1][0]:
            console.vverbose("Detected broken SNMP agent. Ignoring duplicate OID %s.\n" % rows[0][0])
            rows = rows[:1]

        for row_oid, val in rows:
            if row_oid in added_oids:
                console.vverbose("Duplicate OID found: %s (%s)\n" % (row_oid, val))
            else:
                rowinfo.append((row_oid, val))
                added_oids.add(row_oid)

    return rowinfo
def _output_snmp_check_plugins(title, collection):
    if collection:
        collection_out = " ".join(sorted(collection))
    else:
        collection_out = "-"
    console.vverbose(" %-35s%s%s%s%s\n" % \
                     (title, tty.bold, tty.yellow, collection_out, tty.normal))
def _perform_snmpwalk(snmp_config, check_plugin_name, base_oid, fetchoid):
    added_oids = set([])
    rowinfo = []
    if snmp_utils.is_snmpv3_host(snmp_config):
        snmp_contexts = _snmpv3_contexts_of(snmp_config, check_plugin_name)
    else:
        snmp_contexts = [None]

    for context_name in snmp_contexts:
        snmp_backend = SNMPBackendFactory().factory(snmp_config,
                                                    enforce_stored_walks=_enforce_stored_walks)

        rows = snmp_backend.walk(snmp_config,
                                 fetchoid,
                                 check_plugin_name=check_plugin_name,
                                 table_base_oid=base_oid,
                                 context_name=context_name)

        # I've seen a broken device (a Mikrotik router) that got confused after an
        # update to RouterOS v6.22. It would return the same OID nine times when
        # .1.3.6.1.2.1.1.1.0 was being walked. We try to detect such situations
        # by removing any duplicate OID information.
        if len(rows) > 1 and rows[0][0] == rows[1][0]:
            console.vverbose("Detected broken SNMP agent. Ignoring duplicate OID %s.\n" % rows[0][0])
            rows = rows[:1]

        for row_oid, val in rows:
            if row_oid in added_oids:
                console.vverbose("Duplicate OID found: %s (%s)\n" % (row_oid, val))
            else:
                rowinfo.append((row_oid, val))
                added_oids.add(row_oid)

    return rowinfo
def _save_snmpwalk_cache(hostname, fetchoid, rowinfo):
    path = _snmpwalk_cache_path(hostname, fetchoid)

    if not os.path.exists(os.path.dirname(path)):
        os.makedirs(os.path.dirname(path))

    console.vverbose(" Saving walk of %s to walk cache %s\n" % (fetchoid, path))
    store.save_data_to_file(path, rowinfo, pretty=False)
def walk(self, snmp_config, oid, check_plugin_name=None, table_base_oid=None, context_name=None):
    # type: (snmp_utils.SNMPHostConfig, str, Optional[str], Optional[str], Optional[str]) -> snmp_utils.SNMPRowInfo
    if oid.startswith("."):
        oid = oid[1:]

    if oid.endswith(".*"):
        oid_prefix = oid[:-2]
        dot_star = True
    else:
        oid_prefix = oid
        dot_star = False

    path = cmk.utils.paths.snmpwalks_dir + "/" + snmp_config.hostname

    console.vverbose(" Loading %s from %s\n" % (oid, path))

    rowinfo = []  # type: List[Tuple[str, str]]

    if snmp_config.hostname in _g_walk_cache:
        lines = _g_walk_cache[snmp_config.hostname]
    else:
        try:
            lines = open(path).readlines()
        except IOError:
            raise MKSNMPError("No snmpwalk file %s" % path)
        _g_walk_cache[snmp_config.hostname] = lines

    begin = 0
    end = len(lines)
    hit = None
    while end - begin > 0:
        current = (begin + end) // 2
        parts = lines[current].split(None, 1)
        comp = parts[0]
        hit = self._compare_oids(oid_prefix, comp)
        if hit == 0:
            break
        elif hit == 1:  # we are too low
            begin = current + 1
        else:
            end = current

    if hit != 0:
        return []  # not found

    rowinfo = self._collect_until(oid, oid_prefix, lines, current, -1)
    rowinfo.reverse()
    rowinfo += self._collect_until(oid, oid_prefix, lines, current + 1, 1)

    if dot_star:
        return [rowinfo[0]]

    return rowinfo
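# The binary search above relies on two helpers that are not shown here,
# _compare_oids() and _collect_until(). The function below is a hypothetical
# standalone sketch (an assumption, not the method from the source) of the
# comparison contract the search needs: return 0 when the stored OID lies
# inside the requested subtree, otherwise -1/+1 by numeric component order so
# the search knows which half of the sorted walk file to continue in.
def _compare_oid_prefix_sketch(oid_prefix, stored_oid):
    requested = tuple(int(p) for p in oid_prefix.strip(".").split("."))
    stored = tuple(int(p) for p in stored_oid.strip(".").split("."))
    if len(requested) <= len(stored) and stored[:len(requested)] == requested:
        return 0  # stored_oid is within the requested subtree -> hit
    return 1 if requested > stored else -1  # steer the binary search

# Example: _compare_oid_prefix_sketch("1.3.6.1.2.1.2", "1.3.6.1.2.1.2.2.1.1.1") == 0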
def pop_phase():
    # type: () -> None
    if _is_not_tracking():
        return

    console.vverbose("[cpu_tracking] Pop phase '%s' (Stack: %r)\n" % (phase_stack[-1], phase_stack))
    _add_times_to_phase()
    del phase_stack[-1]
def push_phase(phase):
    # type: (str) -> None
    if _is_not_tracking():
        return

    console.vverbose("[cpu_tracking] Push phase '%s' (Stack: %r)\n" % (phase, phase_stack))
    _add_times_to_phase()
    phase_stack.append(phase)
def start(initial_phase):
    # type: (str) -> None
    global times, last_time_snapshot
    console.vverbose("[cpu_tracking] Start with phase '%s'\n" % initial_phase)
    times = {}
    last_time_snapshot = _time_snapshot()

    del phase_stack[:]
    phase_stack.append(initial_phase)
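# A minimal usage sketch of the phase tracking API (start/push_phase/pop_phase/
# end defined in this section). The module path and do_snmp_work() are
# assumptions for illustration only; CPU times are booked to the phase that is
# on top of the stack between the calls.
import cmk_base.cpu_tracking as cpu_tracking  # assumed module location

def tracked_snmp_work():
    cpu_tracking.start("busy")          # reset counters, open the initial phase
    cpu_tracking.push_phase("snmp")     # subsequent CPU time is booked to "snmp"
    try:
        do_snmp_work()                  # placeholder for the real work (hypothetical)
    finally:
        cpu_tracking.pop_phase()        # book elapsed times, return to "busy"
    cpu_tracking.end()                  # close tracking and clear the phase stack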
def _get_cached_snmpwalk(hostname, fetchoid):
    path = _snmpwalk_cache_path(hostname, fetchoid)
    try:
        console.vverbose(" Loading %s from walk cache %s\n" % (fetchoid, path))
        return store.load_data_from_file(path)
    except Exception:
        if cmk.utils.debug.enabled():
            raise
        console.verbose(" Failed loading walk cache %s. Continue without it.\n" % path)
        return None
def walk(self, snmp_config, oid, check_plugin_name=None, table_base_oid=None, context_name=None):
    # type: (snmp_utils.SNMPHostConfig, str, Optional[str], Optional[str], Optional[str]) -> snmp_utils.SNMPRowInfo
    protospec = self._snmp_proto_spec(snmp_config)

    ipaddress = snmp_config.ipaddress
    if snmp_config.is_ipv6_primary:
        ipaddress = "[" + ipaddress + "]"

    portspec = self._snmp_port_spec(snmp_config)
    command = self._snmp_walk_command(snmp_config, context_name)
    command += ["-OQ", "-OU", "-On", "-Ot", "%s%s%s" % (protospec, ipaddress, portspec), oid]

    # list2cmdline exists, but mypy complains
    console.vverbose("Running '%s'\n" % subprocess.list2cmdline(command))  # type: ignore

    snmp_process = None
    exitstatus = None
    rowinfo = []  # type: snmp_utils.SNMPRowInfo
    try:
        snmp_process = subprocess.Popen(
            command,
            close_fds=True,
            stdin=open(os.devnull),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)

        rowinfo = self._get_rowinfo_from_snmp_process(snmp_process)

    except MKTimeout:
        # On a timeout exception, try to stop the process to prevent child process "leakage"
        if snmp_process:
            os.kill(snmp_process.pid, signal.SIGTERM)
            snmp_process.wait()
        raise

    finally:
        # The stdout and stderr pipes are not closed correctly on an MKTimeout.
        # Normally these pipes get closed after p.communicate() finishes.
        # Closing them a second time in the OK scenario does not hurt either.
        if snmp_process:
            exitstatus = snmp_process.wait()
            if snmp_process.stderr:
                error = snmp_process.stderr.read()
                snmp_process.stderr.close()
            if snmp_process.stdout:
                snmp_process.stdout.close()

    if exitstatus:
        console.verbose(tty.red + tty.bold + "ERROR: " + tty.normal +
                        "SNMP error: %s\n" % error.strip())
        raise MKSNMPError("SNMP Error on %s: %s (Exit-Code: %d)" %
                          (ipaddress, error.strip(), exitstatus))
    return rowinfo
def get_single_oid(snmp_config, oid, check_plugin_name=None, do_snmp_scan=True):
    # type: (snmp_utils.SNMPHostConfig, str, Optional[str], bool) -> Optional[str]
    # The OID can end with ".*". In that case we do an snmpgetnext and try to
    # find an OID with the prefix in question. The *cache* is working including
    # the X, however.
    if oid[0] != '.':
        if cmk.utils.debug.enabled():
            raise MKGeneralException("OID definition '%s' does not begin with a '.'" % oid)
        else:
            oid = '.' + oid

    # TODO: Use generic cache mechanism
    if _is_in_single_oid_cache(snmp_config, oid):
        console.vverbose(" Using cached OID %s: " % oid)
        value = _get_oid_from_single_oid_cache(snmp_config, oid)
        console.vverbose("%s%s%s%s\n" % (tty.bold, tty.green, value, tty.normal))
        return value

    # get_single_oid() can only return a single value. When SNMPv3 is used with multiple
    # SNMP contexts, all contexts will be queried until the first answer is received.
    if check_plugin_name is not None and snmp_utils.is_snmpv3_host(snmp_config):
        snmp_contexts = _snmpv3_contexts_of(snmp_config, check_plugin_name)
    else:
        snmp_contexts = [None]

    console.vverbose(" Getting OID %s: " % oid)
    for context_name in snmp_contexts:
        try:
            snmp_backend = SNMPBackendFactory().factory(snmp_config,
                                                        enforce_stored_walks=_enforce_stored_walks)
            value = snmp_backend.get(snmp_config, oid, context_name)

            if value is not None:
                break  # Use the first received answer in case of multiple contexts
        except Exception:
            if cmk.utils.debug.enabled():
                raise
            value = None

    if value is not None:
        console.vverbose("%s%s%s%s\n" % (tty.bold, tty.green, value, tty.normal))
    else:
        console.vverbose("failed.\n")

    set_single_oid_cache(snmp_config, oid, value)
    return value
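# Minimal usage sketch of get_single_oid() above; constructing the
# SNMPHostConfig is omitted here because its fields differ between versions
# (assumption). A trailing ".*" asks for a getnext below the given prefix, as
# handled by the classic backend's get() further down.
sys_descr = get_single_oid(snmp_config, ".1.3.6.1.2.1.1.1.0")
first_if_descr = get_single_oid(snmp_config, ".1.3.6.1.2.1.2.2.1.2.*")
if sys_descr is None:
    console.verbose("Device did not answer the sysDescr query\n")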
def snmpwalk_on_suboid(hostname, ip, oid, hex_plain=False, context_name=None):
    protospec = snmp_proto_spec(hostname)
    portspec = snmp_port_spec(hostname)
    command = snmp_walk_command(hostname)
    if context_name != None:
        command += [ "-n", context_name ]
    command += [ "-OQ", "-OU", "-On", "-Ot",
                 "%s%s%s" % (protospec, ip, portspec),
                 oid ]

    debug_cmd = [ "''" if a == "" else a for a in command ]
    console.vverbose("Running '%s'\n" % " ".join(debug_cmd))

    snmp_process = subprocess.Popen(command, close_fds=True, stdin=open(os.devnull),
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # Ugly(1): in some cases snmpwalk inserts line feed within one
    # dataset. This happens for example on hexdump outputs longer
    # than a few bytes. Those dumps are enclosed in double quotes.
    # So if the value begins with a double quote, but the line
    # does not end with a double quote, we take the next line(s) as
    # a continuation line.
    rowinfo = []
    try:
        line_iter = snmp_process.stdout.xreadlines()
        while True:
            line = line_iter.next().strip()
            parts = line.split('=', 1)
            if len(parts) < 2:
                continue # broken line, must contain =
            oid = parts[0].strip()
            value = parts[1].strip()
            # Filter out silly error messages from snmpwalk >:-P
            if value.startswith('No more variables') or value.startswith('End of MIB') \
               or value.startswith('No Such Object available') \
               or value.startswith('No Such Instance currently exists'):
                continue
            if value == '"' or (len(value) > 1 and value[0] == '"' and (value[-1] != '"')): # to be continued
                while True: # scan for end of this dataset
                    nextline = line_iter.next().strip()
                    value += " " + nextline
                    if value[-1] == '"':
                        break
            rowinfo.append((oid, strip_snmp_value(value, hex_plain)))

    except StopIteration:
        pass

    error = snmp_process.stderr.read()
    exitstatus = snmp_process.wait()
    if exitstatus:
        console.verbose(tty.red + tty.bold + "ERROR: " + tty.normal +
                        "SNMP error: %s\n" % error.strip())
        raise MKSNMPError("SNMP Error on %s: %s (Exit-Code: %d)" % (ip, error.strip(), exitstatus))
    return rowinfo
def get_cached_snmpwalk(hostname, fetchoid):
    path = cmk.paths.var_dir + "/snmp_cache/" + hostname + "/" + fetchoid
    try:
        console.vverbose(" Loading %s from walk cache %s\n" % (fetchoid, path))
        return eval(file(path).read())
    except IOError:
        return None # don't print error when not cached yet
    except:
        if cmk.debug.enabled():
            raise
        console.verbose("Failed to read cached SNMP walk from %s, ignoring.\n" % path)
        return None
def get(self, snmp_config, oid, context_name=None):
    if oid.endswith(".*"):
        oid_prefix = oid[:-2]
        commandtype = "getnext"
    else:
        oid_prefix = oid
        commandtype = "get"

    protospec = self._snmp_proto_spec(snmp_config)

    ipaddress = snmp_config.ipaddress
    if snmp_config.is_ipv6_primary:
        ipaddress = "[" + ipaddress + "]"

    portspec = self._snmp_port_spec(snmp_config)
    command = self._snmp_base_command(commandtype, snmp_config, context_name) + \
              [ "-On", "-OQ", "-Oe", "-Ot",
                "%s%s%s" % (protospec, ipaddress, portspec),
                oid_prefix ]

    console.vverbose("Running '%s'\n" % subprocess.list2cmdline(command))

    snmp_process = subprocess.Popen(command, close_fds=True,
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    exitstatus = snmp_process.wait()
    if exitstatus:
        console.verbose(tty.red + tty.bold + "ERROR: " + tty.normal + "SNMP error\n")
        console.verbose(snmp_process.stderr.read() + "\n")
        return None

    line = snmp_process.stdout.readline().strip()
    if not line:
        console.verbose("Error in response to snmpget.\n")
        return None

    item, value = line.split("=", 1)
    value = value.strip()
    console.vverbose("SNMP answer: ==> [%s]\n" % value)
    if value.startswith('No more variables') or value.startswith('End of MIB') \
       or value.startswith('No Such Object available') \
       or value.startswith('No Such Instance currently exists'):
        value = None

    # In case of .*, check if prefix is the one we are looking for
    if commandtype == "getnext" and not item.startswith(oid_prefix + "."):
        value = None

    # Strip quotes
    if value and value.startswith('"') and value.endswith('"'):
        value = value[1:-1]
    return value
def execute_check(multi_host_sections, hostname, ipaddress, check_plugin_name, item, params,
                  description):
    # Make a bit of context information globally available, so that functions
    # called by checks know this context
    check_api_utils.set_service(check_plugin_name, description)
    item_state.set_item_state_prefix(check_plugin_name, item)

    # Skip checks that are not in their check period
    period = config.check_period_of(hostname, description)
    if period and not cmk_base.core.check_timeperiod(period):
        console.verbose("Skipping service %s: currently not in timeperiod %s.\n" %
                        (description, period))
        return None
    elif period:
        console.vverbose("Service %s: timeperiod %s is currently active.\n" %
                         (description, period))

    section_name = cmk_base.check_utils.section_name_of(check_plugin_name)

    dont_submit = False
    section_content = None
    try:
        # TODO: There is duplicate code with discovery._execute_discovery(). Find a common place!
        try:
            section_content = multi_host_sections.get_section_content(
                hostname,
                ipaddress,
                section_name,
                for_discovery=False,
                service_description=description)
        except MKParseFunctionError as e:
            x = e.exc_info()
            # re-raise the original exception to not destroy the trace. This may raise a MKCounterWrapped
            # exception which needs to lead to a skipped check instead of a crash
            raise x[0], x[1], x[2]

        # TODO: Move this to a helper function
        if section_content is None:  # No data for this check type
            return False

        # In case of SNMP checks but missing agent response, skip this check.
        # Special checks which still need to be called even with empty data
        # may declare this.
        if not section_content and cmk_base.check_utils.is_snmp_check(check_plugin_name) \
           and not config.check_info[check_plugin_name]["handle_empty_info"]:
            return False

        check_function = config.check_info[check_plugin_name].get("check_function")
        if check_function is None:
            check_function = lambda item, params, section_content: (
                3, 'UNKNOWN - Check not implemented')

        # Call the actual check function
        item_state.reset_wrapped_counters()

        raw_result = check_function(item, determine_check_params(params), section_content)
        result = sanitize_check_result(raw_result,
                                       cmk_base.check_utils.is_snmp_check(check_plugin_name))
        item_state.raise_counter_wrap()

    except item_state.MKCounterWrapped as e:
        # handle check implementations that do not yet support the
        # handling of wrapped counters via exception on their own.
        # Do not submit any check result in that case:
        console.verbose("%-20s PEND - Cannot compute check result: %s\n" % (description, e))
        dont_submit = True

    except MKTimeout:
        raise

    except Exception as e:
        if cmk.utils.debug.enabled():
            raise
        result = 3, cmk_base.crash_reporting.create_crash_dump(
            hostname, check_plugin_name, item,
            is_manual_check(hostname, check_plugin_name, item),
            params, description, section_content), []

    if not dont_submit:
        # Now add information about the age of the data in the agent
        # sections. This is in data_sources.g_agent_cache_info. For clusters we
        # use the oldest of the timestamps, of course.
        oldest_cached_at = None
        largest_interval = None

        def minn(a, b):
            if a is None:
                return b
            elif b is None:
                return a
            return min(a, b)

        for host_sections in multi_host_sections.get_host_sections().values():
            section_entries = host_sections.cache_info
            if section_name in section_entries:
                cached_at, cache_interval = section_entries[section_name]
                oldest_cached_at = minn(oldest_cached_at, cached_at)
                largest_interval = max(largest_interval, cache_interval)

        _submit_check_result(hostname,
                             description,
                             result,
                             cached_at=oldest_cached_at,
                             cache_interval=largest_interval)
    return True
def end():
    # type: () -> None
    console.vverbose("[cpu_tracking] End\n")
    _add_times_to_phase()
    del phase_stack[:]
def _snmp_scan(host_config, on_error="ignore", for_inv=False, do_snmp_scan=True,
               for_mgmt_board=False):
    import cmk_base.inventory_plugins as inventory_plugins

    # Make hostname globally available for scan functions.
    # This is rarely used, but e.g. the scan for if/if64 needs
    # this to evaluate if_disabled_if64_checks.
    check_api_utils.set_hostname(host_config.hostname)

    snmp.initialize_single_oid_cache(host_config)
    console.vverbose(" SNMP scan:\n")

    if not config.get_config_cache().in_binary_hostlist(host_config.hostname,
                                                        config.snmp_without_sys_descr):
        for oid, name in [(".1.3.6.1.2.1.1.1.0", "system description"),
                          (".1.3.6.1.2.1.1.2.0", "system object")]:
            value = snmp.get_single_oid(host_config, oid, do_snmp_scan=do_snmp_scan)
            if value is None:
                raise MKSNMPError(
                    "Cannot fetch %s OID %s. This might be OK for some bogus devices. "
                    "In that case please configure the ruleset \"Hosts without system "
                    "description OID\" to tell Check_MK not to fetch the system "
                    "description and system object OIDs." % (name, oid))
    else:
        # Fake OID values to prevent issues with a lot of scan functions
        console.vverbose(" Skipping system description OID "
                         "(Set .1.3.6.1.2.1.1.1.0 and .1.3.6.1.2.1.1.2.0 to \"\")\n")
        snmp.set_single_oid_cache(host_config, ".1.3.6.1.2.1.1.1.0", "")
        snmp.set_single_oid_cache(host_config, ".1.3.6.1.2.1.1.2.0", "")

    found_check_plugin_names = []
    if for_inv:
        items = inventory_plugins.inv_info.items()
    else:
        items = config.check_info.items()

    positive_found = []
    default_found = []

    for check_plugin_name, _unused_check in items:
        if config.service_ignored(host_config.hostname, check_plugin_name, None):
            continue
        else:
            if for_inv and not inventory_plugins.is_snmp_plugin(check_plugin_name):
                continue
            elif not for_inv and not cmk_base.check_utils.is_snmp_check(check_plugin_name):
                continue

        section_name = cmk_base.check_utils.section_name_of(check_plugin_name)
        # The scan function should be assigned to the section_name, because
        # subchecks sharing the same SNMP info of course should have
        # an identical scan function. But some checks do not do this
        # correctly
        if check_plugin_name in config.snmp_scan_functions:
            scan_function = config.snmp_scan_functions[check_plugin_name]
        elif section_name in config.snmp_scan_functions:
            scan_function = config.snmp_scan_functions[section_name]
        elif section_name in inventory_plugins.inv_info:
            scan_function = inventory_plugins.inv_info[section_name].get("snmp_scan_function")
        else:
            scan_function = None

        if scan_function:
            try:
                def oid_function(oid, default_value=None, cp_name=check_plugin_name):
                    value = snmp.get_single_oid(host_config, oid, cp_name,
                                                do_snmp_scan=do_snmp_scan)
                    return default_value if value is None else value

                result = scan_function(oid_function)
                if result is not None and not isinstance(result, (str, bool)):
                    if on_error == "warn":
                        console.warning(" SNMP scan function of %s returns invalid type %s." %
                                        (check_plugin_name, type(result)))
                    elif on_error == "raise":
                        raise MKGeneralException("SNMP Scan aborted.")
                elif result:
                    found_check_plugin_names.append(check_plugin_name)
                    positive_found.append(check_plugin_name)
            except MKGeneralException:
                # some error messages which we explicitly want to show to the user
                # should be raised through this
                raise
            except Exception:
                if on_error == "warn":
                    console.warning(" Exception in SNMP scan function of %s" % check_plugin_name)
                elif on_error == "raise":
                    raise
        else:
            found_check_plugin_names.append(check_plugin_name)
            default_found.append(check_plugin_name)

    _output_snmp_check_plugins("SNMP scan found", positive_found)
    if default_found:
        _output_snmp_check_plugins("SNMP without scan function", default_found)

    filtered = config.filter_by_management_board(host_config.hostname,
                                                 found_check_plugin_names,
                                                 for_mgmt_board,
                                                 for_discovery=True,
                                                 for_inventory=for_inv)

    _output_snmp_check_plugins("SNMP filtered check plugin names", filtered)
    snmp.write_single_oid_cache(host_config)
    return sorted(filtered)
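# Hedged sketch of a scan function as called by _snmp_scan() above: it receives
# the oid_function helper and returns a truthy value when the device looks like
# a match. The plugin name "example_check" and the registration line are
# illustrative assumptions, not taken from the source.
def _example_scan_function(oid):
    sys_descr = oid(".1.3.6.1.2.1.1.1.0", "")  # default "" keeps .lower() safe
    return "linux" in sys_descr.lower()

# config.snmp_scan_functions["example_check"] = _example_scan_function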
def scan_parents_of(config_cache, hosts, silent=False, settings=None):
    if settings is None:
        settings = {}

    if config.monitoring_host:
        nagios_ip = ip_lookup.lookup_ipv4_address(config.monitoring_host)
    else:
        nagios_ip = None

    os.putenv("LANG", "")
    os.putenv("LC_ALL", "")

    # Start processes in parallel
    procs = []
    for host in hosts:
        console.verbose("%s " % host)
        try:
            ip = ip_lookup.lookup_ipv4_address(host)
            command = [
                "traceroute", "-w",
                "%d" % settings.get("timeout", 8), "-q",
                "%d" % settings.get("probes", 2), "-m",
                "%d" % settings.get("max_ttl", 10), "-n", ip
            ]
            console.vverbose("Running '%s'\n" % subprocess.list2cmdline(command))

            procs.append((host, ip,
                          subprocess.Popen(command,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.STDOUT,
                                           close_fds=True)))
        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            procs.append((host, None, "ERROR: %s" % e))

    # Output marks with status of each single scan
    def dot(color, dot='o'):
        if not silent:
            console.output(tty.bold + color + dot + tty.normal)

    # Now all processes are running and we begin to read the answers. For each host
    # we add a triple to gateways: the gateway, a scan state and a diagnostic output
    gateways = []
    for host, ip, proc_or_error in procs:
        if isinstance(proc_or_error, six.string_types):
            lines = [proc_or_error]
            exitstatus = 1
        else:
            exitstatus = proc_or_error.wait()
            lines = [l.strip() for l in proc_or_error.stdout.readlines()]

        if exitstatus:
            dot(tty.red, '*')
            gateways.append((None, "failed", 0,
                             "Traceroute failed with exit code %d" % (exitstatus & 255)))
            continue

        if len(lines) == 1 and lines[0].startswith("ERROR:"):
            message = lines[0][6:].strip()
            console.verbose("%s: %s\n", host, message, stream=sys.stderr)
            dot(tty.red, "D")
            gateways.append((None, "dnserror", 0, message))
            continue

        elif len(lines) == 0:
            if cmk.utils.debug.enabled():
                raise MKGeneralException(
                    "Cannot execute %s. Is traceroute installed? Are you root?" % command)
            else:
                dot(tty.red, '!')
            continue

        elif len(lines) < 2:
            if not silent:
                console.error("%s: %s\n" % (host, ' '.join(lines)))
            gateways.append((None, "garbled", 0,
                             "The output of traceroute seems truncated:\n%s" % ("".join(lines))))
            dot(tty.blue)
            continue

        # Parse output of traceroute:
        # traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 40 byte packets
        #  1  * * *
        #  2  10.0.0.254  0.417 ms  0.459 ms  0.670 ms
        #  3  172.16.0.254  0.967 ms  1.031 ms  1.544 ms
        #  4  217.0.116.201  23.118 ms  25.153 ms  26.959 ms
        #  5  217.0.76.134  32.103 ms  32.491 ms  32.337 ms
        #  6  217.239.41.106  32.856 ms  35.279 ms  36.170 ms
        #  7  74.125.50.149  45.068 ms  44.991 ms *
        #  8  * 66.249.94.86  41.052 ms 66.249.94.88  40.795 ms
        #  9  209.85.248.59  43.739 ms  41.106 ms 216.239.46.240  43.208 ms
        # 10  216.239.48.53  45.608 ms  47.121 ms 64.233.174.29  43.126 ms
        # 11  209.85.255.245  49.265 ms  40.470 ms  39.870 ms
        # 12  8.8.8.8  28.339 ms  28.566 ms  28.791 ms
        routes = []
        for line in lines[1:]:
            parts = line.split()
            route = parts[1]
            if route.count('.') == 3:
                routes.append(route)
            elif route == '*':
                routes.append(None)  # No answer from this router
            else:
                if not silent:
                    console.error("%s: invalid output line from traceroute: '%s'\n" % (host, line))

        if len(routes) == 0:
            error = "incomplete output from traceroute. No routes found."
            console.error("%s: %s\n" % (host, error))
            gateways.append((None, "garbled", 0, error))
            dot(tty.red)
            continue

        # Only one entry -> host is directly reachable and gets nagios as parent -
        # if nagios is not the parent itself. Problem here: How can we determine
        # if the host in question is the monitoring host? The user must configure
        # this in monitoring_host.
        elif len(routes) == 1:
            if ip == nagios_ip:
                gateways.append((None, "root", 0, ""))  # We are the root-monitoring host
                dot(tty.white, 'N')
            elif config.monitoring_host:
                gateways.append(((config.monitoring_host, nagios_ip, None), "direct", 0, ""))
                dot(tty.cyan, 'L')
            else:
                gateways.append((None, "direct", 0, ""))
            continue

        # Try the farthest route which is not identical with the host itself
        ping_probes = settings.get("ping_probes", 5)
        skipped_gateways = 0
        route = None
        for r in routes[::-1]:
            if not r or (r == ip):
                continue
            # Do (optional) PING check in order to determine if that
            # gateway can be monitored via the standard host check
            if ping_probes:
                if not gateway_reachable_via_ping(r, ping_probes):
                    console.verbose("(not using %s, not reachable)\n", r, stream=sys.stderr)
                    skipped_gateways += 1
                    continue
            route = r
            break
        if not route:
            error = "No usable routing information"
            if not silent:
                console.error("%s: %s\n" % (host, error))
            gateways.append((None, "notfound", 0, error))
            dot(tty.blue)
            continue

        # TTLs already have been filtered out
        gateway_ip = route
        gateway = _ip_to_hostname(config_cache, route)
        if gateway:
            console.verbose("%s(%s) ", gateway, gateway_ip)
        else:
            console.verbose("%s ", gateway_ip)

        # Try to find DNS name of host via reverse DNS lookup
        dns_name = _ip_to_dnsname(gateway_ip)
        gateways.append(((gateway, gateway_ip, dns_name), "gateway", skipped_gateways, ""))
        dot(tty.green, 'G')

    return gateways
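# Hedged sketch of consuming the return value of scan_parents_of(): each entry
# is (gateway, state, skipped_gateways, message), where gateway is either None
# or a (hostname_or_None, ip, dns_name_or_None) triple. The host list passed in
# is an illustrative assumption.
for gateway, state, skipped, message in scan_parents_of(config_cache, ["myhost"]):
    if state in ("gateway", "direct") and gateway:
        gw_host, gw_ip, gw_dns = gateway
        console.output("parent: %s\n" % (gw_host or gw_dns or gw_ip))
    else:
        console.output("no parent (%s): %s\n" % (state, message))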
def save_snmpwalk_cache(hostname, fetchoid, rowinfo):
    base_dir = cmk.paths.var_dir + "/snmp_cache/" + hostname + "/"
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)
    console.vverbose(" Caching walk of %s\n" % fetchoid)
    file(base_dir + fetchoid, "w").write("%r\n" % rowinfo)
def get_stored_snmpwalk(hostname, oid):
    if oid.startswith("."):
        oid = oid[1:]

    if oid.endswith(".*"):
        oid_prefix = oid[:-2]
        dot_star = True
    else:
        oid_prefix = oid
        dot_star = False

    path = cmk.paths.snmpwalks_dir + "/" + hostname

    console.vverbose(" Loading %s from %s\n" % (oid, path))

    rowinfo = []

    # New implementation: use binary search
    def to_bin_string(oid):
        try:
            return tuple(map(int, oid.strip(".").split(".")))
        except:
            raise MKGeneralException("Invalid OID %s" % oid)

    def compare_oids(a, b):
        aa = to_bin_string(a)
        bb = to_bin_string(b)
        if len(aa) <= len(bb) and bb[:len(aa)] == aa:
            result = 0
        else:
            result = cmp(aa, bb)
        return result

    if hostname in g_walk_cache:
        lines = g_walk_cache[hostname]
    else:
        try:
            lines = file(path).readlines()
        except IOError:
            raise MKSNMPError("No snmpwalk file %s" % path)
        g_walk_cache[hostname] = lines

    begin = 0
    end = len(lines)
    hit = None
    while end - begin > 0:
        current = (begin + end) / 2
        parts = lines[current].split(None, 1)
        comp = parts[0]
        hit = compare_oids(oid_prefix, comp)
        if hit == 0:
            break
        elif hit == 1:  # we are too low
            begin = current + 1
        else:
            end = current

    if hit != 0:
        return []  # not found

    def collect_until(index, direction):
        rows = []
        # Handle case where we run after the end of the lines list
        if index >= len(lines):
            if direction > 0:
                return []
            else:
                index -= 1
        while True:
            line = lines[index]
            parts = line.split(None, 1)
            o = parts[0]
            if o.startswith('.'):
                o = o[1:]
            if o == oid or o.startswith(oid_prefix + "."):
                if len(parts) > 1:
                    try:
                        value = cmk_base.agent_simulator.process(parts[1])
                    except:
                        value = parts[1]  # agent simulator missing in precompiled mode
                else:
                    value = ""
                # Fix for missing starting oids
                rows.append(('.' + o, strip_snmp_value(value)))
                index += direction
                if index < 0 or index >= len(lines):
                    break
            else:
                break
        return rows

    rowinfo = collect_until(current, -1)
    rowinfo.reverse()
    rowinfo += collect_until(current + 1, 1)

    if dot_star:
        return [ rowinfo[0] ]
    else:
        return rowinfo