def _snmp_scan_cache_description(
    binary_host: bool,
    *,
    backend: ABCSNMPBackend,
) -> None:
    """Prime the single-OID cache with the system description/object OIDs.

    For regular hosts both OIDs are fetched live; an MKSNMPError is raised
    if either cannot be retrieved.  For "binary" hosts the fetch is skipped
    and empty strings are cached instead, so scan functions that rely on
    these OIDs keep working.
    """
    if not binary_host:
        for oid, name in [
            (OID_SYS_DESCR, "system description"),
            (OID_SYS_OBJ, "system object"),
        ]:
            value = snmp_modes.get_single_oid(
                oid,
                backend=backend,
            )
            if value is None:
                raise MKSNMPError(
                    "Cannot fetch %s OID %s. Please check your SNMP "
                    "configuration. Possible reason might be: Wrong credentials, "
                    "wrong SNMP version, Firewall rules, etc." % (name, oid),)
    else:
        # Fake OID values to prevent issues with a lot of scan functions
        console.vverbose(
            " Skipping system description OID "
            '(Set %s and %s to "")\n',
            OID_SYS_DESCR,
            OID_SYS_OBJ,
        )
        snmp_cache.set_single_oid_cache(OID_SYS_DESCR, "")
        snmp_cache.set_single_oid_cache(OID_SYS_OBJ, "")
def load(
    self,
    *,
    trees: Iterable[BackendSNMPTree],
) -> None:
    """Try to read the OIDs data from cache files"""
    # Do not load the cached data if *any* plugin needs live data
    do_not_load = set()
    for tree in trees:
        for oid in tree.oids:
            if not oid.save_to_cache:
                do_not_load.add(f"{tree.base}.{oid.column}")

    for path in self._iterfiles():
        fetchoid = self._name2oid(path.name)
        if fetchoid in do_not_load:
            continue

        console.vverbose(f" Loading {fetchoid} from walk cache {path}\n")
        try:
            read_walk = self._read_row(path)
        except Exception:
            console.vverbose(f" Failed to load {fetchoid} from walk cache {path}\n")
            if cmk.utils.debug.enabled():
                raise
            continue

        if read_walk is None:
            continue
        # 'False': no need to store this value: it is already stored!
        self._store[fetchoid] = (False, read_walk)
def _snmp_scan(
    sections: Iterable[SNMPScanSection],
    on_error: str = "ignore",
    do_snmp_scan: bool = True,
    *,
    binary_host: bool,
    backend: ABCSNMPBackend,
) -> Set[SectionName]:
    """Run the SNMP scan for *sections* and return the detected section names.

    Initializes the per-host single-OID cache, primes (or fakes) the system
    description OIDs, evaluates the scan functions, logs the result and
    finally persists the single-OID cache.
    """
    snmp_cache.initialize_single_oid_cache(backend.config)
    console.vverbose(" SNMP scan:\n")
    _snmp_scan_cache_description(
        binary_host=binary_host,
        do_snmp_scan=do_snmp_scan,
        backend=backend,
    )
    found_sections = _snmp_scan_find_sections(
        sections,
        do_snmp_scan=do_snmp_scan,
        on_error=on_error,
        backend=backend,
    )
    _output_snmp_check_plugins("SNMP scan found", found_sections)
    # Persist the cache so repeated scans can reuse the fetched OIDs.
    snmp_cache.write_single_oid_cache(backend.config)
    return found_sections
def fetch_all(
    sources: Iterable[Source],
    *,
    file_cache_max_age: file_cache.MaxAge,
    mode: Mode,
) -> Sequence[Tuple[Source, FetcherMessage]]:
    """Fetch raw data from every source and pair each source with its message."""
    console.verbose("%s+%s %s\n", tty.yellow, tty.normal, "Fetching data".upper())
    collected: List[Tuple[Source, FetcherMessage]] = []
    for source in sources:
        console.vverbose(" Source: %s/%s\n" % (source.source_type, source.fetcher_type))
        source.file_cache_max_age = file_cache_max_age
        # Track CPU time spent in the actual fetch only.
        with CPUTracker() as tracker:
            raw_data = source.fetch(mode)
        message = FetcherMessage.from_raw_data(
            raw_data,
            tracker.duration,
            source.fetcher_type,
        )
        collected.append((source, message))
    return collected
def _perform_snmpwalk(snmp_config, check_plugin_name, base_oid, fetchoid, *, backend): # type: (SNMPHostConfig, CheckPluginName, OID, OID, ABCSNMPBackend) -> SNMPRowInfo added_oids = set([]) # type: Set[OID] rowinfo = [] # type: SNMPRowInfo for context_name in snmp_config.snmpv3_contexts_of(check_plugin_name): rows = backend.walk(snmp_config, oid=fetchoid, check_plugin_name=check_plugin_name, table_base_oid=base_oid, context_name=context_name) # I've seen a broken device (Mikrotik Router), that broke after an # update to RouterOS v6.22. It would return 9 time the same OID when # .1.3.6.1.2.1.1.1.0 was being walked. We try to detect these situations # by removing any duplicate OID information if len(rows) > 1 and rows[0][0] == rows[1][0]: console.vverbose("Detected broken SNMP agent. Ignoring duplicate OID %s.\n" % rows[0][0]) rows = rows[:1] for row_oid, val in rows: if row_oid in added_oids: console.vverbose("Duplicate OID found: %s (%r)\n" % (row_oid, val)) else: rowinfo.append((row_oid, val)) added_oids.add(row_oid) return rowinfo
def _load_raw_autochecks( *, path: Path, check_variables: Optional[Dict[str, Any]], ) -> Union[List[Dict[str, Any]], Tuple]: """Read raw autochecks and resolve parameters""" if not path.exists(): return [] console.vverbose("Loading autochecks from %s\n", path) with path.open(encoding="utf-8") as f: raw_file_content = f.read() if not raw_file_content.strip(): return [] try: return eval(raw_file_content, check_variables or {}, check_variables or {}) except NameError as exc: raise MKGeneralException( "%s in an autocheck entry of host '%s' (%s). This entry is in pre Checkmk 1.7 " "format and needs to be converted. This is normally done by " "\"cmk-update-config -v\" during \"omd update\". Please execute " "\"cmk-update-config -v\" for converting the old configuration." % (str(exc).capitalize(), path.stem, path))
def _perform_snmpwalk(section_name: Optional[SectionName], base_oid: OID, fetchoid: OID, *,
                      backend: ABCSNMPBackend) -> SNMPRowInfo:
    """Walk *fetchoid* in every configured SNMPv3 context of *section_name*.

    Rows are concatenated across contexts; OIDs already seen in an earlier
    context are dropped (first answer wins).
    """
    added_oids: Set[OID] = set()  # idiomatic empty set instead of set([])
    rowinfo: SNMPRowInfo = []
    for context_name in backend.config.snmpv3_contexts_of(section_name):
        rows = backend.walk(
            oid=fetchoid,
            # revert back to legacy "possibly-empty-string"-Type
            # TODO: pass Optional[SectionName] along!
            check_plugin_name=str(section_name) if section_name else "",
            table_base_oid=base_oid,
            context_name=context_name,
        )

        # I've seen a broken device (Mikrotik Router), that broke after an
        # update to RouterOS v6.22. It would return 9 time the same OID when
        # .1.3.6.1.2.1.1.1.0 was being walked. We try to detect these situations
        # by removing any duplicate OID information
        if len(rows) > 1 and rows[0][0] == rows[1][0]:
            console.vverbose(
                "Detected broken SNMP agent. Ignoring duplicate OID %s.\n" % rows[0][0])
            rows = rows[:1]

        for row_oid, val in rows:
            if row_oid in added_oids:
                console.vverbose("Duplicate OID found: %s (%r)\n" % (row_oid, val))
            else:
                rowinfo.append((row_oid, val))
                added_oids.add(row_oid)
    return rowinfo
def load(
    self,
    *,
    trees: Iterable[BackendSNMPTree],
) -> None:
    """Try to read the OIDs data from cache files"""
    # Only OIDs flagged for caching are worth reading.  # no point in reading otherwise
    cacheable = ((tree, oid) for tree in trees for oid in tree.oids if oid.save_to_cache)
    for tree, oid in cacheable:
        fetchoid = f"{tree.base}.{oid.column}"
        path = self._path / fetchoid
        console.vverbose(f" Loading {fetchoid} from walk cache {path}\n")
        try:
            read_walk = store.load_object_from_file(path)
        except Exception:
            console.verbose(f" Failed to load {fetchoid} from walk cache {path}\n")
            if cmk.utils.debug.enabled():
                raise
            continue
        if read_walk is None:
            continue
        self._store[fetchoid] = (oid.save_to_cache, read_walk)  # (True, ...)
def push_phase(phase: str) -> None:
    """Enter *phase*: book elapsed time to the current phase, then push the new one."""
    if not _is_not_tracking():
        console.vverbose("[cpu_tracking] Push phase '%s' (Stack: %r)\n" % (phase, phase_stack))
        # Attribute the time spent so far to the phase we are leaving.
        _add_times_to_phase()
        phase_stack.append(phase)
def pop_phase() -> None:
    """Leave the current phase: book elapsed time to it, then pop it off the stack."""
    if not _is_not_tracking():
        console.vverbose("[cpu_tracking] Pop phase '%s' (Stack: %r)\n" %
                         (phase_stack[-1], phase_stack))
        _add_times_to_phase()
        phase_stack.pop()
def get(self, snmp_config, oid, context_name=None):
    # type: (SNMPHostConfig, OID, Optional[ContextName]) -> Optional[RawValue]
    """Fetch a single OID value by spawning an snmpget/snmpgetnext process.

    Returns the stripped value, or None on any SNMP-level error or when the
    agent answers with an "end of MIB"-style message.
    """
    # A trailing ".*" means: take the next OID below the prefix (snmpgetnext).
    if oid.endswith(".*"):
        oid_prefix = oid[:-2]
        commandtype = "getnext"
    else:
        oid_prefix = oid
        commandtype = "get"

    protospec = self._snmp_proto_spec(snmp_config)
    ipaddress = snmp_config.ipaddress
    if snmp_config.is_ipv6_primary:
        ipaddress = "[" + ipaddress + "]"  # literal IPv6 addresses must be bracketed
    portspec = self._snmp_port_spec(snmp_config)
    command = (
        self._snmp_base_command(commandtype, snmp_config, context_name) + [
            "-On", "-OQ", "-Oe", "-Ot",
            "%s%s%s" % (protospec, ipaddress, portspec),
            oid_prefix
        ])

    console.vverbose("Running '%s'\n" % subprocess.list2cmdline(command))

    snmp_process = subprocess.Popen(
        command,
        close_fds=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="utf-8",
    )
    # NOTE(review): wait() before reading the pipes can deadlock if the child
    # fills a pipe buffer; output of a single get is expected to be tiny —
    # confirm.
    exitstatus = snmp_process.wait()
    if snmp_process.stderr is None or snmp_process.stdout is None:
        raise TypeError()
    if exitstatus:
        console.verbose(tty.red + tty.bold + "ERROR: " + tty.normal + "SNMP error\n")
        console.verbose(snmp_process.stderr.read() + "\n")
        return None

    line = snmp_process.stdout.readline().strip()
    if not line:
        console.verbose("Error in response to snmpget.\n")
        return None

    parts = line.split("=", 1)
    if len(parts) != 2:
        return None
    item = parts[0]
    value = parts[1].strip()
    console.vverbose("SNMP answer: ==> [%s]\n" % value)
    # These answers mean "no such value"; treat them all as a miss.
    if value.startswith('No more variables') or \
       value.startswith('End of MIB') or \
       value.startswith('No Such Object available') or \
       value.startswith('No Such Instance currently exists'):
        return None

    # In case of .*, check if prefix is the one we are looking for
    if commandtype == "getnext" and not item.startswith(oid_prefix + "."):
        return None

    return strip_snmp_value(value)
def walk(self, snmp_config, oid, check_plugin_name=None, table_base_oid=None, context_name=None):
    # type: (SNMPHostConfig, str, Optional[str], Optional[str], Optional[str]) -> SNMPRowInfo
    """Walk *oid* by spawning an snmpwalk-style process.

    Raises MKSNMPError when the process exits non-zero.  On MKTimeout the
    child process is terminated to avoid leaking it.
    """
    protospec = self._snmp_proto_spec(snmp_config)
    ipaddress = snmp_config.ipaddress
    if snmp_config.is_ipv6_primary:
        ipaddress = "[" + ipaddress + "]"  # literal IPv6 addresses must be bracketed
    portspec = self._snmp_port_spec(snmp_config)
    command = self._snmp_walk_command(snmp_config, context_name)
    command += [
        "-OQ", "-OU", "-On", "-Ot",
        "%s%s%s" % (protospec, ipaddress, portspec), oid
    ]
    console.vverbose("Running '%s'\n" % subprocess.list2cmdline(command))

    snmp_process = None
    exitstatus = None
    rowinfo = []  # type: SNMPRowInfo
    try:
        snmp_process = subprocess.Popen(command,
                                        close_fds=True,
                                        stdin=open(os.devnull),
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        encoding="utf-8")
        rowinfo = self._get_rowinfo_from_snmp_process(snmp_process)
    except MKTimeout:
        # On timeout exception try to stop the process to prevent child process "leakage"
        if snmp_process:
            os.kill(snmp_process.pid, signal.SIGTERM)
            snmp_process.wait()
        raise
    finally:
        # The stdout and stderr pipe are not closed correctly on a MKTimeout
        # Normally these pipes getting closed after p.communicate finishes
        # Closing them a second time in a OK scenario won't hurt neither..
        if snmp_process:
            exitstatus = snmp_process.wait()
            if snmp_process.stderr:
                # NOTE(review): `error` is only bound here.  If stderr were
                # ever falsy while exitstatus != 0, the block below would
                # raise NameError — stderr=PIPE should guarantee a stream;
                # confirm.
                error = snmp_process.stderr.read()
                snmp_process.stderr.close()
            if snmp_process.stdout:
                snmp_process.stdout.close()

    if exitstatus:
        console.verbose(tty.red + tty.bold + "ERROR: " + tty.normal +
                        "SNMP error: %s\n" % ensure_str(error).strip())
        raise MKSNMPError(
            "SNMP Error on %s: %s (Exit-Code: %d)" %
            (ipaddress, ensure_str(error).strip(), exitstatus))
    return rowinfo
def _output_snmp_check_plugins(title: str, collection: Iterable[CheckPluginNameStr]) -> None:
    """Log (very verbose) the plugin names in *collection* under *title*; '-' if empty."""
    collection_out = " ".join(sorted(collection)) if collection else "-"
    console.vverbose(" %-35s%s%s%s%s\n" %
                     (title, tty.bold, tty.yellow, collection_out, tty.normal))
def load_walk_cache(
    *,
    trees: Iterable[BackendSNMPTree],
    host_name: HostName,
) -> WalkCache:
    """Load every cacheable OID walk for *host_name* from its cache file."""
    cache = {}
    # Only OIDs flagged for caching are worth reading.
    cacheable = ((tree, oid) for tree in trees for oid in tree.oids if oid.save_to_cache)
    for tree, oid in cacheable:
        fetchoid: OID = f"{tree.base}.{oid.column}"
        path = _snmpwalk_cache_path(host_name, fetchoid)
        console.vverbose(f" Loading {fetchoid} from walk cache {path}\n")
        try:
            read_walk = store.load_object_from_file(path)
        except Exception:
            console.verbose(f" Failed to load {fetchoid} from walk cache {path}\n")
            if cmk.utils.debug.enabled():
                raise
            continue
        if read_walk is None:
            continue
        cache[fetchoid] = (oid.save_to_cache, read_walk)
    return cache
def fetch_all(
    nodes: Iterable[Tuple[HostName, Optional[HostAddress], Sequence[Source]]],
    *,
    max_cachefile_age: int,
    host_config: HostConfig,
) -> Iterator[FetcherMessage]:
    """Fetch from every source of every node, yielding one message per fetch."""
    console.verbose("%s+%s %s\n", tty.yellow, tty.normal, "Fetching data".upper())
    # TODO(ml): It is not clear to me in which case it is possible for the following to hold true
    # for any source in nodes:
    #   - hostname != source.hostname
    #   - ipaddress != source.ipaddress
    # If this is impossible, then we do not need the Tuple[HostName, HostAddress, ...].
    for _hostname, _ipaddress, sources in nodes:
        for source in sources:
            console.vverbose(" Source: %s/%s\n" % (source.source_type, source.fetcher_type))
            source.file_cache_max_age = max_cachefile_age
            # Track CPU time spent in the actual fetch only.
            with CPUTracker() as tracker:
                raw_data = source.fetch()
            message = FetcherMessage.from_raw_data(
                raw_data,
                tracker.duration,
                source.fetcher_type,
            )
            yield message
def get_single_oid(snmp_config, oid, check_plugin_name=None, do_snmp_scan=True):
    # type: (SNMPHostConfig, str, Optional[str], bool) -> Optional[DecodedString]
    """Fetch a single OID value, consulting and filling the single-OID cache.

    Returns the decoded value, or None when no context answered.  The result
    (including None) is written back to the cache.
    """
    # The OID can end with ".*". In that case we do a snmpgetnext and try to
    # find an OID with the prefix in question. The *cache* is working including
    # the X, however.
    if oid[0] != '.':
        if cmk.utils.debug.enabled():
            raise MKGeneralException("OID definition '%s' does not begin with a '.'" % oid)
        oid = '.' + oid

    # TODO: Use generic cache mechanism
    if snmp_cache.is_in_single_oid_cache(oid):
        console.vverbose(" Using cached OID %s: " % oid)
        cached_value = snmp_cache.get_oid_from_single_oid_cache(oid)
        console.vverbose("%s%s%r%s\n" % (tty.bold, tty.green, cached_value, tty.normal))
        return cached_value

    # get_single_oid() can only return a single value. When SNMPv3 is used with multiple
    # SNMP contexts, all contextes will be queried until the first answer is received.
    if check_plugin_name is not None and snmp_config.is_snmpv3_host:
        snmp_contexts = _snmpv3_contexts_of(snmp_config, check_plugin_name)
    else:
        snmp_contexts = [None]

    console.vverbose(" Getting OID %s: " % oid)
    # NOTE(review): if snmp_contexts were ever empty, `value` would be unbound
    # below — confirm the contexts list is never empty.
    for context_name in snmp_contexts:
        try:
            value = SNMPBackendFactory.get(snmp_config,
                                           use_cache=_enforce_stored_walks,
                                           oid=oid,
                                           context_name=context_name)
            if value is not None:
                break  # Use first received answer in case of multiple contextes
        except Exception:
            if cmk.utils.debug.enabled():
                raise
            value = None

    if value is not None:
        console.vverbose("%s%s%r%s\n" % (tty.bold, tty.green, value, tty.normal))
    else:
        console.vverbose("failed.\n")

    if value is not None:
        decoded_value = convert_to_unicode(
            value, encoding=snmp_config.character_encoding)  # type: Optional[DecodedString]
    else:
        decoded_value = value

    snmp_cache.set_single_oid_cache(oid, decoded_value)
    return decoded_value
def walk(self, snmp_config, oid, check_plugin_name=None, table_base_oid=None, context_name=None):
    # type: (SNMPHostConfig, OID, Optional[CheckPluginName], Optional[OID], Optional[ContextName]) -> SNMPRowInfo
    """Answer a walk from a stored snmpwalk file instead of a live agent.

    Binary-searches the (sorted) walk file for the OID prefix and collects
    all matching rows around the hit.
    """
    if oid.startswith("."):
        oid = oid[1:]

    # A trailing ".*" means: return only the single first OID below the prefix.
    if oid.endswith(".*"):
        oid_prefix = oid[:-2]
        dot_star = True
    else:
        oid_prefix = oid
        dot_star = False

    path = cmk.utils.paths.snmpwalks_dir + "/" + snmp_config.hostname
    console.vverbose(" Loading %s from %s\n" % (oid, path))

    # The walk file is read once per host and kept in a process-wide cache.
    if snmp_cache.host_cache_contains(snmp_config.hostname):
        lines = snmp_cache.host_cache_get(snmp_config.hostname)
    else:
        try:
            lines = open(path).readlines()
        except IOError:
            raise MKSNMPError("No snmpwalk file %s" % path)
        snmp_cache.host_cache_set(snmp_config.hostname, lines)

    begin = 0
    end = len(lines)
    hit = None
    while end - begin > 0:
        current = (begin + end) // 2
        # skip over values including newlines to the next oid
        # NOTE(review): lines[current] is evaluated before the bound check
        # `current < end`; a walk file whose tail consists only of
        # continuation lines could index past `end` — confirm walk files
        # always end with an OID line.
        while not lines[current].startswith(".") and current < end:
            current += 1
        parts = lines[current].split(None, 1)
        comp = parts[0]
        hit = self._compare_oids(oid_prefix, comp)
        if hit == 0:
            break
        if hit == 1:  # we are too low
            begin = current + 1
        else:
            end = current

    if hit != 0:
        return []  # not found

    # Collect rows below and above the hit (reversed below, then forward).
    rowinfo = self._collect_until(oid, oid_prefix, lines, current, -1)
    rowinfo.reverse()
    rowinfo += self._collect_until(oid, oid_prefix, lines, current + 1, 1)

    if dot_star:
        return [rowinfo[0]]

    return rowinfo
def start(initial_phase: str) -> None:
    """Reset all CPU-tracking state and begin tracking in *initial_phase*."""
    global times, last_time_snapshot
    console.vverbose("[cpu_tracking] Start with phase '%s'\n" % initial_phase)
    times = {}
    last_time_snapshot = _time_snapshot()
    # Replace the whole stack with the single initial phase.
    phase_stack[:] = [initial_phase]
def _save_snmpwalk_cache(hostname: HostName, fetchoid: OID, rowinfo: SNMPRowInfo) -> None:
    """Persist the walk *rowinfo* for *fetchoid* to this host's walk cache.

    Creates the cache directory on demand.
    """
    path = _snmpwalk_cache_path(hostname, fetchoid)
    # exist_ok=True avoids the check-then-create race of the former
    # os.path.exists() + os.makedirs() pair when several processes save
    # caches concurrently.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    console.vverbose(" Saving walk of %s to walk cache %s\n" % (fetchoid, path))
    store.save_object_to_file(path, rowinfo, pretty=False)
def walk(
    self,
    oid: OID,
    section_name: Optional[SectionName] = None,
    table_base_oid: Optional[OID] = None,
    context_name: Optional[SNMPContextName] = None,
) -> SNMPRowInfo:
    """Answer a walk from this host's stored snmpwalk file.

    Binary-searches the (sorted) walk data for the OID prefix and collects
    all matching rows around the hit.
    """
    if oid.startswith("."):
        oid = oid[1:]

    # A trailing ".*" means: return only the single first OID below the prefix.
    if oid.endswith(".*"):
        oid_prefix = oid[:-2]
        dot_star = True
    else:
        oid_prefix = oid
        dot_star = False

    # The walk data is read once per host and kept in a process-wide cache.
    host_cache = snmp_cache.host_cache()
    try:
        lines = host_cache[self.config.hostname]
    except KeyError:
        path = cmk.utils.paths.snmpwalks_dir + "/" + self.config.hostname
        console.vverbose(" Loading %s from %s\n" % (oid, path))
        try:
            lines = StoredWalkSNMPBackend.read_walk_data(path)
        except IOError:
            raise MKSNMPError("No snmpwalk file %s" % path)
        host_cache[self.config.hostname] = lines

    begin = 0
    end = len(lines)
    hit = None
    while end - begin > 0:
        current = (begin + end) // 2
        parts = lines[current].split(None, 1)
        comp = parts[0]
        hit = StoredWalkSNMPBackend._compare_oids(oid_prefix, comp)
        if hit == 0:
            break
        if hit == 1:  # we are too low
            begin = current + 1
        else:
            end = current

    if hit != 0:
        return []  # not found

    # Collect rows below and above the hit (reversed below, then forward).
    rowinfo = StoredWalkSNMPBackend._collect_until(oid, oid_prefix, lines, current, -1)
    rowinfo.reverse()
    rowinfo += StoredWalkSNMPBackend._collect_until(
        oid, oid_prefix, lines, current + 1, 1)

    if dot_star:
        return [rowinfo[0]]

    return rowinfo
def _discover_plugins_services(
    *,
    check_plugin_name: CheckPluginName,
    host_key: HostKey,
    parsed_sections_broker: ParsedSectionsBroker,
    on_error: OnError,
) -> Iterator[AutocheckEntry]:
    """Run one check plugin's discovery function, yielding autocheck entries.

    Errors while parsing sections or inside the discovery function are
    handled according to *on_error* (raise / warn / silently ignore).
    """
    # Skip this check type if is ignored for that host
    if config.service_ignored(host_key.hostname, check_plugin_name, None):
        console.vverbose(" Skip ignored check plugin name '%s'\n" % check_plugin_name)
        return

    check_plugin = agent_based_register.get_check_plugin(check_plugin_name)
    if check_plugin is None:
        console.warning(" Missing check plugin: '%s'\n" % check_plugin_name)
        return

    try:
        kwargs = get_section_kwargs(parsed_sections_broker, host_key, check_plugin.sections)
    except Exception as exc:
        if cmk.utils.debug.enabled() or on_error is OnError.RAISE:
            raise
        if on_error is OnError.WARN:
            console.warning(" Exception while parsing agent section: %s\n" % exc)
        return
    if not kwargs:
        # Required sections are missing: nothing to discover.
        return

    disco_params = config.get_discovery_parameters(host_key.hostname, check_plugin)
    if disco_params is not None:
        # Build a new dict rather than mutating the broker's kwargs.
        kwargs = {**kwargs, "params": disco_params}

    try:
        yield from (
            AutocheckEntry(
                check_plugin_name=check_plugin.name,
                item=service.item,
                parameters=unwrap_parameters(service.parameters),
                # Convert from APIs ServiceLabel to internal ServiceLabel
                service_labels={label.name: label.value for label in service.labels},
            ) for service in check_plugin.discovery_function(**kwargs))
    except Exception as e:
        if on_error is OnError.RAISE:
            raise
        if on_error is OnError.WARN:
            console.warning(" Exception in discovery function of check plugin '%s': %s" %
                            (check_plugin.name, e))
def save(self) -> None:
    """Persist every walk marked for caching, one file per OID, below self._path."""
    self._path.mkdir(parents=True, exist_ok=True)
    for fetchoid, (should_save, rows) in self._store.items():
        if should_save:
            path = self._path / self._oid2name(fetchoid)
            console.vverbose(f" Saving walk of {fetchoid} to walk cache {path}\n")
            self._write_row(path, rows)
def _get_cached_snmpwalk(hostname: HostName, fetchoid: OID) -> Optional[SNMPRowInfo]:
    """Return the cached walk for *fetchoid* on *hostname*, or None.

    Any error while reading the cache is swallowed (unless debugging), so a
    broken cache file only costs a fresh walk.
    """
    path = _snmpwalk_cache_path(hostname, fetchoid)
    try:
        console.vverbose(" Loading %s from walk cache %s\n" % (fetchoid, path))
        return store.load_object_from_file(path)
    except Exception:
        if cmk.utils.debug.enabled():
            raise
        console.verbose(" Failed loading walk cache from %s. Continue without it.\n" % path)
    return None
def save(self) -> None:
    """Persist every walk marked for caching, one file per OID, below self._path."""
    self._path.mkdir(parents=True, exist_ok=True)
    for fetchoid, (should_save, rows) in self._store.items():
        if should_save:
            path = self._path / fetchoid
            console.vverbose(f" Saving walk of {fetchoid} to walk cache {path}\n")
            store.save_object_to_file(path, rows, pretty=False)
def _fake_description_object() -> None:
    """Fake OID values to prevent issues with a lot of scan functions"""
    console.vverbose(
        " Skipping system description OID "
        '(Set %s and %s to "")\n',
        OID_SYS_DESCR,
        OID_SYS_OBJ,
    )
    # Cache empty strings for both description OIDs.
    for described_oid in (OID_SYS_DESCR, OID_SYS_OBJ):
        snmp_cache.single_oid_cache()[described_oid] = ""
def phase(phase_name: str) -> Iterator[None]:
    """Measure CPU time spent inside the ``with`` body and book it to *phase_name*."""
    console.vverbose("[cpu_tracking] Push phase %r\n", phase_name)
    entry = Snapshot.take()
    try:
        yield
    finally:
        console.vverbose("[cpu_tracking] Pop phase %r\n", phase_name)
        # Take the exit snapshot unconditionally so timing stays consistent.
        elapsed = Snapshot.take() - entry
        if is_tracking():
            times[phase_name] += elapsed
def _discover_plugins_services(
    *,
    check_plugin_name: CheckPluginName,
    host_name: HostName,
    ipaddress: Optional[HostAddress],
    parsed_sections_broker: ParsedSectionsBroker,
    on_error: OnError,
) -> Iterator[Service]:
    """Run one check plugin's discovery function, yielding enriched services.

    Errors while parsing sections or inside the discovery function are
    handled according to *on_error* (raise / warn / silently ignore).
    """
    # Skip this check type if is ignored for that host
    if config.service_ignored(host_name, check_plugin_name, None):
        console.vverbose(" Skip ignored check plugin name '%s'\n" % check_plugin_name)
        return

    check_plugin = agent_based_register.get_check_plugin(check_plugin_name)
    if check_plugin is None:
        console.warning(" Missing check plugin: '%s'\n" % check_plugin_name)
        return

    # Management-board plugins read their sections from the management source.
    host_key = HostKey(
        host_name,
        ipaddress,
        SourceType.MANAGEMENT if check_plugin.name.is_management_name() else SourceType.HOST,
    )

    try:
        kwargs = parsed_sections_broker.get_section_kwargs(host_key, check_plugin.sections)
    except Exception as exc:
        if cmk.utils.debug.enabled() or on_error is OnError.RAISE:
            raise
        if on_error is OnError.WARN:
            console.warning(" Exception while parsing agent section: %s\n" % exc)
        return
    if not kwargs:
        # Required sections are missing: nothing to discover.
        return

    disco_params = config.get_discovery_parameters(host_name, check_plugin)
    if disco_params is not None:
        kwargs["params"] = disco_params

    try:
        plugins_services = check_plugin.discovery_function(**kwargs)
        yield from _enriched_discovered_services(host_name, check_plugin.name, plugins_services)
    except Exception as e:
        if on_error is OnError.RAISE:
            raise
        if on_error is OnError.WARN:
            console.warning(" Exception in discovery function of check plugin '%s': %s" %
                            (check_plugin.name, e))
def service_outside_check_period(
    config_cache: config.ConfigCache, hostname: HostName, description: ServiceName
) -> bool:
    """Return True iff the service is currently outside its configured timeperiod."""
    period = config_cache.check_period_of_service(hostname, description)
    if period is None:
        # No timeperiod configured: the service is always checkable.
        return False
    if not cmk.base.core.check_timeperiod(period):
        console.verbose("Skipping service %s: currently not in timeperiod %s.\n", description,
                        period)
        return True
    console.vverbose("Service %s: timeperiod %s is currently active.\n", description, period)
    return False
def get_single_oid(oid: str,
                   *,
                   section_name: Optional[SectionName] = None,
                   backend: SNMPBackend) -> Optional[SNMPDecodedString]:
    """Fetch a single OID value, consulting and filling the single-OID cache.

    Returns the decoded value, or None when no context answered.  The result
    (including None) is written back to the cache.
    """
    # The OID can end with ".*". In that case we do a snmpgetnext and try to
    # find an OID with the prefix in question. The *cache* is working including
    # the X, however.
    if oid[0] != ".":
        if cmk.utils.debug.enabled():
            raise MKGeneralException("OID definition '%s' does not begin with a '.'" % oid)
        oid = "." + oid

    # TODO: Use generic cache mechanism
    if oid in snmp_cache.single_oid_cache():
        console.vverbose(" Using cached OID %s: " % oid)
        cached_value = snmp_cache.single_oid_cache()[oid]
        console.vverbose("%s%s%r%s\n" % (tty.bold, tty.green, cached_value, tty.normal))
        return cached_value

    # get_single_oid() can only return a single value. When SNMPv3 is used with multiple
    # SNMP contexts, all contextes will be queried until the first answer is received.
    console.vverbose(" Getting OID %s: " % oid)
    # NOTE(review): if the contexts iterable were ever empty, `value` would be
    # unbound below — confirm snmpv3_contexts_of() never yields nothing.
    for context_name in backend.config.snmpv3_contexts_of(section_name):
        try:
            value = backend.get(
                oid=oid,
                context_name=context_name,
            )
            if value is not None:
                break  # Use first received answer in case of multiple contextes
        except Exception:
            if cmk.utils.debug.enabled():
                raise
            value = None

    if value is not None:
        console.vverbose("%s%s%r%s\n" % (tty.bold, tty.green, value, tty.normal))
    else:
        console.vverbose("failed.\n")

    if value is not None:
        decoded_value: Optional[SNMPDecodedString] = backend.config.ensure_str(
            value)  # used ensure_str function with different possible encoding arguments
    else:
        decoded_value = value

    snmp_cache.single_oid_cache()[oid] = decoded_value
    return decoded_value
def _save_walk_cache(host_name: HostName, cache: WalkCache) -> None:
    """Write all cache-flagged walks from *cache* to per-OID cache files.

    Creates the host's cache directory on demand.
    """
    cache_dir = _snmpwalk_cache_path(host_name)
    # exist_ok=True avoids the check-then-create race of the former
    # os.path.exists() + os.makedirs() pair when several processes save
    # caches concurrently.
    os.makedirs(cache_dir, exist_ok=True)
    for fetchoid, (save_flag, rowinfo) in cache.items():
        if not save_flag:
            continue
        path = _snmpwalk_cache_path(host_name, fetchoid)
        console.vverbose(f" Saving walk of {fetchoid} to walk cache {path}\n")
        store.save_object_to_file(path, rowinfo, pretty=False)