def convert_context_to_unicode(context):
    # type: (EventContext) -> None
    """Decode every byte-string value of *context* to unicode, in place.

    Values that are not byte strings are left untouched.  Undecodable
    values are replaced by the marker u"(Invalid byte sequence)" instead
    of raising.
    """
    for name in context:
        raw = context[name]
        if not isinstance(raw, str):
            continue
        # Assigning to an existing key while iterating is safe: the dict
        # is never resized, only its values are replaced.
        context[name] = convert_to_unicode(raw, on_error=u"(Invalid byte sequence)")
def get_single_oid(snmp_config, oid, check_plugin_name=None, do_snmp_scan=True):
    # type: (SNMPHostConfig, str, Optional[str], bool) -> Optional[DecodedString]
    # Fetch one OID value from the host, consulting the module-level
    # single-OID cache first and storing the (decoded) result back into it.
    # Returns None when the query failed on every SNMP context.
    #
    # NOTE(review): do_snmp_scan is accepted but never used in this body —
    # confirm whether callers still rely on passing it.
    #
    # The OID can end with ".*". In that case we do a snmpgetnext and try to
    # find an OID with the prefix in question. The *cache* is working including
    # the X, however.
    if oid[0] != '.':
        # A leading dot is required; only hard-fail in debug mode, otherwise
        # silently normalize the OID.
        if cmk.utils.debug.enabled():
            raise MKGeneralException("OID definition '%s' does not begin with a '.'" % oid)
        oid = '.' + oid

    # TODO: Use generic cache mechanism
    if _is_in_single_oid_cache(oid):
        console.vverbose(" Using cached OID %s: " % oid)
        cached_value = _get_oid_from_single_oid_cache(oid)
        console.vverbose("%s%s%r%s\n" % (tty.bold, tty.green, cached_value, tty.normal))
        return cached_value

    # get_single_oid() can only return a single value. When SNMPv3 is used with multiple
    # SNMP contexts, all contextes will be queried until the first answer is received.
    if check_plugin_name is not None and snmp_utils.is_snmpv3_host(snmp_config):
        snmp_contexts = _snmpv3_contexts_of(snmp_config, check_plugin_name)
    else:
        # Non-SNMPv3 hosts (or no plugin name): query without a context.
        snmp_contexts = [None]

    console.vverbose(" Getting OID %s: " % oid)
    # NOTE(review): if snmp_contexts were ever empty, `value` below would be
    # unbound (NameError) — presumably _snmpv3_contexts_of never returns an
    # empty list; verify.
    for context_name in snmp_contexts:
        try:
            snmp_backend = SNMPBackendFactory().factory(
                snmp_config, enforce_stored_walks=_enforce_stored_walks)
            value = snmp_backend.get(snmp_config, oid, context_name)
            if value is not None:
                break  # Use first received answer in case of multiple contextes
        except Exception:
            # Best-effort: a failing context yields None unless debugging.
            if cmk.utils.debug.enabled():
                raise
            value = None

    if value is not None:
        console.vverbose("%s%s%r%s\n" % (tty.bold, tty.green, value, tty.normal))
    else:
        console.vverbose("failed.\n")

    if value is not None:
        # Decode using the host's configured character encoding.
        decoded_value = convert_to_unicode(
            value, encoding=snmp_config.character_encoding)  # type: Optional[DecodedString]
    else:
        decoded_value = value

    # Cache even a failed (None) lookup so it is not retried.
    set_single_oid_cache(oid, decoded_value)
    return decoded_value
def _sanitize_snmp_encoding(snmp_config, columns):
    """Decode raw SNMP column values in place according to each column's encoding.

    Columns flagged "string" are decoded with the host's configured character
    encoding; all other columns are run through _snmp_decode_binary.  The
    (mutated) columns list is returned for convenience.
    """
    snmp_encoding = snmp_config.character_encoding

    # Named function instead of an assigned lambda, consistent with the
    # decode_string_func helper used elsewhere in this module.
    def decode_string_func(s):
        return convert_to_unicode(s, encoding=snmp_encoding)

    for index, (column, value_encoding) in enumerate(columns):
        # Wrap map() in list(): under Python 3 map() is a lazy iterator and
        # storing it here would hand map objects to downstream consumers.
        # Under Python 2 this is identical to the previous behavior.
        if value_encoding == "string":
            columns[index] = list(map(decode_string_func, column))
        else:
            columns[index] = list(map(_snmp_decode_binary, column))
    return columns
def _sanitize_snmp_encoding(snmp_config, columns):
    # type: (SNMPHostConfig, ResultColumnsSanitized) -> ResultColumnsDecoded
    """Decode raw SNMP column values according to each column's encoding.

    Columns flagged "string" are decoded with the host's configured character
    encoding; all other columns are run through _snmp_decode_binary.  A new
    list of decoded columns is returned; the input is not modified.
    """
    snmp_encoding = snmp_config.character_encoding
    decode_string_func = lambda s: convert_to_unicode(s, encoding=snmp_encoding)

    new_columns = []  # type: ResultColumnsDecoded
    for column, value_encoding in columns:
        # Use list comprehensions instead of bare map(): under Python 3
        # map() returns a lazy iterator, so the previous code appended map
        # objects rather than decoded lists to new_columns.
        if value_encoding == "string":
            new_columns.append([decode_string_func(c) for c in column])
        else:
            new_columns.append([_snmp_decode_binary(c) for c in column])
    return new_columns
def _parse_autocheck_entry(hostname, entry, service_description):
    # type: (HostName, Union[ast.Tuple, ast.Dict], GetServiceDescription) -> Optional[DiscoveredService]
    # Convert one parsed autocheck AST node into a DiscoveredService.
    # Returns None when computing the service description fails (the entry is
    # silently skipped); raises on structurally invalid entries.
    if isinstance(entry, ast.Tuple):
        # Pre-1.6 tuple format: no service labels stored, use an empty dict node.
        ast_check_plugin_name, ast_item, ast_parameters_unresolved = _parse_pre_16_tuple_autocheck_entry(
            entry)
        ast_service_labels = ast.Dict()
    elif isinstance(entry, ast.Dict):
        ast_check_plugin_name, ast_item, ast_parameters_unresolved, ast_service_labels = \
            _parse_dict_autocheck_entry(entry)
    else:
        raise Exception("Invalid autocheck: Wrong type: %r" % entry)

    if not isinstance(ast_check_plugin_name, ast.Str):
        raise Exception("Invalid autocheck: Wrong check plugin type: %r" % ast_check_plugin_name)
    check_plugin_name = ast_check_plugin_name.s

    item = None  # type: Item
    if isinstance(ast_item, ast.Str):
        item = ast_item.s
    elif isinstance(ast_item, ast.Num) and isinstance(ast_item.n, (int, float)):
        # NOTE: We exclude complex here. :-)
        item = "%s" % int(ast_item.n)
    elif isinstance(ast_item, ast.Name) and ast_item.id == "None":
        # NOTE(review): `None` parses as ast.Name only on Python 2; on
        # Python 3 it is ast.NameConstant/ast.Constant — confirm the
        # interpreter version this runs under.
        item = None
    else:
        raise Exception("Invalid autocheck: Wrong item type: %r" % ast_item)

    # With Check_MK 1.2.7i3 items are now defined to be unicode
    # strings. Convert items from existing autocheck files for
    # compatibility.
    if isinstance(item, str):
        item = convert_to_unicode(item)

    try:
        description = service_description(hostname, check_plugin_name, item)
    except Exception:
        return None  # ignore

    return DiscoveredService(
        check_plugin_name, item, description,
        _parse_unresolved_parameters_from_ast(ast_parameters_unresolved),
        service_labels=_parse_discovered_service_label_from_ast(ast_service_labels))
def decode_string_func(s):
    # Decode one raw SNMP string value to unicode.
    # NOTE(review): relies on `snmp_encoding` from the enclosing scope
    # (the host's configured character encoding) — this is a closure, not
    # a standalone function.
    return convert_to_unicode(s, encoding=snmp_encoding)
def _parse_info(self, raw_data):
    # type: (RawAgentData) -> AgentHostSections
    """Split agent output in chunks, splits lines by whitespaces.

    Returns a HostSections() object.
    """
    sections = {}  # type: AgentSections
    # Unparsed info for other hosts. A dictionary, indexed by the piggybacked host name.
    # The value is a list of lines which were received for this host.
    piggybacked_raw_data = {}  # type: PiggybackRawData
    piggybacked_hostname = None
    piggybacked_cached_at = int(time.time())
    # Transform to seconds and give the piggybacked host a little bit more time
    piggybacked_cache_age = int(1.5 * 60 * self._host_config.check_mk_check_interval)

    # handle sections with option persist(...)
    persisted_sections = {}  # type: PersistedAgentSections

    section_content = []  # type: AgentSectionContent
    section_options = {}  # type: Dict[str, Optional[str]]
    agent_cache_info = {}  # type: SectionCacheInfo
    separator = None  # type: Optional[str]
    encoding = None
    for line in raw_data.split("\n"):
        line = line.rstrip("\r")
        stripped_line = line.strip()
        # <<<<host>>>> switches piggyback mode: following data belongs to
        # another host until the next piggyback marker.
        if stripped_line[:4] == '<<<<' and stripped_line[-4:] == '>>>>':
            piggybacked_hostname =\
                self._get_sanitized_and_translated_piggybacked_hostname(stripped_line)

        elif piggybacked_hostname:  # processing data for an other host
            if stripped_line[:3] == '<<<' and stripped_line[-3:] == '>>>':
                line = self._add_cached_info_to_piggybacked_section_header(
                    stripped_line, piggybacked_cached_at, piggybacked_cache_age)
            piggybacked_raw_data.setdefault(piggybacked_hostname, []).append(line)

        # Found normal section header
        # section header has format <<<name:opt1(args):opt2:opt3(args)>>>
        elif stripped_line[:3] == '<<<' and stripped_line[-3:] == '>>>':
            section_header = stripped_line[3:-3]
            headerparts = section_header.split(":")
            section_name = headerparts[0]
            section_options = {}
            opt_args = None  # type: Optional[str]
            for o in headerparts[1:]:
                # Options look like name or name(args); strip the ")".
                opt_parts = o.split("(")
                opt_name = opt_parts[0]
                if len(opt_parts) > 1:
                    opt_args = opt_parts[1][:-1]
                else:
                    opt_args = None
                section_options[opt_name] = opt_args

            content = sections.get(section_name, None)
            if content is None:  # section appears in output for the first time
                section_content = []
                sections[section_name] = section_content
            else:
                # Repeated section header: keep appending to the same list.
                section_content = content

            # sep(N): N is the ASCII code of the field separator; missing or
            # malformed values fall back to whitespace splitting (None).
            try:
                separator = chr(int(cast(str, section_options["sep"])))
            except Exception:
                separator = None

            # Split of persisted section for server-side caching
            if "persist" in section_options:
                until = int(cast(str, section_options["persist"]))
                cached_at = int(time.time())  # Estimate age of the data
                cache_interval = int(until - cached_at)
                agent_cache_info[section_name] = (cached_at, cache_interval)
                persisted_sections[section_name] = (cached_at, until, section_content)

            if "cached" in section_options:
                # cached(at,interval)
                cache_times = map(int, cast(str, section_options["cached"]).split(","))
                agent_cache_info[section_name] = cache_times[0], cache_times[1]

            # The section data might have a different encoding
            encoding = section_options.get("encoding")

        elif stripped_line != '':
            # Plain data line of the current section; empty lines are dropped.
            if "nostrip" not in section_options:
                line = stripped_line

            if encoding:
                decoded_line = convert_to_unicode(line, std_encoding=encoding)
            else:
                decoded_line = convert_to_unicode(line)

            section_content.append(decoded_line.split(separator))

    return AgentHostSections(sections, agent_cache_info, piggybacked_raw_data,
                             persisted_sections)
def _read_raw_autochecks_of(self, hostname):
    # type: (HostName) -> List[Service]
    """Read automatically discovered checks of one host.

    Loads <autochecks_dir>/<hostname>.mk and converts each entry into a
    Service object.  Returns an empty list when the file is missing or
    unreadable; raises MKGeneralException on pre-1.6 tuple entries and on
    non-string check plugin names.
    """
    basedir = cmk.utils.paths.autochecks_dir
    filepath = basedir + '/' + hostname + '.mk'

    result = []  # type: List[Service]
    if not os.path.exists(filepath):
        return result

    check_config = config.get_check_variables()
    try:
        cmk.base.console.vverbose("Loading autochecks from %s\n", filepath)
        # Use a context manager so the file handle is closed even when
        # eval() raises (the previous code leaked the handle).
        # SECURITY NOTE: eval() executes the file content; the autochecks
        # file is site-local configuration, not untrusted input.
        with open(filepath) as f:
            autochecks_raw = eval(f.read().decode("utf-8"), check_config,
                                  check_config)  # type: List[Dict]
    except SyntaxError as e:
        cmk.base.console.verbose("Syntax error in file %s: %s\n", filepath, e, stream=sys.stderr)
        if cmk.utils.debug.enabled():
            raise
        return result
    except Exception as e:
        cmk.base.console.verbose("Error in file %s:\n%s\n", filepath, e, stream=sys.stderr)
        if cmk.utils.debug.enabled():
            raise
        return result

    for entry in autochecks_raw:
        if isinstance(entry, tuple):
            raise MKGeneralException(
                "Invalid check entry '%r' of host '%s' (%s) found. This "
                "entry is in pre Checkmk 1.6 format and needs to be converted. This is "
                "normally done by \"cmk-update-config -v\" during \"omd update\". Please "
                "execute \"cmk-update-config -v\" for converting the old configuration." %
                (entry, hostname, filepath))

        labels = DiscoveredServiceLabels()
        for label_id, label_value in entry["service_labels"].items():
            labels.add_label(ServiceLabel(label_id, label_value))

        # With Check_MK 1.2.7i3 items are now defined to be unicode strings. Convert
        # items from existing autocheck files for compatibility. TODO remove this one day
        item = entry["item"]
        if isinstance(item, str):
            item = convert_to_unicode(item)

        if not isinstance(entry["check_plugin_name"], six.string_types):
            raise MKGeneralException("Invalid entry '%r' in check table of host '%s': "
                                     "The check type must be a string." % (entry, hostname))

        check_plugin_name = str(entry["check_plugin_name"])

        try:
            description = config.service_description(hostname, check_plugin_name, item)
        except Exception:
            continue  # ignore

        result.append(
            Service(
                check_plugin_name=check_plugin_name,
                item=item,
                description=description,
                parameters=entry["parameters"],
                service_labels=labels,
            ))
    return result
def _parse_host_section(self, raw_data):
    # type: (RawAgentData) -> AgentHostSections
    """Split agent output in chunks, splits lines by whitespaces.

    Returns a HostSections() object.
    """
    sections = {}  # type: AgentSections
    # Unparsed info for other hosts. A dictionary, indexed by the piggybacked host name.
    # The value is a list of lines which were received for this host.
    piggybacked_raw_data = {}  # type: PiggybackRawData
    piggybacked_hostname = None
    piggybacked_cached_at = int(time.time())
    # Transform to seconds and give the piggybacked host a little bit more time
    piggybacked_cache_age = int(1.5 * 60 * self._host_config.check_mk_check_interval)

    # handle sections with option persist(...)
    persisted_sections = {}  # type: PersistedAgentSections

    section_content = []  # type: AgentSectionContent
    section_options = {}  # type: Dict[str, Optional[str]]
    agent_cache_info = {}  # type: SectionCacheInfo
    separator = None  # type: Optional[str]
    encoding = None
    # The raw agent payload is processed as bytes; decoding happens per
    # data line using the section's "encoding" option.
    for line in raw_data.split(b"\n"):
        line = line.rstrip(b"\r")
        stripped_line = line.strip()
        # <<<<host>>>> switches piggyback mode: following data belongs to
        # another host until the next piggyback marker.
        if stripped_line[:4] == b'<<<<' and stripped_line[-4:] == b'>>>>':
            piggybacked_hostname =\
                self._get_sanitized_and_translated_piggybacked_hostname(stripped_line)

        elif piggybacked_hostname:  # processing data for an other host
            if stripped_line[:3] == b'<<<' and stripped_line[-3:] == b'>>>':
                line = self._add_cached_info_to_piggybacked_section_header(
                    stripped_line, piggybacked_cached_at, piggybacked_cache_age)
            piggybacked_raw_data.setdefault(piggybacked_hostname, []).append(line)

        # Found normal section header
        # section header has format <<<name:opt1(args):opt2:opt3(args)>>>
        elif stripped_line[:3] == b'<<<' and stripped_line[-3:] == b'>>>':
            section_name, section_options = self._parse_section_header(stripped_line[3:-3])
            # Repeated section headers keep appending to the same list.
            section_content = sections.setdefault(section_name, [])

            # sep(N): N is the ASCII code of the field separator; missing
            # option means whitespace splitting (separator None).
            raw_separator = section_options.get("sep")
            if raw_separator is None:
                separator = None
            else:
                separator = chr(int(raw_separator))

            # Split of persisted section for server-side caching
            raw_persist = section_options.get("persist")
            if raw_persist is not None:
                until = int(raw_persist)
                cached_at = int(time.time())  # Estimate age of the data
                cache_interval = int(until - cached_at)
                agent_cache_info[section_name] = (cached_at, cache_interval)
                persisted_sections[section_name] = (cached_at, until, section_content)

            # cached(at,interval)
            raw_cached = section_options.get("cached")
            if raw_cached is not None:
                cache_times = list(map(int, raw_cached.split(",")))
                agent_cache_info[section_name] = cache_times[0], cache_times[1]

            # The section data might have a different encoding
            encoding = section_options.get("encoding")

        elif stripped_line != b'':
            # Plain data line of the current section; empty lines are dropped.
            raw_nostrip = section_options.get("nostrip")
            if raw_nostrip is None:
                line = stripped_line

            if encoding:
                decoded_line = convert_to_unicode(line, std_encoding=six.ensure_str(encoding))
            else:
                decoded_line = convert_to_unicode(line)

            section_content.append(decoded_line.split(separator))

    return AgentHostSections(sections, agent_cache_info, piggybacked_raw_data,
                             persisted_sections)