def _get_agent_info_program(self, commandline, command_stdin):
    # type: (Union[bytes, Text], Optional[bytes]) -> RawAgentData
    """Execute the datasource program and return its stdout.

    The program is run through the shell. When ``command_stdin`` is given it
    is fed to the child's stdin, otherwise stdin is connected to /dev/null.

    Raises:
        MKAgentError: if the program is not found (exit code 127) or exits
            with any other non-zero status.
        MKTimeout: re-raised after terminating the child's process group to
            prevent child process "leakage".
    """
    exepath = commandline.split()[0]  # for error message, hide options!
    self._logger.debug("Calling external program %r" % (commandline))

    p = None
    # Only opened when no stdin payload is given. Kept in a local so it can
    # be closed in the finally block below - the previous inline
    # open(os.devnull) leaked one file descriptor per call.
    devnull = None if command_stdin else open(os.devnull)
    try:
        if config.monitoring_core == "cmc":
            p = subprocess.Popen(  # nosec
                commandline,
                shell=True,
                stdin=subprocess.PIPE if command_stdin else devnull,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=os.setsid,
                close_fds=True,
            )
        else:
            # We can not create a separate process group when running Nagios
            # Upon reaching the service_check_timeout Nagios only kills the process
            # group of the active check.
            p = subprocess.Popen(  # nosec
                commandline,
                shell=True,
                stdin=subprocess.PIPE if command_stdin else devnull,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                close_fds=True,
            )

        if command_stdin:
            stdout, stderr = p.communicate(input=ensure_bytestr(command_stdin))
        else:
            stdout, stderr = p.communicate()
        exitstatus = p.returncode

    except MKTimeout:
        # On timeout exception try to stop the process to prevent child process "leakage"
        if p:
            os.killpg(os.getpgid(p.pid), signal.SIGTERM)
            p.wait()
        raise
    finally:
        # The stdout and stderr pipe are not closed correctly on a MKTimeout
        # Normally these pipes getting closed after p.communicate finishes
        # Closing them a second time in a OK scenario won't hurt neither..
        if p:
            if p.stdout is None or p.stderr is None:
                raise Exception("stdout needs to be set")
            p.stdout.close()
            p.stderr.close()
        if devnull:
            devnull.close()

    if exitstatus:
        if exitstatus == 127:
            # six.ensure_str: avoid printing b'...' reprs on Python 3
            # (consistent with _fetch_raw_data)
            raise MKAgentError("Program '%s' not found (exit code 127)" %
                               six.ensure_str(exepath))
        raise MKAgentError("Agent exited with code %d: %s" %
                           (exitstatus, six.ensure_str(stderr)))

    return stdout
def create_mkp_file(package, file_object=None):
    # type: (PackageInfo, Optional[BinaryIO]) -> None
    """Serialize *package* as a gzipped MKP tar archive into *file_object*.

    The archive contains an "info" file (Python pprint format), an
    "info.json" file for external tools, and one inner tar per package part
    that has files. Mutates *package* by stamping the current Checkmk
    version into "version.packaged".
    """
    package["version.packaged"] = cmk.__version__

    tar = tarfile.open(fileobj=file_object, mode="w:gz")

    def create_tar_info(filename, size):
        # type: (str, int) -> tarfile.TarInfo
        # Fixed owner/mode so the archive content is independent of the
        # creating user; only mtime varies.
        info = tarfile.TarInfo()
        info.mtime = int(time.time())
        info.uid = 0
        info.gid = 0
        info.size = size
        info.mode = 0o644
        info.type = tarfile.REGTYPE
        info.name = filename
        return info

    def add_file(filename, data):
        # type: (str, six.binary_type) -> None
        info_file = BytesIO(data)
        info = create_tar_info(filename, len(info_file.getvalue()))
        tar.addfile(info, info_file)

    try:
        # add the regular info file (Python format)
        add_file("info", ensure_bytestr(pprint.pformat(package)))

        # add the info file a second time (JSON format) for external tools
        add_file("info.json", ensure_bytestr(json.dumps(package)))

        # Now pack the actual files into sub tars
        for part in get_package_parts() + get_config_parts():
            filenames = package["files"].get(part.ident, [])
            if len(filenames) > 0:
                logger.log(VERBOSE, " %s%s%s:", tty.bold, part.title, tty.normal)
                for f in filenames:
                    logger.log(VERBOSE, " %s", f)
                subdata = subprocess.check_output(
                    ["tar", "cf", "-", "--dereference", "--force-local", "-C", part.path] +
                    filenames)
                add_file(part.ident + ".tar", subdata)
    finally:
        # Close even when packing fails, so the gzip stream and the
        # underlying file object are not left dangling.
        tar.close()
def _translate_host_macros(self, cmd):
    # type: (str) -> bytes
    """Replace host macros in *cmd* and return the result as a byte string.

    For clusters an "alias" attribute defaulting to "cluster of <nodes>" is
    injected and the cluster attributes are merged in before the macros are
    computed.

    NOTE: The return type comment previously claimed ``str`` although the
    value goes through ensure_bytestr; ``bytes`` matches the actual result
    (consistent with _to_cache_file).
    """
    attrs = core_config.get_host_attributes(self._hostname, self._config_cache)
    if self._host_config.is_cluster:
        parents_list = core_config.get_cluster_nodes_for_config(self._config_cache,
                                                                self._host_config)
        attrs.setdefault("alias", "cluster of %s" % ", ".join(parents_list))
        attrs.update(
            core_config.get_cluster_attributes(self._config_cache, self._host_config,
                                               parents_list))

    macros = core_config.get_host_macros_from_attributes(self._hostname, attrs)
    return ensure_bytestr(core_config.replace_macros(cmd, macros))
def data(self):
    # type: () -> RawAgentData
    """Wait for the spawned agent process and return its stdout.

    Raises MKFetcherError when no process was started, when the program
    could not be found (exit code 127) or when it exits non-zero.
    """
    proc = self._process
    if proc is None:
        raise MKFetcherError("No process")

    stdin_bytes = ensure_bytestr(self._stdin) if self._stdin else None
    stdout, stderr = proc.communicate(input=stdin_bytes)

    status = proc.returncode
    if status == 127:
        exepath = self._cmdline.split()[0]  # for error message, hide options!
        raise MKFetcherError("Program '%s' not found (exit code 127)" %
                             six.ensure_str(exepath))
    if status:
        raise MKFetcherError("Agent exited with code %d: %s" %
                             (status, six.ensure_str(stderr)))
    return stdout
def output(text, *args, **kwargs):
    # type: (AnyStr, *Any, **IO[Any]) -> None
    """Write (optionally %-formatted) *text* to a stream, swallowing write errors.

    The target stream is taken from kwargs["stream"] and defaults to
    sys.stdout.
    """
    formatted = text % args if args else text

    # Python 3 text streams expect unicode, Python 2 streams byte strings.
    if six.PY3:
        payload = ensure_unicode(formatted)  # type: Text
    else:
        payload = ensure_bytestr(formatted)  # type: bytes

    stream = kwargs.get("stream", sys.stdout)
    try:
        stream.write(payload)
        stream.flush()
    except Exception:  # TODO: Way to generic!
        # avoid exception on broken pipe (e.g. due to | head)
        pass
def send_query(self, query_obj, add_headers="", do_reconnect=True):
    """Send a livestatus query over the connection's socket.

    Builds the final query text from *query_obj* plus auth and standard
    headers, encodes it and sends it. On an IOError the (possibly
    persistent) connection is dropped and - unless this already is the
    retry - the query is resent exactly once over a fresh connection.

    Raises MKLivestatusSocketError when the retry is exhausted.
    """
    # Keep the untouched query object around: the reconnect path below
    # resends it from scratch so header handling is not applied twice.
    orig_query = query_obj

    query = "%s" % query_obj
    if not self.allow_cache:
        # Strip any cache-related header lines from the query text.
        query = remove_cache_regex.sub("", query)

    # Lazily (re-)establish the connection.
    if self.socket is None:
        self.connect()

    if not query.endswith("\n"):
        query += "\n"

    # Standard headers: authentication, connection-wide extra headers and
    # the fixed protocol settings (python output, keep-alive, fixed16
    # response header for framed responses).
    query += self.auth_header + self.add_headers
    query += "Localtime: %d\nOutputFormat: python\nKeepAlive: on\nResponseHeader: fixed16\n" % int(
        time.time())
    query += add_headers

    if not query.endswith("\n"):
        query += "\n"
    # An empty line terminates the query.
    query += "\n"

    try:
        # socket.send() only works with byte strings
        query = ensure_bytestr(query)
        self.socket.send(query)
    except IOError as e:
        if self.persist:
            # Forget the broken cached connection so it is not reused.
            del persistent_connections[self.socketurl]
            self.successful_persistence = False
        self.socket = None

        if do_reconnect:
            # Automatically try to reconnect in case of an error, but
            # only once.
            self.connect()
            self.send_query(orig_query, add_headers, False)
            return

        raise MKLivestatusSocketError("RC1:" + str(e))
def _to_cache_file(self, raw_data):
    # type: (RawAgentData) -> bytes
    """Encode *raw_data* into the byte string written to the cache file."""
    # The former cast(RawAgentData, raw_data) was a runtime no-op and, as
    # its own TODO already noted, not needed - dropped.
    return ensure_bytestr(raw_data)
def _fetch_raw_data(commandline, command_stdin, logger):
    # type: (Union[bytes, Text], Optional[str], Logger) -> RawAgentData
    """Execute the datasource program and return its stdout.

    The program is run through the shell. When ``command_stdin`` is given it
    is fed to the child's stdin, otherwise stdin is connected to /dev/null.
    Under the CMC the child is started in its own session/process group so
    it can be killed as a whole on timeout.

    Raises:
        MKAgentError: if the program is not found (exit code 127) or exits
            with any other non-zero status.
        MKTimeout: re-raised after terminating the child's process group.
    """
    exepath = commandline.split()[0]  # for error message, hide options!
    logger.debug("Calling external program %r" % (commandline))

    p = None
    # Only opened when there is no stdin payload. Kept in a local so it can
    # be closed in the finally block below - the previous inline
    # open(os.devnull) leaked one file descriptor per call.
    devnull = None if command_stdin else open(os.devnull)
    try:
        if config.monitoring_core == "cmc":
            if sys.version_info[0] >= 3:
                # Warning:
                # The preexec_fn parameter is not safe to use in the presence of threads in your
                # application. The child process could deadlock before exec is called. If you
                # must use it, keep it trivial! Minimize the number of libraries you call into.
                #
                # Note:
                # If you need to modify the environment for the child use the env parameter
                # rather than doing it in a preexec_fn. The start_new_session parameter can take
                # the place of a previously common use of preexec_fn to call os.setsid() in the
                # child.
                p = subprocess.Popen(
                    commandline,
                    shell=True,
                    stdin=subprocess.PIPE if command_stdin else devnull,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    start_new_session=True,
                    close_fds=True,
                )
            else:
                # Python 2: start_new_session not available
                p = subprocess.Popen(  # pylint: disable=subprocess-popen-preexec-fn
                    commandline,
                    shell=True,
                    stdin=subprocess.PIPE if command_stdin else devnull,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    preexec_fn=os.setsid,
                    close_fds=True,
                )
        else:
            # We can not create a separate process group when running Nagios
            # Upon reaching the service_check_timeout Nagios only kills the process
            # group of the active check.
            p = subprocess.Popen(
                commandline,
                shell=True,
                stdin=subprocess.PIPE if command_stdin else devnull,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                close_fds=True,
            )

        if command_stdin:
            stdout, stderr = p.communicate(input=ensure_bytestr(command_stdin))
        else:
            stdout, stderr = p.communicate()
        exitstatus = p.returncode

    except MKTimeout:
        # On timeout exception try to stop the process to prevent child process "leakage"
        if p:
            os.killpg(os.getpgid(p.pid), signal.SIGTERM)
            p.wait()
        raise
    finally:
        # The stdout and stderr pipe are not closed correctly on a MKTimeout
        # Normally these pipes getting closed after p.communicate finishes
        # Closing them a second time in a OK scenario won't hurt neither..
        if p:
            if p.stdout is None or p.stderr is None:
                raise Exception("stdout needs to be set")
            p.stdout.close()
            p.stderr.close()
        if devnull:
            devnull.close()

    if exitstatus:
        if exitstatus == 127:
            raise MKAgentError("Program '%s' not found (exit code 127)" %
                               six.ensure_str(exepath))
        raise MKAgentError("Agent exited with code %d: %s" %
                           (exitstatus, six.ensure_str(stderr)))

    return stdout
def save_file(path, content, mode=0o660):
    # type: (Union[Path, str], AnyStr, int) -> None
    """Write *content* to *path* with the given file *mode*.

    The content is normalized to bytes before being handed to the low-level
    writer ("just to be sure", as the original put it).
    """
    data = ensure_bytestr(content)
    _save_data_to_file(path, data, mode=mode)
def test_ensure_bytestr(source, bytestr):
    """ensure_bytestr must convert *source* into the expected byte string."""
    converted = ensure_bytestr(source)
    assert converted == bytestr
def _to_cache_file(self, raw_data):
    # type: (Text) -> bytes
    """Encode the raw agent text into the bytes written to the cache file."""
    encoded = ensure_bytestr(raw_data)
    return encoded