def run(base_dir, timeout):
    """Run the compiled program ``./a.out`` inside *base_dir*.

    stdin is fed from ``test.in`` and stdout is captured into ``test.out``
    (both located in *base_dir*).

    :param base_dir: directory containing ``a.out`` and ``test.in``.
    :param timeout: wall-clock limit in seconds before the run is killed.
    :return: tuple ``(ok?, msg)`` — ``(True, '')`` on exit code 0,
        ``(False, 'time limit exceed')`` on timeout,
        ``(False, 'runtime error')`` on a non-zero exit code.
    """
    cmd = ['./a.out']
    out_path = os.path.join(base_dir, 'test.out')
    in_path = os.path.join(base_dir, 'test.in')
    with open(out_path, 'w') as fout, open(in_path) as fin:
        p = Popen(cmd, stdin=fin, stdout=fout, cwd=base_dir)
        try:
            p.wait(timeout)
        except TimeoutExpired:
            p.kill()
            # Reap the killed child; without this the process lingers as a
            # zombie until the Popen object is garbage collected.
            p.wait()
            return False, 'time limit exceed'
        if p.returncode == 0:
            return True, ''
        return False, 'runtime error'
def check():
    """HTTP endpoint: execute submitted PHP code and report the result.

    Reads ``code`` from the JSON request body, writes it to a scratch
    ``.php`` file, runs it through the ``php`` CLI (with functions disabled
    via ``php_disable_functions``) and returns a JSON object with the
    process exit code and its combined stdout/stderr.

    SECURITY: this deliberately executes untrusted input; the only
    mitigation visible here is the PHP ``disable_functions`` setting.
    """
    data = request.json
    t = time()
    out_file = f'output{t}.txt'
    code_file = f'code{t}.php'
    output = open(out_file, 'w+')
    exit_code = -1
    result = ''
    try:
        with open(code_file, 'w') as f:
            f.write(data['code'])
        process = Popen(['php', '-d', php_disable_functions, code_file],
                        stdout=output, stderr=output)
        try:
            exit_code = process.wait(5)
        except Exception as e:
            print(e)
        # subprocess.Popen has no is_running() (that is a psutil method);
        # poll() returns None while the process is still alive.
        if process.poll() is None:
            process.kill()
            process.wait()  # reap so no zombie is left behind
        output.seek(0)
        result = output.read()
    finally:
        # Always release the handle and remove the scratch files, even if
        # spawning or reading failed — previously they leaked on errors.
        output.close()
        for path in (out_file, code_file):
            try:
                os.remove(path)
            except OSError:
                pass
    return jsonify({'exit_code': exit_code, 'result': result})
def cmdshell(self, cmd, timeout=None, inpt=None, log_cmd=True):
    """Run *cmd* through the shell and capture its output.

    :param cmd: command and arguments as a list; joined with spaces and
        executed with ``shell=True``.
    :param timeout: seconds before the process is killed; defaults to
        ``conf['SHELL_COMMAND_TIMEOUT']``.
    :param inpt: optional string fed to the process's stdin.
    :param log_cmd: when True, log the command line and its output.
    :return: tuple ``(returncode, stdout lines, stderr lines)`` where the
        line lists are stripped and (for stdout) empty lines are dropped.
    """
    if timeout is None:
        timeout = conf['SHELL_COMMAND_TIMEOUT']
    output = []
    errors = []
    execline = ' '.join(cmd)
    if log_cmd:
        display_cmd(self.logger, execline)
    # stdin must be requested at construction time: assigning
    # ``p.stdin = subprocess.PIPE`` after the fact (as the old code did)
    # has no effect on the child and breaks communicate(input=...).
    p = Popen(execline,
              env=os.environ,
              shell=True,
              stdin=subprocess.PIPE if inpt else None,
              stderr=subprocess.PIPE,
              stdout=subprocess.PIPE)
    try:
        if inpt:
            out, err = p.communicate(input=bytes(inpt, 'utf-8'),
                                     timeout=timeout)
        else:
            out, err = p.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        p.kill()
        # Collect whatever the process produced before being killed.
        out, err = p.communicate()
    if out:
        output = [l.strip() for l in out.decode('utf-8').split('\n') if len(l)]
        if log_cmd:
            display_cmd_output(self.logger, output)
    if err:
        errors = [l.strip() for l in err.decode().split('\n')]
        self.logger.debug("Errors: %r", errors)
    self.logger.debug("CMD return code: %r", p.returncode)
    return p.returncode, output, errors
class BackgroundRunner:
    """Runs an encoder command (or a two-command pipe) in the background.

    stdout/stderr are redirected into log files inside the work dir; a
    daemon-less reader thread tails those files, forwards each line to
    ``log_queue`` and scans them for the configured error/success marker
    substrings.

    NOTE(review): pause()/resume() call suspend()/resume(), which are not
    subprocess.Popen methods — presumably ``Popen`` here is psutil.Popen;
    confirm against the file's imports.
    """

    def __init__(self, log_queue):
        self.process = None               # primary worker process handle
        self.process_two = None           # second stage of a piped run, if any
        self.killed = False               # set once kill() has been run
        self.output_file = None           # Path of the stdout log file
        self.error_output_file = None     # Path of the stderr log file
        self.log_queue = log_queue        # queue consumed by the caller/UI
        self.error_detected = False       # set when an error marker is seen
        self.success_detected = False     # set when a success marker is seen
        self.error_message = []           # substrings marking failure
        self.success_message = []         # substrings marking success

    def start_exec(self, command, work_dir: str = None, shell: bool = False, errors=(), successes=()):
        """Start a single background command and the log reader thread."""
        # Tear down any previous run before starting a new one.
        self.clean()
        logger.info(f"Running command: {command}")
        Path(work_dir).mkdir(exist_ok=True, parents=True)
        # Random suffix so concurrent/successive runs never share log files.
        self.output_file = Path(
            work_dir) / f"encoder_output_{secrets.token_hex(6)}.log"
        self.error_output_file = Path(
            work_dir) / f"encoder_error_output_{secrets.token_hex(6)}.log"
        self.output_file.touch(exist_ok=True)
        self.error_output_file.touch(exist_ok=True)
        self.error_message = errors
        self.success_message = successes
        self.process = Popen(
            shlex.split(command) if not shell and isinstance(command, str) else command,
            shell=shell,
            cwd=work_dir,
            stdout=open(self.output_file, "w"),
            stderr=open(self.error_output_file, "w"),
            stdin=PIPE,  # FFmpeg can try to read stdin and wrecks havoc on linux
            encoding="utf-8",
        )
        Thread(target=self.read_output).start()

    def start_piped_exec(self, command_one, command_two, work_dir, errors=(), successes=()):
        """Start ``command_one | command_two`` and the log reader thread."""
        self.clean()
        logger.info(
            f"Running commands: {' '.join(command_one)} | {' '.join(command_two)}"
        )
        Path(work_dir).mkdir(exist_ok=True, parents=True)
        self.output_file = Path(
            work_dir) / f"encoder_output_{secrets.token_hex(6)}.log"
        self.error_output_file = Path(
            work_dir) / f"encoder_error_output_{secrets.token_hex(6)}.log"
        self.output_file.touch(exist_ok=True)
        self.error_output_file.touch(exist_ok=True)
        self.error_message = errors
        self.success_message = successes
        self.process = Popen(
            command_one,
            cwd=work_dir,
            stdout=PIPE,
            stderr=PIPE,
            stdin=PIPE,  # FFmpeg can try to read stdin and wrecks havoc on linux
        )
        # Second stage consumes the first stage's stdout; only its output
        # goes to the log files.
        self.process_two = Popen(
            command_two,
            cwd=work_dir,
            stdout=open(self.output_file, "w"),
            stderr=open(self.error_output_file, "w"),
            stdin=self.process.stdout,
            encoding="utf-8",
        )
        self.error_detected = False
        Thread(target=self.read_output).start()

    def read_output(self):
        """Tail both log files until the worker exits (runs in a thread)."""
        with open(self.output_file, "r", encoding="utf-8", errors="ignore") as out_file, open(
                self.error_output_file, "r", encoding="utf-8", errors="ignore") as err_file:
            while True:
                if not self.is_alive():
                    # Process finished: flush whatever is left in both logs.
                    excess = out_file.read()
                    logger.info(excess)
                    self.log_queue.put(excess)
                    err_excess = err_file.read()
                    logger.info(err_excess)
                    self.log_queue.put(err_excess)
                    break
                line = out_file.readline().rstrip()
                if line:
                    logger.info(line)
                    self.log_queue.put(line)
                    if not self.success_detected:
                        for success in self.success_message:
                            if success in line:
                                self.success_detected = True
                err_line = err_file.readline().rstrip()
                if err_line:
                    logger.info(err_line)
                    self.log_queue.put(err_line)
                    # FFmpeg's hard failure marker is always an error.
                    if "Conversion failed!" in err_line:
                        self.error_detected = True
                    if not self.error_detected:
                        for error in self.error_message:
                            if error in err_line:
                                self.error_detected = True
        # Best-effort cleanup of the temporary log files.
        try:
            self.output_file.unlink()
            self.error_output_file.unlink()
        except OSError:
            pass

    def read(self, limit=None):
        """Read up to *limit* bytes from the worker's stdout, if alive."""
        if not self.is_alive():
            return
        return self.process.stdout.read(limit)

    def is_alive(self):
        """Return True while the (last stage of the) pipeline is running."""
        if not self.process:
            return False
        if self.process_two:
            # TODO make sure process 1 dies cleanly
            return True if self.process_two.poll() is None else False
        return True if self.process.poll() is None else False

    def clean(self):
        """Kill any running worker and reset all state flags."""
        self.kill(log=False)
        self.process = None
        self.process_two = None
        self.error_detected = False
        self.success_detected = False
        self.killed = False

    def kill(self, log=True):
        """Terminate both pipeline stages (second stage first)."""
        if self.process_two:
            if log:
                logger.info(f"Killing worker process {self.process_two.pid}")
            try:
                self.process_two.terminate()
                self.process_two.kill()
            except Exception as err:
                if log:
                    logger.exception(f"Couldn't terminate process: {err}")
        if self.process:
            if log:
                logger.info(f"Killing worker process {self.process.pid}")
            try:
                # if reusables.win_based:
                #     os.kill(self.process.pid, signal.CTRL_C_EVENT)
                # else:
                #     os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
                self.process.terminate()
                self.process.kill()
            except Exception as err:
                if log:
                    logger.exception(f"Couldn't terminate process: {err}")
        self.killed = True

    def pause(self):
        """Suspend the worker; refuses (returns False) for piped runs."""
        if self.process_two:
            return False
        if not self.process:
            return False
        self.process.suspend()

    def resume(self):
        """Resume a suspended worker; refuses (returns False) for piped runs."""
        if self.process_two:
            return False
        if not self.process:
            return False
        self.process.resume()
class BackgroundRunner:
    """Runs a single encoder command in the background.

    stdout/stderr are redirected into log files in the work dir; a reader
    thread tails those files, forwards each line to ``log_queue`` and scans
    them for the configured error/success marker substrings. A non-zero
    exit code is also treated as an error.

    NOTE(review): pause()/resume() call suspend()/resume(), which are not
    subprocess.Popen methods — presumably ``Popen`` here is psutil.Popen;
    confirm against the file's imports.
    """

    def __init__(self, log_queue):
        self.process = None               # worker process handle
        self.killed = False               # set once kill() has been run
        self.output_file = None           # Path of the stdout log file
        self.error_output_file = None     # Path of the stderr log file
        self.log_queue = log_queue        # queue consumed by the caller/UI
        self.error_detected = False       # set when an error marker is seen
        self.success_detected = False     # set when a success marker is seen
        self.error_message = []           # substrings marking failure
        self.success_message = []         # substrings marking success
        self.started_at = None            # UTC datetime of the last start

    def start_exec(self, command, work_dir: str = None, shell: bool = False, errors=(), successes=()):
        """Start the background command and the log reader thread."""
        # Tear down any previous run before starting a new one.
        self.clean()
        logger.debug(f"Using work dir: {work_dir}")
        work_path = Path(work_dir)
        work_path.mkdir(exist_ok=True, parents=True)
        # Random suffix so concurrent/successive runs never share log files.
        self.output_file = work_path / f"encoder_output_{secrets.token_hex(6)}.log"
        self.error_output_file = work_path / f"encoder_error_output_{secrets.token_hex(6)}.log"
        logger.debug(f"command output file set to: {self.output_file}")
        logger.debug(
            f"command error output file set to: {self.error_output_file}")
        self.output_file.touch(exist_ok=True)
        self.error_output_file.touch(exist_ok=True)
        self.error_message = errors
        self.success_message = successes
        logger.info(f"Running command: {command}")
        try:
            self.process = Popen(
                # Double the backslashes so shlex does not swallow Windows
                # path separators when splitting the command string.
                shlex.split(command.replace("\\", "\\\\"))
                if not shell and isinstance(command, str) else command,
                shell=shell,
                cwd=work_dir,
                stdout=open(self.output_file, "w"),
                stderr=open(self.error_output_file, "w"),
                stdin=PIPE,  # FFmpeg can try to read stdin and wrecks havoc on linux
                encoding="utf-8",
            )
        except PermissionError:
            logger.error(
                "Could not encode video due to permissions error."
                "Please make sure encoder is executable and you have permissions to run it."
                "Otherwise try running FastFlix as an administrator.")
            self.error_detected = True
            return
        except Exception:
            logger.exception("Could not start worker process")
            self.error_detected = True
            return
        self.started_at = datetime.datetime.now(datetime.timezone.utc)
        Thread(target=self.read_output).start()

    def read_output(self):
        """Tail both log files until the worker exits (runs in a thread)."""
        with open(self.output_file, "r", encoding="utf-8", errors="ignore") as out_file, open(
                self.error_output_file, "r", encoding="utf-8", errors="ignore") as err_file:
            while True:
                if not self.is_alive():
                    # Process finished: flush whatever is left in both logs.
                    excess = out_file.read()
                    logger.info(excess)
                    self.log_queue.put(excess)
                    err_excess = err_file.read()
                    logger.info(err_excess)
                    self.log_queue.put(err_excess)
                    # Any non-zero exit code counts as a failure.
                    if self.process.returncode is not None and self.process.returncode > 0:
                        self.error_detected = True
                    break
                line = out_file.readline().rstrip()
                if line:
                    logger.info(line)
                    self.log_queue.put(line)
                    if not self.success_detected:
                        for success in self.success_message:
                            if success in line:
                                self.success_detected = True
                err_line = err_file.readline().rstrip()
                if err_line:
                    logger.info(err_line)
                    self.log_queue.put(err_line)
                    # FFmpeg's hard failure markers are always errors.
                    if "Conversion failed!" in err_line or "Error during output" in err_line:
                        self.error_detected = True
                    if not self.error_detected:
                        for error in self.error_message:
                            if error in err_line:
                                self.error_detected = True
        # Best-effort cleanup of the temporary log files.
        try:
            self.output_file.unlink()
            self.error_output_file.unlink()
        except OSError:
            pass

    def read(self, limit=None):
        """Read up to *limit* bytes from the worker's stdout, if alive."""
        if not self.is_alive():
            return
        return self.process.stdout.read(limit)

    def is_alive(self):
        """Return True while the worker process is still running."""
        if not self.process:
            return False
        return True if self.process.poll() is None else False

    def clean(self):
        """Kill any running worker and reset all state flags."""
        self.kill(log=False)
        self.process = None
        self.error_detected = False
        self.success_detected = False
        self.killed = False
        self.started_at = None

    def kill(self, log=True):
        """Terminate the worker if it is still running; set ``killed``."""
        if self.process and self.process.poll() is None:
            if log:
                logger.info(f"Killing worker process {self.process.pid}")
            try:
                # if reusables.win_based:
                #     os.kill(self.process.pid, signal.CTRL_C_EVENT)
                # else:
                #     os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
                self.process.terminate()
                self.process.kill()
            except Exception as err:
                if log:
                    logger.exception(f"Couldn't terminate process: {err}")
        self.killed = True

    def pause(self):
        """Suspend the worker; returns False when there is none."""
        if not self.process:
            return False
        self.process.suspend()

    def resume(self):
        """Resume a suspended worker; returns False when there is none."""
        if not self.process:
            return False
        self.process.resume()
class Run(object):
    """Class to handle processes.

    :ivar cmds: The ``cmds`` argument passed to the __init__ method
        (a command line passed in a list, or a list of command lines passed
        as a list of list).
    :ivar status: The exit status. As the exit status is only meaningful
        after the process has exited, its initial value is None.  When a
        problem running the command is detected and a process does not get
        created, its value gets set to the special value 127.
    :ivar out: process standard output  (if instanciated with output = PIPE)
    :ivar err: same as out but for standard error
    :ivar pid: PID. Set to -1 if the command failed to run.
    """

    def __init__(self, cmds, cwd=None, output=PIPE,
                 error=STDOUT, input=None, bg=False, timeout=None,
                 env=None, set_sigpipe=True, parse_shebang=False,
                 ignore_environ=True):
        """Spawn a process.

        :param cmds: two possibilities:
            1) a command line: a tool name and its arguments, passed
            in a list. e.g. ['ls', '-a', '.']
            2) a list of command lines (as defined in (1)): the
            different commands will be piped. This means that
            [['ps', '-a'], ['grep', 'vxsim']] will be equivalent to
            the system command line 'ps -a | grep vxsim'.
        :type cmds: list[str] | list[list[str]]
        :param cwd: directory in which the process should be executed
            (string or None). If None then current directory is used
        :type cwd: str | None
        :param output: can be PIPE (default), a filename string, a fd on an
            already opened file, a python file object or None (for stdout).
        :type output: int | str | file | None
        :param error: same as output or STDOUT, which indicates that the
            stderr data from the applications should be captured into the
            same file handle as for stdout.
        :type error: int | str | file | None
        :param input: same as output
        :type input: int | str | file | None
        :param bg: if True then run in background
        :type bg: bool
        :param timeout: limit execution time (in seconds), None means
            unlimited
        :type timeout: int | None
        :param env: dictionary for environment variables (e.g. os.environ)
        :type env: dict
        :param set_sigpipe: reset SIGPIPE handler to default value
        :type set_sigpipe: bool
        :param parse_shebang: take the #! interpreter line into account
        :type parse_shebang: bool
        :param ignore_environ: Applies only when env parameter is not None.
            When set to True (the default), the only environment variables
            passed to the program are the ones provided by the env
            parameter. Otherwise, the environment passed to the program
            consists of the environment variables currently defined
            (os.environ) augmented by the ones provided in env.
        :type ignore_environ: bool

        :raise OSError: when trying to execute a non-existent file.

        If you specify a filename for output or stderr then file content is
        reseted (equiv. to > in shell). If you prepend the filename with '+'
        then the file will be opened in append mode (equiv. to >> in shell)
        If you prepend the input with '|', then the content of input string
        will be used for process stdin.
        """
        def add_interpreter_command(cmd_line):
            """Add the interpreter defined in the #! line to cmd_line.

            If the #! line cannot be parsed, just return the cmd_line
            unchanged

            On windows, /usr/bin/env will be ignored to avoid a dependency
            on cygwin and /bin/bash & /bin/sh are replaced by $SHELL if
            defined.

            :param cmd_line: command line
            :type cmd_line: list[str]
            """
            if not parse_shebang:
                # nothing to do
                return cmd_line
            prog = which(cmd_line[0], default=None)
            if prog is None:
                # Not found. Do not modify the command line
                return cmd_line

            with open(prog) as f:
                try:
                    header = f.read()[0:2]
                except UnicodeDecodeError:  # py3-only
                    # unknown header - cannot decode the first two bytes
                    return cmd_line
                if header != "#!":
                    # Unknown header
                    return cmd_line
                # Header found, get the interpreter command in the first line
                f.seek(0)
                line = f.readline()
                interpreter_cmds = [l.strip() for l in
                                    line[line.find('!') + 1:].split()]
                # Pass the program path to the interpreter
                if len(cmd_line) > 1:
                    cmd_line = [prog] + list(cmd_line[1:])
                else:
                    cmd_line = [prog]

                if sys.platform == 'win32':  # unix: no cover
                    if interpreter_cmds[0] == '/usr/bin/env':
                        return interpreter_cmds[1:] + cmd_line
                    elif interpreter_cmds[0] in ('/bin/bash', '/bin/sh') and \
                            'SHELL' in os.environ:
                        return [os.environ['SHELL']] + cmd_line
                return interpreter_cmds + cmd_line

        # First resolve output, error and input
        self.input_file = File(input, 'r')
        self.output_file = File(output, 'w')
        self.error_file = File(error, 'w')

        self.status = None
        self.out = ''
        self.err = ''
        self.cmds = []

        if env is not None and not ignore_environ:
            # ignore_environ is False, so get a copy of the current
            # environment and update it with the env dictionnary.
            tmp = os.environ.copy()
            tmp.update(env)
            env = tmp

        # When a timeout is requested the command is wrapped with the
        # external "rlimit" helper, which enforces the limit.
        rlimit_args = []
        if timeout is not None:
            rlimit = get_rlimit()
            if os.path.exists(rlimit):
                rlimit_args = [rlimit, '%d' % timeout]
            else:
                logger.warning('cannot find rlimit at %s', rlimit)
                rlimit_args = []

        try:
            # A single command line is a list of strings; a pipeline is a
            # list of such lists. Only the first command gets the rlimit
            # wrapper.
            if isinstance(cmds[0], basestring):
                self.cmds = rlimit_args + list(add_interpreter_command(cmds))
            else:
                self.cmds = [add_interpreter_command(c) for c in cmds]
                self.cmds[0] = rlimit_args + list(self.cmds[0])

            cmdlogger.debug('Run: cd %s; %s',
                            cwd if cwd is not None else os.getcwd(),
                            self.command_line_image())

            if isinstance(cmds[0], basestring):
                popen_args = {
                    'stdin': self.input_file.fd,
                    'stdout': self.output_file.fd,
                    'stderr': self.error_file.fd,
                    'cwd': cwd,
                    'env': env,
                    'universal_newlines': True}

                if sys.platform != 'win32' and \
                        set_sigpipe:  # windows: no cover
                    # preexec_fn is no supported on windows
                    popen_args['preexec_fn'] = subprocess_setup

                if WIN_NEW_PG and sys.platform == 'win32':
                    popen_args['creationflags'] = \
                        subprocess.CREATE_NEW_PROCESS_GROUP

                self.internal = Popen(self.cmds, **popen_args)

            else:
                # Pipeline case: chain each process's stdout into the next
                # process's stdin.
                runs = []
                for index, cmd in enumerate(self.cmds):
                    if index == 0:
                        stdin = self.input_file.fd
                    else:
                        stdin = runs[index - 1].stdout

                    # When connecting two processes using a Pipe don't use
                    # universal_newlines mode. Indeed commands transmitting
                    # binary data between them will crash
                    # (e.g. gzip -dc foo.txt | tar -xf -)
                    if index == len(self.cmds) - 1:
                        stdout = self.output_file.fd
                        txt_mode = True
                    else:
                        stdout = subprocess.PIPE
                        txt_mode = False

                    popen_args = {
                        'stdin': stdin,
                        'stdout': stdout,
                        'stderr': self.error_file.fd,
                        'cwd': cwd,
                        'env': env,
                        'universal_newlines': txt_mode}

                    if sys.platform != 'win32' and \
                            set_sigpipe:  # windows: no cover
                        # preexec_fn is no supported on windows
                        popen_args['preexec_fn'] = subprocess_setup

                    if WIN_NEW_PG and sys.platform == 'win32':
                        popen_args['creationflags'] = \
                            subprocess.CREATE_NEW_PROCESS_GROUP

                    try:
                        runs.append(Popen(cmd, **popen_args))
                    except OSError:
                        logger.error('error when spawning %s', cmd)
                        # We have an error (e.g. file not found), try to kill
                        # all processes already started.
                        for p in runs:
                            p.terminate()
                        raise

                # The last process of the pipeline is the one we track.
                self.internal = runs[-1]

        except Exception as e:  # defensive code
            self.__error(e, self.cmds)
            raise

        self.pid = self.internal.pid
        if not bg:
            # Foreground mode: block until the process finishes.
            self.wait()

    def command_line_image(self):
        """Get shell command line image of the spawned command(s).

        :rtype: str

        This just a convenient wrapper around the function of the same
        name.
        """
        return command_line_image(self.cmds)

    def close_files(self):
        """Close all file descriptors."""
        self.output_file.close()
        self.error_file.close()
        self.input_file.close()

    def __error(self, error, cmds):
        """Set pid to -1 and status to 127 before closing files."""
        # NOTE(review): despite the docstring, this body only closes files
        # and logs — pid/status do not appear to be set here; confirm.
        self.close_files()
        logger.error(error)

        def not_found(path):
            """Raise OSError.

            :param path: path of the executable
            :type path: str
            """
            logger.error("%s not found", path)
            e3.log.debug('PATH=%s', os.environ['PATH'])
            raise OSError(errno.ENOENT,
                          'No such file or directory, %s not found' % path)

        # Try to send an helpful message if one of the executable has not
        # been found.
        if isinstance(cmds[0], basestring):
            if which(cmds[0], default=None) is None:
                not_found(cmds[0])
        else:
            for cmd in cmds:
                if which(cmd[0], default=None) is None:
                    not_found(cmd[0])

    def wait(self):
        """Wait until process ends and return its status.

        :return: exit code of the process
        :rtype: int
        """
        if self.status is not None:
            # Wait has already been called
            return self.status

        # If there is no pipe in the loop then just do a wait. Otherwise
        # in order to avoid blocked processes due to full pipes, use
        # communicate.
        if self.output_file.fd != subprocess.PIPE and \
                self.error_file.fd != subprocess.PIPE and \
                self.input_file.fd != subprocess.PIPE:
            self.status = self.internal.wait()
        else:
            tmp_input = None
            if self.input_file.fd == subprocess.PIPE:
                tmp_input = self.input_file.get_command()

            (self.out, self.err) = self.internal.communicate(tmp_input)
            self.status = self.internal.returncode

        self.close_files()
        return self.status

    def poll(self):
        """Check the process status and set self.status if available.

        This method checks whether the underlying process has exited
        or not. If it hasn't, then it just returns None immediately.
        Otherwise, it stores the process' exit code in self.status
        and then returns it.

        :return: None if the process is still alive; otherwise, returns
          the process exit status.
        :rtype: int | None
        """
        if self.status is not None:
            # Process is already terminated and wait been called
            return self.status

        result = self.internal.poll()
        if result is not None:
            # Process is finished, call wait to finalize it (closing
            # handles, ...)
            return self.wait()
        else:
            return None

    def kill(self, recursive=True, timeout=3):
        """Kill the process.

        :param recursive: if True, try to kill the complete process tree
        :type recursive: bool
        :param timeout: wait timeout (in seconds) after sending the kill
            signal (when recursive=True)
        :type timeout: int
        """
        if recursive:
            kill_process_tree(self.internal, timeout=timeout)
        else:
            self.internal.kill()

    def interrupt(self):
        """Send SIGINT to the process, kill on Windows."""
        if sys.platform == 'win32':
            self.kill()  # Ctrl-C event is unreliable on Windows
        else:
            self.internal.send_signal(signal.SIGINT)

    def is_running(self):
        """Check whether the process is running.

        :rtype: bool
        """
        if psutil is None:  # defensive code
            # psutil not imported, use our is_running function
            return is_running(self.pid)
        else:
            return self.internal.is_running()

    def children(self):
        """Return list of child processes (using psutil).

        :rtype: list[psutil.Process]
        """
        if psutil is None:  # defensive code
            raise NotImplementedError('Run.children() require psutil')
        return self.internal.children()
class Run:
    """Class to handle processes.

    :ivar cmds: The ``cmds`` argument passed to the __init__ method
        (a command line passed in a list, or a list of command lines passed
        as a list of list).
    :ivar status: The exit status. As the exit status is only meaningful
        after the process has exited, its initial value is None.  When a
        problem running the command is detected and a process does not get
        created, its value gets set to the special value 127.
    :ivar raw_out: process standard output as bytes (if instanciated with
        output = PIPE). Use self.out to get a decoded string.
    :ivar raw_err: same as raw_out but for standard error.
    :ivar pid: PID. Set to -1 if the command failed to run.
    """

    def __init__(
        self,
        cmds: AnyCmdLine,
        cwd: Optional[str] = None,
        output: STDOUT_VALUE | DEVNULL_VALUE | PIPE_VALUE | str | IO | None = PIPE,
        error: STDOUT_VALUE | DEVNULL_VALUE | PIPE_VALUE | str | IO | None = STDOUT,
        input: DEVNULL_VALUE | PIPE_VALUE | str | IO | None = None,  # noqa: A002
        bg: bool = False,
        timeout: Optional[int] = None,
        env: Optional[dict] = None,
        set_sigpipe: bool = True,
        parse_shebang: bool = False,
        ignore_environ: bool = True,
    ) -> None:
        """Spawn a process.

        :param cmds: two possibilities:
            1) a command line: a tool name and its arguments, passed
            in a list. e.g. ['ls', '-a', '.']
            2) a list of command lines (as defined in (1)): the
            different commands will be piped. This means that
            [['ps', '-a'], ['grep', 'vxsim']] will be equivalent to
            the system command line 'ps -a | grep vxsim'.
        :param cwd: directory in which the process should be executed
            (string or None). If None then current directory is used
        :param output: can be PIPE (default), a filename string, a fd on an
            already opened file, a python file object or None (for stdout).
        :param error: same as output or STDOUT, which indicates that the
            stderr data from the applications should be captured into the
            same file handle as for stdout.
        :param input: same as output
        :param bg: if True then run in background
        :param timeout: limit execution time (in seconds), None means
            unlimited
        :param env: dictionary for environment variables (e.g. os.environ)
        :param set_sigpipe: reset SIGPIPE handler to default value
        :param parse_shebang: take the #! interpreter line into account
        :param ignore_environ: Applies only when env parameter is not None.
            When set to True (the default), the only environment variables
            passed to the program are the ones provided by the env
            parameter. Otherwise, the environment passed to the program
            consists of the environment variables currently defined
            (os.environ) augmented by the ones provided in env.

        :raise OSError: when trying to execute a non-existent file.

        If you specify a filename for output or stderr then file content is
        reseted (equiv. to > in shell). If you prepend the filename with '+'
        then the file will be opened in append mode (equiv. to >> in shell)
        If you prepend the input with '|', then the content of input string
        will be used for process stdin.
        """
        def add_interpreter_command(cmd_line: CmdLine) -> CmdLine:
            """Add the interpreter defined in the #! line to cmd_line.

            If the #! line cannot be parsed, just return the cmd_line
            unchanged

            On windows, /usr/bin/env will be ignored to avoid a dependency
            on cygwin and /bin/bash & /bin/sh are replaced by $SHELL if
            defined.

            :param cmd_line: command line
            """
            if not parse_shebang:
                # nothing to do
                return cmd_line
            prog = which(cmd_line[0], default=None)
            if prog is None:
                # Not found. Do not modify the command line
                return cmd_line

            with open(prog) as f:
                try:
                    header = f.read()[0:2]
                except UnicodeDecodeError:
                    # unknown header - cannot decode the first two bytes
                    return cmd_line
                if header != "#!":
                    # Unknown header
                    return cmd_line
                # Header found, get the interpreter command in the first line
                f.seek(0)
                line = f.readline()
                interpreter_cmds = [word.strip() for word in
                                    line[line.find("!") + 1:].split()]
                # Pass the program path to the interpreter
                if len(cmd_line) > 1:
                    cmd_line = [prog] + list(cmd_line[1:])
                else:
                    cmd_line = [prog]

                if sys.platform == "win32":  # unix: no cover
                    if interpreter_cmds[0] == "/usr/bin/env":
                        # On windows be sure that PATH is taken into account
                        # by using which. In some cases involving python
                        # interpreter, the python interpreter used to run
                        # this module has been used rather than the first
                        # one on the path.
                        interpreter_cmds[1] = which(
                            interpreter_cmds[1], default=interpreter_cmds[1])
                        return interpreter_cmds[1:] + cmd_line
                    elif (interpreter_cmds[0] in ("/bin/bash", "/bin/sh")
                          and "SHELL" in os.environ):
                        return [os.environ["SHELL"]] + cmd_line
                return interpreter_cmds + cmd_line

        # First resolve output, error and input
        self.input_file = File(input, "r")
        self.output_file = File(output, "w")
        self.error_file = File(error, "w")

        self.status: Optional[int] = None
        self.raw_out = b""
        self.raw_err = b""
        self.cmds = []

        if env is not None:
            if ignore_environ:
                if sys.platform == "win32":
                    # On Windows not all environment variables can be
                    # discarded. At least SYSTEMDRIVE, SYSTEMROOT should be
                    # set. In order to be portable propagate their value in
                    # case the user does not pass them in env when
                    # ignore_environ is set to True.
                    tmp = {}
                    for var in ("SYSTEMDRIVE", "SYSTEMROOT"):
                        if var not in env and var in os.environ:
                            tmp[var] = os.environ[var]
                    tmp.update(env)
                    env = tmp
            else:
                # ignore_environ is False, so get a copy of the current
                # environment and update it with the env dictionary.
                tmp = os.environ.copy()
                tmp.update(env)
                env = tmp

        # When a timeout is requested the command is wrapped with the
        # external "rlimit" helper, which enforces the limit.
        rlimit_args = []
        if timeout is not None:
            rlimit = get_rlimit()
            if os.path.exists(rlimit):
                rlimit_args = [rlimit, "%d" % timeout]
            else:
                logger.warning("cannot find rlimit at %s", rlimit)
                rlimit_args = []

        try:
            # Normalize to a list of command lines; only the first command
            # gets the rlimit wrapper.
            self.cmds = [
                add_interpreter_command(c) for c in to_cmd_lines(cmds)
            ]
            self.cmds[0] = rlimit_args + list(self.cmds[0])

            cmdlogger.debug(
                "Run: cd %s; %s",
                cwd if cwd is not None else os.getcwd(),
                self.command_line_image(),
            )

            if len(self.cmds) == 1:
                popen_args = {
                    "stdin": self.input_file.fd,
                    "stdout": self.output_file.fd,
                    "stderr": self.error_file.fd,
                    "cwd": cwd,
                    "env": env,
                    "universal_newlines": False,
                }

                if sys.platform != "win32" and set_sigpipe:  # windows: no cover
                    # preexec_fn is no supported on windows
                    popen_args["preexec_fn"] = subprocess_setup  # type: ignore

                if sys.platform == "win32":
                    popen_args[
                        "creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP

                self.internal = Popen(self.cmds[0], **popen_args)

            else:
                # Pipeline case: chain each process's stdout into the next
                # process's stdin.
                runs: list[subprocess.Popen] = []
                for index, cmd in enumerate(self.cmds):
                    if index == 0:
                        stdin: int | IO[Any] = self.input_file.fd
                    else:
                        previous_stdout = runs[index - 1].stdout
                        assert previous_stdout is not None
                        stdin = previous_stdout

                    # When connecting two processes using a Pipe don't use
                    # universal_newlines mode. Indeed commands transmitting
                    # binary data between them will crash
                    # (e.g. gzip -dc foo.txt | tar -xf -)
                    if index == len(self.cmds) - 1:
                        stdout = self.output_file.fd
                    else:
                        stdout = subprocess.PIPE

                    popen_args = {
                        "stdin": stdin,
                        "stdout": stdout,
                        "stderr": self.error_file.fd,
                        "cwd": cwd,
                        "env": env,
                        "universal_newlines": False,
                    }

                    if sys.platform != "win32" and set_sigpipe:  # windows: no cover
                        # preexec_fn is no supported on windows
                        popen_args[
                            "preexec_fn"] = subprocess_setup  # type: ignore

                    if sys.platform == "win32":
                        popen_args[
                            "creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP

                    try:
                        runs.append(Popen(cmd, **popen_args))
                    except OSError:
                        logger.error("error when spawning %s", cmd)
                        # We have an error (e.g. file not found), try to kill
                        # all processes already started.
                        for p in runs:
                            p.terminate()
                        raise

                # The last process of the pipeline is the one we track.
                self.internal = runs[-1]

        except Exception as e:  # defensive code
            self.__error(e, self.cmds)
            raise

        self.pid = self.internal.pid
        if not bg:
            # Foreground mode: block until the process finishes.
            self.wait()

    @property
    def out(self) -> str:
        """Process output as string.

        Attempt is done to decode as utf-8 the output. If the output is not
        in utf-8 a string representation will be returned
        (see e3.text.bytes_as_str).
        """
        return bytes_as_str(self.raw_out)

    @property
    def err(self) -> str:
        """Process error as string.

        Attempt is done to decode as utf-8 the output. If the output is not
        in utf-8 a string representation will be returned
        (see e3.text.bytes_as_str).
        """
        return bytes_as_str(self.raw_err)

    def command_line_image(self) -> str:
        """Get shell command line image of the spawned command(s).

        This just a convenient wrapper around the function of the same
        name.
        """
        return command_line_image(self.cmds)

    def close_files(self) -> None:
        """Close all file descriptors."""
        self.output_file.close()
        self.error_file.close()
        self.input_file.close()

    def __error(self, error: Exception, cmds: list[CmdLine]) -> None:
        """Set pid to -1 and status to 127 before closing files."""
        # NOTE(review): despite the docstring, this body only closes files
        # and logs — pid/status do not appear to be set here; confirm.
        self.close_files()
        logger.error(error)

        def not_found(path: str) -> NoReturn:
            """Raise OSError.

            :param path: path of the executable
            """
            logger.error("%s not found", path)
            e3.log.debug("PATH=%s", os.environ["PATH"])
            raise OSError(errno.ENOENT,
                          f"No such file or directory, {path} not found")

        # Try to send an helpful message if one of the executable has not
        # been found.
        for cmd in cmds:
            if which(cmd[0], default=None) is None:
                not_found(cmd[0])

    def wait(self) -> int:
        """Wait until process ends and return its status.

        :return: exit code of the process
        """
        if self.status is not None:
            # Wait has already been called
            return self.status

        # If there is no pipe in the loop then just do a wait. Otherwise
        # in order to avoid blocked processes due to full pipes, use
        # communicate.
        if (self.output_file.fd != subprocess.PIPE
                and self.error_file.fd != subprocess.PIPE
                and self.input_file.fd != subprocess.PIPE):
            self.status = self.internal.wait()
        else:
            tmp_input: Optional[str | bytes] = None
            if self.input_file.fd == subprocess.PIPE:
                tmp_input = self.input_file.get_command()
                # communicate() is given bytes since universal_newlines is
                # disabled on the underlying Popen.
                if isinstance(tmp_input, str):
                    tmp_input = tmp_input.encode("utf-8")

            (self.raw_out,
             self.raw_err) = self.internal.communicate(tmp_input)
            self.status = self.internal.returncode

        self.close_files()
        return self.status

    def poll(self) -> Optional[int]:
        """Check the process status and set self.status if available.

        This method checks whether the underlying process has exited
        or not. If it hasn't, then it just returns None immediately.
        Otherwise, it stores the process' exit code in self.status
        and then returns it.

        :return: None if the process is still alive; otherwise, returns
          the process exit status.
        """
        if self.status is not None:
            # Process is already terminated and wait been called
            return self.status

        result = self.internal.poll()
        if result is not None:
            # Process is finished, call wait to finalize it (closing
            # handles, ...)
            return self.wait()
        else:
            return None

    def kill(self, recursive: bool = True, timeout: int = 3) -> None:
        """Kill the process.

        :param recursive: if True, try to kill the complete process tree
        :param timeout: wait timeout (in seconds) after sending the kill
            signal (when recursive=True)
        """
        if recursive:
            kill_process_tree(self.internal, timeout=timeout)
        else:
            self.internal.kill()

    def interrupt(self) -> None:
        """Send SIGINT to the process, kill on Windows."""
        if sys.platform == "win32":
            self.kill()  # Ctrl-C event is unreliable on Windows
        else:
            self.internal.send_signal(signal.SIGINT)

    def is_running(self) -> bool:
        """Check whether the process is running."""
        if psutil is None:  # defensive code
            # psutil not imported, use our is_running function
            return is_running(self.pid)
        else:
            return self.internal.is_running()

    def children(self) -> list[Any]:
        """Return list of child processes (using psutil)."""
        if psutil is None:  # defensive code
            raise NotImplementedError("Run.children() require psutil")
        return self.internal.children()
class Region: """A wrapper class around a psutil popen instance of a region; handling logging and beckground tasks associated with this region""" proc = None stats = {} isRunning = False shuttingDown = False def __init__(self, uuid, name, binDir, regionDir, dispatchUrl, externalAddress): self.id = uuid self.name = name self.externalAddress = externalAddress self.dispatchUrl = dispatchUrl self.startString = "Halcyon.exe -name %s -console rest" % name self.startDir = os.path.join(regionDir, self.id) self.pidFile = os.path.join(self.startDir, 'Halcyon.pid') if os.name != 'nt': self.startString = "mono %s" % self.startString else: self.startString = os.path.join(self.startDir, self.startString) self.logFile = os.path.join(self.startDir, 'Halcyon.log') #start job processing thread self.jobQueue = Queue() th = threading.Thread(target=self._doTasks) th.daemon = True th.start() #start process monitoring thread th = threading.Thread(target=self._monitorProcess) th.daemon = True th.start() #start log monitoring thread th = threading.Thread(target=self._monitorLog) th.daemon = True th.start() # attempt process recovery from pidfile if os.path.exists(self.pidFile): try: pid = int(open(self.pidFile).read()) self.proc = psutil.Process(pid) if not "Halcyon.exe" in self.proc.name(): self.proc = None self.isRunning = self.proc.status() in [psutil.STATUS_RUNNING, psutil.STATUS_SLEEPING, psutil.STATUS_DISK_SLEEP] except: self.isRunning = False if not self.isRunning: # dont potentially delete and replace binaries on running processes! 
self.jobQueue.put(("_checkBinaries", (binDir,))) def __del__(self): self.shuttingDown = True def _monitorProcess(self): """Monitor process and update statistics""" while not self.shuttingDown: if not self.proc: self.isRunning = False else: try: self.isRunning = self.proc.status() in [psutil.STATUS_RUNNING, psutil.STATUS_SLEEPING, psutil.STATUS_DISK_SLEEP] except psutil.NoSuchProcess: self.isRunning = False stats = {} stats["timestamp"] = time.time() stats["isRunning"] = self.isRunning; if self.isRunning: try: stats["uptime"] = time.time() - self.proc.create_time() stats["memPercent"] = self.proc.memory_percent(memtype="uss") stats["memKB"] = self.proc.memory_full_info().uss / 1024 stats["cpuPercent"] = self.proc.cpu_percent(interval=None) except: pass self.stats = stats time.sleep(5) def _monitorLog(self): """Monitor log file and append new lines to the internal queue""" while not self.shuttingDown and not os.path.isfile(self.logFile): time.sleep(5) f = open(self.logFile, 'r') f.seek(0,2) lines = [] url = "http://%s/logs/%s" % (self.dispatchUrl,self.id) while not self.shuttingDown: line = f.readline() if line: lines.append(line) continue else: if len(lines) == 0: time.sleep(5) continue try: req = requests.post(url,data={'log': json.dumps(lines)}, verify=False) if not req.status_code == requests.codes.ok: print "Error sending %s: %s" % (self.region, req.content) else: #logs uploaded successfully lines = [] except requests.ConnectionError: print "error uploading logs to master" time.sleep(5) continue f.close() def _doTasks(self): """Process asynchronous lambda tasks from internal queue""" while not self.shuttingDown: #block and wait for a new job try: (functor, args) = self.jobQueue.get(False) except Empty: time.sleep(5) continue except IOError: continue try: getattr(self, functor)(*args) except: print "Error processing job %s: %s" % (functor, sys.exc_info()) def _checkBinaries(self, binDir): """make sure we are ready to start the process when necessary""" #clean 
up/create region folder if not os.path.isdir(self.startDir): shutil.copytree(binDir, self.startDir) else: #update binaries if they have changed if os.path.getmtime(binDir) > os.path.getmtime(self.startDir): shutil.rmtree(self.startDir) shutil.copytree(binDir, self.startDir) def start(self, ini, xml): """schedule the process to start""" self.jobQueue.put(("_start", (ini, xml))) def _start(self, ini, xml): """start the process""" if self.isRunning: return # write the Halcyon.ini config file f = open(os.path.join(self.startDir, 'Halcyon.ini'), 'w') f.write(ini) f.close() # write the Regions.cfg file if not os.path.exists(os.path.join(self.startDir, 'Regions')): os.mkdir(os.path.join(self.startDir, 'Regions')) f = open(os.path.join(self.startDir, 'Regions', 'default.xml'), 'w') f.write(xml) f.close() self.proc = Popen(self.startString.split(" "), cwd=self.startDir, stdout=DEVNULL, stderr=DEVNULL) #write a pidfile f = open(self.pidFile, 'w') f.write(str(self.proc.pid)) f.close() def kill(self): """immediately terminate the process""" if os.path.exists(self.pidFile): os.remove(self.pidFile) try: self.proc.kill() except psutil.NoSuchProcess: pass def saveOar(self, reportUrl, uploadUrl): """schedule an oar save and upload to MGM""" try: self.jobQueue.put(("_saveOar", (reportUrl, uploadUrl,))) except: return False return True def _saveOar(self, reportUrl, uploadUrl): """perform the actual oar load""" if not self.isRunning: print "Save oar aborted, region is not running" requests.post(reportUrl, data={"Status": "Error: Region is not running"}, verify=False) return oarFile = os.path.join(self.startDir, '%s.oar' % self.name) statusFile = os.path.join(self.startDir, '%s.oarstatus' % self.name) requests.post(reportUrl, data={"Status": "Saving..."}, verify=False) #wait for statusfile to be written on completion while self.isRunning and not os.path.exists(statusFile): time.sleep(5) def is_open(file_name): if os.path.exists(file_name): try: os.rename(file_name,file_name) return 
False except: return True raise NameError #wait for process to close the archive # no timeout, this can take a long time while self.isRunning and is_open(oarFile): time.sleep(5) if not self.isRunning: requests.post(reportUrl, data={"Status": "Error: region halted during save"}, verify=False) return # check statusfile print "Save oar complete for region %s" % self.id with open(statusFile, 'rb') as f: data = f.read() if data[0] == '\x01': # success print "Save oar for region %s succeeded" % self.id r = requests.post(uploadUrl, data={"Success": True}, files={'file': (self.name, open(oarFile, 'rb'))}, verify=False) else: #failure print "Save oar for region %s failed for unspecified reason" % self.id requests.post(reportUrl, data={"Status": "Error: an unknown error occurred while saving the oar file"}, verify=False) def loadOar(self, readyUrl, reportUrl, oarFile): """schedule an oar download from MGM and load into region""" try: data = oarFile.file.read() fp = open(os.path.join(self.startDir, '%s.oar' % self.name), 'wb') fp.write(data) fp.close() except: return False return True
class Run(object):
    """Class to handle processes.

    :ivar cmds: The ``cmds`` argument passed to the __init__ method
        (a command line passed in a list, or a list of command lines passed
        as a list of list).
    :ivar status: The exit status. As the exit status is only meaningful
        after the process has exited, its initial value is None. When a
        problem running the command is detected and a process does not get
        created, its value gets set to the special value 127.
    :ivar out: process standard output (if instantiated with output = PIPE)
    :ivar err: same as out but for standard error
    :ivar pid: PID. Set to -1 if the command failed to run.
    """

    def __init__(self, cmds, cwd=None, output=PIPE,
                 error=STDOUT, input=None, bg=False, timeout=None,
                 env=None, set_sigpipe=True, parse_shebang=False,
                 ignore_environ=True, python_executable=sys.executable):
        """Spawn a process.

        :param cmds: two possibilities:
            1) a command line: a tool name and its arguments, passed
               in a list. e.g. ['ls', '-a', '.']
            2) a list of command lines (as defined in (1)): the
               different commands will be piped. This means that
               [['ps', '-a'], ['grep', 'vxsim']] will be equivalent to
               the system command line 'ps -a | grep vxsim'.
        :type cmds: list[str] | list[list[str]]
        :param cwd: directory in which the process should be executed (string
            or None). If None then current directory is used
        :type cwd: str | None
        :param output: can be PIPE (default), a filename string, a fd on an
            already opened file, a python file object or None (for stdout).
        :type output: int | str | file | None
        :param error: same as output or STDOUT, which indicates that the
            stderr data from the applications should be captured into the
            same file handle as for stdout.
        :type error: int | str | file | None
        :param input: same as output
        :type input: int | str | file | None
        :param bg: if True then run in background
        :type bg: bool
        :param timeout: limit execution time (in seconds), None means
            unlimited
        :type timeout: int | None
        :param env: dictionary for environment variables (e.g. os.environ)
        :type env: dict
        :param set_sigpipe: reset SIGPIPE handler to default value
        :type set_sigpipe: bool
        :param parse_shebang: take the #! interpreter line into account
        :type parse_shebang: bool
        :param ignore_environ: Applies only when env parameter is not None.
            When set to True (the default), the only environment variables
            passed to the program are the ones provided by the env parameter.
            Otherwise, the environment passed to the program consists of the
            environment variables currently defined (os.environ) augmented by
            the ones provided in env.
        :type ignore_environ: bool
        :param python_executable: name or path to the python executable
        :type python_executable: str

        :raise OSError: when trying to execute a non-existent file.

        If you specify a filename for output or stderr then file content is
        reset (equiv. to > in shell). If you prepend the filename with '+'
        then the file will be opened in append mode (equiv. to >> in shell)
        If you prepend the input with '|', then the content of input string
        will be used for process stdin.
        """
        def add_interpreter_command(cmd_line):
            """Add the interpreter defined in the #! line to cmd_line.

            If the #! line cannot be parsed, just return the cmd_line
            unchanged.

            If the interpreter command line contains /usr/bin/env python it
            will be replaced by the value of python_executable.

            On windows, /usr/bin/env will be ignored to avoid a dependency
            on cygwin.

            :param cmd_line: command line
            :type cmd_line: list[str]
            """
            if not parse_shebang:
                # nothing to do
                return cmd_line
            prog = which(cmd_line[0], default=None)
            if prog is None:
                # Not found. Do not modify the command line
                return cmd_line

            with open(prog) as f:
                header = f.read()[0:2]
                if header != "#!":
                    # Unknown header
                    return cmd_line
                # Header found, get the interpreter command in the first line
                f.seek(0)
                line = f.readline()
                interpreter_cmds = [l.strip() for l in
                                    line[line.find('!') + 1:].split()]
                # Pass the program path to the interpreter
                if len(cmd_line) > 1:
                    cmd_line = [prog] + list(cmd_line[1:])
                else:
                    cmd_line = [prog]

                # If the interpreter is '/usr/bin/env python', use
                # python_executable instead to keep the same python
                # executable
                if interpreter_cmds[0:2] == ['/usr/bin/env', 'python']:
                    if len(interpreter_cmds) > 2:
                        return [python_executable] + \
                            interpreter_cmds[2:] + cmd_line
                    else:
                        return [python_executable] + cmd_line
                elif sys.platform == 'win32':  # unix: no cover
                    if interpreter_cmds[0] == '/usr/bin/env':
                        return interpreter_cmds[1:] + cmd_line
                    elif interpreter_cmds[0] in ('/bin/bash', '/bin/sh') and \
                            'SHELL' in os.environ:
                        return [os.environ['SHELL']] + cmd_line
                return interpreter_cmds + cmd_line

        # First resolve output, error and input
        self.input_file = File(input, 'r')
        self.output_file = File(output, 'w')
        self.error_file = File(error, 'w')

        self.status = None
        self.out = ''
        self.err = ''
        self.cmds = []

        if env is not None and not ignore_environ:
            # ignore_environ is False, so get a copy of the current
            # environment and update it with the env dictionary.
            tmp = os.environ.copy()
            tmp.update(env)
            env = tmp

        # When a timeout is requested, wrap the command with the external
        # rlimit tool (if available) which enforces it.
        rlimit_args = []
        if timeout is not None:
            rlimit = get_rlimit()
            if os.path.exists(rlimit):
                rlimit_args = [rlimit, '%d' % timeout]
            else:
                logger.warning('cannot find rlimit at %s', rlimit)
                rlimit_args = []

        try:
            if isinstance(cmds[0], basestring):
                # single command line
                self.cmds = rlimit_args + list(add_interpreter_command(cmds))
            else:
                # pipeline: rlimit only wraps the first command
                self.cmds = [add_interpreter_command(c) for c in cmds]
                self.cmds[0] = rlimit_args + list(self.cmds[0])

            cmdlogger.debug('Run: cd %s; %s' % (
                cwd if cwd is not None else os.getcwd(),
                self.command_line_image()))

            if isinstance(cmds[0], basestring):
                popen_args = {
                    'stdin': self.input_file.fd,
                    'stdout': self.output_file.fd,
                    'stderr': self.error_file.fd,
                    'cwd': cwd,
                    'env': env,
                    'universal_newlines': True}

                if sys.platform != 'win32' and set_sigpipe:
                    # preexec_fn is not supported on windows
                    popen_args['preexec_fn'] = subprocess_setup

                self.internal = Popen(self.cmds, **popen_args)

            else:
                runs = []
                for index, cmd in enumerate(self.cmds):
                    if index == 0:
                        stdin = self.input_file.fd
                    else:
                        # chain each process' stdin to the previous stdout
                        stdin = runs[index - 1].stdout

                    # When connecting two processes using a Pipe don't use
                    # universal_newlines mode. Indeed commands transmitting
                    # binary data between them will crash
                    # (e.g. gzip -dc foo.txt | tar -xf -)
                    if index == len(self.cmds) - 1:
                        stdout = self.output_file.fd
                        txt_mode = True
                    else:
                        stdout = subprocess.PIPE
                        txt_mode = False

                    popen_args = {
                        'stdin': stdin,
                        'stdout': stdout,
                        'stderr': self.error_file.fd,
                        'cwd': cwd,
                        'env': env,
                        'universal_newlines': txt_mode}

                    if sys.platform != 'win32' and set_sigpipe:
                        # preexec_fn is not supported on windows
                        popen_args['preexec_fn'] = subprocess_setup

                    try:
                        runs.append(Popen(cmd, **popen_args))
                    except OSError as e:
                        logger.error('error when spawning %s', cmd)
                        # We have an error (e.g. file not found), try to kill
                        # all processes already started.
                        for p in runs:
                            p.terminate()
                        raise

                    # track the last process of the pipeline
                    self.internal = runs[-1]

        except Exception as e:
            self.__error(e, self.cmds)
            raise

        self.pid = self.internal.pid

        if not bg:
            self.wait()

    def command_line_image(self):
        """Get shell command line image of the spawned command(s).

        :rtype: str

        This just a convenient wrapper around the function of the same
        name.
        """
        return command_line_image(self.cmds)

    def close_files(self):
        """Close all file descriptors."""
        self.output_file.close()
        self.error_file.close()
        self.input_file.close()

    def __error(self, error, cmds):
        """Set pid to -1 and status to 127 before closing files."""
        # BUGFIX: these two assignments were missing, contradicting both
        # this docstring and the class docstring (which promise pid == -1
        # and status == 127 when the process never got created).
        self.pid = -1
        self.status = 127
        self.close_files()
        logger.error(error)

        def not_found(path):
            """Raise OSError.

            :param path: path of the executable
            :type path: str
            """
            logger.error("%s not found", path)
            e3.log.debug('PATH=%s', os.environ['PATH'])
            raise OSError(errno.ENOENT,
                          'No such file or directory, %s not found' % path)

        # Try to send an helpful message if one of the executable has not
        # been found.
        if isinstance(cmds[0], basestring):
            if which(cmds[0], default=None) is None:
                not_found(cmds[0])
        else:
            for cmd in cmds:
                if which(cmd[0], default=None) is None:
                    not_found(cmd[0])

    def wait(self):
        """Wait until process ends and return its status.

        :return: exit code of the process
        :rtype: int
        """
        if self.status is not None:
            # Wait has already been called
            return self.status

        # If there is no pipe in the loop then just do a wait. Otherwise
        # in order to avoid blocked processes due to full pipes, use
        # communicate.
        if self.output_file.fd != subprocess.PIPE and \
                self.error_file.fd != subprocess.PIPE and \
                self.input_file.fd != subprocess.PIPE:
            self.status = self.internal.wait()
        else:
            tmp_input = None
            if self.input_file.fd == subprocess.PIPE:
                tmp_input = self.input_file.get_command()

            (self.out, self.err) = self.internal.communicate(tmp_input)
            self.status = self.internal.returncode

        self.close_files()
        return self.status

    def poll(self):
        """Check the process status and set self.status if available.

        This method checks whether the underlying process has exited
        or not. If it hasn't, then it just returns None immediately.
        Otherwise, it stores the process' exit code in self.status
        and then returns it.

        :return: None if the process is still alive; otherwise, returns
            the process exit status.
        :rtype: int | None
        """
        if self.status is not None:
            # Process is already terminated and wait been called
            return self.status

        result = self.internal.poll()

        if result is not None:
            # Process is finished, call wait to finalize it (closing
            # handles, ...)
            return self.wait()
        else:
            return None

    def kill(self):
        """Kill the process."""
        self.internal.kill()

    def interrupt(self):
        """Send SIGINT CTRL_C_EVENT to the process."""
        # On windows CTRL_C_EVENT is available and SIGINT is not;
        # and the other way around on other platforms.
        interrupt_signal = getattr(signal, 'CTRL_C_EVENT', signal.SIGINT)
        self.internal.send_signal(interrupt_signal)

    def is_running(self):
        """Check whether the process is running.

        :rtype: bool
        """
        if psutil is None:
            # psutil not imported, use our is_running function
            return is_running(self.pid)
        else:
            return self.internal.is_running()

    def children(self):
        """Return list of child processes (using psutil).

        :rtype: list[psutil.Process]
        """
        if psutil is None:
            raise NotImplementedError('Run.children() require psutil')
        return self.internal.children()