Example #1
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain any of the variables available
      that are being passed to this class. They will be replaced using the
      python format syntax.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's split using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the executing
      program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.

    - **use_fds**: if True, will not close the fds in the subprocess.
      default: False.

    - **pipe_stdout**: if True, will open a PIPE on stdout. default: True.

    - **pipe_stderr**: if True, will open a PIPE on stderr. default: True.

    - **close_child_stdout**: If True, redirects the child process' stdout
      to /dev/null after the fork. default: False.

    - **close_child_stderr**: If True, redirects the child process' stderr
      to /dev/null after the fork. default: False.
    """
    def __init__(self, wid, cmd, args=None, working_dir=None, shell=False,
                 uid=None, gid=None, env=None, rlimits=None, executable=None,
                 use_fds=False, watcher=None, spawn=True,
                 pipe_stdout=True, pipe_stderr=True,
                 close_child_stdout=False, close_child_stderr=False):

        self.wid = wid
        self.cmd = cmd
        self.args = args
        self.working_dir = working_dir or get_working_dir()
        self.shell = shell
        self.uid = to_uid(uid) if uid else None
        self.gid = to_gid(gid) if gid else None
        self.env = env or {}
        self.rlimits = rlimits or {}
        self.executable = executable
        self.use_fds = use_fds
        self.watcher = watcher
        self.pipe_stdout = pipe_stdout
        self.pipe_stderr = pipe_stderr
        self.close_child_stdout = close_child_stdout
        self.close_child_stderr = close_child_stderr
        self.stopping = False

        if spawn:
            self.spawn()

    def _null_streams(self, streams):
        devnull = os.open(os.devnull, os.O_RDWR)
        try:
            for stream in streams:
                if not hasattr(stream, 'fileno'):
                    # we're probably dealing with a file-like
                    continue
                try:
                    stream.flush()
                    os.dup2(devnull, stream.fileno())
                except IOError:
                    # some streams, like stdin, might already be closed.
                    pass
        finally:
            os.close(devnull)

    def spawn(self):
        args = self.format_args()

        def preexec_fn():
            streams = [sys.stdin]

            if self.close_child_stdout:
                streams.append(sys.stdout)

            if self.close_child_stderr:
                streams.append(sys.stderr)

            self._null_streams(streams)
            os.setsid()

            for limit, value in self.rlimits.items():
                res = getattr(resource, 'RLIMIT_%s' % limit.upper(), None)
                if res is None:
                    raise ValueError('unknown rlimit "%s"' % limit)
                # TODO(petef): support hard/soft limits
                resource.setrlimit(res, (value, value))

            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)

            if self.uid:
                os.setuid(self.uid)

        extra = {}
        if self.pipe_stdout:
            extra['stdout'] = PIPE

        if self.pipe_stderr:
            extra['stderr'] = PIPE

        self._worker = Popen(args, cwd=self.working_dir,
                             shell=self.shell, preexec_fn=preexec_fn,
                             env=self.env, close_fds=not self.use_fds,
                             executable=self.executable, **extra)

        self.started = time.time()

    def format_args(self):
        """ It's possible to use environment variables and some other variables
        that are available in this context, when spawning the processes.
        """
        logger.debug('cmd: ' + bytestring(self.cmd))
        logger.debug('args: ' + str(self.args))

        current_env = ObjectDict(self.env.copy())

        format_kwargs = {
            'wid': self.wid, 'shell': self.shell, 'args': self.args,
            'env': current_env, 'working_dir': self.working_dir,
            'uid': self.uid, 'gid': self.gid, 'rlimits': self.rlimits,
            'executable': self.executable, 'use_fds': self.use_fds}

        if self.watcher is not None:
            format_kwargs['sockets'] = self.watcher._get_sockets_fds()
            for option in self.watcher.optnames:
                if option not in format_kwargs\
                        and hasattr(self.watcher, option):
                    format_kwargs[option] = getattr(self.watcher, option)

        cmd = replace_gnu_args(self.cmd, **format_kwargs)

        if '$WID' in cmd or (self.args and '$WID' in self.args):
            msg = "Using $WID in the command is deprecated. You should use "\
                  "the python string format instead. In you case, this means "\
                  "replacing the $WID in your command by $(WID)."

            warnings.warn(msg, DeprecationWarning)
            self.cmd = cmd.replace('$WID', str(self.wid))

        if self.args is not None:
            if isinstance(self.args, string_types):
                args = shlex.split(bytestring(replace_gnu_args(
                    self.args, **format_kwargs)))
            else:
                args = [bytestring(replace_gnu_args(arg, **format_kwargs))
                        for arg in self.args]
            args = shlex.split(bytestring(cmd)) + args
        else:
            args = shlex.split(bytestring(cmd))

        logger.debug("process args: %s", args)
        return args

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def is_alive(self):
        return self.poll() is None

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        logger.debug("sending signal %s to %s" % (sig, self.pid))
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Stop the process and close stdout/stderr

        If the corresponding process is still here
        (normally it's already killed by the watcher),
        a SIGTERM is sent, then a SIGKILL after 1 second.

        The shutdown process (SIGTERM then SIGKILL) is
        normally taken by the watcher. So if the process
        is still there here, it's a kind of bad behavior
        because the graceful timeout won't be respected here.
        """
        try:
            try:
                if self._worker.poll() is None:
                    return self._worker.terminate()
            finally:
                if self._worker.stderr is not None:
                    self._worker.stderr.close()
                if self._worker.stdout is not None:
                    self._worker.stdout.close()
        except NoSuchProcess:
            pass

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"

        info["age"] = self.age()
        info["started"] = self.started
        info["children"] = []
        for child in self._worker.get_children():
            info["children"].append(get_info(child))

        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in self._worker.get_children()]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in self._worker.get_children()]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict([(child.pid, child)
                         for child in self._worker.get_children()])

        children[pid].send_signal(signum)

    @debuglog
    def send_signal_children(self, signum):
        """Send signal *signum* to all children."""
        for child in self._worker.get_children():
            try:
                child.send_signal(signum)
            except OSError as e:
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - UNEXISTING
        - OTHER
        """
        try:
            if self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return UNEXISTING

        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stdout* stream"""
        return self._worker.stderr

    def __eq__(self, other):
        return self is other

    def __lt__(self, other):
        return self.started < other.started

    def __gt__(self, other):
        return self.started > other.started
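A minimal usage sketch for the wrapper above, assuming the circus helpers it relies on (get_working_dir, replace_gnu_args, debuglog, get_info, ...) are importable; the wid and command are illustrative, not taken from the original code:

import signal

proc = Process(wid=1, cmd="sleep 120")   # spawn=True forks the child right away
print(proc.pid, proc.status)             # child pid and RUNNING/DEAD_OR_ZOMBIE/...
print(proc.age())                        # seconds since spawn()
proc.send_signal(signal.SIGTERM)         # forward a signal to the child
proc.stop()                              # terminate if still alive, close the pipes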
Example #2
class BackgroundRunner:
    def __init__(self, log_queue):
        self.process = None
        self.killed = False
        self.output_file = None
        self.error_output_file = None
        self.log_queue = log_queue
        self.error_detected = False
        self.success_detected = False
        self.error_message = []
        self.success_message = []
        self.started_at = None

    def start_exec(self,
                   command,
                   work_dir: str = None,
                   shell: bool = False,
                   errors=(),
                   successes=()):
        self.clean()
        logger.debug(f"Using work dir: {work_dir}")
        work_path = Path(work_dir)
        work_path.mkdir(exist_ok=True, parents=True)
        self.output_file = work_path / f"encoder_output_{secrets.token_hex(6)}.log"
        self.error_output_file = work_path / f"encoder_error_output_{secrets.token_hex(6)}.log"
        logger.debug(f"command output file set to: {self.output_file}")
        logger.debug(
            f"command error output file set to: {self.error_output_file}")
        self.output_file.touch(exist_ok=True)
        self.error_output_file.touch(exist_ok=True)
        self.error_message = errors
        self.success_message = successes
        logger.info(f"Running command: {command}")
        try:
            self.process = Popen(
                shlex.split(command.replace("\\", "\\\\"))
                if not shell and isinstance(command, str) else command,
                shell=shell,
                cwd=work_dir,
                stdout=open(self.output_file, "w"),
                stderr=open(self.error_output_file, "w"),
                stdin=PIPE,  # FFmpeg can try to read stdin and wreak havoc on linux
                encoding="utf-8",
            )
        except PermissionError:
            logger.error(
                "Could not encode video due to permissions error."
                "Please make sure encoder is executable and you have permissions to run it."
                "Otherwise try running FastFlix as an administrator.")
            self.error_detected = True
            return
        except Exception:
            logger.exception("Could not start worker process")
            self.error_detected = True
            return

        self.started_at = datetime.datetime.now(datetime.timezone.utc)

        Thread(target=self.read_output).start()

    def read_output(self):
        with open(self.output_file, "r", encoding="utf-8",
                  errors="ignore") as out_file, open(
                      self.error_output_file,
                      "r",
                      encoding="utf-8",
                      errors="ignore") as err_file:
            while True:
                if not self.is_alive():
                    excess = out_file.read()
                    logger.info(excess)
                    self.log_queue.put(excess)

                    err_excess = err_file.read()
                    logger.info(err_excess)
                    self.log_queue.put(err_excess)
                    if self.process.returncode is not None and self.process.returncode > 0:
                        self.error_detected = True
                    break
                line = out_file.readline().rstrip()
                if line:
                    logger.info(line)
                    self.log_queue.put(line)
                    if not self.success_detected:
                        for success in self.success_message:
                            if success in line:
                                self.success_detected = True

                err_line = err_file.readline().rstrip()
                if err_line:
                    logger.info(err_line)
                    self.log_queue.put(err_line)
                    if "Conversion failed!" in err_line or "Error during output" in err_line:
                        self.error_detected = True
                    if not self.error_detected:
                        for error in self.error_message:
                            if error in err_line:
                                self.error_detected = True

        try:
            self.output_file.unlink()
            self.error_output_file.unlink()
        except OSError:
            pass

    def read(self, limit=None):
        if not self.is_alive():
            return
        return self.process.stdout.read(limit)

    def is_alive(self):
        if not self.process:
            return False
        return True if self.process.poll() is None else False

    def clean(self):
        self.kill(log=False)
        self.process = None
        self.error_detected = False
        self.success_detected = False
        self.killed = False
        self.started_at = None

    def kill(self, log=True):
        if self.process and self.process.poll() is None:
            if log:
                logger.info(f"Killing worker process {self.process.pid}")
            try:
                # if reusables.win_based:
                #     os.kill(self.process.pid, signal.CTRL_C_EVENT)
                # else:
                #     os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
                self.process.terminate()
                self.process.kill()
            except Exception as err:
                if log:
                    logger.exception(f"Couldn't terminate process: {err}")
        self.killed = True

    def pause(self):
        if not self.process:
            return False
        self.process.suspend()

    def resume(self):
        if not self.process:
            return False
        self.process.resume()
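A hypothetical driver for the runner above; the ffmpeg command, work dir, and marker strings are placeholders rather than FastFlix's real values:

import queue

logs = queue.Queue()
runner = BackgroundRunner(log_queue=logs)
runner.start_exec(
    "ffmpeg -i input.mkv -c:v libx265 output.mkv",   # placeholder command
    work_dir="/tmp/fastflix_work",                   # placeholder directory
    successes=("video:",),                           # placeholder marker strings
    errors=("No such file or directory",),
)
while runner.is_alive():
    try:
        print(logs.get(timeout=0.5))                 # lines pushed by read_output()
    except queue.Empty:
        pass
print("failed" if runner.error_detected else "ok")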
Example #3
class BackgroundRunner:
    def __init__(self, log_queue):
        self.process = None
        self.process_two = None
        self.killed = False
        self.output_file = None
        self.error_output_file = None
        self.log_queue = log_queue
        self.error_detected = False
        self.success_detected = False
        self.error_message = []
        self.success_message = []

    def start_exec(self,
                   command,
                   work_dir: str = None,
                   shell: bool = False,
                   errors=(),
                   successes=()):
        self.clean()
        logger.info(f"Running command: {command}")
        Path(work_dir).mkdir(exist_ok=True, parents=True)
        self.output_file = Path(
            work_dir) / f"encoder_output_{secrets.token_hex(6)}.log"
        self.error_output_file = Path(
            work_dir) / f"encoder_error_output_{secrets.token_hex(6)}.log"
        self.output_file.touch(exist_ok=True)
        self.error_output_file.touch(exist_ok=True)
        self.error_message = errors
        self.success_message = successes

        self.process = Popen(
            shlex.split(command)
            if not shell and isinstance(command, str) else command,
            shell=shell,
            cwd=work_dir,
            stdout=open(self.output_file, "w"),
            stderr=open(self.error_output_file, "w"),
            stdin=PIPE,  # FFmpeg can try to read stdin and wreak havoc on linux
            encoding="utf-8",
        )

        Thread(target=self.read_output).start()

    def start_piped_exec(self,
                         command_one,
                         command_two,
                         work_dir,
                         errors=(),
                         successes=()):
        self.clean()
        logger.info(
            f"Running commands: {' '.join(command_one)} | {' '.join(command_two)}"
        )
        Path(work_dir).mkdir(exist_ok=True, parents=True)
        self.output_file = Path(
            work_dir) / f"encoder_output_{secrets.token_hex(6)}.log"
        self.error_output_file = Path(
            work_dir) / f"encoder_error_output_{secrets.token_hex(6)}.log"
        self.output_file.touch(exist_ok=True)
        self.error_output_file.touch(exist_ok=True)
        self.error_message = errors
        self.success_message = successes

        self.process = Popen(
            command_one,
            cwd=work_dir,
            stdout=PIPE,
            stderr=PIPE,
            stdin=PIPE,  # FFmpeg can try to read stdin and wreak havoc on linux
        )

        self.process_two = Popen(
            command_two,
            cwd=work_dir,
            stdout=open(self.output_file, "w"),
            stderr=open(self.error_output_file, "w"),
            stdin=self.process.stdout,
            encoding="utf-8",
        )

        self.error_detected = False

        Thread(target=self.read_output).start()

    def read_output(self):
        with open(self.output_file, "r", encoding="utf-8",
                  errors="ignore") as out_file, open(
                      self.error_output_file,
                      "r",
                      encoding="utf-8",
                      errors="ignore") as err_file:
            while True:
                if not self.is_alive():
                    excess = out_file.read()
                    logger.info(excess)
                    self.log_queue.put(excess)

                    err_excess = err_file.read()
                    logger.info(err_excess)
                    self.log_queue.put(err_excess)
                    break
                line = out_file.readline().rstrip()
                if line:
                    logger.info(line)
                    self.log_queue.put(line)
                    if not self.success_detected:
                        for success in self.success_message:
                            if success in line:
                                self.success_detected = True

                err_line = err_file.readline().rstrip()
                if err_line:
                    logger.info(err_line)
                    self.log_queue.put(err_line)
                    if "Conversion failed!" in err_line:
                        self.error_detected = True
                    if not self.error_detected:
                        for error in self.error_message:
                            if error in err_line:
                                self.error_detected = True

        try:
            self.output_file.unlink()
            self.error_output_file.unlink()
        except OSError:
            pass

    def read(self, limit=None):
        if not self.is_alive():
            return
        return self.process.stdout.read(limit)

    def is_alive(self):
        if not self.process:
            return False
        if self.process_two:
            # TODO make sure process 1 dies cleanly
            return True if self.process_two.poll() is None else False
        return True if self.process.poll() is None else False

    def clean(self):
        self.kill(log=False)
        self.process = None
        self.process_two = None
        self.error_detected = False
        self.success_detected = False
        self.killed = False

    def kill(self, log=True):
        if self.process_two:
            if log:
                logger.info(f"Killing worker process {self.process_two.pid}")
            try:
                self.process_two.terminate()
                self.process_two.kill()
            except Exception as err:
                if log:
                    logger.exception(f"Couldn't terminate process: {err}")
        if self.process:
            if log:
                logger.info(f"Killing worker process {self.process.pid}")
            try:
                # if reusables.win_based:
                #     os.kill(self.process.pid, signal.CTRL_C_EVENT)
                # else:
                #     os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
                self.process.terminate()
                self.process.kill()
            except Exception as err:
                if log:
                    logger.exception(f"Couldn't terminate process: {err}")
        self.killed = True

    def pause(self):
        if self.process_two:
            return False
        if not self.process:
            return False
        self.process.suspend()

    def resume(self):
        if self.process_two:
            return False
        if not self.process:
            return False
        self.process.resume()
Example #4
def cmd(command, user=None, input=None, cli_input=None, cli_output=False, communicate=True,
        timeout=None, fail=True, log=None, tries=1, delay_min=5, delay_max=10, **kwargs):
    """
    Calls the `command` and returns a dictionary with process, stdout, stderr, and the returncode.

    Returned returncode, stdout and stderr will be None if `communicate` is set to False.

    :param user: If set, this will use ``sudo -u <user> ...`` to execute `command` as `user`.
    :type user: unicode
    :param input: If set, sent to stdin (if `communicate` is True).
    :type input: unicode
    :param cli_input: If set, sent to stdin (unconditionally).
    :type cli_input: unicode
    :param cli_output: Set to True to forward the process's stdout to stdout and stderr to stderr in real time.
    :type cli_output: bool
    :param fail: Set to False to avoid the exception `subprocess.CalledProcessError`.
    :type fail: bool
    :param log: A function to log/print details about what is executed/any failure, can be a logger.
    :type log: callable, logging.Logger
    :param communicate: Set to True to communicate with the process; this is a blocking call
                        (if timeout is None).
    :type communicate: bool
    :param timeout: Time-out for the communication with the process, in seconds.
    :type timeout: float
    :param tries: Number of times the command will be attempted.
    :type tries: int
    :param delay_min: Minimum delay to sleep after a failed attempt (`communicate` must be True).
    :type delay_min: float, int
    :param delay_max: Maximum delay to sleep after a failed attempt (`communicate` must be True).
    :type delay_max: float, int

    * Delay will be a random number in range (`delay_min`, `delay_max`)
    * Set kwargs with any other argument of the :mod:`subprocess`.Popen constructor,
      except stdin, stdout and stderr.

    """
    # convert log argument to logging functions
    log_debug = log_warning = log_exception = None
    if isinstance(log, logging.Logger):
        log_debug, log_warning, log_exception = log.debug, log.warning, log.exception
    elif hasattr(log, '__call__'):
        log_debug = log_warning = log_exception = log
    # create a list and a string of the arguments
    if isinstance(command, string_types):
        if user is not None:
            command = 'sudo -u {0} {1}'.format(user, command)
        args_list, args_string = shlex.split(to_bytes(command)), command
    else:
        if user is not None:
            command = ['sudo', '-u', user] + command
        args_list = [to_bytes(a) for a in command if a is not None]
        args_string = ' '.join([to_unicode(a) for a in command if a is not None])
    # log the execution
    if log_debug:
        log_debug('Execute {0}{1}{2}'.format(
            '' if input is None else 'echo {0}|'.format(repr(input)),
            args_string,
            '' if cli_input is None else ' < {0}'.format(repr(cli_input))))

    for trial in xrange(tries):  # noqa
        # create the sub-process
        try:
            process = Popen(
                args_list,
                stdin=subprocess.PIPE,
                stdout=None if cli_output else subprocess.PIPE,
                stderr=None if cli_output else subprocess.PIPE, **kwargs)
        except OSError as e:
            # unable to execute the program (e.g. does not exist)
            if log_exception:
                log_exception(e)
            if fail:
                raise
            return {'process': None, 'stdout': '', 'stderr': e, 'returncode': 2}
        # write to stdin (answer to questions, ...)
        if cli_input is not None:
            process.stdin.write(to_bytes(cli_input))
            process.stdin.flush()
        # interact with the process and wait for the process to terminate
        if communicate:
            data = {}

            # thanks http://stackoverflow.com/questions/1191374/subprocess-with-timeout
            def communicate_with_timeout(data=None):
                data['stdout'], data['stderr'] = process.communicate(input=input)
            thread = threading.Thread(target=communicate_with_timeout, kwargs={'data': data})
            thread.start()
            thread.join(timeout=timeout)
            if thread.is_alive():
                try:
                    process.terminate()
                    thread.join()
                except OSError as e:
                    # Handle the race condition where the process terminates
                    # just after the call to thread.is_alive().
                    if e.errno != errno.ESRCH:
                        raise
            stdout, stderr = data['stdout'], data['stderr']
        else:
            # get a return code that may, of course, still be None
            process.poll()
            stdout = stderr = None
        result = {
            'process': process,
            'stdout': stdout,
            'stderr': stderr,
            'returncode': process.returncode
        }
        if process.returncode == 0:
            break
        # failed attempt, may retry
        do_retry = trial < tries - 1
        delay = random.uniform(delay_min, delay_max)
        if log_warning:
            log_warning('Attempt {0} out of {1}: {2}'.format(trial+1, tries,
                        'Will retry in {0} seconds'.format(delay) if do_retry else 'Failed'))
        # raise if this is the last try
        if fail and not do_retry:
            raise subprocess.CalledProcessError(process.returncode, args_string, stderr)
        if do_retry:
            time.sleep(delay)

    return result
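A hypothetical call of the helper above (assuming the module's own helpers such as to_bytes and to_unicode are importable); the command and logger name are placeholders:

import logging

logging.basicConfig(level=logging.DEBUG)
# fail=False returns the result dict instead of raising CalledProcessError
# after the last failed attempt; tries=3 retries with a random 5-10 s delay.
result = cmd('uptime', log=logging.getLogger('deploy'), tries=3, fail=False)
print(result['returncode'])
print(result['stdout'])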
Example #5
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain any of the variables available
      that are being passed to this class. They will be replaced using the
      python format syntax.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's split using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the executing
      program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.

    - **use_fds**: if True, will not close the fds in the subprocess.
      default: False.

    - **pipe_stdout**: if True, will open a PIPE on stdout. default: True.

    - **pipe_stderr**: if True, will open a PIPE on stderr. default: True.

    - **close_child_stdout**: If True, redirects the child process' stdout
      to /dev/null after the fork. default: False.

    - **close_child_stderr**: If True, redirects the child process' stderr
      to /dev/null after the fork. default: False.
    """
    def __init__(self,
                 wid,
                 cmd,
                 args=None,
                 working_dir=None,
                 shell=False,
                 uid=None,
                 gid=None,
                 env=None,
                 rlimits=None,
                 executable=None,
                 use_fds=False,
                 watcher=None,
                 spawn=True,
                 pipe_stdout=True,
                 pipe_stderr=True,
                 close_child_stdout=False,
                 close_child_stderr=False):

        self.wid = wid
        self.cmd = cmd
        self.args = args
        self.working_dir = working_dir or get_working_dir()
        self.shell = shell
        self.uid = to_uid(uid) if uid else None
        self.gid = to_gid(gid) if gid else None
        self.env = env or {}
        self.rlimits = rlimits or {}
        self.executable = executable
        self.use_fds = use_fds
        self.watcher = watcher
        self.pipe_stdout = pipe_stdout
        self.pipe_stderr = pipe_stderr
        self.close_child_stdout = close_child_stdout
        self.close_child_stderr = close_child_stderr
        self.stopping = False
        # sockets created before fork, should be let go after.
        self._sockets = []

        if spawn:
            self.spawn()

    def _null_streams(self, streams):
        devnull = os.open(os.devnull, os.O_RDWR)
        try:
            for stream in streams:
                if not hasattr(stream, 'fileno'):
                    # we're probably dealing with a file-like
                    continue
                try:
                    stream.flush()
                    os.dup2(devnull, stream.fileno())
                except IOError:
                    # some streams, like stdin, might already be closed.
                    pass
        finally:
            os.close(devnull)

    def _get_sockets_fds(self):
        """Returns sockets dict. If this worker's cmd indicates use of
        a SO_REUSEPORT socket, a new socket is created and bound. This
        new socket's FD replaces original socket's FD in returned dict.
        This method populates `self._sockets` list. This list should be
        let go after `fork()`.
        """
        sockets_fds = None

        if self.watcher is not None and self.watcher.sockets is not None:
            sockets_fds = self.watcher._get_sockets_fds()
            reuseport_sockets = tuple(
                (sn, s) for (sn, s) in self.watcher.sockets.items()
                if s.so_reuseport)

            for sn, s in reuseport_sockets:
                # watcher.cmd uses this reuseport socket
                if 'circus.sockets.%s' % sn in self.watcher.cmd:
                    sock = CircusSocket.load_from_config(s._cfg)
                    sock.bind_and_listen()
                    # replace original socket's fd
                    sockets_fds[sn] = sock.fileno()
                    # keep new socket until fork returns
                    self._sockets.append(sock)

        return sockets_fds

    def spawn(self):
        sockets_fds = self._get_sockets_fds()

        args = self.format_args(sockets_fds=sockets_fds)

        def preexec_fn():
            streams = [sys.stdin]

            if self.close_child_stdout:
                streams.append(sys.stdout)

            if self.close_child_stderr:
                streams.append(sys.stderr)

            self._null_streams(streams)
            os.setsid()

            for limit, value in self.rlimits.items():
                res = getattr(resource, 'RLIMIT_%s' % limit.upper(), None)
                if res is None:
                    raise ValueError('unknown rlimit "%s"' % limit)
                # TODO(petef): support hard/soft limits
                resource.setrlimit(res, (value, value))

            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)

            if self.uid:
                os.setuid(self.uid)

        extra = {}
        if self.pipe_stdout:
            extra['stdout'] = PIPE

        if self.pipe_stderr:
            extra['stderr'] = PIPE

        self._worker = Popen(args,
                             cwd=self.working_dir,
                             shell=self.shell,
                             preexec_fn=preexec_fn,
                             env=self.env,
                             close_fds=not self.use_fds,
                             executable=self.executable,
                             **extra)

        # let go of sockets created only for self._worker to inherit
        self._sockets = []

        self.started = time.time()

    def format_args(self, sockets_fds=None):
        """ It's possible to use environment variables and some other variables
        that are available in this context, when spawning the processes.
        """
        logger.debug('cmd: ' + bytestring(self.cmd))
        logger.debug('args: ' + str(self.args))

        current_env = ObjectDict(self.env.copy())

        format_kwargs = {
            'wid': self.wid,
            'shell': self.shell,
            'args': self.args,
            'env': current_env,
            'working_dir': self.working_dir,
            'uid': self.uid,
            'gid': self.gid,
            'rlimits': self.rlimits,
            'executable': self.executable,
            'use_fds': self.use_fds
        }

        if sockets_fds is not None:
            format_kwargs['sockets'] = sockets_fds

        if self.watcher is not None:
            for option in self.watcher.optnames:
                if option not in format_kwargs\
                        and hasattr(self.watcher, option):
                    format_kwargs[option] = getattr(self.watcher, option)

        cmd = replace_gnu_args(self.cmd, **format_kwargs)

        if '$WID' in cmd or (self.args and '$WID' in self.args):
            msg = "Using $WID in the command is deprecated. You should use "\
                  "the python string format instead. In you case, this means "\
                  "replacing the $WID in your command by $(WID)."

            warnings.warn(msg, DeprecationWarning)
            self.cmd = cmd.replace('$WID', str(self.wid))

        if self.args is not None:
            if isinstance(self.args, string_types):
                args = shlex.split(
                    bytestring(replace_gnu_args(self.args, **format_kwargs)))
            else:
                args = [
                    bytestring(replace_gnu_args(arg, **format_kwargs))
                    for arg in self.args
                ]
            args = shlex.split(bytestring(cmd)) + args
        else:
            args = shlex.split(bytestring(cmd))

        if self.shell:
            # subprocess.Popen(shell=True) implies that 1st arg is the
            # requested command, remaining args are applied to sh.
            args = [' '.join(quote(arg) for arg in args)]
            shell_args = format_kwargs.get('shell_args', None)
            if shell_args and is_win():
                logger.warn(
                    "shell_args won't apply for "
                    "windows platforms: %s", shell_args)
            elif isinstance(shell_args, string_types):
                args += shlex.split(
                    bytestring(replace_gnu_args(shell_args, **format_kwargs)))
            elif shell_args:
                args += [
                    bytestring(replace_gnu_args(arg, **format_kwargs))
                    for arg in shell_args
                ]

        elif format_kwargs.get('shell_args', False):
            logger.warn(
                "shell_args is defined but won't be used "
                "in this context: %s", format_kwargs['shell_args'])
        logger.debug("process args: %s", args)
        return args

    def returncode(self):
        return self._worker.returncode

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def is_alive(self):
        return self.poll() is None

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        logger.debug("sending signal %s to %s" % (sig, self.pid))
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Stop the process and close stdout/stderr

        If the corresponding process is still here
        (normally it's already killed by the watcher),
        a SIGTERM is sent, then a SIGKILL after 1 second.

        The shutdown process (SIGTERM then SIGKILL) is
        normally taken by the watcher. So if the process
        is still there here, it's a kind of bad behavior
        because the graceful timeout won't be respected here.
        """
        try:
            try:
                if self._worker.poll() is None:
                    return self._worker.terminate()
            finally:
                if self._worker.stderr is not None:
                    self._worker.stderr.close()
                if self._worker.stdout is not None:
                    self._worker.stdout.close()
        except NoSuchProcess:
            pass

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"

        info["age"] = self.age()
        info["started"] = self.started
        info["children"] = []
        info['wid'] = self.wid
        for child in self._worker.get_children():
            info["children"].append(get_info(child))

        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in self._worker.get_children()]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in self._worker.get_children()]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict(
            (child.pid, child) for child in self._worker.get_children())
        try:
            children[pid].send_signal(signum)
        except KeyError:
            raise NoSuchProcess(pid)

    @debuglog
    def send_signal_children(self, signum):
        """Send signal *signum* to all children."""
        for child in self._worker.get_children():
            try:
                child.send_signal(signum)
            except OSError as e:
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - UNEXISTING
        - OTHER
        """
        try:
            if self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return UNEXISTING

        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stdout* stream"""
        return self._worker.stderr

    def __eq__(self, other):
        return self is other

    def __lt__(self, other):
        return self.started < other.started

    def __gt__(self, other):
        return self.started > other.started
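A standalone sketch of what the shell=True branch in format_args above does: the argument vector is collapsed into a single quoted command string, since Popen(shell=True) hands only the first argument to the shell. The quote() used here is assumed to be shlex.quote (circus may use pipes.quote on Python 2), and the argv values are made up:

from shlex import quote   # assumed stand-in for the quote() used above

argv = ['echo', 'hello world', '--flag']
shell_cmd = ' '.join(quote(arg) for arg in argv)
print(shell_cmd)          # -> echo 'hello world' --flag
# Popen([shell_cmd], shell=True) would then run the whole string via /bin/sh -c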
Example #6
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain any of the variables available
      that are being passed to this class. They will be replaced using the
      python format syntax.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's split using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the executing
      program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.

    - **use_fds**: if True, will not close the fds in the subprocess.
      default: False.
    """
    def __init__(self,
                 wid,
                 cmd,
                 args=None,
                 working_dir=None,
                 shell=False,
                 uid=None,
                 gid=None,
                 env=None,
                 rlimits=None,
                 executable=None,
                 use_fds=False,
                 watcher=None,
                 spawn=True):

        self.wid = wid
        self.cmd = cmd
        self.args = args
        self.working_dir = working_dir or get_working_dir()
        self.shell = shell
        self.uid = to_uid(uid) if uid else None
        self.gid = to_gid(gid) if gid else None
        self.env = env or {}
        self.rlimits = rlimits or {}
        self.executable = executable
        self.use_fds = use_fds
        self.watcher = watcher

        if spawn:
            self.spawn()

    def spawn(self):
        args = self.format_args()

        def preexec_fn():
            os.setsid()

            for limit, value in self.rlimits.items():
                res = getattr(resource, 'RLIMIT_%s' % limit.upper(), None)
                if res is None:
                    raise ValueError('unknown rlimit "%s"' % limit)
                # TODO(petef): support hard/soft limits
                resource.setrlimit(res, (value, value))

            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)

            if self.uid:
                os.setuid(self.uid)

        self._worker = Popen(args,
                             cwd=self.working_dir,
                             shell=self.shell,
                             preexec_fn=preexec_fn,
                             env=self.env,
                             close_fds=not self.use_fds,
                             stdout=PIPE,
                             stderr=PIPE,
                             executable=self.executable)

        self.started = time.time()

    def format_args(self):
        """ It's possible to use environment variables and some other variables
        that are available in this context, when spawning the processes.
        """
        logger.debug('cmd: ' + bytestring(self.cmd))
        logger.debug('args: ' + str(self.args))

        current_env = ObjectDict(self.env.copy())

        format_kwargs = {
            'wid': self.wid,
            'shell': self.shell,
            'args': self.args,
            'env': current_env,
            'working_dir': self.working_dir,
            'uid': self.uid,
            'gid': self.gid,
            'rlimits': self.rlimits,
            'executable': self.executable,
            'use_fds': self.use_fds
        }

        if self.watcher is not None:
            for option in self.watcher.optnames:
                if option not in format_kwargs\
                        and hasattr(self.watcher, option):
                    format_kwargs[option] = getattr(self.watcher, option)

        cmd = replace_gnu_args(self.cmd, **format_kwargs)

        if '$WID' in cmd or (self.args and '$WID' in self.args):
            msg = "Using $WID in the command is deprecated. You should use "\
                  "the python string format instead. In you case, this means "\
                  "replacing the $WID in your command by $(WID)."

            warnings.warn(msg, DeprecationWarning)
            self.cmd = cmd.replace('$WID', str(self.wid))

        if self.args is not None:
            if isinstance(self.args, string_types):
                args = shlex.split(
                    bytestring(replace_gnu_args(self.args, **format_kwargs)))
            else:
                args = [bytestring(replace_gnu_args(arg, **format_kwargs))\
                        for arg in self.args]
            args = shlex.split(bytestring(cmd)) + args
        else:
            args = shlex.split(bytestring(cmd))

        logger.debug("process args: %s", args)
        return args

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        logger.debug("sending signal %s to %s" % (sig, self.pid))
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Terminate the process."""
        try:
            try:
                if self._worker.poll() is None:
                    return self._worker.terminate()
            finally:
                self._worker.stderr.close()
                self._worker.stdout.close()
        except NoSuchProcess:
            pass

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"

        info["age"] = self.age()
        info["started"] = self.started
        info["children"] = []
        for child in self._worker.get_children():
            info["children"].append(get_info(child))

        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in self._worker.get_children()]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in self._worker.get_children()]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict([(child.pid, child) \
                for child in self._worker.get_children()])

        children[pid].send_signal(signum)

    @debuglog
    def send_signal_children(self, signum):
        """Send signal *signum* to all children."""
        for child in self._worker.get_children():
            try:
                child.send_signal(signum)
            except OSError as e:
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - UNEXISTING
        - OTHER
        """
        try:
            if self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return UNEXISTING

        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stdout* stream"""
        return self._worker.stderr

    def __eq__(self, other):
        return self is other

    def __lt__(self, other):
        return self.started < other.started

    def __gt__(self, other):
        return self.started > other.started
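A standalone sketch (POSIX only) of the rlimits handling performed in preexec_fn above; the 'nofile' name and the value 1024 are example inputs of the kind Process(rlimits=...) accepts:

import resource

rlimits = {'nofile': 1024}                    # example mapping
for name, value in rlimits.items():
    res = getattr(resource, 'RLIMIT_%s' % name.upper(), None)
    if res is None:
        raise ValueError('unknown rlimit "%s"' % name)
    # soft and hard limits get the same value, matching the TODO note above
    resource.setrlimit(res, (value, value))
print(resource.getrlimit(resource.RLIMIT_NOFILE))   # -> (1024, 1024)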
Example #7
class Run(object):
    """Class to handle processes.

    :ivar cmds: The ``cmds`` argument passed to the __init__ method
        (a command line passed in a list, or a list of command lines passed as
        a list of list).
    :ivar status: The exit status. As the exit status is only meaningful after
        the process has exited, its initial value is None.  When a problem
        running the command is detected and a process does not get
        created, its value gets set to the special value 127.
    :ivar out: process standard output (if instantiated with output=PIPE)
    :ivar err: same as out but for standard error
    :ivar pid: PID. Set to -1 if the command failed to run.
    """
    def __init__(self,
                 cmds,
                 cwd=None,
                 output=PIPE,
                 error=STDOUT,
                 input=None,
                 bg=False,
                 timeout=None,
                 env=None,
                 set_sigpipe=True,
                 parse_shebang=False,
                 ignore_environ=True):
        """Spawn a process.

        :param cmds: two possibilities:
            1) a command line: a tool name and its arguments, passed
            in a list. e.g. ['ls', '-a', '.']
            2) a list of command lines (as defined in (1)): the
            different commands will be piped. This means that
            [['ps', '-a'], ['grep', 'vxsim']] will be equivalent to
            the system command line 'ps -a | grep vxsim'.
        :type cmds: list[str] | list[list[str]]
        :param cwd: directory in which the process should be executed (string
            or None). If None then current directory is used
        :type cwd: str | None
        :param output: can be PIPE (default), a filename string, a fd on an
            already opened file, a python file object or None (for stdout).
        :type output: int | str | file | None
        :param error: same as output or STDOUT, which indicates that the
            stderr data from the applications should be captured into the same
            file handle as for stdout.
        :type error: int | str | file | None
        :param input: same as output
        :type input: int | str | file | None
        :param bg: if True then run in background
        :type bg: bool
        :param timeout: limit execution time (in seconds), None means
            unlimited
        :type timeout: int | None
        :param env: dictionary for environment variables (e.g. os.environ)
        :type env: dict
        :param set_sigpipe: reset SIGPIPE handler to default value
        :type set_sigpipe: bool
        :param parse_shebang: take the #! interpreter line into account
        :type parse_shebang: bool
        :param ignore_environ: Applies only when env parameter is not None.
            When set to True (the default), the only environment variables
            passed to the program are the ones provided by the env parameter.
            Otherwise, the environment passed to the program consists of the
            environment variables currently defined (os.environ) augmented by
            the ones provided in env.
        :type ignore_environ: bool

        :raise OSError: when trying to execute a non-existent file.

        If you specify a filename for output or stderr then the file content
        is reset (equivalent to > in shell). If you prepend the filename with
        '+' then the file will be opened in append mode (equivalent to >> in
        shell). If you prepend the input with '|', then the content of the
        input string will be used for the process stdin.
        """
        def add_interpreter_command(cmd_line):
            """Add the interpreter defined in the #! line to cmd_line.

            If the #! line cannot be parsed, just return the cmd_line
            unchanged

            On windows, /usr/bin/env will be ignored to avoid a dependency on
            cygwin and /bin/bash & /bin/sh are replaced by $SHELL if defined.
            :param cmd_line: command line
            :type cmd_line: list[str]
            """
            if not parse_shebang:
                # nothing to do
                return cmd_line
            prog = which(cmd_line[0], default=None)
            if prog is None:
                # Not found. Do not modify the command line
                return cmd_line

            with open(prog) as f:
                try:
                    header = f.read()[0:2]
                except UnicodeDecodeError:  # py3-only
                    # unknown header - cannot decode the first two bytes
                    return cmd_line
                if header != "#!":
                    # Unknown header
                    return cmd_line
                # Header found, get the interpreter command in the first line
                f.seek(0)
                line = f.readline()
                interpreter_cmds = [
                    l.strip() for l in line[line.find('!') + 1:].split()
                ]
                # Pass the program path to the interpreter
                if len(cmd_line) > 1:
                    cmd_line = [prog] + list(cmd_line[1:])
                else:
                    cmd_line = [prog]

                if sys.platform == 'win32':  # unix: no cover
                    if interpreter_cmds[0] == '/usr/bin/env':
                        return interpreter_cmds[1:] + cmd_line
                    elif interpreter_cmds[0] in ('/bin/bash', '/bin/sh') and \
                            'SHELL' in os.environ:
                        return [os.environ['SHELL']] + cmd_line
                return interpreter_cmds + cmd_line

        # First resolve output, error and input
        self.input_file = File(input, 'r')
        self.output_file = File(output, 'w')
        self.error_file = File(error, 'w')

        self.status = None
        self.out = ''
        self.err = ''
        self.cmds = []

        if env is not None and not ignore_environ:
            # ignore_environ is False, so get a copy of the current
            # environment and update it with the env dictionary.
            tmp = os.environ.copy()
            tmp.update(env)
            env = tmp

        rlimit_args = []
        if timeout is not None:
            rlimit = get_rlimit()
            if os.path.exists(rlimit):
                rlimit_args = [rlimit, '%d' % timeout]
            else:
                logger.warning('cannot find rlimit at %s', rlimit)
                rlimit_args = []

        try:
            if isinstance(cmds[0], basestring):
                self.cmds = rlimit_args + list(add_interpreter_command(cmds))
            else:
                self.cmds = [add_interpreter_command(c) for c in cmds]
                self.cmds[0] = rlimit_args + list(self.cmds[0])

            cmdlogger.debug('Run: cd %s; %s',
                            cwd if cwd is not None else os.getcwd(),
                            self.command_line_image())

            if isinstance(cmds[0], basestring):
                popen_args = {
                    'stdin': self.input_file.fd,
                    'stdout': self.output_file.fd,
                    'stderr': self.error_file.fd,
                    'cwd': cwd,
                    'env': env,
                    'universal_newlines': True
                }

                if sys.platform != 'win32' and \
                        set_sigpipe:  # windows: no cover
                    # preexec_fn is not supported on windows
                    popen_args['preexec_fn'] = subprocess_setup

                if WIN_NEW_PG and sys.platform == 'win32':
                    popen_args['creationflags'] = \
                        subprocess.CREATE_NEW_PROCESS_GROUP

                self.internal = Popen(self.cmds, **popen_args)

            else:
                runs = []
                for index, cmd in enumerate(self.cmds):
                    if index == 0:
                        stdin = self.input_file.fd
                    else:
                        stdin = runs[index - 1].stdout

                    # When connecting two processes using a Pipe don't use
                    # universal_newlines mode. Indeed commands transmitting
                    # binary data between them will crash
                    # (e.g. gzip -dc foo.txt | tar -xf -)
                    if index == len(self.cmds) - 1:
                        stdout = self.output_file.fd
                        txt_mode = True
                    else:
                        stdout = subprocess.PIPE
                        txt_mode = False

                    popen_args = {
                        'stdin': stdin,
                        'stdout': stdout,
                        'stderr': self.error_file.fd,
                        'cwd': cwd,
                        'env': env,
                        'universal_newlines': txt_mode
                    }

                    if sys.platform != 'win32' and \
                            set_sigpipe:  # windows: no cover
                        # preexec_fn is not supported on windows
                        popen_args['preexec_fn'] = subprocess_setup

                    if WIN_NEW_PG and sys.platform == 'win32':
                        popen_args['creationflags'] = \
                            subprocess.CREATE_NEW_PROCESS_GROUP

                    try:
                        runs.append(Popen(cmd, **popen_args))
                    except OSError:
                        logger.error('error when spawning %s', cmd)
                        # We have an error (e.g. file not found), try to kill
                        # all processes already started.
                        for p in runs:
                            p.terminate()
                        raise

                    self.internal = runs[-1]

        except Exception as e:  # defensive code
            self.__error(e, self.cmds)
            raise

        self.pid = self.internal.pid

        if not bg:
            self.wait()

    def command_line_image(self):
        """Get shell command line image of the spawned command(s).

        :rtype: str

        This is just a convenient wrapper around the function of the same
        name.
        """
        return command_line_image(self.cmds)

    def close_files(self):
        """Close all file descriptors."""
        self.output_file.close()
        self.error_file.close()
        self.input_file.close()

    def __error(self, error, cmds):
        """Set pid to -1 and status to 127 before closing files."""
        self.close_files()
        logger.error(error)

        def not_found(path):
            """Raise OSError.

            :param path: path of the executable
            :type path: str
            """
            logger.error("%s not found", path)
            e3.log.debug('PATH=%s', os.environ['PATH'])
            raise OSError(errno.ENOENT,
                          'No such file or directory, %s not found' % path)

        # Try to send a helpful message if one of the executables has not
        # been found.
        if isinstance(cmds[0], basestring):
            if which(cmds[0], default=None) is None:
                not_found(cmds[0])
        else:
            for cmd in cmds:
                if which(cmd[0], default=None) is None:
                    not_found(cmd[0])

    def wait(self):
        """Wait until process ends and return its status.

        :return: exit code of the process
        :rtype: int
        """
        if self.status is not None:
            # Wait has already been called
            return self.status

        # If there is no pipe in the loop then just do a wait. Otherwise
        # in order to avoid blocked processes due to full pipes, use
        # communicate.
        if self.output_file.fd != subprocess.PIPE and \
                self.error_file.fd != subprocess.PIPE and \
                self.input_file.fd != subprocess.PIPE:
            self.status = self.internal.wait()
        else:
            tmp_input = None
            if self.input_file.fd == subprocess.PIPE:
                tmp_input = self.input_file.get_command()

            (self.out, self.err) = self.internal.communicate(tmp_input)
            self.status = self.internal.returncode

        self.close_files()
        return self.status

    def poll(self):
        """Check the process status and set self.status if available.

        This method checks whether the underlying process has exited
        or not. If it hasn't, then it just returns None immediately.
        Otherwise, it stores the process' exit code in self.status
        and then returns it.

        :return: None if the process is still alive; otherwise, returns
          the process exit status.
        :rtype: int | None
        """
        if self.status is not None:
            # Process has already terminated and wait has been called
            return self.status

        result = self.internal.poll()

        if result is not None:
            # Process is finished, call wait to finalize it (closing handles,
            # ...)
            return self.wait()
        else:
            return None

    def kill(self, recursive=True, timeout=3):
        """Kill the process.

        :param recursive: if True, try to kill the complete process tree
        :type recursive: bool
        :param timeout: wait timeout (in seconds) after sending the kill
            signal (when recursive=True)
        :type timeout: int
        """
        if recursive:
            kill_process_tree(self.internal, timeout=timeout)
        else:
            self.internal.kill()

    def interrupt(self):
        """Send SIGINT to the process, kill on Windows."""
        if sys.platform == 'win32':
            self.kill()  # Ctrl-C event is unreliable on Windows
        else:
            self.internal.send_signal(signal.SIGINT)

    def is_running(self):
        """Check whether the process is running.

        :rtype: bool
        """
        if psutil is None:  # defensive code
            # psutil not imported, use our is_running function
            return is_running(self.pid)
        else:
            return self.internal.is_running()

    def children(self):
        """Return list of child processes (using psutil).

        :rtype: list[psutil.Process]
        """
        if psutil is None:  # defensive code
            raise NotImplementedError('Run.children() require psutil')
        return self.internal.children()
Beispiel #8
0
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain any of the variables available
      that are being passed to this class. They will be replaced using the
      python format syntax.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's split using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the executing
      program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.

    - **use_fds**: if True, will not close the fds in the subprocess. Must be
      set to True on Windows if stdout or stderr are redirected.
      default: False.

    - **pipe_stdout**: if True, will open a PIPE on stdout. default: True.

    - **pipe_stderr**: if True, will open a PIPE on stderr. default: True.

    - **close_child_stdin**: If True, redirects the child process' stdin
      to /dev/null after the fork. default: True.

    - **close_child_stdout**: If True, redirects the child process' stdout
      to /dev/null after the fork. default: False.

    - **close_child_stderr**: If True, redirects the child process' stderr
      to /dev/null after the fork. default: False.
    """
    def __init__(self, name, wid, cmd, args=None, working_dir=None,
                 shell=False, uid=None, gid=None, env=None, rlimits=None,
                 executable=None, use_fds=False, watcher=None, spawn=True,
                 pipe_stdout=True, pipe_stderr=True, close_child_stdin=True,
                 close_child_stdout=False, close_child_stderr=False):

        self.name = name
        self.wid = wid
        self.cmd = cmd
        self.args = args
        self.working_dir = working_dir or get_working_dir()
        self.shell = shell
        if uid:
            self.uid = to_uid(uid)
            self.username = get_username_from_uid(self.uid)
        else:
            self.username = None
            self.uid = None
        self.gid = to_gid(gid) if gid else None
        self.env = env or {}
        self.rlimits = rlimits or {}
        self.executable = executable
        self.use_fds = use_fds
        self.watcher = watcher
        self.pipe_stdout = pipe_stdout
        self.pipe_stderr = pipe_stderr
        self.close_child_stdin = close_child_stdin
        self.close_child_stdout = close_child_stdout
        self.close_child_stderr = close_child_stderr
        self.stopping = False
        # sockets created before fork, should be let go after.
        self._sockets = []
        self._worker = None
        self.redirected = False
        self.started = 0

        if self.uid is not None and self.gid is None:
            self.gid = get_default_gid(self.uid)

        if IS_WINDOWS:
            if not self.use_fds and (self.pipe_stderr or self.pipe_stdout):
                raise ValueError("On Windows, you can't close the fds if "
                                 "you are redirecting stdout or stderr")

        if spawn:
            self.spawn()

    def _null_streams(self, streams):
        devnull = os.open(os.devnull, os.O_RDWR)
        try:
            for stream in streams:
                if not hasattr(stream, 'fileno'):
                    # we're probably dealing with a file-like
                    continue
                try:
                    stream.flush()
                    os.dup2(devnull, stream.fileno())
                except IOError:
                    # some streams, like stdin, might already be closed.
                    pass
        finally:
            os.close(devnull)

    def _get_sockets_fds(self):
        """Returns sockets dict. If this worker's cmd indicates use of
        a SO_REUSEPORT socket, a new socket is created and bound. This
        new socket's FD replaces original socket's FD in returned dict.
        This method populates `self._sockets` list. This list should be
        let go after `fork()`.
        """
        sockets_fds = None

        if self.watcher is not None and self.watcher.sockets is not None:
            sockets_fds = self.watcher._get_sockets_fds()
            reuseport_sockets = tuple((sn, s) for (sn, s)
                                      in self.watcher.sockets.items()
                                      if s.so_reuseport)

            for sn, s in reuseport_sockets:
                # watcher.cmd uses this reuseport socket
                if 'circus.sockets.%s' % sn in self.watcher.cmd:
                    sock = CircusSocket.load_from_config(s._cfg)
                    sock.bind_and_listen()
                    # replace original socket's fd
                    sockets_fds[sn] = sock.fileno()
                    # keep new socket until fork returns
                    self._sockets.append(sock)

        return sockets_fds

    def spawn(self):
        self.started = time.time()
        sockets_fds = self._get_sockets_fds()

        args = self.format_args(sockets_fds=sockets_fds)

        def preexec():
            streams = []

            if self.close_child_stdin:
                streams.append(sys.stdin)

            if self.close_child_stdout:
                streams.append(sys.stdout)

            if self.close_child_stderr:
                streams.append(sys.stderr)

            self._null_streams(streams)
            os.setsid()

            if resource:
                for limit, value in self.rlimits.items():
                    res = getattr(
                        resource, 'RLIMIT_%s' % limit.upper(), None
                    )
                    if res is None:
                        raise ValueError('unknown rlimit "%s"' % limit)

                    # TODO(petef): support hard/soft limits

                    # for the NOFILE limit, if we fail to set an unlimited
                    # value then check the existing hard limit because we
                    # probably can't bypass it due to a kernel limit - so just
                    # assume that the caller means they want to use the kernel
                    # limit when they pass the unlimited value. This is better
                    # than failing to start the process and forcing the caller
                    # to always be aware of what the kernel configuration is.
                    # If they do pass in a real limit value, then we'll just
                    # raise the failure as they should know that their
                    # expectations couldn't be met.
                    # TODO - we can't log here as this occurs in the child
                    # process after the fork but it would be very good to
                    # notify the admin of the situation somehow.
                    retry = False
                    try:
                        resource.setrlimit(res, (value, value))
                    except ValueError:
                        if res == resource.RLIMIT_NOFILE and \
                                value == resource.RLIM_INFINITY:
                            _soft, value = resource.getrlimit(res)
                            retry = True
                        else:
                            raise
                    if retry:
                        resource.setrlimit(res, (value, value))

            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)

                if self.username is not None:
                    try:
                        os.initgroups(self.username, self.gid)
                    except (OSError, AttributeError):
                        # not supported on Mac or Python 2.6
                        pass

            if self.uid:
                os.setuid(self.uid)

        if IS_WINDOWS:
            # On Windows we can't use a pre-exec function
            preexec_fn = None
        else:
            preexec_fn = preexec

        extra = {}
        if self.pipe_stdout:
            extra['stdout'] = PIPE

        if self.pipe_stderr:
            extra['stderr'] = PIPE

        self._worker = Popen(args, cwd=self.working_dir,
                             shell=self.shell, preexec_fn=preexec_fn,
                             env=self.env, close_fds=not self.use_fds,
                             executable=self.executable, **extra)

        # let go of sockets created only for self._worker to inherit
        self._sockets = []

    def format_args(self, sockets_fds=None):
        """ It's possible to use environment variables and some other variables
        that are available in this context, when spawning the processes.
        """
        logger.debug('cmd: ' + bytestring(self.cmd))
        logger.debug('args: ' + str(self.args))

        current_env = ObjectDict(self.env.copy())

        format_kwargs = {
            'wid': self.wid, 'shell': self.shell, 'args': self.args,
            'env': current_env, 'working_dir': self.working_dir,
            'uid': self.uid, 'gid': self.gid, 'rlimits': self.rlimits,
            'executable': self.executable, 'use_fds': self.use_fds}

        if sockets_fds is not None:
            format_kwargs['sockets'] = sockets_fds

        if self.watcher is not None:
            for option in self.watcher.optnames:
                if option not in format_kwargs\
                        and hasattr(self.watcher, option):
                    format_kwargs[option] = getattr(self.watcher, option)

        cmd = replace_gnu_args(self.cmd, **format_kwargs)

        if '$WID' in cmd or (self.args and '$WID' in self.args):
            msg = "Using $WID in the command is deprecated. You should use "\
                  "the python string format instead. In your case, this "\
                  "means replacing the $WID in your command by $(WID)."

            warnings.warn(msg, DeprecationWarning)
            self.cmd = cmd.replace('$WID', str(self.wid))

        if self.args is not None:
            if isinstance(self.args, string_types):
                args = shlex.split(bytestring(replace_gnu_args(
                    self.args, **format_kwargs)))
            else:
                args = [bytestring(replace_gnu_args(arg, **format_kwargs))
                        for arg in self.args]
            args = shlex.split(bytestring(cmd), posix=not IS_WINDOWS) + args
        else:
            args = shlex.split(bytestring(cmd), posix=not IS_WINDOWS)

        if self.shell:
            # subprocess.Popen(shell=True) implies that 1st arg is the
            # requested command, remaining args are applied to sh.
            args = [' '.join(quote(arg) for arg in args)]
            shell_args = format_kwargs.get('shell_args', None)
            if shell_args and IS_WINDOWS:
                logger.warn("shell_args won't apply for "
                            "windows platforms: %s", shell_args)
            elif isinstance(shell_args, string_types):
                args += shlex.split(bytestring(replace_gnu_args(
                    shell_args, **format_kwargs)))
            elif shell_args:
                args += [bytestring(replace_gnu_args(arg, **format_kwargs))
                         for arg in shell_args]

        elif format_kwargs.get('shell_args', False):
            logger.warn("shell_args is defined but won't be used "
                        "in this context: %s", format_kwargs['shell_args'])
        logger.debug("process args: %s", args)
        return args

    def returncode(self):
        return self._worker.returncode

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def is_alive(self):
        return self.poll() is None

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        logger.debug("sending signal %s to %s" % (sig, self.pid))
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Stop the process and close stdout/stderr

        If the corresponding process is still alive
        (normally it has already been killed by the watcher),
        a SIGTERM is sent, then a SIGKILL after 1 second.

        The shutdown sequence (SIGTERM then SIGKILL) is
        normally handled by the watcher. So if the process
        is still alive at this point, something went wrong,
        because the graceful timeout won't be respected here.
        """
        try:
            try:
                if self.is_alive():
                    try:
                        return self._worker.terminate()
                    except AccessDenied:
                        # It can happen on Windows if the process
                        # dies after poll returns (unlikely)
                        pass
            finally:
                self.close_output_channels()
        except NoSuchProcess:
            pass

    def close_output_channels(self):
        if self._worker.stderr is not None:
            self._worker.stderr.close()
        if self._worker.stdout is not None:
            self._worker.stdout.close()

    def wait(self, timeout=None):
        """
        Wait for the process to terminate, in the fashion
        of waitpid.

        Accepts a timeout in seconds.
        """
        self._worker.wait(timeout)

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"

        info["age"] = self.age()
        info["started"] = self.started
        info["children"] = []
        info['wid'] = self.wid
        for child in get_children(self._worker):
            info["children"].append(get_info(child))

        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in get_children(self._worker)]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in get_children(self._worker)]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict((child.pid, child)
                        for child in get_children(self._worker))
        try:
            children[pid].send_signal(signum)
        except KeyError:
            raise NoSuchProcess(pid)

    @debuglog
    def send_signal_children(self, signum, recursive=False):
        """Send signal *signum* to all children."""
        for child in get_children(self._worker, recursive):
            try:
                child.send_signal(signum)
            except OSError as e:
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - UNEXISTING
        - OTHER
        """
        try:
            if get_status(self._worker) in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return UNEXISTING

        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stdout* stream"""
        return self._worker.stderr

    def __eq__(self, other):
        return self is other

    def __lt__(self, other):
        return self.started < other.started

    def __gt__(self, other):
        return self.started > other.started
Beispiel #9
0
def cmd(  # pylint:disable=too-many-branches,too-many-locals,too-many-statements
        command,
        user=None,
        input=None,  # pylint:disable=redefined-builtin
        cli_input=None,
        cli_output=False,
        communicate=True,
        timeout=None,
        fail=True,
        log=None,
        tries=1,
        delay_min=5,
        delay_max=10,
        **kwargs):
    """
    Calls the `command` and returns a dictionary with process, stdout, stderr, and the returncode.

    Returned returncode, stdout and stderr will be None if `communicate` is set to False.

    :param user: If set, this will use ``sudo -u <user> ...`` to execute `command` as `user`.
    :type user: unicode
    :param input: If set, sent to stdin (if `communicate` is True).
    :type input: unicode
    :param cli_input: If set, sent to stdin (unconditionally).
    :type cli_input: unicode
    :param cli_output: Set to True to forward (in real time) the process stdout/stderr to this process' stdout/stderr.
    :type cli_output: bool
    :param fail: Set to False to avoid the exception `subprocess.CalledProcessError`.
    :type fail: bool
    :param log: A function (or logger) used to log/print details about what is executed and any failure.
    :type log: callable, logging.Logger
    :param communicate: Set to True to communicate with the process; this is a blocking call
                        (if timeout is None).
    :type communicate: bool
    :param timeout: Time-out for the communication with the process, in seconds.
    :type timeout: float
    :param tries: Maximum number of times the command will be attempted.
    :type tries: int
    :param delay_min: Minimum delay to sleep after a failed attempt (communicate must be True).
    :type delay_min: float, int
    :param delay_max: Maximum delay to sleep after a failed attempt (communicate must be True).
    :type delay_max: float, int

    * The delay will be a random number in the range (`delay_min`, `delay_max`).
    * Set kwargs with any argument of the :mod:`subprocess`.Popen constructor except
      stdin, stdout and stderr.

    """

    # convert log argument to logging functions
    log_debug = log_warning = log_exception = None
    if isinstance(log, logging.Logger):
        log_debug, log_warning, log_exception = log.debug, log.warning, log.exception
    elif hasattr(log, '__call__'):
        log_debug = log_warning = log_exception = log

    # create a list and a string of the arguments
    if isinstance(command, str):
        if user is not None:
            command = f'sudo -u {user} {command}'
        args_list, args_string = shlex.split(command), command
    else:
        if user is not None:
            command = ['sudo', '-u', user] + command
        args_list = [str(a) for a in command if a is not None]
        args_string = ' '.join([str(a) for a in command if a is not None])

    # log the execution
    if log_debug:
        # FIXME simplify this
        log_debug('Execute {0}{1}{2}'.format(
            '' if input is None else 'echo {0}|'.format(repr(input)),
            args_string,
            '' if cli_input is None else ' < {0}'.format(repr(cli_input))))

    for trial in range(tries):  # noqa
        # create the sub-process
        try:
            process = Popen(args_list,
                            stdin=subprocess.PIPE,
                            stdout=None if cli_output else subprocess.PIPE,
                            stderr=None if cli_output else subprocess.PIPE,
                            **kwargs)
        except OSError as e:
            # unable to execute the program (e.g. does not exist)
            if log_exception:
                log_exception(e)
            if fail:
                raise
            return {
                'process': None,
                'stdout': '',
                'stderr': e,
                'returncode': 2
            }

        # write to stdin (answer to questions, ...)
        if cli_input is not None:
            process.stdin.write(cli_input)
            process.stdin.flush()

        # interact with the process and wait for the process to terminate
        if communicate:
            data = {}

            thread = threading.Thread(target=_communicate_with_timeout,
                                      kwargs={
                                          'data': data,
                                          'input': input,
                                          'process': process
                                      })
            thread.start()
            thread.join(timeout=timeout)
            if thread.is_alive():
                try:
                    process.terminate()
                    thread.join()
                except OSError as e:
                    # Manage race condition with process that may terminate just after the call to
                    # thread.is_alive() !
                    if e.errno != errno.ESRCH:
                        raise
            stdout, stderr = data['stdout'], data['stderr']
        else:
            # get a return code that may still be None at this point
            process.poll()
            stdout = stderr = None

        result = {
            'process': process,
            'stdout': stdout,
            'stderr': stderr,
            'returncode': process.returncode
        }

        if process.returncode == 0:
            break

        # failed attempt, may retry
        do_retry = trial < tries - 1
        delay = random.uniform(delay_min, delay_max)
        if log_warning:
            # FIXME simplify this
            log_warning('Attempt {0} out of {1}: {2}'.format(
                trial + 1, tries, 'Will retry in {0} seconds'.format(delay)
                if do_retry else 'Failed'))

        # raise if this is the last try
        if fail and not do_retry:
            raise subprocess.CalledProcessError(process.returncode,
                                                args_string, stderr)

        if do_retry:
            time.sleep(delay)

    return result
Beispiel #10
0
class Fly(object):
    def __init__(self, wid, cmd, working_dir, shell, uid=None, gid=None, env=None):
        self.wid = wid
        self.working_dir = working_dir
        self.shell = shell
        self.env = env
        self.cmd = cmd.replace("$WID", str(self.wid))

        self.uid = to_uid(uid)
        self.gid = to_gid(gid)

        def preexec_fn():
            os.setsid()
            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)

            if self.uid:
                os.setuid(self.uid)

        self._worker = Popen(
            self.cmd.split(),
            cwd=self.working_dir,
            shell=self.shell,
            preexec_fn=preexec_fn,
            env=self.env,
            close_fds=True,
        )
        self.started = time.time()

    def poll(self):
        return self._worker.poll()

    def send_signal(self, sig):
        return self._worker.send_signal(sig)

    def stop(self):
        if self._worker.poll() is None:
            return self._worker.terminate()

    def age(self):
        return time.time() - self.started

    def info(self):
        """ return process info """
        info = _INFOLINE % get_info(self._worker)
        lines = ["%s: %s" % (self.wid, info)]

        for child in self._worker.get_children():
            info = _INFOLINE % get_info(child)
            lines.append("   %s" % info)

        return "\n".join(lines)

    def children(self):
        return ",".join(["%s" % child.pid for child in self._worker.get_children()])

    def send_signal_child(self, pid, signum):
        for child in self._worker.get_children():
            if child.pid == pid:
                child.send_signal(signum)
                return "ok"
        return "error: child not found"

    def send_signal_children(self, signum):
        for child in self._worker.get_children():
            child.send_signal(signum)
        return "ok"

    @property
    def pid(self):
        return self._worker.pid
Beispiel #11
0
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain *$WID*, which will be
      replaced by **wid**.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's split using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the executing
      program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.
    """
    def __init__(self, wid, cmd, args=None, working_dir=None, shell=False,
                 uid=None, gid=None, env=None, rlimits=None, executable=None):
        self.wid = wid
        if working_dir is None:
            self.working_dir = get_working_dir()
        else:
            self.working_dir = working_dir
        self.shell = shell
        self.env = env

        if rlimits is not None:
            self.rlimits = rlimits
        else:
            self.rlimits = {}

        self.cmd = cmd.replace('$WID', str(self.wid))
        if uid is None:
            self.uid = None
        else:
            self.uid = to_uid(uid)

        if gid is None:
            self.gid = None
        else:
            self.gid = to_gid(gid)

        def preexec_fn():
            os.setsid()

            for limit, value in self.rlimits.items():
                res = getattr(resource, 'RLIMIT_%s' % limit.upper(), None)
                if res is None:
                    raise ValueError('unknown rlimit "%s"' % limit)
                # TODO(petef): support hard/soft limits
                resource.setrlimit(res, (value, value))

            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)

            if self.uid:
                os.setuid(self.uid)

        logger.debug('cmd: ' + cmd)
        logger.debug('args: ' + str(args))

        if args is not None:
            if isinstance(args, str):
                args_ = shlex.split(bytestring(args))
            else:
                args_ = args[:]

            args_ = shlex.split(bytestring(cmd)) + args_
        else:
            args_ = shlex.split(bytestring(cmd))

        logger.debug('Running %r' % ' '.join(args_))

        self._worker = Popen(args_, cwd=self.working_dir,
                             shell=self.shell, preexec_fn=preexec_fn,
                             env=self.env, close_fds=True, stdout=PIPE,
                             stderr=PIPE, executable=executable)

        self.started = time.time()

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Terminate the process."""
        try:
            if self._worker.poll() is None:
                return self._worker.terminate()
        finally:
            self._worker.stderr.close()
            self._worker.stdout.close()

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"

        info["children"] = []
        for child in self._worker.get_children():
            info["children"].append(get_info(child))

        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in self._worker.get_children()]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in self._worker.get_children()]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict((child.pid, child)
                        for child in self._worker.get_children())

        children[pid].send_signal(signum)

    @debuglog
    def send_signal_children(self, signum):
        """Send signal *signum* to all children."""
        for child in self._worker.get_children():
            try:
                child.send_signal(signum)
            except OSError as e:
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - OTHER
        """
        try:
            if self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return OTHER

        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stdout* stream"""
        return self._worker.stderr
Beispiel #12
0
class Run:
    """Class to handle processes.

    :ivar cmds: The ``cmds`` argument passed to the __init__ method
        (a command line passed in a list, or a list of command lines passed as
        a list of lists).
    :ivar status: The exit status. As the exit status is only meaningful after
        the process has exited, its initial value is None.  When a problem
        running the command is detected and a process does not get
        created, its value gets set to the special value 127.
    :ivar raw_out: process standard output as bytes (if instantiated with
        output = PIPE). Use self.out to get a decoded string.
    :ivar raw_err: same as raw_out but for standard error.
    :ivar pid: PID. Set to -1 if the command failed to run.
    """
    def __init__(
        self,
        cmds: AnyCmdLine,
        cwd: Optional[str] = None,
        output: STDOUT_VALUE | DEVNULL_VALUE | PIPE_VALUE | str | IO
        | None = PIPE,
        error: STDOUT_VALUE | DEVNULL_VALUE | PIPE_VALUE | str | IO
        | None = STDOUT,
        input: DEVNULL_VALUE | PIPE_VALUE | str | IO
        | None = None,  # noqa: A002
        bg: bool = False,
        timeout: Optional[int] = None,
        env: Optional[dict] = None,
        set_sigpipe: bool = True,
        parse_shebang: bool = False,
        ignore_environ: bool = True,
    ) -> None:
        """Spawn a process.

        :param cmds: two possibilities:
            1) a command line: a tool name and its arguments, passed
            in a list. e.g. ['ls', '-a', '.']
            2) a list of command lines (as defined in (1)): the
            different commands will be piped. This means that
            [['ps', '-a'], ['grep', 'vxsim']] will be equivalent to
            the system command line 'ps -a | grep vxsim'.
        :param cwd: directory in which the process should be executed (string
            or None). If None then current directory is used
        :param output: can be PIPE (default), a filename string, a fd on an
            already opened file, a python file object or None (for stdout).
        :param error: same as output or STDOUT, which indicates that the
            stderr data from the applications should be captured into the same
            file handle as for stdout.
        :param input: same as output
        :param bg: if True then run in background
        :param timeout: limit execution time (in seconds), None means
            unlimited
        :param env: dictionary for environment variables (e.g. os.environ)
        :param set_sigpipe: reset SIGPIPE handler to default value
        :param parse_shebang: take the #! interpreter line into account
        :param ignore_environ: Applies only when env parameter is not None.
            When set to True (the default), the only environment variables
            passed to the program are the ones provided by the env parameter.
            Otherwise, the environment passed to the program consists of the
            environment variables currently defined (os.environ) augmented by
            the ones provided in env.

        :raise OSError: when trying to execute a non-existent file.

        If you specify a filename for output or stderr then the file content
        is reset (equivalent to > in shell). If you prepend the filename with
        '+' then the file will be opened in append mode (equivalent to >> in
        shell). If you prepend the input with '|', then the content of the
        input string will be used for the process stdin.
        """
        def add_interpreter_command(cmd_line: CmdLine) -> CmdLine:
            """Add the interpreter defined in the #! line to cmd_line.

            If the #! line cannot be parsed, just return the cmd_line
            unchanged

            On windows, /usr/bin/env will be ignored to avoid a dependency on
            cygwin and /bin/bash & /bin/sh are replaced by $SHELL if defined.
            :param cmd_line: command line
            """
            if not parse_shebang:
                # nothing to do
                return cmd_line
            prog = which(cmd_line[0], default=None)
            if prog is None:
                # Not found. Do not modify the command line
                return cmd_line

            with open(prog) as f:
                try:
                    header = f.read()[0:2]
                except UnicodeDecodeError:
                    # unknown header - cannot decode the first two bytes
                    return cmd_line
                if header != "#!":
                    # Unknown header
                    return cmd_line
                # Header found, get the interpreter command in the first line
                f.seek(0)
                line = f.readline()
                interpreter_cmds = [
                    word.strip() for word in line[line.find("!") + 1:].split()
                ]
                # Pass the program path to the interpreter
                if len(cmd_line) > 1:
                    cmd_line = [prog] + list(cmd_line[1:])
                else:
                    cmd_line = [prog]

                if sys.platform == "win32":  # unix: no cover
                    if interpreter_cmds[0] == "/usr/bin/env":
                        # On windows make sure that PATH is taken into account
                        # by using which. In some cases involving the python
                        # interpreter, the interpreter used to run this module
                        # was picked up rather than the first one on the path.
                        interpreter_cmds[1] = which(
                            interpreter_cmds[1], default=interpreter_cmds[1])
                        return interpreter_cmds[1:] + cmd_line
                    elif (interpreter_cmds[0] in ("/bin/bash", "/bin/sh")
                          and "SHELL" in os.environ):
                        return [os.environ["SHELL"]] + cmd_line
                return interpreter_cmds + cmd_line

        # First resolve output, error and input
        self.input_file = File(input, "r")
        self.output_file = File(output, "w")
        self.error_file = File(error, "w")

        self.status: Optional[int] = None
        self.raw_out = b""
        self.raw_err = b""
        self.cmds = []

        if env is not None:
            if ignore_environ:
                if sys.platform == "win32":
                    # On Windows not all environment variables can be
                    # discarded. At least SYSTEMDRIVE, SYSTEMROOT should be
                    # set. In order to be portable propagate their value in
                    # case the user does not pass them in env when
                    # ignore_environ is set to True.
                    tmp = {}
                    for var in ("SYSTEMDRIVE", "SYSTEMROOT"):
                        if var not in env and var in os.environ:
                            tmp[var] = os.environ[var]
                    tmp.update(env)
                    env = tmp
            else:
                # ignore_environ is False, so get a copy of the current
                # environment and update it with the env dictionary.
                tmp = os.environ.copy()
                tmp.update(env)
                env = tmp

        rlimit_args = []
        if timeout is not None:
            rlimit = get_rlimit()
            if os.path.exists(rlimit):
                rlimit_args = [rlimit, "%d" % timeout]
            else:
                logger.warning("cannot find rlimit at %s", rlimit)
                rlimit_args = []

        try:
            self.cmds = [
                add_interpreter_command(c) for c in to_cmd_lines(cmds)
            ]
            self.cmds[0] = rlimit_args + list(self.cmds[0])

            cmdlogger.debug(
                "Run: cd %s; %s",
                cwd if cwd is not None else os.getcwd(),
                self.command_line_image(),
            )

            if len(self.cmds) == 1:
                popen_args = {
                    "stdin": self.input_file.fd,
                    "stdout": self.output_file.fd,
                    "stderr": self.error_file.fd,
                    "cwd": cwd,
                    "env": env,
                    "universal_newlines": False,
                }

                if sys.platform != "win32" and set_sigpipe:  # windows: no cover
                    # preexec_fn is not supported on windows
                    popen_args["preexec_fn"] = subprocess_setup  # type: ignore

                if sys.platform == "win32":
                    popen_args[
                        "creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP

                self.internal = Popen(self.cmds[0], **popen_args)

            else:
                runs: list[subprocess.Popen] = []
                for index, cmd in enumerate(self.cmds):
                    if index == 0:
                        stdin: int | IO[Any] = self.input_file.fd
                    else:
                        previous_stdout = runs[index - 1].stdout
                        assert previous_stdout is not None
                        stdin = previous_stdout

                    # When connecting two processes using a Pipe don't use
                    # universal_newlines mode. Indeed commands transmitting
                    # binary data between them will crash
                    # (e.g. gzip -dc foo.txt | tar -xf -)
                    if index == len(self.cmds) - 1:
                        stdout = self.output_file.fd
                    else:
                        stdout = subprocess.PIPE

                    popen_args = {
                        "stdin": stdin,
                        "stdout": stdout,
                        "stderr": self.error_file.fd,
                        "cwd": cwd,
                        "env": env,
                        "universal_newlines": False,
                    }

                    if sys.platform != "win32" and set_sigpipe:  # windows: no cover
                        # preexec_fn is not supported on windows
                        popen_args[
                            "preexec_fn"] = subprocess_setup  # type: ignore

                    if sys.platform == "win32":
                        popen_args[
                            "creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP

                    try:
                        runs.append(Popen(cmd, **popen_args))
                    except OSError:
                        logger.error("error when spawning %s", cmd)
                        # We have an error (e.g. file not found), try to kill
                        # all processes already started.
                        for p in runs:
                            p.terminate()
                        raise

                    self.internal = runs[-1]

        except Exception as e:  # defensive code
            self.__error(e, self.cmds)
            raise

        self.pid = self.internal.pid

        if not bg:
            self.wait()

    @property
    def out(self) -> str:
        """Process output as string.

        An attempt is made to decode the output as utf-8. If the output is
        not valid utf-8, a string representation will be returned
        (see e3.text.bytes_as_str).
        """
        return bytes_as_str(self.raw_out)

    @property
    def err(self) -> str:
        """Process error as string.

        An attempt is made to decode the output as utf-8. If the output is
        not valid utf-8, a string representation will be returned
        (see e3.text.bytes_as_str).
        """
        return bytes_as_str(self.raw_err)

    def command_line_image(self) -> str:
        """Get shell command line image of the spawned command(s).

        This is just a convenient wrapper around the function of the same
        name.
        """
        return command_line_image(self.cmds)

    def close_files(self) -> None:
        """Close all file descriptors."""
        self.output_file.close()
        self.error_file.close()
        self.input_file.close()

    def __error(self, error: Exception, cmds: list[CmdLine]) -> None:
        """Set pid to -1 and status to 127 before closing files."""
        self.close_files()
        logger.error(error)

        def not_found(path: str) -> NoReturn:
            """Raise OSError.

            :param path: path of the executable
            """
            logger.error("%s not found", path)
            e3.log.debug("PATH=%s", os.environ["PATH"])
            raise OSError(errno.ENOENT,
                          f"No such file or directory, {path} not found")

        # Try to emit a helpful message if one of the executables was not
        # found.
        for cmd in cmds:
            if which(cmd[0], default=None) is None:
                not_found(cmd[0])

    def wait(self) -> int:
        """Wait until process ends and return its status.

        :return: exit code of the process
        """
        if self.status is not None:
            # Wait has already been called
            return self.status

        # If there is no pipe in the loop then just do a wait. Otherwise
        # in order to avoid blocked processes due to full pipes, use
        # communicate.
        if (self.output_file.fd != subprocess.PIPE
                and self.error_file.fd != subprocess.PIPE
                and self.input_file.fd != subprocess.PIPE):
            self.status = self.internal.wait()
        else:
            tmp_input: Optional[str | bytes] = None
            if self.input_file.fd == subprocess.PIPE:
                tmp_input = self.input_file.get_command()

            if isinstance(tmp_input, str):
                tmp_input = tmp_input.encode("utf-8")

            (self.raw_out, self.raw_err) = self.internal.communicate(tmp_input)
            self.status = self.internal.returncode

        self.close_files()
        return self.status

    def poll(self) -> Optional[int]:
        """Check the process status and set self.status if available.

        This method checks whether the underlying process has exited
        or not. If it hasn't, then it just returns None immediately.
        Otherwise, it stores the process' exit code in self.status
        and then returns it.

        :return: None if the process is still alive; otherwise, returns
          the process exit status.
        """
        if self.status is not None:
            # Process has already terminated and wait has been called
            return self.status

        result = self.internal.poll()

        if result is not None:
            # Process is finished, call wait to finalize it (closing handles,
            # ...)
            return self.wait()
        else:
            return None

    def kill(self, recursive: bool = True, timeout: int = 3) -> None:
        """Kill the process.

        :param recursive: if True, try to kill the complete process tree
        :param timeout: wait timeout (in seconds) after sending the kill
            signal (when recursive=True)
        """
        if recursive:
            kill_process_tree(self.internal, timeout=timeout)
        else:
            self.internal.kill()

    def interrupt(self) -> None:
        """Send SIGINT to the process, kill on Windows."""
        if sys.platform == "win32":
            self.kill()  # Ctrl-C event is unreliable on Windows
        else:
            self.internal.send_signal(signal.SIGINT)

    def is_running(self) -> bool:
        """Check whether the process is running."""
        if psutil is None:  # defensive code
            # psutil not imported, use our is_running function
            return is_running(self.pid)
        else:
            return self.internal.is_running()

    def children(self) -> list[Any]:
        """Return list of child processes (using psutil)."""
        if psutil is None:  # defensive code
            raise NotImplementedError("Run.children() require psutil")
        return self.internal.children()
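The typed Run variant above is easiest to see with a short usage sketch. The snippet below is only an illustration under assumptions that are not part of the example: the class is exposed as Run (as in the Run examples further down), a single command line as well as a list of command lines is accepted, and echo/ps/grep/sleep exist on the host.

# Usage sketch (assumptions: the class above is named Run and the commands
# echo, ps, grep and sleep are available on the host).
single = Run(["echo", "hello"])
print(single.status, single.out)        # 0, output decoded via bytes_as_str

# A list of command lines is piped, like `ps -a | grep init` in a shell.
piped = Run([["ps", "-a"], ["grep", "init"]])
print(piped.status, piped.out)

# Background mode: spawn, then poll/wait explicitly.
background = Run(["sleep", "1"], bg=True)
print(background.is_running())
print(background.wait())                # blocks until exit, returns status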
Beispiel #13
0
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain any of the variables available
      that are being passed to this class. They will be replaced using the
      python format syntax.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's split using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the executing
      program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.

    - **use_fds**: if True, will not close the fds in the subprocess. Must
      be set to True on Windows if stdout or stderr are redirected.
      default: False.

    - **pipe_stdout**: if True, will open a PIPE on stdout. default: True.

    - **pipe_stderr**: if True, will open a PIPE on stderr. default: True.

    - **close_child_stdin**: If True, redirects the child process' stdin
      to /dev/null after the fork. default: True.

    - **close_child_stdout**: If True, redirects the child process' stdout
      to /dev/null after the fork. default: False.

    - **close_child_stderr**: If True, redirects the child process' stderr
      to /dev/null after the fork. default: False.
    """
    def __init__(self,
                 name,
                 wid,
                 cmd,
                 args=None,
                 working_dir=None,
                 shell=False,
                 uid=None,
                 gid=None,
                 env=None,
                 rlimits=None,
                 executable=None,
                 use_fds=False,
                 watcher=None,
                 spawn=True,
                 pipe_stdout=True,
                 pipe_stderr=True,
                 close_child_stdin=True,
                 close_child_stdout=False,
                 close_child_stderr=False):

        self.name = name
        self.wid = wid
        self.cmd = cmd
        self.args = args
        self.working_dir = working_dir or get_working_dir()
        self.shell = shell
        if uid:
            self.uid = to_uid(uid)
            self.username = get_username_from_uid(self.uid)
        else:
            self.username = None
            self.uid = None
        self.gid = to_gid(gid) if gid else None
        self.env = env or {}
        self.rlimits = rlimits or {}
        self.executable = executable
        self.use_fds = use_fds
        self.watcher = watcher
        self.pipe_stdout = pipe_stdout
        self.pipe_stderr = pipe_stderr
        self.close_child_stdin = close_child_stdin
        self.close_child_stdout = close_child_stdout
        self.close_child_stderr = close_child_stderr
        self.stopping = False
        self._worker = None
        self.redirected = False
        self.started = 0

        if self.uid is not None and self.gid is None:
            self.gid = get_default_gid(self.uid)

        if IS_WINDOWS:
            if not self.use_fds and (self.pipe_stderr or self.pipe_stdout):
                raise ValueError("On Windows, you can't close the fds if "
                                 "you are redirecting stdout or stderr")

        if spawn:
            self.spawn()

    def _null_streams(self, streams):
        devnull = os.open(os.devnull, os.O_RDWR)
        try:
            for stream in streams:
                if not hasattr(stream, 'fileno'):
                    # we're probably dealing with a file-like
                    continue
                try:
                    stream.flush()
                    os.dup2(devnull, stream.fileno())
                except IOError:
                    # some streams, like stdin, might already be closed.
                    pass
        finally:
            os.close(devnull)

    def spawn(self):
        self.started = time.time()
        args = self.format_args()

        def preexec():
            streams = []

            if self.close_child_stdin:
                streams.append(sys.stdin)

            if self.close_child_stdout:
                streams.append(sys.stdout)

            if self.close_child_stderr:
                streams.append(sys.stderr)

            self._null_streams(streams)
            os.setsid()

            if resource:
                for limit, value in self.rlimits.items():
                    res = getattr(resource, 'RLIMIT_%s' % limit.upper(), None)
                    if res is None:
                        raise ValueError('unknown rlimit "%s"' % limit)

                    # TODO(petef): support hard/soft limits

                    # for the NOFILE limit, if we fail to set an unlimited
                    # value then check the existing hard limit because we
                    # probably can't bypass it due to a kernel limit - so just
                    # assume that the caller means they want to use the kernel
                    # limit when they pass the unlimited value. This is better
                    # than failing to start the process and forcing the caller
                    # to always be aware of what the kernel configuration is.
                    # If they do pass in a real limit value, then we'll just
                    # raise the failure as they should know that their
                    # expectations couldn't be met.
                    # TODO - we can't log here as this occurs in the child
                    # process after the fork but it would be very good to
                    # notify the admin of the situation somehow.
                    retry = False
                    try:
                        resource.setrlimit(res, (value, value))
                    except ValueError:
                        if res == resource.RLIMIT_NOFILE and \
                                value == resource.RLIM_INFINITY:
                            _soft, value = resource.getrlimit(res)
                            retry = True
                        else:
                            raise
                    if retry:
                        resource.setrlimit(res, (value, value))

            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)

                if self.username is not None:
                    try:
                        os.initgroups(self.username, self.gid)
                    except (OSError, AttributeError):
                        # not supported on Mac or Python 2.6
                        pass

            if self.uid:
                os.setuid(self.uid)

        if IS_WINDOWS:
            # On Windows we can't use a pre-exec function
            preexec_fn = None
        else:
            preexec_fn = preexec

        extra = {}
        if self.pipe_stdout:
            extra['stdout'] = PIPE

        if self.pipe_stderr:
            extra['stderr'] = PIPE

        if isinstance(args, list):
            if args[0].startswith('./') or args[0].startswith('.\\'):
                args[0] = os.path.normpath(
                    os.path.join(self.working_dir, args[0]))
            args = [arg.strip('"') for arg in args]

        self._worker = Popen(args,
                             cwd=self.working_dir,
                             shell=self.shell,
                             preexec_fn=preexec_fn,
                             env=self.env,
                             close_fds=not self.use_fds,
                             executable=self.executable,
                             **extra)

    def format_args(self):
        """ It's possible to use environment variables and some other variables
        that are available in this context, when spawning the processes.
        """
        logger.debug('cmd: ' + to_string(self.cmd))
        logger.debug('args: ' + str(self.args))

        current_env = ObjectDict(self.env.copy())

        format_kwargs = {
            'wid': self.wid,
            'shell': self.shell,
            'args': self.args,
            'env': current_env,
            'working_dir': self.working_dir,
            'uid': self.uid,
            'gid': self.gid,
            'rlimits': self.rlimits,
            'executable': self.executable,
            'use_fds': self.use_fds
        }

        if self.watcher is not None:
            for option in self.watcher.optnames:
                if (option not in format_kwargs
                        and hasattr(self.watcher, option)):
                    format_kwargs[option] = getattr(self.watcher, option)

        cmd = replace_gnu_args(self.cmd, **format_kwargs)

        if '$WID' in cmd or (self.args and '$WID' in self.args):
            msg = "Using $WID in the command is deprecated. You should use "\
                  "the python string format instead. In your case, this "\
                  "means replacing the $WID in your command by $(WID)."

            warnings.warn(msg, DeprecationWarning)
            self.cmd = cmd.replace('$WID', str(self.wid))

        if self.args is not None:
            if isinstance(self.args, basestring):
                args = shlex.split(
                    to_string(replace_gnu_args(self.args, **format_kwargs)))
            else:
                args = [
                    to_string(replace_gnu_args(arg, **format_kwargs))
                    for arg in self.args
                ]
            args = shlex.split(to_string(cmd), posix=not IS_WINDOWS) + args
        else:
            args = shlex.split(to_string(cmd), posix=not IS_WINDOWS)

        if self.shell:
            # subprocess.Popen(shell=True) implies that 1st arg is the
            # requested command, remaining args are applied to sh.
            args = [' '.join(quote(arg) for arg in args)]
            shell_args = format_kwargs.get('shell_args', None)
            if shell_args and IS_WINDOWS:
                logger.warn("shell_args won't apply for "
                            "windows platforms: {}".format(shell_args))
            elif isinstance(shell_args, basestring):
                args += shlex.split(
                    to_string(replace_gnu_args(shell_args, **format_kwargs)))
            elif shell_args:
                args += [
                    to_string(replace_gnu_args(arg, **format_kwargs))
                    for arg in shell_args
                ]

        elif format_kwargs.get('shell_args', False):
            logger.warn("shell_args is defined but won't be used "
                        "in this context: {}".format(
                            format_kwargs['shell_args']))
        logger.debug('Process args: {}'.format(args))
        return args

    def returncode(self):
        return self._worker.returncode

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def is_alive(self):
        return self.poll() is None

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        logger.debug('sending signal {} to {}'.format(sig, self.pid))
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Stop the process and close stdout/stderr

        If the corresponding process is still here
        (normally it's already killed by the watcher),
        a SIGTERM is sent, then a SIGKILL after 1 second.

        The shutdown process (SIGTERM then SIGKILL) is
        normally taken by the watcher. So if the process
        is still there here, it's a kind of bad behavior
        because the graceful timeout won't be respected here.
        """
        try:
            try:
                if self.is_alive():
                    try:
                        return self._worker.terminate()
                    except AccessDenied:
                        # It can happen on Windows if the process
                        # dies after poll returns (unlikely)
                        pass
            finally:
                self.close_output_channels()
        except NoSuchProcess:
            pass

    def close_output_channels(self):
        if self._worker.stderr is not None:
            self._worker.stderr.close()
        if self._worker.stdout is not None:
            self._worker.stdout.close()

    def wait(self, timeout=None):
        """
        Wait for the process to terminate, in the fashion
        of waitpid.

        Accepts a timeout in seconds.
        """
        self._worker.wait(timeout)

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"

        info["age"] = self.age()
        info["started"] = self.started
        info["children"] = []
        info['wid'] = self.wid
        for child in get_children(self._worker):
            info["children"].append(get_info(child))

        return info

    def children(self, recursive=False):
        """Return a list of children pids."""
        return [child.pid for child in get_children(self._worker, recursive)]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in get_children(self._worker)]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict(
            (child.pid, child) for child in get_children(self._worker))
        try:
            children[pid].send_signal(signum)
        except KeyError:
            raise NoSuchProcess(pid)

    @debuglog
    def send_signal_children(self, signum, recursive=False):
        """Send signal *signum* to all children."""
        for child in get_children(self._worker, recursive):
            try:
                child.send_signal(signum)
            except OSError as e:
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - UNEXISTING
        - OTHER
        """
        try:
            if get_status(self._worker) in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return UNEXISTING

        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stdout* stream"""
        return self._worker.stderr

    def __repr__(self):
        return '<Process ({0})> pid={1}, cmd={2}, args={3}'.format(
            self.name, self.pid, repr(self.cmd), repr(self.args))

    def __eq__(self, other):
        return self is other

    def __lt__(self, other):
        return self.started < other.started

    def __gt__(self, other):
        return self.started > other.started
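A short usage sketch for this Process variant may help; it is only an illustration under assumptions: a POSIX host (the preexec hook calls os.setsid unconditionally), os and signal imported, and the module-level helpers used by the class (Popen, get_working_dir, replace_gnu_args, ...) importable.

# Usage sketch (assumptions: POSIX host, `os` and `signal` imported, helpers
# used by the class above importable).
proc = Process(
    name="worker",
    wid=1,
    cmd="python -c 'import time; time.sleep(30)'",
    working_dir="/tmp",
    env=dict(os.environ),       # passed verbatim to Popen
    pipe_stdout=False,          # don't pipe if nothing drains the pipes
    pipe_stderr=False,
)
print(proc.pid, proc.age())
print(proc.status)              # RUNNING / DEAD_OR_ZOMBIE / ...
proc.send_signal(signal.SIGTERM)
proc.stop()                     # terminate if still alive, close pipes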
Beispiel #14
0
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain *$WID*, which will be
      replaced by **wid**.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's split using
      :func:`shlex.split`. Defaults to None.

    - **executable**: When executable is given, the first item in
      the args sequence obtained from **cmd** is still treated by most
      programs as the command name, which can then be different from the
      actual executable name. It becomes the display name for the executing
      program in utilities such as **ps**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.
    """
    def __init__(self,
                 wid,
                 cmd,
                 args=None,
                 working_dir=None,
                 shell=False,
                 uid=None,
                 gid=None,
                 env=None,
                 rlimits=None,
                 executable=None):
        self.wid = wid
        if working_dir is None:
            self.working_dir = get_working_dir()
        else:
            self.working_dir = working_dir
        self.shell = shell
        self.env = env

        if rlimits is not None:
            self.rlimits = rlimits
        else:
            self.rlimits = {}

        self.cmd = cmd.replace('$WID', str(self.wid))
        if uid is None:
            self.uid = None
        else:
            self.uid = to_uid(uid)

        if gid is None:
            self.gid = None
        else:
            self.gid = to_gid(gid)

        def preexec_fn():
            os.setsid()

            for limit, value in self.rlimits.items():
                res = getattr(resource, 'RLIMIT_%s' % limit.upper(), None)
                if res is None:
                    raise ValueError('unknown rlimit "%s"' % limit)
                # TODO(petef): support hard/soft limits
                resource.setrlimit(res, (value, value))

            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)

            if self.uid:
                os.setuid(self.uid)

        logger.debug('cmd: ' + bytestring(cmd))
        logger.debug('args: ' + str(args))

        if args is not None:
            if isinstance(args, string_types):
                args_ = shlex.split(bytestring(args))
            else:
                args_ = [bytestring(arg) for arg in args]

            args_ = shlex.split(bytestring(cmd)) + args_
        else:
            args_ = shlex.split(bytestring(cmd))

        logger.debug("process args: %s", args_)
        logger.debug('Running %r' % ' '.join(args_))

        self._worker = Popen(args_,
                             cwd=self.working_dir,
                             shell=self.shell,
                             preexec_fn=preexec_fn,
                             env=self.env,
                             close_fds=True,
                             stdout=PIPE,
                             stderr=PIPE,
                             executable=executable)

        self.started = time.time()

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Terminate the process."""
        try:
            if self._worker.poll() is None:
                return self._worker.terminate()
        finally:
            self._worker.stderr.close()
            self._worker.stdout.close()

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"

        info["children"] = []
        for child in self._worker.get_children():
            info["children"].append(get_info(child))

        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in self._worker.get_children()]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in self._worker.get_children()]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict((child.pid, child)
                        for child in self._worker.get_children())

        children[pid].send_signal(signum)

    @debuglog
    def send_signal_children(self, signum):
        """Send signal *signum* to all children."""
        for child in self._worker.get_children():
            try:
                child.send_signal(signum)
            except OSError as e:
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - OTHER
        """
        try:
            if self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return OTHER

        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stdout* stream"""
        return self._worker.stderr
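For contrast with the previous variant, this one spawns eagerly in __init__ and always pipes stdout/stderr. A hedged usage sketch, assuming a POSIX host (preexec_fn and os.setsid are used unconditionally here):

# Usage sketch (assumption: POSIX host).
proc = Process(wid=1, cmd="echo $WID")   # $WID is substituted -> "echo 1"
print(proc.pid, proc.age())
# stdout/stderr are always PIPEs in this variant: drain them so a chatty
# child cannot block on a full pipe.
print(proc.stdout.read())
proc.stop()                              # terminate if alive, close pipes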
Beispiel #15
0
class Run(object):
    """Class to handle processes.

    :ivar cmds: The ``cmds`` argument passed to the __init__ method
        (a command line passed in a list, or a list of command lines passed as
        a list of lists).
    :ivar status: The exit status. As the exit status is only meaningful after
        the process has exited, its initial value is None.  When a problem
        running the command is detected and a process does not get
        created, its value gets set to the special value 127.
    :ivar out: process standard output (if instantiated with output=PIPE)
    :ivar err: same as out but for standard error
    :ivar pid: PID. Set to -1 if the command failed to run.
    """

    def __init__(self, cmds, cwd=None, output=PIPE,
                 error=STDOUT, input=None, bg=False, timeout=None,
                 env=None, set_sigpipe=True, parse_shebang=False,
                 ignore_environ=True, python_executable=sys.executable):
        """Spawn a process.

        :param cmds: two possibilities:
            1) a command line: a tool name and its arguments, passed
            in a list. e.g. ['ls', '-a', '.']
            2) a list of command lines (as defined in (1)): the
            different commands will be piped. This means that
            [['ps', '-a'], ['grep', 'vxsim']] will be equivalent to
            the system command line 'ps -a | grep vxsim'.
        :type cmds: list[str] | list[list[str]]
        :param cwd: directory in which the process should be executed (string
            or None). If None then current directory is used
        :type cwd: str | None
        :param output: can be PIPE (default), a filename string, a fd on an
            already opened file, a python file object or None (for stdout).
        :type output: int | str | file | None
        :param error: same as output or STDOUT, which indicates that the
            stderr data from the applications should be captured into the same
            file handle as for stdout.
        :type error: int | str | file | None
        :param input: same as output
        :type input: int | str | file | None
        :param bg: if True then run in background
        :type bg: bool
        :param timeout: limit execution time (in seconds), None means
            unlimited
        :type timeout: int | None
        :param env: dictionary for environment variables (e.g. os.environ)
        :type env: dict
        :param set_sigpipe: reset SIGPIPE handler to default value
        :type set_sigpipe: bool
        :param parse_shebang: take the #! interpreter line into account
        :type parse_shebang: bool
        :param ignore_environ: Applies only when env parameter is not None.
            When set to True (the default), the only environment variables
            passed to the program are the ones provided by the env parameter.
            Otherwise, the environment passed to the program consists of the
            environment variables currently defined (os.environ) augmented by
            the ones provided in env.
        :type ignore_environ: bool
        :param python_executable: name or path to the python executable
        :type python_executable: str

        :raise OSError: when trying to execute a non-existent file.

        If you specify a filename for output or stderr then the file content
        is reset (equiv. to > in shell). If you prepend the filename with '+'
        then the file will be opened in append mode (equiv. to >> in shell).
        If you prepend the input with '|', then the content of the input
        string will be used for the process stdin.
        """
        def add_interpreter_command(cmd_line):
            """Add the interpreter defined in the #! line to cmd_line.

            If the #! line cannot be parsed, just return the cmd_line
            unchanged.

            If the interpreter command line contains /usr/bin/env python, it
            will be replaced by the value of python_executable.

            On Windows, /usr/bin/env will be ignored to avoid a dependency on
            Cygwin.

            :param cmd_line: command line
            :type cmd_line: list[str]
            """
            if not parse_shebang:
                # nothing to do
                return cmd_line
            prog = which(cmd_line[0], default=None)
            if prog is None:
                # Not found. Do not modify the command line
                return cmd_line

            with open(prog) as f:
                header = f.read()[0:2]
                if header != "#!":
                    # Unknown header
                    return cmd_line
                # Header found, get the interpreter command in the first line
                f.seek(0)
                line = f.readline()
                interpreter_cmds = [l.strip() for l in
                                    line[line.find('!') + 1:].split()]
                # Pass the program path to the interpreter
                if len(cmd_line) > 1:
                    cmd_line = [prog] + list(cmd_line[1:])
                else:
                    cmd_line = [prog]

                # If the interpreter is '/usr/bin/env python', use
                # python_executable instead to keep the same python executable
                if interpreter_cmds[0:2] == ['/usr/bin/env', 'python']:
                    if len(interpreter_cmds) > 2:
                        return [python_executable] + \
                            interpreter_cmds[2:] + cmd_line
                    else:
                        return [python_executable] + cmd_line
                elif sys.platform == 'win32':  # unix: no cover
                    if interpreter_cmds[0] == '/usr/bin/env':
                        return interpreter_cmds[1:] + cmd_line
                    elif interpreter_cmds[0] in ('/bin/bash', '/bin/sh') and \
                            'SHELL' in os.environ:
                        return [os.environ['SHELL']] + cmd_line
                return interpreter_cmds + cmd_line

        # First resolve output, error and input
        self.input_file = File(input, 'r')
        self.output_file = File(output, 'w')
        self.error_file = File(error, 'w')

        self.status = None
        self.out = ''
        self.err = ''
        self.cmds = []

        if env is not None and not ignore_environ:
            # ignore_environ is False, so get a copy of the current
            # environment and update it with the env dictionary.
            tmp = os.environ.copy()
            tmp.update(env)
            env = tmp

        rlimit_args = []
        if timeout is not None:
            rlimit = get_rlimit()
            if os.path.exists(rlimit):
                rlimit_args = [rlimit, '%d' % timeout]
            else:
                logger.warning('cannot find rlimit at %s', rlimit)
                rlimit_args = []

        try:
            if isinstance(cmds[0], basestring):
                self.cmds = rlimit_args + list(add_interpreter_command(cmds))
            else:
                self.cmds = [add_interpreter_command(c) for c in cmds]
                self.cmds[0] = rlimit_args + list(self.cmds[0])

            cmdlogger.debug('Run: cd %s; %s' % (
                cwd if cwd is not None else os.getcwd(),
                self.command_line_image()))

            if isinstance(cmds[0], basestring):
                popen_args = {
                    'stdin': self.input_file.fd,
                    'stdout': self.output_file.fd,
                    'stderr': self.error_file.fd,
                    'cwd': cwd,
                    'env': env,
                    'universal_newlines': True}

                if sys.platform != 'win32' and set_sigpipe:
                    # preexec_fn is not supported on Windows
                    popen_args['preexec_fn'] = subprocess_setup

                self.internal = Popen(self.cmds, **popen_args)

            else:
                runs = []
                for index, cmd in enumerate(self.cmds):
                    if index == 0:
                        stdin = self.input_file.fd
                    else:
                        stdin = runs[index - 1].stdout

                    # When connecting two processes with a pipe, don't use
                    # universal_newlines mode: commands transmitting binary
                    # data between them would crash
                    # (e.g. gzip -dc foo.txt | tar -xf -)
                    if index == len(self.cmds) - 1:
                        stdout = self.output_file.fd
                        txt_mode = True
                    else:
                        stdout = subprocess.PIPE
                        txt_mode = False

                    popen_args = {
                        'stdin': stdin,
                        'stdout': stdout,
                        'stderr': self.error_file.fd,
                        'cwd': cwd,
                        'env': env,
                        'universal_newlines': txt_mode}

                    if sys.platform != 'win32' and set_sigpipe:
                        # preexec_fn is not supported on Windows
                        popen_args['preexec_fn'] = subprocess_setup

                    try:
                        runs.append(Popen(cmd, **popen_args))
                    except OSError:
                        logger.error('error when spawning %s', cmd)
                        # We have an error (e.g. file not found), try to kill
                        # all processes already started.
                        for p in runs:
                            p.terminate()
                        raise

                    self.internal = runs[-1]

        except Exception as e:
            self.__error(e, self.cmds)
            raise

        self.pid = self.internal.pid

        if not bg:
            self.wait()

    def command_line_image(self):
        """Get shell command line image of the spawned command(s).

        :rtype: str

        This is just a convenient wrapper around the function of the same
        name.
        """
        return command_line_image(self.cmds)

    def close_files(self):
        """Close all file descriptors."""
        self.output_file.close()
        self.error_file.close()
        self.input_file.close()

    def __error(self, error, cmds):
        """Set pid to -1 and status to 127 before closing files."""
        self.close_files()
        logger.error(error)

        def not_found(path):
            """Raise OSError.

            :param path: path of the executable
            :type path: str
            """
            logger.error("%s not found", path)
            e3.log.debug('PATH=%s', os.environ['PATH'])
            raise OSError(errno.ENOENT,
                          'No such file or directory, %s not found' % path)

        # Try to emit a helpful message if one of the executables was not
        # found.
        if isinstance(cmds[0], basestring):
            if which(cmds[0], default=None) is None:
                not_found(cmds[0])
        else:
            for cmd in cmds:
                if which(cmd[0], default=None) is None:
                    not_found(cmd[0])

    def wait(self):
        """Wait until process ends and return its status.

        :return: exit code of the process
        :rtype: int
        """
        if self.status is not None:
            # Wait has already been called
            return self.status

        # If there is no pipe in the loop then just do a wait. Otherwise
        # in order to avoid blocked processes due to full pipes, use
        # communicate.
        if self.output_file.fd != subprocess.PIPE and \
                self.error_file.fd != subprocess.PIPE and \
                self.input_file.fd != subprocess.PIPE:
            self.status = self.internal.wait()
        else:
            tmp_input = None
            if self.input_file.fd == subprocess.PIPE:
                tmp_input = self.input_file.get_command()

            (self.out, self.err) = self.internal.communicate(tmp_input)
            self.status = self.internal.returncode

        self.close_files()
        return self.status

    def poll(self):
        """Check the process status and set self.status if available.

        This method checks whether the underlying process has exited
        or not. If it hasn't, then it just returns None immediately.
        Otherwise, it stores the process' exit code in self.status
        and then returns it.

        :return: None if the process is still alive; otherwise, returns
          the process exit status.
        :rtype: int | None
        """
        if self.status is not None:
            # Process has already terminated and wait has been called
            return self.status

        result = self.internal.poll()

        if result is not None:
            # Process is finished, call wait to finalize it (closing handles,
            # ...)
            return self.wait()
        else:
            return None

    def kill(self):
        """Kill the process."""
        self.internal.kill()

    def interrupt(self):
        """Send SIGINT CTRL_C_EVENT to the process."""
        # On windows CTRL_C_EVENT is available and SIGINT is not;
        # and the other way around on other platforms.
        interrupt_signal = getattr(signal, 'CTRL_C_EVENT', signal.SIGINT)
        self.internal.send_signal(interrupt_signal)

    def is_running(self):
        """Check whether the process is running.

        :rtype: bool
        """
        if psutil is None:
            # psutil not imported, use our is_running function
            return is_running(self.pid)
        else:
            return self.internal.is_running()

    def children(self):
        """Return list of child processes (using psutil).

        :rtype: list[psutil.Process]
        """
        if psutil is None:
            raise NotImplementedError('Run.children() requires psutil')
        return self.internal.children()
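The output/error/input conventions documented above ('+' for append, '|' for literal stdin, timeout via the rlimit helper) are easier to read in a sketch. This is only an illustration under assumptions: the prefixes behave as the docstring describes and the rlimit helper binary is installed when timeout is used.

# Usage sketch (assumptions: '|' / '+' prefixes behave as documented above,
# rlimit helper available for timeout=...).
r = Run(["cat"], input="|hello world")          # '|' feeds the string to stdin
print(r.status, r.out)

Run(["ls", "-a", "."], output="listing.txt")    # truncate then write
Run(["ls", "-a", ".."], output="+listing.txt")  # '+' appends instead

piped = Run([["ps", "-a"], ["grep", "init"]], timeout=10)
print(piped.out)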
Beispiel #16
0
class Process(object):
    """Wraps a process.

    Options:

    - **wid**: the process unique identifier. This value will be used to
      replace the *$WID* string in the command line if present.

    - **cmd**: the command to run. May contain *$WID*, which will be
      replaced by **wid**.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.
    """
    def __init__(self, wid, cmd, working_dir=None, shell=False, uid=None,
                 gid=None, env=None):
        self.wid = wid
        if working_dir is None:
            self.working_dir = get_working_dir()
        else:
            self.working_dir = working_dir
        self.shell = shell
        self.env = env
        self.cmd = cmd.replace('$WID', str(self.wid))
        if uid is None:
            self.uid = None
        else:
            self.uid = to_uid(uid)

        if gid is None:
            self.gid = None
        else:
            self.gid = to_gid(gid)

        def preexec_fn():
            os.setsid()
            if self.gid:
                try:
                    os.setgid(self.gid)
                except OverflowError:
                    if not ctypes:
                        raise
                    # versions of python < 2.6.2 don't manage unsigned int for
                    # groups like on osx or fedora
                    os.setgid(-ctypes.c_int(-self.gid).value)

            if self.uid:
                os.setuid(self.uid)

        self._worker = Popen(self.cmd.split(), cwd=self.working_dir,
                             shell=self.shell, preexec_fn=preexec_fn,
                             env=self.env, close_fds=True, stdout=PIPE,
                             stderr=PIPE)
        self.started = time.time()

    @debuglog
    def poll(self):
        return self._worker.poll()

    @debuglog
    def send_signal(self, sig):
        """Sends a signal **sig** to the process."""
        return self._worker.send_signal(sig)

    @debuglog
    def stop(self):
        """Terminate the process."""
        if self._worker.poll() is None:
            return self._worker.terminate()

    def age(self):
        """Return the age of the process in seconds."""
        return time.time() - self.started

    def info(self):
        """Return process info.

        The info returned is a mapping with these keys:

        - **mem_info1**: Resident Set Size Memory in bytes (RSS)
        - **mem_info2**: Virtual Memory Size in bytes (VMS).
        - **cpu**: % of cpu usage.
        - **mem**: % of memory usage.
        - **ctime**: process CPU (user + system) time in seconds.
        - **pid**: process id.
        - **username**: user name that owns the process.
        - **nice**: process niceness (between -20 and 20)
        - **cmdline**: the command line the process was run with.
        """
        try:
            info = get_info(self._worker)
        except NoSuchProcess:
            return "No such process (stopped?)"

        info["children"] = []
        for child in self._worker.get_children():
            info["children"].append(get_info(child))

        return info

    def children(self):
        """Return a list of children pids."""
        return [child.pid for child in self._worker.get_children()]

    def is_child(self, pid):
        """Return True is the given *pid* is a child of that process."""
        pids = [child.pid for child in self._worker.get_children()]
        if pid in pids:
            return True
        return False

    @debuglog
    def send_signal_child(self, pid, signum):
        """Send signal *signum* to child *pid*."""
        children = dict((child.pid, child)
                        for child in self._worker.get_children())

        children[pid].send_signal(signum)

    @debuglog
    def send_signal_children(self, signum):
        """Send signal *signum* to all children."""
        for child in self._worker.get_children():
            try:
                child.send_signal(signum)
            except OSError as e:
                if e.errno != errno.ESRCH:
                    raise

    @property
    def status(self):
        """Return the process status as a constant

        - RUNNING
        - DEAD_OR_ZOMBIE
        - OTHER
        """
        try:
            if self._worker.status in (STATUS_ZOMBIE, STATUS_DEAD):
                return DEAD_OR_ZOMBIE
        except NoSuchProcess:
            return OTHER

        if self._worker.is_running():
            return RUNNING
        return OTHER

    @property
    def pid(self):
        """Return the *pid*"""
        return self._worker.pid

    @property
    def stdout(self):
        """Return the *stdout* stream"""
        return self._worker.stdout

    @property
    def stderr(self):
        """Return the *stdout* stream"""
        return self._worker.stderr
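Finally, a sketch for the minimal Process variant above; it is only an illustration under assumptions: a POSIX host (os.setsid is always called in the preexec hook) and signal imported. Note that cmd is split with str.split(), so arguments containing spaces cannot be quoted with this variant.

# Usage sketch (assumptions: POSIX host, `signal` imported).
proc = Process(wid=3, cmd="sleep 30")
print(proc.pid, proc.status)                  # RUNNING while the child lives
for child_pid in proc.children():             # usually empty for `sleep`
    proc.send_signal_child(child_pid, signal.SIGTERM)
proc.send_signal(signal.SIGTERM)
proc.stop()                                   # terminate if still running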