Example #1
def nopen_keep_parent_stdin(f, mode="r"):

    if f.startswith("|"):
        # using shell explicitly makes things like process substitution work:
        # http://stackoverflow.com/questions/7407667/python-subprocess-subshells-and-redirection
        # use sys.stderr so we don't have to worry about checking it...
        p = Popen(
            f[1:],
            stdout=PIPE,
            stdin=sys.stdin,
            stderr=sys.stderr if mode == "r" else PIPE,
            shell=True,
            bufsize=-1,  # use system default for buffering
            preexec_fn=toolshed.files.prefunc,
            close_fds=False,
            executable=os.environ.get('SHELL'))
        if sys.version_info[0] > 2:
            import io
            p.stdout = io.TextIOWrapper(p.stdout)
            p.stdin = io.TextIOWrapper(sys.stdin)
            if mode != "r":
                p.stderr = io.TextIOWrapper(p.stderr)

        if mode and mode[0] == "r":
            return toolshed.files.process_iter(p, f[1:])
        return p
    else:
        return toolshed.files.nopen(f, mode)
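
The snippet above leans on toolshed internals (prefunc, process_iter), so it will not run on its own. The key point is that stdin=sys.stdin hands the parent's stdin to the child, so the piped command can still read from it. A self-contained sketch of the same "|command" convention, assuming a POSIX shell; pipe_lines is a hypothetical name, not part of toolshed:

# Minimal, self-contained sketch of the "|command" convention shown above.
# pipe_lines is a hypothetical helper and omits toolshed's error handling.
import io
import os
import sys
from subprocess import PIPE, Popen

def pipe_lines(spec):
    assert spec.startswith("|")
    p = Popen(spec[1:], stdout=PIPE, stdin=sys.stdin,
              stderr=sys.stderr, shell=True, bufsize=-1,
              close_fds=False, executable=os.environ.get("SHELL"))
    for line in io.TextIOWrapper(p.stdout):  # decode the child's bytes lazily
        yield line
    p.wait()

if __name__ == "__main__":
    print(list(pipe_lines("|echo hello")))   # ['hello\n']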
Example #2
def nopen_keep_parent_stdin(f, mode="r"):

    if f.startswith("|"):
        # using shell explicitly makes things like process substitution work:
        # http://stackoverflow.com/questions/7407667/python-subprocess-subshells-and-redirection
        # use sys.stderr so we don't have to worry about checking it...
        p = Popen(f[1:], stdout=PIPE, stdin=sys.stdin,
                  stderr=sys.stderr if mode == "r" else PIPE,
                  shell=True, bufsize=-1, # use system default for buffering
                  preexec_fn=toolshed.files.prefunc,
                  close_fds=False, executable=os.environ.get('SHELL'))
        if sys.version_info[0] > 2:
            import io
            p.stdout = io.TextIOWrapper(p.stdout)
            p.stdin = io.TextIOWrapper(sys.stdin)
            if mode != "r":
                p.stderr = io.TextIOWrapper(p.stderr)

        if mode and mode[0] == "r":
            return toolshed.files.process_iter(p, f[1:])
        return p
    else:
        return toolshed.files.nopen(f, mode)
Example #3
    def shell(self):
        thr_id = threading.get_ident()
        p = self._shell.get(thr_id)
        if p:
            return p
        scmd = self._prep_scmd("bash -l", ssh=self._ssh)
        p = Popen(scmd,
                  shell=True,
                  stdin=PIPE,
                  stdout=PIPE,
                  stderr=STDOUT,
                  preexec_fn=os.setpgrp,
                  executable="/bin/bash")
        p.stdin = p.stdin.detach()  # don't use buffer
        p.stdout = p.stdout.detach()
        os.set_blocking(p.stdout.fileno(), False)
        if p.stderr:
            p.stderr = p.stderr.detach()
            os.set_blocking(p.stderr.fileno(), False)
        p.stdin.write(b'echo \xFF\n')
        while True:
            b = p.stdout.read()
            if b == b'\xFF\n':
                break
            if p.poll() is not None:
                return None
        self._shell[thr_id] = p
        p.read = p.stdout.read

        def p_write(p, data):
            off = 0
            while off < len(data):
                wlen = p.stdin.write(data[off:])
                off += wlen

        p.write = p_write.__get__(p, p.__class__)
        return p
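
The shell() method above establishes the session with a sentinel handshake: it writes an echo of a marker byte and spins on a non-blocking stdout until the marker comes back, so later reads never block the calling thread. A simplified, self-contained sketch of that pattern, assuming a local bash on a POSIX system (start_shell is an illustrative name):

# Sketch of the sentinel handshake used by shell(), assuming a POSIX system
# with bash. Pipes are detached to raw file objects (unbuffered) and stdout
# is switched to non-blocking, so read() returns None when no data is ready.
import os
import time
from subprocess import PIPE, STDOUT, Popen

def start_shell():
    p = Popen("bash", shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT,
              executable="/bin/bash")
    p.stdin = p.stdin.detach()
    p.stdout = p.stdout.detach()
    os.set_blocking(p.stdout.fileno(), False)
    p.stdin.write(b"echo READY\n")       # sentinel the shell echoes back
    buf = b""
    while b"READY\n" not in buf:
        chunk = p.stdout.read()          # None while nothing is pending
        if chunk:
            buf += chunk
        elif p.poll() is not None:       # shell died before answering
            return None
        else:
            time.sleep(0.01)             # avoid a hot busy-wait
    return p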
Example #4
def nopen(f, mode="r"):
    r"""
    open a file that's gzipped or return stdin for '-'
    if f is a number, the result of nopen(sys.argv[f]) is returned.

    >>> nopen('-') == sys.stdin, nopen('-', 'w') == sys.stdout
    (True, True)

    >>> nopen(sys.argv[0])
    <...file...>

    # expands user and vars ($HOME)
    >>> nopen("~/.bashrc").name == nopen("$HOME/.bashrc").name
    True

    # an already open file.
    >>> nopen(open(sys.argv[0]))
    <...file...>

    >>> nopen(0)
    <...file...>

    Or provide nicer access to Popen.stdout
    >>> files = list(nopen("|ls"))
    >>> assert 'setup.py\n' in files or b'setup.py\n' in files, files
    """
    if isinstance(f, int_types):
        return nopen(sys.argv[f], mode)

    if not isinstance(f, basestring):
        return f
    if f.startswith("|"):
        # using shell explicitly makes things like process substitution work:
        # http://stackoverflow.com/questions/7407667/python-subprocess-subshells-and-redirection
        # use sys.stderr so we dont have to worry about checking it...
        p = Popen(
            f[1:],
            stdout=PIPE,
            stdin=PIPE,
            stderr=sys.stderr if mode == "r" else PIPE,
            shell=True,
            bufsize=-1,  # use system default for buffering
            close_fds=False,
            executable=os.environ.get('SHELL'))
        if sys.version_info[0] > 2:
            import io
            p.stdout = io.TextIOWrapper(p.stdout)
            p.stdin = io.TextIOWrapper(p.stdin)
            if mode != "r":
                p.stderr = io.TextIOWrapper(p.stderr)

        if mode and mode[0] == "r":
            return process_iter(p, f[1:])
        return p

    if f.startswith(("http://", "https://", "ftp://")):
        fh = urlopen(f)
        if f.endswith(".gz"):
            return ungzipper(fh)
        if sys.version_info[0] < 3:
            return fh
        import io
        return io.TextIOWrapper(fh)
    f = op.expanduser(op.expandvars(f))
    if f.endswith((".gz", ".Z", ".z")):
        fh = gzip.open(f, mode)
        if sys.version_info[0] < 3:
            return fh
        import io
        return io.TextIOWrapper(fh)
    elif f.endswith((".bz", ".bz2", ".bzip2")):
        fh = bz2.BZ2File(f, mode)
        if sys.version_info[0] < 3:
            return fh
        import io
        return io.TextIOWrapper(fh)

    return {"r": sys.stdin, "w": sys.stdout}[mode[0]] if f == "-" \
         else open(f, mode)
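
For "|command" inputs, nopen() hands the Popen object to process_iter, which is not shown in this listing. As a rough, hypothetical stand-in (not toolshed's actual code), such a helper would yield stdout line by line and surface a non-zero exit status:

# Hypothetical stand-in for process_iter; toolshed's real implementation is
# not shown here. It streams the child's stdout and fails loudly on a
# non-zero exit status.
from subprocess import CalledProcessError

def process_iter_sketch(p, cmd):
    try:
        for line in p.stdout:
            yield line
    finally:
        p.stdout.close()
        if p.wait() != 0:
            raise CalledProcessError(p.returncode, cmd)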
Example #5
def ssh(host,
        cmd='',
        input=None,
        interactive=False,
        wait=True,
        port=22,
        **kwargs):
    """a utility that mimics Popen() and run() but under `ssh` command

    Examples
    --------
    # interactive
    >>> p = ssh('node1', interactive=True)
    >>> p.stdin.write(b'hostname\n')
    9
    >>> p.stdout.read()
    b'node1\n'
    >>> p.stdin.write(b'pwd\n')
    4
    >>> p.stdout.read()
    b'/root\n'
    >>>

    # one shot
    >>> p = ssh('node1', cmd='hostname')
    >>> p.wait()
    0
    >>> p.stdout.read()
    b'node1\n'

    # one shot with input
    >>> p = ssh('node1', cmd='bash', input='echo $HOSTNAME')
    >>> p.wait()
    0
    >>> p.stdout.read()
    b'node1\n'

    Returns
    -------
    obj(Popen): a Popen object for the ssh process
    """
    _kwargs = dict(shell=True,
                   stdout=PIPE,
                   stdin=PIPE,
                   stderr=STDOUT,
                   executable="/bin/bash")
    _kwargs.update(kwargs)
    p = Popen("ssh -T {} -p {} {}".format(host, port, cmd), **_kwargs)
    p.stdin = p.stdin.detach()  # don't use buffer
    p.stdout = p.stdout.detach()
    os.set_blocking(p.stdout.fileno(), False)
    if p.stderr:
        p.stderr = p.stderr.detach()
        os.set_blocking(p.stderr.fileno(), False)
    if input:
        p.stdin.write(BYTES(input))
    if not interactive:
        p.stdin.close()
        if wait:
            p.wait()
    return p
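
Because ssh() detaches its pipes and marks stdout non-blocking, p.stdout.read() returns None until remote output actually arrives. A small polling helper makes the docstring examples above workable in practice; read_all is an illustrative name and the timeout value is arbitrary:

# Polling read for the non-blocking stdout returned by ssh(); read_all is a
# hypothetical helper, not part of the code above.
import time

def read_all(p, timeout=5.0):
    deadline = time.monotonic() + timeout
    out = b""
    while time.monotonic() < deadline:
        chunk = p.stdout.read()          # None while no data has arrived
        if chunk:
            out += chunk
        elif p.poll() is not None:       # remote command finished
            break
        else:
            time.sleep(0.05)
    return out

# e.g., assuming a reachable host:
#   p = ssh('node1', cmd='hostname')
#   print(read_all(p))                   # b'node1\n'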
Example #6
def nopen(f, mode="r"):
    r"""
    open a file that's gzipped or return stdin for '-'
    if f is a number, the result of nopen(sys.argv[f]) is returned.

    >>> nopen('-') == sys.stdin, nopen('-', 'w') == sys.stdout
    (True, True)

    >>> nopen(sys.argv[0])
    <...file...>

    # expands user and vars ($HOME)
    >>> nopen("~/.bashrc").name == nopen("$HOME/.bashrc").name
    True

    # an already open file.
    >>> nopen(open(sys.argv[0]))
    <...file...>

    >>> nopen(0)
    <...file...>

    Or provide nicer access to Popen.stdout
    >>> files = list(nopen("|ls"))
    >>> assert 'setup.py\n' in files or b'setup.py\n' in files, files
    """
    if isinstance(f, int_types):
        return nopen(sys.argv[f], mode)

    if not isinstance(f, basestring):
        return f
    if f.startswith("|"):
        # using shell explicitly makes things like process substitution work:
        # http://stackoverflow.com/questions/7407667/python-subprocess-subshells-and-redirection
        # use sys.stderr so we don't have to worry about checking it...
        p = Popen(f[1:], stdout=PIPE, stdin=PIPE,
                  stderr=sys.stderr if mode == "r" else PIPE,
                  shell=True, bufsize=-1, # use system default for buffering
                  close_fds=False, executable=os.environ.get('SHELL'))
        if sys.version_info[0] > 2:
            import io
            p.stdout = io.TextIOWrapper(p.stdout)
            p.stdin = io.TextIOWrapper(p.stdin)
            if mode != "r":
                p.stderr = io.TextIOWrapper(p.stderr)

        if mode and mode[0] == "r":
            return process_iter(p, f[1:])
        return p

    if f.startswith(("http://", "https://", "ftp://")):
        fh = urlopen(f)
        if f.endswith(".gz"):
            return ungzipper(fh)
        if sys.version_info[0] < 3:
            return fh
        import io
        return io.TextIOWrapper(fh)
    f = op.expanduser(op.expandvars(f))
    if f.endswith((".gz", ".Z", ".z")):
        fh = gzip.open(f, mode)
        if sys.version_info[0] < 3:
            return fh
        import io
        return io.TextIOWrapper(fh)
    elif f.endswith((".bz", ".bz2", ".bzip2")):
        fh = bz2.BZ2File(f, mode)
        if sys.version_info[0] < 3:
            return fh
        import io
        return io.TextIOWrapper(fh)

    return {"r": sys.stdin, "w": sys.stdout}[mode[0]] if f == "-" \
         else open(f, mode)
Example #7
def run_subproc(cmds, captured=True):
    """Runs a subprocess, in its many forms. This takes a list of 'commands,'
    which may be a list of command line arguments or a string, representing
    a special connecting character.  For example::

        $ ls | grep wakka

    is represented by the following cmds::

        [['ls'], '|', ['grep', 'wakka']]

    Lastly, the captured argument affects only the last real command.
    """
    global ENV
    last_stdout = PIPE if captured else None
    background = False
    if cmds[-1] == '&':
        background = True
        cmds = cmds[:-1]
    write_target = None
    if len(cmds) >= 3 and cmds[-2] in WRITER_MODES:
        write_target = cmds[-1][0]
        write_mode = WRITER_MODES[cmds[-2]]
        cmds = cmds[:-2]
        if write_target is not None:
            try:
                last_stdout = open(write_target, write_mode)
            except FileNotFoundError:
                e = 'xonsh: {0}: no such file or directory'
                raise XonshError(e.format(write_target))
        else:
            last_stdout = PIPE
    last_cmd = cmds[-1]
    prev = None
    procs = []
    prev_proc = None
    for cmd in cmds:
        if isinstance(cmd, string_types):
            prev = cmd
            continue
        stdout = last_stdout if cmd is last_cmd else PIPE
        uninew = cmd is last_cmd
        alias = builtins.aliases.get(cmd[0], None)
        if _is_runnable_name(cmd[0]):
            try:
                aliased_cmd = get_script_subproc_command(cmd[0], cmd[1:])
            except PermissionError:
                e = 'xonsh: subprocess mode: permission denied: {0}'
                raise XonshError(e.format(cmd[0]))
        elif alias is None:
            aliased_cmd = cmd
        elif callable(alias):
            aliased_cmd = alias
        else:
            aliased_cmd = alias + cmd[1:]
        # compute stdin for subprocess
        if prev_proc is None:
            stdin = None
        else:
            stdin = prev_proc.stdout
        if callable(aliased_cmd):
            prev_is_proxy = True
            numargs = len(inspect.signature(aliased_cmd).parameters)
            if numargs == 2:
                cls = SimpleProcProxy
            elif numargs == 4:
                cls = ProcProxy
            else:
                e = 'Expected callable with 2 or 4 arguments, not {}'
                raise XonshError(e.format(numargs))
            proc = cls(aliased_cmd, cmd[1:],
                       stdin, stdout, None,
                       universal_newlines=uninew)
        else:
            prev_is_proxy = False
            subproc_kwargs = {}
            if os.name == 'posix':
                subproc_kwargs['preexec_fn'] = _subproc_pre
            try:
                proc = Popen(aliased_cmd,
                             universal_newlines=uninew,
                             env=ENV.detype(),
                             stdin=stdin,
                             stdout=PIPE,
                             stderr=PIPE, **subproc_kwargs)
                proc_out, proc_err = proc.communicate()
                sys.stdout.write(proc_out)
                sys.stderr.write(proc_err)
                proc.stdout = sys.stdout 
                proc.stderr = sys.stderr
            except PermissionError:
                e = 'xonsh: subprocess mode: permission denied: {0}'
                raise XonshError(e.format(aliased_cmd[0]))
            except FileNotFoundError:
                cmd = aliased_cmd[0]
                e = 'xonsh: subprocess mode: command not found: {0}'.format(cmd)
                e += '\n' + suggest_commands(cmd, ENV, builtins.aliases)
                raise XonshError(e)
        procs.append(proc)
        prev = None
        prev_proc = proc
    for proc in procs[:-1]:
        try:
            proc.stdout.close()
        except OSError:
            pass
    if not prev_is_proxy:
        add_job({
            'cmds': cmds,
            'pids': [i.pid for i in procs],
            'obj': prev_proc,
            'bg': background
        })
    if background:
        return
    if prev_is_proxy:
        prev_proc.wait()
    wait_for_active_job()
    if write_target is None:
        # get output
        output = ''
        if prev_proc.stdout not in (None, sys.stdout):
            output = prev_proc.stdout.read()
        if captured:
            return output
    elif last_stdout not in (PIPE, None, sys.stdout):
        last_stdout.close()
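
The core of run_subproc is the pipeline wiring: each stage's stdout becomes the next stage's stdin, only the final stage's output is captured, and upstream pipe ends are closed in the parent so SIGPIPE can propagate. A stripped-down sketch of just that wiring, without xonsh's aliases, redirection, or job control (run_pipeline is an illustrative name):

# Stripped-down sketch of the pipeline wiring in run_subproc: stdout of each
# stage feeds the next stage's stdin; only the last stage is captured.
from subprocess import PIPE, Popen

def run_pipeline(stages):
    procs, prev = [], None
    for i, argv in enumerate(stages):
        last = i == len(stages) - 1
        p = Popen(argv,
                  stdin=prev.stdout if prev else None,
                  stdout=PIPE,
                  universal_newlines=last)   # decode only the final stage
        if prev:
            prev.stdout.close()              # let upstream receive SIGPIPE
        procs.append(p)
        prev = p
    out, _ = procs[-1].communicate()
    return out

if __name__ == "__main__":
    # equivalent of `ls | grep py`
    print(run_pipeline([["ls"], ["grep", "py"]]))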
Example #8
    def execute(self,
                command: Union[str, Sequence[Any]],
                istream: Union[None, BinaryIO] = None,
                with_extended_output: bool = False,
                with_exceptions: bool = True,
                as_process: bool = False,
                output_stream: Union[None, BinaryIO] = None,
                stdout_as_string: bool = True,
                kill_after_timeout: Union[None, int] = None,
                with_stdout: bool = True,
                universal_newlines: bool = False,
                shell: Union[None, bool] = None,
                env: Union[None, Mapping[str, str]] = None,
                max_chunk_size: int = io.DEFAULT_BUFFER_SIZE,
                **subprocess_kwargs: Any
                ) -> Union[str, bytes, Tuple[int, Union[str, bytes], str], AutoInterrupt]:
        """Handles executing the command on the shell and consumes and returns
        the returned information (stdout)

        :param command:
            The command argument list to execute.
            It should be a string, or a sequence of program arguments. The
            program to execute is the first item in the args sequence or string.

        :param istream:
            Standard input filehandle passed to subprocess.Popen.

        :param with_extended_output:
            Whether to return a (status, stdout, stderr) tuple.

        :param with_exceptions:
            Whether to raise an exception when git returns a non-zero status.

        :param as_process:
            Whether to return the created process instance directly from which
            streams can be read on demand. This will render with_extended_output and
            with_exceptions ineffective - the caller will have
            to deal with the details himself.
            It is important to note that the process will be placed into an AutoInterrupt
            wrapper that will interrupt the process once it goes out of scope. If you
            use the command in iterators, you should pass the whole process instance
            instead of a single stream.

        :param output_stream:
            If set to a file-like object, data produced by the git command will be
            output to the given stream directly.
            This feature only has any effect if as_process is False. Processes will
            always be created with a pipe due to issues with subprocess.
            This merely is a workaround as data will be copied from the
            output pipe to the given output stream directly.
            Judging from the implementation, you shouldn't use this flag!

        :param stdout_as_string:
            If False, the command's standard output will be bytes. Otherwise, it will be
            decoded into a string using the default encoding (usually utf-8).
            The latter can fail if the output contains binary data.

        :param env:
            A dictionary of environment variables to be passed to `subprocess.Popen`.

        :param max_chunk_size:
            Maximum number of bytes in one chunk of data passed to the output_stream in
            one invocation of write() method. If the given number is not positive then
            the default value is used.

        :param subprocess_kwargs:
            Keyword arguments to be passed to subprocess.Popen. Please note that
            some of the valid kwargs are already set by this method, the ones you
            specify may not be the same ones.

        :param with_stdout: If True, default True, we open stdout on the created process
        :param universal_newlines:
            if True, pipes will be opened as text, and lines are split at
            all known line endings.
        :param shell:
            Whether to invoke commands through a shell (see `Popen(..., shell=True)`).
            It overrides :attr:`USE_SHELL` if it is not `None`.
        :param kill_after_timeout:
            To specify a timeout in seconds for the git command, after which the process
            should be killed. This has no effect if as_process is set to True. It defaults
            to None, which lets the process run without a time limit. This feature is not
            supported on Windows. It's also worth
            noting that kill_after_timeout uses SIGKILL, which can have negative side
            effects on a repository. For example, stale locks in case of git gc could
            render the repository incapable of accepting changes until the lock is manually
            removed.

        :return:
            * str(output) if extended_output = False (Default)
            * tuple(int(status), str(stdout), str(stderr)) if extended_output = True

            if output_stream is True, the stdout value will be your output stream:
            * output_stream if extended_output = False
            * tuple(int(status), output_stream, str(stderr)) if extended_output = True

            Note git is executed with LC_MESSAGES="C" to ensure consistent
            output regardless of system language.

        :raise GitCommandError:

        :note:
           If you add additional keyword arguments to the signature of this method,
           you must update the execute_kwargs tuple housed in this module."""
        # Remove password for the command if present
        redacted_command = remove_password_if_present(command)
        if self.GIT_PYTHON_TRACE and (self.GIT_PYTHON_TRACE != 'full' or as_process):
            log.info(' '.join(redacted_command))

        # Allow the user to have the command executed in their working dir.
        cwd = self._working_dir or os.getcwd()

        # Start the process
        inline_env = env
        env = os.environ.copy()
        # Attempt to force all output to plain ascii english, which is what some parsing code
        # may expect.
        # According to stackoverflow (http://goo.gl/l74GC8), we are setting LANGUAGE as well
        # just to be sure.
        env["LANGUAGE"] = "C"
        env["LC_ALL"] = "C"
        env.update(self._environment)
        if inline_env is not None:
            env.update(inline_env)

        if is_win:
            cmd_not_found_exception = OSError
            if kill_after_timeout:
                raise GitCommandError(redacted_command, '"kill_after_timeout" feature is not supported on Windows.')
        else:
            if sys.version_info[0] > 2:
                cmd_not_found_exception = FileNotFoundError  # NOQA # exists, flake8 unknown @UndefinedVariable
            else:
                cmd_not_found_exception = OSError
        # end handle

        stdout_sink = (PIPE
                       if with_stdout
                       else getattr(subprocess, 'DEVNULL', None) or open(os.devnull, 'wb'))
        istream_ok = "None"
        if istream:
            istream_ok = "<valid stream>"
        log.debug("Popen(%s, cwd=%s, universal_newlines=%s, shell=%s, istream=%s)",
                  redacted_command, cwd, universal_newlines, shell, istream_ok)
        try:
            proc = Popen(command,
                         env=env,
                         cwd=cwd,
                         bufsize=-1,
                         stdin=istream,
                         stderr=PIPE,
                         stdout=stdout_sink,
                         shell=shell is not None and shell or self.USE_SHELL,
                         close_fds=is_posix,  # unsupported on windows
                         universal_newlines=universal_newlines,
                         creationflags=PROC_CREATIONFLAGS,
                         **subprocess_kwargs
                         )

        except cmd_not_found_exception as err:
            raise GitCommandNotFound(redacted_command, err) from err
        else:
            proc = cast(Popen, proc)
            proc.stdout = cast(BinaryIO, proc.stdout)
            proc.stderr = cast(BinaryIO, proc.stderr)

        if as_process:
            return self.AutoInterrupt(proc, command)

        def _kill_process(pid: int) -> None:
            """ Callback method to kill a process. """
            p = Popen(['ps', '--ppid', str(pid)], stdout=PIPE,
                      creationflags=PROC_CREATIONFLAGS)
            child_pids = []
            if p.stdout is not None:
                for line in p.stdout:
                    if len(line.split()) > 0:
                        local_pid = (line.split())[0]
                        if local_pid.isdigit():
                            child_pids.append(int(local_pid))
            try:
                # Windows does not have SIGKILL, so use SIGTERM instead
                sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
                os.kill(pid, sig)
                for child_pid in child_pids:
                    try:
                        os.kill(child_pid, sig)
                    except OSError:
                        pass
                kill_check.set()    # tell the main routine that the process was killed
            except OSError:
                # It is possible that the process gets completed in the duration after timeout
                # happens and before we try to kill the process.
                pass
            return
        # end

        if kill_after_timeout:
            kill_check = threading.Event()
            watchdog = threading.Timer(kill_after_timeout, _kill_process, args=(proc.pid,))

        # Wait for the process to return
        status = 0
        stdout_value = b''  # type: Union[str, bytes]
        stderr_value = b''  # type: Union[str, bytes]
        newline = "\n" if universal_newlines else b"\n"
        try:
            if output_stream is None:
                if kill_after_timeout:
                    watchdog.start()
                stdout_value, stderr_value = proc.communicate()
                if kill_after_timeout:
                    watchdog.cancel()
                    if kill_check.is_set():
                        stderr_value = ('Timeout: the command "%s" did not complete in %d '
                                        'secs.' % (" ".join(redacted_command), kill_after_timeout))
                        if not universal_newlines:
                            stderr_value = stderr_value.encode(defenc)
                # strip trailing "\n"
                if stdout_value.endswith(newline):  # type: ignore
                    stdout_value = stdout_value[:-1]
                if stderr_value.endswith(newline):  # type: ignore
                    stderr_value = stderr_value[:-1]

                status = proc.returncode
            else:
                max_chunk_size = max_chunk_size if max_chunk_size and max_chunk_size > 0 else io.DEFAULT_BUFFER_SIZE
                stream_copy(proc.stdout, output_stream, max_chunk_size)
                stdout_value = proc.stdout.read()
                stderr_value = proc.stderr.read()
                # strip trailing "\n"
                if stderr_value.endswith(newline):   # type: ignore
                    stderr_value = stderr_value[:-1]
                status = proc.wait()
            # END stdout handling
        finally:
            proc.stdout.close()
            proc.stderr.close()

        if self.GIT_PYTHON_TRACE == 'full':
            cmdstr = " ".join(redacted_command)

            def as_text(stdout_value):
                return not output_stream and safe_decode(stdout_value) or '<OUTPUT_STREAM>'
            # end

            if stderr_value:
                log.info("%s -> %d; stdout: '%s'; stderr: '%s'",
                         cmdstr, status, as_text(stdout_value), safe_decode(stderr_value))
            elif stdout_value:
                log.info("%s -> %d; stdout: '%s'", cmdstr, status, as_text(stdout_value))
            else:
                log.info("%s -> %d", cmdstr, status)
        # END handle debug printing

        if with_exceptions and status != 0:
            raise GitCommandError(redacted_command, status, stderr_value, stdout_value)

        if isinstance(stdout_value, bytes) and stdout_as_string:  # could also be output_stream
            stdout_value = safe_decode(stdout_value)

        # Allow access to the command's status code
        if with_extended_output:
            return (status, stdout_value, safe_decode(stderr_value))
        else:
            return stdout_value
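
The kill_after_timeout handling above boils down to a watchdog pattern: a threading.Timer arms a kill callback before communicate(), is cancelled afterwards, and an Event records whether it fired. A simplified sketch of just that pattern; run_with_timeout is an illustrative name, and unlike _kill_process above it does not walk child PIDs via ps:

# Simplified sketch of the kill_after_timeout watchdog pattern; it omits the
# child-PID walk that _kill_process performs via `ps --ppid`.
import signal
import threading
from subprocess import PIPE, Popen

def run_with_timeout(argv, timeout):
    proc = Popen(argv, stdout=PIPE, stderr=PIPE)
    killed = threading.Event()

    def _kill():
        killed.set()                          # record that the timeout fired
        try:
            # Windows has no SIGKILL, so fall back to SIGTERM
            proc.send_signal(getattr(signal, "SIGKILL", signal.SIGTERM))
        except OSError:
            pass                              # process exited in the meantime

    watchdog = threading.Timer(timeout, _kill)
    watchdog.start()
    try:
        out, err = proc.communicate()
    finally:
        watchdog.cancel()
    if killed.is_set():
        err = b"Timeout: the command did not complete in %d secs" % int(timeout)
    return proc.returncode, out, err

# e.g. run_with_timeout(["sleep", "10"], timeout=1) -> (-9, b'', b'Timeout: ...')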