Code example #1
0
    def popen(self, **kwargs):
        """Run this command via logged_subprocess.Popen.

        Args:
            kwargs: extra keyword arguments forwarded to Popen

        Returns:
            the Popen instance
        """
        if not self._shell:
            popen_args = self._args
        else:
            # With shell=True, Windows treats a single string parameter
            # as pre-quoted (which is what we want), while quoting a
            # list itself. On Unix, args[0] becomes the parameter after
            # -c in "sh -c", and anything after args[0] would be passed
            # as additional flags to sh (we never have anything after
            # args[0]). So always handing Popen the single string when
            # shell=True works consistently on all platforms.
            assert len(self._args) == 1
            popen_args = self._args[0]
        return logged_subprocess.Popen(args=popen_args,
                                       cwd=self._cwd,
                                       env=py2_compat.env_without_unicode(self._env),
                                       shell=self._shell,
                                       **kwargs)
Code example #2
0
def test_log_subprocess_Popen(monkeypatch):
    # Capture the arguments that logged_subprocess.Popen forwards to
    # the real subprocess.Popen.
    captured = dict(args=(), kwargs=())

    def fake_popen(*args, **kwargs):
        captured['args'] = args
        captured['kwargs'] = kwargs

    monkeypatch.setattr('subprocess.Popen', fake_popen)

    logged_subprocess.Popen(['a', 'b'], foo='bar')

    expected = dict(args=(), kwargs=dict(args=['a', 'b'], foo='bar'))
    assert captured == expected
Code example #3
0
File: pip_api.py  Project: irfanalamt/JSnoobie
def _call_pip(prefix, extra_args):
    """Run pip for the environment at ``prefix`` and return its stdout.

    Args:
        prefix: environment prefix the pip command targets
        extra_args: arguments appended to the pip command line

    Returns:
        raw stdout bytes from the pip process

    Raises:
        PipError: if pip cannot be started or exits with nonzero status
    """
    cmd_list = _get_pip_command(prefix, extra_args)

    try:
        p = logged_subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError as e:
        # Bug fix: repr(e) was previously formatted with %r, which
        # wrapped the already-repr'd string in a second layer of quotes.
        raise PipError("failed to run: %r: %s" % (" ".join(cmd_list), repr(e)))
    (out, err) = p.communicate()
    errstr = err.decode().strip()
    if p.returncode != 0:
        raise PipError('%s: %s' % (" ".join(cmd_list), errstr))
    elif errstr != '':
        # pip succeeded but wrote to stderr; surface that output as warnings.
        for line in errstr.split("\n"):
            print("%s %s: %s" % (cmd_list[0], cmd_list[1], line), file=sys.stderr)
    return out
Code example #4
0
def test_log_subprocess_Popen_with_logging(monkeypatch):
    logger = _test_logger()
    verbose.push_verbose_logger(logger)
    try:
        # Capture the arguments forwarded to the real subprocess.Popen.
        captured = dict(args=(), kwargs=())

        def fake_popen(*args, **kwargs):
            captured['args'] = args
            captured['kwargs'] = kwargs

        monkeypatch.setattr('subprocess.Popen', fake_popen)

        logged_subprocess.Popen(['a', 'b'], foo='bar')

        expected = dict(args=(), kwargs=dict(args=['a', 'b'], foo='bar'))
        assert captured == expected

        # The command line should have been logged via the verbose logger.
        assert logger.messages == ['$ a b']
    finally:
        verbose.pop_verbose_logger()
Code example #5
0
def _call_conda(extra_args, json_mode=False, platform=None):
    """Run conda with ``extra_args`` and return its stdout.

    Args:
        extra_args: arguments after 'conda'; must be non-empty
        json_mode: if True, try to parse stdout as JSON on failure to
            extract a nicer error message
        platform: forwarded to _get_platform_hacked_conda_command

    Returns:
        raw stdout bytes from the conda process

    Raises:
        CondaError: if conda cannot be started or exits with nonzero status
    """
    assert len(extra_args) > 0  # we deref extra_args[0] below

    (cmd_list,
     command_in_errors) = _get_platform_hacked_conda_command(extra_args,
                                                             platform=platform)

    try:
        p = logged_subprocess.Popen(cmd_list,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
    except OSError as e:
        # Bug fix: repr(e) was previously formatted with %r, which
        # wrapped the already-repr'd string in a second layer of quotes.
        raise CondaError("failed to run: %r: %s" %
                         (command_in_errors, repr(e)))
    (out, err) = p.communicate()
    errstr = err.decode().strip()
    if p.returncode != 0:
        parsed = None
        message = errstr
        if json_mode:
            try:
                parsed = json.loads(out.decode())
                # isinstance() already excludes None, so the extra
                # "is not None" check was redundant.
                if isinstance(parsed, dict):
                    # some versions of conda do 'error' and others
                    # both 'error' and 'message' and they appear to
                    # be the same.
                    for field in ('message', 'error'):
                        if field in parsed:
                            message = parsed[field]
                            break
            except Exception:
                # best-effort JSON parse; fall back to raw stderr text
                pass

        raise CondaError('%s: %s' % (command_in_errors, message), json=parsed)
    elif errstr != '':
        # conda succeeded but wrote to stderr; surface that output as warnings.
        for line in errstr.split("\n"):
            print("%s %s: %s" % ("conda", extra_args[0], line),
                  file=sys.stderr)
    return out
Code example #6
0
def popen(args, stdout_callback, stderr_callback, **kwargs):
    """Run a subprocess, streaming decoded output chunks to callbacks.

    stdout and stderr are drained concurrently on two reader threads so
    that neither pipe can fill up and deadlock the child process.

    Args:
        args: argument list passed to logged_subprocess.Popen
        stdout_callback: called with each decoded stdout chunk, or None
            to discard stdout
        stderr_callback: called with each decoded stderr chunk, or None
            to discard stderr
        kwargs: extra keyword arguments forwarded to Popen. (Bug fix:
            these were previously accepted but silently dropped. Do not
            pass stdout= or stderr=; they are set here.)

    Returns:
        tuple (popen, stdout_text, stderr_text) where the text values
        are the buffered output combined by _combine_lines

    Raises:
        the first exception recorded by either reader thread, if any
    """
    def ignore_line(line):
        pass

    if stdout_callback is None:
        stdout_callback = ignore_line
    if stderr_callback is None:
        stderr_callback = ignore_line

    p = logged_subprocess.Popen(args,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                **kwargs)

    queue = Queue()

    # Create/destroy reader outside of the threads, since there
    # have been threading bugs in related destructors such as TextIOWrapper
    # https://bugs.python.org/issue28387
    #
    # In case the reader isn't thread-safe (likely) we only use it
    # from one thread at a time, even though all creation/deletion
    # is in this main thread, reading is always in the child
    # threads.
    #
    # we use errors=replace primarily because with strict
    # errors, TextIOWrapper can raise an exception
    # "prematurely" (before returning all valid bytes).
    # Arguably replace is nicer anyway for our purposes.
    #
    # We don't close these readers, because it seems to result
    # in a double-close on the underlying file.
    stdout_wrapper = codecs.getreader('utf-8')(p.stdout, errors='replace')
    stderr_wrapper = codecs.getreader('utf-8')(p.stderr, errors='replace')

    stdout_thread = _reader_thread(stdout_wrapper, queue)
    stderr_thread = _reader_thread(stderr_wrapper, queue)

    stdout_buffer = []
    stderr_buffer = []

    first_error = None
    stdout_joined = False
    stderr_joined = False
    # Each queue item is (which_wrapper, data, error); data=None marks
    # end-of-stream for that wrapper's reader thread.
    while not (queue.empty() and (stdout_joined and stderr_joined)):
        (which, data, error) = queue.get()
        if error is not None and first_error is None:
            first_error = error
        if data is None:
            if which is stdout_wrapper:
                stdout_thread.join()
                stdout_joined = True
                assert not stdout_thread.is_alive()
            else:
                assert which is stderr_wrapper
                stderr_thread.join()
                stderr_joined = True
                assert not stderr_thread.is_alive()
        else:
            if which is stdout_wrapper:
                stdout_callback(data)
                stdout_buffer.append(data)
            else:
                assert which is stderr_wrapper
                stderr_callback(data)
                stderr_buffer.append(data)

    assert queue.empty()

    p.stdout.close()
    p.stderr.close()

    p.wait()

    stdout_buffer = _combine_lines(stdout_buffer)
    stderr_buffer = _combine_lines(stderr_buffer)

    # Raise only after the process has been fully reaped, so we don't
    # leak a zombie on reader failure.
    if first_error is not None:
        raise first_error

    return (p, stdout_buffer, stderr_buffer)
Code example #7
0
        def ensure_redis(run_state):
            """Start (or reuse) a redis-server dedicated to this project.

            Returns the redis:// URL on success, or None on failure
            (errors are reported through the frontend).
            """
            # this is pretty lame, we'll want to get fancier at a
            # future time (e.g. use Chalmers, stuff like
            # that). The desired semantic is a new copy of Redis
            # dedicated to this project directory; it should not
            # require the user to have set up anything in advance,
            # e.g. if we use Chalmers we should automatically take
            # care of configuring/starting Chalmers itself.
            url = context.status.analysis.existing_scoped_instance_url
            if url is not None:
                frontend.info(
                    "Using redis-server we started previously at {url}".format(
                        url=url))
                return url

            run_state.clear()

            workdir = context.ensure_service_directory(requirement.env_var)
            pidfile = os.path.join(workdir, "redis.pid")
            logfile = os.path.join(workdir, "redis.log")

            # 6379 is the default Redis port; leave that one free
            # for a systemwide Redis. Try looking for a port above
            # it. This is a pretty huge hack and a race condition,
            # but Redis doesn't as far as I know have "let the OS
            # pick the port" mode.
            LOWER_PORT = config['lower_port']
            UPPER_PORT = config['upper_port']
            port = LOWER_PORT
            # Scan upward for the first port nothing is listening on.
            while port <= UPPER_PORT:
                if not network_util.can_connect_to_socket(host='localhost',
                                                          port=port):
                    break
                port += 1
            if port > UPPER_PORT:
                frontend.error(
                    ("All ports from {lower} to {upper} were in use, " +
                     "could not start redis-server on one of them.").format(
                         lower=LOWER_PORT, upper=UPPER_PORT))
                return None

            # be sure we don't get confused by an old log file
            try:
                os.remove(logfile)
            except IOError:  # pragma: no cover (py3 only)
                pass
            except OSError:  # pragma: no cover (py2 only)
                pass

            command = [
                'redis-server', '--pidfile', pidfile, '--logfile', logfile,
                '--daemonize', 'yes', '--port',
                str(port)
            ]
            frontend.info("Starting " + repr(command))

            # we don't close_fds=True because on Windows that is documented to
            # keep us from collected stderr. But on Unix it's kinda broken not
            # to close_fds. Hmm.
            try:
                popen = logged_subprocess.Popen(
                    args=command,
                    stderr=subprocess.PIPE,
                    env=py2_compat.env_without_unicode(context.environ))
            except Exception as e:
                frontend.error("Error executing redis-server: %s" % (str(e)))
                return None

            # communicate() waits for the process to exit, which
            # is supposed to happen immediately due to --daemonize
            (out, err) = popen.communicate()
            assert out is None  # because we didn't PIPE it
            err = err.decode(errors='replace')

            url = None
            if popen.returncode == 0:
                # now we need to wait for Redis to be ready; we
                # are not sure whether it will create the port or
                # pidfile first, so wait for both.
                port_is_ready = False
                pidfile_is_ready = False
                MAX_WAIT_TIME = 10
                so_far = 0
                # Poll in small increments (MAX_WAIT_TIME/500 seconds)
                # up to MAX_WAIT_TIME seconds total.
                while so_far < MAX_WAIT_TIME:
                    increment = MAX_WAIT_TIME / 500.0
                    time.sleep(increment)
                    so_far += increment
                    if not port_is_ready:
                        if network_util.can_connect_to_socket(host='localhost',
                                                              port=port):
                            port_is_ready = True

                    if not pidfile_is_ready:
                        if os.path.exists(pidfile):
                            pidfile_is_ready = True

                    if port_is_ready and pidfile_is_ready:
                        break

                # if we time out with no pidfile we forge ahead at this point
                if port_is_ready:
                    run_state['port'] = port
                    url = "redis://localhost:{port}".format(port=port)

                    # note: --port doesn't work, only -p, and the failure with --port is silent.
                    run_state['shutdown_commands'] = [[
                        'redis-cli', '-p',
                        str(port), 'shutdown'
                    ]]
                else:
                    frontend.info(
                        "redis-server started successfully, but we timed out trying to connect to it on port %d"
                        % (port))

            # Failure path: dump whatever stderr and log output we have
            # so the user can diagnose why redis-server didn't come up.
            if url is None:
                for line in err.split("\n"):
                    if line != "":
                        frontend.info(line)
                try:
                    with codecs.open(logfile, 'r', 'utf-8') as log:
                        for line in log.readlines():
                            frontend.info(line)
                except IOError as e:
                    # just be silent if redis-server failed before creating a log file,
                    # that's fine. Hopefully it had some stderr.
                    if e.errno != errno.ENOENT:
                        frontend.info(
                            "Failed to read {logfile}: {error}".format(
                                logfile=logfile, error=e))

                frontend.error(
                    "redis-server process failed or timed out, exited with code {code}"
                    .format(code=popen.returncode))

            return url