Example #1
def test_030_db_sanity_from_another_process(mutable_database):
    def read_and_modify():
        # check that another process can read the DB
        _check_db_sanity(mutable_database)
        with mutable_database.write_transaction():
            _mock_remove('mpileaks ^zmpi')

    p = fork_context.Process(target=read_and_modify, args=())
    p.start()
    p.join()

    # ensure child process change is visible in parent process
    with mutable_database.read_transaction():
        assert len(mutable_database.query('mpileaks ^zmpi')) == 0
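
All of these examples create children through a shared fork_context object
rather than calling multiprocessing.Process directly. A minimal sketch of how
such a context might be obtained, assuming Python 3's
multiprocessing.get_context (the exact definition in the original codebase may
differ):

import multiprocessing
import sys

# Pin the start method to 'fork' so children inherit the parent's state
# (open database handles, logger configuration, etc.). On platforms
# without fork, fall back to the platform default.
if sys.platform != 'win32':
    fork_context = multiprocessing.get_context('fork')
else:
    fork_context = multiprocessing.get_context()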
Example #2
def local_multiproc_test(*functions, **kwargs):
    """Order some processes using simple barrier synchronization."""
    b = mp.Barrier(len(functions), timeout=barrier_timeout)

    args = (b, ) + tuple(kwargs.get('extra_args', ()))
    procs = [
        fork_context.Process(target=f, args=args, name=f.__name__)
        for f in functions
    ]

    for p in procs:
        p.start()

    for p in procs:
        p.join()

    assert all(p.exitcode == 0 for p in procs)
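
A usage sketch for the helper above; the two worker functions are
hypothetical, but they show the expected shape: each target receives the
shared barrier as its first argument and waits on it so all processes proceed
in lockstep (this assumes mp is the multiprocessing module and
barrier_timeout is defined at module scope):

def writer(barrier):
    # hypothetical worker: prepare some state, then rendezvous
    barrier.wait()

def reader(barrier):
    # hypothetical worker: rendezvous before reading the state
    barrier.wait()

local_multiproc_test(writer, reader)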
Example #3
    def start(self, **kwargs):
        """Start the controller and minion processes.

        Arguments:
            kwargs (dict): arbitrary keyword arguments that will be
                passed to controller and minion functions

        The controller process will create the minion, then call
        ``controller_function``.  The minion process will call
        ``minion_function``.

        """
        self.proc = fork_context.Process(
            target=PseudoShell._set_up_and_run_controller_function,
            args=(self.controller_function, self.minion_function,
                  self.controller_timeout, self.sleep_time),
            kwargs=kwargs,
        )
        self.proc.start()
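
A sketch of how this might be driven from calling code; the constructor
signature is an assumption based on the attributes used above
(controller_function, minion_function), not a confirmed API, and both
function bodies are hypothetical:

def controller(*args, **kwargs):
    # hypothetical: drive the minion as if typing at a shell
    pass

def minion(*args, **kwargs):
    # hypothetical: act like a program running under the controller
    pass

shell = PseudoShell(controller, minion)  # assumed constructor
shell.start()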
Example #4
    def __enter__(self):
        if self._active:
            raise RuntimeError("Can't re-enter the same log_output!")

        if self.file_like is None:
            raise RuntimeError(
                "file argument must be set by either __init__ or __call__")

        # set up a stream for the daemon to write to
        self.close_log_in_parent = True
        self.write_log_in_parent = False
        if isinstance(self.file_like, string_types):
            self.log_file = open(self.file_like, 'w')

        elif _file_descriptors_work(self.file_like):
            self.log_file = self.file_like
            self.close_log_in_parent = False

        else:
            self.log_file = StringIO()
            self.write_log_in_parent = True

        # record parent color settings before redirecting.  We do this
        # because color output depends on whether the *original* stdout
        # is a TTY.  New stdout won't be a TTY so we force colorization.
        self._saved_color = tty.color._force_color
        forced_color = tty.color.get_color_when()

        # also record parent debug settings -- in case the logger is
        # forcing debug output.
        self._saved_debug = tty._debug

        # OS-level pipe for redirecting output to logger
        read_fd, write_fd = os.pipe()

        # Multiprocessing pipe for communication back from the daemon
        # Currently only used to save echo value between uses
        self.parent_pipe, child_pipe = multiprocessing.Pipe()

        # Set up a daemon that writes to the log file what it reads from the pipe
        try:
            # need to pass this because multiprocessing closes stdin in child.
            try:
                input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
            except BaseException:
                input_stream = None  # just don't forward input if this fails

            self.process = fork_context.Process(
                target=_writer_daemon,
                args=(input_stream, read_fd, write_fd, self.echo,
                      self.log_file, child_pipe))
            self.process.daemon = True  # must set before start()
            self.process.start()
            os.close(read_fd)  # close in the parent process

        finally:
            if input_stream:
                input_stream.close()

        # Flush immediately before redirecting so that anything buffered
        # goes to the original stream
        sys.stdout.flush()
        sys.stderr.flush()

        # Now do the actual output redirection.
        self.use_fds = _file_descriptors_work(sys.stdout, sys.stderr)
        if self.use_fds:
            # We try first to use OS-level file descriptors, as this
            # redirects output for subprocesses and system calls.

            # Save old stdout and stderr file descriptors
            self._saved_stdout = os.dup(sys.stdout.fileno())
            self._saved_stderr = os.dup(sys.stderr.fileno())

            # redirect to the pipe we created above
            os.dup2(write_fd, sys.stdout.fileno())
            os.dup2(write_fd, sys.stderr.fileno())
            os.close(write_fd)

        else:
            # Handle I/O the Python way. This won't redirect lower-level
            # output, but it's the best we can do, and the caller
            # shouldn't expect any better, since *they* have apparently
            # redirected I/O the Python way.

            # Save old stdout and stderr file objects
            self._saved_stdout = sys.stdout
            self._saved_stderr = sys.stderr

            # create a file object for the pipe; redirect to it.
            pipe_fd_out = os.fdopen(write_fd, 'w')
            sys.stdout = pipe_fd_out
            sys.stderr = pipe_fd_out

        # Unbuffer stdout and stderr at the Python level
        if not self.buffer:
            sys.stdout = Unbuffered(sys.stdout)
            sys.stderr = Unbuffered(sys.stderr)

        # Force color and debug settings now that we have redirected.
        tty.color.set_color_when(forced_color)
        tty._debug = self.debug

        # track whether we're currently inside this log_output
        self._active = True

        # return this log_output object so that the user can do things
        # like temporarily echoing some output.
        return self
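
For context, a sketch of how this context manager might be used; the
constructor arguments are assumptions inferred from the attributes read in
__enter__ (self.file_like, self.echo), since only __enter__ is shown here:

# assumed constructor: log_output(file_like, echo=False)
with log_output('install.log', echo=True):
    # While the block runs, stdout/stderr are copied to install.log by
    # the writer daemon; with OS-level file descriptors this also
    # captures output from subprocesses. echo=True mirrors it to the
    # original terminal as well.
    print('this goes to install.log and, because echo=True, the terminal')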
Example #5
def fork(pkg, function, dirty, fake):
    """Fork a child process to do part of a spack build.

    Args:
        pkg (PackageBase): package whose build environment should be
            set up in the forked process.
        function (callable): argless function to run in the child
            process.
        dirty (bool): If True, do NOT clean the environment before
            building.
        fake (bool): If True, skip package setup because it's not a real build

    Usage::

        def child_fun():
            # do stuff
        build_env.fork(pkg, child_fun)

    Forked processes are run with the build environment set up by
    spack.build_environment.  This allows package authors to have full
    control over the environment, etc. without affecting other builds
    that might be executed in the same spack call.

    If something goes wrong, the child process catches the error and
    passes it to the parent wrapped in a ChildError.  The parent is
    expected to handle (or re-raise) the ChildError.
    """
    def child_process(child_pipe, input_stream):
        # We are in the child process. Python sets sys.stdin to
        # open(os.devnull) to prevent our process and its parent from
        # simultaneously reading from the original stdin. But, we assume
        # that the parent process is not going to read from it till we
        # are done with the child, so we undo Python's precaution.
        if input_stream is not None:
            sys.stdin = input_stream

        try:
            if not fake:
                setup_package(pkg, dirty=dirty)
            return_value = function()
            child_pipe.send(return_value)

        except StopPhase as e:
            # Do not create a full ChildError from this; it's not an error,
            # it's a control statement.
            child_pipe.send(e)
        except BaseException:
            # catch ANYTHING that goes wrong in the child process
            exc_type, exc, tb = sys.exc_info()

            # Need to unwind the traceback in the child because traceback
            # objects can't be sent to the parent.
            tb_string = traceback.format_exc()

            # build up some context from the offending package so we can
            # show that, too.
            package_context = get_package_context(tb)

            build_log = None
            if hasattr(pkg, 'log_path'):
                build_log = pkg.log_path

            # make a pickleable exception to send to parent.
            msg = "%s: %s" % (exc_type.__name__, str(exc))

            ce = ChildError(msg, exc_type.__module__, exc_type.__name__,
                            tb_string, build_log, package_context)
            child_pipe.send(ce)

        finally:
            child_pipe.close()

    parent_pipe, child_pipe = multiprocessing.Pipe()
    input_stream = None
    try:
        # Forward sys.stdin when appropriate, to allow toggling verbosity
        if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
            input_stream = os.fdopen(os.dup(sys.stdin.fileno()))

        p = fork_context.Process(target=child_process,
                                 args=(child_pipe, input_stream))
        p.start()

    except InstallError as e:
        e.pkg = pkg
        raise

    finally:
        # Close the input stream in the parent process
        if input_stream is not None:
            input_stream.close()

    child_result = parent_pipe.recv()
    p.join()

    # If the child sent back a StopPhase, re-raise it in the parent
    if isinstance(child_result, StopPhase):
        # do not print a traceback: StopPhase is control flow, not an error
        raise child_result

    # let the caller know which package went wrong.
    if isinstance(child_result, InstallError):
        child_result.pkg = pkg

    if isinstance(child_result, ChildError):
        # If the child process raised an error, print its output here rather
        # than waiting until the call to SpackError.die() in main(). This
        # allows exception handling output to be logged from within Spack.
        # see spack.main.SpackCommand.
        child_result.print_context()
        raise child_result

    return child_result
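
A sketch of the caller's side of this contract; pkg and do_build are
hypothetical, and the except clauses mirror the behavior described in the
docstring (the parent is expected to handle or re-raise the ChildError, and
StopPhase is control flow rather than failure):

def do_build():
    # hypothetical argless function to run in the forked child
    pass

try:
    result = fork(pkg, do_build, dirty=False, fake=False)
except StopPhase:
    pass  # the phase was stopped deliberately; not an error
except ChildError as e:
    # the child's traceback string, build log path, and package context
    # arrive inside the ChildError; handle it or re-raise
    raise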