Example #1
0
def spawn_test(test, prefix, passthrough, run_skipped, show_cmd):
    """Spawn one child, return a task struct.

    Returns None when the test is disabled and skipped tests are not being
    run.  In the parent this returns a Task wrapping the child's pid and the
    read ends of its stdout/stderr pipes (unless *passthrough* is set, in
    which case the child simply inherits our stdout/stderr).  In the child
    this never returns: os.execvp replaces the process with the test command.
    """
    if not test.enable and not run_skipped:
        return None

    cmd = test.get_command(prefix)
    if show_cmd:
        print(escape_cmdline(cmd))

    if not passthrough:
        (rout, wout) = os.pipe()
        (rerr, werr) = os.pipe()

        rv = os.fork()

        # Parent: keep only the read ends of both pipes.
        if rv:
            os.close(wout)
            os.close(werr)
            return Task(test, prefix, rv, rout, rerr)

        # Child: keep only the write ends, wired to stdout/stderr.
        os.close(rout)
        os.close(rerr)

        os.dup2(wout, 1)
        os.dup2(werr, 2)
        # Close the originals now that they are duplicated onto fds 1/2,
        # so the exec'd test does not inherit stray pipe descriptors.
        os.close(wout)
        os.close(werr)

    os.execvp(cmd[0], cmd)
Example #2
0
def remote_agent(in_stream_cls, out_stream_cls):
    """
    Connect file descriptors to the right pipes and start the slave
    command loop.  When something happens it raises an exception which
    can be caught by the command master.

    :params in_stream_cls: Class encapsulating the input stream.
    :params out_stream_cls: Class encapsulating the output stream.
    """
    try:
        # Remember the real std fds before sys.stdout/stderr are redirected.
        fd_stdout = sys.stdout.fileno()
        fd_stderr = sys.stderr.fileno()  # NOTE(review): captured but unused below
        fd_stdin = sys.stdin.fileno()
        soutr, soutw = os.pipe()
        serrr, serrw = os.pipe()
        # Route sys.stdout/sys.stderr into the pipes, unbuffered
        # (Python 2 semantics: buffering=0 with a text-mode fdopen).
        sys.stdout = os.fdopen(soutw, "w", 0)
        sys.stderr = os.fdopen(serrw, "w", 0)
        # Handshake byte on the real stdout so the master knows we are up.
        os.write(fd_stdout, "#")

        logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

        w_stdin = None
        # Wrap the original std fds for master <-> slave traffic.
        w_stdout = out_stream_cls(fd_stdout)
        w_stdin = in_stream_cls(fd_stdin)

        cmd = CommanderSlaveCmds(w_stdin, w_stdout, soutr, serrr)

        cmd.cmd_loop()
    except SystemExit:
        pass
    except:
        # Report any failure on the (redirected) stderr pipe.
        e = traceback.format_exc()
        sys.stderr.write(e)
Example #3
0
def connect_to_new_process(fds):
    """Request forkserver to create a child process.

    Returns a pair of fds (status_r, data_w).  The calling process can read
    the child process's pid and (eventually) its returncode from status_r.
    The calling process should write to data_w the pickled preparation and
    process data.
    """
    # The +4 accounts for child_r, child_w, the forkserver-alive fd and the
    # semaphore tracker fd that are always sent alongside the caller's fds.
    if len(fds) + 4 >= MAXFDS_TO_SEND:
        raise ValueError("too many fds")
    with socket.socket(socket.AF_UNIX) as client:
        client.connect(_forkserver_address)
        # parent_r/parent_w stay in this process; child_r/child_w are
        # transferred to the forkserver over the socket.
        parent_r, child_w = os.pipe()
        child_r, parent_w = os.pipe()
        allfds = [child_r, child_w, _forkserver_alive_fd, semaphore_tracker._semaphore_tracker_fd]
        allfds += fds
        try:
            reduction.sendfds(client, allfds)
            return parent_r, parent_w
        except:
            # On failure the parent ends are useless; close them too.
            os.close(parent_r)
            os.close(parent_w)
            raise
        finally:
            # Our copies of the child ends are no longer needed whether or
            # not the send succeeded (the forkserver has its own duplicates).
            os.close(child_r)
            os.close(child_w)
Example #4
0
 def does_stuff():
     """Exercise os.pipe/dup/dup2/close and check fd distinctness."""
     rd, wr = os.pipe()
     rd_copy = os.dup(rd)
     wr_copy = os.dup(wr)
     # All four descriptors must be pairwise distinct.
     assert rd != wr
     assert rd != rd_copy
     assert rd != wr_copy
     assert wr != rd_copy
     assert wr != wr_copy
     assert rd_copy != wr_copy
     # Recycle rd_copy's slot via dup2 after closing it.
     os.close(rd_copy)
     os.dup2(wr_copy, rd_copy)
     rd2, wr2 = os.pipe()
     # A fresh pipe must not reuse any descriptor that is still open.
     assert rd2 != rd
     assert rd2 != wr
     assert rd2 != rd_copy
     assert rd2 != wr_copy
     assert wr2 != rd
     assert wr2 != wr
     assert wr2 != rd_copy
     assert wr2 != wr_copy
     assert wr2 != rd2
     os.close(rd)
     os.close(wr)
     os.close(rd_copy)
     os.close(wr_copy)
     os.close(rd2)
     os.close(wr2)
     return 42
Example #5
0
    def __init__(self, rrdtool="rrdtool"):
        """Get an rrdtool pipe.

        Forks an rrdtool child whose stdin/stdout/stderr are wired to a
        pair of pipes, wrapped for the parent by self.pfile.  On failure
        every pipe fd is closed before the exception is re-raised.
        """

        r1 = None
        r2 = None
        w1 = None
        w2 = None
        self.pid = 0
        self.pfile = None

        try:
            r1, w1 = os.pipe()  # child stdout/stderr -> parent
            r2, w2 = os.pipe()  # parent -> child stdin
            self.pfile = PipeFile(r1, w2)
            # Fork off rrdtool
            self.pid = os.fork()
            if self.pid == 0:
                self.pfile.close()
                os.dup2(r2, 0)
                os.dup2(w1, 1)
                os.dup2(w1, 2)
                os.execv(rrdtool, [rrdtool, "-"])
                os._exit(1)  # reached only if execv itself failed
        except:
            # Compare against None: a pipe fd can legitimately be 0, which
            # the previous truthiness test would have skipped (fd leak).
            if r1 is not None:
                os.close(r1)
            if r2 is not None:
                os.close(r2)
            if w1 is not None:
                os.close(w1)
            if w2 is not None:
                os.close(w2)
            # Bare raise re-raises the original exception with its traceback
            # (equivalent to the old three-argument Python 2 raise).
            raise
Example #6
0
        def __init__(self, process_obj):
            # Start a child either via fork+exec (force_execv) or by
            # delegating to the plain fork-based base-class Popen.
            self.force_execv = process_obj.force_execv

            if self.force_execv:
                # Flush before forking so buffered output is not duplicated
                # into the child.
                sys.stdout.flush()
                sys.stderr.flush()
                # Pipe whose read end doubles as the liveness sentinel: it
                # becomes readable (EOF) once the child exits.
                r, w = os.pipe()
                self.sentinel = r

                from_parent_fd, to_child_fd = os.pipe()
                # The child learns its pipe fd number via argv.
                cmd = get_command_line() + [str(from_parent_fd)]

                self.pid = os.fork()
                if self.pid == 0:
                    # Child: drop the parent-side fds, then exec a fresh
                    # interpreter (does not return on success).
                    os.close(r)
                    os.close(to_child_fd)
                    os.execv(sys.executable, cmd)

                # send information to child
                prep_data = get_preparation_data(process_obj._name)
                os.close(from_parent_fd)
                to_child = os.fdopen(to_child_fd, "wb")
                Popen._tls.process_handle = self.pid
                try:
                    dump(prep_data, to_child, HIGHEST_PROTOCOL)
                    dump(process_obj, to_child, HIGHEST_PROTOCOL)
                finally:
                    del (Popen._tls.process_handle)
                    to_child.close()
            else:
                super(Popen, self).__init__(process_obj)
    def test_gevent_friendly(self):
        """Verify AsyncDispatcher cooperates with gevent and does not
        consume file descriptors."""

        # Used to verify that file descriptors aren't consumed
        r, w = os.pipe()
        os.close(r)
        os.close(w)

        # Get a good benchmark without any concurrent actions
        t1 = time.time()
        with AsyncDispatcher(self.block_stuff) as dispatcher:
            v = dispatcher.wait(10)
        dt = time.time() - t1

        # Check that it completes within 10 seconds and yields the right value
        self.assertTrue(dt < 10)
        self.assertTrue(np.array_equal(v, np.arange(20)))

        # Try it again but this time with a gevent sleep that should run
        # concurrently with the dispatcher thread
        t1 = time.time()
        with AsyncDispatcher(self.block_stuff) as dispatcher:
            gevent.sleep(5)
            v = dispatcher.wait(10)
        ndt = time.time() - t1

        # There is usually some difference but it should definitely be
        # bounded (the sleep must have overlapped with the dispatcher)
        self.assertTrue(abs(dt - ndt) < 5)

        try:
            # Make sure we're not losing file descriptors to maintain thread
            # synchronization: the next pipe should hand back the fd numbers
            # released at the top of the test.
            self.assertEquals((r, w), os.pipe())
        finally:
            os.close(r)
            os.close(w)
Example #8
0
 def __init__(self, logfd, stdoutfd):
     """Store the log and stdout descriptors; when logging is enabled
     (vars.LOG), open one pipe each for stdin, stderr and log traffic."""
     self.stdoutfd = stdoutfd
     self.logfd = logfd
     if not vars.LOG:
         return
     self.fd_std_in, self.fd_std_out = os.pipe()
     self.fd_err_in, self.fd_err_out = os.pipe()
     self.fd_log_in, self.fd_log_out = os.pipe()
Example #9
0
 def _get_handles(self, stdin, stdout, stderr):
     p2cread, p2cwrite = (None, None)
     c2pread, c2pwrite = (None, None)
     errread, errwrite = (None, None)
     if stdin is None:
         pass
     elif stdin == PIPE:
         p2cread, p2cwrite = os.pipe()
     elif isinstance(stdin, int):
         p2cread = stdin
     else:
         p2cread = stdin.fileno()
     if stdout is None:
         pass
     elif stdout == PIPE:
         c2pread, c2pwrite = os.pipe()
     elif isinstance(stdout, int):
         c2pwrite = stdout
     else:
         c2pwrite = stdout.fileno()
     if stderr is None:
         pass
     elif stderr == PIPE:
         errread, errwrite = os.pipe()
     elif stderr == STDOUT:
         errwrite = c2pwrite
     elif isinstance(stderr, int):
         errwrite = stderr
     else:
         errwrite = stderr.fileno()
     return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite)
Example #10
0
def genrsa(password):
    """Generate a 1024-bit RSA private key with openssl.

    When *password* is truthy the key is des3-encrypted and the password
    is fed to the child on stdin.  Returns the PEM key text.
    """
    stdin_pipe = pipe()
    stdout_pipe = pipe()

    cmd = ["openssl", "genrsa"]
    if password:
        cmd += ["-des3", "-passout", "stdin"]
    cmd += ["1024"]

    spawn(cmd, stdin=stdin_pipe, stdout=stdout_pipe)

    # The parent keeps the write end of the child's stdin and the read end
    # of its stdout; the opposite halves belong to the child.
    r_in, w_in = stdin_pipe
    r_out, w_out = stdout_pipe
    close(r_in)
    close(w_out)

    if password:
        write(w_in, password)
    close(w_in)

    reader = fdopen(r_out, "r")
    lines = reader.readlines()
    reader.close()

    return "".join(lines)
Example #11
0
def req(key, subject, password):
    """Create a certificate signing request with openssl.

    *key* is PEM private-key text (written to a temp file), *subject* a
    mapping of DN fields, and *password* is fed to the child's stdin.
    Returns the CSR text.
    """
    stdin_pipe = pipe()
    stdout_pipe = pipe()

    # Stash the key in a temporary file openssl can read.
    key_fd, key_path = mkstemp()
    write(key_fd, key)
    close(key_fd)

    subject = "".join(["/%s=%s" % (k, v) for k, v in subject.items() if v])

    spawn(
        ["openssl", "req", "-new", "-key", key_path, "-passin", "stdin", "-subj", subject],
        stdin=stdin_pipe,
        stdout=stdout_pipe,
    )

    # Keep only our ends of each pipe.
    r_in, w_in = stdin_pipe
    r_out, w_out = stdout_pipe
    close(r_in)
    close(w_out)

    write(w_in, password)
    close(w_in)

    reader = fdopen(r_out, "r")
    lines = reader.readlines()
    reader.close()

    remove(key_path)

    return "".join(lines)
Example #12
0
    def __init__(self, firstSequence):
        """Create a Debugger instance starting with the given sequence.

        Spawns off a thread (this object) to handle the asynchronous pipe
        I/O to/from the target pdb process.
        """
        # Initialize Thread before start().
        threading.Thread.__init__(self)
        self.quitFlag = False

        self.sequence = Sequence.Sequence(firstSequence)

        # One pipe per direction: target -> debugger and debugger -> target.
        (self.fromTargetReadFD, self.fromTargetWriteFD) = os.pipe()
        (self.toTargetReadFD, self.toTargetWriteFD) = os.pipe()

        self.toTargetRead = os.fdopen(self.toTargetReadFD, "r")
        self.toTargetWrite = os.fdopen(self.toTargetWriteFD, "w")
        self.fromTargetRead = os.fdopen(self.fromTargetReadFD, "r")
        self.fromTargetWrite = os.fdopen(self.fromTargetWriteFD, "w")

        # Run the sequence under pdb; -u keeps the child unbuffered so
        # prompts arrive promptly through the pipes.  stderr is merged
        # into the same pipe as stdout.
        self.targetProcess = subprocess.Popen(
            args=["python", "-u", "-m", "pdb", firstSequence],
            stdin=self.toTargetRead,
            stdout=self.fromTargetWrite,
            stderr=self.fromTargetWrite,
        )

        self.response = None
        self.showStdout = False
        self.start()
Example #13
0
    def __init__(self):
        """Fork an assay worker process connected by three pipes: commands
        to the worker, results back from it, and a sync channel."""
        from_parent, to_worker = os.pipe()
        from_worker, to_parent = os.pipe()
        sync_from_worker, sync_to_parent = os.pipe()

        # Parent-side ends must not leak into future exec'd children.
        unix.close_on_exec(to_worker)
        unix.close_on_exec(from_worker)
        unix.close_on_exec(sync_from_worker)

        # Worker-side ends must survive the exec below.
        unix.keep_on_exec(from_parent)
        unix.keep_on_exec(to_parent)
        unix.keep_on_exec(sync_to_parent)

        worker_pid = os.fork()
        if not worker_pid:
            os.setpgrp()  # prevent worker from receiving Ctrl-C
            python = sys.executable
            # Hand the worker its fd numbers on the command line.
            os.execvp(python, [python, "-m", "assay.worker", str(from_parent), str(to_parent), str(sync_to_parent)])

        # Parent: drop the worker-side ends and wrap ours in file objects.
        os.close(from_parent)
        os.close(to_parent)
        os.close(sync_to_parent)

        self.pids = [worker_pid]
        self.to_worker = os.fdopen(to_worker, "wb")
        self.from_worker = os.fdopen(from_worker, "rb", BUFSIZE)
        self.sync_from_worker = sync_from_worker
Example #14
0
def timed_command(cmd, timeout=None):
    """timed_command(cmd, timeout=None)
    Run a shell command, with an optional timeout
    If command takes longer than specified time it will be killed,
        first with SIGTERM then 2 seconds later with SIGKILL if needed.
        (The 2 second value can be adjusted by changing GRACE_PERIOD).
    Return (exit_status, time_used, command_output, error_output)
    If the program was terminated due to timeout, the exit status will be
        artificially set as though the program returned ETIMEDOUT."""

    # A timeout of 0 means "no timeout".
    if timeout == 0:
        timeout = None

    # One pipe per std stream of the child.
    stdin_r, stdin_w = os.pipe()
    stdout_r, stdout_w = os.pipe()
    stderr_r, stderr_w = os.pipe()

    # Install our SIGCHLD handler, remembering the previous one so it can
    # be restored once the child has been handled.
    prev_sighandler = signal.getsignal(signal.SIGCHLD)
    signal.signal(signal.SIGCHLD, _sighandler)
    pid = os.fork()
    if pid == 0:  # Child
        # _child wires the pipe ends to the std streams and runs cmd.
        _child(stdin_r, stdout_w, stderr_w, cmd)
    else:  # Parent
        ret = _parent(stdout_r, stderr_r, pid, timeout)
        # Close all file descriptors we opened
        for fd in stdin_r, stdin_w, stdout_r, stdout_w, stderr_r, stderr_w:
            try:
                os.close(fd)
            except:
                # Best effort: some fds may already have been closed.
                pass
        # Restore default behavior on SIGCHLD
        signal.signal(signal.SIGCHLD, prev_sighandler)
        return ret
    def __init__(self, tests, num_workers, total_cpus, total_mem, bindir):
        """
        Initialize the class.

        @param tests: A list of test dictionaries.
        @param num_workers: The number of workers (pipelines).
        @param total_cpus: The total number of CPUs to dedicate to tests.
        @param total_mem: The total amount of memory to dedicate to tests.
        @param bindir: The directory where environment files reside.
        """
        self.tests = tests
        self.num_workers = num_workers
        self.total_cpus = total_cpus
        self.total_mem = total_mem
        self.bindir = bindir
        # Pipes -- s stands for scheduler, w stands for worker
        self.s2w = [os.pipe() for _ in range(num_workers)]
        self.w2s = [os.pipe() for _ in range(num_workers)]
        # Wrap both ends of every pipe in unbuffered file objects.
        self.s2w_r = [os.fdopen(pair[0], "r", 0) for pair in self.s2w]
        self.s2w_w = [os.fdopen(pair[1], "w", 0) for pair in self.s2w]
        self.w2s_r = [os.fdopen(pair[0], "r", 0) for pair in self.w2s]
        self.w2s_w = [os.fdopen(pair[1], "w", 0) for pair in self.w2s]
        # "Personal" worker dicts contain modifications that are applied
        # specifically to each worker.  For example, each worker must use a
        # different environment file and a different MAC address pool.
        self.worker_dicts = [{"env": "env%d" % worker} for worker in range(num_workers)]
Example #16
0
def cmd_forkoff(data, server, witem):
    """Fork a child running childfunc(), relaying its stdout/stderr into
    irssi via io watches on two pipes."""
    global child_pid

    # rs/ws carry the child's stdout, re/we its stderr.
    rs, ws = os.pipe()
    re, we = os.pipe()

    pid = os.fork()
    if pid > 0:
        # parent
        child_pid = pid
        irssi.pidwait_add(pid)
        print "forked off", pid
        irssi.signal_add("pidwait", sig_pidwait)

        # redirect child output
        irssi.io_add_watch(rs, read_child, sys.stdout)
        irssi.io_add_watch(re, read_child, sys.stderr)

    else:
        # child
        # Route the child's prints into the pipes, unbuffered (Python 2).
        sys.stdout = os.fdopen(ws, "w", 0)
        sys.stderr = os.fdopen(we, "w", 0)

        childfunc()

        sys.stdout.close()
        sys.stderr.close()
        # _exit skips interpreter cleanup inherited from the parent.
        os._exit(5)
Example #17
0
    def start(self):
        # Fork a child worker connected by an "up" pipe (child -> parent)
        # and a "down" pipe (parent -> child).
        assert not self._started
        self._started = True

        up_read, up_write = os.pipe()
        down_read, down_write = os.pipe()
        args, sock = self.args, self.sock

        pid = os.fork()
        if pid:
            # parent: keep up_write/down_read and talk to the child
            # asynchronously.
            os.close(up_read)
            os.close(down_write)
            asyncio.ensure_future(self.connect(pid, up_write, down_read))
        else:
            # child
            os.close(up_write)
            os.close(down_read)

            # cleanup after fork: the child must not reuse the parent's
            # event loop.
            asyncio.set_event_loop(None)

            # setup process
            process = ChildProcess(up_read, down_write, args, sock)
            process.start()
Example #18
0
File: util.py Project: lucciano/pdk
def execv(execv_args, set_up=noop, pipes=True):
    """Fork and exec.

    Returns pipes for input and output, and a closure which waits on
    the pid.

    The set_up function is called just before exec.

    Execv args should be a tuple/list in the form:
    [binary, [exec args]]
    """
    if pipes:
        # child_in_*: parent -> child stdin; parent_in_*: child stdout -> parent.
        child_in_read, child_in_write = os.pipe()
        parent_in_read, parent_in_write = os.pipe()
    pid = os.fork()
    if pid:
        # parent
        _wait = get_waiter(pid, execv_args)
        if pipes:
            # Drop the child-side ends and wrap ours as file objects.
            os.close(child_in_read)
            os.close(parent_in_write)
            return os.fdopen(child_in_write, "w"), os.fdopen(parent_in_read), _wait
        else:
            return _wait
    else:
        # child
        if pipes:
            os.close(child_in_write)
            os.close(parent_in_read)
            # Wire the pipe ends to the child's stdin/stdout.
            os.dup2(child_in_read, 0)
            os.dup2(parent_in_write, 1)
        set_up()
        os.execv(*execv_args)
    def spawn(self, children):
        """Fork *children* worker processes, each connected to the parent
        by one pipe per direction, and record their bookkeeping state."""
        if self.children:
            raise RuntimeError, "children are already running"

        self.childinfo = []
        self.fdchild = {}  # maps parent read fd -> child index
        self.childitem = [None] * children
        self.readpipes = []
        self.reporter.init(children)
        self.children = children

        for i in range(children):
            childread, parentwrite = os.pipe()
            parentread, childwrite = os.pipe()
            pid = os.fork()
            self.fdchild[parentread] = i
            if pid > 0:
                # Parent: keep only parentread/parentwrite.
                os.close(childread)
                os.close(childwrite)
                self.childinfo.append((pid, parentread, parentwrite))
                self.reporter.spawn(pid)
            else:
                # Child: keep childread/childwrite and enter the worker loop.
                os.close(parentread)
                os.close(parentwrite)
                self.child(childread, childwrite)
Example #20
0
 def __init__(self, cmd, capturestderr=False, bufsize=-1):
     """The parameter 'cmd' is the shell command to execute in a
     sub-process.  On UNIX, 'cmd' may be a sequence, in which case arguments
     will be passed directly to the program without shell intervention (as
     with os.spawnv()).  If 'cmd' is a string it will be passed to the shell
     (as with os.system()).   The 'capturestderr' flag, if true, specifies
     that the object should capture standard error output of the child
     process.  The default is false.  If the 'bufsize' parameter is
     specified, it specifies the size of the I/O buffers to/from the child
     process."""
     _cleanup()
     self.cmd = cmd
     # p2c: parent -> child stdin; c2p: child stdout -> parent.
     p2cread, p2cwrite = os.pipe()
     c2pread, c2pwrite = os.pipe()
     if capturestderr:
         errout, errin = os.pipe()
     self.pid = os.fork()
     if self.pid == 0:
         # Child
         os.dup2(p2cread, 0)
         os.dup2(c2pwrite, 1)
         if capturestderr:
             os.dup2(errin, 2)
         # _run_child execs cmd and does not return.
         self._run_child(cmd)
     # Parent: close the child-side ends and wrap ours in file objects.
     os.close(p2cread)
     self.tochild = os.fdopen(p2cwrite, "w", bufsize)
     os.close(c2pwrite)
     self.fromchild = os.fdopen(c2pread, "r", bufsize)
     if capturestderr:
         os.close(errin)
         self.childerr = os.fdopen(errout, "r", bufsize)
     else:
         self.childerr = None
Example #21
0
   def __init__(self, cache_timeout=600, required_group=None):
       """
       Fork an authentication child connected by a pipe pair; the parent
       sends credentials to the child and caches the results.

       :param int cache_timeout: The life time of cached credentials in seconds.
       :param str required_group: A group that if specified, users must be a member of to be authenticated.
       """
       self.logger = logging.getLogger("KingPhisher.Server.Authenticator")
       self.cache_timeout = cache_timeout
       """The timeout of the credential cache in seconds."""
       self.required_group = required_group
       if self.required_group and not self.required_group in [g.gr_name for g in grp.getgrall()]:
           self.logger.error("the specified group for authentication was not found")
       # Two pipes connect the parent with the forked authenticator child.
       self.parent_rfile, self.child_wfile = os.pipe()
       self.child_rfile, self.parent_wfile = os.pipe()
       self.child_pid = os.fork()
       """The PID of the forked child."""
       # Each side keeps its own end of the pipe pair.
       if not self.child_pid:
           self.rfile = self.child_rfile
           self.wfile = self.child_wfile
       else:
           self.rfile = self.parent_rfile
           self.wfile = self.parent_wfile
       self.rfile = os.fdopen(self.rfile, "r", 1)
       self.wfile = os.fdopen(self.wfile, "w", 1)
       if not self.child_pid:
           # Child: serve authentication requests until done, then exit
           # without ever returning to the caller.
           self.child_routine()
           self.rfile.close()
           self.wfile.close()
           logging.shutdown()
           os._exit(os.EX_OK)
       self.cache_salt = make_salt()
       """The salt to be prepended to passwords before hashing them for the cache."""
       self.cache = {}
       """The credential cache dictionary. Keys are usernames and values are tuples of password hashes and ages."""
       return
Example #22
0
    def _launch(self, process_obj):
        # Serialize the process state and hand it to a forkserver-spawned
        # child over a pipe.
        from . import semaphore_tracker

        tracker_fd = semaphore_tracker.getfd()
        self._fds.append(tracker_fd)
        prep_data = spawn.get_preparation_data(process_obj._name)
        fp = io.BytesIO()
        context.set_spawning_popen(self)
        try:
            reduction.dump(prep_data, fp)
            reduction.dump(process_obj, fp)
        finally:
            context.set_spawning_popen(None)

        parent_r = child_w = child_r = parent_w = None
        try:
            parent_r, child_w = os.pipe()
            child_r, parent_w = os.pipe()
            cmd = spawn.get_command_line(tracker_fd=tracker_fd, pipe_handle=child_r)
            self._fds.extend([child_r, child_w])
            self.pid = util.spawnv_passfds(spawn.get_executable(), cmd, self._fds)
            # parent_r becomes readable (EOF) when the child exits.
            self.sentinel = parent_r
            # closefd=False: parent_w is closed explicitly in the finally.
            with open(parent_w, "wb", closefd=False) as f:
                f.write(fp.getbuffer())
        finally:
            if parent_r is not None:
                # Defer closing the sentinel until this Popen is finalized.
                util.Finalize(self, os.close, (parent_r,))
            for fd in (child_r, child_w, parent_w):
                if fd is not None:
                    os.close(fd)
Example #23
0
    def __init__(self, args, debugger, time, memory):
        # Record the command, limits and bookkeeping for the child process.
        self._args = args
        self._child = _find_exe(self._args[0])
        self._debugger = debugger
        self._time = time
        self._memory = memory
        self._returncode = None
        self._tle = False  # set when the time limit is exceeded
        self._pid = None
        self._rusage = None
        self._start = None
        self._duration = None
        self._r_duration = None

        # Trailing-underscore names are the child-side ends of each pipe.
        self._stdin_, self._stdin = os.pipe()
        self._stdout, self._stdout_ = os.pipe()
        self._stderr, self._stderr_ = os.pipe()
        self.stdin = os.fdopen(self._stdin, "w")
        self.stdout = os.fdopen(self._stdout, "r")
        self.stderr = os.fdopen(self._stderr, "r")

        self._started = threading.Event()
        self._died = threading.Event()
        # Worker thread performs the actual spawn/execute.
        self._worker = threading.Thread(target=self.__spawn_execute)
        self._worker.start()
        if time:
            # Spawn thread to kill process after it times out
            self._shocker = threading.Thread(target=self.__shocker)
            self._shocker.start()
Example #24
0
 def __init__(self, cmd, capturestderr=False, bufsize=-1):
     """The parameter 'cmd' is the shell command to execute in a
     sub-process.  The 'capturestderr' flag, if true, specifies that
     the object should capture standard error output of the child process.
     The default is false.  If the 'bufsize' parameter is specified, it
     specifies the size of the I/O buffers to/from the child process."""
     _cleanup()
     # p2c: parent -> child stdin; c2p: child stdout -> parent.
     p2cread, p2cwrite = os.pipe()
     c2pread, c2pwrite = os.pipe()
     if capturestderr:
         errout, errin = os.pipe()
     self.pid = os.fork()
     if self.pid == 0:
         # Child
         os.dup2(p2cread, 0)
         os.dup2(c2pwrite, 1)
         if capturestderr:
             os.dup2(errin, 2)
         # _run_child execs cmd and does not return.
         self._run_child(cmd)
     # Parent: close the child-side ends and wrap ours in file objects.
     os.close(p2cread)
     self.tochild = os.fdopen(p2cwrite, "w", bufsize)
     os.close(c2pwrite)
     self.fromchild = os.fdopen(c2pread, "r", bufsize)
     if capturestderr:
         os.close(errin)
         self.childerr = os.fdopen(errout, "r", bufsize)
     else:
         self.childerr = None
     # Track this instance so _cleanup() can reap it later.
     _active.append(self)
    def __init__(self, command_str, alias=None):
        """Arguments to __init__() are as described in the description above."""
        # Initialize Thread before start().
        super(Monitor, self).__init__()

        # Split command for shell.
        args = shlex.split(command_str)
        self.command = args

        # Create pipes and system will clean them up after corresponding
        # process finishes.
        (pipe_r, pipe_w) = os.pipe()
        self.in_r = os.fdopen(pipe_r, "r", 0)  # No buffering.
        self.in_w = os.fdopen(pipe_w, "w", 0)  # No buffering.
        (pipe_r, pipe_w) = os.pipe()
        self.out_r = os.fdopen(pipe_r, "r", 0)
        self.out_w = os.fdopen(pipe_w, "w", 0)

        # The flag indicating whether the Monitor thread should stop.
        self._done = False
        # The lock to protect slow popen() running from stop().
        self._lanch_lock = threading.Lock()
        self._process = None  # The underlying process
        if alias is None:
            self.alias = self.command[0]  # The name of the binary to be executed.
        else:
            self.alias = alias

        # The state of the monitor that the console is interested in when
        # starting the process.
        self.interest = Monitor.LANCHED
 def test_run_playbook_logging(self, mock_inventory_str, mock_run_playbook):
     """
     Ensure logging routines are working on _run_playbook method
     """
     # Pre-fill a stdout/stderr pipe pair so the mocked playbook run has
     # real file objects to read from.
     stdout_r, stdout_w = os.pipe()
     stderr_r, stderr_w = os.pipe()
     with open(stdout_r, "rb", buffering=0) as stdout, open(stderr_r, "rb", buffering=0) as stderr:
         mock_run_playbook.return_value.__enter__.return_value.stdout = stdout
         mock_run_playbook.return_value.__enter__.return_value.stderr = stderr
         mock_run_playbook.return_value.__enter__.return_value.returncode = 0
         # Write the expected log lines, closing the write ends so the
         # reads below hit EOF.
         os.write(stdout_w, b"Hello\n")
         os.close(stdout_w)
         os.write(stderr_w, b"Hi\n")
         os.close(stderr_w)
         appserver = make_test_appserver()
         playbook = Playbook(
             source_repo="dummy",
             playbook_path="dummy",
             requirements_path="dummy",
             version="dummy",
             variables="dummy",
         )
         log, returncode = appserver._run_playbook("/tmp/test/working/dir/", playbook)
         # Both streams should end up in the captured log, order-insensitive.
         self.assertCountEqual(log, ["Hello", "Hi"])
         self.assertEqual(returncode, 0)
Example #27
0
def remote_agent(terminal_input=False):
    """
    Connect file descriptors to the right pipes and start the slave
    command loop.  When something happens it raises an exception which
    can be caught by the command master.

    :param terminal_input: If True read commands in terminal mode.
    :type terminal_input: Bool
    """
    # NOTE(review): terminal_input is not referenced in this body — verify
    # whether it is consumed elsewhere or dead.
    try:
        # Remember the real std fds before sys.stdout/stderr are redirected.
        fd_stdout = sys.stdout.fileno()
        fd_stderr = sys.stderr.fileno()  # NOTE(review): captured but unused below
        fd_stdin = sys.stdin.fileno()
        soutr, soutw = os.pipe()
        serrr, serrw = os.pipe()
        # Route sys.stdout/sys.stderr into the pipes, unbuffered
        # (Python 2 semantics: buffering=0 with a text-mode fdopen).
        sys.stdout = os.fdopen(soutw, "w", 0)
        sys.stderr = os.fdopen(serrw, "w", 0)
        # Handshake byte on the real stdout so the master knows we are up.
        os.write(fd_stdout, "#")

        logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

        w_stdin = None
        # Wrap the original std fds for master <-> slave traffic.
        w_stdout = ms.StdIOWrapperOut(fd_stdout)
        w_stdin = ms.StdIOWrapperIn(fd_stdin)

        cmd = CommanderSlaveCmds(w_stdin, w_stdout, soutr, serrr)

        cmd.cmd_loop()
    except SystemExit:
        pass
    except:
        # Report any failure on the (redirected) stderr pipe.
        e = traceback.format_exc()
        sys.stderr.write(e)
Example #28
0
def piped_fork_spawn(sh, escape, cmd, args, env, stdout, stderr):
    # spawn using fork / exec and providing a pipe for the command's
    # stdout / stderr stream
    if stdout != stderr:
        (rFdOut, wFdOut) = os.pipe()
        (rFdErr, wFdErr) = os.pipe()
    else:
        # Shared destination: a single pipe serves both streams.
        (rFdOut, wFdOut) = os.pipe()
        rFdErr = rFdOut
        wFdErr = wFdOut
    # do the fork
    pid = os.fork()
    if not pid:
        # Child process
        os.close(rFdOut)
        if rFdOut != rFdErr:
            os.close(rFdErr)
        os.dup2(wFdOut, 1)  # is there some symbolic way to do that ?
        os.dup2(wFdErr, 2)
        os.close(wFdOut)
        if stdout != stderr:
            os.close(wFdErr)
        # 127 mirrors the shell's "command not found" convention.
        exitval = 127
        args = [sh, "-c", string.join(args)]
        try:
            os.execvpe(sh, args, env)
        except OSError, e:
            # Map the OS error to the exit status scons expects.
            exitval = exitvalmap[e[0]]
            stderr.write("scons: %s: %s\n" % (cmd, e[1]))
        os._exit(exitval)
Example #29
0
def exec_piped_fork(l, env, stdout, stderr):
    # spawn using fork / exec and providing a pipe for the command's
    # stdout / stderr stream
    if stdout != stderr:
        (rFdOut, wFdOut) = os.pipe()
        (rFdErr, wFdErr) = os.pipe()
    else:
        # Shared destination: a single pipe serves both streams.
        (rFdOut, wFdOut) = os.pipe()
        rFdErr = rFdOut
        wFdErr = wFdOut
    # do the fork
    pid = os.fork()
    if not pid:
        # Child process
        os.close(rFdOut)
        if rFdOut != rFdErr:
            os.close(rFdErr)
        os.dup2(wFdOut, 1)  # is there some symbolic way to do that ?
        os.dup2(wFdErr, 2)
        os.close(wFdOut)
        if stdout != stderr:
            os.close(wFdErr)
        # 127 mirrors the shell's "command not found" convention.
        exitval = 127
        try:
            os.execvpe(l[0], l, env)
        except OSError, e:
            # Fall back to the raw errno when it has no mapping.
            exitval = exitvalmap.get(e[0], e[0])
            stderr.write("scons: %s: %s\n" % (l[0], e[1]))
        os._exit(exitval)
Example #30
0
    def test_synchronized_externally(self):
        """We can lock across multiple processes"""
        with utils.tempdir() as tempdir:
            self.flags(lock_path=tempdir)
            # One pipe per process so each side can signal the other.
            rpipe1, wpipe1 = os.pipe()
            rpipe2, wpipe2 = os.pipe()

            @utils.synchronized("testlock1", external=True)
            def f(rpipe, wpipe):
                try:
                    os.write(wpipe, "foo")
                except OSError, e:
                    # The peer closed its read end: it already finished.
                    self.assertEquals(e.errno, errno.EPIPE)
                    return

                # While we hold the lock, the peer must not have written.
                rfds, _wfds, _efds = select.select([rpipe], [], [], 1)
                self.assertEquals(
                    len(rfds),
                    0,
                    "The other process, which was" " supposed to be locked, " "wrote on its end of the " "pipe",
                )
                os.close(rpipe)

            pid = os.fork()
            if pid > 0:
                # Parent: keep rpipe1 (listen) and wpipe2 (signal).
                os.close(wpipe1)
                os.close(rpipe2)

                f(rpipe1, wpipe2)
            else:
                # Child: the complementary ends, then exit without cleanup.
                os.close(rpipe1)
                os.close(wpipe2)

                f(rpipe2, wpipe1)
                os._exit(0)