Example #1
    async def _testPipeLoggerToPipe(self, test_string, loop):
        """
        Test PipeLogger writing to a pipe connected to a PipeReader.
        This verifies that PipeLogger does not deadlock when writing
        to a pipe that's drained by a PipeReader running in the same
        process (requires non-blocking write).
        """

        input_fd, writer_pipe = os.pipe()
        _set_nonblocking(writer_pipe)
        writer_pipe = os.fdopen(writer_pipe, 'wb', 0)
        writer = asyncio.ensure_future(
            _writer(writer_pipe, test_string.encode('ascii')))
        writer.add_done_callback(lambda writer: writer_pipe.close())

        pr, pw = os.pipe()

        consumer = PipeLogger(background=True,
                              input_fd=input_fd,
                              log_file_path=os.fdopen(pw, 'wb', 0),
                              scheduler=loop)
        consumer.start()

        # Before starting the reader, wait here for a moment, in order
        # to exercise PipeLogger's handling of EAGAIN during write.
        await asyncio.wait([writer], timeout=0.01)

        reader = _reader(pr)
        await writer
        content = await reader
        await consumer.async_wait()

        self.assertEqual(consumer.returncode, os.EX_OK)

        return content.decode('ascii', 'replace')
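The docstring's "requires non-blocking write" is the crux: with O_NONBLOCK set on the write end, a full pipe makes os.write() raise BlockingIOError (EAGAIN) instead of blocking forever. A minimal standalone sketch of that behavior, independent of portage:

import fcntl
import os

r, w = os.pipe()
fcntl.fcntl(w, fcntl.F_SETFL, fcntl.fcntl(w, fcntl.F_GETFL) | os.O_NONBLOCK)

chunk = b"x" * 65536
written = 0
try:
    while True:                    # nobody is draining the read end yet
        written += os.write(w, chunk)
except BlockingIOError:
    # The pipe buffer (typically 64 KiB on Linux) is full. A real event
    # loop such as PipeLogger's would now wait until w is writable again.
    pass

os.read(r, 4096)                   # draining makes w writable once more
os.close(r)
os.close(w)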
Example #2
	def _start(self):
		in_pr, in_pw = os.pipe()
		out_pr, out_pw = os.pipe()
		self._files = {}
		self._files['pipe_in'] = in_pr
		self._files['pipe_out'] = out_pw

		fcntl.fcntl(in_pr, fcntl.F_SETFL,
			fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)

		# FD_CLOEXEC is enabled by default in Python >=3.4.
		if sys.hexversion < 0x3040000:
			try:
				fcntl.FD_CLOEXEC
			except AttributeError:
				pass
			else:
				fcntl.fcntl(in_pr, fcntl.F_SETFD,
					fcntl.fcntl(in_pr, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

		self._reg_id = self.scheduler.io_add_watch(in_pr,
			self.scheduler.IO_IN, self._output_handler)
		self._registered = True
		self._proc = SpawnProcess(
			args=[portage._python_interpreter,
				os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
				env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
				fd_pipes={0:out_pr, 1:in_pw, 2:sys.__stderr__.fileno()},
				scheduler=self.scheduler)
		self._proc.addExitListener(self._proc_exit)
		self._proc.start()
		os.close(out_pr)
		os.close(in_pw)
Example #3
    def _start(self):
        in_pr, in_pw = os.pipe()
        out_pr, out_pw = os.pipe()
        self._files = {}
        self._files["pipe_in"] = in_pr
        self._files["pipe_out"] = out_pw

        fcntl.fcntl(in_pr, fcntl.F_SETFL,
                    fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)

        self.scheduler.add_reader(in_pr, self._output_handler)
        self._registered = True
        self._proc = SpawnProcess(
            args=[
                portage._python_interpreter,
                os.path.join(portage._bin_path, "lock-helper.py"),
                self.path,
            ],
            env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
            fd_pipes={
                0: out_pr,
                1: in_pw,
                2: sys.__stderr__.fileno()
            },
            scheduler=self.scheduler,
        )
        self._proc.addExitListener(self._proc_exit)
        self._proc.start()
        os.close(out_pr)
        os.close(in_pw)
Example #4
def _create_pty_or_pipe(copy_term_size=None):
	"""
	Try to create a pty and if that fails then create a normal
	pipe instead.

	@param copy_term_size: If a tty file descriptor is given
		then the term size will be copied to the pty.
	@type copy_term_size: int
	@rtype: tuple
	@return: A tuple of (is_pty, master_fd, slave_fd) where
		is_pty is True if a pty was successfully allocated, and
		False if a normal pipe was allocated.
	"""

	got_pty = False

	global _disable_openpty, _fbsd_test_pty

	if _fbsd_test_pty and not _disable_openpty:
		# Test for python openpty breakage after freebsd7 to freebsd8
		# upgrade, which results in a 'Function not implemented' error
		# and the process being killed.
		pid = os.fork()
		if pid == 0:
			pty.openpty()
			os._exit(os.EX_OK)
		pid, status = os.waitpid(pid, 0)
		if (status & 0xff) == 140:
			_disable_openpty = True
		_fbsd_test_pty = False

	if _disable_openpty:
		master_fd, slave_fd = os.pipe()
	else:
		try:
			master_fd, slave_fd = pty.openpty()
			got_pty = True
		except EnvironmentError as e:
			_disable_openpty = True
			writemsg("openpty failed: '%s'\n" % str(e),
				noiselevel=-1)
			del e
			master_fd, slave_fd = os.pipe()

	if got_pty:
		# Disable post-processing of output since otherwise weird
		# things like \n -> \r\n transformations may occur.
		mode = termios.tcgetattr(slave_fd)
		mode[1] &= ~termios.OPOST
		termios.tcsetattr(slave_fd, termios.TCSANOW, mode)

	if got_pty and \
		copy_term_size is not None and \
		os.isatty(copy_term_size):
		rows, columns = get_term_size()
		set_term_size(rows, columns, slave_fd)

	return (got_pty, master_fd, slave_fd)
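For reference, the same fallback strategy can be sketched without the portage globals and the FreeBSD workaround: prefer pty.openpty() so child programs see a terminal, fall back to os.pipe() on failure, and clear OPOST on the slave side so the kernel does not rewrite "\n" as "\r\n". The helper name below is illustrative, not portage's API:

import os
import pty
import termios

def create_pty_or_pipe():
    try:
        master_fd, slave_fd = pty.openpty()
    except OSError:
        return (False,) + os.pipe()    # pty failed; use a plain pipe
    mode = termios.tcgetattr(slave_fd)
    mode[1] &= ~termios.OPOST          # mode[1] is the oflag field
    termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
    return (True, master_fd, slave_fd)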
Example #5
def _create_pty_or_pipe(copy_term_size=None):
	"""
	Try to create a pty and if that fails then create a normal
	pipe instead.

	@param copy_term_size: If a tty file descriptor is given
		then the term size will be copied to the pty.
	@type copy_term_size: int
	@rtype: tuple
	@return: A tuple of (is_pty, master_fd, slave_fd) where
		is_pty is True if a pty was successfully allocated, and
		False if a normal pipe was allocated.
	"""

	got_pty = False

	global _disable_openpty, _fbsd_test_pty

	if _fbsd_test_pty and not _disable_openpty:
		# Test for python openpty breakage after freebsd7 to freebsd8
		# upgrade, which results in a 'Function not implemented' error
		# and the process being killed.
		pid = os.fork()
		if pid == 0:
			pty.openpty()
			os._exit(os.EX_OK)
		pid, status = os.waitpid(pid, 0)
		if (status & 0xff) == 140:
			_disable_openpty = True
		_fbsd_test_pty = False

	if _disable_openpty:
		master_fd, slave_fd = os.pipe()
	else:
		try:
			master_fd, slave_fd = pty.openpty()
			got_pty = True
		except EnvironmentError as e:
			_disable_openpty = True
			writemsg("openpty failed: '%s'\n" % str(e),
				noiselevel=-1)
			del e
			master_fd, slave_fd = os.pipe()

	if got_pty:
		# Disable post-processing of output since otherwise weird
		# things like \n -> \r\n transformations may occur.
		mode = termios.tcgetattr(slave_fd)
		mode[1] &= ~termios.OPOST
		termios.tcsetattr(slave_fd, termios.TCSANOW, mode)

	if got_pty and \
		copy_term_size is not None and \
		os.isatty(copy_term_size):
		rows, columns = get_term_size()
		set_term_size(rows, columns, slave_fd)

	return (got_pty, master_fd, slave_fd)
Example #6
    def _async_start(self):
        pipe_logger = None
        filter_proc = None
        try:
            log_input = None
            if self.log_path is not None:
                log_filter_file = self.log_filter_file
                if log_filter_file is not None:
                    split_value = shlex_split(log_filter_file)
                    log_filter_file = split_value if split_value else None
                if log_filter_file:
                    filter_input, stdin = os.pipe()
                    log_input, filter_output = os.pipe()
                    try:
                        filter_proc = yield asyncio.create_subprocess_exec(
                            *log_filter_file,
                            env=self.env,
                            stdin=filter_input,
                            stdout=filter_output,
                            stderr=filter_output,
                            loop=self.scheduler)
                    except EnvironmentError:
                        # Maybe the command is missing or broken somehow...
                        os.close(filter_input)
                        os.close(stdin)
                        os.close(log_input)
                        os.close(filter_output)
                    else:
                        self._stdin = os.fdopen(stdin, 'wb', 0)
                        os.close(filter_input)
                        os.close(filter_output)

            if self._stdin is None:
                # Since log_filter_file is unspecified or refers to a file
                # that is missing or broken somehow, create a pipe that
                # logs directly to pipe_logger.
                log_input, stdin = os.pipe()
                self._stdin = os.fdopen(stdin, 'wb', 0)

            # Set background=True so that pipe_logger does not log to stdout.
            pipe_logger = PipeLogger(background=True,
                                     scheduler=self.scheduler,
                                     input_fd=log_input,
                                     log_file_path=self.log_path)

            yield pipe_logger.async_start()
        except asyncio.CancelledError:
            if pipe_logger is not None and pipe_logger.poll() is None:
                pipe_logger.cancel()
            if filter_proc is not None and filter_proc.returncode is None:
                filter_proc.terminate()
            raise

        self._main_task = asyncio.ensure_future(self._main(
            pipe_logger, filter_proc=filter_proc),
                                                loop=self.scheduler)
        self._main_task.add_done_callback(self._main_exit)
Example #7
    def _start(self):
        filter_proc = None
        log_input = None
        if self.log_path is not None:
            log_filter_file = self.log_filter_file
            if log_filter_file is not None:
                split_value = shlex_split(log_filter_file)
                log_filter_file = split_value if split_value else None
            if log_filter_file:
                filter_input, stdin = os.pipe()
                log_input, filter_output = os.pipe()
                try:
                    filter_proc = PopenProcess(
                        proc=subprocess.Popen(
                            log_filter_file,
                            env=self.env,
                            stdin=filter_input,
                            stdout=filter_output,
                            stderr=filter_output,
                        ),
                        scheduler=self.scheduler,
                    )
                    filter_proc.start()
                except EnvironmentError:
                    # Maybe the command is missing or broken somehow...
                    os.close(filter_input)
                    os.close(stdin)
                    os.close(log_input)
                    os.close(filter_output)
                else:
                    self._stdin = os.fdopen(stdin, "wb", 0)
                    os.close(filter_input)
                    os.close(filter_output)

        if self._stdin is None:
            # Since log_filter_file is unspecified or refers to a file
            # that is missing or broken somehow, create a pipe that
            # logs directly to pipe_logger.
            log_input, stdin = os.pipe()
            self._stdin = os.fdopen(stdin, "wb", 0)

        # Set background=True so that pipe_logger does not log to stdout.
        pipe_logger = PipeLogger(
            background=True,
            scheduler=self.scheduler,
            input_fd=log_input,
            log_file_path=self.log_path,
        )
        pipe_logger.start()

        self._main_task_cancel = functools.partial(self._main_cancel,
                                                   filter_proc, pipe_logger)
        self._main_task = asyncio.ensure_future(self._main(
            filter_proc, pipe_logger),
                                                loop=self.scheduler)
        self._main_task.add_done_callback(self._main_exit)
Example #8
    def _spawn(self, args, fd_pipes, **kwargs):
        """
        Extend the superclass _spawn method to perform some pre-fork and
        post-fork actions.
        """

        elog_reader_fd, elog_writer_fd = os.pipe()

        fcntl.fcntl(
            elog_reader_fd,
            fcntl.F_SETFL,
            fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK,
        )

        blockers = None
        if self.blockers is not None:
            # Query blockers in the main process, since closing
            # of file descriptors in the subprocess can prevent
            # access to open database connections such as that
            # used by the sqlite metadata cache module.
            blockers = self.blockers()
        mylink = portage.dblink(
            self.mycat,
            self.mypkg,
            settings=self.settings,
            treetype=self.treetype,
            vartree=self.vartree,
            blockers=blockers,
            pipe=elog_writer_fd,
        )
        fd_pipes[elog_writer_fd] = elog_writer_fd
        self.scheduler.add_reader(elog_reader_fd, self._elog_output_handler)

        # If a concurrent emerge process tries to install a package
        # in the same SLOT as this one at the same time, there is an
        # extremely unlikely chance that the COUNTER values will not be
        # ordered correctly unless we lock the vdb here.
        # FEATURES=parallel-install skips this lock in order to
        # improve performance, and the risk is practically negligible.
        self._lock_vdb()
        if not self.unmerge:
            self._counter = self.vartree.dbapi.counter_tick()

        self._dblink = mylink
        self._elog_reader_fd = elog_reader_fd
        pids = super(MergeProcess, self)._spawn(args, fd_pipes, **kwargs)
        os.close(elog_writer_fd)
        self._buf = ""
        self._elog_keys = set()
        # Discard messages which will be collected by the subprocess,
        # in order to avoid duplicates (bug #446136).
        portage.elog.messages.collect_messages(key=mylink.mycpv)

        # invalidate relevant vardbapi caches
        if self.vartree.dbapi._categories is not None:
            self.vartree.dbapi._categories = None
        self.vartree.dbapi._pkgs_changed = True
        self.vartree.dbapi._clear_pkg_cache(mylink)

        return pids
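Note the fd_pipes[elog_writer_fd] = elog_writer_fd entry: it maps the pipe's write end to itself so the spawned child keeps it open at the same descriptor number that was passed to dblink as pipe=. A bare os.fork() sketch of that parent/child wiring, independent of portage:

import os

reader_fd, writer_fd = os.pipe()
pid = os.fork()
if pid == 0:
    os.close(reader_fd)                      # child keeps only the write end
    os.write(writer_fd, b"LOG some elog-style message\n")
    os._exit(0)
os.close(writer_fd)                          # parent keeps only the read end
print(os.read(reader_fd, 4096))
os.waitpid(pid, 0)
os.close(reader_fd)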
Example #9
    async def run(self):
        self.expected = getattr(self, "expected", None) or {"returncode": 0}
        if self.debug:
            fd_pipes = {}
            pr = None
            pw = None
        else:
            pr, pw = os.pipe()
            fd_pipes = {1: pw, 2: pw}
            pr = open(pr, "rb", 0)

        proc = AsyncFunction(
            scheduler=asyncio.get_event_loop(),
            target=self._subprocess,
            args=(self.args, self.cwd, self.env, self.expected, self.debug),
            fd_pipes=fd_pipes,
        )

        proc.start()
        if pw is not None:
            os.close(pw)

        await proc.async_wait()

        if pr is None:
            stdio = None
        else:
            stdio = await _reader(pr)

        self.result = {
            "stdio": stdio,
            "result": proc.result,
        }
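The debug branch above skips the pipe entirely; otherwise the child's stdout and stderr are both pointed at the write end of one pipe and read back afterwards. A sketch of the same capture pattern using only the standard library (AsyncFunction and _reader are portage internals, so plain subprocess stands in for them):

import os
import subprocess

pr, pw = os.pipe()
proc = subprocess.Popen(["echo", "hello"], stdout=pw, stderr=pw)
os.close(pw)                  # close the parent's copy, or read() never sees EOF
with open(pr, "rb", 0) as reader:
    stdio = reader.read()     # everything the child wrote to fd 1 and fd 2
proc.wait()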
Example #10
    def _testPipeLoggerToPipe(self, test_string, loop=None):
        """
        Test PipeLogger writing to a pipe connected to a PipeReader.
        This verifies that PipeLogger does not deadlock when writing
        to a pipe that's drained by a PipeReader running in the same
        process (requires non-blocking write).
        """

        producer = PopenProcess(proc=subprocess.Popen(
            ["bash", "-c", self._echo_cmd % test_string],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT),
                                scheduler=loop)

        pr, pw = os.pipe()

        consumer = producer.pipe_reader = PipeLogger(
            background=True,
            input_fd=producer.proc.stdout,
            log_file_path=os.fdopen(pw, 'wb', 0))

        reader = _reader(pr, loop=loop)
        yield producer.async_start()
        content = yield reader
        yield producer.async_wait()
        yield consumer.async_wait()

        self.assertEqual(producer.returncode, os.EX_OK)
        self.assertEqual(consumer.returncode, os.EX_OK)

        coroutine_return(content.decode('ascii', 'replace'))
Example #11
    def _start(self):
        pr, pw = os.pipe()
        self.fd_pipes = {}
        self.fd_pipes[pw] = pw
        self._async_func_reader_pw = pw
        self._async_func_reader = PipeReader(input_files={"input": pr},
                                             scheduler=self.scheduler)
        self._async_func_reader.addExitListener(self._async_func_reader_exit)
        self._async_func_reader.start()
        ForkProcess._start(self)
        os.close(pw)
Example #12
	def _pipe(self, fd_pipes):
		"""When appropriate, use a pty so that fetcher progress bars,
		like wget has, will work properly."""
		if self.background or not sys.stdout.isatty():
			# When the output only goes to a log file,
			# there's no point in creating a pty.
			return os.pipe()
		stdout_pipe = fd_pipes.get(1)
		got_pty, master_fd, slave_fd = \
			_create_pty_or_pipe(copy_term_size=stdout_pipe)
		return (master_fd, slave_fd)
Example #13
    def _pipe(self, fd_pipes):
        """When appropriate, use a pty so that fetcher progress bars,
        like wget has, will work properly."""
        if self.background or not sys.stdout.isatty():
            # When the output only goes to a log file,
            # there's no point in creating a pty.
            return os.pipe()
        stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            _create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)
Example #14
	def _start(self):
		in_pr, in_pw = os.pipe()
		out_pr, out_pw = os.pipe()
		self._files = {}
		self._files['pipe_in'] = in_pr
		self._files['pipe_out'] = out_pw
		fcntl.fcntl(in_pr, fcntl.F_SETFL,
			fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)
		self._reg_id = self.scheduler.register(in_pr,
			self.scheduler.IO_IN, self._output_handler)
		self._registered = True
		self._proc = SpawnProcess(
			args=[portage._python_interpreter,
				os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
				env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
				fd_pipes={0:out_pr, 1:in_pw, 2:sys.stderr.fileno()},
				scheduler=self.scheduler)
		self._proc.addExitListener(self._proc_exit)
		self._proc.start()
		os.close(out_pr)
		os.close(in_pw)
Example #15
	def _start(self):
		pr, pw = os.pipe()
		self.fd_pipes = {}
		self.fd_pipes[pw] = pw
		self._digest_pw = pw
		self._digest_pipe_reader = PipeReader(
			input_files={"input":pr},
			scheduler=self.scheduler)
		self._digest_pipe_reader.addExitListener(self._digest_pipe_reader_exit)
		self._digest_pipe_reader.start()
		ForkProcess._start(self)
		os.close(pw)
Example #16
    def testLazyImportPortageBaseline(self):
        """
        Check what modules are imported by a baseline module import.
        """

        env = os.environ.copy()
        pythonpath = env.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is None:
            pythonpath = ""
        else:
            pythonpath = ":" + pythonpath
        pythonpath = PORTAGE_PYM_PATH + pythonpath
        env["PYTHONPATH"] = pythonpath

        # If python is patched to insert the path of the
        # currently installed portage module into sys.path,
        # then the above PYTHONPATH override doesn't help.
        env["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH

        scheduler = global_event_loop()
        master_fd, slave_fd = os.pipe()
        master_file = os.fdopen(master_fd, "rb", 0)
        slave_file = os.fdopen(slave_fd, "wb")
        producer = SpawnProcess(
            args=self._baseline_import_cmd,
            env=env,
            fd_pipes={1: slave_fd},
            scheduler=scheduler,
        )
        producer.start()
        slave_file.close()

        consumer = PipeReader(input_files={"producer": master_file},
                              scheduler=scheduler)

        consumer.start()
        consumer.wait()
        self.assertEqual(producer.wait(), os.EX_OK)
        self.assertEqual(consumer.wait(), os.EX_OK)

        output = consumer.getvalue().decode("ascii", "replace").split()

        unexpected_modules = " ".join(
            sorted(x for x in output if self._module_re.match(x) is not None
                   and x not in self._baseline_imports))

        self.assertEqual("", unexpected_modules)
Example #17
    def _testAsyncFunctionStdin(self, loop=None):
        test_string = '1\n2\n3\n'
        pr, pw = os.pipe()
        fd_pipes = {0: pr}
        reader = AsyncFunction(scheduler=loop,
                               fd_pipes=fd_pipes,
                               target=self._read_from_stdin,
                               args=(pw, ))
        reader.start()
        os.close(pr)
        _set_nonblocking(pw)
        with open(pw, mode='wb', buffering=0) as pipe_write:
            yield _writer(pipe_write, test_string.encode('utf_8'), loop=loop)
        self.assertEqual((yield reader.async_wait()), os.EX_OK)
        self.assertEqual(reader.result, test_string)
Example #18
    async def _testAsyncFunctionStdin(self, loop):
        test_string = "1\n2\n3\n"
        pr, pw = os.pipe()
        fd_pipes = {0: pr}
        reader = AsyncFunction(scheduler=loop,
                               fd_pipes=fd_pipes,
                               target=self._read_from_stdin,
                               args=(pw, ))
        reader.start()
        os.close(pr)
        _set_nonblocking(pw)
        with open(pw, mode="wb", buffering=0) as pipe_write:
            await _writer(pipe_write, test_string.encode("utf_8"))
        self.assertEqual((await reader.async_wait()), os.EX_OK)
        self.assertEqual(reader.result, test_string)
Example #19
	def testLazyImportPortageBaseline(self):
		"""
		Check what modules are imported by a baseline module import.
		"""

		env = os.environ.copy()
		pythonpath = env.get('PYTHONPATH')
		if pythonpath is not None and not pythonpath.strip():
			pythonpath = None
		if pythonpath is None:
			pythonpath = ''
		else:
			pythonpath = ':' + pythonpath
		pythonpath = PORTAGE_PYM_PATH + pythonpath
		env['PYTHONPATH'] = pythonpath

		# If python is patched to insert the path of the
		# currently installed portage module into sys.path,
		# then the above PYTHONPATH override doesn't help.
		env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH

		scheduler = PollScheduler().sched_iface
		master_fd, slave_fd = os.pipe()
		master_file = os.fdopen(master_fd, 'rb', 0)
		slave_file = os.fdopen(slave_fd, 'wb')
		producer = SpawnProcess(
			args=self._baseline_import_cmd,
			env=env, fd_pipes={1:slave_fd},
			scheduler=scheduler)
		producer.start()
		slave_file.close()

		consumer = PipeReader(
			input_files={"producer" : master_file},
			scheduler=scheduler)

		consumer.start()
		consumer.wait()
		self.assertEqual(producer.wait(), os.EX_OK)
		self.assertEqual(consumer.wait(), os.EX_OK)

		output = consumer.getvalue().decode('ascii', 'replace').split()

		unexpected_modules = " ".join(sorted(x for x in output \
			if self._module_re.match(x) is not None and \
			x not in self._baseline_imports))

		self.assertEqual("", unexpected_modules)
Example #20
	def _start(self):
		pr, pw = os.pipe()
		self._files = {}
		self._files['pipe_read'] = pr
		self._files['pipe_write'] = pw
		for f in self._files.values():
			fcntl.fcntl(f, fcntl.F_SETFL,
				fcntl.fcntl(f, fcntl.F_GETFL) | os.O_NONBLOCK)
		self._reg_id = self.scheduler.register(self._files['pipe_read'],
			self.scheduler.IO_IN, self._output_handler)
		self._registered = True
		threading_mod = threading
		if self._force_dummy:
			threading_mod = dummy_threading
		self._thread = threading_mod.Thread(target=self._run_lock)
		self._thread.start()
Example #21
	def _start(self):
		pr, pw = os.pipe()
		self._files = {}
		self._files['pipe_read'] = pr
		self._files['pipe_write'] = pw
		for f in self._files.values():
			fcntl.fcntl(f, fcntl.F_SETFL,
				fcntl.fcntl(f, fcntl.F_GETFL) | os.O_NONBLOCK)
		self._reg_id = self.scheduler.io_add_watch(self._files['pipe_read'],
			self.scheduler.IO_IN, self._output_handler)
		self._registered = True
		threading_mod = threading
		if self._force_dummy:
			threading_mod = dummy_threading
		self._thread = threading_mod.Thread(target=self._run_lock)
		self._thread.start()
Example #22
	def _start(self):
		pr, pw = os.pipe()
		self._files = {}
		self._files['pipe_read'] = os.fdopen(pr, 'rb', 0)
		self._files['pipe_write'] = os.fdopen(pw, 'wb', 0)
		for k, f in self._files.items():
			fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
				fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
		self._reg_id = self.scheduler.register(self._files['pipe_read'].fileno(),
			PollConstants.POLLIN, self._output_handler)
		self._registered = True
		threading_mod = threading
		if self._force_dummy:
			threading_mod = dummy_threading
		self._thread = threading_mod.Thread(target=self._run_lock)
		self._thread.start()
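The last three examples all use the pipe as a wakeup channel between a worker thread and the event loop: the read end is registered with the scheduler, and the thread running the blocking lock work can signal readiness just by writing a byte to the write end. A minimal asyncio sketch of that handshake (the blocking work is simulated; only the signaling matters here):

import asyncio
import os
import threading

loop = asyncio.new_event_loop()
pr, pw = os.pipe()
done = loop.create_future()

def on_readable():
    os.read(pr, 1)
    done.set_result(True)

loop.add_reader(pr, on_readable)

def run_lock():
    # ... blocking work, e.g. acquiring a file lock ...
    os.write(pw, b"\0")        # one byte is enough to wake the loop

threading.Thread(target=run_lock).start()
loop.run_until_complete(done)
loop.remove_reader(pr)
os.close(pr)
os.close(pw)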
Example #23
    def _testPipeReader(self, test_string):
        """
        Use a poll loop to read data from a pipe and assert that
        the data written to the pipe is identical to the data
        read from the pipe.
        """

        if self._use_pty:
            got_pty, master_fd, slave_fd = _create_pty_or_pipe()
            if not got_pty:
                os.close(slave_fd)
                os.close(master_fd)
                skip_reason = "pty not acquired"
                self.portage_skip = skip_reason
                self.fail(skip_reason)
                return
        else:
            master_fd, slave_fd = os.pipe()

        # WARNING: It is very important to use unbuffered mode here,
        # in order to avoid issue 5380 with python3.
        master_file = os.fdopen(master_fd, 'rb', 0)
        slave_file = os.fdopen(slave_fd, 'wb', 0)
        task_scheduler = TaskScheduler(max_jobs=2)
        producer = SpawnProcess(
            args=["bash", "-c", self._echo_cmd % test_string],
            env=os.environ,
            fd_pipes={1: slave_fd},
            scheduler=task_scheduler.sched_iface)
        task_scheduler.add(producer)
        slave_file.close()

        consumer = PipeReader(input_files={"producer": master_file},
                              scheduler=task_scheduler.sched_iface,
                              _use_array=self._use_array)

        task_scheduler.add(consumer)

        # This will ensure that both tasks have exited, which
        # is necessary to avoid "ResourceWarning: unclosed file"
        # warnings since Python 3.2 (and also ensures that we
        # don't leave any zombie child processes).
        task_scheduler.run()
        self.assertEqual(producer.returncode, os.EX_OK)
        self.assertEqual(consumer.returncode, os.EX_OK)

        return consumer.getvalue().decode('ascii', 'replace')
Example #24
	def _testPipeReader(self, test_string):
		"""
		Use a poll loop to read data from a pipe and assert that
		the data written to the pipe is identical to the data
		read from the pipe.
		"""

		if self._use_pty:
			got_pty, master_fd, slave_fd = _create_pty_or_pipe()
			if not got_pty:
				os.close(slave_fd)
				os.close(master_fd)
				skip_reason = "pty not acquired"
				self.portage_skip = skip_reason
				self.fail(skip_reason)
				return
		else:
			master_fd, slave_fd = os.pipe()

		# WARNING: It is very important to use unbuffered mode here,
		# in order to avoid issue 5380 with python3.
		master_file = os.fdopen(master_fd, 'rb', 0)
		slave_file = os.fdopen(slave_fd, 'wb', 0)
		task_scheduler = TaskScheduler(max_jobs=2)
		producer = SpawnProcess(
			args=["bash", "-c", self._echo_cmd % test_string],
			env=os.environ, fd_pipes={1:slave_fd},
			scheduler=task_scheduler.sched_iface)
		task_scheduler.add(producer)
		slave_file.close()

		consumer = PipeReader(
			input_files={"producer" : master_file},
			scheduler=task_scheduler.sched_iface, _use_array=self._use_array)

		task_scheduler.add(consumer)

		# This will ensure that both tasks have exited, which
		# is necessary to avoid "ResourceWarning: unclosed file"
		# warnings since Python 3.2 (and also ensures that we
		# don't leave any zombie child processes).
		task_scheduler.run()
		self.assertEqual(producer.returncode, os.EX_OK)
		self.assertEqual(consumer.returncode, os.EX_OK)

		return consumer.getvalue().decode('ascii', 'replace')
Example #25
	def _testPipeReader(self, test_string):
		"""
		Use a poll loop to read data from a pipe and assert that
		the data written to the pipe is identical to the data
		read from the pipe.
		"""

		if self._use_pty:
			got_pty, master_fd, slave_fd = _create_pty_or_pipe()
			if not got_pty:
				os.close(slave_fd)
				os.close(master_fd)
				skip_reason = "pty not acquired"
				self.portage_skip = skip_reason
				self.fail(skip_reason)
				return
		else:
			master_fd, slave_fd = os.pipe()

		# WARNING: It is very important to use unbuffered mode here,
		# in order to avoid issue 5380 with python3.
		master_file = os.fdopen(master_fd, 'rb', 0)
		scheduler = global_event_loop()

		consumer = PipeReader(
			input_files={"producer" : master_file},
			_use_array=self._use_array,
			scheduler=scheduler)

		producer = PopenProcess(
			pipe_reader=consumer,
			proc=subprocess.Popen(["bash", "-c", self._echo_cmd % test_string],
				stdout=slave_fd),
			scheduler=scheduler)

		producer.start()
		os.close(slave_fd)
		producer.wait()

		self.assertEqual(producer.returncode, os.EX_OK)
		self.assertEqual(consumer.returncode, os.EX_OK)

		return consumer.getvalue().decode('ascii', 'replace')
Example #26
	def testPipeReader(self):
		"""
		Use a poll loop to read data from a pipe and assert that
		the data written to the pipe is identical to the data
		read from the pipe.
		"""

		test_string = 2 * "blah blah blah\n"

		scheduler = PollScheduler().sched_iface
		master_fd, slave_fd = os.pipe()
		master_file = os.fdopen(master_fd, 'rb', 0)
		slave_file = os.fdopen(slave_fd, 'wb')
		producer = SpawnProcess(
			args=["bash", "-c", "echo -n '%s'" % test_string],
			env=os.environ, fd_pipes={1:slave_fd},
			scheduler=scheduler)
		producer.start()
		slave_file.close()

		consumer = PipeReader(
			input_files={"producer" : master_file},
			scheduler=scheduler)

		consumer.start()

		# This will ensure that both tasks have exited, which
		# is necessary to avoid "ResourceWarning: unclosed file"
		# warnings since Python 3.2 (and also ensures that we
		# don't leave any zombie child processes).
		scheduler.schedule()
		self.assertEqual(producer.returncode, os.EX_OK)
		self.assertEqual(consumer.returncode, os.EX_OK)

		output = consumer.getvalue().decode('ascii', 'replace')
		self.assertEqual(test_string, output)
Example #27
def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
          uid=None, gid=None, groups=None, umask=None, logfile=None,
          path_lookup=True, pre_exec=None):
	"""
	Spawns a given command.
	
	@param mycommand: the command to execute
	@type mycommand: String or List (Popen style list)
	@param env: A dict of Key=Value pairs for env variables
	@type env: Dictionary
	@param opt_name: an optional name for the spawn'd process (defaults to the binary name)
	@type opt_name: String
	@param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout } for example
	@type fd_pipes: Dictionary
	@param returnpid: Return the Process IDs for a successful spawn.
	NOTE: This requires the caller clean up all the PIDs, otherwise spawn will clean them.
	@type returnpid: Boolean
	@param uid: User ID to spawn as; useful for dropping privileges
	@type uid: Integer
	@param gid: Group ID to spawn as; useful for dropping privileges
	@type gid: Integer
	@param groups: Group IDs to spawn in; useful for having the process run in multiple group contexts.
	@type groups: List
	@param umask: An integer representing the umask for the process (see man chmod for umask details)
	@type umask: Integer
	@param logfile: name of a file to use for logging purposes
	@type logfile: String
	@param path_lookup: If the binary is not fully specified then look for it in PATH
	@type path_lookup: Boolean
	@param pre_exec: A function to be called with no arguments just prior to the exec call.
	@type pre_exec: callable
	
	logfile requires stdout and stderr to be assigned to this process
	   (i.e. not pointed somewhere else).
	
	"""

	# mycommand is either a str or a list
	if isinstance(mycommand, basestring):
		mycommand = mycommand.split()

	if sys.hexversion < 0x3000000:
		# Avoid a potential UnicodeEncodeError from os.execve().
		env_bytes = {}
		for k, v in env.items():
			env_bytes[_unicode_encode(k, encoding=_encodings['content'])] = \
				_unicode_encode(v, encoding=_encodings['content'])
		env = env_bytes
		del env_bytes

	# If an absolute path to an executable file isn't given
	# search for it unless we've been told not to.
	binary = mycommand[0]
	if binary not in (BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY) and \
		(not os.path.isabs(binary) or not os.path.isfile(binary)
	    or not os.access(binary, os.X_OK)):
		binary = path_lookup and find_binary(binary) or None
		if not binary:
			raise CommandNotFound(mycommand[0])

	# If we haven't been told what file descriptors to use
	# default to propagating our stdin, stdout and stderr.
	if fd_pipes is None:
		fd_pipes = {
			0:sys.__stdin__.fileno(),
			1:sys.__stdout__.fileno(),
			2:sys.__stderr__.fileno(),
		}

	# mypids will hold the pids of all processes created.
	mypids = []

	if logfile:
		# Using a log file requires that stdout and stderr
		# are assigned to the process we're running.
		if 1 not in fd_pipes or 2 not in fd_pipes:
			raise ValueError(fd_pipes)

		# Create a pipe
		(pr, pw) = os.pipe()

		# Create a tee process, giving it our stdout and stderr
		# as well as the read end of the pipe.
		mypids.extend(spawn(('tee', '-i', '-a', logfile),
		              returnpid=True, fd_pipes={0:pr,
		              1:fd_pipes[1], 2:fd_pipes[2]}))

		# We don't need the read end of the pipe, so close it.
		os.close(pr)

		# Assign the write end of the pipe to our stdout and stderr.
		fd_pipes[1] = pw
		fd_pipes[2] = pw

	pid = os.fork()

	if pid == 0:
		try:
			_exec(binary, mycommand, opt_name, fd_pipes,
			      env, gid, groups, uid, umask, pre_exec)
		except SystemExit:
			raise
		except Exception as e:
			# We need to catch _any_ exception so that it doesn't
			# propagate out of this function and cause exiting
			# with anything other than os._exit()
			sys.stderr.write("%s:\n   %s\n" % (e, " ".join(mycommand)))
			traceback.print_exc()
			sys.stderr.flush()
			os._exit(1)

	if not isinstance(pid, int):
		raise AssertionError("fork returned non-integer: %s" % (repr(pid),))

	# Add the pid to our local and the global pid lists.
	mypids.append(pid)
	spawned_pids.append(pid)

	# If we started a tee process the write side of the pipe is no
	# longer needed, so close it.
	if logfile:
		os.close(pw)

	# If the caller wants to handle cleaning up the processes, we tell
	# it about all processes that were created.
	if returnpid:
		return mypids

	# Otherwise we clean them up.
	while mypids:

		# Pull the last reader in the pipe chain. If all processes
		# in the pipe are well behaved, it will die when the process
		# it is reading from dies.
		pid = mypids.pop(0)

		# and wait for it.
		retval = os.waitpid(pid, 0)[1]

		# When it's done, we can remove it from the
		# global pid list as well.
		spawned_pids.remove(pid)

		if retval:
			# If it failed, kill off anything else that
			# isn't dead yet.
			for pid in mypids:
				# With waitpid and WNOHANG, only check the
				# first element of the tuple since the second
				# element may vary (bug #337465).
				if os.waitpid(pid, os.WNOHANG)[0] == 0:
					os.kill(pid, signal.SIGTERM)
					os.waitpid(pid, 0)
				spawned_pids.remove(pid)

			# If it got a signal, return the signal that was sent.
			if (retval & 0xff):
				return ((retval & 0xff) << 8)

			# Otherwise, return its exit code.
			return (retval >> 8)

	# Everything succeeded
	return 0
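The (retval & 0xff) and (retval >> 8) expressions above decode the status word from os.waitpid() by hand: the low byte holds the signal that killed the child (if any) and the next byte holds its exit code. A small self-contained check of that encoding, with the os.WIF*/os.W* helpers as the readable equivalent:

import os
import signal

pid = os.fork()
if pid == 0:
    signal.pause()             # child sleeps until a signal arrives
    os._exit(0)
os.kill(pid, signal.SIGTERM)
_, status = os.waitpid(pid, 0)

assert (status & 0xff) == signal.SIGTERM             # manual decode
assert os.WIFSIGNALED(status)
assert os.WTERMSIG(status) == signal.SIGTERM         # library decode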
Example #28
	def _start(self):
		settings = self.settings
		settings.setcpv(self.cpv)
		ebuild_path = self.ebuild_hash.location

		# the caller can pass in eapi in order to avoid
		# redundant _parse_eapi_ebuild_head calls
		eapi = self.eapi
		if eapi is None and \
			'parse-eapi-ebuild-head' in settings.features:
			with io.open(_unicode_encode(ebuild_path,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['repo.content'],
				errors='replace') as f:
				eapi = portage._parse_eapi_ebuild_head(f)

		if eapi is not None:
			if not portage.eapi_is_supported(eapi):
				self.metadata = self.metadata_callback(self.cpv,
					self.repo_path, {'EAPI' : eapi}, self.ebuild_hash)
				self._set_returncode((self.pid, os.EX_OK << 8))
				self.wait()
				return

			settings.configdict['pkg']['EAPI'] = eapi

		debug = settings.get("PORTAGE_DEBUG") == "1"
		master_fd = None
		slave_fd = None
		fd_pipes = None
		if self.fd_pipes is not None:
			fd_pipes = self.fd_pipes.copy()
		else:
			fd_pipes = {}

		null_input = open('/dev/null', 'rb')
		fd_pipes.setdefault(0, null_input.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.values():
			if fd == sys.stdout.fileno():
				sys.stdout.flush()
			if fd == sys.stderr.fileno():
				sys.stderr.flush()

		fd_pipes_orig = fd_pipes.copy()
		self._files = self._files_dict()
		files = self._files

		master_fd, slave_fd = os.pipe()
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		fd_pipes[self._metadata_fd] = slave_fd

		self._raw_metadata = []
		files.ebuild = os.fdopen(master_fd, 'rb', 0)
		self._reg_id = self.scheduler.register(files.ebuild.fileno(),
			self._registered_events, self._output_handler)
		self._registered = True

		retval = portage.doebuild(ebuild_path, "depend",
			settings=settings, debug=debug,
			mydbapi=self.portdb, tree="porttree",
			fd_pipes=fd_pipes, returnpid=True)

		os.close(slave_fd)
		null_input.close()

		if isinstance(retval, int):
			# doebuild failed before spawning
			self._unregister()
			self._set_returncode((self.pid, retval << 8))
			self.wait()
			return

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)
Example #29
    def _spawn(self, args, fd_pipes, **kwargs):
        """
        Fork a subprocess, apply local settings, and call
        dblink.merge(). TODO: Share code with ForkProcess.
        """

        elog_reader_fd, elog_writer_fd = os.pipe()
        fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
                    fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        blockers = None
        if self.blockers is not None:
            # Query blockers in the main process, since closing
            # of file descriptors in the subprocess can prevent
            # access to open database connections such as that
            # used by the sqlite metadata cache module.
            blockers = self.blockers()
        mylink = portage.dblink(self.mycat,
                                self.mypkg,
                                settings=self.settings,
                                treetype=self.treetype,
                                vartree=self.vartree,
                                blockers=blockers,
                                pipe=elog_writer_fd)
        fd_pipes[elog_writer_fd] = elog_writer_fd
        self._elog_reg_id = self.scheduler.io_add_watch(
            elog_reader_fd, self._registered_events, self._elog_output_handler)

        # If a concurrent emerge process tries to install a package
        # in the same SLOT as this one at the same time, there is an
        # extremely unlikely chance that the COUNTER values will not be
        # ordered correctly unless we lock the vdb here.
        # FEATURES=parallel-install skips this lock in order to
        # improve performance, and the risk is practically negligible.
        self._lock_vdb()
        counter = None
        if not self.unmerge:
            counter = self.vartree.dbapi.counter_tick()

        parent_pid = os.getpid()
        pid = None
        try:
            pid = os.fork()

            if pid != 0:
                if not isinstance(pid, int):
                    raise AssertionError("fork returned non-integer: %s" %
                                         (repr(pid), ))

                os.close(elog_writer_fd)
                self._elog_reader_fd = elog_reader_fd
                self._buf = ""
                self._elog_keys = set()

                # invalidate relevant vardbapi caches
                if self.vartree.dbapi._categories is not None:
                    self.vartree.dbapi._categories = None
                self.vartree.dbapi._pkgs_changed = True
                self.vartree.dbapi._clear_pkg_cache(mylink)

                portage.process.spawned_pids.append(pid)
                return [pid]

            os.close(elog_reader_fd)

            # Use default signal handlers in order to avoid problems
            # killing subprocesses as reported in bug #353239.
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)

            portage.locks._close_fds()
            # We don't exec, so use close_fds=False
            # (see _setup_pipes docstring).
            portage.process._setup_pipes(fd_pipes, close_fds=False)

            portage.output.havecolor = self.settings.get('NOCOLOR') \
                not in ('yes', 'true')

            # Avoid wasteful updates of the vdb cache.
            self.vartree.dbapi._flush_cache_enabled = False

            # In this subprocess we don't want PORTAGE_BACKGROUND to
            # suppress stdout/stderr output since they are pipes. We
            # also don't want to open PORTAGE_LOG_FILE, since it will
            # already be opened by the parent process, so we set the
            # "subprocess" value for use in conditional logging code
            # involving PORTAGE_LOG_FILE.
            if not self.unmerge:
                # unmerge phases have separate logs
                if self.settings.get("PORTAGE_BACKGROUND") == "1":
                    self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
                else:
                    self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
                self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
            self.settings["PORTAGE_BACKGROUND"] = "subprocess"
            self.settings.backup_changes("PORTAGE_BACKGROUND")

            rval = 1
            try:
                if self.unmerge:
                    if not mylink.exists():
                        rval = os.EX_OK
                    elif mylink.unmerge(
                            ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
                        mylink.lockdb()
                        try:
                            mylink.delete()
                        finally:
                            mylink.unlockdb()
                        rval = os.EX_OK
                else:
                    rval = mylink.merge(self.pkgloc,
                                        self.infloc,
                                        myebuild=self.myebuild,
                                        mydbapi=self.mydbapi,
                                        prev_mtimes=self.prev_mtimes,
                                        counter=counter)
            except SystemExit:
                raise
            except:
                traceback.print_exc()
            finally:
                os._exit(rval)

        finally:
            if pid == 0 or (pid is None and os.getpid() != parent_pid):
                # Call os._exit() from a finally block in order
                # to suppress any finally blocks from earlier
                # in the call stack (see bug #345289). This
                # finally block has to be setup before the fork
                # in order to avoid a race condition.
                os._exit(1)
Example #30
File: process.py Project: gentoo/portage
def spawn(mycommand, env=None, opt_name=None, fd_pipes=None, returnpid=False,
          uid=None, gid=None, groups=None, umask=None, cwd=None, logfile=None,
          path_lookup=True, pre_exec=None,
          close_fds=(sys.version_info < (3, 4)), unshare_net=False,
          unshare_ipc=False, unshare_mount=False, unshare_pid=False,
          cgroup=None):
	"""
	Spawns a given command.
	
	@param mycommand: the command to execute
	@type mycommand: String or List (Popen style list)
	@param env: If env is not None, it must be a mapping that defines the environment
		variables for the new process; these are used instead of the default behavior
		of inheriting the current process's environment.
	@type env: None or Mapping
	@param opt_name: an optional name for the spawn'd process (defaults to the binary name)
	@type opt_name: String
	@param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout } for example
		(default is {0:stdin, 1:stdout, 2:stderr})
	@type fd_pipes: Dictionary
	@param returnpid: Return the Process IDs for a successful spawn.
	NOTE: This requires the caller clean up all the PIDs, otherwise spawn will clean them.
	@type returnpid: Boolean
	@param uid: User ID to spawn as; useful for dropping privileges
	@type uid: Integer
	@param gid: Group ID to spawn as; useful for dropping privileges
	@type gid: Integer
	@param groups: Group IDs to spawn in; useful for having the process run in multiple group contexts.
	@type groups: List
	@param umask: An integer representing the umask for the process (see man chmod for umask details)
	@type umask: Integer
	@param cwd: Current working directory
	@type cwd: String
	@param logfile: name of a file to use for logging purposes
	@type logfile: String
	@param path_lookup: If the binary is not fully specified then look for it in PATH
	@type path_lookup: Boolean
	@param pre_exec: A function to be called with no arguments just prior to the exec call.
	@type pre_exec: callable
	@param close_fds: If True, then close all file descriptors except those
		referenced by fd_pipes (default is True for python3.3 and earlier, and False for
		python3.4 and later due to non-inheritable file descriptor behavior from PEP 446).
	@type close_fds: Boolean
	@param unshare_net: If True, networking will be unshared from the spawned process
	@type unshare_net: Boolean
	@param unshare_ipc: If True, IPC will be unshared from the spawned process
	@type unshare_ipc: Boolean
	@param unshare_mount: If True, mount namespace will be unshared and mounts will
		be private to the namespace
	@type unshare_mount: Boolean
	@param unshare_pid: If True, PID ns will be unshared from the spawned process
	@type unshare_pid: Boolean
	@param cgroup: CGroup path to bind the process to
	@type cgroup: String

	logfile requires stdout and stderr to be assigned to this process
	   (i.e. not pointed somewhere else).
	
	"""

	# mycommand is either a str or a list
	if isinstance(mycommand, basestring):
		mycommand = mycommand.split()

	env = os.environ if env is None else env

	if sys.hexversion < 0x3000000:
		# Avoid a potential UnicodeEncodeError from os.execve().
		env_bytes = {}
		for k, v in env.items():
			env_bytes[_unicode_encode(k, encoding=_encodings['content'])] = \
				_unicode_encode(v, encoding=_encodings['content'])
		env = env_bytes
		del env_bytes

	# If an absolute path to an executable file isn't given
	# search for it unless we've been told not to.
	binary = mycommand[0]
	if binary not in (BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY) and \
		(not os.path.isabs(binary) or not os.path.isfile(binary)
	    or not os.access(binary, os.X_OK)):
		binary = path_lookup and find_binary(binary) or None
		if not binary:
			raise CommandNotFound(mycommand[0])

	# If we haven't been told what file descriptors to use
	# default to propagating our stdin, stdout and stderr.
	if fd_pipes is None:
		fd_pipes = {
			0:portage._get_stdin().fileno(),
			1:sys.__stdout__.fileno(),
			2:sys.__stderr__.fileno(),
		}

	# mypids will hold the pids of all processes created.
	mypids = []

	if logfile:
		# Using a log file requires that stdout and stderr
		# are assigned to the process we're running.
		if 1 not in fd_pipes or 2 not in fd_pipes:
			raise ValueError(fd_pipes)

		# Create a pipe
		(pr, pw) = os.pipe()

		# Create a tee process, giving it our stdout and stderr
		# as well as the read end of the pipe.
		mypids.extend(spawn(('tee', '-i', '-a', logfile),
		              returnpid=True, fd_pipes={0:pr,
		              1:fd_pipes[1], 2:fd_pipes[2]}))

		# We don't need the read end of the pipe, so close it.
		os.close(pr)

		# Assign the write end of the pipe to our stdout and stderr.
		fd_pipes[1] = pw
		fd_pipes[2] = pw

	# This caches the libc library lookup and _unshare_validator results
	# in the current process, so that results are cached for use in
	# child processes.
	unshare_flags = 0
	if unshare_net or unshare_ipc or unshare_mount or unshare_pid:
		# from /usr/include/bits/sched.h
		CLONE_NEWNS = 0x00020000
		CLONE_NEWIPC = 0x08000000
		CLONE_NEWPID = 0x20000000
		CLONE_NEWNET = 0x40000000

		if unshare_net:
			unshare_flags |= CLONE_NEWNET
		if unshare_ipc:
			unshare_flags |= CLONE_NEWIPC
		if unshare_mount:
			# NEWNS = mount namespace
			unshare_flags |= CLONE_NEWNS
		if unshare_pid:
			# we also need mount namespace for slave /proc
			unshare_flags |= CLONE_NEWPID | CLONE_NEWNS

		_unshare_validate(unshare_flags)

	# Force instantiation of portage.data.userpriv_groups before the
	# fork, so that the result is cached in the main process.
	bool(groups)

	parent_pid = os.getpid()
	pid = None
	try:
		pid = os.fork()

		if pid == 0:
			try:
				_exec(binary, mycommand, opt_name, fd_pipes,
					env, gid, groups, uid, umask, cwd, pre_exec, close_fds,
					unshare_net, unshare_ipc, unshare_mount, unshare_pid,
					unshare_flags, cgroup)
			except SystemExit:
				raise
			except Exception as e:
				# We need to catch _any_ exception so that it doesn't
				# propagate out of this function and cause exiting
				# with anything other than os._exit()
				writemsg("%s:\n   %s\n" % (e, " ".join(mycommand)),
					noiselevel=-1)
				traceback.print_exc()
				sys.stderr.flush()

	finally:
		if pid == 0 or (pid is None and os.getpid() != parent_pid):
			# Call os._exit() from a finally block in order
			# to suppress any finally blocks from earlier
			# in the call stack (see bug #345289). This
			# finally block has to be setup before the fork
			# in order to avoid a race condition.
			os._exit(1)

	if not isinstance(pid, int):
		raise AssertionError("fork returned non-integer: %s" % (repr(pid),))

	# Add the pid to our local and the global pid lists.
	mypids.append(pid)

	# If we started a tee process the write side of the pipe is no
	# longer needed, so close it.
	if logfile:
		os.close(pw)

	# If the caller wants to handle cleaning up the processes, we tell
	# it about all processes that were created.
	if returnpid:
		return mypids

	# Otherwise we clean them up.
	while mypids:

		# Pull the last reader in the pipe chain. If all processes
		# in the pipe are well behaved, it will die when the process
		# it is reading from dies.
		pid = mypids.pop(0)

		# and wait for it.
		retval = os.waitpid(pid, 0)[1]

		if retval:
			# If it failed, kill off anything else that
			# isn't dead yet.
			for pid in mypids:
				# With waitpid and WNOHANG, only check the
				# first element of the tuple since the second
				# element may vary (bug #337465).
				if os.waitpid(pid, os.WNOHANG)[0] == 0:
					os.kill(pid, signal.SIGTERM)
					os.waitpid(pid, 0)

			# If it got a signal, return the signal that was sent.
			if (retval & 0xff):
				return ((retval & 0xff) << 8)

			# Otherwise, return its exit code.
			return (retval >> 8)

	# Everything succeeded
	return 0
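The logfile branch above is a classic tee pipeline: a tee child reads the pipe and duplicates everything to the log file and the original stdout, while the real command's fd 1 and fd 2 are pointed at the pipe's write end. A hedged standard-library sketch of the same wiring (the log path is made up for illustration):

import os
import subprocess

pr, pw = os.pipe()
tee = subprocess.Popen(["tee", "-i", "-a", "/tmp/build.log"], stdin=pr)
os.close(pr)                   # only tee needs the read end now
cmd = subprocess.Popen(["echo", "compiling..."], stdout=pw, stderr=pw)
os.close(pw)                   # so tee sees EOF once cmd exits
cmd.wait()
tee.wait()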
Example #31
	def _create_pipe(self):
		return os.pipe()
Example #32
    def _start(self):
        settings = self.settings
        settings.setcpv(self.cpv)
        ebuild_path = self.ebuild_path

        eapi = None
        if 'parse-eapi-glep-55' in settings.features:
            pf, eapi = portage._split_ebuild_name_glep55(
                os.path.basename(ebuild_path))
        if eapi is None and \
                'parse-eapi-ebuild-head' in settings.features:
            eapi = portage._parse_eapi_ebuild_head(
                codecs.open(_unicode_encode(ebuild_path,
                                            encoding=_encodings['fs'],
                                            errors='strict'),
                            mode='r',
                            encoding=_encodings['repo.content'],
                            errors='replace'))

        if eapi is not None:
            if not portage.eapi_is_supported(eapi):
                self.metadata_callback(self.cpv, self.ebuild_path,
                                       self.repo_path, {'EAPI': eapi},
                                       self.ebuild_mtime)
                self.returncode = os.EX_OK
                self.wait()
                return

            settings.configdict['pkg']['EAPI'] = eapi

        debug = settings.get("PORTAGE_DEBUG") == "1"
        master_fd = None
        slave_fd = None
        fd_pipes = None
        if self.fd_pipes is not None:
            fd_pipes = self.fd_pipes.copy()
        else:
            fd_pipes = {}

        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.values():
            if fd == sys.stdout.fileno():
                sys.stdout.flush()
            if fd == sys.stderr.fileno():
                sys.stderr.flush()

        fd_pipes_orig = fd_pipes.copy()
        self._files = self._files_dict()
        files = self._files

        master_fd, slave_fd = os.pipe()
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
                    fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        fd_pipes[self._metadata_fd] = slave_fd

        self._raw_metadata = []
        files.ebuild = os.fdopen(master_fd, 'rb')
        self._reg_id = self.scheduler.register(files.ebuild.fileno(),
                                               self._registered_events,
                                               self._output_handler)
        self._registered = True

        retval = portage.doebuild(ebuild_path,
                                  "depend",
                                  settings["ROOT"],
                                  settings,
                                  debug,
                                  mydbapi=self.portdb,
                                  tree="porttree",
                                  fd_pipes=fd_pipes,
                                  returnpid=True)

        os.close(slave_fd)

        if isinstance(retval, int):
            # doebuild failed before spawning
            self._unregister()
            self.returncode = retval
            self.wait()
            return

        self.pid = retval[0]
        portage.process.spawned_pids.remove(self.pid)
Example #33
	def _spawn(self, args, fd_pipes, **kwargs):
		"""
		Fork a subprocess, apply local settings, and call
		dblink.merge().
		"""

		elog_reader_fd, elog_writer_fd = os.pipe()
		fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
			fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
		blockers = None
		if self.blockers is not None:
			# Query blockers in the main process, since closing
			# of file descriptors in the subprocess can prevent
			# access to open database connections such as that
			# used by the sqlite metadata cache module.
			blockers = self.blockers()
		mylink = portage.dblink(self.mycat, self.mypkg, settings=self.settings,
			treetype=self.treetype, vartree=self.vartree,
			blockers=blockers, scheduler=self.scheduler,
			pipe=elog_writer_fd)
		fd_pipes[elog_writer_fd] = elog_writer_fd
		self._elog_reg_id = self.scheduler.register(elog_reader_fd,
			self._registered_events, self._elog_output_handler)

		# If a concurrent emerge process tries to install a package
		# in the same SLOT as this one at the same time, there is an
		# extremely unlikely chance that the COUNTER values will not be
		# ordered correctly unless we lock the vdb here.
		# FEATURES=parallel-install skips this lock in order to
		# improve performance, and the risk is practically negligible.
		self._lock_vdb()
		counter = None
		if not self.unmerge:
			counter = self.vartree.dbapi.counter_tick()

		pid = os.fork()
		if pid != 0:
			if not isinstance(pid, int):
				raise AssertionError(
					"fork returned non-integer: %s" % (repr(pid),))

			os.close(elog_writer_fd)
			self._elog_reader_fd = elog_reader_fd
			self._buf = ""
			self._elog_keys = set()

			# invalidate relevant vardbapi caches
			if self.vartree.dbapi._categories is not None:
				self.vartree.dbapi._categories = None
			self.vartree.dbapi._pkgs_changed = True
			self.vartree.dbapi._clear_pkg_cache(mylink)

			portage.process.spawned_pids.append(pid)
			return [pid]

		os.close(elog_reader_fd)

		# TODO: Find out why PyPy 1.8 with close_fds=True triggers
		# "[Errno 9] Bad file descriptor" in subprocesses. It could
		# be due to garbage collection of file objects that were not
		# closed before going out of scope, since PyPy's garbage
		# collector does not support the refcounting semantics that
		# CPython does.
		close_fds = platform.python_implementation() != 'PyPy'
		portage.process._setup_pipes(fd_pipes, close_fds=close_fds)

		# Use default signal handlers since the ones inherited
		# from the parent process are irrelevant here.
		signal.signal(signal.SIGINT, signal.SIG_DFL)
		signal.signal(signal.SIGTERM, signal.SIG_DFL)

		portage.output.havecolor = self.settings.get('NOCOLOR') \
			not in ('yes', 'true')

		# In this subprocess we want mylink._display_merge() to use
		# stdout/stderr directly since they are pipes. This behavior
		# is triggered when mylink._scheduler is None.
		mylink._scheduler = None

		# Avoid wasteful updates of the vdb cache.
		self.vartree.dbapi._flush_cache_enabled = False

		# In this subprocess we don't want PORTAGE_BACKGROUND to
		# suppress stdout/stderr output since they are pipes. We
		# also don't want to open PORTAGE_LOG_FILE, since it will
		# already be opened by the parent process, so we set the
		# "subprocess" value for use in conditional logging code
		# involving PORTAGE_LOG_FILE.
		if not self.unmerge:
			# unmerge phases have separate logs
			if self.settings.get("PORTAGE_BACKGROUND") == "1":
				self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
			else:
				self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
			self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
		self.settings["PORTAGE_BACKGROUND"] = "subprocess"
		self.settings.backup_changes("PORTAGE_BACKGROUND")

		rval = 1
		try:
			if self.unmerge:
				if not mylink.exists():
					rval = os.EX_OK
				elif mylink.unmerge(
					ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
					mylink.lockdb()
					try:
						mylink.delete()
					finally:
						mylink.unlockdb()
					rval = os.EX_OK
			else:
				rval = mylink.merge(self.pkgloc, self.infloc,
					myebuild=self.myebuild, mydbapi=self.mydbapi,
					prev_mtimes=self.prev_mtimes, counter=counter)
		except SystemExit:
			raise
		except:
			traceback.print_exc()
		finally:
			# Call os._exit() from finally block, in order to suppress any
			# finally blocks from earlier in the call stack. See bug #345289.
			os._exit(rval)
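The fork above follows a strict ownership split: the parent keeps the read end, the child keeps the write end, each side closes the descriptor it does not own, and the child leaves through os._exit() so that no outer finally blocks run in its copy of the stack (see the bug #345289 comment). A stripped-down sketch of that pattern, using a hypothetical helper name:

import os

def fork_with_pipe(child_main):
    reader_fd, writer_fd = os.pipe()
    pid = os.fork()
    if pid != 0:
        # Parent: keep the read end, close the child's write end.
        os.close(writer_fd)
        return pid, reader_fd
    # Child: keep the write end, close the parent's read end.
    os.close(reader_fd)
    rval = 1
    try:
        rval = child_main(writer_fd)
    finally:
        # _exit() from a finally block suppresses any finally blocks
        # from earlier in the call stack (compare bug #345289 above).
        os._exit(rval)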
Example #34
0
	def _start(self):
		ebuild_path = self.ebuild_hash.location

		with io.open(_unicode_encode(ebuild_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'],
			errors='replace') as f:
			self._eapi, self._eapi_lineno = portage._parse_eapi_ebuild_head(f)

		parsed_eapi = self._eapi
		if parsed_eapi is None:
			parsed_eapi = "0"

		if not parsed_eapi:
			# An empty EAPI setting is invalid.
			self._eapi_invalid(None)
			self._set_returncode((self.pid, 1 << 8))
			self.wait()
			return

		self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
		if not self.eapi_supported:
			self.metadata = {"EAPI": parsed_eapi}
			self._set_returncode((self.pid, os.EX_OK << 8))
			self.wait()
			return

		settings = self.settings
		settings.setcpv(self.cpv)
		settings.configdict['pkg']['EAPI'] = parsed_eapi

		debug = settings.get("PORTAGE_DEBUG") == "1"
		master_fd = None
		slave_fd = None
		fd_pipes = None
		if self.fd_pipes is not None:
			fd_pipes = self.fd_pipes.copy()
		else:
			fd_pipes = {}

		null_input = open('/dev/null', 'rb')
		fd_pipes.setdefault(0, null_input.fileno())
		fd_pipes.setdefault(1, sys.__stdout__.fileno())
		fd_pipes.setdefault(2, sys.__stderr__.fileno())

		# flush any pending output
		stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
		for fd in fd_pipes.values():
			if fd in stdout_filenos:
				sys.__stdout__.flush()
				sys.__stderr__.flush()
				break

		self._files = self._files_dict()
		files = self._files

		master_fd, slave_fd = os.pipe()
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		fd_pipes[self._metadata_fd] = slave_fd

		self._raw_metadata = []
		files.ebuild = master_fd
		self._reg_id = self.scheduler.register(files.ebuild,
			self._registered_events, self._output_handler)
		self._registered = True

		retval = portage.doebuild(ebuild_path, "depend",
			settings=settings, debug=debug,
			mydbapi=self.portdb, tree="porttree",
			fd_pipes=fd_pipes, returnpid=True)

		os.close(slave_fd)
		null_input.close()

		if isinstance(retval, int):
			# doebuild failed before spawning
			self._unregister()
			self._set_returncode((self.pid, retval << 8))
			self.wait()
			return

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)
Example #35
0
	def _start(self):
		ebuild_path = self.ebuild_hash.location

		with io.open(_unicode_encode(ebuild_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'],
			errors='replace') as f:
			self._eapi, self._eapi_lineno = portage._parse_eapi_ebuild_head(f)

		parsed_eapi = self._eapi
		if parsed_eapi is None:
			parsed_eapi = "0"

		if not parsed_eapi:
			# An empty EAPI setting is invalid.
			self._eapi_invalid(None)
			self.returncode = 1
			self._async_wait()
			return

		self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
		if not self.eapi_supported:
			self.metadata = {"EAPI": parsed_eapi}
			self.returncode = os.EX_OK
			self._async_wait()
			return

		settings = self.settings
		settings.setcpv(self.cpv)
		settings.configdict['pkg']['EAPI'] = parsed_eapi

		debug = settings.get("PORTAGE_DEBUG") == "1"
		master_fd = None
		slave_fd = None
		fd_pipes = None
		if self.fd_pipes is not None:
			fd_pipes = self.fd_pipes.copy()
		else:
			fd_pipes = {}

		null_input = open('/dev/null', 'rb')
		fd_pipes.setdefault(0, null_input.fileno())
		fd_pipes.setdefault(1, sys.__stdout__.fileno())
		fd_pipes.setdefault(2, sys.__stderr__.fileno())

		# flush any pending output
		stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
		for fd in fd_pipes.values():
			if fd in stdout_filenos:
				sys.__stdout__.flush()
				sys.__stderr__.flush()
				break

		self._files = self._files_dict()
		files = self._files

		master_fd, slave_fd = os.pipe()

		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		# FD_CLOEXEC is enabled by default in Python >=3.4.
		if sys.hexversion < 0x3040000:
			try:
				fcntl.FD_CLOEXEC
			except AttributeError:
				pass
			else:
				fcntl.fcntl(master_fd, fcntl.F_SETFD,
					fcntl.fcntl(master_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

		fd_pipes[slave_fd] = slave_fd
		settings["PORTAGE_PIPE_FD"] = str(slave_fd)

		self._raw_metadata = []
		files.ebuild = master_fd
		self.scheduler.add_reader(files.ebuild, self._output_handler)
		self._registered = True

		retval = portage.doebuild(ebuild_path, "depend",
			settings=settings, debug=debug,
			mydbapi=self.portdb, tree="porttree",
			fd_pipes=fd_pipes, returnpid=True)
		settings.pop("PORTAGE_PIPE_FD", None)

		os.close(slave_fd)
		null_input.close()

		if isinstance(retval, int):
			# doebuild failed before spawning
			self.returncode = retval
			self._async_wait()
			return

		self.pid = retval[0]
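Unlike the previous variant, this one hands the write end to the child by number through the PORTAGE_PIPE_FD environment variable instead of remapping it onto a fixed descriptor. A hypothetical child-side counterpart, shown only to illustrate the handoff (the function name and framing are assumptions, not portage code):

import os

def emit_metadata(lines):
    # The parent put the raw fd number into the environment;
    # the child simply writes to that descriptor and closes it.
    fd = int(os.environ["PORTAGE_PIPE_FD"])
    for line in lines:
        os.write(fd, line.encode("utf-8") + b"\n")
    os.close(fd)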
Example #36
0
    def testDoebuild(self):
        """
        Invoke portage.doebuild() with the fd_pipes parameter, and
        check that the expected output appears in the pipe. This
        functionality is not used by portage internally, but it is
        supported for API consumers (see bug #475812).
        """

        output_fd = 200
        ebuild_body = ["S=${WORKDIR}"]
        for phase_func in (
                "pkg_info",
                "pkg_nofetch",
                "pkg_pretend",
                "pkg_setup",
                "src_unpack",
                "src_prepare",
                "src_configure",
                "src_compile",
                "src_test",
                "src_install",
        ):
            ebuild_body.append(("%s() { echo ${EBUILD_PHASE}"
                                " 1>&%s; }") % (phase_func, output_fd))

        ebuild_body.append("")
        ebuild_body = "\n".join(ebuild_body)

        ebuilds = {
            "app-misct/foo-1": {
                "EAPI": "5",
                "MISC_CONTENT": ebuild_body,
            }
        }

        # Override things that may be unavailable, or may have portability
        # issues when running tests in exotic environments.
        #   prepstrip - bug #447810 (bash read builtin EINTR problem)
        true_symlinks = ("find", "prepstrip", "sed", "scanelf")
        true_binary = portage.process.find_binary("true")
        self.assertEqual(true_binary is None, False, "true command not found")

        dev_null = open(os.devnull, "wb")
        playground = ResolverPlayground(ebuilds=ebuilds)
        try:
            QueryCommand._db = playground.trees
            root_config = playground.trees[playground.eroot]["root_config"]
            portdb = root_config.trees["porttree"].dbapi
            settings = portage.config(clone=playground.settings)
            if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
                settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[
                    "__PORTAGE_TEST_HARDLINK_LOCKS"]
                settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")

            settings.features.add("noauto")
            settings.features.add("test")
            settings["PORTAGE_PYTHON"] = portage._python_interpreter
            settings["PORTAGE_QUIET"] = "1"
            settings["PYTHONDONTWRITEBYTECODE"] = os.environ.get(
                "PYTHONDONTWRITEBYTECODE", "")

            fake_bin = os.path.join(settings["EPREFIX"], "bin")
            portage.util.ensure_dirs(fake_bin)
            for x in true_symlinks:
                os.symlink(true_binary, os.path.join(fake_bin, x))

            settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin
            settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE")

            cpv = "app-misct/foo-1"
            metadata = dict(
                zip(Package.metadata_keys,
                    portdb.aux_get(cpv, Package.metadata_keys)))

            pkg = Package(
                built=False,
                cpv=cpv,
                installed=False,
                metadata=metadata,
                root_config=root_config,
                type_name="ebuild",
            )
            settings.setcpv(pkg)
            ebuildpath = portdb.findname(cpv)
            self.assertNotEqual(ebuildpath, None)

            for phase in (
                    "info",
                    "nofetch",
                    "pretend",
                    "setup",
                    "unpack",
                    "prepare",
                    "configure",
                    "compile",
                    "test",
                    "install",
                    "qmerge",
                    "clean",
                    "merge",
            ):

                pr, pw = os.pipe()

                producer = DoebuildProcess(
                    doebuild_pargs=(ebuildpath, phase),
                    doebuild_kwargs={
                        "settings": settings,
                        "mydbapi": portdb,
                        "tree": "porttree",
                        "vartree": root_config.trees["vartree"],
                        "fd_pipes": {
                            1: dev_null.fileno(),
                            2: dev_null.fileno(),
                            output_fd: pw,
                        },
                        "prev_mtimes": {},
                    },
                )

                consumer = PipeReader(input_files={"producer": pr})

                task_scheduler = TaskScheduler(iter([producer, consumer]),
                                               max_jobs=2)

                try:
                    task_scheduler.start()
                finally:
                    # PipeReader closes pr
                    os.close(pw)

                task_scheduler.wait()
                output = portage._unicode_decode(
                    consumer.getvalue()).rstrip("\n")

                if task_scheduler.returncode != os.EX_OK:
                    portage.writemsg(output, noiselevel=-1)

                self.assertEqual(task_scheduler.returncode, os.EX_OK)

                if phase not in ("clean", "merge", "qmerge"):
                    self.assertEqual(phase, output)

        finally:
            dev_null.close()
            playground.cleanup()
            QueryCommand._db = None
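The behavior this test exercises can be reproduced with the standard library alone: hand the child an extra inherited descriptor and read back whatever it writes there, much as fd_pipes={output_fd: pw} does above. A sketch of that idea (plain subprocess, not the portage API):

import os
import subprocess

pr, pw = os.pipe()
proc = subprocess.Popen(
    ["/bin/sh", "-c", "echo hello 1>&%d" % pw],
    pass_fds=(pw,),  # keep pw open across exec; pass_fds also marks it inheritable
)
os.close(pw)  # drop the parent's copy so EOF arrives once the child exits
chunks = []
while True:
    data = os.read(pr, 4096)
    if not data:
        break
    chunks.append(data)
os.close(pr)
proc.wait()
assert b"".join(chunks) == b"hello\n"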
Example #37
0
	def make_pipes():
		return os.pipe(), None
Example #38
0
	def _spawn(self, args, fd_pipes, **kwargs):
		"""
		Fork a subprocess, apply local settings, and call
		dblink.merge(). TODO: Share code with ForkProcess.
		"""

		elog_reader_fd, elog_writer_fd = os.pipe()
		fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
			fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
		blockers = None
		if self.blockers is not None:
			# Query blockers in the main process, since closing
			# of file descriptors in the subprocess can prevent
			# access to open database connections such as that
			# used by the sqlite metadata cache module.
			blockers = self.blockers()
		mylink = portage.dblink(self.mycat, self.mypkg, settings=self.settings,
			treetype=self.treetype, vartree=self.vartree,
			blockers=blockers, pipe=elog_writer_fd)
		fd_pipes[elog_writer_fd] = elog_writer_fd
		self._elog_reg_id = self.scheduler.io_add_watch(elog_reader_fd,
			self._registered_events, self._elog_output_handler)

		# If a concurrent emerge process tries to install a package
		# in the same SLOT as this one at the same time, there is an
		# extremely unlikely chance that the COUNTER values will not be
		# ordered correctly unless we lock the vdb here.
		# FEATURES=parallel-install skips this lock in order to
		# improve performance, and the risk is practically negligible.
		self._lock_vdb()
		counter = None
		if not self.unmerge:
			counter = self.vartree.dbapi.counter_tick()

		parent_pid = os.getpid()
		pid = None
		try:
			pid = os.fork()

			if pid != 0:
				if not isinstance(pid, int):
					raise AssertionError(
						"fork returned non-integer: %s" % (repr(pid),))

				os.close(elog_writer_fd)
				self._elog_reader_fd = elog_reader_fd
				self._buf = ""
				self._elog_keys = set()

				# invalidate relevant vardbapi caches
				if self.vartree.dbapi._categories is not None:
					self.vartree.dbapi._categories = None
				self.vartree.dbapi._pkgs_changed = True
				self.vartree.dbapi._clear_pkg_cache(mylink)

				portage.process.spawned_pids.append(pid)
				return [pid]

			os.close(elog_reader_fd)

			# Use default signal handlers in order to avoid problems
			# killing subprocesses as reported in bug #353239.
			signal.signal(signal.SIGINT, signal.SIG_DFL)
			signal.signal(signal.SIGTERM, signal.SIG_DFL)

			portage.locks._close_fds()
			# We don't exec, so use close_fds=False
			# (see _setup_pipes docstring).
			portage.process._setup_pipes(fd_pipes, close_fds=False)

			portage.output.havecolor = self.settings.get('NOCOLOR') \
				not in ('yes', 'true')

			# Avoid wasteful updates of the vdb cache.
			self.vartree.dbapi._flush_cache_enabled = False

			# In this subprocess we don't want PORTAGE_BACKGROUND to
			# suppress stdout/stderr output since they are pipes. We
			# also don't want to open PORTAGE_LOG_FILE, since it will
			# already be opened by the parent process, so we set the
			# "subprocess" value for use in conditional logging code
			# involving PORTAGE_LOG_FILE.
			if not self.unmerge:
				# unmerge phases have separate logs
				if self.settings.get("PORTAGE_BACKGROUND") == "1":
					self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
				else:
					self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
				self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
			self.settings["PORTAGE_BACKGROUND"] = "subprocess"
			self.settings.backup_changes("PORTAGE_BACKGROUND")

			rval = 1
			try:
				if self.unmerge:
					if not mylink.exists():
						rval = os.EX_OK
					elif mylink.unmerge(
						ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
						mylink.lockdb()
						try:
							mylink.delete()
						finally:
							mylink.unlockdb()
						rval = os.EX_OK
				else:
					rval = mylink.merge(self.pkgloc, self.infloc,
						myebuild=self.myebuild, mydbapi=self.mydbapi,
						prev_mtimes=self.prev_mtimes, counter=counter)
			except SystemExit:
				raise
			except:
				traceback.print_exc()
			finally:
				os._exit(rval)

		finally:
			if pid == 0 or (pid is None and os.getpid() != parent_pid):
				# Call os._exit() from a finally block in order
				# to suppress any finally blocks from earlier
				# in the call stack (see bug #345289). This
				# finally block has to be setup before the fork
				# in order to avoid a race condition.
				os._exit(1)
Example #39
0
File: process.py Project: cgfuh/portage
def spawn(mycommand, env=None, opt_name=None, fd_pipes=None, returnpid=False,
          uid=None, gid=None, groups=None, umask=None, cwd=None, logfile=None,
          path_lookup=True, pre_exec=None,
          close_fds=(sys.version_info < (3, 4)), unshare_net=False,
          unshare_ipc=False, unshare_mount=False, unshare_pid=False,
          cgroup=None):
	"""
	Spawns a given command.
	
	@param mycommand: the command to execute
	@type mycommand: String or List (Popen style list)
	@param env: If env is not None, it must be a mapping that defines the environment
		variables for the new process; these are used instead of the default behavior
		of inheriting the current process's environment.
	@type env: None or Mapping
	@param opt_name: an optional name for the spawned process (defaults to the binary name)
	@type opt_name: String
	@param fd_pipes: A dict mapping file descriptors in the new process to
		file descriptors in the calling process, e.g. {0: stdin, 1: stdout}
		(default is {0: stdin, 1: stdout, 2: stderr})
	@type fd_pipes: Dictionary
	@param returnpid: Return the process IDs for a successful spawn.
	NOTE: This requires the caller to clean up all the PIDs; otherwise spawn will clean them up.
	@type returnpid: Boolean
	@param uid: User ID to spawn as; useful for dropping privileges
	@type uid: Integer
	@param gid: Group ID to spawn as; useful for dropping privileges
	@type gid: Integer
	@param groups: Group IDs to spawn in; useful for having the process run in multiple group contexts.
	@type groups: List
	@param umask: An integer representing the umask for the process (see man chmod for umask details)
	@type umask: Integer
	@param cwd: Current working directory
	@type cwd: String
	@param logfile: name of a file to use for logging purposes
	@type logfile: String
	@param path_lookup: If the binary is not fully specified then look for it in PATH
	@type path_lookup: Boolean
	@param pre_exec: A function to be called with no arguments just prior to the exec call.
	@type pre_exec: callable
	@param close_fds: If True, then close all file descriptors except those
		referenced by fd_pipes (default is True for python3.3 and earlier, and False for
		python3.4 and later due to non-inheritable file descriptor behavior from PEP 446).
	@type close_fds: Boolean
	@param unshare_net: If True, networking will be unshared from the spawned process
	@type unshare_net: Boolean
	@param unshare_ipc: If True, IPC will be unshared from the spawned process
	@type unshare_ipc: Boolean
	@param unshare_mount: If True, mount namespace will be unshared and mounts will
		be private to the namespace
	@type unshare_mount: Boolean
	@param unshare_pid: If True, PID ns will be unshared from the spawned process
	@type unshare_pid: Boolean
	@param cgroup: CGroup path to bind the process to
	@type cgroup: String

	logfile requires stdout and stderr to be assigned to this process (i.e., not pointed
	   somewhere else).
	
	"""

	# mycommand is either a str or a list
	if isinstance(mycommand, basestring):
		mycommand = mycommand.split()

	env = os.environ if env is None else env

	if sys.hexversion < 0x3000000:
		# Avoid a potential UnicodeEncodeError from os.execve().
		env_bytes = {}
		for k, v in env.items():
			env_bytes[_unicode_encode(k, encoding=_encodings['content'])] = \
				_unicode_encode(v, encoding=_encodings['content'])
		env = env_bytes
		del env_bytes

	# If an absolute path to an executable file isn't given
	# search for it unless we've been told not to.
	binary = mycommand[0]
	if binary not in (BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY) and \
		(not os.path.isabs(binary) or not os.path.isfile(binary)
	    or not os.access(binary, os.X_OK)):
		binary = path_lookup and find_binary(binary) or None
		if not binary:
			raise CommandNotFound(mycommand[0])

	# If we haven't been told what file descriptors to use
	# default to propagating our stdin, stdout and stderr.
	if fd_pipes is None:
		fd_pipes = {
			0:portage._get_stdin().fileno(),
			1:sys.__stdout__.fileno(),
			2:sys.__stderr__.fileno(),
		}

	# mypids will hold the pids of all processes created.
	mypids = []

	if logfile:
		# Using a log file requires that stdout and stderr
		# are assigned to the process we're running.
		if 1 not in fd_pipes or 2 not in fd_pipes:
			raise ValueError(fd_pipes)

		# Create a pipe
		(pr, pw) = os.pipe()

		# Create a tee process, giving it our stdout and stderr
		# as well as the read end of the pipe.
		mypids.extend(spawn(('tee', '-i', '-a', logfile),
		              returnpid=True, fd_pipes={0:pr,
		              1:fd_pipes[1], 2:fd_pipes[2]}))

		# We don't need the read end of the pipe, so close it.
		os.close(pr)

		# Assign the write end of the pipe to our stdout and stderr.
		fd_pipes[1] = pw
		fd_pipes[2] = pw

	# Cache _has_ipv6() result for use in child processes.
	_has_ipv6()

	# This caches the libc library lookup and _unshare_validator results
	# in the current process, so that results are cached for use in
	# child processes.
	unshare_flags = 0
	if unshare_net or unshare_ipc or unshare_mount or unshare_pid:
		# from /usr/include/bits/sched.h
		CLONE_NEWNS = 0x00020000
		CLONE_NEWUTS = 0x04000000
		CLONE_NEWIPC = 0x08000000
		CLONE_NEWPID = 0x20000000
		CLONE_NEWNET = 0x40000000

		if unshare_net:
			# UTS namespace to override hostname
			unshare_flags |= CLONE_NEWNET | CLONE_NEWUTS
		if unshare_ipc:
			unshare_flags |= CLONE_NEWIPC
		if unshare_mount:
			# NEWNS = mount namespace
			unshare_flags |= CLONE_NEWNS
		if unshare_pid:
			# we also need mount namespace for slave /proc
			unshare_flags |= CLONE_NEWPID | CLONE_NEWNS

		_unshare_validate(unshare_flags)

	# Force instantiation of portage.data.userpriv_groups before the
	# fork, so that the result is cached in the main process.
	bool(groups)

	parent_pid = os.getpid()
	pid = None
	try:
		pid = os.fork()

		if pid == 0:
			try:
				_exec(binary, mycommand, opt_name, fd_pipes,
					env, gid, groups, uid, umask, cwd, pre_exec, close_fds,
					unshare_net, unshare_ipc, unshare_mount, unshare_pid,
					unshare_flags, cgroup)
			except SystemExit:
				raise
			except Exception as e:
				# We need to catch _any_ exception so that it doesn't
				# propagate out of this function and cause exiting
				# with anything other than os._exit()
				writemsg("%s:\n   %s\n" % (e, " ".join(mycommand)),
					noiselevel=-1)
				traceback.print_exc()
				sys.stderr.flush()

	finally:
		if pid == 0 or (pid is None and os.getpid() != parent_pid):
			# Call os._exit() from a finally block in order
			# to suppress any finally blocks from earlier
			# in the call stack (see bug #345289). This
			# finally block has to be setup before the fork
			# in order to avoid a race condition.
			os._exit(1)

	if not isinstance(pid, int):
		raise AssertionError("fork returned non-integer: %s" % (repr(pid),))

	# Add the pid to our local and the global pid lists.
	mypids.append(pid)

	# If we started a tee process the write side of the pipe is no
	# longer needed, so close it.
	if logfile:
		os.close(pw)

	# If the caller wants to handle cleaning up the processes, we tell
	# it about all processes that were created.
	if returnpid:
		return mypids

	# Otherwise we clean them up.
	while mypids:

		# Pull the last reader in the pipe chain. If all processes
		# in the pipe are well behaved, it will die when the process
		# it is reading from dies.
		pid = mypids.pop(0)

		# and wait for it.
		retval = os.waitpid(pid, 0)[1]

		if retval:
			# If it failed, kill off anything else that
			# isn't dead yet.
			for pid in mypids:
				# With waitpid and WNOHANG, only check the
				# first element of the tuple since the second
				# element may vary (bug #337465).
				if os.waitpid(pid, os.WNOHANG)[0] == 0:
					os.kill(pid, signal.SIGTERM)
					os.waitpid(pid, 0)

			# If it got a signal, return the signal that was sent.
			if (retval & 0xff):
				return ((retval & 0xff) << 8)

			# Otherwise, return its exit code.
			return (retval >> 8)

	# Everything succeeded
	return 0
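The tail of spawn() decodes the raw waitpid() status word by hand. Restated compactly as a sketch (on Python 3.9 and later os.waitstatus_to_exitcode() covers much of this, but the snippet predates it):

def decode_waitpid_status(retval):
    if retval & 0xff:
        # Terminated by a signal: spawn() reports the signal number
        # shifted into the high byte.
        return (retval & 0xff) << 8
    # Normal termination: the exit code lives in the high byte.
    return retval >> 8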
Example #40
0
    def _start(self):
        settings = self.settings
        settings.setcpv(self.cpv)
        ebuild_path = self.ebuild_hash.location

        # the caller can pass in eapi in order to avoid
        # redundant _parse_eapi_ebuild_head calls
        eapi = self.eapi
        if eapi is None and \
                'parse-eapi-ebuild-head' in settings.features:
            with io.open(_unicode_encode(ebuild_path,
                                         encoding=_encodings['fs'],
                                         errors='strict'),
                         mode='r',
                         encoding=_encodings['repo.content'],
                         errors='replace') as f:
                eapi = portage._parse_eapi_ebuild_head(f)

        if eapi is not None:
            if not portage.eapi_is_supported(eapi):
                self.metadata = self.metadata_callback(self.cpv,
                                                       self.repo_path,
                                                       {'EAPI': eapi},
                                                       self.ebuild_hash)
                self._set_returncode((self.pid, os.EX_OK << 8))
                self.wait()
                return

            settings.configdict['pkg']['EAPI'] = eapi

        debug = settings.get("PORTAGE_DEBUG") == "1"
        master_fd = None
        slave_fd = None
        fd_pipes = None
        if self.fd_pipes is not None:
            fd_pipes = self.fd_pipes.copy()
        else:
            fd_pipes = {}

        null_input = open('/dev/null', 'rb')
        fd_pipes.setdefault(0, null_input.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.values():
            if fd == sys.stdout.fileno():
                sys.stdout.flush()
            if fd == sys.stderr.fileno():
                sys.stderr.flush()

        self._files = self._files_dict()
        files = self._files

        master_fd, slave_fd = os.pipe()
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
                    fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        fd_pipes[self._metadata_fd] = slave_fd

        self._raw_metadata = []
        files.ebuild = master_fd
        self._reg_id = self.scheduler.register(files.ebuild,
                                               self._registered_events,
                                               self._output_handler)
        self._registered = True

        retval = portage.doebuild(ebuild_path,
                                  "depend",
                                  settings=settings,
                                  debug=debug,
                                  mydbapi=self.portdb,
                                  tree="porttree",
                                  fd_pipes=fd_pipes,
                                  returnpid=True)

        os.close(slave_fd)
        null_input.close()

        if isinstance(retval, int):
            # doebuild failed before spawning
            self._unregister()
            self._set_returncode((self.pid, retval << 8))
            self.wait()
            return

        self.pid = retval[0]
        portage.process.spawned_pids.remove(self.pid)
Example #41
0
		def make_pipes():
			return os.pipe(), None
Example #42
0
    def _start(self):
        ebuild_path = self.ebuild_hash.location

        with io.open(_unicode_encode(ebuild_path,
                                     encoding=_encodings['fs'],
                                     errors='strict'),
                     mode='r',
                     encoding=_encodings['repo.content'],
                     errors='replace') as f:
            self._eapi, self._eapi_lineno = portage._parse_eapi_ebuild_head(f)

        parsed_eapi = self._eapi
        if parsed_eapi is None:
            parsed_eapi = "0"

        if not parsed_eapi:
            # An empty EAPI setting is invalid.
            self._eapi_invalid(None)
            self.returncode = 1
            self._async_wait()
            return

        self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
        if not self.eapi_supported:
            self.metadata = {"EAPI": parsed_eapi}
            self.returncode = os.EX_OK
            self._async_wait()
            return

        settings = self.settings
        settings.setcpv(self.cpv)
        settings.configdict['pkg']['EAPI'] = parsed_eapi

        debug = settings.get("PORTAGE_DEBUG") == "1"
        master_fd = None
        slave_fd = None
        fd_pipes = None
        if self.fd_pipes is not None:
            fd_pipes = self.fd_pipes.copy()
        else:
            fd_pipes = {}

        null_input = open('/dev/null', 'rb')
        fd_pipes.setdefault(0, null_input.fileno())
        fd_pipes.setdefault(1, sys.__stdout__.fileno())
        fd_pipes.setdefault(2, sys.__stderr__.fileno())

        # flush any pending output
        stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
        for fd in fd_pipes.values():
            if fd in stdout_filenos:
                sys.__stdout__.flush()
                sys.__stderr__.flush()
                break

        self._files = self._files_dict()
        files = self._files

        master_fd, slave_fd = os.pipe()

        fcntl.fcntl(master_fd, fcntl.F_SETFL,
                    fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        # FD_CLOEXEC is enabled by default in Python >=3.4.
        if sys.hexversion < 0x3040000:
            try:
                fcntl.FD_CLOEXEC
            except AttributeError:
                pass
            else:
                fcntl.fcntl(
                    master_fd, fcntl.F_SETFD,
                    fcntl.fcntl(master_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

        fd_pipes[slave_fd] = slave_fd
        settings["PORTAGE_PIPE_FD"] = str(slave_fd)

        self._raw_metadata = []
        files.ebuild = master_fd
        self.scheduler.add_reader(files.ebuild, self._output_handler)
        self._registered = True

        retval = portage.doebuild(ebuild_path,
                                  "depend",
                                  settings=settings,
                                  debug=debug,
                                  mydbapi=self.portdb,
                                  tree="porttree",
                                  fd_pipes=fd_pipes,
                                  returnpid=True)
        settings.pop("PORTAGE_PIPE_FD", None)

        os.close(slave_fd)
        null_input.close()

        if isinstance(retval, int):
            # doebuild failed before spawning
            self.returncode = retval
            self._async_wait()
            return

        self.pid = retval[0]
Example #43
0
	def _start(self):
		settings = self.settings
		settings.setcpv(self.cpv)
		ebuild_path = self.ebuild_path

		eapi = None
		if 'parse-eapi-glep-55' in settings.features:
			pf, eapi = portage._split_ebuild_name_glep55(
				os.path.basename(ebuild_path))
		if eapi is None and \
			'parse-eapi-ebuild-head' in settings.features:
			eapi = portage._parse_eapi_ebuild_head(
				codecs.open(_unicode_encode(ebuild_path,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['repo.content'],
				errors='replace'))

		if eapi is not None:
			if not portage.eapi_is_supported(eapi):
				self.metadata_callback(self.cpv, self.ebuild_path,
					self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
				self.returncode = os.EX_OK
				self.wait()
				return

			settings.configdict['pkg']['EAPI'] = eapi

		debug = settings.get("PORTAGE_DEBUG") == "1"
		master_fd = None
		slave_fd = None
		fd_pipes = None
		if self.fd_pipes is not None:
			fd_pipes = self.fd_pipes.copy()
		else:
			fd_pipes = {}

		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.values():
			if fd == sys.stdout.fileno():
				sys.stdout.flush()
			if fd == sys.stderr.fileno():
				sys.stderr.flush()

		fd_pipes_orig = fd_pipes.copy()
		self._files = self._files_dict()
		files = self._files

		master_fd, slave_fd = os.pipe()
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		fd_pipes[self._metadata_fd] = slave_fd

		self._raw_metadata = []
		files.ebuild = os.fdopen(master_fd, 'rb')
		self._reg_id = self.scheduler.register(files.ebuild.fileno(),
			self._registered_events, self._output_handler)
		self._registered = True

		retval = portage.doebuild(ebuild_path, "depend",
			settings["ROOT"], settings, debug,
			mydbapi=self.portdb, tree="porttree",
			fd_pipes=fd_pipes, returnpid=True)

		os.close(slave_fd)

		if isinstance(retval, int):
			# doebuild failed before spawning
			self._unregister()
			self.returncode = retval
			self.wait()
			return

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)
Example #44
0
	def _pipe(self, fd_pipes):
		"""
		@type fd_pipes: dict
		@param fd_pipes: pipes from which to copy terminal size if desired.
		"""
		return os.pipe()
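An override like _pipe() lets a subclass force a plain pipe where the base class might otherwise allocate a pty. The usual fallback idiom, sketched with assumed names rather than portage's:

import os
import pty

def open_pty_or_pipe():
    try:
        master_fd, slave_fd = pty.openpty()
        got_pty = True
    except EnvironmentError:
        # No pty available (e.g. inside a restricted environment);
        # degrade to an ordinary pipe.
        master_fd, slave_fd = os.pipe()
        got_pty = False
    return got_pty, master_fd, slave_fd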
Example #45
0
    def _spawn(self, args, fd_pipes, **kwargs):
        """
		Fork a subprocess, apply local settings, and call
		dblink.merge().
		"""

        elog_reader_fd, elog_writer_fd = os.pipe()
        fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
                    fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        blockers = None
        if self.blockers is not None:
            # Query blockers in the main process, since closing
            # of file descriptors in the subprocess can prevent
            # access to open database connections such as that
            # used by the sqlite metadata cache module.
            blockers = self.blockers()
        mylink = portage.dblink(self.mycat,
                                self.mypkg,
                                settings=self.settings,
                                treetype=self.treetype,
                                vartree=self.vartree,
                                blockers=blockers,
                                scheduler=self.scheduler,
                                pipe=elog_writer_fd)
        fd_pipes[elog_writer_fd] = elog_writer_fd
        self._elog_reg_id = self.scheduler.register(elog_reader_fd,
                                                    self._registered_events,
                                                    self._elog_output_handler)

        # If a concurrent emerge process tries to install a package
        # in the same SLOT as this one at the same time, there is an
        # extremely unlikely chance that the COUNTER values will not be
        # ordered correctly unless we lock the vdb here.
        # FEATURES=parallel-install skips this lock in order to
        # improve performance, and the risk is practically negligible.
        self._lock_vdb()
        counter = None
        if not self.unmerge:
            counter = self.vartree.dbapi.counter_tick()

        pid = os.fork()
        if pid != 0:
            if not isinstance(pid, int):
                raise AssertionError("fork returned non-integer: %s" %
                                     (repr(pid), ))

            os.close(elog_writer_fd)
            self._elog_reader_fd = elog_reader_fd
            self._buf = ""
            self._elog_keys = set()

            # invalidate relevant vardbapi caches
            if self.vartree.dbapi._categories is not None:
                self.vartree.dbapi._categories = None
            self.vartree.dbapi._pkgs_changed = True
            self.vartree.dbapi._clear_pkg_cache(mylink)

            portage.process.spawned_pids.append(pid)
            return [pid]

        os.close(elog_reader_fd)

        # TODO: Find out why PyPy 1.8 with close_fds=True triggers
        # "[Errno 9] Bad file descriptor" in subprocesses. It could
        # be due to garbage collection of file objects that were not
        # closed before going out of scope, since PyPy's garbage
        # collector does not support the refcounting semantics that
        # CPython does.
        close_fds = platform.python_implementation() != 'PyPy'
        portage.process._setup_pipes(fd_pipes, close_fds=close_fds)

        # Use default signal handlers since the ones inherited
        # from the parent process are irrelevant here.
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)

        portage.output.havecolor = self.settings.get('NOCOLOR') \
            not in ('yes', 'true')

        # In this subprocess we want mylink._display_merge() to use
        # stdout/stderr directly since they are pipes. This behavior
        # is triggered when mylink._scheduler is None.
        mylink._scheduler = None

        # Avoid wasteful updates of the vdb cache.
        self.vartree.dbapi._flush_cache_enabled = False

        # In this subprocess we don't want PORTAGE_BACKGROUND to
        # suppress stdout/stderr output since they are pipes. We
        # also don't want to open PORTAGE_LOG_FILE, since it will
        # already be opened by the parent process, so we set the
        # "subprocess" value for use in conditional logging code
        # involving PORTAGE_LOG_FILE.
        if not self.unmerge:
            # unmerge phases have separate logs
            if self.settings.get("PORTAGE_BACKGROUND") == "1":
                self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
            else:
                self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
            self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
        self.settings["PORTAGE_BACKGROUND"] = "subprocess"
        self.settings.backup_changes("PORTAGE_BACKGROUND")

        rval = 1
        try:
            if self.unmerge:
                if not mylink.exists():
                    rval = os.EX_OK
                elif mylink.unmerge(
                        ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
                    mylink.lockdb()
                    try:
                        mylink.delete()
                    finally:
                        mylink.unlockdb()
                    rval = os.EX_OK
            else:
                rval = mylink.merge(self.pkgloc,
                                    self.infloc,
                                    myebuild=self.myebuild,
                                    mydbapi=self.mydbapi,
                                    prev_mtimes=self.prev_mtimes,
                                    counter=counter)
        except SystemExit:
            raise
        except:
            traceback.print_exc()
        finally:
            # Call os._exit() from finally block, in order to suppress any
            # finally blocks from earlier in the call stack. See bug #345289.
            os._exit(rval)
Example #46
0
	def testDoebuild(self):
		"""
		Invoke portage.doebuild() with the fd_pipes parameter, and
		check that the expected output appears in the pipe. This
		functionality is not used by portage internally, but it is
		supported for API consumers (see bug #475812).
		"""

		output_fd = 200
		ebuild_body = ['S=${WORKDIR}']
		for phase_func in ('pkg_info', 'pkg_nofetch', 'pkg_pretend',
			'pkg_setup', 'src_unpack', 'src_prepare', 'src_configure',
			'src_compile', 'src_test', 'src_install'):
			ebuild_body.append(('%s() { echo ${EBUILD_PHASE}'
				' 1>&%s; }') % (phase_func, output_fd))

		ebuild_body.append('')
		ebuild_body = '\n'.join(ebuild_body)

		ebuilds = {
			'app-misct/foo-1': {
				'EAPI'      : '5',
				"MISC_CONTENT": ebuild_body,
			}
		}

		# Override things that may be unavailable, or may have portability
		# issues when running tests in exotic environments.
		#   prepstrip - bug #447810 (bash read builtin EINTR problem)
		true_symlinks = ("find", "prepstrip", "sed", "scanelf")
		true_binary = portage.process.find_binary("true")
		self.assertEqual(true_binary is None, False,
			"true command not found")

		dev_null = open(os.devnull, 'wb')
		playground = ResolverPlayground(ebuilds=ebuilds)
		try:
			QueryCommand._db = playground.trees
			root_config = playground.trees[playground.eroot]['root_config']
			portdb = root_config.trees["porttree"].dbapi
			settings = portage.config(clone=playground.settings)
			if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
				settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
					os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
				settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")

			settings.features.add("noauto")
			settings.features.add("test")
			settings['PORTAGE_PYTHON'] = portage._python_interpreter
			settings['PORTAGE_QUIET'] = "1"
			settings['PYTHONDONTWRITEBYTECODE'] = os.environ.get("PYTHONDONTWRITEBYTECODE", "")

			fake_bin = os.path.join(settings["EPREFIX"], "bin")
			portage.util.ensure_dirs(fake_bin)
			for x in true_symlinks:
				os.symlink(true_binary, os.path.join(fake_bin, x))

			settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin
			settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE")

			cpv = 'app-misct/foo-1'
			metadata = dict(zip(Package.metadata_keys,
				portdb.aux_get(cpv, Package.metadata_keys)))

			pkg = Package(built=False, cpv=cpv, installed=False,
				metadata=metadata, root_config=root_config,
				type_name='ebuild')
			settings.setcpv(pkg)
			ebuildpath = portdb.findname(cpv)
			self.assertNotEqual(ebuildpath, None)

			for phase in ('info', 'nofetch',
				 'pretend', 'setup', 'unpack', 'prepare', 'configure',
				 'compile', 'test', 'install', 'qmerge', 'clean', 'merge'):

				pr, pw = os.pipe()

				producer = DoebuildProcess(doebuild_pargs=(ebuildpath, phase),
					doebuild_kwargs={"settings" : settings,
						"mydbapi": portdb, "tree": "porttree",
						"vartree": root_config.trees["vartree"],
						"fd_pipes": {
							1: dev_null.fileno(),
							2: dev_null.fileno(),
							output_fd: pw,
						},
						"prev_mtimes": {}})

				consumer = PipeReader(
					input_files={"producer" : pr})

				task_scheduler = TaskScheduler(iter([producer, consumer]),
					max_jobs=2)

				try:
					task_scheduler.start()
				finally:
					# PipeReader closes pr
					os.close(pw)

				task_scheduler.wait()
				output = portage._unicode_decode(
					consumer.getvalue()).rstrip("\n")

				if task_scheduler.returncode != os.EX_OK:
					portage.writemsg(output, noiselevel=-1)

				self.assertEqual(task_scheduler.returncode, os.EX_OK)

				if phase not in ('clean', 'merge', 'qmerge'):
					self.assertEqual(phase, output)

		finally:
			dev_null.close()
			playground.cleanup()
			QueryCommand._db = None