Example #1
        def test(loop):
            output = None
            try:
                proc = loop.run_until_complete(
                    asyncio.create_subprocess_exec(cat_binary,
                                                   stdin=stdin_pr,
                                                   stdout=stdout_pw,
                                                   stderr=stdout_pw))

                # These belong exclusively to the subprocess now.
                stdout_pw.close()
                stdin_pr.close()

                output = asyncio.ensure_future(reader(stdout_pr, loop=loop),
                                               loop=loop)

                with ForkExecutor(loop=loop) as executor:
                    writer = asyncio.ensure_future(loop.run_in_executor(
                        executor, stdin_pw.write, stdin_data),
                                                   loop=loop)

                    # This belongs exclusively to the writer now.
                    stdin_pw.close()
                    loop.run_until_complete(writer)

                self.assertEqual(loop.run_until_complete(proc.wait()),
                                 os.EX_OK)
                self.assertEqual(loop.run_until_complete(output), stdin_data)
            finally:
                if output is not None and not output.done():
                    output.cancel()
                for f in files:
                    f.close()
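
The test above manages raw pipe file descriptors so that each end can be handed to exactly one owner (the subprocess, the reader task, or the executor-based writer). For the common case, modern asyncio covers the same round trip with PIPE and communicate(); a minimal, self-contained sketch, assuming a POSIX `cat` on PATH:

    import asyncio

    async def main():
        # Spawn `cat`, write to its stdin, and read its stdout concurrently.
        proc = await asyncio.create_subprocess_exec(
            "cat",
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE)
        # communicate() interleaves the write, drain, and read for us.
        stdout, _ = await proc.communicate(b"hello world\n")
        assert stdout == b"hello world\n"
        assert await proc.wait() == 0

    asyncio.run(main())
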
Example #2
	def _wait_loop(self, timeout=None):
		loop = self.scheduler
		tasks = [self.async_wait()]
		if timeout is not None:
			tasks.append(asyncio.ensure_future(
				asyncio.sleep(timeout, loop=loop), loop=loop))
		try:
			loop.run_until_complete(asyncio.ensure_future(
				asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED,
				loop=loop), loop=loop))
		finally:
			for task in tasks:
				task.cancel()
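
This helper races self.async_wait() against a timer so a stalled task cannot block run_until_complete() forever. The explicit loop= arguments reflect portage's event-loop compatibility layer; asyncio.sleep() and asyncio.wait() dropped their loop parameters in Python 3.10. A standalone sketch of the same race in modern asyncio:

    import asyncio

    async def wait_first(awaitable, timeout=None):
        # Race the awaitable against a timer; cancel whichever tasks remain.
        tasks = [asyncio.ensure_future(awaitable)]
        if timeout is not None:
            tasks.append(asyncio.ensure_future(asyncio.sleep(timeout)))
        try:
            await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
        finally:
            for task in tasks:
                task.cancel()

    asyncio.run(wait_first(asyncio.sleep(0.01), timeout=1.0))
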
Example #3
    async def _testPipeLoggerToPipe(self, test_string, loop):
        """
		Test PipeLogger writing to a pipe connected to a PipeReader.
		This verifies that PipeLogger does not deadlock when writing
		to a pipe that's drained by a PipeReader running in the same
		process (requires non-blocking write).
		"""

        input_fd, writer_pipe = os.pipe()
        _set_nonblocking(writer_pipe)
        writer_pipe = os.fdopen(writer_pipe, 'wb', 0)
        writer = asyncio.ensure_future(
            _writer(writer_pipe, test_string.encode('ascii')))
        writer.add_done_callback(lambda writer: writer_pipe.close())

        pr, pw = os.pipe()

        consumer = PipeLogger(background=True,
                              input_fd=input_fd,
                              log_file_path=os.fdopen(pw, 'wb', 0),
                              scheduler=loop)
        consumer.start()

        # Before starting the reader, wait here for a moment, in order
        # to exercise PipeLogger's handling of EAGAIN during write.
        await asyncio.wait([writer], timeout=0.01)

        reader = _reader(pr)
        await writer
        content = await reader
        await consumer.async_wait()

        self.assertEqual(consumer.returncode, os.EX_OK)

        return content.decode('ascii', 'replace')
Example #4
        def test(loop):
            output = None
            try:
                with open(os.devnull, 'rb', 0) as devnull:
                    proc = loop.run_until_complete(
                        asyncio.create_subprocess_exec(echo_binary,
                                                       *args_tuple,
                                                       stdin=devnull,
                                                       stdout=stdout_pw,
                                                       stderr=stdout_pw))

                # This belongs exclusively to the subprocess now.
                stdout_pw.close()

                output = asyncio.ensure_future(reader(stdout_pr, loop=loop),
                                               loop=loop)

                self.assertEqual(loop.run_until_complete(proc.wait()),
                                 os.EX_OK)
                self.assertEqual(
                    tuple(loop.run_until_complete(output).split()), args_tuple)
            finally:
                if output is not None and not output.done():
                    output.cancel()
                for f in files:
                    f.close()
Example #5
    def schedule(self):

        if self._scheduling:
            # Ignore any recursive schedule() calls triggered via
            # self._task_exit().
            return

        self._scheduling = True
        try:
            while self._task_queue and (
                    self.max_jobs is True
                    or len(self.running_tasks) < self.max_jobs):
                task = self._task_queue.popleft()
                cancelled = getattr(task, "cancelled", None)
                if not cancelled:
                    self.running_tasks.add(task)
                    future = asyncio.ensure_future(self._task_coroutine(task),
                                                   loop=task.scheduler)
                    future.add_done_callback(
                        lambda future: future.cancelled() or future.result())
                    # This callback will be invoked as soon as the task
                    # exits (before the future's done callback is called),
                    # and this is required in order for bool(self) to have
                    # an updated value for Scheduler._schedule to base
                    # assumptions upon. Delayed updates to bool(self) is
                    # what caused Scheduler to hang as in bug 709746.
                    task.addExitListener(self._task_exit)
        finally:
            self._scheduling = False
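
The add_done_callback() lambda above is a recurring idiom in this codebase: future.result() re-raises any exception stored on the future so failures are not silently swallowed, while the future.cancelled() short-circuit avoids raising CancelledError for ordinary cancellation. A self-contained illustration:

    import asyncio

    def raise_unhandled(future):
        # Re-raise unexpected exceptions; treat cancellation as normal.
        future.cancelled() or future.result()

    async def main():
        task = asyncio.ensure_future(asyncio.sleep(0))
        task.add_done_callback(raise_unhandled)
        await task

    asyncio.run(main())
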
Example #6
    def _commands_exit(self, task):

        if self._default_exit(task) != os.EX_OK:
            self._async_wait()
            return

        if self.phase == "install":
            out = io.StringIO()
            _post_src_install_soname_symlinks(self.settings, out)
            msg = out.getvalue()
            if msg:
                self.scheduler.output(
                    msg, log_path=self.settings.get("PORTAGE_LOG_FILE"))

            if "qa-unresolved-soname-deps" in self.settings.features:
                # This operates on REQUIRES metadata generated by the above function call.
                future = asyncio.ensure_future(self._soname_deps_qa(),
                                               loop=self.scheduler)
                # If an unexpected exception occurs, then this will raise it.
                future.add_done_callback(
                    lambda future: future.cancelled() or future.result())
                self._start_task(AsyncTaskFuture(future=future),
                                 self._default_final_exit)
            else:
                self._default_final_exit(task)
        else:
            self._default_final_exit(task)
Example #7
    def _spawn(self, args, fd_pipes=None, **kwargs):
        """
        Override SpawnProcess._spawn to fork a subprocess that calls
        self._run(). This uses multiprocessing.Process in order to leverage
        any pre-fork and post-fork interpreter housekeeping that it provides,
        promoting a healthy state for the forked interpreter.
        """
        # Since multiprocessing.Process closes sys.__stdin__, create a
        # temporary duplicate of fd_pipes[0] so that sys.__stdin__ can
        # be restored in the subprocess, in case this is needed for
        # things like PROPERTIES=interactive support.
        stdin_dup = None
        try:
            stdin_fd = fd_pipes.get(0)
            if (stdin_fd is not None
                    and stdin_fd == portage._get_stdin().fileno()):
                stdin_dup = os.dup(stdin_fd)
                fcntl.fcntl(stdin_dup, fcntl.F_SETFD,
                            fcntl.fcntl(stdin_fd, fcntl.F_GETFD))
                fd_pipes[0] = stdin_dup
            self._proc = multiprocessing.Process(target=self._bootstrap,
                                                 args=(fd_pipes, ))
            self._proc.start()
        finally:
            if stdin_dup is not None:
                os.close(stdin_dup)

        self._proc_join_task = asyncio.ensure_future(self._proc_join(
            self._proc, loop=self.scheduler),
                                                     loop=self.scheduler)
        self._proc_join_task.add_done_callback(
            functools.partial(self._proc_join_done, self._proc))

        return [self._proc.pid]
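
The stdin duplication exists because multiprocessing.Process closes sys.__stdin__; the dup keeps the descriptor usable in the child, and copying the F_GETFD flags preserves close-on-exec behavior. A small sketch of just that step, using a hypothetical helper name:

    import fcntl
    import os

    def dup_with_fd_flags(fd):
        # Duplicate fd and copy its descriptor flags (e.g. FD_CLOEXEC),
        # so the copy behaves like the original across exec.
        dup = os.dup(fd)
        fcntl.fcntl(dup, fcntl.F_SETFD, fcntl.fcntl(fd, fcntl.F_GETFD))
        return dup

    r, w = os.pipe()
    r_copy = dup_with_fd_flags(r)
    for fd in (r, w, r_copy):
        os.close(fd)
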
Example #8
	def _schedule(self):
		while (not self._shutdown and self._submit_queue and
			len(self._running_tasks) < self._max_workers):
			future, proc = self._submit_queue.popleft()
			proc.scheduler = self._loop
			self._running_tasks[id(proc)] = proc
			future.add_done_callback(functools.partial(self._cancel_cb, proc))
			proc_future = asyncio.ensure_future(self._proc_coroutine(proc), loop=self._loop)
			proc_future.add_done_callback(functools.partial(self._proc_coroutine_done, future, proc))
Example #9
    def _subprocess_transport_callback(self, transp, protocol, result, waiter):
        if waiter.exception() is None:
            result.set_result((transp, protocol))
        else:
            transp.close()
            wait_transp = asyncio.ensure_future(transp._wait(), loop=self)
            wait_transp.add_done_callback(
                functools.partial(self._subprocess_transport_failure, result,
                                  waiter.exception()))
Example #10
    def _async_start(self):
        pipe_logger = None
        filter_proc = None
        try:
            log_input = None
            if self.log_path is not None:
                log_filter_file = self.log_filter_file
                if log_filter_file is not None:
                    split_value = shlex_split(log_filter_file)
                    log_filter_file = split_value if split_value else None
                if log_filter_file:
                    filter_input, stdin = os.pipe()
                    log_input, filter_output = os.pipe()
                    try:
                        filter_proc = yield asyncio.create_subprocess_exec(
                            *log_filter_file,
                            env=self.env,
                            stdin=filter_input,
                            stdout=filter_output,
                            stderr=filter_output,
                            loop=self.scheduler)
                    except EnvironmentError:
                        # Maybe the command is missing or broken somehow...
                        os.close(filter_input)
                        os.close(stdin)
                        os.close(log_input)
                        os.close(filter_output)
                    else:
                        self._stdin = os.fdopen(stdin, 'wb', 0)
                        os.close(filter_input)
                        os.close(filter_output)

            if self._stdin is None:
                # Since log_filter_file is unspecified or refers to a file
                # that is missing or broken somehow, create a pipe that
                # logs directly to pipe_logger.
                log_input, stdin = os.pipe()
                self._stdin = os.fdopen(stdin, 'wb', 0)

            # Set background=True so that pipe_logger does not log to stdout.
            pipe_logger = PipeLogger(background=True,
                                     scheduler=self.scheduler,
                                     input_fd=log_input,
                                     log_file_path=self.log_path)

            yield pipe_logger.async_start()
        except asyncio.CancelledError:
            if pipe_logger is not None and pipe_logger.poll() is None:
                pipe_logger.cancel()
            if filter_proc is not None and filter_proc.returncode is None:
                filter_proc.terminate()
            raise

        self._main_task = asyncio.ensure_future(self._main(
            pipe_logger, filter_proc=filter_proc),
                                                loop=self.scheduler)
        self._main_task.add_done_callback(self._main_exit)
Example #11
    def execute_wrapper():
        kill_switch = parent_loop.create_future()
        parent_future = asyncio.ensure_future(
            parent_loop.run_in_executor(self._executor, wrapper, kill_switch),
            loop=parent_loop)
        parent_future.add_done_callback(
            lambda parent_future: None
            if kill_switch.done() else kill_switch.set_result(None))
        return parent_future
Example #12
    def _start(self):
        filter_proc = None
        log_input = None
        if self.log_path is not None:
            log_filter_file = self.log_filter_file
            if log_filter_file is not None:
                split_value = shlex_split(log_filter_file)
                log_filter_file = split_value if split_value else None
            if log_filter_file:
                filter_input, stdin = os.pipe()
                log_input, filter_output = os.pipe()
                try:
                    filter_proc = PopenProcess(
                        proc=subprocess.Popen(
                            log_filter_file,
                            env=self.env,
                            stdin=filter_input,
                            stdout=filter_output,
                            stderr=filter_output,
                        ),
                        scheduler=self.scheduler,
                    )
                    filter_proc.start()
                except EnvironmentError:
                    # Maybe the command is missing or broken somehow...
                    os.close(filter_input)
                    os.close(stdin)
                    os.close(log_input)
                    os.close(filter_output)
                else:
                    self._stdin = os.fdopen(stdin, "wb", 0)
                    os.close(filter_input)
                    os.close(filter_output)

        if self._stdin is None:
            # Since log_filter_file is unspecified or refers to a file
            # that is missing or broken somehow, create a pipe that
            # logs directly to pipe_logger.
            log_input, stdin = os.pipe()
            self._stdin = os.fdopen(stdin, "wb", 0)

        # Set background=True so that pipe_logger does not log to stdout.
        pipe_logger = PipeLogger(
            background=True,
            scheduler=self.scheduler,
            input_fd=log_input,
            log_file_path=self.log_path,
        )
        pipe_logger.start()

        self._main_task_cancel = functools.partial(self._main_cancel,
                                                   filter_proc, pipe_logger)
        self._main_task = asyncio.ensure_future(self._main(
            filter_proc, pipe_logger),
                                                loop=self.scheduler)
        self._main_task.add_done_callback(self._main_exit)
Example #13
        def execute_wrapper():
            kill_switch = threading.Event()
            parent_future = asyncio.ensure_future(parent_loop.run_in_executor(
                self._executor, wrapper, kill_switch),
                                                  loop=parent_loop)

            def kill_callback(parent_future):
                if not kill_switch.is_set():
                    kill_switch.set()

            parent_future.add_done_callback(kill_callback)
            return parent_future
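
Unlike the execute_wrapper variant in Example #11, this one uses a threading.Event as the kill switch, which the blocking wrapper can poll safely from the executor thread. A runnable sketch of the pattern, with a stand-in worker function:

    import asyncio
    import threading
    from concurrent.futures import ThreadPoolExecutor

    def blocking_work(kill_switch):
        # A real worker would poll kill_switch.is_set() between steps.
        return "cancelled" if kill_switch.is_set() else "done"

    async def main():
        loop = asyncio.get_running_loop()
        kill_switch = threading.Event()
        with ThreadPoolExecutor() as executor:
            future = loop.run_in_executor(executor, blocking_work, kill_switch)
            # Trip the switch once the future finishes or is cancelled.
            future.add_done_callback(lambda f: kill_switch.set())
            print(await future)

    asyncio.run(main())
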
Example #14
    def _start(self):

        log_file_path = self.log_file_path
        if hasattr(log_file_path, 'write'):
            self._log_file_nb = True
            self._log_file = log_file_path
            _set_nonblocking(self._log_file.fileno())
        elif log_file_path is not None:
            try:
                self._log_file = open(
                    _unicode_encode(log_file_path,
                                    encoding=_encodings["fs"],
                                    errors="strict"),
                    mode="ab",
                )

                if log_file_path.endswith(".gz"):
                    self._log_file_real = self._log_file
                    self._log_file = gzip.GzipFile(filename="",
                                                   mode="ab",
                                                   fileobj=self._log_file)

                portage.util.apply_secpass_permissions(
                    log_file_path,
                    uid=portage.portage_uid,
                    gid=portage.portage_gid,
                    mode=0o660,
                )
            except FileNotFoundError:
                if self._was_cancelled():
                    self._async_wait()
                    return
                raise

        if isinstance(self.input_fd, int):
            self.input_fd = os.fdopen(self.input_fd, 'rb', 0)

        fd = self.input_fd.fileno()

        fcntl.fcntl(fd, fcntl.F_SETFL,
                    fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        self._io_loop_task = asyncio.ensure_future(self._io_loop(
            self.input_fd),
                                                   loop=self.scheduler)
        self._io_loop_task.add_done_callback(self._io_loop_done)
        self._registered = True
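
The fcntl calls put the input descriptor into non-blocking mode while preserving its other status flags, which is what allows _io_loop to multiplex reads without stalling. As a standalone helper (presumably what _set_nonblocking does elsewhere in these examples):

    import fcntl
    import os

    def set_nonblocking(fd):
        # OR in O_NONBLOCK, keeping the descriptor's existing flags.
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

    r, w = os.pipe()
    set_nonblocking(r)
    os.close(r)
    os.close(w)
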
Example #15
    def _start_task(self, task, exit_handler):
        """
		Register exit handler for the given task, set it
		as self._current_task, and call task.async_start().

		Subclasses can use this as a generic way to start
		a task.

		"""
        try:
            task.scheduler = self.scheduler
        except AttributeError:
            pass
        task.addExitListener(exit_handler)
        self._current_task = task
        result = asyncio.ensure_future(task.async_start(), loop=self.scheduler)
        result.add_done_callback(self._current_task_start_cb)
Example #16
	def _start(self):

		log_file_path = self.log_file_path
		if hasattr(log_file_path, 'write'):
			self._log_file_nb = True
			self._log_file = log_file_path
			_set_nonblocking(self._log_file.fileno())
		elif log_file_path is not None:
			self._log_file = open(_unicode_encode(log_file_path,
				encoding=_encodings['fs'], errors='strict'), mode='ab')
			if log_file_path.endswith('.gz'):
				self._log_file_real = self._log_file
				self._log_file = gzip.GzipFile(filename='', mode='ab',
					fileobj=self._log_file)

			portage.util.apply_secpass_permissions(log_file_path,
				uid=portage.portage_uid, gid=portage.portage_gid,
				mode=0o660)

		if isinstance(self.input_fd, int):
			self.input_fd = os.fdopen(self.input_fd, 'rb', 0)

		fd = self.input_fd.fileno()

		fcntl.fcntl(fd, fcntl.F_SETFL,
			fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		# FD_CLOEXEC is enabled by default in Python >=3.4.
		if sys.hexversion < 0x3040000:
			try:
				fcntl.FD_CLOEXEC
			except AttributeError:
				pass
			else:
				fcntl.fcntl(fd, fcntl.F_SETFD,
					fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

		self._io_loop_task = asyncio.ensure_future(self._io_loop(self.input_fd), loop=self.scheduler)
		self._io_loop_task.add_done_callback(self._io_loop_done)
		self._registered = True
Example #17
    def _start(self):

        if self.fd_pipes is None:
            self.fd_pipes = {}
        else:
            self.fd_pipes = self.fd_pipes.copy()
        fd_pipes = self.fd_pipes

        master_fd, slave_fd = self._pipe(fd_pipes)

        can_log = self._can_log(slave_fd)
        if can_log:
            log_file_path = self.logfile
        else:
            log_file_path = None

        null_input = None
        if not self.background or 0 in fd_pipes:
            # Subclasses such as AbstractEbuildProcess may have already passed
            # in a null file descriptor in fd_pipes, so use that when given.
            pass
        else:
            # TODO: Use job control functions like tcsetpgrp() to control
            # access to stdin. Until then, use /dev/null so that any
            # attempts to read from stdin will immediately return EOF
            # instead of blocking indefinitely.
            null_input = os.open('/dev/null', os.O_RDWR)
            fd_pipes[0] = null_input

        fd_pipes.setdefault(0, portage._get_stdin().fileno())
        fd_pipes.setdefault(1, sys.__stdout__.fileno())
        fd_pipes.setdefault(2, sys.__stderr__.fileno())

        # flush any pending output
        stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
        for fd in fd_pipes.values():
            if fd in stdout_filenos:
                sys.__stdout__.flush()
                sys.__stderr__.flush()
                break

        fd_pipes_orig = fd_pipes.copy()

        if log_file_path is not None or self.background:
            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd

        else:
            # Create a dummy pipe that PipeLogger uses to efficiently
            # monitor for process exit by listening for the EOF event.
            # Re-use of the allocated fd number for the key in fd_pipes
            # guarantees that the keys will not collide for similarly
            # allocated pipes which are used by callers such as
            # FileDigester and MergeProcess. See the _setup_pipes
            # docstring for more benefits of this allocation approach.
            self._dummy_pipe_fd = slave_fd
            fd_pipes[slave_fd] = slave_fd

        kwargs = {}
        for k in self._spawn_kwarg_names:
            v = getattr(self, k)
            if v is not None:
                kwargs[k] = v

        kwargs["fd_pipes"] = fd_pipes
        kwargs["returnpid"] = True
        kwargs.pop("logfile", None)

        retval = self._spawn(self.args, **kwargs)

        os.close(slave_fd)
        if null_input is not None:
            os.close(null_input)

        if isinstance(retval, int):
            # spawn failed
            self.returncode = retval
            self._async_wait()
            return

        self.pid = retval[0]

        stdout_fd = None
        if can_log and not self.background:
            stdout_fd = os.dup(fd_pipes_orig[1])

        build_logger = BuildLogger(env=self.env,
                                   log_path=log_file_path,
                                   log_filter_file=self.log_filter_file,
                                   scheduler=self.scheduler)
        build_logger.start()

        pipe_logger = PipeLogger(background=self.background,
                                 scheduler=self.scheduler,
                                 input_fd=master_fd,
                                 log_file_path=build_logger.stdin,
                                 stdout_fd=stdout_fd)

        pipe_logger.start()

        self._registered = True
        self._main_task = asyncio.ensure_future(self._main(
            build_logger, pipe_logger),
                                                loop=self.scheduler)
        self._main_task.add_done_callback(self._main_exit)
Example #18
def async_iter_completed(futures, max_jobs=None, max_load=None, loop=None):
	"""
	An asynchronous version of iter_completed. This yields futures, which
	when done, result in a set of input futures that are done. This serves
	as a wrapper around portage's internal TaskScheduler class, using
	standard asyncio interfaces.

	@param futures: iterator of asyncio.Future (or compatible)
	@type futures: iterator
	@param max_jobs: max number of futures to process concurrently (default
		is portage.util.cpuinfo.get_cpu_count())
	@type max_jobs: int
	@param max_load: max load allowed when scheduling a new future,
		otherwise schedule no more than 1 future at a time (default
		is portage.util.cpuinfo.get_cpu_count())
	@type max_load: int or float
	@param loop: event loop
	@type loop: EventLoop
	@return: iterator of futures, which when done, result in a set of
		input futures that are done
	@rtype: iterator
	"""
	loop = asyncio._wrap_loop(loop)

	max_jobs = max_jobs or get_cpu_count()
	max_load = max_load or get_cpu_count()

	future_map = {}
	def task_generator():
		for future in futures:
			future_map[id(future)] = future
			yield AsyncTaskFuture(future=future)

	scheduler = TaskScheduler(
		task_generator(),
		max_jobs=max_jobs,
		max_load=max_load,
		event_loop=loop)

	def done_callback(future_done_set, wait_result):
		"""Propagate results from wait_result to future_done_set."""
		if future_done_set.cancelled():
			return
		done, pending = wait_result.result()
		for future in done:
			del future_map[id(future)]
		future_done_set.set_result(done)

	def cancel_callback(wait_result, future_done_set):
		"""Cancel wait_result if future_done_set has been cancelled."""
		if future_done_set.cancelled() and not wait_result.done():
			wait_result.cancel()

	try:
		scheduler.start()

		# scheduler should ensure that future_map is non-empty until
		# task_generator is exhausted
		while future_map:
			wait_result = asyncio.ensure_future(
				asyncio.wait(list(future_map.values()),
				return_when=asyncio.FIRST_COMPLETED, loop=loop), loop=loop)
			future_done_set = loop.create_future()
			future_done_set.add_done_callback(
				functools.partial(cancel_callback, wait_result))
			wait_result.add_done_callback(
				functools.partial(done_callback, future_done_set))
			yield future_done_set
	finally:
		# cleanup in case of interruption by SIGINT, etc
		scheduler.cancel()
		scheduler.wait()
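
async_iter_completed() batches finished input futures into sets, throttled by max_jobs and max_load. The closest standard-library analogue is asyncio.as_completed(), which yields results one at a time and does no load throttling; a minimal sketch:

    import asyncio

    async def main():
        tasks = [asyncio.ensure_future(asyncio.sleep(d, result=d))
                 for d in (0.03, 0.01, 0.02)]
        # Results arrive in completion order, not submission order.
        for next_done in asyncio.as_completed(tasks):
            print(await next_done)

    asyncio.run(main())
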
Example #19
    def testSimple(self):

        debug = False

        install_something = """
S="${WORKDIR}"

pkg_pretend() {
	einfo "called pkg_pretend for $CATEGORY/$PF"
}

src_install() {
	einfo "installing something..."
	insinto /usr/lib/${P}
	echo "blah blah blah" > "${T}"/regular-file
	doins "${T}"/regular-file
	dosym regular-file /usr/lib/${P}/symlink || die

	# Test CONFIG_PROTECT
	insinto /etc
	newins "${T}"/regular-file ${PN}-${SLOT%/*}

	# Test code for bug #381629, using a copyright symbol encoded with latin-1.
	# We use $(printf "\\xa9") rather than $'\\xa9', since printf apparently
	# works in any case, while $'\\xa9' transforms to \\xef\\xbf\\xbd under
	# some conditions. TODO: Find out why it transforms to \\xef\\xbf\\xbd when
	# running tests for Python 3.2 (even though it's bash that is ultimately
	# responsible for performing the transformation).
	local latin_1_dir=/usr/lib/${P}/latin-1-$(printf "\\xa9")-directory
	insinto "${latin_1_dir}"
	echo "blah blah blah" > "${T}"/latin-1-$(printf "\\xa9")-regular-file || die
	doins "${T}"/latin-1-$(printf "\\xa9")-regular-file
	dosym latin-1-$(printf "\\xa9")-regular-file ${latin_1_dir}/latin-1-$(printf "\\xa9")-symlink || die

	call_has_and_best_version
}

pkg_config() {
	einfo "called pkg_config for $CATEGORY/$PF"
}

pkg_info() {
	einfo "called pkg_info for $CATEGORY/$PF"
}

pkg_preinst() {
	if ! ___eapi_best_version_and_has_version_support_-b_-d_-r; then
		# The BROOT variable is unset during pkg_* phases for EAPI 7,
		# therefore best/has_version -b is expected to fail if we attempt
		# to call it for EAPI 7 here.
		call_has_and_best_version
	fi
}

call_has_and_best_version() {
	local root_arg
	if ___eapi_best_version_and_has_version_support_-b_-d_-r; then
		root_arg="-b"
	else
		root_arg="--host-root"
	fi
	einfo "called ${EBUILD_PHASE_FUNC} for $CATEGORY/$PF"
	einfo "EPREFIX=${EPREFIX}"
	einfo "PORTAGE_OVERRIDE_EPREFIX=${PORTAGE_OVERRIDE_EPREFIX}"
	einfo "ROOT=${ROOT}"
	einfo "EROOT=${EROOT}"
	einfo "SYSROOT=${SYSROOT}"
	einfo "ESYSROOT=${ESYSROOT}"
	einfo "BROOT=${BROOT}"
	# Test that has_version and best_version work correctly with
	# prefix (involves internal ROOT -> EROOT calculation in order
	# to support ROOT override via the environment with EAPIs 3
	# and later which support prefix).
	if has_version $CATEGORY/$PN:$SLOT ; then
		einfo "has_version detects an installed instance of $CATEGORY/$PN:$SLOT"
		einfo "best_version reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
	else
		einfo "has_version does not detect an installed instance of $CATEGORY/$PN:$SLOT"
	fi
	if [[ ${EPREFIX} != ${PORTAGE_OVERRIDE_EPREFIX} ]] ; then
		if has_version ${root_arg} $CATEGORY/$PN:$SLOT ; then
			einfo "has_version ${root_arg} detects an installed instance of $CATEGORY/$PN:$SLOT"
			einfo "best_version ${root_arg} reports that the installed instance is $(best_version ${root_arg} $CATEGORY/$PN:$SLOT)"
		else
			einfo "has_version ${root_arg} does not detect an installed instance of $CATEGORY/$PN:$SLOT"
		fi
	fi
}

"""

        ebuilds = {
            "dev-libs/A-1": {
                "EAPI": "5",
                "IUSE": "+flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "MISC_CONTENT": install_something,
                "RDEPEND": "flag? ( dev-libs/B[flag] )",
            },
            "dev-libs/B-1": {
                "EAPI": "5",
                "IUSE": "+flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "MISC_CONTENT": install_something,
            },
            "dev-libs/C-1": {
                "EAPI": "7",
                "KEYWORDS": "~x86",
                "RDEPEND": "dev-libs/D[flag]",
                "MISC_CONTENT": install_something,
            },
            "dev-libs/D-1": {
                "EAPI": "7",
                "KEYWORDS": "~x86",
                "IUSE": "flag",
                "MISC_CONTENT": install_something,
            },
            "virtual/foo-0": {
                "EAPI": "5",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
            },
        }

        installed = {
            "dev-libs/A-1": {
                "EAPI": "5",
                "IUSE": "+flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "RDEPEND": "flag? ( dev-libs/B[flag] )",
                "USE": "flag",
            },
            "dev-libs/B-1": {
                "EAPI": "5",
                "IUSE": "+flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "USE": "flag",
            },
            "dev-libs/depclean-me-1": {
                "EAPI": "5",
                "IUSE": "",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "USE": "",
            },
            "app-misc/depclean-me-1": {
                "EAPI": "5",
                "IUSE": "",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "RDEPEND": "dev-libs/depclean-me",
                "USE": "",
            },
        }

        metadata_xml_files = (
            (
                "dev-libs/A",
                {
                    "flags":
                    "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
                },
            ),
            (
                "dev-libs/B",
                {
                    "flags":
                    "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
                },
            ),
        )

        for binpkg_format in SUPPORTED_GENTOO_BINPKG_FORMATS:
            with self.subTest(binpkg_format=binpkg_format):
                print(colorize("HILITE", binpkg_format), end=" ... ")
                sys.stdout.flush()
                playground = ResolverPlayground(
                    ebuilds=ebuilds,
                    installed=installed,
                    debug=debug,
                    user_config={
                        "make.conf": ('BINPKG_FORMAT="%s"' % binpkg_format, ),
                    },
                )

                loop = asyncio._wrap_loop()
                loop.run_until_complete(
                    asyncio.ensure_future(
                        self._async_test_simple(playground,
                                                metadata_xml_files,
                                                loop=loop),
                        loop=loop,
                    ))
Example #20
    def _start(self):
        self.future = asyncio.ensure_future(
            self.scheduler.run_in_executor(ForkExecutor(loop=self.scheduler),
                                           self._run))
        super(FileCopier, self)._start()
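
FileCopier offloads the blocking copy to a fork-based executor via run_in_executor(). A rough stdlib-only equivalent, substituting the default thread pool for portage's ForkExecutor and shutil.copy2 for FileCopier's internal _run:

    import asyncio
    import shutil

    async def copy_file(src, dst):
        loop = asyncio.get_running_loop()
        # None selects the default ThreadPoolExecutor.
        await loop.run_in_executor(None, shutil.copy2, src, dst)

    # Usage: asyncio.run(copy_file("src.txt", "dst.txt"))
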
Example #21
def async_iter_completed(futures, max_jobs=None, max_load=None, loop=None):
    """
	An asynchronous version of iter_completed. This yields futures, which
	when done, result in a set of input futures that are done. This serves
	as a wrapper around portage's internal TaskScheduler class, using
	standard asyncio interfaces.

	@param futures: iterator of asyncio.Future (or compatible)
	@type futures: iterator
	@param max_jobs: max number of futures to process concurrently (default
		is portage.util.cpuinfo.get_cpu_count())
	@type max_jobs: int
	@param max_load: max load allowed when scheduling a new future,
		otherwise schedule no more than 1 future at a time (default
		is portage.util.cpuinfo.get_cpu_count())
	@type max_load: int or float
	@param loop: event loop
	@type loop: EventLoop
	@return: iterator of futures, which when done, result in a set of
		input futures that are done
	@rtype: iterator
	"""
    loop = asyncio._wrap_loop(loop)

    max_jobs = max_jobs or get_cpu_count()
    max_load = max_load or get_cpu_count()

    future_map = {}

    def task_generator():
        for future in futures:
            future_map[id(future)] = future
            yield AsyncTaskFuture(future=future)

    scheduler = TaskScheduler(task_generator(),
                              max_jobs=max_jobs,
                              max_load=max_load,
                              event_loop=loop)

    def done_callback(future_done_set, wait_result):
        """Propagate results from wait_result to future_done_set."""
        if future_done_set.cancelled():
            return
        done, pending = wait_result.result()
        for future in done:
            del future_map[id(future)]
        future_done_set.set_result(done)

    def cancel_callback(wait_result, future_done_set):
        """Cancel wait_result if future_done_set has been cancelled."""
        if future_done_set.cancelled() and not wait_result.done():
            wait_result.cancel()

    @coroutine
    def fetch_wait_result(scheduler, first, loop=None):
        if first:
            yield scheduler.async_start()

        # If the current coroutine awakens just after a call to
        # done_callback but before scheduler has been notified of
        # corresponding done future(s), then wait here until scheduler
        # is notified (which will cause future_map to populate).
        while not future_map and scheduler.poll() is None:
            yield asyncio.sleep(0, loop=loop)

        if not future_map:
            if scheduler.poll() is not None:
                coroutine_return((set(), set()))
            else:
                raise AssertionError('expected non-empty future_map')

        wait_result = yield asyncio.wait(list(future_map.values()),
                                         return_when=asyncio.FIRST_COMPLETED,
                                         loop=loop)

        coroutine_return(wait_result)

    first = True
    try:
        while True:
            wait_result = asyncio.ensure_future(fetch_wait_result(scheduler,
                                                                  first,
                                                                  loop=loop),
                                                loop=loop)
            first = False
            future_done_set = loop.create_future()
            future_done_set.add_done_callback(
                functools.partial(cancel_callback, wait_result))
            wait_result.add_done_callback(
                functools.partial(done_callback, future_done_set))
            yield future_done_set
            if not future_map and scheduler.poll() is not None:
                break
    finally:
        # cleanup in case of interruption by SIGINT, etc
        scheduler.cancel()
        scheduler.wait()
Example #22
    def start_coroutine(future):
        result = asyncio.ensure_future(coroutine_func(), loop=parent_loop)
        pending[id(result)] = result
        result.add_done_callback(done_callback)
        future.set_result(result)
Example #23
    def _start(self):
        future = asyncio.ensure_future(self._async_start(),
                                       loop=self.scheduler)
        self._start_task(AsyncTaskFuture(future=future),
                         self._async_start_exit)
Example #24
    def test_method_coroutine(self):
        class Cubby:

            _empty = object()

            def __init__(self, loop):
                self._loop = loop
                self._value = self._empty
                self._waiters = []

            def _notify(self):
                waiters = self._waiters
                self._waiters = []
                for waiter in waiters:
                    waiter.cancelled() or waiter.set_result(None)

            def _wait(self):
                waiter = self._loop.create_future()
                self._waiters.append(waiter)
                return waiter

            @coroutine
            def read(self, loop=None):
                while self._value is self._empty:
                    yield self._wait()

                value = self._value
                self._value = self._empty
                self._notify()
                coroutine_return(value)

            @coroutine
            def write(self, value, loop=None):
                while self._value is not self._empty:
                    yield self._wait()

                self._value = value
                self._notify()

        @coroutine
        def writer_coroutine(cubby, values, sentinel, loop=None):
            for value in values:
                yield cubby.write(value, loop=loop)
            yield cubby.write(sentinel, loop=loop)

        @coroutine
        def reader_coroutine(cubby, sentinel, loop=None):
            results = []
            while True:
                result = yield cubby.read(loop=loop)
                if result == sentinel:
                    break
                results.append(result)
            coroutine_return(results)

        loop = asyncio.get_event_loop()
        cubby = Cubby(loop)
        values = list(range(3))
        writer = asyncio.ensure_future(writer_coroutine(cubby,
                                                        values,
                                                        None,
                                                        loop=loop),
                                       loop=loop)
        reader = asyncio.ensure_future(reader_coroutine(cubby, None,
                                                        loop=loop),
                                       loop=loop)
        loop.run_until_complete(asyncio.wait([writer, reader], loop=loop))

        self.assertEqual(reader.result(), values)

        # Test decoration of coroutine methods and functions for
        # synchronous usage, allowing coroutines to smoothly
        # blend with synchronous code.
        sync_cubby = _sync_methods(cubby, loop=loop)
        sync_reader = _sync_decorator(reader_coroutine, loop=loop)
        writer = asyncio.ensure_future(writer_coroutine(cubby,
                                                        values,
                                                        None,
                                                        loop=loop),
                                       loop=loop)
        self.assertEqual(sync_reader(cubby, None), values)
        self.assertTrue(writer.done())

        for i in range(3):
            sync_cubby.write(i)
            self.assertEqual(sync_cubby.read(), i)
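
The Cubby class is a one-slot handoff built on the generator-based @coroutine compatibility decorator: write() blocks until the slot is empty and read() blocks until it is full. With native async/await, asyncio.Queue(maxsize=1) provides the same semantics; a sketch:

    import asyncio

    async def main():
        cubby = asyncio.Queue(maxsize=1)
        sentinel = object()

        async def writer(values):
            for value in values:
                await cubby.put(value)
            await cubby.put(sentinel)

        async def reader():
            results = []
            while (value := await cubby.get()) is not sentinel:
                results.append(value)
            return results

        _, results = await asyncio.gather(writer(range(3)), reader())
        assert results == [0, 1, 2]

    asyncio.run(main())
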
Example #25
	def _iter_tasks(self, loop, executor, ebuild, pkg):
		for keyword, groups, prof in ebuild.relevant_profiles:

			is_stable_profile = prof.status == "stable"
			is_dev_profile = prof.status == "dev" and \
				self.options.include_dev
			is_exp_profile = prof.status == "exp" and \
				self.options.include_exp_profiles == 'y'
			if not (is_stable_profile or is_dev_profile or is_exp_profile):
				continue

			dep_settings = self.caches['arch'].get(prof.sub_path)
			if dep_settings is None:
				dep_settings = portage.config(
					config_profile_path=prof.abs_path,
					config_incrementals=self.repoman_incrementals,
					config_root=self.repo_settings.config_root,
					local_config=False,
					_unmatched_removal=self.options.unmatched_removal,
					env=self.env, repositories=self.repo_settings.repoman_settings.repositories)

				if not prof.abs_path:
					self._populate_implicit_iuse(dep_settings,
						self.repo_settings.repo_config.eclass_db.porttrees)

				dep_settings.categories = self.repo_settings.repoman_settings.categories
				if self.options.without_mask:
					dep_settings._mask_manager_obj = \
						copy.deepcopy(dep_settings._mask_manager)
					dep_settings._mask_manager._pmaskdict.clear()
				self.caches['arch'][prof.sub_path] = dep_settings

			xmatch_cache_key = (prof.sub_path, tuple(groups))
			xcache = self.caches['arch_xmatch'].get(xmatch_cache_key)
			if xcache is None:
				self.portdb.melt()
				self.portdb.freeze()
				xcache = self.portdb.xcache
				xcache.update(self.caches['shared_xmatch'])
				self.caches['arch_xmatch'][xmatch_cache_key] = xcache

			self.repo_settings.trees[self.repo_settings.root]["porttree"].settings = dep_settings
			self.portdb.settings = dep_settings
			self.portdb.xcache = xcache

			dep_settings["ACCEPT_KEYWORDS"] = " ".join(groups)
			# just in case, prevent config.reset() from nuking these.
			dep_settings.backup_changes("ACCEPT_KEYWORDS")

			# This attribute is used in dbapi._match_use() to apply
			# use.stable.{mask,force} settings based on the stable
			# status of the parent package. This is required in order
			# for USE deps of unstable packages to be resolved correctly,
			# since otherwise use.stable.{mask,force} settings of
			# dependencies may conflict (see bug #456342).
			dep_settings._parent_stable = dep_settings._isStable(pkg)

			# Handle package.use*.{force,mask} calculation, for use
			# in dep_check.
			dep_settings.useforce = dep_settings._use_manager.getUseForce(
				pkg, stable=dep_settings._parent_stable)
			dep_settings.usemask = dep_settings._use_manager.getUseMask(
				pkg, stable=dep_settings._parent_stable)

			task = types.SimpleNamespace(ebuild=ebuild, prof=prof, keyword=keyword)

			target = functools.partial(self._task_subprocess, task, pkg, dep_settings)

			if self.options.jobs <= 1:
				yield (task, target())
			else:
				task.future = asyncio.ensure_future(loop.run_in_executor(executor, target), loop=loop)
				yield self._task(task, loop=loop)
Example #26
    def _start(self):
        self.future = asyncio.ensure_future(self.future, loop=self.scheduler)
        self.future.add_done_callback(self._done_callback)
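
Passing self.future through ensure_future() works because the function is idempotent for futures: a coroutine gets wrapped in a Task, but an existing Task or Future is returned unchanged. For example:

    import asyncio

    async def main():
        task = asyncio.ensure_future(asyncio.sleep(0))
        # Passing an existing task back in returns the same object.
        assert asyncio.ensure_future(task) is task
        await task

    asyncio.run(main())
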
Example #27
    def testSimple(self):
        debug = False

        skip_reason = self._must_skip()
        if skip_reason:
            self.portage_skip = skip_reason
            self.assertFalse(True, skip_reason)
            return

        copyright_header = """# Copyright 1999-%s Gentoo Authors
# Distributed under the terms of the GNU General Public License v2

""" % time.gmtime().tm_year

        pkg_preinst_references_forbidden_var = """
pkg_preinst() {
	echo "This ${A} reference is not allowed. Neither is this $BROOT reference."
}
"""

        repo_configs = {
            "test_repo": {
                "layout.conf": ("update-changelog = true", ),
            }
        }

        profiles = (
            ("x86", "default/linux/x86/test_profile", "stable"),
            ("x86", "default/linux/x86/test_dev", "dev"),
            ("x86", "default/linux/x86/test_exp", "exp"),
        )

        profile = {
            "eapi": ("5", ),
            "package.use.stable.mask": ("dev-libs/A flag", )
        }

        ebuilds = {
            "dev-libs/A-0": {
                "COPYRIGHT_HEADER": copyright_header,
                "DESCRIPTION": "Desc goes here",
                "EAPI": "5",
                "HOMEPAGE": "https://example.com",
                "IUSE": "flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "RDEPEND": "flag? ( dev-libs/B[flag] )",
            },
            "dev-libs/A-1": {
                "COPYRIGHT_HEADER": copyright_header,
                "DESCRIPTION": "Desc goes here",
                "EAPI": "4",
                "HOMEPAGE": "https://example.com",
                "IUSE": "flag",
                "KEYWORDS": "~x86",
                "LICENSE": "GPL-2",
                "RDEPEND": "flag? ( dev-libs/B[flag] )",
            },
            "dev-libs/B-1": {
                "COPYRIGHT_HEADER": copyright_header,
                "DESCRIPTION": "Desc goes here",
                "EAPI": "4",
                "HOMEPAGE": "https://example.com",
                "IUSE": "flag",
                "KEYWORDS": "~x86",
                "LICENSE": "GPL-2",
            },
            "dev-libs/C-0": {
                "COPYRIGHT_HEADER": copyright_header,
                "DESCRIPTION": "Desc goes here",
                "EAPI": "7",
                "HOMEPAGE": "https://example.com",
                "IUSE": "flag",
                # must be unstable, since dev-libs/A[flag] is stable masked
                "KEYWORDS": "~x86",
                "LICENSE": "GPL-2",
                "RDEPEND": "flag? ( dev-libs/A[flag] )",
                "MISC_CONTENT": pkg_preinst_references_forbidden_var,
            },
        }
        licenses = ["GPL-2"]
        arch_list = ["x86"]
        metadata_xsd = os.path.join(REPOMAN_BASE_PATH, "cnf/metadata.xsd")
        metadata_xml_files = (
            (
                "dev-libs/A",
                {
                    "flags":
                    "<flag name='flag' restrict='&gt;=dev-libs/A-0'>Description of how USE='flag' affects this package</flag>",
                },
            ),
            (
                "dev-libs/B",
                {
                    "flags":
                    "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
                },
            ),
            (
                "dev-libs/C",
                {
                    "flags":
                    "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
                },
            ),
        )

        use_desc = (("flag",
                     "Description of how USE='flag' affects packages"), )

        playground = ResolverPlayground(ebuilds=ebuilds,
                                        profile=profile,
                                        repo_configs=repo_configs,
                                        debug=debug)

        loop = asyncio._wrap_loop()
        loop.run_until_complete(
            asyncio.ensure_future(
                self._async_test_simple(
                    playground,
                    metadata_xml_files,
                    profiles,
                    profile,
                    licenses,
                    arch_list,
                    use_desc,
                    metadata_xsd,
                    copyright_header,
                    debug,
                ),
                loop=loop,
            ))
Example #28
	def test_method_coroutine(self):

		class Cubby(object):

			_empty = object()

			def __init__(self, loop):
				self._loop = loop
				self._value = self._empty
				self._waiters = []

			def _notify(self):
				waiters = self._waiters
				self._waiters = []
				for waiter in waiters:
					waiter.cancelled() or waiter.set_result(None)

			def _wait(self):
				waiter = self._loop.create_future()
				self._waiters.append(waiter)
				return waiter

			@coroutine
			def read(self):
				while self._value is self._empty:
					yield self._wait()

				value = self._value
				self._value = self._empty
				self._notify()
				coroutine_return(value)

			@coroutine
			def write(self, value):
				while self._value is not self._empty:
					yield self._wait()

				self._value = value
				self._notify()

		@coroutine
		def writer_coroutine(cubby, values, sentinel):
			for value in values:
				yield cubby.write(value)
			yield cubby.write(sentinel)

		@coroutine
		def reader_coroutine(cubby, sentinel):
			results = []
			while True:
				result = yield cubby.read()
				if result == sentinel:
					break
				results.append(result)
			coroutine_return(results)

		loop = asyncio.get_event_loop()
		cubby = Cubby(loop)
		values = list(range(3))
		writer = asyncio.ensure_future(writer_coroutine(cubby, values, None), loop=loop)
		reader = asyncio.ensure_future(reader_coroutine(cubby, None), loop=loop)
		loop.run_until_complete(asyncio.wait([writer, reader]))

		self.assertEqual(reader.result(), values)

		# Test decoration of coroutine methods and functions for
		# synchronous usage, allowing coroutines to smoothly
		# blend with synchronous code.
		sync_cubby = _sync_methods(cubby, loop=loop)
		sync_reader = _sync_decorator(reader_coroutine, loop=loop)
		writer = asyncio.ensure_future(writer_coroutine(cubby, values, None), loop=loop)
		self.assertEqual(sync_reader(cubby, None), values)
		self.assertTrue(writer.done())

		for i in range(3):
			sync_cubby.write(i)
			self.assertEqual(sync_cubby.read(), i)