Example #1
    def _start(self):
        self._fetch_iterator = FetchIterator(self._config)
        fetch = TaskScheduler(iter(self._fetch_iterator),
                              max_jobs=self._config.options.jobs,
                              max_load=self._config.options.load_average,
                              event_loop=self._config.event_loop)
        self._start_task(fetch, self._fetch_exit)
Example #2
    def _start(self):
        self._term_check_id = self.scheduler.idle_add(self._termination_check)
        fetch = TaskScheduler(iter(FetchIterator(self._config)),
                              max_jobs=self._config.options.jobs,
                              max_load=self._config.options.load_average,
                              event_loop=self._config.event_loop)
        self._start_task(fetch, self._fetch_exit)
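These two snippets only construct the scheduler and hand it off; the examples below show the rest of its lifecycle. A minimal hypothetical sketch of that lifecycle, assuming the TaskScheduler and global_event_loop import paths that portage used at the time of these examples:

    # Hypothetical sketch, not taken verbatim from portage: build a
    # TaskScheduler from an iterator of tasks, start it, and wait for
    # every task to exit (the start()/wait() pattern used below).
    from portage.util._async.TaskScheduler import TaskScheduler
    from portage.util._eventloop.global_event_loop import global_event_loop

    def run_tasks(tasks):
        scheduler = TaskScheduler(iter(tasks), max_jobs=2,
                                  event_loop=global_event_loop())
        scheduler.start()
        scheduler.wait()
        return scheduler.returncode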
Example #3
	def _testPipeReader(self, test_string):
		"""
		Use a poll loop to read data from a pipe and assert that
		the data written to the pipe is identical to the data
		read from the pipe.
		"""

		if self._use_pty:
			got_pty, master_fd, slave_fd = _create_pty_or_pipe()
			if not got_pty:
				os.close(slave_fd)
				os.close(master_fd)
				skip_reason = "pty not acquired"
				self.portage_skip = skip_reason
				self.fail(skip_reason)
				return
		else:
			master_fd, slave_fd = os.pipe()

		# WARNING: It is very important to use unbuffered mode here,
		# in order to avoid issue 5380 with python3.
		master_file = os.fdopen(master_fd, 'rb', 0)
		slave_file = os.fdopen(slave_fd, 'wb', 0)
		producer = SpawnProcess(
			args=["bash", "-c", self._echo_cmd % test_string],
			env=os.environ, fd_pipes={1:slave_fd})

		consumer = PipeReader(
			input_files={"producer" : master_file},
			_use_array=self._use_array)

		task_scheduler = TaskScheduler(iter([producer, consumer]), max_jobs=2)

		# This will ensure that both tasks have exited, which
		# is necessary to avoid "ResourceWarning: unclosed file"
		# warnings since Python 3.2 (and also ensures that we
		# don't leave any zombie child processes).
		task_scheduler.start()
		slave_file.close()
		task_scheduler.wait()

		self.assertEqual(producer.returncode, os.EX_OK)
		self.assertEqual(consumer.returncode, os.EX_OK)

		return consumer.getvalue().decode('ascii', 'replace')
Example #4
    def _start_fetch_tasks(self, task):
        if self._default_exit(task) != os.EX_OK:
            self._async_wait()
            return

        self._start_task(
            TaskScheduler(iter(self.fetch_tasks_future.result()),
                          max_jobs=1,
                          event_loop=self.scheduler), self._default_final_exit)
Example #5
def iter_completed(futures, max_jobs=None, max_load=None, loop=None):
    """
    This is similar to asyncio.as_completed, but takes an iterator of
    futures as input, and includes support for max_jobs and max_load
    parameters.

    @param futures: iterator of asyncio.Future (or compatible)
    @type futures: iterator
    @param max_jobs: max number of futures to process concurrently (default
        is multiprocessing.cpu_count())
    @type max_jobs: int
    @param max_load: max load allowed when scheduling a new future,
        otherwise schedule no more than 1 future at a time (default
        is multiprocessing.cpu_count())
    @type max_load: int or float
    @param loop: event loop
    @type loop: EventLoop
    @return: iterator of futures that are done
    @rtype: iterator
    """
    loop = loop or global_event_loop()
    max_jobs = max_jobs or multiprocessing.cpu_count()
    max_load = max_load or multiprocessing.cpu_count()

    future_map = {}

    def task_generator():
        for future in futures:
            future_map[id(future)] = future
            yield AsyncTaskFuture(future=future)

    scheduler = TaskScheduler(task_generator(),
                              max_jobs=max_jobs,
                              max_load=max_load,
                              event_loop=loop)

    try:
        scheduler.start()

        # scheduler should ensure that future_map is non-empty until
        # task_generator is exhausted
        while future_map:
            done, pending = loop.run_until_complete(
                wait(list(future_map.values()), return_when=FIRST_COMPLETED))
            for future in done:
                del future_map[id(future)]
                yield future

    finally:
        # cleanup in case of interruption by SIGINT, etc
        scheduler.cancel()
        scheduler.wait()
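A short hypothetical usage sketch for the iter_completed above; make_future is invented for illustration, and only iter_completed itself comes from the example:

    # Hypothetical usage sketch: create futures that resolve on the next
    # loop iteration and consume them in completion order.
    loop = global_event_loop()

    def make_future(n):
        future = loop.create_future()
        loop.call_soon(future.set_result, n * n)  # resolve shortly after start
        return future

    for future in iter_completed((make_future(n) for n in range(8)),
                                 max_jobs=4, loop=loop):
        print(future.result())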
Example #6
def iter_completed(futures, max_jobs=None, max_load=None, loop=None):
	"""
	This is similar to asyncio.as_completed, but takes an iterator of
	futures as input, and includes support for max_jobs and max_load
	parameters.

	@param futures: iterator of asyncio.Future (or compatible)
	@type futures: iterator
	@param max_jobs: max number of futures to process concurrently (default
		is multiprocessing.cpu_count())
	@type max_jobs: int
	@param max_load: max load allowed when scheduling a new future,
		otherwise schedule no more than 1 future at a time (default
		is multiprocessing.cpu_count())
	@type max_load: int or float
	@param loop: event loop
	@type loop: EventLoop
	@return: iterator of futures that are done
	@rtype: iterator
	"""
	loop = loop or global_event_loop()
	max_jobs = max_jobs or multiprocessing.cpu_count()
	max_load = max_load or multiprocessing.cpu_count()

	future_map = {}
	def task_generator():
		for future in futures:
			future_map[id(future)] = future
			yield AsyncTaskFuture(future=future)

	scheduler = TaskScheduler(
		task_generator(),
		max_jobs=max_jobs,
		max_load=max_load,
		event_loop=loop)

	try:
		scheduler.start()

		# scheduler should ensure that future_map is non-empty until
		# task_generator is exhausted
		while future_map:
			done, pending = loop.run_until_complete(
				wait(list(future_map.values()), return_when=FIRST_COMPLETED))
			for future in done:
				del future_map[id(future)]
				yield future

	finally:
		# cleanup in case of interruption by SIGINT, etc
		scheduler.cancel()
		scheduler.wait()
Example #7
	def _fetch_exit(self, fetch):

		self._assert_current(fetch)
		if self._was_cancelled():
			self._async_wait()
			return

		if self._config.options.delete:
			deletion = TaskScheduler(iter(DeletionIterator(self._config)),
				max_jobs=self._config.options.jobs,
				max_load=self._config.options.load_average,
				event_loop=self._config.event_loop)
			self._start_task(deletion, self._deletion_exit)
			return

		self._post_deletion()
Example #8
    def _testPipeReader(self, test_string):
        """
        Use a poll loop to read data from a pipe and assert that
        the data written to the pipe is identical to the data
        read from the pipe.
        """

        if self._use_pty:
            got_pty, master_fd, slave_fd = _create_pty_or_pipe()
            if not got_pty:
                os.close(slave_fd)
                os.close(master_fd)
                skip_reason = "pty not acquired"
                self.portage_skip = skip_reason
                self.fail(skip_reason)
                return
        else:
            master_fd, slave_fd = os.pipe()

        # WARNING: It is very important to use unbuffered mode here,
        # in order to avoid issue 5380 with python3.
        master_file = os.fdopen(master_fd, 'rb', 0)
        slave_file = os.fdopen(slave_fd, 'wb', 0)
        producer = SpawnProcess(
            args=["bash", "-c", self._echo_cmd % test_string],
            env=os.environ,
            fd_pipes={1: slave_fd})

        consumer = PipeReader(input_files={"producer": master_file},
                              _use_array=self._use_array)

        task_scheduler = TaskScheduler(iter([producer, consumer]), max_jobs=2)

        # This will ensure that both tasks have exited, which
        # is necessary to avoid "ResourceWarning: unclosed file"
        # warnings since Python 3.2 (and also ensures that we
        # don't leave any zombie child processes).
        task_scheduler.start()
        slave_file.close()
        task_scheduler.wait()

        self.assertEqual(producer.returncode, os.EX_OK)
        self.assertEqual(consumer.returncode, os.EX_OK)

        return consumer.getvalue().decode('ascii', 'replace')
Example #9
def async_iter_completed(futures, max_jobs=None, max_load=None, loop=None):
    """
    An asynchronous version of iter_completed. This yields futures, which
    when done, result in a set of input futures that are done. This serves
    as a wrapper around portage's internal TaskScheduler class, using
    standard asyncio interfaces.

    @param futures: iterator of asyncio.Future (or compatible)
    @type futures: iterator
    @param max_jobs: max number of futures to process concurrently (default
        is portage.util.cpuinfo.get_cpu_count())
    @type max_jobs: int
    @param max_load: max load allowed when scheduling a new future,
        otherwise schedule no more than 1 future at a time (default
        is portage.util.cpuinfo.get_cpu_count())
    @type max_load: int or float
    @param loop: event loop
    @type loop: EventLoop
    @return: iterator of futures, which when done, result in a set of
        input futures that are done
    @rtype: iterator
    """
    loop = asyncio._wrap_loop(loop)

    max_jobs = max_jobs or get_cpu_count()
    max_load = max_load or get_cpu_count()

    future_map = {}

    def task_generator():
        for future in futures:
            future_map[id(future)] = future
            yield AsyncTaskFuture(future=future)

    scheduler = TaskScheduler(task_generator(),
                              max_jobs=max_jobs,
                              max_load=max_load,
                              event_loop=loop)

    def done_callback(future_done_set, wait_result):
        """Propagate results from wait_result to future_done_set."""
        if future_done_set.cancelled():
            return
        done, pending = wait_result.result()
        for future in done:
            del future_map[id(future)]
        future_done_set.set_result(done)

    def cancel_callback(wait_result, future_done_set):
        """Cancel wait_result if future_done_set has been cancelled."""
        if future_done_set.cancelled() and not wait_result.done():
            wait_result.cancel()

    try:
        scheduler.start()

        # scheduler should ensure that future_map is non-empty until
        # task_generator is exhausted
        while future_map:
            wait_result = asyncio.ensure_future(
                asyncio.wait(list(future_map.values()),
                             return_when=asyncio.FIRST_COMPLETED,
                             loop=loop),
                loop=loop)
            future_done_set = loop.create_future()
            future_done_set.add_done_callback(
                functools.partial(cancel_callback, wait_result))
            wait_result.add_done_callback(
                functools.partial(done_callback, future_done_set))
            yield future_done_set
    finally:
        # cleanup in case of interruption by SIGINT, etc
        scheduler.cancel()
        scheduler.wait()
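A hypothetical sketch of driving the async_iter_completed above from a coroutine (written with async/await for brevity); each yielded future resolves to the set of input futures that finished in that batch:

    # Hypothetical usage sketch: await each batch of completed input
    # futures as async_iter_completed yields it.
    async def consume(futures, loop=None):
        for future_done_set in async_iter_completed(futures, max_jobs=4,
                                                    loop=loop):
            for future in await future_done_set:
                print(future.result())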
Example #10
def async_iter_completed(futures, max_jobs=None, max_load=None, loop=None):
    """
    An asynchronous version of iter_completed. This yields futures, which
    when done, result in a set of input futures that are done. This serves
    as a wrapper around portage's internal TaskScheduler class, using
    standard asyncio interfaces.

    @param futures: iterator of asyncio.Future (or compatible)
    @type futures: iterator
    @param max_jobs: max number of futures to process concurrently (default
        is portage.util.cpuinfo.get_cpu_count())
    @type max_jobs: int
    @param max_load: max load allowed when scheduling a new future,
        otherwise schedule no more than 1 future at a time (default
        is portage.util.cpuinfo.get_cpu_count())
    @type max_load: int or float
    @param loop: event loop
    @type loop: EventLoop
    @return: iterator of futures, which when done, result in a set of
        input futures that are done
    @rtype: iterator
    """
    loop = asyncio._wrap_loop(loop)

    max_jobs = max_jobs or get_cpu_count()
    max_load = max_load or get_cpu_count()

    future_map = {}

    def task_generator():
        for future in futures:
            future_map[id(future)] = future
            yield AsyncTaskFuture(future=future)

    scheduler = TaskScheduler(task_generator(),
                              max_jobs=max_jobs,
                              max_load=max_load,
                              event_loop=loop)

    def done_callback(future_done_set, wait_result):
        """Propagate results from wait_result to future_done_set."""
        if future_done_set.cancelled():
            return
        done, pending = wait_result.result()
        for future in done:
            del future_map[id(future)]
        future_done_set.set_result(done)

    def cancel_callback(wait_result, future_done_set):
        """Cancel wait_result if future_done_set has been cancelled."""
        if future_done_set.cancelled() and not wait_result.done():
            wait_result.cancel()

    @coroutine
    def fetch_wait_result(scheduler, first, loop=None):
        if first:
            yield scheduler.async_start()

        # If the current coroutine awakens just after a call to
        # done_callback but before scheduler has been notified of
        # corresponding done future(s), then wait here until scheduler
        # is notified (which will cause future_map to populate).
        while not future_map and scheduler.poll() is None:
            yield asyncio.sleep(0, loop=loop)

        if not future_map:
            if scheduler.poll() is not None:
                coroutine_return((set(), set()))
            else:
                raise AssertionError('expected non-empty future_map')

        wait_result = yield asyncio.wait(list(future_map.values()),
                                         return_when=asyncio.FIRST_COMPLETED,
                                         loop=loop)

        coroutine_return(wait_result)

    first = True
    try:
        while True:
            wait_result = asyncio.ensure_future(
                fetch_wait_result(scheduler, first, loop=loop), loop=loop)
            first = False
            future_done_set = loop.create_future()
            future_done_set.add_done_callback(
                functools.partial(cancel_callback, wait_result))
            wait_result.add_done_callback(
                functools.partial(done_callback, future_done_set))
            yield future_done_set
            if not future_map and scheduler.poll() is not None:
                break
    finally:
        # cleanup in case of interruption by SIGINT, etc
        scheduler.cancel()
        scheduler.wait()
Example #11
    def testDoebuild(self):
        """
        Invoke portage.doebuild() with the fd_pipes parameter, and
        check that the expected output appears in the pipe. This
        functionality is not used by portage internally, but it is
        supported for API consumers (see bug #475812).
        """

        output_fd = 200
        ebuild_body = ["S=${WORKDIR}"]
        for phase_func in (
                "pkg_info",
                "pkg_nofetch",
                "pkg_pretend",
                "pkg_setup",
                "src_unpack",
                "src_prepare",
                "src_configure",
                "src_compile",
                "src_test",
                "src_install",
        ):
            ebuild_body.append(("%s() { echo ${EBUILD_PHASE}"
                                " 1>&%s; }") % (phase_func, output_fd))

        ebuild_body.append("")
        ebuild_body = "\n".join(ebuild_body)

        ebuilds = {
            "app-misct/foo-1": {
                "EAPI": "5",
                "MISC_CONTENT": ebuild_body,
            }
        }

        # Override things that may be unavailable, or may have portability
        # issues when running tests in exotic environments.
        #   prepstrip - bug #447810 (bash read builtin EINTR problem)
        true_symlinks = ("find", "prepstrip", "sed", "scanelf")
        true_binary = portage.process.find_binary("true")
        self.assertEqual(true_binary is None, False, "true command not found")

        dev_null = open(os.devnull, "wb")
        playground = ResolverPlayground(ebuilds=ebuilds)
        try:
            QueryCommand._db = playground.trees
            root_config = playground.trees[playground.eroot]["root_config"]
            portdb = root_config.trees["porttree"].dbapi
            settings = portage.config(clone=playground.settings)
            if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
                settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[
                    "__PORTAGE_TEST_HARDLINK_LOCKS"]
                settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")

            settings.features.add("noauto")
            settings.features.add("test")
            settings["PORTAGE_PYTHON"] = portage._python_interpreter
            settings["PORTAGE_QUIET"] = "1"
            settings["PYTHONDONTWRITEBYTECODE"] = os.environ.get(
                "PYTHONDONTWRITEBYTECODE", "")

            fake_bin = os.path.join(settings["EPREFIX"], "bin")
            portage.util.ensure_dirs(fake_bin)
            for x in true_symlinks:
                os.symlink(true_binary, os.path.join(fake_bin, x))

            settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin
            settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE")

            cpv = "app-misct/foo-1"
            metadata = dict(
                zip(Package.metadata_keys,
                    portdb.aux_get(cpv, Package.metadata_keys)))

            pkg = Package(
                built=False,
                cpv=cpv,
                installed=False,
                metadata=metadata,
                root_config=root_config,
                type_name="ebuild",
            )
            settings.setcpv(pkg)
            ebuildpath = portdb.findname(cpv)
            self.assertNotEqual(ebuildpath, None)

            for phase in (
                    "info",
                    "nofetch",
                    "pretend",
                    "setup",
                    "unpack",
                    "prepare",
                    "configure",
                    "compile",
                    "test",
                    "install",
                    "qmerge",
                    "clean",
                    "merge",
            ):

                pr, pw = os.pipe()

                producer = DoebuildProcess(
                    doebuild_pargs=(ebuildpath, phase),
                    doebuild_kwargs={
                        "settings": settings,
                        "mydbapi": portdb,
                        "tree": "porttree",
                        "vartree": root_config.trees["vartree"],
                        "fd_pipes": {
                            1: dev_null.fileno(),
                            2: dev_null.fileno(),
                            output_fd: pw,
                        },
                        "prev_mtimes": {},
                    },
                )

                consumer = PipeReader(input_files={"producer": pr})

                task_scheduler = TaskScheduler(iter([producer, consumer]),
                                               max_jobs=2)

                try:
                    task_scheduler.start()
                finally:
                    # PipeReader closes pr
                    os.close(pw)

                task_scheduler.wait()
                output = portage._unicode_decode(
                    consumer.getvalue()).rstrip("\n")

                if task_scheduler.returncode != os.EX_OK:
                    portage.writemsg(output, noiselevel=-1)

                self.assertEqual(task_scheduler.returncode, os.EX_OK)

                if phase not in ("clean", "merge", "qmerge"):
                    self.assertEqual(phase, output)

        finally:
            dev_null.close()
            playground.cleanup()
            QueryCommand._db = None
Example #12
    def testIpcDaemon(self):
        event_loop = global_event_loop()
        tmpdir = tempfile.mkdtemp()
        build_dir = None
        try:
            env = {}

            # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
            # need to be inherited by ebuild subprocesses.
            if 'PORTAGE_USERNAME' in os.environ:
                env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
            if 'PORTAGE_GRPNAME' in os.environ:
                env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

            env['PORTAGE_PYTHON'] = _python_interpreter
            env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
            env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
            env['PORTAGE_BUILDDIR'] = os.path.join(tmpdir, 'cat', 'pkg-1')
            env['PYTHONDONTWRITEBYTECODE'] = os.environ.get(
                'PYTHONDONTWRITEBYTECODE', '')

            if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
                env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
                 os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

            build_dir = EbuildBuildDir(scheduler=event_loop, settings=env)
            event_loop.run_until_complete(build_dir.async_lock())
            ensure_dirs(env['PORTAGE_BUILDDIR'])

            input_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_in')
            output_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_out')
            os.mkfifo(input_fifo)
            os.mkfifo(output_fifo)

            for exitcode in (0, 1, 2):
                exit_command = ExitCommand()
                commands = {'exit': exit_command}
                daemon = EbuildIpcDaemon(commands=commands,
                                         input_fifo=input_fifo,
                                         output_fifo=output_fifo)
                proc = SpawnProcess(
                    args=[BASH_BINARY, "-c",
                          '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
                    env=env)
                task_scheduler = TaskScheduler(iter([daemon, proc]),
                                               max_jobs=2,
                                               event_loop=event_loop)

                self.received_command = False

                def exit_command_callback():
                    self.received_command = True
                    task_scheduler.cancel()

                exit_command.reply_hook = exit_command_callback
                start_time = time.time()
                self._run(event_loop, task_scheduler, self._SCHEDULE_TIMEOUT)

                hardlock_cleanup(env['PORTAGE_BUILDDIR'],
                                 remove_all_locks=True)

                self.assertEqual(
                    self.received_command, True,
                    "command not received after %d seconds" %
                    (time.time() - start_time,))
                self.assertEqual(proc.isAlive(), False)
                self.assertEqual(daemon.isAlive(), False)
                self.assertEqual(exit_command.exitcode, exitcode)

            # Intentionally short timeout test for EventLoop/AsyncScheduler.
            # Use a ridiculously long sleep_time_s in case the user's
            # system is heavily loaded (see bug #436334).
            sleep_time_s = 600  # seconds
            short_timeout_s = 0.010  # seconds

            for i in range(3):
                exit_command = ExitCommand()
                commands = {'exit': exit_command}
                daemon = EbuildIpcDaemon(commands=commands,
                                         input_fifo=input_fifo,
                                         output_fifo=output_fifo)
                proc = SleepProcess(seconds=sleep_time_s)
                task_scheduler = TaskScheduler(iter([daemon, proc]),
                                               max_jobs=2,
                                               event_loop=event_loop)

                self.received_command = False

                def exit_command_callback():
                    self.received_command = True
                    task_scheduler.cancel()

                exit_command.reply_hook = exit_command_callback
                start_time = time.time()
                self._run(event_loop, task_scheduler, short_timeout_s)

                hardlock_cleanup(env['PORTAGE_BUILDDIR'],
                                 remove_all_locks=True)

                self.assertEqual(
                    self.received_command, False,
                    "command received after %d seconds" %
                    (time.time() - start_time,))
                self.assertEqual(proc.isAlive(), False)
                self.assertEqual(daemon.isAlive(), False)
                self.assertEqual(proc.returncode == os.EX_OK, False)

        finally:
            if build_dir is not None:
                event_loop.run_until_complete(build_dir.async_unlock())
            shutil.rmtree(tmpdir)
Example #13
	def testDoebuild(self):
		"""
		Invoke portage.doebuild() with the fd_pipes parameter, and
		check that the expected output appears in the pipe. This
		functionality is not used by portage internally, but it is
		supported for API consumers (see bug #475812).
		"""

		output_fd = 200
		ebuild_body = ['S=${WORKDIR}']
		for phase_func in ('pkg_info', 'pkg_nofetch', 'pkg_pretend',
			'pkg_setup', 'src_unpack', 'src_prepare', 'src_configure',
			'src_compile', 'src_test', 'src_install'):
			ebuild_body.append(('%s() { echo ${EBUILD_PHASE}'
				' 1>&%s; }') % (phase_func, output_fd))

		ebuild_body.append('')
		ebuild_body = '\n'.join(ebuild_body)

		ebuilds = {
			'app-misct/foo-1': {
				'EAPI'      : '5',
				"MISC_CONTENT": ebuild_body,
			}
		}

		# Override things that may be unavailable, or may have portability
		# issues when running tests in exotic environments.
		#   prepstrip - bug #447810 (bash read builtin EINTR problem)
		true_symlinks = ("find", "prepstrip", "sed", "scanelf")
		true_binary = portage.process.find_binary("true")
		self.assertEqual(true_binary is None, False,
			"true command not found")

		dev_null = open(os.devnull, 'wb')
		playground = ResolverPlayground(ebuilds=ebuilds)
		try:
			QueryCommand._db = playground.trees
			root_config = playground.trees[playground.eroot]['root_config']
			portdb = root_config.trees["porttree"].dbapi
			settings = portage.config(clone=playground.settings)
			if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
				settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
					os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
				settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")

			settings.features.add("noauto")
			settings.features.add("test")
			settings['PORTAGE_PYTHON'] = portage._python_interpreter
			settings['PORTAGE_QUIET'] = "1"
			settings['PYTHONDONTWRITEBYTECODE'] = os.environ.get("PYTHONDONTWRITEBYTECODE", "")

			fake_bin = os.path.join(settings["EPREFIX"], "bin")
			portage.util.ensure_dirs(fake_bin)
			for x in true_symlinks:
				os.symlink(true_binary, os.path.join(fake_bin, x))

			settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin
			settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE")

			cpv = 'app-misct/foo-1'
			metadata = dict(zip(Package.metadata_keys,
				portdb.aux_get(cpv, Package.metadata_keys)))

			pkg = Package(built=False, cpv=cpv, installed=False,
				metadata=metadata, root_config=root_config,
				type_name='ebuild')
			settings.setcpv(pkg)
			ebuildpath = portdb.findname(cpv)
			self.assertNotEqual(ebuildpath, None)

			for phase in ('info', 'nofetch',
				 'pretend', 'setup', 'unpack', 'prepare', 'configure',
				 'compile', 'test', 'install', 'qmerge', 'clean', 'merge'):

				pr, pw = os.pipe()

				producer = DoebuildProcess(doebuild_pargs=(ebuildpath, phase),
					doebuild_kwargs={"settings" : settings,
						"mydbapi": portdb, "tree": "porttree",
						"vartree": root_config.trees["vartree"],
						"fd_pipes": {
							1: dev_null.fileno(),
							2: dev_null.fileno(),
							output_fd: pw,
						},
						"prev_mtimes": {}})

				consumer = PipeReader(
					input_files={"producer" : pr})

				task_scheduler = TaskScheduler(iter([producer, consumer]),
					max_jobs=2)

				try:
					task_scheduler.start()
				finally:
					# PipeReader closes pr
					os.close(pw)

				task_scheduler.wait()
				output = portage._unicode_decode(
					consumer.getvalue()).rstrip("\n")

				if task_scheduler.returncode != os.EX_OK:
					portage.writemsg(output, noiselevel=-1)

				self.assertEqual(task_scheduler.returncode, os.EX_OK)

				if phase not in ('clean', 'merge', 'qmerge'):
					self.assertEqual(phase, output)

		finally:
			dev_null.close()
			playground.cleanup()
			QueryCommand._db = None
Example #14
def async_iter_completed(futures, max_jobs=None, max_load=None, loop=None):
	"""
	An asynchronous version of iter_completed. This yields futures, which
	when done, result in a set of input futures that are done. This serves
	as a wrapper around portage's internal TaskScheduler class, using
	standard asyncio interfaces.

	@param futures: iterator of asyncio.Future (or compatible)
	@type futures: iterator
	@param max_jobs: max number of futures to process concurrently (default
		is portage.util.cpuinfo.get_cpu_count())
	@type max_jobs: int
	@param max_load: max load allowed when scheduling a new future,
		otherwise schedule no more than 1 future at a time (default
		is portage.util.cpuinfo.get_cpu_count())
	@type max_load: int or float
	@param loop: event loop
	@type loop: EventLoop
	@return: iterator of futures, which when done, result in a set of
		input futures that are done
	@rtype: iterator
	"""
	loop = asyncio._wrap_loop(loop)

	max_jobs = max_jobs or get_cpu_count()
	max_load = max_load or get_cpu_count()

	future_map = {}
	def task_generator():
		for future in futures:
			future_map[id(future)] = future
			yield AsyncTaskFuture(future=future)

	scheduler = TaskScheduler(
		task_generator(),
		max_jobs=max_jobs,
		max_load=max_load,
		event_loop=loop)

	def done_callback(future_done_set, wait_result):
		"""Propagate results from wait_result to future_done_set."""
		if future_done_set.cancelled():
			return
		done, pending = wait_result.result()
		for future in done:
			del future_map[id(future)]
		future_done_set.set_result(done)

	def cancel_callback(wait_result, future_done_set):
		"""Cancel wait_result if future_done_set has been cancelled."""
		if future_done_set.cancelled() and not wait_result.done():
			wait_result.cancel()

	try:
		scheduler.start()

		# scheduler should ensure that future_map is non-empty until
		# task_generator is exhausted
		while future_map:
			wait_result = asyncio.ensure_future(
				asyncio.wait(list(future_map.values()),
				return_when=asyncio.FIRST_COMPLETED, loop=loop), loop=loop)
			future_done_set = loop.create_future()
			future_done_set.add_done_callback(
				functools.partial(cancel_callback, wait_result))
			wait_result.add_done_callback(
				functools.partial(done_callback, future_done_set))
			yield future_done_set
	finally:
		# cleanup in case of interruption by SIGINT, etc
		scheduler.cancel()
		scheduler.wait()