Example #1
	def __init__(self, portdb, cp_iter=None, consumer=None,
		max_jobs=None, max_load=None, write_auxdb=True):
		PollScheduler.__init__(self, main=True)
		self._portdb = portdb
		self._write_auxdb = write_auxdb
		self._global_cleanse = False
		if cp_iter is None:
			cp_iter = self._iter_every_cp()
			# We can globally cleanse stale cache only if we
			# iterate over every single cp.
			self._global_cleanse = True
		self._cp_iter = cp_iter
		self._consumer = consumer

		if max_jobs is None:
			max_jobs = 1

		self._max_jobs = max_jobs
		self._max_load = max_load

		self._valid_pkgs = set()
		self._cp_set = set()
		self._process_iter = self._iter_metadata_processes()
		self.returncode = os.EX_OK
		self._error_count = 0
		self._running_tasks = set()
		self._remaining_tasks = True
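
A minimal driver sketch for this constructor. The class name MetadataRegen, its import path, and the run() entry point are assumptions about this Portage vintage; portdb stands for a configured portdbapi instance:

import os

import portage
from _emerge.MetadataRegen import MetadataRegen  # path is an assumption

regen = MetadataRegen(portage.portdb, max_jobs=2, max_load=1.0)
regen.run()  # assumed entry point: drains the metadata processes
if regen.returncode != os.EX_OK:
	print("metadata regeneration finished with errors")
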
Example #2
	def __init__(self, portdb, cp_iter=None, consumer=None,
		max_jobs=None, max_load=None):
		PollScheduler.__init__(self)
		self._portdb = portdb
		self._global_cleanse = False
		if cp_iter is None:
			cp_iter = self._iter_every_cp()
			# We can globally cleanse stale cache only if we
			# iterate over every single cp.
			self._global_cleanse = True
		self._cp_iter = cp_iter
		self._consumer = consumer

		if max_jobs is None:
			max_jobs = 1

		self._max_jobs = max_jobs
		self._max_load = max_load
		self._sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)

		self._valid_pkgs = set()
		self._cp_set = set()
		self._process_iter = self._iter_metadata_processes()
		self.returncode = os.EX_OK
		self._error_count = 0
Example #3
	def __init__(self, main=True, max_jobs=None, max_load=None):
		PollScheduler.__init__(self, main=main)

		if max_jobs is None:
			max_jobs = 1

		self._max_jobs = max_jobs
		self._max_load = max_load

		self._queues = []
		self._schedule_listeners = []
Example #4
	def __init__(self, max_jobs=None, max_load=None, **kwargs):
		AsynchronousTask.__init__(self)
		PollScheduler.__init__(self, **kwargs)

		if max_jobs is None:
			max_jobs = 1
		self._max_jobs = max_jobs
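		# max_load may arrive as the boolean True (a limit was requested
		# without a value); normalize that case to None below so that only
		# real numbers enable load-average throttling.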
		self._max_load = None if max_load is True else max_load
		self._error_count = 0
		self._running_tasks = set()
		self._remaining_tasks = True
		self._loadavg_check_id = None
Example #5
    def __init__(self, max_jobs=None, max_load=None, **kwargs):
        AsynchronousTask.__init__(self)
        PollScheduler.__init__(self, **kwargs)

        if max_jobs is None:
            max_jobs = 1
        self._max_jobs = max_jobs
        self._max_load = max_load
        self._error_count = 0
        self._running_tasks = set()
        self._remaining_tasks = True
        self._term_check_id = None
        self._loadavg_check_id = None
Example #6
	def __init__(self, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)

		if max_jobs is None:
			max_jobs = 1

		self._max_jobs = max_jobs
		self._max_load = max_load
		self.sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)

		self._queues = []
		self._schedule_listeners = []
Example #8
    def _testAsynchronousLockWaitKill(self):
        scheduler = PollScheduler().sched_iface
        tempdir = tempfile.mkdtemp()
        try:
            path = os.path.join(tempdir, 'lock_me')
            lock1 = AsynchronousLock(path=path, scheduler=scheduler)
            lock1.start()
            self.assertEqual(lock1.wait(), os.EX_OK)
            self.assertEqual(lock1.returncode, os.EX_OK)
            lock2 = AsynchronousLock(path=path,
                                     scheduler=scheduler,
                                     _force_async=True,
                                     _force_process=True)
            lock2.start()
            # lock2 should be waiting for lock1 to release
            self.assertEqual(lock2.poll(), None)
            self.assertEqual(lock2.returncode, None)

            # Kill lock2's process and then check wait() and
            # returncode results. This is intended to simulate
            # a SIGINT sent via the controlling tty.
            self.assertEqual(lock2._imp is not None, True)
            self.assertEqual(lock2._imp._proc is not None, True)
            self.assertEqual(lock2._imp._proc.pid is not None, True)
            lock2._imp._kill_test = True
            os.kill(lock2._imp._proc.pid, signal.SIGTERM)
            self.assertEqual(lock2.wait() == os.EX_OK, False)
            self.assertEqual(lock2.returncode == os.EX_OK, False)
            self.assertEqual(lock2.returncode is None, False)
            lock1.unlock()
        finally:
            shutil.rmtree(tempdir)
Example #9
    def _testAsynchronousLock(self):
        scheduler = PollScheduler().sched_iface
        tempdir = tempfile.mkdtemp()
        try:
            path = os.path.join(tempdir, 'lock_me')
            for force_async in (True, False):
                for force_dummy in (True, False):
                    async_lock = AsynchronousLock(path=path,
                                                  scheduler=scheduler,
                                                  _force_async=force_async,
                                                  _force_thread=True,
                                                  _force_dummy=force_dummy)
                    async_lock.start()
                    self.assertEqual(async_lock.wait(), os.EX_OK)
                    self.assertEqual(async_lock.returncode, os.EX_OK)
                    async_lock.unlock()

                async_lock = AsynchronousLock(path=path,
                                              scheduler=scheduler,
                                              _force_async=force_async,
                                              _force_process=True)
                async_lock.start()
                self.assertEqual(async_lock.wait(), os.EX_OK)
                self.assertEqual(async_lock.returncode, os.EX_OK)
                async_lock.unlock()

        finally:
            shutil.rmtree(tempdir)
Example #10
    def _testAsynchronousLockWaitCancel(self):
        scheduler = PollScheduler().sched_iface
        tempdir = tempfile.mkdtemp()
        try:
            path = os.path.join(tempdir, 'lock_me')
            lock1 = AsynchronousLock(path=path, scheduler=scheduler)
            lock1.start()
            self.assertEqual(lock1.wait(), os.EX_OK)
            self.assertEqual(lock1.returncode, os.EX_OK)
            lock2 = AsynchronousLock(path=path,
                                     scheduler=scheduler,
                                     _force_async=True,
                                     _force_process=True)
            lock2.start()
            # lock2 should be waiting for lock1 to release
            self.assertEqual(lock2.poll(), None)
            self.assertEqual(lock2.returncode, None)

            # Cancel lock2 and then check wait() and returncode results.
            lock2.cancel()
            self.assertEqual(lock2.wait() == os.EX_OK, False)
            self.assertEqual(lock2.returncode == os.EX_OK, False)
            self.assertEqual(lock2.returncode is None, False)
            lock1.unlock()
        finally:
            shutil.rmtree(tempdir)
Example #11
	def testLogfile(self):
		logfile = None
		try:
			fd, logfile = tempfile.mkstemp()
			os.close(fd)
			null_fd = os.open('/dev/null', os.O_RDWR)
			test_string = 2 * "blah blah blah\n"
			scheduler = PollScheduler().sched_iface
			proc = SpawnProcess(
				args=[BASH_BINARY, "-c",
				"echo -n '%s'" % test_string],
				env={}, fd_pipes={0:sys.stdin.fileno(), 1:null_fd, 2:null_fd},
				scheduler=scheduler,
				logfile=logfile)
			proc.start()
			os.close(null_fd)
			self.assertEqual(proc.wait(), os.EX_OK)
			f = io.open(_unicode_encode(logfile,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='strict')
			log_content = f.read()
			f.close()
			# When logging passes through a pty, this comparison will fail
			# unless the oflag terminal attributes have the termios.OPOST
			# bit disabled. Otherwise, transformations such as \n -> \r\n
			# may occur.
			self.assertEqual(test_string, log_content)
		finally:
			if logfile:
				try:
					os.unlink(logfile)
				except EnvironmentError as e:
					if e.errno != errno.ENOENT:
						raise
					del e
Example #12
    def _testAsynchronousLockWait(self):
        scheduler = PollScheduler().sched_iface
        tempdir = tempfile.mkdtemp()
        try:
            path = os.path.join(tempdir, 'lock_me')
            lock1 = AsynchronousLock(path=path, scheduler=scheduler)
            lock1.start()
            self.assertEqual(lock1.wait(), os.EX_OK)
            self.assertEqual(lock1.returncode, os.EX_OK)

            # lock2 requires _force_async=True since the portage.locks
            # module is not designed to work as intended here if the
            # same process tries to lock the same file more than
            # one time concurrently.
            lock2 = AsynchronousLock(path=path,
                                     scheduler=scheduler,
                                     _force_async=True,
                                     _force_process=True)
            lock2.start()
            # lock2 should be waiting for lock1 to release
            self.assertEqual(lock2.poll(), None)
            self.assertEqual(lock2.returncode, None)

            lock1.unlock()
            self.assertEqual(lock2.wait(), os.EX_OK)
            self.assertEqual(lock2.returncode, os.EX_OK)
            lock2.unlock()
        finally:
            shutil.rmtree(tempdir)
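
The tests above drive AsynchronousLock through start(), wait(), and unlock(). A standalone sketch of the same pattern; the import paths are assumptions about this Portage vintage:

import os

from _emerge.AsynchronousLock import AsynchronousLock  # path is an assumption
from _emerge.PollScheduler import PollScheduler

scheduler = PollScheduler().sched_iface
lock = AsynchronousLock(path='/tmp/example.lock', scheduler=scheduler)
lock.start()
if lock.wait() == os.EX_OK:
    # critical section: the lock file is held until unlock()
    lock.unlock()
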
Example #13
	def testPipeReader(self):
		"""
		Use a poll loop to read data from a pipe and assert that
		the data written to the pipe is identical to the data
		read from the pipe.
		"""

		test_string = 2 * "blah blah blah\n"

		scheduler = PollScheduler().sched_iface
		master_fd, slave_fd = os.pipe()
		master_file = os.fdopen(master_fd, 'rb', 0)
		slave_file = os.fdopen(slave_fd, 'wb')
		producer = SpawnProcess(
			args=["bash", "-c", "echo -n '%s'" % test_string],
			env=os.environ, fd_pipes={1:slave_fd},
			scheduler=scheduler)
		producer.start()
		slave_file.close()

		consumer = PipeReader(
			input_files={"producer" : master_file},
			scheduler=scheduler)

		consumer.start()

		# This will ensure that both tasks have exited, which
		# is necessary to avoid "ResourceWarning: unclosed file"
		# warnings since Python 3.2 (and also ensures that we
		# don't leave any zombie child processes).
		scheduler.schedule()
		self.assertEqual(producer.returncode, os.EX_OK)
		self.assertEqual(consumer.returncode, os.EX_OK)

		output = consumer.getvalue().decode('ascii', 'replace')
		self.assertEqual(test_string, output)
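
Nothing in this pattern requires SpawnProcess as the producer; any process or thread that writes to the pipe will do. A standalone sketch, with import paths taken as assumptions about this Portage vintage:

import os

from _emerge.PipeReader import PipeReader  # path is an assumption
from _emerge.PollScheduler import PollScheduler

scheduler = PollScheduler().sched_iface
master_fd, slave_fd = os.pipe()
reader = PipeReader(
	input_files={"in": os.fdopen(master_fd, 'rb', 0)},
	scheduler=scheduler)
reader.start()
os.write(slave_fd, b'blah blah blah\n')
os.close(slave_fd)
scheduler.schedule()  # run the poll loop until the reader hits EOF
assert reader.getvalue() == b'blah blah blah\n'
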
Example #14
    def testLazyImportPortageBaseline(self):
        """
        Check what modules are imported by a baseline module import.
        """

        env = os.environ.copy()
        pythonpath = env.get('PYTHONPATH')
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is None:
            pythonpath = ''
        else:
            pythonpath = ':' + pythonpath
        pythonpath = PORTAGE_PYM_PATH + pythonpath
        env['PYTHONPATH'] = pythonpath

        # If python is patched to insert the path of the
        # currently installed portage module into sys.path,
        # then the above PYTHONPATH override doesn't help.
        env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH

        scheduler = PollScheduler().sched_iface
        master_fd, slave_fd = os.pipe()
        master_file = os.fdopen(master_fd, 'rb', 0)
        slave_file = os.fdopen(slave_fd, 'wb')
        producer = SpawnProcess(args=self._baseline_import_cmd,
                                env=env,
                                fd_pipes={1: slave_fd},
                                scheduler=scheduler)
        producer.start()
        slave_file.close()

        consumer = PipeReader(input_files={"producer": master_file},
                              scheduler=scheduler)

        consumer.start()
        consumer.wait()
        self.assertEqual(producer.wait(), os.EX_OK)
        self.assertEqual(consumer.wait(), os.EX_OK)

        output = consumer.getvalue().decode('ascii', 'replace').split()

        unexpected_modules = " ".join(sorted(
            x for x in output
            if self._module_re.match(x) is not None
            and x not in self._baseline_imports))

        self.assertEqual("", unexpected_modules)
Example #15
def spawn_nofetch(portdb, ebuild_path, settings=None):
    """
    This spawns pkg_nofetch if appropriate. The settings parameter
    is useful only if setcpv has already been called in order
    to cache metadata. It will be cloned internally, in order to
    prevent any changes from interfering with the calling code.
    If settings is None then a suitable config instance will be
    acquired from the given portdbapi instance.

    A private PORTAGE_BUILDDIR will be created and cleaned up, in
    order to avoid any interference with any other processes.
    If PORTAGE_TMPDIR is writable, that will be used, otherwise
    the default directory for the tempfile module will be used.

    We only call the pkg_nofetch phase if either RESTRICT=fetch
    is set or the package has explicitly overridden the default
    pkg_nofetch implementation. This allows specialized messages
    to be displayed for problematic packages even though they do
    not set RESTRICT=fetch (bug #336499).

    This function does nothing if the PORTAGE_PARALLEL_FETCHONLY
    variable is set in the config instance.
    """

    if settings is None:
        settings = config(clone=portdb.settings)
    else:
        settings = config(clone=settings)

    if 'PORTAGE_PARALLEL_FETCHONLY' in settings:
        return

    # We must create our private PORTAGE_TMPDIR before calling
    # doebuild_environment(), since lots of variables such
    # as PORTAGE_BUILDDIR refer to paths inside PORTAGE_TMPDIR.
    portage_tmpdir = settings.get('PORTAGE_TMPDIR')
    if not portage_tmpdir or not os.access(portage_tmpdir, os.W_OK):
        portage_tmpdir = None
    private_tmpdir = tempfile.mkdtemp(dir=portage_tmpdir)
    settings['PORTAGE_TMPDIR'] = private_tmpdir
    settings.backup_changes('PORTAGE_TMPDIR')
    # private temp dir was just created, so it's not locked yet
    settings.pop('PORTAGE_BUILDDIR_LOCKED', None)

    try:
        doebuild_environment(ebuild_path,
                             'nofetch',
                             settings=settings,
                             db=portdb)
        restrict = settings['PORTAGE_RESTRICT'].split()
        defined_phases = settings['DEFINED_PHASES'].split()
        if not defined_phases:
            # When DEFINED_PHASES is undefined, assume all
            # phases are defined.
            defined_phases = EBUILD_PHASES

        if 'fetch' not in restrict and \
         'nofetch' not in defined_phases:
            return

        prepare_build_dirs(settings=settings)
        ebuild_phase = EbuildPhase(background=False,
                                   phase='nofetch',
                                   scheduler=PollScheduler().sched_iface,
                                   settings=settings)
        ebuild_phase.start()
        ebuild_phase.wait()
        elog_process(settings.mycpv, settings)
    finally:
        shutil.rmtree(private_tmpdir)
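
A hypothetical call site for spawn_nofetch(), assuming a standard Portage environment. findname() is a real portdbapi lookup; the package atom is illustrative:

import portage

portdb = portage.portdb
ebuild_path = portdb.findname('sys-apps/foo-1.0')  # illustrative cpv
if ebuild_path is not None:
    spawn_nofetch(portdb, ebuild_path)
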
Example #16
    def __init__(self, _unused_param=None, mysettings=None):
        """
        @param _unused_param: deprecated, use mysettings['PORTDIR'] instead
        @type _unused_param: None
        @param mysettings: an immutable config instance
        @type mysettings: portage.config
        """
        portdbapi.portdbapi_instances.append(self)

        from portage import config
        if mysettings:
            self.settings = mysettings
        else:
            from portage import settings
            self.settings = config(clone=settings)

        if _unused_param is not None:
            warnings.warn("The first parameter of the " + \
             "portage.dbapi.porttree.portdbapi" + \
             " constructor is unused since portage-2.1.8. " + \
             "mysettings['PORTDIR'] is used instead.",
             DeprecationWarning, stacklevel=2)

        self.repositories = self.settings.repositories
        self.treemap = self.repositories.treemap

        # This is strictly for use in aux_get() doebuild calls when metadata
        # is generated by the depend phase.  It's safest to use a clone for
        # this purpose because doebuild makes many changes to the config
        # instance that is passed in.
        self.doebuild_settings = config(clone=self.settings)
        self._scheduler = PollScheduler().sched_iface
        self.depcachedir = os.path.realpath(self.settings.depcachedir)

        if os.environ.get("SANDBOX_ON") == "1":
            # Make api consumers exempt from sandbox violations
            # when doing metadata cache updates.
            sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
            if self.depcachedir not in sandbox_write:
                sandbox_write.append(self.depcachedir)
                os.environ["SANDBOX_WRITE"] = \
                 ":".join(filter(None, sandbox_write))

        self.porttrees = list(self.settings.repositories.repoLocationList())

        # This is used as sanity check for aux_get(). If there is no
        # root eclass dir, we assume that PORTDIR is invalid or
        # missing. This check allows aux_get() to detect a missing
        # portage tree and return early by raising a KeyError.
        self._have_root_eclass_dir = os.path.isdir(
            os.path.join(self.settings.repositories.mainRepoLocation(),
                         "eclass"))

        # If the portdbapi is "frozen", then we assume that we can cache
        # everything (that no updates to it are happening).
        self.xcache = {}
        self.frozen = 0

        # Keep a list of repo names, sorted by priority (highest priority first).
        self._ordered_repo_name_list = tuple(
            reversed(self.repositories.prepos_order))

        self.auxdbmodule = self.settings.load_best_module(
            "portdbapi.auxdbmodule")
        self.auxdb = {}
        self._pregen_auxdb = {}
        # If the current user doesn't have depcachedir write permission,
        # then the depcachedir cache is kept here read-only access.
        self._ro_auxdb = {}
        self._init_cache_dirs()
        try:
            depcachedir_st = os.stat(self.depcachedir)
            depcachedir_w_ok = os.access(self.depcachedir, os.W_OK)
        except OSError:
            depcachedir_st = None
            depcachedir_w_ok = False

        cache_kwargs = {}

        depcachedir_unshared = False
        if portage.data.secpass < 1 and \
         depcachedir_w_ok and \
         depcachedir_st is not None and \
         os.getuid() == depcachedir_st.st_uid and \
         os.getgid() == depcachedir_st.st_gid:
            # If this user owns depcachedir and is not in the
            # portage group, then don't bother to set permissions
            # on cache entries. This makes it possible to run
            # egencache without any need to be a member of the
            # portage group.
            depcachedir_unshared = True
        else:
            cache_kwargs.update({'gid': portage_gid, 'perms': 0o664})

        # If secpass < 1, we don't want to write to the cache
        # since then we won't be able to apply group permissions
        # to the cache entries/directories.
        if (secpass < 1 and not depcachedir_unshared) or not depcachedir_w_ok:
            for x in self.porttrees:
                self.auxdb[x] = volatile.database(self.depcachedir, x,
                                                  self._known_keys,
                                                  **cache_kwargs)
                try:
                    self._ro_auxdb[x] = self.auxdbmodule(self.depcachedir,
                                                         x,
                                                         self._known_keys,
                                                         readonly=True,
                                                         **cache_kwargs)
                except CacheError:
                    pass
        else:
            for x in self.porttrees:
                if x in self.auxdb:
                    continue
                # location, label, auxdbkeys
                self.auxdb[x] = self.auxdbmodule(self.depcachedir, x,
                                                 self._known_keys,
                                                 **cache_kwargs)
        if "metadata-transfer" not in self.settings.features:
            for x in self.porttrees:
                if x in self._pregen_auxdb:
                    continue
                cache = self._create_pregen_cache(x)
                if cache is not None:
                    self._pregen_auxdb[x] = cache
        # Selectively cache metadata in order to optimize dep matching.
        self._aux_cache_keys = set([
            "DEPEND", "EAPI", "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
            "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository",
            "RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"
        ])

        self._aux_cache = {}
        self._broken_ebuilds = set()
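
Most callers never construct portdbapi directly, since portage builds a shared instance at import time. A sketch of both routes; the clone semantics follow the docstring above:

import portage
from portage.dbapi.porttree import portdbapi

# Route 1: the shared instance portage creates on import.
portdb = portage.portdb

# Route 2: an explicit instance backed by a private config clone.
settings = portage.config(clone=portage.settings)
private_portdb = portdbapi(mysettings=settings)
print(private_portdb.porttrees)
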
Example #17
	def testDoebuildSpawn(self):
		playground = ResolverPlayground()
		try:
			settings = config(clone=playground.settings)
			if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
				settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
					os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
				settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")

			cpv = 'sys-apps/portage-2.1'
			metadata = {
				'EAPI'      : '2',
				'INHERITED' : 'python eutils',
				'IUSE'      : 'build doc epydoc python3 selinux',
				'KEYWORDS'  : 'x86',
				'LICENSE'   : 'GPL-2',
				'PROVIDE'   : 'virtual/portage',
				'RDEPEND'   : '>=app-shells/bash-3.2_p17 >=dev-lang/python-2.6',
				'SLOT'      : '0',
				'repository': 'gentoo',
			}
			root_config = playground.trees[playground.eroot]['root_config']
			pkg = Package(built=False, cpv=cpv, installed=False,
				metadata=metadata, root_config=root_config,
				type_name='ebuild')
			settings.setcpv(pkg)
			settings['PORTAGE_PYTHON'] = _python_interpreter
			settings['PORTAGE_BUILDDIR'] = os.path.join(
				settings['PORTAGE_TMPDIR'], cpv)
			settings['T'] = os.path.join(
				settings['PORTAGE_BUILDDIR'], 'temp')
			for x in ('PORTAGE_BUILDDIR', 'T'):
				os.makedirs(settings[x])
			# Create a fake environment, to pretend as if the ebuild
			# has been sourced already.
			open(os.path.join(settings['T'], 'environment'), 'wb').close()

			scheduler = PollScheduler().sched_iface
			for phase in ('_internal_test',):

				# Test EbuildSpawnProcess by calling doebuild.spawn() with
				# returnpid=False. This case is no longer used by portage
				# internals since EbuildPhase is used instead and that passes
				# returnpid=True to doebuild.spawn().
				rval = doebuild_spawn("%s %s" % (_shell_quote(
					os.path.join(settings["PORTAGE_BIN_PATH"],
					os.path.basename(EBUILD_SH_BINARY))), phase),
					settings, free=1)
				self.assertEqual(rval, os.EX_OK)

				ebuild_phase = EbuildPhase(background=False,
					phase=phase, scheduler=scheduler,
					settings=settings)
				ebuild_phase.start()
				ebuild_phase.wait()
				self.assertEqual(ebuild_phase.returncode, os.EX_OK)

			ebuild_phase = MiscFunctionsProcess(background=False,
				commands=['success_hooks'],
				scheduler=scheduler, settings=settings)
			ebuild_phase.start()
			ebuild_phase.wait()
			self.assertEqual(ebuild_phase.returncode, os.EX_OK)
		finally:
			playground.cleanup()
Example #18
    def aux_get(self, mycpv, mylist, mytree=None, myrepo=None):
        "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
        'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
        'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
        cache_me = False
        if myrepo is not None:
            mytree = self.treemap.get(myrepo)
            if mytree is None:
                raise KeyError(myrepo)

        if mytree is not None and len(self.porttrees) == 1 \
         and mytree == self.porttrees[0]:
            # mytree matches our only tree, so it's safe to
            # ignore mytree and cache the result
            mytree = None
            myrepo = None

        if mytree is None:
            cache_me = True
        if mytree is None and not self._known_keys.intersection(
                mylist).difference(self._aux_cache_keys):
            aux_cache = self._aux_cache.get(mycpv)
            if aux_cache is not None:
                return [aux_cache.get(x, "") for x in mylist]
            cache_me = True

        try:
            cat, pkg = mycpv.split("/", 1)
        except ValueError:
            # Missing slash. Can't find ebuild so raise KeyError.
            raise KeyError(mycpv)

        myebuild, mylocation = self.findname2(mycpv, mytree)

        if not myebuild:
            writemsg("!!! aux_get(): %s\n" % \
             _("ebuild not found for '%s'") % mycpv, noiselevel=1)
            raise KeyError(mycpv)

        mydata, ebuild_hash = self._pull_valid_cache(mycpv, myebuild,
                                                     mylocation)
        doregen = mydata is None

        if doregen:
            if myebuild in self._broken_ebuilds:
                raise KeyError(mycpv)

            self.doebuild_settings.setcpv(mycpv)
            eapi = None

            if eapi is None and \
             'parse-eapi-ebuild-head' in self.doebuild_settings.features:
                with io.open(_unicode_encode(myebuild,
                                             encoding=_encodings['fs'],
                                             errors='strict'),
                             mode='r',
                             encoding=_encodings['repo.content'],
                             errors='replace') as f:
                    eapi = portage._parse_eapi_ebuild_head(f)

            if eapi is not None:
                self.doebuild_settings.configdict['pkg']['EAPI'] = eapi

            if eapi is not None and not portage.eapi_is_supported(eapi):
                mydata = self._metadata_callback(mycpv, mylocation,
                                                 {'EAPI': eapi}, ebuild_hash)
            else:
                proc = EbuildMetadataPhase(
                    cpv=mycpv,
                    eapi=eapi,
                    ebuild_hash=ebuild_hash,
                    metadata_callback=self._metadata_callback,
                    portdb=self,
                    repo_path=mylocation,
                    scheduler=PollScheduler().sched_iface,
                    settings=self.doebuild_settings)

                proc.start()
                proc.wait()

                if proc.returncode != os.EX_OK:
                    self._broken_ebuilds.add(myebuild)
                    raise KeyError(mycpv)

                mydata = proc.metadata

        mydata["repository"] = self.repositories.get_name_for_location(
            mylocation)
        mydata["_mtime_"] = ebuild_hash.mtime
        eapi = mydata.get("EAPI")
        if not eapi:
            eapi = "0"
            mydata["EAPI"] = eapi
        if eapi_is_supported(eapi):
            mydata["INHERITED"] = " ".join(mydata.get("_eclasses_", []))

        #finally, we look at our internal cache entry and return the requested data.
        returnme = [mydata.get(x, "") for x in mylist]

        if cache_me:
            aux_cache = {}
            for x in self._aux_cache_keys:
                aux_cache[x] = mydata.get(x, "")
            self._aux_cache[mycpv] = aux_cache

        return returnme
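
A usage sketch for aux_get(), mirroring the input/return contract in its docstring; portdb is assumed to be a portdbapi instance:

try:
    slot, depend, homepage = portdb.aux_get(
        'sys-apps/foo-1.0', ['SLOT', 'DEPEND', 'HOMEPAGE'])
except KeyError:
    # raised when the ebuild is missing or its metadata is broken
    slot = depend = homepage = None
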