Exemplo n.º 1
0
	def _aux_get_wrapper(self, pkg, wants, myrepo=None):
		"""
		Variant of _aux_get() that substitutes live ebuild metadata
		(dynamic deps) for the installed instance's metadata when both
		EAPIs are supported, and otherwise falls back to applying
		global (package move) updates to the installed metadata.
		"""
		if pkg in self._aux_get_history:
			return self._aux_get(pkg, wants)
		self._aux_get_history.add(pkg)
		# This _aux_get call doubles as the EAPI check and also
		# propagates KeyError to the caller when appropriate.
		installed_eapi, repo = self._aux_get(pkg, ["EAPI", "repository"])
		try:
			# Prefer the live ebuild metadata when it can be fetched.
			live_metadata = dict(zip(self._portdb_keys,
				self._portdb.aux_get(pkg, self._portdb_keys,
				myrepo=_gen_valid_repo(repo))))
			# Fall back to the installed instance's metadata when either
			# EAPI is unsupported: an unsupported or corrupt EAPI on the
			# installed instance makes complex operations such as the
			# pkg_config, pkg_prerm or pkg_postrm phases unsafe. When
			# both EAPIs are supported, use live_metadata so that dep
			# updates without a revision or EAPI bump are respected, as
			# in bug #368725.
			both_supported = (
				portage.eapi_is_supported(live_metadata["EAPI"])
				and portage.eapi_is_supported(installed_eapi))
			if not both_supported:
				raise KeyError(pkg)
			self.dbapi.aux_update(pkg, live_metadata)
		except (KeyError, portage.exception.PortageException):
			# Live metadata is unavailable or unusable; apply global
			# updates to the installed metadata instead.
			if self._global_updates is None:
				self._global_updates = grab_global_updates(self._portdb)
			perform_global_updates(pkg, self.dbapi, self._global_updates)
		return self._aux_get(pkg, wants)
Exemplo n.º 2
0
	def _aux_get_wrapper(self, pkg, wants, myrepo=None):
		"""
		Variant of _aux_get() that prefers live ebuild metadata
		(dynamic deps) over the installed instance's metadata,
		preserving any built slot/sub-slot := operator deps, and
		falling back to applying global (package move) updates when the
		live metadata is unavailable or unusable.
		"""
		if pkg in self._aux_get_history:
			return self._aux_get(pkg, wants)
		self._aux_get_history.add(pkg)
		# We need to check the EAPI, and this also raises
		# a KeyError to the caller if appropriate.
		pkg_obj = self.dbapi._cpv_map[pkg]
		installed_eapi = pkg_obj.metadata['EAPI']
		repo = pkg_obj.metadata['repository']
		eapi_attrs = _get_eapi_attrs(installed_eapi)
		built_slot_operator_atoms = None

		# Collect := operator atoms from the built instance now, so they
		# can be re-applied to the live metadata below (skipped when the
		# user chose to ignore built slot-operator deps).
		if eapi_attrs.slot_operator and not self._ignore_built_slot_operator_deps:
			try:
				built_slot_operator_atoms = find_built_slot_operator_atoms(pkg_obj)
			except InvalidDependString:
				pass

		try:
			# Use the live ebuild metadata if possible.
			repo = _gen_valid_repo(repo)
			live_metadata = dict(zip(self._portdb_keys,
				self._portdb.aux_get(pkg, self._portdb_keys, myrepo=repo)))
			# Use the metadata from the installed instance if the EAPI
			# of either instance is unsupported, since if the installed
			# instance has an unsupported or corrupt EAPI then we don't
			# want to attempt to do complex operations such as execute
			# pkg_config, pkg_prerm or pkg_postrm phases. If both EAPIs
			# are supported then go ahead and use the live_metadata, in
			# order to respect dep updates without revision bump or EAPI
			# bump, as in bug #368725.
			if not (portage.eapi_is_supported(live_metadata["EAPI"]) and \
				portage.eapi_is_supported(installed_eapi)):
				raise KeyError(pkg)

			# preserve built slot/sub-slot := operator deps
			if built_slot_operator_atoms:
				live_eapi_attrs = _get_eapi_attrs(live_metadata["EAPI"])
				# The live EAPI must also support := operators, or the
				# appended atoms would be invalid there.
				if not live_eapi_attrs.slot_operator:
					raise KeyError(pkg)
				for k, v in built_slot_operator_atoms.items():
					live_metadata[k] += (" " +
						" ".join(_unicode(atom) for atom in v))

			self.dbapi.aux_update(pkg, live_metadata)
		except (KeyError, portage.exception.PortageException):
			# Fall back: apply global updates to the installed metadata.
			if self._global_updates is None:
				self._global_updates = \
					grab_global_updates(self._portdb)
			perform_global_updates(
				pkg, self.dbapi, self._global_updates)
		return self._aux_get(pkg, wants)
Exemplo n.º 3
0
	def _aux_get_return(self, future, mycpv, mylist, myebuild, ebuild_hash,
		mydata, mylocation, cache_me, proc):
		"""
		Finish an async aux_get request: merge repository name and mtime
		into the metadata, normalize EAPI, optionally populate the
		in-memory cache, and resolve the future with the requested
		values.
		"""
		if future.cancelled():
			return
		if proc is not None:
			# A failed metadata process marks the ebuild as broken and
			# surfaces a KeyError to the caller via the future.
			if proc.returncode != os.EX_OK:
				self._broken_ebuilds.add(myebuild)
				future.set_exception(PortageKeyError(mycpv))
				return
			mydata = proc.metadata
		mydata["repository"] = self.repositories.get_name_for_location(
			mylocation)
		mydata["_mtime_"] = ebuild_hash.mtime
		# Normalize a missing/empty EAPI to the default "0".
		eapi = mydata.get("EAPI") or "0"
		mydata["EAPI"] = eapi
		if eapi_is_supported(eapi):
			mydata["INHERITED"] = " ".join(mydata.get("_eclasses_", []))

		# Assemble the requested values from the merged metadata.
		returnme = [mydata.get(key, "") for key in mylist]

		if cache_me and self.frozen:
			self._aux_cache[mycpv] = dict(
				(key, mydata.get(key, "")) for key in self._aux_cache_keys)

		future.set_result(returnme)
Exemplo n.º 4
0
	def _visible(self, cpv, metadata):
		"""
		Return True if the given package instance is visible under the
		current settings: supported and non-deprecated EAPI, non-empty
		SLOT, not masked, and keywords/licenses/properties/restrictions
		all satisfied.
		"""
		eapi = metadata["EAPI"]
		if not eapi_is_supported(eapi) or _eapi_is_deprecated(eapi):
			return False
		if not metadata["SLOT"]:
			return False

		settings = self.settings
		if settings._getMaskAtom(cpv, metadata):
			return False
		if settings._getMissingKeywords(cpv, metadata):
			return False
		if settings.local_config:
			metadata['CHOST'] = settings.get('CHOST', '')
			if not settings._accept_chost(cpv, metadata):
				return False
			metadata["USE"] = ""
			# USE conditionals in LICENSE/PROPERTIES require the
			# effective USE flags for evaluation.
			if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]:
				self.doebuild_settings.setcpv(cpv, mydb=metadata)
				metadata['USE'] = self.doebuild_settings['PORTAGE_USE']
			try:
				for check in (settings._getMissingLicenses,
					settings._getMissingProperties,
					settings._getMissingRestrict):
					if check(cpv, metadata):
						return False
			except InvalidDependString:
				return False

		return True
Exemplo n.º 5
0
	def _addProfile(self, currentPath):
		"""
		Recursively register a profile directory and its parents in
		self.profiles (parents first), validating each profile's EAPI.

		@raises ParseError: when the profile declares an unsupported
			EAPI, its parent file is empty, or a listed parent profile
			does not exist
		"""
		eapi_file = os.path.join(currentPath, "eapi")
		parentsFile = os.path.join(currentPath, "parent")
		try:
			eapi = codecs.open(_unicode_encode(eapi_file,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace'
				).readline().strip()
		except IOError:
			# A missing eapi file implies the default EAPI, which is
			# always supported.
			pass
		else:
			if not eapi_is_supported(eapi):
				raise ParseError(_(
					"Profile contains unsupported "
					"EAPI '%s': '%s'") % \
					(eapi, os.path.realpath(eapi_file),))
		if os.path.exists(parentsFile):
			parents = grabfile(parentsFile)
			if not parents:
				raise ParseError(
					_("Empty parent file: '%s'") % parentsFile)
			for raw_parent in parents:
				resolved = normalize_path(
					os.path.join(currentPath, raw_parent))
				if not os.path.exists(resolved):
					raise ParseError(
						_("Parent '%s' not found: '%s'") %  \
						(resolved, parentsFile))
				# Depth-first so parents precede this profile.
				self._addProfile(resolved)
		self.profiles.append(currentPath)
Exemplo n.º 6
0
	def _metadata_callback(self, cpv, ebuild_path, repo_path, metadata, mtime):
		"""
		Post-process freshly regenerated ebuild metadata and store it in
		the writable cache for the given repository, returning the
		stored dict.
		"""
		# Normalize metadata into a plain dict; it may arrive as any
		# mapping or as an iterable of (key, value) pairs.
		if hasattr(metadata, "items"):
			metadata = dict(metadata.items())
		else:
			metadata = dict(metadata)

		# Replace the INHERITED name list with resolved eclass data,
		# which is what cache validation consumes.
		inherited = metadata.pop("INHERITED", None)
		if inherited:
			metadata["_eclasses_"] = self._repo_info[repo_path
				].eclass_db.get_eclass_data(inherited.split())
		else:
			metadata["_eclasses_"] = {}

		metadata["_mtime_"] = mtime

		eapi = metadata.get("EAPI")
		if not eapi or not eapi.strip():
			eapi = "0"
			metadata["EAPI"] = eapi
		if not eapi_is_supported(eapi):
			# Blank out everything except the cache-validation keys and
			# record the unsupported EAPI with a leading "-" so the
			# entry is recognized as invalid on later reads.
			for key in set(metadata).difference(("_mtime_", "_eclasses_")):
				metadata[key] = ""
			metadata["EAPI"] = "-" + eapi.lstrip("-")

		self.auxdb[repo_path][cpv] = metadata
		return metadata
Exemplo n.º 7
0
		def aux_get_done(aux_get_future):
			"""
			Completion callback for the aux_get future: propagate
			failures, validate the EAPI, and resolve ``result`` with
			the parsed SRC_URI map.
			"""
			if result.cancelled():
				return
			if aux_get_future.exception() is not None:
				if isinstance(aux_get_future.exception(), PortageKeyError):
					# Convert this to an InvalidDependString exception since
					# callers already handle it.
					result.set_exception(portage.exception.InvalidDependString(
						"getFetchMap(): aux_get() error reading "
						+ mypkg + "; aborting."))
				else:
					# Bug fix: this previously called future.exception(),
					# but no name "future" exists in this scope (the
					# future is "aux_get_future"), so any non-KeyError
					# failure raised NameError instead of being
					# propagated to the caller.
					result.set_exception(aux_get_future.exception())
				return

			eapi, myuris = aux_get_future.result()

			if not eapi_is_supported(eapi):
				# Convert this to an InvalidDependString exception
				# since callers already handle it.
				result.set_exception(portage.exception.InvalidDependString(
					"getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
					(mypkg, eapi)))
				return

			result.set_result(_parse_uri_map(mypkg,
				{'EAPI':eapi,'SRC_URI':myuris}, use=useflags))
Exemplo n.º 8
0
	def _apply_dynamic_deps(self, pkg, live_metadata):
		"""
		Update the installed package's dependency metadata from the
		corresponding live ebuild metadata when it is safe to do so,
		preserving built slot/sub-slot := operator deps. When dynamic
		deps are not applicable, fall back to applying global (package
		move) updates to the installed metadata.
		"""
		try:
			if live_metadata is None:
				raise _DynamicDepsNotApplicable()
			# Use the metadata from the installed instance if the EAPI
			# of either instance is unsupported, since if the installed
			# instance has an unsupported or corrupt EAPI then we don't
			# want to attempt to do complex operations such as execute
			# pkg_config, pkg_prerm or pkg_postrm phases. If both EAPIs
			# are supported then go ahead and use the live_metadata, in
			# order to respect dep updates without revision bump or EAPI
			# bump, as in bug #368725.
			if not (portage.eapi_is_supported(live_metadata["EAPI"]) and \
				portage.eapi_is_supported(pkg.eapi)):
				raise _DynamicDepsNotApplicable()

			# preserve built slot/sub-slot := operator deps
			built_slot_operator_atoms = None
			if not self._ignore_built_slot_operator_deps and \
				_get_eapi_attrs(pkg.eapi).slot_operator:
				try:
					built_slot_operator_atoms = \
						find_built_slot_operator_atoms(pkg)
				except InvalidDependString:
					pass

			if built_slot_operator_atoms:
				live_eapi_attrs = _get_eapi_attrs(live_metadata["EAPI"])
				# The live EAPI must also support := operators, or the
				# appended atoms would be invalid there.
				if not live_eapi_attrs.slot_operator:
					raise _DynamicDepsNotApplicable()
				for k, v in built_slot_operator_atoms.items():
					live_metadata[k] += (" " +
						" ".join(_unicode(atom) for atom in v))

			self.dbapi.aux_update(pkg.cpv, live_metadata)
		except _DynamicDepsNotApplicable:
			if self._global_updates is None:
				self._global_updates = \
					grab_global_updates(self._portdb)

			# Bypass _aux_get_wrapper, since calling that
			# here would trigger infinite recursion.
			aux_keys = Package._dep_keys + self.dbapi._pkg_str_aux_keys
			aux_dict = dict(zip(aux_keys, self._aux_get(pkg.cpv, aux_keys)))
			perform_global_updates(
				pkg.cpv, aux_dict, self.dbapi, self._global_updates)
Exemplo n.º 9
0
	def _set_returncode(self, wait_retval):
		"""
		Handle subprocess exit: parse the raw metadata output, validate
		it against the EAPI parsed from the ebuild head, resolve eclass
		data and automatic unpack dependencies, and write the cache
		entry. Sets self.metadata on success, or forces returncode=1
		when the output is invalid.
		"""
		SubProcess._set_returncode(self, wait_retval)
		# self._raw_metadata is None when _start returns
		# early due to an unsupported EAPI
		if self.returncode == os.EX_OK and \
			self._raw_metadata is not None:
			metadata_lines = _unicode_decode(b''.join(self._raw_metadata),
				encoding=_encodings['repo.content'],
				errors='replace').splitlines()
			metadata_valid = True
			if len(portage.auxdbkeys) != len(metadata_lines):
				# Don't trust bash's returncode if the
				# number of lines is incorrect.
				metadata_valid = False
			else:
				metadata = dict(zip(portage.auxdbkeys, metadata_lines))
				# _eapi is the EAPI parsed from the ebuild head; None
				# means no explicit EAPI assignment, i.e. the default.
				parsed_eapi = self._eapi
				if parsed_eapi is None:
					parsed_eapi = "0"
				self.eapi_supported = \
					portage.eapi_is_supported(metadata["EAPI"])
				# A mismatch between the bash-reported EAPI and the
				# parsed one indicates an invalid EAPI assignment.
				if (not metadata["EAPI"] or self.eapi_supported) and \
					metadata["EAPI"] != parsed_eapi:
					self._eapi_invalid(metadata)
					metadata_valid = False

			if metadata_valid:
				# Since we're supposed to be able to efficiently obtain the
				# EAPI from _parse_eapi_ebuild_head, we don't write cache
				# entries for unsupported EAPIs.
				if self.eapi_supported:

					# Resolve INHERITED eclass names into the eclass
					# data used for cache validation.
					if metadata.get("INHERITED", False):
						metadata["_eclasses_"] = \
							self.portdb.repositories.get_repo_for_location(
							self.repo_path).eclass_db.get_eclass_data(
							metadata["INHERITED"].split())
					else:
						metadata["_eclasses_"] = {}
					metadata.pop("INHERITED", None)

					# Inject unpack-helper deps (e.g. unzip) implied by
					# SRC_URI suffixes, for EAPIs that support it.
					if eapi_has_automatic_unpack_dependencies(metadata["EAPI"]):
						repo = self.portdb.repositories.get_name_for_location(self.repo_path)
						unpackers = self.settings.unpack_dependencies.get(repo, {}).get(metadata["EAPI"], {})
						unpack_dependencies = extract_unpack_dependencies(metadata["SRC_URI"], unpackers)
						if unpack_dependencies:
							metadata["DEPEND"] += (" " if metadata["DEPEND"] else "") + unpack_dependencies

					# If called by egencache, this cache write is
					# undesirable when metadata-transfer is disabled.
					if self.write_auxdb is not False:
						self.portdb._write_cache(self.cpv,
							self.repo_path, metadata, self.ebuild_hash)
				else:
					metadata = {"EAPI": metadata["EAPI"]}
				self.metadata = metadata
			else:
				self.returncode = 1
Exemplo n.º 10
0
	def _eval_masks(self):
		"""
		Collect every reason this package is masked.

		@return: a dict mapping mask category (e.g. 'KEYWORDS',
			'LICENSE', 'package.mask') to details, or False when the
			package is not masked at all
		"""
		masks = {}
		settings = self.root_config.settings

		if self.invalid is not False:
			masks['invalid'] = self.invalid

		if not settings._accept_chost(self.cpv, self._metadata):
			masks['CHOST'] = self._metadata['CHOST']

		eapi = self.eapi
		if not portage.eapi_is_supported(eapi):
			masks['EAPI.unsupported'] = eapi
		if portage._eapi_is_deprecated(eapi):
			masks['EAPI.deprecated'] = eapi

		kw_missing = settings._getMissingKeywords(self.cpv, self._metadata)
		if kw_missing:
			masks['KEYWORDS'] = kw_missing

		# Dep-string parse failures below are already recorded under
		# 'invalid', so they are simply skipped here.
		for mask_key, getter in (
			('PROPERTIES', settings._getMissingProperties),
			('RESTRICT', settings._getMissingRestrict)):
			try:
				missing = getter(self.cpv, self._metadata)
			except InvalidDependString:
				continue
			if missing:
				masks[mask_key] = missing

		mask_atom = settings._getMaskAtom(self.cpv, self._metadata)
		if mask_atom is not None:
			masks['package.mask'] = mask_atom

		try:
			lic_missing = settings._getMissingLicenses(
				self.cpv, self._metadata)
		except InvalidDependString:
			# already recorded as 'invalid'
			pass
		else:
			if lic_missing:
				masks['LICENSE'] = lic_missing

		return masks or False
Exemplo n.º 11
0
	def _eval_masks(self):
		"""
		Collect every reason this package is masked.

		@return: a dict mapping mask category (e.g. 'KEYWORDS',
			'LICENSE', 'package.mask') to details, or False when the
			package is not masked at all
		"""
		masks = {}
		settings = self.root_config.settings

		if self.invalid is not False:
			masks['invalid'] = self.invalid

		if not settings._accept_chost(self.cpv, self._metadata):
			masks['CHOST'] = self._metadata['CHOST']

		eapi = self.eapi
		if not portage.eapi_is_supported(eapi):
			masks['EAPI.unsupported'] = eapi
		if portage._eapi_is_deprecated(eapi):
			masks['EAPI.deprecated'] = eapi

		missing_keywords = settings._getMissingKeywords(
			self.cpv, self._metadata)
		if missing_keywords:
			masks['KEYWORDS'] = missing_keywords

		try:
			missing_properties = settings._getMissingProperties(
				self.cpv, self._metadata)
			if missing_properties:
				masks['PROPERTIES'] = missing_properties
		except InvalidDependString:
			# already recorded as 'invalid'
			pass

		try:
			missing_restricts = settings._getMissingRestrict(
				self.cpv, self._metadata)
			if missing_restricts:
				masks['RESTRICT'] = missing_restricts
		except InvalidDependString:
			# already recorded as 'invalid'
			pass

		mask_atom = settings._getMaskAtom(self.cpv, self._metadata)
		if mask_atom is not None:
			masks['package.mask'] = mask_atom

		try:
			missing_licenses = settings._getMissingLicenses(
				self.cpv, self._metadata)
			if missing_licenses:
				masks['LICENSE'] = missing_licenses
		except InvalidDependString:
			# already recorded as 'invalid'
			pass

		# An empty dict means "not masked"; callers test `is not False`.
		if not masks:
			masks = False

		return masks
Exemplo n.º 12
0
def _get_eapi_attrs(eapi):
    """
	When eapi is None then validation is not as strict, since we want the
	same to work for multiple EAPIs that may have slightly different rules.
	An unsupported eapi is handled the same as when eapi is None, which may
	be helpful for handling of corrupt EAPI metadata in essential functions
	such as pkgsplit.
	"""
    eapi_attrs = _eapi_attrs_cache.get(eapi)
    if eapi_attrs is not None:
        return eapi_attrs

    orig_eapi = eapi
    if eapi is not None and not eapi_is_supported(eapi):
        eapi = None

    eapi_attrs = _eapi_attrs(
        allows_package_provided=(eapi is None
                                 or eapi_allows_package_provided(eapi)),
        bdepend=(eapi is not None and eapi_has_bdepend(eapi)),
        broot=(eapi is None or eapi_has_broot(eapi)),
        dots_in_PN=(eapi is None or eapi_allows_dots_in_PN(eapi)),
        dots_in_use_flags=(eapi is None
                           or eapi_allows_dots_in_use_flags(eapi)),
        empty_groups_always_true=(eapi is not None
                                  and eapi_empty_groups_always_true(eapi)),
        exports_EBUILD_PHASE_FUNC=(eapi is None
                                   or eapi_exports_EBUILD_PHASE_FUNC(eapi)),
        exports_PORTDIR=(eapi is None or eapi_exports_PORTDIR(eapi)),
        exports_ECLASSDIR=(eapi is not None and eapi_exports_ECLASSDIR(eapi)),
        feature_flag_test=True,
        feature_flag_targetroot=(eapi is not None
                                 and eapi_has_targetroot(eapi)),
        hdepend=(eapi is not None and eapi_has_hdepend(eapi)),
        iuse_defaults=(eapi is None or eapi_has_iuse_defaults(eapi)),
        iuse_effective=(eapi is not None and eapi_has_iuse_effective(eapi)),
        path_variables_end_with_trailing_slash=(
            eapi is not None
            and eapi_path_variables_end_with_trailing_slash(eapi)),
        posixish_locale=(eapi is not None
                         and eapi_requires_posixish_locale(eapi)),
        repo_deps=(eapi is None or eapi_has_repo_deps(eapi)),
        required_use=(eapi is None or eapi_has_required_use(eapi)),
        required_use_at_most_one_of=(
            eapi is None or eapi_has_required_use_at_most_one_of(eapi)),
        slot_deps=(eapi is None or eapi_has_slot_deps(eapi)),
        slot_operator=(eapi is None or eapi_has_slot_operator(eapi)),
        src_uri_arrows=(eapi is None or eapi_has_src_uri_arrows(eapi)),
        strong_blocks=(eapi is None or eapi_has_strong_blocks(eapi)),
        sysroot=(eapi is None or eapi_has_sysroot(eapi)),
        use_deps=(eapi is None or eapi_has_use_deps(eapi)),
        use_dep_defaults=(eapi is None or eapi_has_use_dep_defaults(eapi)))

    _eapi_attrs_cache[orig_eapi] = eapi_attrs
    return eapi_attrs
Exemplo n.º 13
0
    def _pull_valid_cache(self, cpv, ebuild_path, repo_path):
        """
        Look up still-valid cached metadata for cpv.

        @return: (metadata, st, emtime) where metadata is None when the
            cache entry is absent or stale and must be regenerated
        @raises KeyError: when the ebuild file does not exist
        """
        try:
            # Don't use unicode-wrapped os module, for better performance.
            st = _os.stat(_unicode_encode(
                ebuild_path, encoding=_encodings['fs'], errors='strict'))
            emtime = st[stat.ST_MTIME]
        except OSError:
            writemsg(_("!!! aux_get(): ebuild for " \
             "'%s' does not exist at:\n") % (cpv,), noiselevel=-1)
            writemsg("!!!            %s\n" % ebuild_path, noiselevel=-1)
            raise KeyError(cpv)

        # Prefer pre-generated metadata/cache/ entries when present,
        # falling back to the normal writable cache.
        auxdbs = []
        pregen_auxdb = self._pregen_auxdb.get(repo_path)
        if pregen_auxdb is not None:
            auxdbs.append(pregen_auxdb)
        auxdbs.append(self.auxdb[repo_path])
        eclass_db = self._repo_info[repo_path].eclass_db

        doregen = True
        for auxdb in auxdbs:
            try:
                metadata = auxdb[cpv]
            except KeyError:
                continue
            except CacheError:
                # Discard corrupt entries, but only from the writable
                # cache -- never from the pre-generated one.
                if auxdb is not pregen_auxdb:
                    try:
                        del auxdb[cpv]
                    except (KeyError, CacheError):
                        pass
                continue
            eapi = metadata.get('EAPI', '').strip() or '0'
            # A "-"-prefixed EAPI marks an entry previously recorded as
            # having an unsupported EAPI; such entries are never valid.
            eapi_banned = eapi[:1] == '-' and eapi_is_supported(eapi[1:])
            if not eapi_banned and \
             emtime == metadata['_mtime_'] and \
             eclass_db.is_eclass_data_valid(metadata['_eclasses_']):
                doregen = False
                break

        if doregen:
            metadata = None

        return (metadata, st, emtime)
Exemplo n.º 14
0
    def _pull_valid_cache(self, cpv, ebuild_path, repo_path):
        """
        Look up still-valid cached metadata for cpv.

        @return: (metadata, ebuild_hash) where metadata is None when no
            cache entry validates and regeneration is required
        @raises KeyError: when the ebuild file does not exist
        """
        try:
            ebuild_hash = eclass_cache.hashed_path(ebuild_path)
            # Touch mtime now both because it is used later and to
            # trigger a stat failure when the ebuild does not exist.
            ebuild_hash.mtime
        except FileNotFound:
            writemsg(_("!!! aux_get(): ebuild for " \
             "'%s' does not exist at:\n") % (cpv,), noiselevel=-1)
            writemsg("!!!            %s\n" % ebuild_path, noiselevel=-1)
            raise KeyError(cpv)

        # Consult caches in priority order: pre-generated
        # metadata/cache/, then any read-only cache, then the normal
        # writable cache.
        auxdbs = []
        for candidate_db in (self._pregen_auxdb.get(repo_path),
                             self._ro_auxdb.get(repo_path)):
            if candidate_db is not None:
                auxdbs.append(candidate_db)
        auxdbs.append(self.auxdb[repo_path])
        eclass_db = self.repositories.get_repo_for_location(
            repo_path).eclass_db

        metadata = None
        for auxdb in auxdbs:
            try:
                entry = auxdb[cpv]
            except KeyError:
                continue
            except CacheError:
                # Drop corrupt entries from writable caches only.
                if not auxdb.readonly:
                    try:
                        del auxdb[cpv]
                    except (KeyError, CacheError):
                        pass
                continue
            eapi = entry.get('EAPI', '').strip()
            if not eapi:
                eapi = '0'
                entry['EAPI'] = eapi
            if not eapi_is_supported(eapi):
                # Since we're supposed to be able to efficiently obtain
                # the EAPI from _parse_eapi_ebuild_head, cache entries
                # for unsupported EAPIs are disregarded.
                continue
            if auxdb.validate_entry(entry, ebuild_hash, eclass_db):
                metadata = entry
                break

        return (metadata, ebuild_hash)
Exemplo n.º 15
0
	def _pull_valid_cache(self, cpv, ebuild_path, repo_path):
		"""
		Look up still-valid cached metadata for cpv.

		@return: (metadata, st, emtime) where metadata is None when the
			cache entry is absent or stale and must be regenerated
		@raises KeyError: when the ebuild file does not exist
		"""
		try:
			# Don't use unicode-wrapped os module, for better performance.
			st = _os.stat(_unicode_encode(ebuild_path,
				encoding=_encodings['fs'], errors='strict'))
			emtime = st[stat.ST_MTIME]
		except OSError:
			writemsg(_("!!! aux_get(): ebuild for " \
				"'%s' does not exist at:\n") % (cpv,), noiselevel=-1)
			writemsg("!!!            %s\n" % ebuild_path, noiselevel=-1)
			raise KeyError(cpv)

		# Pull pre-generated metadata from the metadata/cache/
		# directory if it exists and is valid, otherwise fall
		# back to the normal writable cache.
		auxdbs = []
		pregen_auxdb = self._pregen_auxdb.get(repo_path)
		if pregen_auxdb is not None:
			auxdbs.append(pregen_auxdb)
		auxdbs.append(self.auxdb[repo_path])
		eclass_db = self._repo_info[repo_path].eclass_db

		doregen = True
		for auxdb in auxdbs:
			try:
				metadata = auxdb[cpv]
			except KeyError:
				pass
			except CacheError:
				# Discard corrupt entries, but only from the writable
				# cache -- never from the pre-generated one.
				if auxdb is not pregen_auxdb:
					try:
						del auxdb[cpv]
					except KeyError:
						pass
					except CacheError:
						pass
			else:
				eapi = metadata.get('EAPI', '').strip()
				if not eapi:
					eapi = '0'
				# A "-"-prefixed EAPI marks an entry recorded as having
				# an unsupported EAPI; such entries are never valid.
				# Otherwise the entry is valid when its mtime and eclass
				# data match the current ebuild and eclasses.
				if not (eapi[:1] == '-' and eapi_is_supported(eapi[1:])) and \
					emtime == metadata['_mtime_'] and \
					eclass_db.is_eclass_data_valid(metadata['_eclasses_']):
					doregen = False

			if not doregen:
				break

		if doregen:
			metadata = None

		return (metadata, st, emtime)
Exemplo n.º 16
0
	def _pull_valid_cache(self, cpv, ebuild_path, repo_path):
		"""
		Look up still-valid cached metadata for cpv.

		@return: (metadata, ebuild_hash) where metadata is None when no
			cache entry validates and regeneration is required
		@raises KeyError: when the ebuild file does not exist
		"""
		try:
			ebuild_hash = eclass_cache.hashed_path(ebuild_path)
			# snag mtime since we use it later, and to trigger stat failure
			# if it doesn't exist
			ebuild_hash.mtime
		except FileNotFound:
			writemsg(_("!!! aux_get(): ebuild for " \
				"'%s' does not exist at:\n") % (cpv,), noiselevel=-1)
			writemsg("!!!            %s\n" % ebuild_path, noiselevel=-1)
			raise KeyError(cpv)

		# Pull pre-generated metadata from the metadata/cache/
		# directory if it exists and is valid, otherwise fall
		# back to the normal writable cache.
		auxdbs = []
		pregen_auxdb = self._pregen_auxdb.get(repo_path)
		if pregen_auxdb is not None:
			auxdbs.append(pregen_auxdb)
		ro_auxdb = self._ro_auxdb.get(repo_path)
		if ro_auxdb is not None:
			auxdbs.append(ro_auxdb)
		auxdbs.append(self.auxdb[repo_path])
		eclass_db = self.repositories.get_repo_for_location(repo_path).eclass_db

		for auxdb in auxdbs:
			try:
				metadata = auxdb[cpv]
			except KeyError:
				continue
			except CacheError:
				# Discard corrupt entries from writable caches only.
				if not auxdb.readonly:
					try:
						del auxdb[cpv]
					except (KeyError, CacheError):
						pass
				continue
			eapi = metadata.get('EAPI', '').strip()
			if not eapi:
				eapi = '0'
				metadata['EAPI'] = eapi
			if not eapi_is_supported(eapi):
				# Since we're supposed to be able to efficiently obtain the
				# EAPI from _parse_eapi_ebuild_head, we disregard cache entries
				# for unsupported EAPIs.
				continue
			if auxdb.validate_entry(metadata, ebuild_hash, eclass_db):
				break
		else:
			# No cache entry validated; caller must regenerate.
			metadata = None

		return (metadata, ebuild_hash)
Exemplo n.º 17
0
	def _masks(self):
		"""
		Collect every reason this package is masked.

		@return: a dict mapping mask category (e.g. 'KEYWORDS',
			'package.mask', 'profile.system') to details, or None when
			the package is not masked
		"""
		masks = {}
		settings = self.root_config.settings

		if self.invalid is not None:
			masks['invalid'] = self.invalid

		if not settings._accept_chost(self.cpv, self.metadata):
			masks['CHOST'] = self.metadata['CHOST']

		eapi = self.metadata["EAPI"]
		if not portage.eapi_is_supported(eapi):
			masks['EAPI.unsupported'] = eapi
		if portage._eapi_is_deprecated(eapi):
			masks['EAPI.deprecated'] = eapi

		kw_missing = settings._getMissingKeywords(self.cpv, self.metadata)
		if kw_missing:
			masks['KEYWORDS'] = kw_missing

		try:
			props_missing = settings._getMissingProperties(
				self.cpv, self.metadata)
		except portage.exception.InvalidDependString:
			# already recorded as 'invalid'
			pass
		else:
			if props_missing:
				masks['PROPERTIES'] = props_missing

		mask_atom = settings._getMaskAtom(self.cpv, self.metadata)
		if mask_atom is not None:
			masks['package.mask'] = mask_atom

		system_mask = settings._getProfileMaskAtom(self.cpv, self.metadata)
		if system_mask is not None:
			masks['profile.system'] = system_mask

		try:
			lic_missing = settings._getMissingLicenses(
				self.cpv, self.metadata)
		except portage.exception.InvalidDependString:
			# already recorded as 'invalid'
			pass
		else:
			if lic_missing:
				masks['LICENSE'] = lic_missing

		return masks if masks else None
Exemplo n.º 18
0
    def _masks(self):
        """
        Collect every reason this package is masked.

        @return: a dict mapping mask category (e.g. 'KEYWORDS',
            'package.mask', 'profile.system') to details, or None when
            the package is not masked
        """
        masks = {}
        settings = self.root_config.settings

        if self.invalid is not None:
            masks['invalid'] = self.invalid

        if not settings._accept_chost(self.cpv, self.metadata):
            masks['CHOST'] = self.metadata['CHOST']

        eapi = self.metadata["EAPI"]
        if not portage.eapi_is_supported(eapi):
            masks['EAPI.unsupported'] = eapi
        if portage._eapi_is_deprecated(eapi):
            masks['EAPI.deprecated'] = eapi

        missing_keywords = settings._getMissingKeywords(
            self.cpv, self.metadata)
        if missing_keywords:
            masks['KEYWORDS'] = missing_keywords

        try:
            missing_properties = settings._getMissingProperties(
                self.cpv, self.metadata)
            if missing_properties:
                masks['PROPERTIES'] = missing_properties
        except portage.exception.InvalidDependString:
            # already recorded as 'invalid'
            pass

        mask_atom = settings._getMaskAtom(self.cpv, self.metadata)
        if mask_atom is not None:
            masks['package.mask'] = mask_atom

        system_mask = settings._getProfileMaskAtom(self.cpv, self.metadata)
        if system_mask is not None:
            masks['profile.system'] = system_mask

        try:
            missing_licenses = settings._getMissingLicenses(
                self.cpv, self.metadata)
            if missing_licenses:
                masks['LICENSE'] = missing_licenses
        except portage.exception.InvalidDependString:
            # already recorded as 'invalid'
            pass

        # An empty dict means "not masked"; callers test `is not None`.
        if not masks:
            masks = None

        return masks
Exemplo n.º 19
0
	def _set_returncode(self, wait_retval):
		"""
		Handle subprocess exit: parse the raw metadata output, validate
		it against the EAPI parsed from the ebuild head, resolve eclass
		data, and write the cache entry. Sets self.metadata on success,
		or forces returncode=1 when the output is invalid.
		"""
		SubProcess._set_returncode(self, wait_retval)
		# self._raw_metadata is None when _start returns
		# early due to an unsupported EAPI
		if self.returncode == os.EX_OK and \
			self._raw_metadata is not None:
			metadata_lines = _unicode_decode(b''.join(self._raw_metadata),
				encoding=_encodings['repo.content'],
				errors='replace').splitlines()
			metadata_valid = True
			if len(portage.auxdbkeys) != len(metadata_lines):
				# Don't trust bash's returncode if the
				# number of lines is incorrect.
				metadata_valid = False
			else:
				metadata = dict(zip(portage.auxdbkeys, metadata_lines))
				# _eapi is the EAPI parsed from the ebuild head; None
				# means no explicit EAPI assignment, i.e. the default.
				parsed_eapi = self._eapi
				if parsed_eapi is None:
					parsed_eapi = "0"
				self.eapi_supported = \
					portage.eapi_is_supported(metadata["EAPI"])
				# A mismatch between the bash-reported EAPI and the
				# parsed one indicates an invalid EAPI assignment.
				if (not metadata["EAPI"] or self.eapi_supported) and \
					metadata["EAPI"] != parsed_eapi:
					self._eapi_invalid(metadata)
					metadata_valid = False

			if metadata_valid:
				# Since we're supposed to be able to efficiently obtain the
				# EAPI from _parse_eapi_ebuild_head, we don't write cache
				# entries for unsupported EAPIs.
				if self.eapi_supported:

					# Resolve INHERITED eclass names into the eclass
					# data used for cache validation.
					if metadata.get("INHERITED", False):
						metadata["_eclasses_"] = \
							self.portdb.repositories.get_repo_for_location(
							self.repo_path).eclass_db.get_eclass_data(
							metadata["INHERITED"].split())
					else:
						metadata["_eclasses_"] = {}
					metadata.pop("INHERITED", None)

					# If called by egencache, this cache write is
					# undesirable when metadata-transfer is disabled.
					if self.write_auxdb is not False:
						self.portdb._write_cache(self.cpv,
							self.repo_path, metadata, self.ebuild_hash)
				else:
					metadata = {"EAPI": metadata["EAPI"]}
				self.metadata = metadata
			else:
				self.returncode = 1
Exemplo n.º 20
0
def _get_eapi_attrs(eapi):
	"""
	Return an _eapi_attrs tuple of feature flags for the given EAPI.

	When eapi is None then validation is not as strict, since we want the
	same to work for multiple EAPIs that may have slightly different rules.
	An unsupported eapi is handled the same as when eapi is None, which may
	be helpful for handling of corrupt EAPI metadata in essential functions
	such as pkgsplit.
	"""
	# Results are memoized per eapi string (including None/unsupported).
	eapi_attrs = _eapi_attrs_cache.get(eapi)
	if eapi_attrs is not None:
		return eapi_attrs

	orig_eapi = eapi
	# Treat unsupported EAPIs exactly like None below.
	if eapi is not None and not eapi_is_supported(eapi):
		eapi = None

	# "eapi is None or ..." flags default to enabled for unknown EAPIs,
	# while "eapi is not None and ..." flags default to disabled.
	eapi_attrs = _eapi_attrs(
		allows_package_provided=(eapi is None or eapi_allows_package_provided(eapi)),
		bdepend = (eapi is not None and eapi_has_bdepend(eapi)),
		broot = (eapi is None or eapi_has_broot(eapi)),
		dots_in_PN = (eapi is None or eapi_allows_dots_in_PN(eapi)),
		dots_in_use_flags = (eapi is None or eapi_allows_dots_in_use_flags(eapi)),
		empty_groups_always_true = (eapi is not None and eapi_empty_groups_always_true(eapi)),
		exports_EBUILD_PHASE_FUNC = (eapi is None or eapi_exports_EBUILD_PHASE_FUNC(eapi)),
		exports_PORTDIR = (eapi is None or eapi_exports_PORTDIR(eapi)),
		exports_ECLASSDIR = (eapi is not None and eapi_exports_ECLASSDIR(eapi)),
		feature_flag_test = False,
		feature_flag_targetroot = (eapi is not None and eapi_has_targetroot(eapi)),
		hdepend = (eapi is not None and eapi_has_hdepend(eapi)),
		iuse_defaults = (eapi is None or eapi_has_iuse_defaults(eapi)),
		iuse_effective = (eapi is not None and eapi_has_iuse_effective(eapi)),
		path_variables_end_with_trailing_slash = (eapi is not None and
			eapi_path_variables_end_with_trailing_slash(eapi)),
		posixish_locale = (eapi is not None and eapi_requires_posixish_locale(eapi)),
		repo_deps = (eapi is None or eapi_has_repo_deps(eapi)),
		required_use = (eapi is None or eapi_has_required_use(eapi)),
		required_use_at_most_one_of = (eapi is None or eapi_has_required_use_at_most_one_of(eapi)),
		slot_deps = (eapi is None or eapi_has_slot_deps(eapi)),
		slot_operator = (eapi is None or eapi_has_slot_operator(eapi)),
		src_uri_arrows = (eapi is None or eapi_has_src_uri_arrows(eapi)),
		strong_blocks = (eapi is None or eapi_has_strong_blocks(eapi)),
		sysroot = (eapi is None or eapi_has_sysroot(eapi)),
		use_deps = (eapi is None or eapi_has_use_deps(eapi)),
		use_dep_defaults = (eapi is None or eapi_has_use_dep_defaults(eapi))
	)

	# Cache under the original key so unsupported strings also hit cache.
	_eapi_attrs_cache[orig_eapi] = eapi_attrs
	return eapi_attrs
Exemplo n.º 21
0
    def gvisible(self, mylist):
        """
        Strip out group-masked (not in current group) entries.

        Filters mylist down to the cpvs whose metadata has a supported,
        non-deprecated EAPI, acceptable KEYWORDS and CHOST, and (under
        local config) no missing LICENSE/PROPERTIES acceptance.
        """

        if mylist is None:
            return []
        newlist = []
        aux_keys = list(self._aux_cache_keys)
        # Reused (cleared) across iterations to avoid reallocating.
        metadata = {}
        local_config = self.settings.local_config
        chost = self.settings.get('CHOST', '')
        accept_chost = self.settings._accept_chost
        for mycpv in mylist:
            metadata.clear()
            try:
                metadata.update(zip(aux_keys, self.aux_get(mycpv, aux_keys)))
            except KeyError:
                # Metadata unavailable for this cpv; skip it.
                continue
            except PortageException as e:
                writemsg("!!! Error: aux_get('%s', %s)\n" % (mycpv, aux_keys),
                         noiselevel=-1)
                writemsg("!!! %s\n" % (e, ), noiselevel=-1)
                del e
                continue
            eapi = metadata["EAPI"]
            if not eapi_is_supported(eapi):
                continue
            if _eapi_is_deprecated(eapi):
                continue
            if self.settings._getMissingKeywords(mycpv, metadata):
                continue
            if local_config:
                metadata['CHOST'] = chost
                if not accept_chost(mycpv, metadata):
                    continue
                metadata["USE"] = ""
                # USE evaluation is only needed for conditional
                # LICENSE/PROPERTIES values.
                if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]:
                    self.doebuild_settings.setcpv(mycpv, mydb=metadata)
                    metadata['USE'] = self.doebuild_settings['PORTAGE_USE']
                try:
                    if self.settings._getMissingLicenses(mycpv, metadata):
                        continue
                    if self.settings._getMissingProperties(mycpv, metadata):
                        continue
                except InvalidDependString:
                    continue
            newlist.append(mycpv)
        return newlist
Exemplo n.º 22
0
	def gvisible(self,mylist):
		"""
		Strip out group-masked (not in current group) entries.

		Filters mylist down to the cpvs whose metadata has a supported,
		non-deprecated EAPI, acceptable KEYWORDS and CHOST, and (under
		local config) no missing LICENSE/PROPERTIES acceptance.
		"""

		if mylist is None:
			return []
		newlist=[]
		aux_keys = list(self._aux_cache_keys)
		# Reused (cleared) across iterations to avoid reallocating.
		metadata = {}
		local_config = self.settings.local_config
		chost = self.settings.get('CHOST', '')
		accept_chost = self.settings._accept_chost
		for mycpv in mylist:
			metadata.clear()
			try:
				metadata.update(zip(aux_keys, self.aux_get(mycpv, aux_keys)))
			except KeyError:
				# Metadata unavailable for this cpv; skip it.
				continue
			except PortageException as e:
				writemsg("!!! Error: aux_get('%s', %s)\n" % (mycpv, aux_keys),
					noiselevel=-1)
				writemsg("!!! %s\n" % (e,), noiselevel=-1)
				del e
				continue
			eapi = metadata["EAPI"]
			if not eapi_is_supported(eapi):
				continue
			if _eapi_is_deprecated(eapi):
				continue
			if self.settings._getMissingKeywords(mycpv, metadata):
				continue
			if local_config:
				metadata['CHOST'] = chost
				if not accept_chost(mycpv, metadata):
					continue
				metadata["USE"] = ""
				# USE evaluation is only needed for conditional
				# LICENSE/PROPERTIES values.
				if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]:
					self.doebuild_settings.setcpv(mycpv, mydb=metadata)
					metadata['USE'] = self.doebuild_settings['PORTAGE_USE']
				try:
					if self.settings._getMissingLicenses(mycpv, metadata):
						continue
					if self.settings._getMissingProperties(mycpv, metadata):
						continue
				except InvalidDependString:
					continue
			newlist.append(mycpv)
		return newlist
Exemplo n.º 23
0
    def _metadata_callback(self, cpv, repo_path, metadata, ebuild_hash):
        """
        Normalize freshly generated metadata and write it to the auxdb
        cache for repo_path.

        @param cpv: package cpv the metadata belongs to
        @param repo_path: canonical location of the repository
        @param metadata: mapping (or iterable of key/value items)
        @param ebuild_hash: object carrying the cache validation
            attribute named by the cache's validation_chf
        @return: the normalized metadata dict
        """
        # Accept either a mapping or an iterable of (key, value) pairs.
        i = metadata
        if hasattr(metadata, "items"):
            i = iter(metadata.items())
        metadata = dict(i)

        if metadata.get("INHERITED", False):
            metadata["_eclasses_"] = self.repositories.get_repo_for_location(
                repo_path).eclass_db.get_eclass_data(
                    metadata["INHERITED"].split())
        else:
            metadata["_eclasses_"] = {}

        # BUG FIX: chf was previously left unbound when the auxdb lookup
        # raised CacheError, causing a NameError in the unsupported-EAPI
        # branch below. Initialize it so later uses can be guarded.
        cache = None
        chf = None
        try:
            cache = self.auxdb[repo_path]
            chf = cache.validation_chf
            metadata['_%s_' % chf] = getattr(ebuild_hash, chf)
        except CacheError:
            # Normally this shouldn't happen, so we'll show
            # a traceback for debugging purposes.
            traceback.print_exc()
            cache = None

        metadata.pop("INHERITED", None)

        # Normalize an empty/whitespace EAPI to the implicit "0".
        eapi = metadata.get("EAPI")
        if not eapi or not eapi.strip():
            eapi = "0"
            metadata["EAPI"] = eapi
        if not eapi_is_supported(eapi):
            # Blank out all values except the validation bookkeeping and
            # flag the EAPI so consumers recognize the entry as unusable.
            keys = set(metadata)
            keys.discard('_eclasses_')
            keys.discard('_mtime_')
            if chf is not None:
                keys.discard('_%s_' % chf)
            metadata.update((k, '') for k in keys)
            metadata["EAPI"] = "-" + eapi.lstrip("-")

        if cache is not None:
            try:
                cache[cpv] = metadata
            except CacheError:
                # Normally this shouldn't happen, so we'll show
                # a traceback for debugging purposes.
                traceback.print_exc()
        return metadata
Exemplo n.º 24
0
	def _metadata_callback(self, cpv, repo_path, metadata, ebuild_hash):
		"""
		Normalize freshly generated metadata and write it to the auxdb
		cache for repo_path.

		@param cpv: package cpv the metadata belongs to
		@param repo_path: canonical location of the repository
		@param metadata: mapping (or iterable of key/value items)
		@param ebuild_hash: object carrying the cache validation
			attribute named by the cache's validation_chf
		@return: the normalized metadata dict
		"""
		# Accept either a mapping or an iterable of (key, value) pairs.
		i = metadata
		if hasattr(metadata, "items"):
			i = iter(metadata.items())
		metadata = dict(i)

		if metadata.get("INHERITED", False):
			metadata["_eclasses_"] = self._repo_info[repo_path
				].eclass_db.get_eclass_data(metadata["INHERITED"].split())
		else:
			metadata["_eclasses_"] = {}

		# BUG FIX: chf was previously left unbound when the auxdb lookup
		# raised CacheError, causing a NameError in the unsupported-EAPI
		# branch below. Initialize it so later uses can be guarded.
		cache = None
		chf = None
		try:
			cache = self.auxdb[repo_path]
			chf = cache.validation_chf
			metadata['_%s_' % chf] = getattr(ebuild_hash, chf)
		except CacheError:
			# Normally this shouldn't happen, so we'll show
			# a traceback for debugging purposes.
			traceback.print_exc()
			cache = None

		metadata.pop("INHERITED", None)

		# Normalize an empty/whitespace EAPI to the implicit "0".
		eapi = metadata.get("EAPI")
		if not eapi or not eapi.strip():
			eapi = "0"
			metadata["EAPI"] = eapi
		if not eapi_is_supported(eapi):
			# Blank out all values except the validation bookkeeping and
			# flag the EAPI so consumers recognize the entry as unusable.
			keys = set(metadata)
			keys.discard('_eclasses_')
			keys.discard('_mtime_')
			if chf is not None:
				keys.discard('_%s_' % chf)
			metadata.update((k, '') for k in keys)
			metadata["EAPI"] = "-" + eapi.lstrip("-")

		if cache is not None:
			try:
				cache[cpv] = metadata
			except CacheError:
				# Normally this shouldn't happen, so we'll show
				# a traceback for debugging purposes.
				traceback.print_exc()
		return metadata
Exemplo n.º 25
0
 def _aux_get_wrapper(self, pkg, wants):
     """
     Fetch metadata for pkg, refreshing the installed instance from
     the live ebuild metadata the first time pkg is seen; fall back
     to global updates when the live metadata is unavailable or has
     an unsupported EAPI.
     """
     if pkg in self._aux_get_history:
         return self._aux_get(pkg, wants)
     self._aux_get_history.add(pkg)
     try:
         # Use the live ebuild metadata if possible.
         live_metadata = dict(
             zip(self._portdb_keys,
                 self._portdb.aux_get(pkg, self._portdb_keys)))
         # An unsupported EAPI is treated like a missing ebuild.
         if not portage.eapi_is_supported(live_metadata["EAPI"]):
             raise KeyError(pkg)
         self.dbapi.aux_update(pkg, live_metadata)
     except (KeyError, portage.exception.PortageException):
         # No usable live metadata; apply recorded global updates.
         if self._global_updates is None:
             self._global_updates = \
              grab_global_updates(self._portdb)
         perform_global_updates(pkg, self.dbapi, self._global_updates)
     return self._aux_get(pkg, wants)
Exemplo n.º 26
0
	def _aux_get_wrapper(self, pkg, wants):
		"""
		Fetch metadata for pkg, refreshing the installed instance from
		the live ebuild metadata the first time pkg is seen; fall back
		to global updates when the live metadata is unavailable or has
		an unsupported EAPI.
		"""
		if pkg not in self._aux_get_history:
			self._aux_get_history.add(pkg)
			try:
				# Prefer metadata from the live ebuild when it exists
				# and carries a supported EAPI.
				fresh = dict(zip(self._portdb_keys,
					self._portdb.aux_get(pkg, self._portdb_keys)))
				if not portage.eapi_is_supported(fresh["EAPI"]):
					raise KeyError(pkg)
				self.dbapi.aux_update(pkg, fresh)
			except (KeyError, portage.exception.PortageException):
				# No usable live metadata; apply recorded global updates.
				if self._global_updates is None:
					self._global_updates = grab_global_updates(self._portdb)
				perform_global_updates(pkg, self.dbapi, self._global_updates)
		return self._aux_get(pkg, wants)
Exemplo n.º 27
0
    def getFetchMap(self, mypkg, useflags=None, mytree=None):
        """
		Get the SRC_URI metadata as a dict which maps each file name to a
		set of alternative URIs.

		@param mypkg: cpv for an ebuild
		@type mypkg: String
		@param useflags: a collection of enabled USE flags, for evaluation of
			conditionals
		@type useflags: set, or None to enable all conditionals
		@param mytree: The canonical path of the tree in which the ebuild
			is located, or None for automatic lookup
		@type mypkg: String
		@return: A dict which maps each file name to a set of alternative
			URIs.
		@rtype: dict
		@raise portage.exception.InvalidDependString: if metadata cannot
			be read or the ebuild's EAPI is unsupported
		"""

        try:
            eapi, myuris = self.aux_get(mypkg, ["EAPI", "SRC_URI"],
                                        mytree=mytree)
        except KeyError:
            # Convert this to an InvalidDependString exception since callers
            # already handle it.
            raise portage.exception.InvalidDependString(
                "getFetchMap(): aux_get() error reading " + mypkg +
                "; aborting.")

        if not eapi_is_supported(eapi):
            # Convert this to an InvalidDependString exception
            # since callers already handle it.
            raise portage.exception.InvalidDependString(
             "getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
             (mypkg, eapi.lstrip("-")))

        return _parse_uri_map(mypkg, {
            'EAPI': eapi,
            'SRC_URI': myuris
        },
                              use=useflags)
Exemplo n.º 28
0
def _get_eapi_attrs(eapi):
    """
    Return the memoized _eapi_attrs tuple of feature flags for eapi.

    When eapi is None then validation is not as strict, since we want
    the same to work for multiple EAPIs that may have slightly different
    rules. An unsupported eapi is handled the same as when eapi is None,
    which may be helpful for handling of corrupt EAPI metadata in
    essential functions such as pkgsplit.
    """
    cached = _eapi_attrs_cache.get(eapi)
    if cached is not None:
        return cached

    cache_key = eapi
    # Unsupported EAPIs are treated exactly like None below.
    if eapi is not None and not eapi_is_supported(eapi):
        eapi = None

    def permissive(check):
        # Flag defaults to True for unknown/unsupported EAPIs.
        return eapi is None or check(eapi)

    def restrictive(check):
        # Flag defaults to False for unknown/unsupported EAPIs.
        return eapi is not None and check(eapi)

    attrs = _eapi_attrs(
        dots_in_PN=permissive(eapi_allows_dots_in_PN),
        dots_in_use_flags=permissive(eapi_allows_dots_in_use_flags),
        exports_EBUILD_PHASE_FUNC=permissive(eapi_exports_EBUILD_PHASE_FUNC),
        feature_flag_test=True,
        iuse_defaults=permissive(eapi_has_iuse_defaults),
        iuse_effective=restrictive(eapi_has_iuse_effective),
        repo_deps=permissive(eapi_has_repo_deps),
        required_use=permissive(eapi_has_required_use),
        required_use_at_most_one_of=permissive(
            eapi_has_required_use_at_most_one_of),
        slot_deps=permissive(eapi_has_slot_deps),
        slot_operator=permissive(eapi_has_slot_operator),
        src_uri_arrows=permissive(eapi_has_src_uri_arrows),
        strong_blocks=permissive(eapi_has_strong_blocks),
        use_deps=permissive(eapi_has_use_deps),
        use_dep_defaults=permissive(eapi_has_use_dep_defaults))

    # Cache under the original key so unsupported strings also hit cache.
    _eapi_attrs_cache[cache_key] = attrs
    return attrs
Exemplo n.º 29
0
def _get_eapi_attrs(eapi):
	"""
	Return an _eapi_attrs tuple of feature flags for the given EAPI.

	When eapi is None then validation is not as strict, since we want the
	same to work for multiple EAPIs that may have slightly different rules.
	An unsupported eapi is handled the same as when eapi is None, which may
	be helpful for handling of corrupt EAPI metadata in essential functions
	such as pkgsplit.
	"""
	# Results are memoized per eapi string (including None/unsupported).
	eapi_attrs = _eapi_attrs_cache.get(eapi)
	if eapi_attrs is not None:
		return eapi_attrs

	orig_eapi = eapi
	# Treat unsupported EAPIs exactly like None below.
	if eapi is not None and not eapi_is_supported(eapi):
		eapi = None

	# "eapi is None or ..." flags default to enabled for unknown EAPIs,
	# while "eapi is not None and ..." flags default to disabled.
	eapi_attrs = _eapi_attrs(
		dots_in_PN = (eapi is None or eapi_allows_dots_in_PN(eapi)),
		dots_in_use_flags = (eapi is None or eapi_allows_dots_in_use_flags(eapi)),
		exports_EBUILD_PHASE_FUNC = (eapi is None or eapi_exports_EBUILD_PHASE_FUNC(eapi)),
		feature_flag_test = True,
		feature_flag_targetroot = (eapi is not None and eapi_has_targetroot(eapi)),
		hdepend = (eapi is not None and eapi_has_hdepend(eapi)),
		iuse_defaults = (eapi is None or eapi_has_iuse_defaults(eapi)),
		iuse_effective = (eapi is not None and eapi_has_iuse_effective(eapi)),
		repo_deps = (eapi is None or eapi_has_repo_deps(eapi)),
		required_use = (eapi is None or eapi_has_required_use(eapi)),
		required_use_at_most_one_of = (eapi is None or eapi_has_required_use_at_most_one_of(eapi)),
		slot_deps = (eapi is None or eapi_has_slot_deps(eapi)),
		slot_operator = (eapi is None or eapi_has_slot_operator(eapi)),
		src_uri_arrows = (eapi is None or eapi_has_src_uri_arrows(eapi)),
		strong_blocks = (eapi is None or eapi_has_strong_blocks(eapi)),
		use_deps = (eapi is None or eapi_has_use_deps(eapi)),
		use_dep_defaults = (eapi is None or eapi_has_use_dep_defaults(eapi))
	)

	# Cache under the original key so unsupported strings also hit cache.
	_eapi_attrs_cache[orig_eapi] = eapi_attrs
	return eapi_attrs
Exemplo n.º 30
0
	def getFetchMap(self, mypkg, useflags=None, mytree=None):
		"""
		Get the SRC_URI metadata as a dict which maps each file name to a
		set of alternative URIs.

		@param mypkg: cpv for an ebuild
		@type mypkg: String
		@param useflags: a collection of enabled USE flags, for evaluation of
			conditionals
		@type useflags: set, or None to enable all conditionals
		@param mytree: The canonical path of the tree in which the ebuild
			is located, or None for automatic lookup
		@type mypkg: String
		@return: A dict which maps each file name to a set of alternative
			URIs.
		@rtype: dict
		@raise portage.exception.InvalidDependString: if metadata cannot
			be read or the ebuild's EAPI is unsupported
		"""

		try:
			eapi, myuris = self.aux_get(mypkg,
				["EAPI", "SRC_URI"], mytree=mytree)
		except KeyError:
			# Convert this to an InvalidDependString exception since callers
			# already handle it.
			raise portage.exception.InvalidDependString(
				"getFetchMap(): aux_get() error reading "+mypkg+"; aborting.")

		if not eapi_is_supported(eapi):
			# Convert this to an InvalidDependString exception
			# since callers already handle it.
			# NOTE(review): another variant of this method reports
			# eapi.lstrip("-") here instead of the raw value.
			raise portage.exception.InvalidDependString(
				"getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
				(mypkg, eapi))

		return _parse_uri_map(mypkg, {'EAPI':eapi,'SRC_URI':myuris},
			use=useflags)
Exemplo n.º 31
0
    def _async_waitpid_cb(self, *args, **kwargs):
        """
		Override _async_waitpid_cb to perform cleanup that is
		not necessarily idempotent.

		Validates the raw metadata emitted by the ebuild process,
		injects unpack dependencies where the EAPI supports them, and
		writes the result to the metadata cache; invalid output forces
		returncode to 1.
		"""
        SubProcess._async_waitpid_cb(self, *args, **kwargs)
        # self._raw_metadata is None when _start returns
        # early due to an unsupported EAPI
        if self.returncode == os.EX_OK and \
         self._raw_metadata is not None:
            metadata_lines = _unicode_decode(
                b''.join(self._raw_metadata),
                encoding=_encodings['repo.content'],
                errors='replace').splitlines()
            metadata_valid = True
            if len(portage.auxdbkeys) != len(metadata_lines):
                # Don't trust bash's returncode if the
                # number of lines is incorrect.
                metadata_valid = False
            else:
                metadata = dict(zip(portage.auxdbkeys, metadata_lines))
                parsed_eapi = self._eapi
                if parsed_eapi is None:
                    parsed_eapi = "0"
                self.eapi_supported = \
                 portage.eapi_is_supported(metadata["EAPI"])
                # Report a mismatch between the EAPI in the generated
                # metadata and the previously parsed EAPI, unless the
                # metadata EAPI is unsupported (and non-empty).
                if (not metadata["EAPI"] or self.eapi_supported) and \
                 metadata["EAPI"] != parsed_eapi:
                    self._eapi_invalid(metadata)
                    metadata_valid = False

            if metadata_valid:
                # Since we're supposed to be able to efficiently obtain the
                # EAPI from _parse_eapi_ebuild_head, we don't write cache
                # entries for unsupported EAPIs.
                if self.eapi_supported:

                    if metadata.get("INHERITED", False):
                        # Resolve eclass data used for cache validation.
                        metadata["_eclasses_"] = \
                         self.portdb.repositories.get_repo_for_location(
                         self.repo_path).eclass_db.get_eclass_data(
                         metadata["INHERITED"].split())
                    else:
                        metadata["_eclasses_"] = {}
                    metadata.pop("INHERITED", None)

                    # Append repository-configured unpack dependencies to
                    # DEPEND for EAPIs with automatic unpack dependencies.
                    if eapi_has_automatic_unpack_dependencies(
                            metadata["EAPI"]):
                        repo = self.portdb.repositories.get_name_for_location(
                            self.repo_path)
                        unpackers = self.settings.unpack_dependencies.get(
                            repo, {}).get(metadata["EAPI"], {})
                        unpack_dependencies = extract_unpack_dependencies(
                            metadata["SRC_URI"], unpackers)
                        if unpack_dependencies:
                            metadata["DEPEND"] += (" "
                                                   if metadata["DEPEND"] else
                                                   "") + unpack_dependencies

                    # If called by egencache, this cache write is
                    # undesirable when metadata-transfer is disabled.
                    if self.write_auxdb is not False:
                        self.portdb._write_cache(self.cpv, self.repo_path,
                                                 metadata, self.ebuild_hash)
                else:
                    # Keep only the EAPI for unsupported EAPIs.
                    metadata = {"EAPI": metadata["EAPI"]}
                self.metadata = metadata
            else:
                # Invalid output: override the process exit status.
                self.returncode = 1
Exemplo n.º 32
0
def parse_layout_conf(repo_location, repo_name=None):
    """
    Parse a repository's metadata/layout.conf into a normalized dict.

    @param repo_location: root path of the repository
    @param repo_name: optional cached repository name (looked up lazily
        when needed for warning messages)
    @return: (data, layout_errors) where data maps normalized layout
        settings to parsed values
    """
    eapi = read_corresponding_eapi_file(
        os.path.join(repo_location, REPO_NAME_LOC))

    layout_filename = os.path.join(repo_location, "metadata", "layout.conf")
    layout_file = KeyValuePairFileLoader(layout_filename, None, None)
    layout_data, layout_errors = layout_file.load()

    data = {}

    # None indicates abscence of a masters setting, which later code uses
    # to trigger a backward compatibility fallback that sets an implicit
    # master. In order to avoid this fallback behavior, layout.conf can
    # explicitly set masters to an empty value, which will result in an
    # empty tuple here instead of None.
    masters = layout_data.get('masters')
    if masters is not None:
        masters = tuple(masters.split())
    data['masters'] = masters
    data['aliases'] = tuple(layout_data.get('aliases', '').split())

    data['allow-provide-virtual'] = \
     layout_data.get('allow-provide-virtuals', 'false').lower() == 'true'

    data['eapis-banned'] = tuple(layout_data.get('eapis-banned', '').split())
    data['eapis-deprecated'] = tuple(
        layout_data.get('eapis-deprecated', '').split())

    data['sign-commit'] = layout_data.get('sign-commits', 'false').lower() \
     == 'true'

    data['sign-manifest'] = layout_data.get('sign-manifests', 'true').lower() \
     == 'true'

    data['thin-manifest'] = layout_data.get('thin-manifests', 'false').lower() \
     == 'true'

    data['repo-name'] = _gen_valid_repo(layout_data.get('repo-name', ''))

    manifest_policy = layout_data.get('use-manifests', 'strict').lower()
    data['allow-missing-manifest'] = manifest_policy != 'strict'
    data['create-manifest'] = manifest_policy != 'false'
    data['disable-manifest'] = manifest_policy == 'false'

    # for compatibility w/ PMS, fallback to pms; but also check if the
    # cache exists or not.
    cache_formats = layout_data.get('cache-formats', '').lower().split()
    if not cache_formats:
        # Auto-detect cache formats, and prefer md5-cache if available.
        # This behavior was deployed in portage-2.1.11.14, so that the
        # default egencache format could eventually be changed to md5-dict
        # in portage-2.1.11.32. WARNING: Versions prior to portage-2.1.11.14
        # will NOT recognize md5-dict format unless it is explicitly
        # listed in layout.conf.
        cache_formats = []
        if os.path.isdir(os.path.join(repo_location, 'metadata', 'md5-cache')):
            cache_formats.append('md5-dict')
        if os.path.isdir(os.path.join(repo_location, 'metadata', 'cache')):
            cache_formats.append('pms')
    data['cache-formats'] = tuple(cache_formats)

    # Warn (but still record) when the configured manifest hashes are
    # missing the required hash or contain unsupported ones.
    manifest_hashes = layout_data.get('manifest-hashes')
    if manifest_hashes is not None:
        manifest_hashes = frozenset(manifest_hashes.upper().split())
        if MANIFEST2_REQUIRED_HASH not in manifest_hashes:
            repo_name = _get_repo_name(repo_location, cached=repo_name)
            warnings.warn(
                (_("Repository named '%(repo_name)s' has a "
                   "'manifest-hashes' setting that does not contain "
                   "the '%(hash)s' hash which is required by this "
                   "portage version. You will have to upgrade portage "
                   "if you want to generate valid manifests for this "
                   "repository: %(layout_filename)s") % {
                       "repo_name": repo_name or 'unspecified',
                       "hash": MANIFEST2_REQUIRED_HASH,
                       "layout_filename": layout_filename
                   }), DeprecationWarning)
        unsupported_hashes = manifest_hashes.difference(
            MANIFEST2_HASH_FUNCTIONS)
        if unsupported_hashes:
            repo_name = _get_repo_name(repo_location, cached=repo_name)
            warnings.warn((
                _("Repository named '%(repo_name)s' has a "
                  "'manifest-hashes' setting that contains one "
                  "or more hash types '%(hashes)s' which are not supported by "
                  "this portage version. You will have to upgrade "
                  "portage if you want to generate valid manifests for "
                  "this repository: %(layout_filename)s") % {
                      "repo_name": repo_name or 'unspecified',
                      "hashes": " ".join(sorted(unsupported_hashes)),
                      "layout_filename": layout_filename
                  }), DeprecationWarning)
    data['manifest-hashes'] = manifest_hashes

    data['update-changelog'] = layout_data.get('update-changelog', 'false').lower() \
     == 'true'

    # profile-formats defaults depend on the repository EAPI when the
    # setting is absent; unknown formats are warned about and dropped.
    raw_formats = layout_data.get('profile-formats')
    if raw_formats is None:
        if eapi_allows_directories_on_profile_level_and_repository_level(eapi):
            raw_formats = ('portage-1', )
        else:
            raw_formats = ('portage-1-compat', )
    else:
        raw_formats = set(raw_formats.split())
        unknown = raw_formats.difference(_valid_profile_formats)
        if unknown:
            repo_name = _get_repo_name(repo_location, cached=repo_name)
            warnings.warn((_(
                "Repository named '%(repo_name)s' has unsupported "
                "profiles in use ('profile-formats = %(unknown_fmts)s' setting in "
                "'%(layout_filename)s; please upgrade portage.") %
                           dict(repo_name=repo_name or 'unspecified',
                                layout_filename=layout_filename,
                                unknown_fmts=" ".join(unknown))),
                          DeprecationWarning)
        raw_formats = tuple(raw_formats.intersection(_valid_profile_formats))
    data['profile-formats'] = raw_formats

    # profile_eapi_when_unspecified is only honored when the
    # profile-default-eapi format is enabled and the EAPI is supported.
    try:
        eapi = layout_data['profile_eapi_when_unspecified']
    except KeyError:
        pass
    else:
        if 'profile-default-eapi' not in raw_formats:
            warnings.warn(
                (_("Repository named '%(repo_name)s' has "
                   "profile_eapi_when_unspecified setting in "
                   "'%(layout_filename)s', but 'profile-default-eapi' is "
                   "not listed in the profile-formats field. Please "
                   "report this issue to the repository maintainer.") %
                 dict(repo_name=repo_name or 'unspecified',
                      layout_filename=layout_filename)), SyntaxWarning)
        elif not portage.eapi_is_supported(eapi):
            warnings.warn(
                (_("Repository named '%(repo_name)s' has "
                   "unsupported EAPI '%(eapi)s' setting in "
                   "'%(layout_filename)s'; please upgrade portage.") %
                 dict(repo_name=repo_name or 'unspecified',
                      eapi=eapi,
                      layout_filename=layout_filename)), SyntaxWarning)
        else:
            data['profile_eapi_when_unspecified'] = eapi

    return data, layout_errors
Exemplo n.º 33
0
	def _addProfile(self, currentPath, repositories, known_repos):
		"""
		Recursively record a profile directory and its parents.

		Reads the profile's "eapi" file (falling back to the repository's
		profile_eapi_when_unspecified, then "0"), resolves parent profiles
		listed in the "parent" file, and appends the profile to
		self.profiles / self.profiles_complex.

		@raise ParseError: for an unsupported profile EAPI, an empty
			parent file, or a missing parent profile
		"""
		current_abs_path = os.path.abspath(currentPath)
		allow_directories = True
		allow_parent_colon = True
		repo_loc = None
		compat_mode = False
		current_formats = ()
		eapi = None

		intersecting_repos = [x for x in known_repos
			if current_abs_path.startswith(x[0])]
		if intersecting_repos:
			# Handle nested repositories. The longest path
			# will be the correct one.
			repo_loc, layout_data = max(intersecting_repos,
				key=lambda x:len(x[0]))
			eapi = layout_data.get("profile_eapi_when_unspecified")

		eapi_file = os.path.join(currentPath, "eapi")
		eapi = eapi or "0"
		f = None
		try:
			f = io.open(_unicode_encode(eapi_file,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace')
			eapi = f.readline().strip()
		except IOError:
			# No "eapi" file; keep the default determined above.
			pass
		else:
			if not eapi_is_supported(eapi):
				raise ParseError(_(
					"Profile contains unsupported "
					"EAPI '%s': '%s'") % \
					(eapi, os.path.realpath(eapi_file),))
		finally:
			if f is not None:
				f.close()

		if intersecting_repos:
			# The EAPI and the repository's profile-formats together decide
			# whether portage-1 style directories are allowed here.
			allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
				any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
			compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
				layout_data['profile-formats'] == ('portage-1-compat',)
			allow_parent_colon = any(x in _allow_parent_colon
				for x in layout_data['profile-formats'])
			current_formats = tuple(layout_data['profile-formats'])


		if compat_mode:
			# Warn about portage-1 style directories used without the
			# repository declaring the portage-1 profile format.
			offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
			offenders = sorted(x for x in offenders
				if os.path.isdir(os.path.join(currentPath, x)))
			if offenders:
				warnings.warn(_(
					"\nThe selected profile is implicitly using the 'portage-1' format:\n"
					"\tprofile = %(profile_path)s\n"
					"But this repository is not using that format:\n"
					"\trepo = %(repo_name)s\n"
					"This will break in the future.  Please convert these dirs to files:\n"
					"\t%(files)s\n"
					"Or, add this line to the repository's layout.conf:\n"
					"\tprofile-formats = portage-1")
					% dict(profile_path=currentPath, repo_name=repo_loc,
						files='\n\t'.join(offenders)))

		parentsFile = os.path.join(currentPath, "parent")
		if exists_raise_eaccess(parentsFile):
			parents = grabfile(parentsFile)
			if not parents:
				raise ParseError(
					_("Empty parent file: '%s'") % parentsFile)
			for parentPath in parents:
				abs_parent = parentPath[:1] == os.sep
				if not abs_parent and allow_parent_colon:
					parentPath = self._expand_parent_colon(parentsFile,
						parentPath, repo_loc, repositories)

				# NOTE: This os.path.join() call is intended to ignore
				# currentPath if parentPath is already absolute.
				parentPath = normalize_path(os.path.join(
					currentPath, parentPath))

				if abs_parent or repo_loc is None or \
					not parentPath.startswith(repo_loc):
					# It seems that this parent may point outside
					# of the current repo, so realpath it.
					parentPath = os.path.realpath(parentPath)

				if exists_raise_eaccess(parentPath):
					# Parents are added (depth-first) before this profile.
					self._addProfile(parentPath, repositories, known_repos)
				else:
					raise ParseError(
						_("Parent '%s' not found: '%s'") %  \
						(parentPath, parentsFile))

		self.profiles.append(currentPath)
		self.profiles_complex.append(
			_profile_node(currentPath, allow_directories, False,
				current_formats, eapi))
Exemplo n.º 34
0
	def _addProfile(self, currentPath, repositories, known_repos):
		"""
		Recursively record a profile directory and its parents.

		Reads the profile's "eapi" file (defaulting to "0"), resolves
		parent profiles listed in the "parent" file, and appends the
		profile to self.profiles / self.profiles_complex.

		@raise ParseError: for an unsupported profile EAPI, an empty
			parent file, or a missing parent profile
		"""
		current_abs_path = os.path.abspath(currentPath)
		allow_directories = True
		allow_parent_colon = True
		repo_loc = None
		compat_mode = False

		eapi_file = os.path.join(currentPath, "eapi")
		eapi = "0"
		f = None
		try:
			f = io.open(_unicode_encode(eapi_file,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace')
			eapi = f.readline().strip()
		except IOError:
			# No "eapi" file; keep the implicit "0" default.
			pass
		else:
			if not eapi_is_supported(eapi):
				raise ParseError(_(
					"Profile contains unsupported "
					"EAPI '%s': '%s'") % \
					(eapi, os.path.realpath(eapi_file),))
		finally:
			if f is not None:
				f.close()

		intersecting_repos = [x for x in known_repos if current_abs_path.startswith(x[0])]
		if intersecting_repos:
			# protect against nested repositories.  Insane configuration, but the longest
			# path will be the correct one.
			repo_loc, layout_data = max(intersecting_repos, key=lambda x:len(x[0]))
			# The EAPI and the repository's profile-formats together decide
			# whether portage-1 style directories are allowed here.
			allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
				any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
			compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
				layout_data['profile-formats'] == ('portage-1-compat',)
			allow_parent_colon = any(x in _allow_parent_colon
				for x in layout_data['profile-formats'])

		if compat_mode:
			# Warn about portage-1 style directories used without the
			# repository declaring the portage-1 profile format.
			offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
			offenders = sorted(x for x in offenders
				if os.path.isdir(os.path.join(currentPath, x)))
			if offenders:
				warnings.warn(_("Profile '%(profile_path)s' in repository "
					"'%(repo_name)s' is implicitly using 'portage-1' profile format, but "
					"the repository profiles are not marked as that format.  This will break "
					"in the future.  Please either convert the following paths "
					"to files, or add\nprofile-formats = portage-1\nto the "
					"repositories layout.conf.  Files: '%(files)s'\n")
					% dict(profile_path=currentPath, repo_name=repo_loc,
						files=', '.join(offenders)))

		parentsFile = os.path.join(currentPath, "parent")
		if os.path.exists(parentsFile):
			parents = grabfile(parentsFile)
			if not parents:
				raise ParseError(
					_("Empty parent file: '%s'") % parentsFile)
			for parentPath in parents:
				abs_parent = parentPath[:1] == os.sep
				if not abs_parent and allow_parent_colon:
					parentPath = self._expand_parent_colon(parentsFile,
						parentPath, repo_loc, repositories)

				# NOTE: This os.path.join() call is intended to ignore
				# currentPath if parentPath is already absolute.
				parentPath = normalize_path(os.path.join(
					currentPath, parentPath))

				if abs_parent or repo_loc is None or \
					not parentPath.startswith(repo_loc):
					# It seems that this parent may point outside
					# of the current repo, so realpath it.
					parentPath = os.path.realpath(parentPath)

				if os.path.exists(parentPath):
					# Parents are added (depth-first) before this profile.
					self._addProfile(parentPath, repositories, known_repos)
				else:
					raise ParseError(
						_("Parent '%s' not found: '%s'") %  \
						(parentPath, parentsFile))

		self.profiles.append(currentPath)
		self.profiles_complex.append(
			_profile_node(currentPath, allow_directories))
Exemplo n.º 35
0
def mirror_cache(valid_nodes_iterable, src_cache, trg_cache, eclass_cache=None, verbose_instance=None):
	"""Mirror metadata cache entries from src_cache into trg_cache.

	Each node from valid_nodes_iterable is copied into trg_cache unless an
	identical entry is already present; entries remaining in trg_cache that
	were not named by the iterable are removed afterwards ("dead nodes").

	@param valid_nodes_iterable: iterable of cpv keys that should exist
	@param src_cache: source cache database (readable mapping of cpv -> entry)
	@param trg_cache: target cache database to synchronize
	@param eclass_cache: eclass cache used to validate/translate _eclasses_
		data; required when src_cache lacks complete eclass entries
	@param verbose_instance: progress/error reporter; defaults to a quiet one
	"""
	from portage import eapi_is_supported, \
		_validate_cache_for_unsupported_eapis
	if not src_cache.complete_eclass_entries and not eclass_cache:
		raise Exception("eclass_cache required for cache's of class %s!" % src_cache.__class__)

	# PEP 8 identity comparison (was "verbose_instance == None").
	if verbose_instance is None:
		noise = quiet_mirroring()
	else:
		noise = verbose_instance

	# Assume every existing target entry is dead until we see it below.
	dead_nodes = set(trg_cache)
	count = 0

	if not trg_cache.autocommits:
		trg_cache.sync(100)

	for x in valid_nodes_iterable:
		count += 1
		dead_nodes.discard(x)
		try:
			entry = src_cache[x]
		except KeyError as e:
			noise.missing_entry(x)
			del e
			continue
		except cache_errors.CacheError as ce:
			noise.exception(x, ce)
			del ce
			continue

		# Normalize the EAPI: missing -> "0"; a leading "-" marks an entry
		# previously recorded as having an unsupported EAPI.
		eapi = entry.get('EAPI')
		if not eapi:
			eapi = '0'
		eapi = eapi.lstrip('-')
		eapi_supported = eapi_is_supported(eapi)
		if not eapi_supported:
			if not _validate_cache_for_unsupported_eapis:
				noise.misc(x, _("unable to validate cache for EAPI='%s'") % eapi)
				continue

		write_it = True
		trg = None
		try:
			trg = trg_cache[x]
		except (KeyError, cache_errors.CacheError):
			pass
		else:
			if trg['_mtime_'] == entry['_mtime_'] and \
				eclass_cache.is_eclass_data_valid(trg['_eclasses_']) and \
				set(trg['_eclasses_']) == set(entry['_eclasses_']):
				write_it = False

		for d in (entry, trg):
			if d is not None and d.get('EAPI') in ('', '0'):
				del d['EAPI']

		if trg and not write_it:
			# We don't want to skip the write unless we're really sure that
			# the existing cache is identical, so don't trust _mtime_ and
			# _eclasses_ alone.  (This was a no-op string literal before;
			# it is a real comment now.)
			for k in set(chain(entry, trg)).difference(
				("_mtime_", "_eclasses_")):
				if trg.get(k, "") != entry.get(k, ""):
					write_it = True
					break

		if write_it:
			try:
				inherited = entry.get("INHERITED", "")
				eclasses = entry.get("_eclasses_")
			except cache_errors.CacheError as ce:
				noise.exception(x, ce)
				del ce
				continue

			if eclasses is not None:
				if not eclass_cache.is_eclass_data_valid(entry["_eclasses_"]):
					noise.eclass_stale(x)
					continue
				inherited = eclasses
			else:
				inherited = inherited.split()

			if inherited:
				if src_cache.complete_eclass_entries and eclasses is None:
					noise.corruption(x, "missing _eclasses_ field")
					continue

				# Even if _eclasses_ already exists, replace it with data from
				# eclass_cache, in order to insert local eclass paths.
				try:
					eclasses = eclass_cache.get_eclass_data(inherited)
				except KeyError:
					# INHERITED contains a non-existent eclass.
					noise.eclass_stale(x)
					continue

				if eclasses is None:
					noise.eclass_stale(x)
					continue
				entry["_eclasses_"] = eclasses

			if not eapi_supported:
				# Blank out everything except the validation keys and record
				# the unsupported EAPI with a "-" prefix so it is recognized
				# on the next pass (see the lstrip('-') above).
				for k in set(entry).difference(("_mtime_", "_eclasses_")):
					entry[k] = ""
				entry["EAPI"] = "-" + eapi

			# by this time, if it reaches here, the eclass has been validated,
			# and the entry has been updated/translated (if needs be, for
			# metadata/cache mainly)
			try:
				trg_cache[x] = entry
			except cache_errors.CacheError as ce:
				noise.exception(x, ce)
				del ce
				continue
		if count >= noise.call_update_min:
			noise.update(x)
			count = 0

	if not trg_cache.autocommits:
		trg_cache.commit()

	# ok.  by this time, the trg_cache is up to date, and we have a dict
	# with a crapload of cpv's.  we now walk the target db, removing stuff
	# if it's in the list.
	for key in dead_nodes:
		try:
			del trg_cache[key]
		except KeyError:
			pass
		except cache_errors.CacheError as ce:
			# NOTE(review): unlike earlier calls, no key is passed here;
			# confirm noise.exception supports a single-argument form.
			noise.exception(ce)
			del ce
	noise.finish()
Exemplo n.º 36
0
    def _start(self):
        """Spawn the "depend" ebuild phase to extract metadata.

        Optionally parses EAPI from the ebuild head (when the
        'parse-eapi-ebuild-head' feature is enabled), short-circuits on
        unsupported EAPIs via metadata_callback, then forks
        portage.doebuild() with a non-blocking pipe over which the child
        writes the raw metadata.
        """
        settings = self.settings
        settings.setcpv(self.cpv)
        ebuild_path = self.ebuild_hash.location

        # the caller can pass in eapi in order to avoid
        # redundant _parse_eapi_ebuild_head calls
        eapi = self.eapi
        if eapi is None and \
         'parse-eapi-ebuild-head' in settings.features:
            with io.open(_unicode_encode(ebuild_path,
                                         encoding=_encodings['fs'],
                                         errors='strict'),
                         mode='r',
                         encoding=_encodings['repo.content'],
                         errors='replace') as f:
                eapi = portage._parse_eapi_ebuild_head(f)

        if eapi is not None:
            if not portage.eapi_is_supported(eapi):
                # Unsupported EAPI: record it via the metadata callback and
                # finish immediately without spawning the phase.
                self.metadata = self.metadata_callback(self.cpv,
                                                       self.repo_path,
                                                       {'EAPI': eapi},
                                                       self.ebuild_hash)
                self._set_returncode((self.pid, os.EX_OK << 8))
                self.wait()
                return

            settings.configdict['pkg']['EAPI'] = eapi

        debug = settings.get("PORTAGE_DEBUG") == "1"
        master_fd = None
        slave_fd = None
        fd_pipes = None
        if self.fd_pipes is not None:
            fd_pipes = self.fd_pipes.copy()
        else:
            fd_pipes = {}

        # Default stdio for the child process.
        null_input = open('/dev/null', 'rb')
        fd_pipes.setdefault(0, null_input.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.values():
            if fd == sys.stdout.fileno():
                sys.stdout.flush()
            if fd == sys.stderr.fileno():
                sys.stderr.flush()

        self._files = self._files_dict()
        files = self._files

        # Non-blocking pipe: the child writes metadata, this process reads
        # it asynchronously via _output_handler.
        master_fd, slave_fd = os.pipe()
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
                    fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        fd_pipes[self._metadata_fd] = slave_fd

        self._raw_metadata = []
        files.ebuild = master_fd
        self._reg_id = self.scheduler.register(files.ebuild,
                                               self._registered_events,
                                               self._output_handler)
        self._registered = True

        retval = portage.doebuild(ebuild_path,
                                  "depend",
                                  settings=settings,
                                  debug=debug,
                                  mydbapi=self.portdb,
                                  tree="porttree",
                                  fd_pipes=fd_pipes,
                                  returnpid=True)

        # The parent keeps only master_fd; the child owns slave_fd now.
        os.close(slave_fd)
        null_input.close()

        if isinstance(retval, int):
            # doebuild failed before spawning
            self._unregister()
            self._set_returncode((self.pid, retval << 8))
            self.wait()
            return

        self.pid = retval[0]
        # NOTE(review): the pid is removed from portage's global
        # spawned_pids list, presumably because this task reaps it itself
        # -- confirm against SpawnProcess/SubProcess behavior.
        portage.process.spawned_pids.remove(self.pid)
Exemplo n.º 37
0
	def aux_get(self, mycpv, mylist, mytree=None, myrepo=None):
		"stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
		'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
		'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
		cache_me = False
		# A repo name takes precedence over mytree; an unknown repo is an
		# immediate KeyError.
		if myrepo is not None:
			mytree = self.treemap.get(myrepo)
			if mytree is None:
				raise KeyError(myrepo)

		if mytree is not None and len(self.porttrees) == 1 \
			and mytree == self.porttrees[0]:
			# mytree matches our only tree, so it's safe to
			# ignore mytree and cache the result
			mytree = None
			myrepo = None

		if mytree is None:
			cache_me = True
		# Serve entirely from the in-memory aux cache when every requested
		# known key is covered by the cached key set.
		if mytree is None and not self._known_keys.intersection(
			mylist).difference(self._aux_cache_keys):
			aux_cache = self._aux_cache.get(mycpv)
			if aux_cache is not None:
				return [aux_cache.get(x, "") for x in mylist]
			cache_me = True

		try:
			cat, pkg = mycpv.split("/", 1)
		except ValueError:
			# Missing slash. Can't find ebuild so raise KeyError.
			raise KeyError(mycpv)

		myebuild, mylocation = self.findname2(mycpv, mytree)

		if not myebuild:
			writemsg("!!! aux_get(): %s\n" % \
				_("ebuild not found for '%s'") % mycpv, noiselevel=1)
			raise KeyError(mycpv)

		# Prefer the on-disk metadata cache; regenerate only on a miss.
		mydata, ebuild_hash = self._pull_valid_cache(mycpv, myebuild, mylocation)
		doregen = mydata is None

		if doregen:
			if myebuild in self._broken_ebuilds:
				raise KeyError(mycpv)

			# Run the "depend" phase synchronously to extract metadata.
			proc = EbuildMetadataPhase(cpv=mycpv,
				ebuild_hash=ebuild_hash, portdb=self,
				repo_path=mylocation, scheduler=self._event_loop,
				settings=self.doebuild_settings)

			proc.start()
			proc.wait()

			if proc.returncode != os.EX_OK:
				# Remember the failure so this ebuild is not retried.
				self._broken_ebuilds.add(myebuild)
				raise KeyError(mycpv)

			mydata = proc.metadata

		# Post-process: attach repo name and mtime, default the EAPI, and
		# derive INHERITED from _eclasses_ for supported EAPIs.
		mydata["repository"] = self.repositories.get_name_for_location(mylocation)
		mydata["_mtime_"] = ebuild_hash.mtime
		eapi = mydata.get("EAPI")
		if not eapi:
			eapi = "0"
			mydata["EAPI"] = eapi
		if eapi_is_supported(eapi):
			mydata["INHERITED"] = " ".join(mydata.get("_eclasses_", []))

		#finally, we look at our internal cache entry and return the requested data.
		returnme = [mydata.get(x, "") for x in mylist]

		if cache_me:
			aux_cache = {}
			for x in self._aux_cache_keys:
				aux_cache[x] = mydata.get(x, "")
			self._aux_cache[mycpv] = aux_cache

		return returnme
Exemplo n.º 38
0
def expand_new_virt(vardb, atom):
    """Recursively expand the RDEPEND atoms of a new-style virtual.

    Iterate over the recursively expanded RDEPEND atoms of a new-style
    virtual.  If atom is not a new-style virtual, or it does not match an
    installed package, then it is yielded without any expansion.
    """
    if not isinstance(atom, Atom):
        atom = Atom(atom)

    if not atom.cp.startswith("virtual/"):
        yield atom
        return

    seen = set()
    pending = [atom]

    while pending:
        current = pending.pop()
        # Blockers and non-virtuals are passed through unexpanded.
        if current.blocker or \
         not current.cp.startswith("virtual/"):
            yield current
            continue

        matches = vardb.match(current)
        if not matches or not matches[-1].startswith("virtual/"):
            yield current
            continue

        best_cpv = matches[-1]
        if best_cpv in seen:
            continue

        seen.add(best_cpv)
        eapi, iuse, rdepend, use = vardb.aux_get(
            best_cpv, ["EAPI", "IUSE", "RDEPEND", "USE"])
        if not portage.eapi_is_supported(eapi):
            yield current
            continue

        eapi_attrs = _get_eapi_attrs(eapi)
        # Validate IUSE and USE, for early detection of vardb corruption.
        useflag_re = _get_useflag_re(eapi)
        stripped_flags = (flag[1:] if flag[:1] in ("+", "-") else flag
                          for flag in iuse.split())
        valid_iuse = frozenset(
            flag for flag in stripped_flags
            if useflag_re.match(flag) is not None)

        settings = vardb.settings
        iuse_implicit_match = (settings._iuse_effective_match
                               if eapi_attrs.iuse_effective
                               else settings._iuse_implicit_match)

        valid_use = frozenset(
            flag for flag in use.split()
            if flag in valid_iuse or iuse_implicit_match(flag))

        eroot = settings['EROOT']
        dep_trees = {
            eroot: {
                "porttree": vardb.vartree,
                "vartree": vardb.vartree,
            }
        }
        success, rdep_atoms = portage.dep_check(rdepend,
                                                None,
                                                settings,
                                                myuse=valid_use,
                                                myroot=eroot,
                                                trees=dep_trees)

        if success:
            pending.extend(rdep_atoms)
        else:
            yield current
Exemplo n.º 39
0
    def _start(self):
        """Parse the EAPI from the ebuild head and spawn the "depend" phase.

        An empty EAPI assignment is invalid and fails immediately; an
        unsupported EAPI is exposed as the only metadata without spawning.
        Otherwise portage.doebuild() is forked with a pipe (advertised to
        the child via PORTAGE_PIPE_FD) over which it writes raw metadata.
        """
        ebuild_path = self.ebuild_hash.location

        with io.open(_unicode_encode(ebuild_path,
                                     encoding=_encodings['fs'],
                                     errors='strict'),
                     mode='r',
                     encoding=_encodings['repo.content'],
                     errors='replace') as f:
            self._eapi, self._eapi_lineno = portage._parse_eapi_ebuild_head(f)

        parsed_eapi = self._eapi
        if parsed_eapi is None:
            # No EAPI assignment found in the ebuild head: default to "0".
            parsed_eapi = "0"

        if not parsed_eapi:
            # An empty EAPI setting is invalid.
            self._eapi_invalid(None)
            self.returncode = 1
            self._async_wait()
            return

        self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
        if not self.eapi_supported:
            # Expose only the EAPI so callers can report it; don't spawn.
            self.metadata = {"EAPI": parsed_eapi}
            self.returncode = os.EX_OK
            self._async_wait()
            return

        settings = self.settings
        settings.setcpv(self.cpv)
        settings.configdict['pkg']['EAPI'] = parsed_eapi

        debug = settings.get("PORTAGE_DEBUG") == "1"
        master_fd = None
        slave_fd = None
        fd_pipes = None
        if self.fd_pipes is not None:
            fd_pipes = self.fd_pipes.copy()
        else:
            fd_pipes = {}

        # Default stdio for the child process.
        null_input = open('/dev/null', 'rb')
        fd_pipes.setdefault(0, null_input.fileno())
        fd_pipes.setdefault(1, sys.__stdout__.fileno())
        fd_pipes.setdefault(2, sys.__stderr__.fileno())

        # flush any pending output
        stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
        for fd in fd_pipes.values():
            if fd in stdout_filenos:
                sys.__stdout__.flush()
                sys.__stderr__.flush()
                break

        self._files = self._files_dict()
        files = self._files

        master_fd, slave_fd = os.pipe()

        # Non-blocking reads so _output_handler never stalls the event loop.
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
                    fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        # FD_CLOEXEC is enabled by default in Python >=3.4.
        if sys.hexversion < 0x3040000:
            try:
                fcntl.FD_CLOEXEC
            except AttributeError:
                pass
            else:
                fcntl.fcntl(
                    master_fd, fcntl.F_SETFD,
                    fcntl.fcntl(master_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

        # Map the write end into the child and advertise its number so the
        # ebuild environment knows where to write metadata.
        fd_pipes[slave_fd] = slave_fd
        settings["PORTAGE_PIPE_FD"] = str(slave_fd)

        self._raw_metadata = []
        files.ebuild = master_fd
        self.scheduler.add_reader(files.ebuild, self._output_handler)
        self._registered = True

        retval = portage.doebuild(ebuild_path,
                                  "depend",
                                  settings=settings,
                                  debug=debug,
                                  mydbapi=self.portdb,
                                  tree="porttree",
                                  fd_pipes=fd_pipes,
                                  returnpid=True)
        settings.pop("PORTAGE_PIPE_FD", None)

        # The parent keeps only master_fd; the child owns slave_fd now.
        os.close(slave_fd)
        null_input.close()

        if isinstance(retval, int):
            # doebuild failed before spawning
            self.returncode = retval
            self._async_wait()
            return

        self.pid = retval[0]
Exemplo n.º 40
0
def _get_eapi_attrs(eapi_str: Optional[str]) -> _eapi_attrs:
    """
    Return the feature-attribute tuple for the given EAPI string.

    When eapi is None then validation is not as strict, since we want the
    same to work for multiple EAPIs that may have slightly different rules.
    An unsupported eapi is handled the same as when eapi is None, which may
    be helpful for handling of corrupt EAPI metadata in essential functions
    such as pkgsplit.
    """
    if eapi_str is not None and eapi_is_supported(eapi_str):
        eapi = Eapi(eapi_str)

        def since(version: str) -> bool:
            # True when the feature exists from the given EAPI onward.
            return eapi >= Eapi(version)

        def until(version: str) -> bool:
            # True when the feature exists up to and including the given EAPI.
            return eapi <= Eapi(version)

        return _eapi_attrs(
            allows_package_provided=until("6"),
            bdepend=since("7"),
            broot=since("7"),
            empty_groups_always_true=until("6"),
            exports_AA=until("3"),
            exports_EBUILD_PHASE_FUNC=since("5"),
            exports_ECLASSDIR=until("6"),
            exports_KV=until("3"),
            exports_merge_type=since("4"),
            exports_PORTDIR=until("6"),
            exports_replace_vars=since("4"),
            feature_flag_test=False,
            idepend=since("8"),
            iuse_defaults=since("1"),
            iuse_effective=since("5"),
            path_variables_end_with_trailing_slash=until("6"),
            posixish_locale=since("6"),
            prefix=since("3"),
            repo_deps=False,
            required_use=since("4"),
            required_use_at_most_one_of=since("5"),
            selective_src_uri_restriction=since("8"),
            slot_deps=since("1"),
            slot_operator=since("5"),
            src_uri_arrows=since("2"),
            strong_blocks=since("2"),
            sysroot=since("7"),
            use_deps=since("2"),
            use_dep_defaults=since("4"),
        )

    # None or unsupported EAPI: permissive defaults.
    return _eapi_attrs(
        allows_package_provided=True,
        bdepend=False,
        broot=True,
        empty_groups_always_true=False,
        exports_AA=False,
        exports_EBUILD_PHASE_FUNC=True,
        exports_ECLASSDIR=False,
        exports_KV=False,
        exports_merge_type=True,
        exports_PORTDIR=True,
        exports_replace_vars=True,
        feature_flag_test=False,
        idepend=False,
        iuse_defaults=True,
        iuse_effective=False,
        path_variables_end_with_trailing_slash=False,
        posixish_locale=False,
        prefix=True,
        repo_deps=True,
        required_use=True,
        required_use_at_most_one_of=True,
        selective_src_uri_restriction=True,
        slot_deps=True,
        slot_operator=True,
        src_uri_arrows=True,
        strong_blocks=True,
        sysroot=True,
        use_deps=True,
        use_dep_defaults=True,
    )
Exemplo n.º 41
0
def getmaskingreason(mycpv,
                     metadata=None,
                     settings=None,
                     portdb=None,
                     return_location=False,
                     myrepo=None):
    """Return the package.mask comment that applies to mycpv, if any.

    If specified, the myrepo argument is assumed to be valid. This
    should be a safe assumption since portdbapi methods always
    return valid repo names and valid "repository" metadata from
    aux_get.

    @param mycpv: cpv string of the package being queried
    @param metadata: optional pre-fetched metadata dict for mycpv
    @param settings: config instance (defaults to portage.settings)
    @param portdb: portdbapi instance (defaults to portage.portdb)
    @param return_location: when True, return (comment, filename) rather
        than just the comment string
    @param myrepo: optional repo name used to disambiguate mycpv
    @return: the mask comment (or (comment, filename)), or None
        (or (None, None)) when no matching mask entry is found
    @raise ValueError: if mycpv is not a valid CPV
    """
    if settings is None:
        settings = portage.settings
    if portdb is None:
        portdb = portage.portdb
    mysplit = catpkgsplit(mycpv)
    if not mysplit:
        raise ValueError(_("invalid CPV: %s") % mycpv)

    if metadata is None:
        db_keys = list(portdb._aux_cache_keys)
        try:
            metadata = dict(
                zip(db_keys, portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
        except KeyError:
            if not portdb.cpv_exists(mycpv):
                raise
        else:
            if myrepo is None:
                myrepo = _gen_valid_repo(metadata["repository"])

    elif myrepo is None:
        myrepo = metadata.get("repository")
        if myrepo is not None:
            # myrepo already holds metadata["repository"], so avoid the
            # redundant second dict lookup.
            myrepo = _gen_valid_repo(myrepo)

    if metadata is not None and \
     not portage.eapi_is_supported(metadata["EAPI"]):
        # Return early since otherwise we might produce invalid
        # results given that the EAPI is not supported. Also,
        # metadata is mostly useless in this case since it doesn't
        # contain essential things like SLOT.
        if return_location:
            return (None, None)
        else:
            return None

    # Sometimes we can't access SLOT or repository due to corruption.
    pkg = mycpv
    if metadata is not None:
        pkg = "".join((mycpv, _slot_separator, metadata["SLOT"]))
    # At this point myrepo should be None, a valid name, or
    # Package.UNKNOWN_REPO which we ignore.
    if myrepo is not None and myrepo != Package.UNKNOWN_REPO:
        pkg = "".join((pkg, _repo_separator, myrepo))
    cpv_slot_list = [pkg]

    mycp = mysplit[0] + "/" + mysplit[1]

    # XXX- This is a temporary duplicate of code from the config constructor.
    locations = [os.path.join(settings["PORTDIR"], "profiles")]
    locations.extend(settings.profiles)
    for ov in settings["PORTDIR_OVERLAY"].split():
        profdir = os.path.join(normalize_path(ov), "profiles")
        if os.path.isdir(profdir):
            locations.append(profdir)
    locations.append(
        os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH))
    locations.reverse()
    pmasklists = []
    for profile in locations:
        pmask_filename = os.path.join(profile, "package.mask")
        pmasklists.append(
            (pmask_filename, grablines(pmask_filename, recursive=1)))

    pmaskdict = settings._mask_manager._pmaskdict
    if mycp in pmaskdict:
        for x in pmaskdict[mycp]:
            if match_from_list(x, cpv_slot_list):
                x = x.without_repo
                for pmask in pmasklists:
                    comment = ""
                    comment_valid = -1
                    pmask_filename = pmask[0]
                    # Track the run of comment lines immediately preceding
                    # each atom; a blank line invalidates the run.  The index
                    # i is still needed for the comment_valid bookkeeping,
                    # hence enumerate rather than a plain iteration.
                    for i, pmask_line in enumerate(pmask[1]):
                        l = pmask_line.strip()
                        try:
                            l_atom = Atom(l,
                                          allow_repo=True,
                                          allow_wildcard=True).without_repo
                        except InvalidAtom:
                            l_atom = None
                        if l == "":
                            comment = ""
                            comment_valid = -1
                        elif l[0] == "#":
                            comment += (l + "\n")
                            comment_valid = i + 1
                        elif l_atom == x:
                            if comment_valid != i:
                                comment = ""
                            if return_location:
                                return (comment, pmask_filename)
                            else:
                                return comment
                        elif comment_valid != -1:
                            # Apparently this comment applies to multiple masks, so
                            # it remains valid until a blank line is encountered.
                            comment_valid += 1
    if return_location:
        return (None, None)
    else:
        return None
Exemplo n.º 42
0
def action_metadata(settings, portdb, myopts, porttrees=None):
    """Regenerate the Portage metadata cache for the given trees.

    Transfers validated entries from each tree's pregenerated cache into
    the target auxdb, shows a progress bar unless quiet, and prunes dead
    cache entries afterwards.
    """
    if porttrees is None:
        porttrees = portdb.porttrees
    portage.writemsg_stdout("\n>>> Updating Portage cache\n")
    cachedir = os.path.normpath(settings.depcachedir)
    # Refuse to operate on a primary root directory: dead-node pruning
    # below deletes entries, which would be catastrophic here.
    if cachedir in [
        "/",
        "/bin",
        "/dev",
        "/etc",
        "/home",
        "/lib",
        "/opt",
        "/proc",
        "/root",
        "/sbin",
        "/sys",
        "/tmp",
        "/usr",
        "/var",
    ]:
        print(
            "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY "
            + "ROOT DIRECTORY ON YOUR SYSTEM.",
            file=sys.stderr,
        )
        print(
            "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir,
            file=sys.stderr,
        )
        sys.exit(73)
    if not os.path.exists(cachedir):
        os.makedirs(cachedir)

    auxdbkeys = portdb._known_keys

    class TreeData:
        # Per-tree bundle: source/destination caches, eclass db, tree path,
        # and the set of cpvs seen (used for dead-node pruning).
        __slots__ = ("dest_db", "eclass_db", "path", "src_db", "valid_nodes")

        def __init__(self, dest_db, eclass_db, path, src_db):
            self.dest_db = dest_db
            self.eclass_db = eclass_db
            self.path = path
            self.src_db = src_db
            self.valid_nodes = set()

    porttrees_data = []
    for path in porttrees:
        src_db = portdb._pregen_auxdb.get(path)
        if src_db is None:
            # portdbapi does not populate _pregen_auxdb
            # when FEATURES=metadata-transfer is enabled
            src_db = portdb._create_pregen_cache(path)

        if src_db is not None:
            eclass_db = portdb.repositories.get_repo_for_location(path).eclass_db
            # Update eclass data which may be stale after sync.
            eclass_db.update_eclasses()
            porttrees_data.append(TreeData(portdb.auxdb[path], eclass_db, path, src_db))

    porttrees = [tree_data.path for tree_data in porttrees_data]

    quiet = (
        settings.get("TERM") == "dumb" or "--quiet" in myopts or not sys.stdout.isatty()
    )

    onProgress = None
    if not quiet:
        progressBar = portage.output.TermProgressBar()
        progressHandler = ProgressHandler()
        onProgress = progressHandler.onProgress

        def display():
            progressBar.set(progressHandler.curval, progressHandler.maxval)

        progressHandler.display = display

        def sigwinch_handler(signum, frame):
            # Keep the progress bar width in sync with the terminal.
            lines, progressBar.term_columns = portage.output.get_term_size()

        signal.signal(signal.SIGWINCH, sigwinch_handler)

    # Temporarily override portdb.porttrees so portdb.cp_all()
    # will only return the relevant subset.
    portdb_porttrees = portdb.porttrees
    portdb.porttrees = porttrees
    try:
        cp_all = portdb.cp_all()
    finally:
        portdb.porttrees = portdb_porttrees

    curval = 0
    maxval = len(cp_all)
    if onProgress is not None:
        onProgress(maxval, curval)

    # TODO: Display error messages, but do not interfere with the progress bar.
    # Here's how:
    #  1) erase the progress bar
    #  2) show the error message
    #  3) redraw the progress bar on a new line

    for cp in cp_all:
        for tree_data in porttrees_data:

            src_chf = tree_data.src_db.validation_chf
            dest_chf = tree_data.dest_db.validation_chf
            dest_chf_key = "_%s_" % dest_chf
            dest_chf_getter = operator.attrgetter(dest_chf)

            for cpv in portdb.cp_list(cp, mytree=tree_data.path):
                tree_data.valid_nodes.add(cpv)
                try:
                    src = tree_data.src_db[cpv]
                except (CacheError, KeyError):
                    continue

                ebuild_location = portdb.findname(cpv, mytree=tree_data.path)
                if ebuild_location is None:
                    continue
                ebuild_hash = hashed_path(ebuild_location)

                # Skip entries that fail checksum/eclass validation.
                try:
                    if not tree_data.src_db.validate_entry(
                        src, ebuild_hash, tree_data.eclass_db
                    ):
                        continue
                except CacheError:
                    continue

                eapi = src.get("EAPI")
                if not eapi:
                    eapi = "0"
                eapi_supported = eapi_is_supported(eapi)
                if not eapi_supported:
                    continue

                dest = None
                try:
                    dest = tree_data.dest_db[cpv]
                except (KeyError, CacheError):
                    pass

                for d in (src, dest):
                    if d is not None and d.get("EAPI") in ("", "0"):
                        del d["EAPI"]

                if src_chf != "mtime":
                    # src may contain an irrelevant _mtime_ which corresponds
                    # to the time that the cache entry was written
                    src.pop("_mtime_", None)

                if src_chf != dest_chf:
                    # populate src entry with dest_chf_key
                    # (the validity of the dest_chf that we generate from the
                    # ebuild here relies on the fact that we already used
                    # validate_entry to validate the ebuild with src_chf)
                    src[dest_chf_key] = dest_chf_getter(ebuild_hash)

                if dest is not None:
                    if not (
                        dest.get(dest_chf_key) == src[dest_chf_key]
                        and tree_data.eclass_db.validate_and_rewrite_cache(
                            dest["_eclasses_"],
                            tree_data.dest_db.validation_chf,
                            tree_data.dest_db.store_eclass_paths,
                        )
                        is not None
                        and set(dest["_eclasses_"]) == set(src["_eclasses_"])
                    ):
                        dest = None
                    else:
                        # We don't want to skip the write unless we're really
                        # sure that the existing cache is identical, so don't
                        # trust _mtime_ and _eclasses_ alone.
                        for k in auxdbkeys:
                            if dest.get(k, "") != src.get(k, ""):
                                dest = None
                                break

                if dest is not None:
                    # The existing data is valid and identical,
                    # so there's no need to overwrite it.
                    continue

                try:
                    tree_data.dest_db[cpv] = src
                except CacheError:
                    # ignore it; can't do anything about it.
                    pass

        curval += 1
        if onProgress is not None:
            onProgress(maxval, curval)

    if onProgress is not None:
        onProgress(maxval, curval)

    # Prune cache entries whose cpvs were not seen above (dead nodes).
    for tree_data in porttrees_data:
        try:
            dead_nodes = set(tree_data.dest_db)
        except CacheError as e:
            writemsg_level(
                "Error listing cache entries for "
                + "'%s': %s, continuing...\n" % (tree_data.path, e),
                level=logging.ERROR,
                noiselevel=-1,
            )
            del e
        else:
            dead_nodes.difference_update(tree_data.valid_nodes)
            for cpv in dead_nodes:
                try:
                    del tree_data.dest_db[cpv]
                except (KeyError, CacheError):
                    pass

    if not quiet:
        # make sure the final progress is displayed
        progressHandler.display()
        print()
        signal.signal(signal.SIGWINCH, signal.SIG_DFL)

    portdb.flush_cache()
    sys.stdout.flush()
Exemplo n.º 43
0
	def aux_get(self, mycpv, mylist, mytree=None, myrepo=None):
		"stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
		'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
		'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise PortageKeyError if error'
		cache_me = False
		# A repo name takes precedence over mytree; an unknown repo is an
		# immediate PortageKeyError.
		if myrepo is not None:
			mytree = self.treemap.get(myrepo)
			if mytree is None:
				raise PortageKeyError(myrepo)

		if mytree is not None and len(self.porttrees) == 1 \
			and mytree == self.porttrees[0]:
			# mytree matches our only tree, so it's safe to
			# ignore mytree and cache the result
			mytree = None
			myrepo = None

		if mytree is None:
			cache_me = True
		# Serve entirely from the in-memory aux cache when every requested
		# known key is covered by the cached key set.
		if mytree is None and not self._known_keys.intersection(
			mylist).difference(self._aux_cache_keys):
			aux_cache = self._aux_cache.get(mycpv)
			if aux_cache is not None:
				return [aux_cache.get(x, "") for x in mylist]
			cache_me = True

		try:
			cat, pkg = mycpv.split("/", 1)
		except ValueError:
			# Missing slash. Can't find ebuild so raise PortageKeyError.
			raise PortageKeyError(mycpv)

		myebuild, mylocation = self.findname2(mycpv, mytree)

		if not myebuild:
			writemsg("!!! aux_get(): %s\n" % \
				_("ebuild not found for '%s'") % mycpv, noiselevel=1)
			raise PortageKeyError(mycpv)

		# Prefer the on-disk metadata cache; regenerate only on a miss.
		mydata, ebuild_hash = self._pull_valid_cache(mycpv, myebuild, mylocation)
		doregen = mydata is None

		if doregen:
			if myebuild in self._broken_ebuilds:
				raise PortageKeyError(mycpv)

			# Run the "depend" phase synchronously to extract metadata.
			proc = EbuildMetadataPhase(cpv=mycpv,
				ebuild_hash=ebuild_hash, portdb=self,
				repo_path=mylocation, scheduler=self._event_loop,
				settings=self.doebuild_settings)

			proc.start()
			proc.wait()

			if proc.returncode != os.EX_OK:
				# Remember the failure so this ebuild is not retried.
				self._broken_ebuilds.add(myebuild)
				raise PortageKeyError(mycpv)

			mydata = proc.metadata

		# Post-process: attach repo name and mtime, default the EAPI, and
		# derive INHERITED from _eclasses_ for supported EAPIs.
		mydata["repository"] = self.repositories.get_name_for_location(mylocation)
		mydata["_mtime_"] = ebuild_hash.mtime
		eapi = mydata.get("EAPI")
		if not eapi:
			eapi = "0"
			mydata["EAPI"] = eapi
		if eapi_is_supported(eapi):
			mydata["INHERITED"] = " ".join(mydata.get("_eclasses_", []))

		#finally, we look at our internal cache entry and return the requested data.
		returnme = [mydata.get(x, "") for x in mylist]

		# Only populate the in-memory cache while the dbapi is frozen.
		if cache_me and self.frozen:
			aux_cache = {}
			for x in self._aux_cache_keys:
				aux_cache[x] = mydata.get(x, "")
			self._aux_cache[mycpv] = aux_cache

		return returnme
Exemplo n.º 44
0
	def _start(self):
		"""
		Start the metadata phase: parse the EAPI from the head of the
		ebuild, short-circuit on invalid or unsupported EAPIs, then spawn
		the ebuild "depend" phase with its metadata output connected to a
		non-blocking pipe monitored by the scheduler.
		"""
		ebuild_path = self.ebuild_hash.location

		# Read the EAPI assignment (and its line number) from the ebuild head.
		with io.open(_unicode_encode(ebuild_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'],
			errors='replace') as f:
			self._eapi, self._eapi_lineno = portage._parse_eapi_ebuild_head(f)

		parsed_eapi = self._eapi
		if parsed_eapi is None:
			# No EAPI assignment found; fall back to "0".
			parsed_eapi = "0"

		if not parsed_eapi:
			# An empty EAPI setting is invalid.
			self._eapi_invalid(None)
			# Synthesize a waitpid-style status encoding exit code 1.
			self._set_returncode((self.pid, 1 << 8))
			self.wait()
			return

		self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
		if not self.eapi_supported:
			# Unsupported EAPI: expose only the EAPI itself as metadata and
			# finish successfully without running the depend phase.
			self.metadata = {"EAPI": parsed_eapi}
			self._set_returncode((self.pid, os.EX_OK << 8))
			self.wait()
			return

		settings = self.settings
		settings.setcpv(self.cpv)
		settings.configdict['pkg']['EAPI'] = parsed_eapi

		debug = settings.get("PORTAGE_DEBUG") == "1"
		master_fd = None
		slave_fd = None
		fd_pipes = None
		if self.fd_pipes is not None:
			fd_pipes = self.fd_pipes.copy()
		else:
			fd_pipes = {}

		# Default stdio for the child: /dev/null stdin, real stdout/stderr.
		null_input = open('/dev/null', 'rb')
		fd_pipes.setdefault(0, null_input.fileno())
		fd_pipes.setdefault(1, sys.__stdout__.fileno())
		fd_pipes.setdefault(2, sys.__stderr__.fileno())

		# flush any pending output
		stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
		for fd in fd_pipes.values():
			if fd in stdout_filenos:
				sys.__stdout__.flush()
				sys.__stderr__.flush()
				break

		self._files = self._files_dict()
		files = self._files

		# Non-blocking read end so the scheduler can drain it incrementally.
		master_fd, slave_fd = os.pipe()
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		# The child writes its metadata to this dedicated fd.
		fd_pipes[self._metadata_fd] = slave_fd

		self._raw_metadata = []
		files.ebuild = master_fd
		self._reg_id = self.scheduler.register(files.ebuild,
			self._registered_events, self._output_handler)
		self._registered = True

		# returnpid=True makes doebuild return either an error code (int)
		# or a list whose first element is the spawned pid.
		retval = portage.doebuild(ebuild_path, "depend",
			settings=settings, debug=debug,
			mydbapi=self.portdb, tree="porttree",
			fd_pipes=fd_pipes, returnpid=True)

		os.close(slave_fd)
		null_input.close()

		if isinstance(retval, int):
			# doebuild failed before spawning
			self._unregister()
			self._set_returncode((self.pid, retval << 8))
			self.wait()
			return

		self.pid = retval[0]
		# NOTE(review): the pid is removed from portage's global tracking
		# list, presumably because this instance reaps the child itself —
		# confirm against SpawnProcess semantics.
		portage.process.spawned_pids.remove(self.pid)
Exemplo n.º 45
0
 def eapi_supported(self, eapi):
     """Return whether the given EAPI is supported by this portage version."""
     is_supported = portage.eapi_is_supported(eapi)
     return is_supported
Exemplo n.º 46
0
    def _addProfile(self, currentPath, repositories, known_repos):
        """
        Recursively add a profile node, and its parents as listed in the
        profile's "parent" file, to self.profiles / self.profiles_complex.

        @param currentPath: filesystem path of the profile directory
        @param repositories: repository configuration, used to expand
            "repo-name:path" style parent entries
        @param known_repos: iterable of (location, layout_data) pairs used
            to determine which repository this profile belongs to
        @raises ParseError: for an unsupported profile EAPI, an empty
            parent file, or a missing parent profile
        """
        current_abs_path = os.path.abspath(currentPath)
        allow_directories = True
        allow_parent_colon = True
        repo_loc = None
        compat_mode = False
        current_formats = ()
        eapi = None

        intersecting_repos = [
            x for x in known_repos if current_abs_path.startswith(x[0])
        ]
        if intersecting_repos:
            # Handle nested repositories. The longest path
            # will be the correct one.
            repo_loc, layout_data = max(intersecting_repos,
                                        key=lambda x: len(x[0]))
            eapi = layout_data.get("profile_eapi_when_unspecified")

        # The profile's own "eapi" file (read below) overrides the
        # repository default; "0" is the fallback when neither exists.
        eapi_file = os.path.join(currentPath, "eapi")
        eapi = eapi or "0"
        f = None
        try:
            f = io.open(_unicode_encode(eapi_file,
                                        encoding=_encodings['fs'],
                                        errors='strict'),
                        mode='r',
                        encoding=_encodings['content'],
                        errors='replace')
            eapi = f.readline().strip()
        except IOError:
            # A missing/unreadable "eapi" file is fine; keep the default.
            pass
        else:
            if not eapi_is_supported(eapi):
                raise ParseError(_(
                 "Profile contains unsupported "
                 "EAPI '%s': '%s'") % \
                 (eapi, os.path.realpath(eapi_file),))
        finally:
            if f is not None:
                f.close()

        if intersecting_repos:
            # portage-1 style features (profile directories, "repo:path"
            # parent entries) depend on the EAPI and on the repository's
            # declared profile-formats.
            allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
             any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
            compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
             layout_data['profile-formats'] == ('portage-1-compat',)
            allow_parent_colon = any(x in _allow_parent_colon
                                     for x in layout_data['profile-formats'])
            current_formats = tuple(layout_data['profile-formats'])

        if compat_mode:
            # Warn when a profile implicitly relies on portage-1 style
            # directories that the repository has not declared.
            offenders = _PORTAGE1_DIRECTORIES.intersection(
                os.listdir(currentPath))
            offenders = sorted(x for x in offenders
                               if os.path.isdir(os.path.join(currentPath, x)))
            if offenders:
                warnings.warn(
                    _("\nThe selected profile is implicitly using the 'portage-1' format:\n"
                      "\tprofile = %(profile_path)s\n"
                      "But this repository is not using that format:\n"
                      "\trepo = %(repo_name)s\n"
                      "This will break in the future.  Please convert these dirs to files:\n"
                      "\t%(files)s\n"
                      "Or, add this line to the repository's layout.conf:\n"
                      "\tprofile-formats = portage-1") %
                    dict(profile_path=currentPath,
                         repo_name=repo_loc,
                         files='\n\t'.join(offenders)))

        # Recurse into parent profiles before appending this node, so
        # ancestors appear first in self.profiles.
        parentsFile = os.path.join(currentPath, "parent")
        if exists_raise_eaccess(parentsFile):
            parents = grabfile(parentsFile)
            if not parents:
                raise ParseError(_("Empty parent file: '%s'") % parentsFile)
            for parentPath in parents:
                abs_parent = parentPath[:1] == os.sep
                if not abs_parent and allow_parent_colon:
                    parentPath = self._expand_parent_colon(
                        parentsFile, parentPath, repo_loc, repositories)

                # NOTE: This os.path.join() call is intended to ignore
                # currentPath if parentPath is already absolute.
                parentPath = normalize_path(
                    os.path.join(currentPath, parentPath))

                if abs_parent or repo_loc is None or \
                 not parentPath.startswith(repo_loc):
                    # It seems that this parent may point outside
                    # of the current repo, so realpath it.
                    parentPath = os.path.realpath(parentPath)

                if exists_raise_eaccess(parentPath):
                    self._addProfile(parentPath, repositories, known_repos)
                else:
                    raise ParseError(
                     _("Parent '%s' not found: '%s'") %  \
                     (parentPath, parentsFile))

        self.profiles.append(currentPath)
        self.profiles_complex.append(
            _profile_node(currentPath, allow_directories, False,
                          current_formats, eapi, 'build-id'
                          in current_formats))
Exemplo n.º 47
0
    def _start(self):
        """
        Start the metadata phase (legacy variant): optionally parse the
        EAPI from the file name (GLEP 55) or the ebuild head, bail out
        early for unsupported EAPIs, then spawn the ebuild "depend" phase
        with its metadata output connected to a non-blocking pipe that the
        scheduler monitors.
        """
        settings = self.settings
        settings.setcpv(self.cpv)
        ebuild_path = self.ebuild_path

        eapi = None
        if 'parse-eapi-glep-55' in settings.features:
            # GLEP 55: the EAPI is encoded in the ebuild file name.
            pf, eapi = portage._split_ebuild_name_glep55(
                os.path.basename(ebuild_path))
        if eapi is None and \
         'parse-eapi-ebuild-head' in settings.features:
            # Fall back to parsing the EAPI assignment from the file head.
            eapi = portage._parse_eapi_ebuild_head(
                codecs.open(_unicode_encode(ebuild_path,
                                            encoding=_encodings['fs'],
                                            errors='strict'),
                            mode='r',
                            encoding=_encodings['repo.content'],
                            errors='replace'))

        if eapi is not None:
            if not portage.eapi_is_supported(eapi):
                # Unsupported EAPI: report only the EAPI via the callback
                # and finish successfully without running the depend phase.
                self.metadata_callback(self.cpv, self.ebuild_path,
                                       self.repo_path, {'EAPI': eapi},
                                       self.ebuild_mtime)
                self.returncode = os.EX_OK
                self.wait()
                return

            settings.configdict['pkg']['EAPI'] = eapi

        debug = settings.get("PORTAGE_DEBUG") == "1"
        master_fd = None
        slave_fd = None
        fd_pipes = None
        if self.fd_pipes is not None:
            fd_pipes = self.fd_pipes.copy()
        else:
            fd_pipes = {}

        # Default stdio for the child process.
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.values():
            if fd == sys.stdout.fileno():
                sys.stdout.flush()
            if fd == sys.stderr.fileno():
                sys.stderr.flush()

        fd_pipes_orig = fd_pipes.copy()
        self._files = self._files_dict()
        files = self._files

        # Non-blocking read end so the scheduler can drain it incrementally.
        master_fd, slave_fd = os.pipe()
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
                    fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        # The child writes its metadata to this dedicated fd.
        fd_pipes[self._metadata_fd] = slave_fd

        self._raw_metadata = []
        files.ebuild = os.fdopen(master_fd, 'rb')
        self._reg_id = self.scheduler.register(files.ebuild.fileno(),
                                               self._registered_events,
                                               self._output_handler)
        self._registered = True

        # returnpid=True makes doebuild return either an error code (int)
        # or a list whose first element is the spawned pid.
        retval = portage.doebuild(ebuild_path,
                                  "depend",
                                  settings["ROOT"],
                                  settings,
                                  debug,
                                  mydbapi=self.portdb,
                                  tree="porttree",
                                  fd_pipes=fd_pipes,
                                  returnpid=True)

        os.close(slave_fd)

        if isinstance(retval, int):
            # doebuild failed before spawning
            self._unregister()
            self.returncode = retval
            self.wait()
            return

        self.pid = retval[0]
        # NOTE(review): the pid is removed from portage's global tracking
        # list, presumably because this instance reaps the child itself —
        # confirm against SpawnProcess semantics.
        portage.process.spawned_pids.remove(self.pid)
Exemplo n.º 48
0
def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False, myrepo=None):
    """
    Scan the applicable package.mask files for an entry matching mycpv and
    return the comment block above the matching atom (optionally together
    with the file it was found in).

    @param mycpv: the cpv to look up
    @param metadata: optional metadata dict for mycpv; fetched from portdb
        via aux_get when None
    @param settings: config instance; defaults to portage.settings
    @param portdb: portdbapi instance; defaults to portage.portdb
    @param return_location: when True, return a (comment, filename) tuple
        instead of just the comment
    @param myrepo: optional repo name. If specified, the myrepo argument
        is assumed to be valid. This should be a safe assumption since
        portdbapi methods always return valid repo names and valid
        "repository" metadata from aux_get.
    @returns: the mask comment string, or (comment, filename) when
        return_location is True; None (or (None, None)) when no mask
        entry matches
    @raises ValueError: if mycpv is not a valid CPV
    """
    if settings is None:
        settings = portage.settings
    if portdb is None:
        portdb = portage.portdb
    mysplit = catpkgsplit(mycpv)
    if not mysplit:
        raise ValueError(_("invalid CPV: %s") % mycpv)

    if metadata is None:
        db_keys = list(portdb._aux_cache_keys)
        try:
            metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
        except KeyError:
            # Only propagate the error when the package truly doesn't
            # exist; otherwise continue with metadata unavailable.
            if not portdb.cpv_exists(mycpv):
                raise
        else:
            if myrepo is None:
                myrepo = _gen_valid_repo(metadata["repository"])

    elif myrepo is None:
        myrepo = metadata.get("repository")
        if myrepo is not None:
            myrepo = _gen_valid_repo(metadata["repository"])

    if metadata is not None and not portage.eapi_is_supported(metadata["EAPI"]):
        # Return early since otherwise we might produce invalid
        # results given that the EAPI is not supported. Also,
        # metadata is mostly useless in this case since it doesn't
        # contain essential things like SLOT.
        if return_location:
            return (None, None)
        else:
            return None

    # Sometimes we can't access SLOT or repository due to corruption.
    pkg = mycpv
    if metadata is not None:
        # Append the slot so slot-specific mask atoms can match.
        pkg = "".join((mycpv, _slot_separator, metadata["SLOT"]))
        # At this point myrepo should be None, a valid name, or
        # Package.UNKNOWN_REPO which we ignore.
    if myrepo is not None and myrepo != Package.UNKNOWN_REPO:
        pkg = "".join((pkg, _repo_separator, myrepo))
    cpv_slot_list = [pkg]

    mycp = mysplit[0] + "/" + mysplit[1]

    # XXX- This is a temporary duplicate of code from the config constructor.
    locations = [os.path.join(settings["PORTDIR"], "profiles")]
    locations.extend(settings.profiles)
    for ov in settings["PORTDIR_OVERLAY"].split():
        profdir = os.path.join(normalize_path(ov), "profiles")
        if os.path.isdir(profdir):
            locations.append(profdir)
    locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH))
    locations.reverse()
    # Group package.mask lines by the file they were (recursively) read
    # from, preserving order, so comments can be attributed correctly.
    pmasklists = []
    for profile in locations:
        pmask_filename = os.path.join(profile, "package.mask")
        node = None
        for l, recursive_filename in grablines(pmask_filename, recursive=1, remember_source_file=True):
            if node is None or node[0] != recursive_filename:
                node = (recursive_filename, [])
                pmasklists.append(node)
            node[1].append(l)

    pmaskdict = settings._mask_manager._pmaskdict
    if mycp in pmaskdict:
        for x in pmaskdict[mycp]:
            if match_from_list(x, cpv_slot_list):
                x = x.without_repo
                # Find the matching atom in the raw files and collect the
                # contiguous comment block immediately above it.
                for pmask in pmasklists:
                    comment = ""
                    comment_valid = -1
                    pmask_filename = pmask[0]
                    for i in range(len(pmask[1])):
                        l = pmask[1][i].strip()
                        try:
                            l_atom = Atom(l, allow_repo=True, allow_wildcard=True).without_repo
                        except InvalidAtom:
                            l_atom = None
                        if l == "":
                            # A blank line ends the current comment block.
                            comment = ""
                            comment_valid = -1
                        elif l[0] == "#":
                            comment += l + "\n"
                            comment_valid = i + 1
                        elif l_atom == x:
                            if comment_valid != i:
                                comment = ""
                            if return_location:
                                return (comment, pmask_filename)
                            else:
                                return comment
                        elif comment_valid != -1:
                            # Apparently this comment applies to multiple masks, so
                            # it remains valid until a blank line is encountered.
                            comment_valid += 1
    if return_location:
        return (None, None)
    else:
        return None
Exemplo n.º 49
0
    def _addProfile(self, currentPath, repositories, known_repos,
                    previous_repos):
        """
        Recursively add a profile node, and its parents as listed in the
        profile's "parent" file, to self.profiles / self.profiles_complex.

        @param currentPath: filesystem path of the profile directory
        @param repositories: repository configuration, used to expand
            "repo-name:path" style parent entries
        @param known_repos: iterable of (location, layout_data) pairs used
            to determine which repository this profile belongs to
        @param previous_repos: the intersecting_repos of the profile node
            that inherited this one, used to decide whether a deprecation
            warning is relevant for this node
        @raises ParseError: for an unsupported profile EAPI, an empty
            parent file, or a missing parent profile
        """
        current_abs_path = os.path.abspath(currentPath)
        allow_directories = True
        allow_parent_colon = True
        repo_loc = None
        compat_mode = False
        current_formats = ()
        eapi = None

        intersecting_repos = tuple(x for x in known_repos
                                   if current_abs_path.startswith(x[0]))
        if intersecting_repos:
            # Handle nested repositories. The longest path
            # will be the correct one.
            repo_loc, layout_data = max(intersecting_repos,
                                        key=lambda x: len(x[0]))
            eapi = layout_data.get("profile_eapi_when_unspecified")

        # The profile's own "eapi" file (read below) overrides the
        # repository default; "0" is the fallback when neither exists.
        eapi_file = os.path.join(currentPath, "eapi")
        eapi = eapi or "0"
        f = None
        try:
            f = io.open(
                _unicode_encode(eapi_file,
                                encoding=_encodings["fs"],
                                errors="strict"),
                mode="r",
                encoding=_encodings["content"],
                errors="replace",
            )
            eapi = f.readline().strip()
        except IOError:
            # A missing/unreadable "eapi" file is fine; keep the default.
            pass
        else:
            if not eapi_is_supported(eapi):
                raise ParseError(
                    _("Profile contains unsupported "
                      "EAPI '%s': '%s'") % (
                          eapi,
                          os.path.realpath(eapi_file),
                      ))
        finally:
            if f is not None:
                f.close()

        if intersecting_repos:
            # portage-1 style features (profile directories, "repo:path"
            # parent entries) depend on the EAPI and on the repository's
            # declared profile-formats.
            allow_directories = (
                eapi_allows_directories_on_profile_level_and_repository_level(
                    eapi) or any(x in _portage1_profiles_allow_directories
                                 for x in layout_data["profile-formats"]))
            compat_mode = (
                not eapi_allows_directories_on_profile_level_and_repository_level(
                    eapi)
                and layout_data["profile-formats"] == ("portage-1-compat", ))
            allow_parent_colon = any(x in _allow_parent_colon
                                     for x in layout_data["profile-formats"])
            current_formats = tuple(layout_data["profile-formats"])

        # According to PMS, a deprecated profile warning is not inherited. Since
        # the current profile node may have been inherited by a user profile
        # node, the deprecation warning may be relevant even if it is not a
        # top-level profile node. Therefore, consider the deprecated warning
        # to be irrelevant when the current profile node belongs to the same
        # repo as the previous profile node.
        show_deprecated_warning = tuple(x[0] for x in previous_repos) != tuple(
            x[0] for x in intersecting_repos)

        if compat_mode:
            # Warn when a profile implicitly relies on portage-1 style
            # directories that the repository has not declared.
            offenders = _PORTAGE1_DIRECTORIES.intersection(
                os.listdir(currentPath))
            offenders = sorted(x for x in offenders
                               if os.path.isdir(os.path.join(currentPath, x)))
            if offenders:
                warnings.warn(
                    _("\nThe selected profile is implicitly using the 'portage-1' format:\n"
                      "\tprofile = %(profile_path)s\n"
                      "But this repository is not using that format:\n"
                      "\trepo = %(repo_name)s\n"
                      "This will break in the future.  Please convert these dirs to files:\n"
                      "\t%(files)s\n"
                      "Or, add this line to the repository's layout.conf:\n"
                      "\tprofile-formats = portage-1") % dict(
                          profile_path=currentPath,
                          repo_name=repo_loc,
                          files="\n\t".join(offenders),
                      ))

        # Recurse into parent profiles before appending this node, so
        # ancestors appear first in self.profiles.
        parentsFile = os.path.join(currentPath, "parent")
        if exists_raise_eaccess(parentsFile):
            parents = grabfile(parentsFile)
            if not parents:
                raise ParseError(_("Empty parent file: '%s'") % parentsFile)
            for parentPath in parents:
                abs_parent = parentPath[:1] == os.sep
                if not abs_parent and allow_parent_colon:
                    parentPath = self._expand_parent_colon(
                        parentsFile, parentPath, repo_loc, repositories)

                # NOTE: This os.path.join() call is intended to ignore
                # currentPath if parentPath is already absolute.
                parentPath = normalize_path(
                    os.path.join(currentPath, parentPath))

                if (abs_parent or repo_loc is None
                        or not parentPath.startswith(repo_loc)):
                    # It seems that this parent may point outside
                    # of the current repo, so realpath it.
                    parentPath = os.path.realpath(parentPath)

                if exists_raise_eaccess(parentPath):
                    self._addProfile(parentPath, repositories, known_repos,
                                     intersecting_repos)
                else:
                    raise ParseError(
                        _("Parent '%s' not found: '%s'") %
                        (parentPath, parentsFile))

        self.profiles.append(currentPath)
        self.profiles_complex.append(
            _profile_node(
                currentPath,
                allow_directories,
                False,
                current_formats,
                eapi,
                "build-id" in current_formats,
                show_deprecated_warning=show_deprecated_warning,
            ))
Exemplo n.º 50
0
	def getFetchMap(self, mypkg, useflags=None, mytree=None):
		"""
		Get the SRC_URI metadata as a dict which maps each file name to a
		set of alternative URIs.

		@param mypkg: cpv for an ebuild
		@type mypkg: String
		@param useflags: a collection of enabled USE flags, for evaluation of
			conditionals
		@type useflags: set, or None to enable all conditionals
		@param mytree: The canonical path of the tree in which the ebuild
			is located, or None for automatic lookup
		@type mytree: String
		@returns: A dict which maps each file name to a set of alternative
			URIs.
		@rtype: dict
		"""

		try:
			eapi, src_uri = self.aux_get(mypkg,
				["EAPI", "SRC_URI"], mytree=mytree)
		except KeyError:
			# Convert this to an InvalidDependString exception since callers
			# already handle it.
			raise portage.exception.InvalidDependString(
				"getFetchMap(): aux_get() error reading "+mypkg+"; aborting.")

		if not eapi_is_supported(eapi):
			# Convert this to an InvalidDependString exception
			# since callers already handle it.
			raise portage.exception.InvalidDependString(
				"getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
				(mypkg, eapi.lstrip("-")))

		# Validate the raw SRC_URI, then evaluate USE conditionals and
		# flatten to a linear token sequence.
		tokens = paren_reduce(src_uri)
		_src_uri_validate(mypkg, eapi, tokens)
		tokens = flatten(use_reduce(tokens, uselist=useflags,
			matchall=(useflags is None)))

		uri_map = OrderedDict()

		# Walk tokens left to right; "uri -> distfile" triples override
		# the default distfile name (the URI's basename).
		pos = 0
		end = len(tokens)
		while pos < end:
			uri = tokens[pos]
			pos += 1
			if pos < end and tokens[pos] == "->":
				distfile = tokens[pos + 1]
				pos += 2
			else:
				distfile = os.path.basename(uri)
				if not distfile:
					raise portage.exception.InvalidDependString(
						"getFetchMap(): '%s' SRC_URI has no file name: '%s'"
						% (mypkg, uri))

			uri_map.setdefault(distfile, set()).add(uri)

		return uri_map
Exemplo n.º 51
0
def parse_layout_conf(repo_location, repo_name=None):
	"""
	Parse metadata/layout.conf of the repository at repo_location.

	@param repo_location: filesystem path of the repository
	@param repo_name: optional cached repository name, used only for
		warning messages
	@returns: a (data, layout_errors) tuple where data is a dict of
		normalized layout settings
	"""
	# EAPI associated with the repository, read from the file that
	# corresponds to REPO_NAME_LOC; governs the profile-formats fallback.
	eapi = read_corresponding_eapi_file(os.path.join(repo_location, REPO_NAME_LOC))

	layout_filename = os.path.join(repo_location, "metadata", "layout.conf")
	layout_file = KeyValuePairFileLoader(layout_filename, None, None)
	layout_data, layout_errors = layout_file.load()

	data = {}

	# None indicates absence of a masters setting, which later code uses
	# to trigger a backward compatibility fallback that sets an implicit
	# master. In order to avoid this fallback behavior, layout.conf can
	# explicitly set masters to an empty value, which will result in an
	# empty tuple here instead of None.
	masters = layout_data.get('masters')
	if masters is not None:
		masters = tuple(masters.split())
	data['masters'] = masters
	data['aliases'] = tuple(layout_data.get('aliases', '').split())

	data['eapis-banned'] = tuple(layout_data.get('eapis-banned', '').split())
	data['eapis-deprecated'] = tuple(layout_data.get('eapis-deprecated', '').split())

	# Boolean settings: only the exact lowercase strings 'true'/'false'
	# toggle them; anything else falls back to the default shown here.
	data['sign-commit'] = layout_data.get('sign-commits', 'false').lower() \
		== 'true'

	data['sign-manifest'] = layout_data.get('sign-manifests', 'true').lower() \
		== 'true'

	data['thin-manifest'] = layout_data.get('thin-manifests', 'false').lower() \
		== 'true'

	data['repo-name'] = _gen_valid_repo(layout_data.get('repo-name', ''))

	# 'strict' requires manifests; 'false' disables them entirely.
	manifest_policy = layout_data.get('use-manifests', 'strict').lower()
	data['allow-missing-manifest'] = manifest_policy != 'strict'
	data['create-manifest'] = manifest_policy != 'false'
	data['disable-manifest'] = manifest_policy == 'false'

	# for compatibility w/ PMS, fallback to pms; but also check if the
	# cache exists or not.
	cache_formats = layout_data.get('cache-formats', '').lower().split()
	if not cache_formats:
		# Auto-detect cache formats, and prefer md5-cache if available.
		# This behavior was deployed in portage-2.1.11.14, so that the
		# default egencache format could eventually be changed to md5-dict
		# in portage-2.1.11.32. WARNING: Versions prior to portage-2.1.11.14
		# will NOT recognize md5-dict format unless it is explicitly
		# listed in layout.conf.
		cache_formats = []
		if os.path.isdir(os.path.join(repo_location, 'metadata', 'md5-cache')):
			cache_formats.append('md5-dict')
		if os.path.isdir(os.path.join(repo_location, 'metadata', 'cache')):
			cache_formats.append('pms')
	data['cache-formats'] = tuple(cache_formats)

	manifest_hashes = layout_data.get('manifest-hashes')
	manifest_required_hashes = layout_data.get('manifest-required-hashes')

	# manifest-required-hashes without manifest-hashes is inconsistent;
	# warn and default the latter to the required set.
	if manifest_required_hashes is not None and manifest_hashes is None:
		repo_name = _get_repo_name(repo_location, cached=repo_name)
		warnings.warn((_("Repository named '%(repo_name)s' specifies "
			"'manifest-required-hashes' setting without corresponding "
			"'manifest-hashes'. Portage will default it to match "
			"the required set but please add the missing entry "
			"to: %(layout_filename)s") %
			{"repo_name": repo_name or 'unspecified',
			"layout_filename":layout_filename}),
			SyntaxWarning)
		manifest_hashes = manifest_required_hashes

	if manifest_hashes is not None:
		# require all the hashes unless specified otherwise
		if manifest_required_hashes is None:
			manifest_required_hashes = manifest_hashes

		manifest_required_hashes = frozenset(manifest_required_hashes.upper().split())
		manifest_hashes = frozenset(manifest_hashes.upper().split())
		# Warn (but don't fail) about required hashes missing from
		# manifest-hashes, and about hash types this portage can't compute.
		missing_required_hashes = manifest_required_hashes.difference(
			manifest_hashes)
		if missing_required_hashes:
			repo_name = _get_repo_name(repo_location, cached=repo_name)
			warnings.warn((_("Repository named '%(repo_name)s' has a "
				"'manifest-hashes' setting that does not contain "
				"the '%(hash)s' hashes which are listed in "
				"'manifest-required-hashes'. Please fix that file "
				"if you want to generate valid manifests for this "
				"repository: %(layout_filename)s") %
				{"repo_name": repo_name or 'unspecified',
				"hash": ' '.join(missing_required_hashes),
				"layout_filename":layout_filename}),
				SyntaxWarning)
		unsupported_hashes = manifest_hashes.difference(
			get_valid_checksum_keys())
		if unsupported_hashes:
			repo_name = _get_repo_name(repo_location, cached=repo_name)
			warnings.warn((_("Repository named '%(repo_name)s' has a "
				"'manifest-hashes' setting that contains one "
				"or more hash types '%(hashes)s' which are not supported by "
				"this portage version. You will have to upgrade "
				"portage if you want to generate valid manifests for "
				"this repository: %(layout_filename)s") %
				{"repo_name": repo_name or 'unspecified',
				"hashes":" ".join(sorted(unsupported_hashes)),
				"layout_filename":layout_filename}),
				DeprecationWarning)

	data['manifest-hashes'] = manifest_hashes
	data['manifest-required-hashes'] = manifest_required_hashes

	data['update-changelog'] = layout_data.get('update-changelog', 'false').lower() \
		== 'true'

	# profile-formats: explicit values are validated against the known
	# set; when unset, the repository EAPI selects the fallback.
	raw_formats = layout_data.get('profile-formats')
	if raw_formats is None:
		if eapi_allows_directories_on_profile_level_and_repository_level(eapi):
			raw_formats = ('portage-1',)
		else:
			raw_formats = ('portage-1-compat',)
	else:
		raw_formats = set(raw_formats.split())
		unknown = raw_formats.difference(_valid_profile_formats)
		if unknown:
			repo_name = _get_repo_name(repo_location, cached=repo_name)
			warnings.warn((_("Repository named '%(repo_name)s' has unsupported "
				"profiles in use ('profile-formats = %(unknown_fmts)s' setting in "
				"'%(layout_filename)s; please upgrade portage.") %
				dict(repo_name=repo_name or 'unspecified',
				layout_filename=layout_filename,
				unknown_fmts=" ".join(unknown))),
				DeprecationWarning)
		raw_formats = tuple(raw_formats.intersection(_valid_profile_formats))
	data['profile-formats'] = raw_formats

	# profile_eapi_when_unspecified is only honored when the repository
	# declares the 'profile-default-eapi' format and the EAPI is supported.
	try:
		eapi = layout_data['profile_eapi_when_unspecified']
	except KeyError:
		pass
	else:
		if 'profile-default-eapi' not in raw_formats:
			warnings.warn((_("Repository named '%(repo_name)s' has "
				"profile_eapi_when_unspecified setting in "
				"'%(layout_filename)s', but 'profile-default-eapi' is "
				"not listed in the profile-formats field. Please "
				"report this issue to the repository maintainer.") %
				dict(repo_name=repo_name or 'unspecified',
				layout_filename=layout_filename)),
				SyntaxWarning)
		elif not portage.eapi_is_supported(eapi):
			warnings.warn((_("Repository named '%(repo_name)s' has "
				"unsupported EAPI '%(eapi)s' setting in "
				"'%(layout_filename)s'; please upgrade portage.") %
				dict(repo_name=repo_name or 'unspecified',
				eapi=eapi, layout_filename=layout_filename)),
				SyntaxWarning)
		else:
			data['profile_eapi_when_unspecified'] = eapi

	return data, layout_errors
Exemplo n.º 52
0
def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
	"""Return a list of _MaskReason instances describing why mycpv is
	masked; an empty list means the package is visible.

	mycpv may be a plain cpv string or a Package instance (as passed in
	by emerge); in the latter case its cached metadata is reused.
	"""
	metadata = None
	installed = False
	if not isinstance(mycpv, basestring):
		# emerge passed in a Package instance
		pkg = mycpv
		mycpv = pkg.cpv
		metadata = pkg._metadata
		installed = pkg.installed

	if metadata is None:
		db_keys = list(portdb._aux_cache_keys)
		try:
			metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
		except KeyError:
			if not portdb.cpv_exists(mycpv):
				raise
			# cpv exists but its metadata could not be fetched
			return [_MaskReason("corruption", "corruption")]
		# USE is only needed when LICENSE contains USE conditionals.
		if "?" in metadata["LICENSE"]:
			settings.setcpv(mycpv, mydb=metadata)
			metadata["USE"] = settings["PORTAGE_USE"]
		else:
			metadata["USE"] = ""

	try:
		mycpv.slot
	except AttributeError:
		# Wrap plain strings in _pkg_str so slot/repo attributes are
		# available to the checks below.
		try:
			mycpv = _pkg_str(mycpv, metadata=metadata, settings=settings)
		except portage.exception.InvalidData:
			raise ValueError(_("invalid CPV: %s") % mycpv)

	rValue = []

	# package.mask checking
	if settings._getMaskAtom(mycpv, metadata):
		rValue.append(_MaskReason("package.mask", "package.mask", _UnmaskHint("p_mask", None)))

	# keywords checking
	eapi = metadata["EAPI"]
	mygroups = settings._getKeywords(mycpv, metadata)
	licenses = metadata["LICENSE"]
	properties = metadata["PROPERTIES"]
	restrict = metadata["RESTRICT"]
	if not eapi_is_supported(eapi):
		return [_MaskReason("EAPI", "EAPI %s" % eapi)]
	elif _eapi_is_deprecated(eapi) and not installed:
		# A deprecated EAPI masks only packages that are not installed.
		return [_MaskReason("EAPI", "EAPI %s" % eapi)]
	egroups = settings.configdict["backupenv"].get(
		"ACCEPT_KEYWORDS", "").split()
	global_accept_keywords = settings.get("ACCEPT_KEYWORDS", "")
	pgroups = global_accept_keywords.split()
	myarch = settings["ARCH"]
	if pgroups and myarch not in pgroups:
		"""For operating systems other than Linux, ARCH is not necessarily a
		valid keyword."""
		myarch = pgroups[0].lstrip("~")

	# NOTE: This logic is copied from KeywordsManager.getMissingKeywords().
	unmaskgroups = settings._keywords_manager.getPKeywords(mycpv,
		metadata["SLOT"], metadata["repository"], global_accept_keywords)
	pgroups.extend(unmaskgroups)
	if unmaskgroups or egroups:
		pgroups = settings._keywords_manager._getEgroups(egroups, pgroups)
	else:
		pgroups = set(pgroups)

	# kmask is the keyword-mask verdict: None means accepted,
	# "missing" means no accepted keyword matched.
	kmask = "missing"
	kmask_hint = None

	if '**' in pgroups:
		# "**" accepts any keyword state, including no keywords at all.
		kmask = None
	else:
		for keyword in pgroups:
			if keyword in mygroups:
				kmask = None
				break

	if kmask:
		for gp in mygroups:
			if gp=="*":
				kmask=None
				break
			elif gp == "~*":
				# "~*" is satisfied by any accepted unstable keyword.
				for x in pgroups:
					if x[:1] == "~":
						kmask = None
						break
				if kmask is None:
					break
			elif gp=="-"+myarch and myarch in pgroups:
				kmask="-"+myarch
				break
			elif gp=="~"+myarch and myarch in pgroups:
				kmask="~"+myarch
				kmask_hint = _UnmaskHint("unstable keyword", kmask)
				break

	if kmask == "missing":
		kmask_hint = _UnmaskHint("unstable keyword", "**")

	try:
		missing_licenses = settings._getMissingLicenses(mycpv, metadata)
		if missing_licenses:
			# Report only the LICENSE tokens that are actually missing
			# (plus grouping operators).
			allowed_tokens = set(["||", "(", ")"])
			allowed_tokens.update(missing_licenses)
			license_split = licenses.split()
			license_split = [x for x in license_split \
				if x in allowed_tokens]
			msg = license_split[:]
			msg.append("license(s)")
			rValue.append(_MaskReason("LICENSE", " ".join(msg), _UnmaskHint("license", set(missing_licenses))))
	except portage.exception.InvalidDependString as e:
		rValue.append(_MaskReason("invalid", "LICENSE: "+str(e)))

	try:
		missing_properties = settings._getMissingProperties(mycpv, metadata)
		if missing_properties:
			allowed_tokens = set(["||", "(", ")"])
			allowed_tokens.update(missing_properties)
			properties_split = properties.split()
			properties_split = [x for x in properties_split \
					if x in allowed_tokens]
			msg = properties_split[:]
			msg.append("properties")
			rValue.append(_MaskReason("PROPERTIES", " ".join(msg)))
	except portage.exception.InvalidDependString as e:
		rValue.append(_MaskReason("invalid", "PROPERTIES: "+str(e)))

	try:
		missing_restricts = settings._getMissingRestrict(mycpv, metadata)
		if missing_restricts:
			msg = list(missing_restricts)
			msg.append("in RESTRICT")
			rValue.append(_MaskReason("RESTRICT", " ".join(msg)))
	except InvalidDependString as e:
		rValue.append(_MaskReason("invalid", "RESTRICT: %s" % (e,)))

	# Only show KEYWORDS masks for installed packages
	# if they're not masked for any other reason.
	if kmask and (not installed or not rValue):
		rValue.append(_MaskReason("KEYWORDS",
			kmask + " keyword", unmask_hint=kmask_hint))

	return rValue
Exemplo n.º 53
0
	def _start(self):
		"""Parse the ebuild's declared EAPI and spawn the "depend" phase.

		If the head of the ebuild declares an empty EAPI, fail with an
		invalid-EAPI error.  If the EAPI is unsupported, record a stub
		metadata dict containing only EAPI and finish successfully
		without spawning anything.  Otherwise create a pipe, register
		its read end with the scheduler, and start portage.doebuild()
		asynchronously with the write end exposed as PORTAGE_PIPE_FD.
		"""
		ebuild_path = self.ebuild_hash.location

		with io.open(_unicode_encode(ebuild_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'],
			errors='replace') as f:
			self._eapi, self._eapi_lineno = portage._parse_eapi_ebuild_head(f)

		parsed_eapi = self._eapi
		if parsed_eapi is None:
			# No EAPI assignment found in the head; treat as EAPI "0".
			parsed_eapi = "0"

		if not parsed_eapi:
			# An empty EAPI setting is invalid.
			self._eapi_invalid(None)
			self.returncode = 1
			self._async_wait()
			return

		self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
		if not self.eapi_supported:
			# Record just the EAPI so callers can see why metadata
			# generation was skipped.
			self.metadata = {"EAPI": parsed_eapi}
			self.returncode = os.EX_OK
			self._async_wait()
			return

		settings = self.settings
		settings.setcpv(self.cpv)
		settings.configdict['pkg']['EAPI'] = parsed_eapi

		debug = settings.get("PORTAGE_DEBUG") == "1"
		master_fd = None
		slave_fd = None
		fd_pipes = None
		if self.fd_pipes is not None:
			fd_pipes = self.fd_pipes.copy()
		else:
			fd_pipes = {}

		null_input = open('/dev/null', 'rb')
		fd_pipes.setdefault(0, null_input.fileno())
		fd_pipes.setdefault(1, sys.__stdout__.fileno())
		fd_pipes.setdefault(2, sys.__stderr__.fileno())

		# flush any pending output
		stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
		for fd in fd_pipes.values():
			if fd in stdout_filenos:
				sys.__stdout__.flush()
				sys.__stderr__.flush()
				break

		self._files = self._files_dict()
		files = self._files

		master_fd, slave_fd = os.pipe()

		# The read end must be non-blocking for scheduler-driven reads.
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		# FD_CLOEXEC is enabled by default in Python >=3.4.
		if sys.hexversion < 0x3040000:
			try:
				fcntl.FD_CLOEXEC
			except AttributeError:
				pass
			else:
				fcntl.fcntl(master_fd, fcntl.F_SETFD,
					fcntl.fcntl(master_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

		# Pass the write end through to the child process.
		fd_pipes[slave_fd] = slave_fd
		settings["PORTAGE_PIPE_FD"] = str(slave_fd)

		self._raw_metadata = []
		files.ebuild = master_fd
		self.scheduler.add_reader(files.ebuild, self._output_handler)
		self._registered = True

		retval = portage.doebuild(ebuild_path, "depend",
			settings=settings, debug=debug,
			mydbapi=self.portdb, tree="porttree",
			fd_pipes=fd_pipes, returnpid=True)
		settings.pop("PORTAGE_PIPE_FD", None)

		os.close(slave_fd)
		null_input.close()

		if isinstance(retval, int):
			# doebuild failed before spawning
			self.returncode = retval
			self._async_wait()
			return

		self.pid = retval[0]
Exemplo n.º 54
0
def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
    """Return a list of _MaskReason instances describing why mycpv is
    masked; an empty list means the package is visible.

    mycpv may be a plain cpv string or a Package instance (as passed
    in by emerge); in the latter case its cached metadata is reused.
    """
    metadata = None
    installed = False
    if not isinstance(mycpv, str):
        # emerge handed us a Package instance rather than a cpv string
        pkg = mycpv
        mycpv = pkg.cpv
        metadata = pkg._metadata
        installed = pkg.installed

    if metadata is None:
        aux_keys = list(portdb._aux_cache_keys)
        try:
            metadata = dict(
                zip(aux_keys, portdb.aux_get(mycpv, aux_keys,
                                             myrepo=myrepo)))
        except KeyError:
            if not portdb.cpv_exists(mycpv):
                raise
            return [_MaskReason("corruption", "corruption")]
        # USE only matters when LICENSE contains USE conditionals.
        if "?" in metadata["LICENSE"]:
            settings.setcpv(mycpv, mydb=metadata)
            metadata["USE"] = settings["PORTAGE_USE"]
        else:
            metadata["USE"] = ""

    if not hasattr(mycpv, "slot"):
        # Wrap plain strings so slot/repo attributes are available.
        try:
            mycpv = _pkg_str(mycpv, metadata=metadata, settings=settings)
        except portage.exception.InvalidData:
            raise ValueError(_("invalid CPV: %s") % mycpv)

    reasons = []

    # package.mask check
    if settings._getMaskAtom(mycpv, metadata):
        reasons.append(
            _MaskReason("package.mask", "package.mask",
                        _UnmaskHint("p_mask", None)))

    # keyword / EAPI checks
    eapi = metadata["EAPI"]
    keywords = settings._getKeywords(mycpv, metadata)
    licenses = metadata["LICENSE"]
    properties = metadata["PROPERTIES"]
    restrict = metadata["RESTRICT"]
    if not eapi_is_supported(eapi):
        return [_MaskReason("EAPI", "EAPI %s" % eapi)]
    if _eapi_is_deprecated(eapi) and not installed:
        return [_MaskReason("EAPI", "EAPI %s" % eapi)]

    env_keywords = settings.configdict["backupenv"].get(
        "ACCEPT_KEYWORDS", "").split()
    global_accept_keywords = settings.get("ACCEPT_KEYWORDS", "")
    accept = global_accept_keywords.split()
    arch = settings["ARCH"]
    if accept and arch not in accept:
        # On operating systems other than Linux, ARCH is not
        # necessarily a valid keyword; fall back to the first one.
        arch = accept[0].lstrip("~")

    # Mirrors KeywordsManager.getMissingKeywords().
    unmask_groups = settings._keywords_manager.getPKeywords(
        mycpv, metadata["SLOT"], metadata["repository"],
        global_accept_keywords)
    accept.extend(unmask_groups)
    if unmask_groups or env_keywords:
        accept = settings._keywords_manager._getEgroups(
            env_keywords, accept)
    else:
        accept = set(accept)

    # kmask: None means accepted; "missing" means nothing matched.
    kmask = "missing"
    kmask_hint = None

    # "**" accepts anything; otherwise any accepted keyword present in
    # the package's KEYWORDS clears the mask.
    if "**" in accept or any(kw in keywords for kw in accept):
        kmask = None

    if kmask:
        for gp in keywords:
            if gp == "*":
                kmask = None
                break
            elif gp == "~*":
                # "~*" is satisfied by any accepted unstable keyword.
                if any(kw[:1] == "~" for kw in accept):
                    kmask = None
                    break
            elif gp == "-" + arch and arch in accept:
                kmask = "-" + arch
                break
            elif gp == "~" + arch and arch in accept:
                kmask = "~" + arch
                kmask_hint = _UnmaskHint("unstable keyword", kmask)
                break

    if kmask == "missing":
        kmask_hint = _UnmaskHint("unstable keyword", "**")

    try:
        missing_licenses = settings._getMissingLicenses(mycpv, metadata)
        if missing_licenses:
            # Report only the missing LICENSE tokens (plus grouping
            # operators).
            allowed = set(["||", "(", ")"])
            allowed.update(missing_licenses)
            msg = [tok for tok in licenses.split() if tok in allowed]
            msg.append("license(s)")
            reasons.append(
                _MaskReason("LICENSE", " ".join(msg),
                            _UnmaskHint("license",
                                        set(missing_licenses))))
    except portage.exception.InvalidDependString as e:
        reasons.append(_MaskReason("invalid", "LICENSE: " + str(e)))

    try:
        missing_properties = settings._getMissingProperties(
            mycpv, metadata)
        if missing_properties:
            allowed = set(["||", "(", ")"])
            allowed.update(missing_properties)
            msg = [tok for tok in properties.split() if tok in allowed]
            msg.append("properties")
            reasons.append(_MaskReason("PROPERTIES", " ".join(msg)))
    except portage.exception.InvalidDependString as e:
        reasons.append(_MaskReason("invalid", "PROPERTIES: " + str(e)))

    try:
        missing_restricts = settings._getMissingRestrict(mycpv, metadata)
        if missing_restricts:
            msg = list(missing_restricts)
            msg.append("in RESTRICT")
            reasons.append(_MaskReason("RESTRICT", " ".join(msg)))
    except InvalidDependString as e:
        reasons.append(_MaskReason("invalid", "RESTRICT: %s" % (e,)))

    # KEYWORDS masks for installed packages are reported only when no
    # other mask reason applies.
    if kmask and (not installed or not reasons):
        reasons.append(
            _MaskReason("KEYWORDS", kmask + " keyword",
                        unmask_hint=kmask_hint))

    return reasons
Exemplo n.º 55
0
	def aux_get(self, mycpv, mylist, mytree=None):
		"""Return auxiliary db information, such as SLOT, DEPEND, etc.

		input: "sys-apps/foo-1.0", ["SLOT", "DEPEND", "HOMEPAGE"]
		return: ["0", ">=sys-libs/bar-1.0", "http://www.foo.com"]
		Raises KeyError when the ebuild cannot be found or its
		metadata cannot be generated.

		When mytree is not given, results are memoized in
		self._aux_cache.
		"""
		cache_me = False
		if not mytree:
			cache_me = True
		# Serve from the in-memory cache when every requested key is
		# covered by the cached key set.
		if not mytree and not self._known_keys.intersection(
			mylist).difference(self._aux_cache_keys):
			aux_cache = self._aux_cache.get(mycpv)
			if aux_cache is not None:
				return [aux_cache.get(x, "") for x in mylist]
			cache_me = True
		try:
			cat, pkg = mycpv.split("/", 1)
		except ValueError:
			# Missing slash. Can't find ebuild so raise KeyError.
			raise KeyError(mycpv)

		myebuild, mylocation = self.findname2(mycpv, mytree)

		if not myebuild:
			writemsg("!!! aux_get(): %s\n" % \
				_("ebuild not found for '%s'") % mycpv, noiselevel=1)
			raise KeyError(mycpv)

		mydata, st, emtime = self._pull_valid_cache(mycpv, myebuild, mylocation)
		doregen = mydata is None

		if doregen:
			# No valid cache entry; regenerate metadata by running the
			# ebuild's "depend" phase.
			if myebuild in self._broken_ebuilds:
				raise KeyError(mycpv)
			if not self._have_root_eclass_dir:
				raise KeyError(mycpv)

			self.doebuild_settings.setcpv(mycpv)
			mydata = {}
			eapi = None

			if 'parse-eapi-glep-55' in self.doebuild_settings.features:
				pf, eapi = portage._split_ebuild_name_glep55(
					os.path.basename(myebuild))
			if eapi is None and \
				'parse-eapi-ebuild-head' in self.doebuild_settings.features:
				# Close the file explicitly instead of leaking the
				# descriptor until garbage collection (the previous
				# code passed the open file object without closing it).
				head = codecs.open(_unicode_encode(myebuild,
					encoding=_encodings['fs'], errors='strict'),
					mode='r', encoding=_encodings['repo.content'],
					errors='replace')
				try:
					eapi = portage._parse_eapi_ebuild_head(head)
				finally:
					head.close()

			if eapi is not None:
				self.doebuild_settings.configdict['pkg']['EAPI'] = eapi

			if eapi is not None and not portage.eapi_is_supported(eapi):
				# Record only the unsupported EAPI; the mangling below
				# converts it into a "-"-prefixed marker.
				mydata['EAPI'] = eapi
			else:
				myret = doebuild(myebuild, "depend",
					self.doebuild_settings["ROOT"], self.doebuild_settings,
					dbkey=mydata, tree="porttree", mydbapi=self)
				if myret != os.EX_OK:
					self._broken_ebuilds.add(myebuild)
					raise KeyError(mycpv)

			self._metadata_callback(
				mycpv, myebuild, mylocation, mydata, emtime)

			if mydata.get("INHERITED", False):
				mydata["_eclasses_"] = self._repo_info[mylocation
					].eclass_db.get_eclass_data(mydata["INHERITED"].split())
			else:
				mydata["_eclasses_"] = {}

		# do we have a origin repository name for the current package
		mydata["repository"] = self._repository_map.get(mylocation, "")

		mydata["INHERITED"] = ' '.join(mydata.get("_eclasses_", []))
		mydata["_mtime_"] = st[stat.ST_MTIME]

		eapi = mydata.get("EAPI")
		if not eapi:
			eapi = "0"
			mydata["EAPI"] = eapi
		if not eapi_is_supported(eapi):
			# Blank all values and mark the EAPI with a leading "-" so
			# the cache entry records why it is unusable.
			for k in set(mydata).difference(("_mtime_", "_eclasses_")):
				mydata[k] = ""
			mydata["EAPI"] = "-" + eapi.lstrip("-")

		#finally, we look at our internal cache entry and return the requested data.
		returnme = [mydata.get(x, "") for x in mylist]

		if cache_me:
			aux_cache = {}
			for x in self._aux_cache_keys:
				aux_cache[x] = mydata.get(x, "")
			self._aux_cache[mycpv] = aux_cache

		return returnme
Exemplo n.º 56
0
def getmaskingreason(mycpv,
                     metadata=None,
                     settings=None,
                     portdb=None,
                     return_location=False,
                     myrepo=None):
    """
    Return the package.mask comment block that applies to mycpv, or
    None if no matching entry (or no comment) is found.  With
    return_location=True, return a (comment, filename) tuple instead,
    or (None, None) when nothing matches.

    If specified, the myrepo argument is assumed to be valid. This
    should be a safe assumption since portdbapi methods always
    return valid repo names and valid "repository" metadata from
    aux_get.
    """
    if settings is None:
        settings = portage.settings
    if portdb is None:
        portdb = portage.portdb
    mysplit = catpkgsplit(mycpv)
    if not mysplit:
        raise ValueError(_("invalid CPV: %s") % mycpv)

    if metadata is None:
        db_keys = list(portdb._aux_cache_keys)
        try:
            metadata = dict(
                zip(db_keys, portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
        except KeyError:
            # NOTE(review): when aux_get fails but the cpv exists,
            # metadata stays None and flows into _pkg_str below —
            # preserved as-is, confirm intended.
            if not portdb.cpv_exists(mycpv):
                raise
        else:
            if myrepo is None:
                myrepo = _gen_valid_repo(metadata["repository"])

    elif myrepo is None:
        myrepo = metadata.get("repository")
        if myrepo is not None:
            myrepo = _gen_valid_repo(metadata["repository"])

    if metadata is not None and not portage.eapi_is_supported(
            metadata["EAPI"]):
        # Return early since otherwise we might produce invalid
        # results given that the EAPI is not supported. Also,
        # metadata is mostly useless in this case since it doesn't
        # contain essential things like SLOT.
        if return_location:
            return (None, None)
        return None

    # Sometimes we can't access SLOT or repository due to corruption.
    pkg = mycpv
    try:
        pkg.slot
    except AttributeError:
        pkg = _pkg_str(mycpv, metadata=metadata, repo=myrepo)

    cpv_slot_list = [pkg]

    mycp = pkg.cp

    # Candidate package.mask locations: the repository's masters, the
    # repository itself, the active profiles, and the user config dir.
    locations = []
    if pkg.repo in settings.repositories:
        for repo in settings.repositories[pkg.repo].masters + (
                settings.repositories[pkg.repo], ):
            locations.append(os.path.join(repo.location, "profiles"))
    locations.extend(settings.profiles)
    locations.append(
        os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH))
    locations.reverse()
    pmasklists = []
    # Group package.mask lines per source file.
    for profile in locations:
        pmask_filename = os.path.join(profile, "package.mask")
        node = None
        for l, recursive_filename in grablines(pmask_filename,
                                               recursive=1,
                                               remember_source_file=True):
            if node is None or node[0] != recursive_filename:
                node = (recursive_filename, [])
                pmasklists.append(node)
            node[1].append(l)

    pmaskdict = settings._mask_manager._pmaskdict
    if mycp in pmaskdict:
        for x in pmaskdict[mycp]:
            if match_from_list(x, cpv_slot_list):
                x = x.without_repo
                # Scan the raw files for the comment preceding the
                # matching atom.
                for pmask in pmasklists:
                    comment = ""
                    comment_valid = -1
                    pmask_filename = pmask[0]
                    for i in range(len(pmask[1])):
                        l = pmask[1][i].strip()
                        try:
                            l_atom = Atom(l,
                                          allow_repo=True,
                                          allow_wildcard=True).without_repo
                        except InvalidAtom:
                            l_atom = None
                        if l == "":
                            # A blank line ends the current comment.
                            comment = ""
                            comment_valid = -1
                        elif l[0] == "#":
                            comment += l + "\n"
                            comment_valid = i + 1
                        elif l_atom == x:
                            if comment_valid != i:
                                comment = ""
                            if return_location:
                                return (comment, pmask_filename)
                            return comment
                        elif comment_valid != -1:
                            # Apparently this comment applies to multiple masks, so
                            # it remains valid until a blank line is encountered.
                            comment_valid += 1
    if return_location:
        return (None, None)
    return None
Exemplo n.º 57
0
	def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
		"""Caching match function.

		level selects the matching strategy: "match-all",
		"match-visible", "list-visible", "bestmatch-visible",
		"minimum-visible", "minimum-all", "match-list" or
		"bestmatch-list".  While the tree is frozen, results for all
		levels except the *-list ones are memoized in self.xcache.
		"""
		#if no updates are being made to the tree, we can consult our xcache...
		if self.frozen:
			try:
				return self.xcache[level][origdep][:]
			except KeyError:
				pass

		if not mydep:
			#this stuff only runs on first call of xmatch()
			#create mydep, mykey from origdep
			mydep = dep_expand(origdep, mydb=self, settings=self.settings)
			mykey = mydep.cp

		if level == "list-visible":
			#a list of all visible packages, not called directly (just by xmatch())
			#myval = self.visible(self.cp_list(mykey))

			myval = self.gvisible(self.visible(self.cp_list(mykey)))
		elif level == "minimum-all":
			# Find the minimum matching version. This is optimized to
			# minimize the number of metadata accesses (improves performance
			# especially in cases where metadata needs to be generated).
			cpv_iter = iter(self.cp_list(mykey))
			if mydep != mykey:
				cpv_iter = self._iter_match(mydep, cpv_iter)
			try:
				myval = next(cpv_iter)
			except StopIteration:
				myval = ""

		elif level in ("minimum-visible", "bestmatch-visible"):
			# Find the minimum matching visible version. This is optimized to
			# minimize the number of metadata accesses (improves performance
			# especially in cases where metadata needs to be generated).
			if mydep == mykey:
				mylist = self.cp_list(mykey)
			else:
				mylist = match_from_list(mydep, self.cp_list(mykey))
			myval = ""
			settings = self.settings
			local_config = settings.local_config
			aux_keys = list(self._aux_cache_keys)
			if level == "minimum-visible":
				iterfunc = iter
			else:
				# bestmatch-visible scans from the highest version down.
				iterfunc = reversed
			for cpv in iterfunc(mylist):
				try:
					metadata = dict(zip(aux_keys,
						self.aux_get(cpv, aux_keys)))
				except KeyError:
					# ebuild masked by corruption
					continue
				if not eapi_is_supported(metadata["EAPI"]):
					continue
				if mydep.slot and mydep.slot != metadata["SLOT"]:
					continue
				if settings._getMissingKeywords(cpv, metadata):
					continue
				if settings._getMaskAtom(cpv, metadata):
					continue
				if settings._getProfileMaskAtom(cpv, metadata):
					continue
				if local_config:
					metadata["USE"] = ""
					# USE only matters for USE-conditional
					# LICENSE/PROPERTIES evaluation.
					if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]:
						self.doebuild_settings.setcpv(cpv, mydb=metadata)
						metadata["USE"] = self.doebuild_settings.get("USE", "")
					try:
						if settings._getMissingLicenses(cpv, metadata):
							continue
						if settings._getMissingProperties(cpv, metadata):
							continue
					except InvalidDependString:
						continue
				if mydep.use:
					has_iuse = False
					for has_iuse in self._iter_match_use(mydep, [cpv]):
						break
					if not has_iuse:
						continue
				myval = cpv
				break
		elif level == "bestmatch-list":
			#dep match -- find best match but restrict search to sublist
			#no point in calling xmatch again since we're not caching list deps

			myval = best(list(self._iter_match(mydep, mylist)))
		elif level == "match-list":
			#dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())

			myval = list(self._iter_match(mydep, mylist))
		elif level == "match-visible":
			#dep match -- find all visible matches
			#get all visible packages, then get the matching ones

			myval = list(self._iter_match(mydep,
				self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey)))
		elif level == "match-all":
			#match *all* visible *and* masked packages
			if mydep == mykey:
				myval = self.cp_list(mykey)
			else:
				myval = list(self._iter_match(mydep, self.cp_list(mykey)))
		else:
			raise AssertionError(
				"Invalid level argument: '%s'" % level)

		if self.frozen and (level not in ["match-list", "bestmatch-list"]):
			# Cache under both the expanded and the original dep forms.
			self.xcache[level][mydep] = myval
			if origdep and origdep != mydep:
				self.xcache[level][origdep] = myval
		return myval[:]
Exemplo n.º 58
0
def getmaskingstatus(mycpv, settings=None, portdb=None):
    """Return a list of plain-string mask reasons for mycpv (for
    example "package.mask", "EAPI 3" or "~amd64 keyword").  An empty
    list means the package is visible.  mycpv may be a cpv string or a
    Package instance passed in by emerge.
    """
    if settings is None:
        settings = config(clone=portage.settings)
    if portdb is None:
        portdb = portage.portdb

    metadata = None
    installed = False
    if not isinstance(mycpv, basestring):
        # emerge passed in a Package instance
        pkg = mycpv
        mycpv = pkg.cpv
        metadata = pkg.metadata
        installed = pkg.installed

    mysplit = catpkgsplit(mycpv)
    if not mysplit:
        raise ValueError(_("invalid CPV: %s") % mycpv)
    if metadata is None:
        db_keys = list(portdb._aux_cache_keys)
        try:
            metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
        except KeyError:
            if not portdb.cpv_exists(mycpv):
                raise
            # cpv exists but its metadata could not be fetched
            return ["corruption"]
        # USE is only needed when LICENSE contains USE conditionals.
        if "?" in metadata["LICENSE"]:
            settings.setcpv(mycpv, mydb=metadata)
            metadata["USE"] = settings["PORTAGE_USE"]
        else:
            metadata["USE"] = ""

    rValue = []

    # profile checking
    if settings._getProfileMaskAtom(mycpv, metadata):
        rValue.append("profile")

    # package.mask checking
    if settings._getMaskAtom(mycpv, metadata):
        rValue.append("package.mask")

    # keywords checking
    eapi = metadata["EAPI"]
    mygroups = settings._getKeywords(mycpv, metadata)
    licenses = metadata["LICENSE"]
    properties = metadata["PROPERTIES"]
    if eapi.startswith("-"):
        # Cached metadata marks unsupported EAPIs with a leading "-";
        # strip it before testing for support.
        eapi = eapi[1:]
    if not eapi_is_supported(eapi):
        return ["EAPI %s" % eapi]
    elif _eapi_is_deprecated(eapi) and not installed:
        # Deprecated EAPIs mask only packages that are not installed.
        return ["EAPI %s" % eapi]
    egroups = settings.configdict["backupenv"].get("ACCEPT_KEYWORDS",
                                                   "").split()
    pgroups = settings["ACCEPT_KEYWORDS"].split()
    myarch = settings["ARCH"]
    if pgroups and myarch not in pgroups:
        """For operating systems other than Linux, ARCH is not necessarily a
		valid keyword."""
        myarch = pgroups[0].lstrip("~")

    cp = cpv_getkey(mycpv)
    pkgdict = settings.pkeywordsdict.get(cp)
    matches = False
    if pkgdict:
        cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
        for atom, pkgkeywords in pkgdict.items():
            if match_from_list(atom, cpv_slot_list):
                matches = True
                pgroups.extend(pkgkeywords)
    if matches or egroups:
        # Resolve incremental entries: "-*" clears everything seen so
        # far, "-kw" removes a previously accepted keyword.
        pgroups.extend(egroups)
        inc_pgroups = set()
        for x in pgroups:
            if x.startswith("-"):
                if x == "-*":
                    inc_pgroups.clear()
                else:
                    inc_pgroups.discard(x[1:])
            else:
                inc_pgroups.add(x)
        pgroups = inc_pgroups
        del inc_pgroups

    # kmask: None means accepted; "missing" means nothing matched.
    kmask = "missing"

    if '**' in pgroups:
        kmask = None
    else:
        for keyword in pgroups:
            if keyword in mygroups:
                kmask = None
                break

    if kmask:
        for gp in mygroups:
            if gp == "*":
                kmask = None
                break
            elif gp == "-" + myarch and myarch in pgroups:
                kmask = "-" + myarch
                break
            elif gp == "~" + myarch and myarch in pgroups:
                kmask = "~" + myarch
                break

    try:
        missing_licenses = settings._getMissingLicenses(mycpv, metadata)
        if missing_licenses:
            # Report only the missing LICENSE tokens (plus grouping
            # operators).
            allowed_tokens = set(["||", "(", ")"])
            allowed_tokens.update(missing_licenses)
            license_split = licenses.split()
            license_split = [x for x in license_split \
             if x in allowed_tokens]
            msg = license_split[:]
            msg.append("license(s)")
            rValue.append(" ".join(msg))
    except portage.exception.InvalidDependString as e:
        rValue.append("LICENSE: " + str(e))

    try:
        missing_properties = settings._getMissingProperties(mycpv, metadata)
        if missing_properties:
            allowed_tokens = set(["||", "(", ")"])
            allowed_tokens.update(missing_properties)
            properties_split = properties.split()
            properties_split = [x for x in properties_split \
              if x in allowed_tokens]
            msg = properties_split[:]
            msg.append("properties")
            rValue.append(" ".join(msg))
    except portage.exception.InvalidDependString as e:
        rValue.append("PROPERTIES: " + str(e))

    # Only show KEYWORDS masks for installed packages
    # if they're not masked for any other reason.
    if kmask and (not installed or not rValue):
        rValue.append(kmask + " keyword")

    return rValue