Beispiel #1
0
 def deps_affected_by_flag(self, flag):
     """Return sorted Dependency objects that appear in this package's
     depstring only when USE flag *flag* is enabled.

     Computed as the difference between the atoms valid with the flag
     enabled and the atoms present unconditionally.
     """
     depstring = Portage.get_depstring(self.cpv)
     # Atoms present with no USE flags enabled (unconditional deps).
     base_deps = set(filter(isvalidatom, use_reduce(depstring, flat=True)))
     # Atoms present when only this flag is enabled.
     deps = set(
         filter(isvalidatom,
                use_reduce(depstring, uselist=[flag.name], flat=True)))
     return sorted(map(Dependency, deps - base_deps))
Beispiel #2
0
	def load(self):
		"""Populate the set with installed packages whose vdb RDEPEND and
		PDEPEND no longer match the corresponding ebuild's metadata.

		Both sides are use_reduce()d with the USE flags the package was
		built with (making them conditional-free and comparable); any
		mismatch adds an '=cat/pkg-ver' atom to the set.
		"""
		depvars = ('RDEPEND', 'PDEPEND')
		# EAPI is needed on both sides for Atom parsing in use_reduce().
		ebuild_vars = depvars + ('EAPI',)
		installed_vars = depvars + ('USE', 'EAPI')

		atoms = []
		for cpv in self._vardb.cpv_all():
			# no ebuild, no update :).
			try:
				ebuild_metadata = dict(zip(ebuild_vars, self._portdb.aux_get(cpv, ebuild_vars)))
			except PortageKeyError:
				continue

			# USE flags used to build the ebuild and EAPI
			# (needed for Atom & use_reduce())
			installed_metadata = dict(zip(installed_vars, self._vardb.aux_get(cpv, installed_vars)))
			usel = frozenset(installed_metadata['USE'].split())

			# get all *DEPEND variables from vdb & portdb and compare them.
			# we need to do some cleaning up & expansion to make matching
			# meaningful since vdb dependencies are conditional-free.
			vdbvars = [strip_slots(use_reduce(installed_metadata[k],
				uselist=usel, eapi=installed_metadata['EAPI'], token_class=Atom))
				for k in depvars]
			pdbvars = [strip_slots(use_reduce(ebuild_metadata[k],
				uselist=usel, eapi=ebuild_metadata['EAPI'], token_class=Atom))
				for k in depvars]

			# if dependencies don't match, trigger the rebuild.
			if vdbvars != pdbvars:
				atoms.append('=%s' % cpv)

		self._setAtoms(atoms)
def load_unpack_dependencies_configuration(repositories):
	"""Read unpack_dependencies profile files for every repository.

	For each repository with profiles and each EAPI supporting automatic
	unpack dependencies, parse
	<repo>/profiles/unpack_dependencies/<eapi> into a mapping of archive
	suffix -> dependency string, then merge entries inherited from master
	repositories into each repo's result.

	@param repositories: repository config providing repos_with_profiles()
	@return: {repo_name: {eapi: {suffix: depend_string}}}
	"""
	repo_dict = {}
	for repo in repositories.repos_with_profiles():
		for eapi in _supported_eapis:
			if eapi_has_automatic_unpack_dependencies(eapi):
				file_name = os.path.join(repo.location, "profiles", "unpack_dependencies", eapi)
				lines = grabfile(file_name, recursive=True)
				for line in lines:
					elements = line.split()
					suffix = elements[0].lower()
					if len(elements) == 1:
						writemsg(_("--- Missing unpack dependencies for '%s' suffix in '%s'\n") % (suffix, file_name))
					depend = " ".join(elements[1:])
					try:
						use_reduce(depend, eapi=eapi)
					except InvalidDependString as e:
						# Format AFTER translation so gettext receives the raw
						# msgid (the original formatted inside _(), defeating
						# the message-catalog lookup).
						writemsg(_("--- Invalid unpack dependencies for '%s' suffix in '%s': '%s'\n") % (suffix, file_name, e))
					else:
						repo_dict.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend

	# Merge master-repo entries; a repo's own entries are applied last
	# and therefore win on conflicts.
	ret = {}
	for repo in repositories.repos_with_profiles():
		for repo_name in [x.name for x in repo.masters] + [repo.name]:
			for eapi in repo_dict.get(repo_name, {}):
				for suffix, depend in repo_dict.get(repo_name, {}).get(eapi, {}).items():
					ret.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend

	return ret
Beispiel #4
0
    def load(self):
        """Populate the set with installed packages whose vdb RDEPEND and
        PDEPEND no longer match the corresponding ebuild's, i.e. packages
        that need a rebuild after a dependency change.

        Sub-slot ':=' dependencies are normalized to a bare ':=' on both
        sides, since vdb stores expanded slots while portdb carries
        abstract ones.
        """
        depvars = ('RDEPEND', 'PDEPEND')

        # regexp used to match atoms using subslot operator :=
        subslot_repl_re = re.compile(r':[^[]*=')

        atoms = []
        for cpv in self._vardb.cpv_all():
            # no ebuild, no update :).
            if not self._portdb.cpv_exists(cpv):
                continue

            # USE flags used to build the ebuild and EAPI
            # (needed for Atom & use_reduce())
            use, eapi = self._vardb.aux_get(cpv, ('USE', 'EAPI'))
            usel = use.split()

            # function used to recursively process atoms in nested lists.
            def clean_subslots(depatom, usel=None):
                if isinstance(depatom, list):
                    # process the nested list.
                    return [clean_subslots(x, usel) for x in depatom]
                else:
                    try:
                        # this can be either an atom or some special operator.
                        # in the latter case, we get InvalidAtom and pass it as-is.
                        a = Atom(depatom)
                    except InvalidAtom:
                        return depatom
                    else:
                        # if we're processing portdb, we need to evaluate USE flag
                        # dependency conditionals to make them match vdb. this
                        # requires passing the list of USE flags, so we reuse it
                        # as conditional for the operation as well.
                        if usel is not None:
                            a = a.evaluate_conditionals(usel)

                        # replace slot operator := dependencies with plain :=
                        # since we can't properly compare expanded slots
                        # in vardb to abstract slots in portdb.
                        return subslot_repl_re.sub(':=', a)

            # get all *DEPEND variables from vdb & portdb and compare them.
            # we need to do some cleaning up & expansion to make matching
            # meaningful since vdb dependencies are conditional-free.
            vdbvars = [
                clean_subslots(use_reduce(x, uselist=usel, eapi=eapi))
                for x in self._vardb.aux_get(cpv, depvars)
            ]
            pdbvars = [
                clean_subslots(use_reduce(x, uselist=usel, eapi=eapi), usel)
                for x in self._portdb.aux_get(cpv, depvars)
            ]

            # if dependencies don't match, trigger the rebuild.
            if vdbvars != pdbvars:
                atoms.append('=%s' % cpv)

        self._setAtoms(atoms)
Beispiel #5
0
def _deps_equal(deps_a, eapi_a, deps_b, eapi_b, uselist=None):
    """Compare two dependency lists given a set of USE flags"""
    if deps_a == deps_b: return True

    deps_a = use_reduce(deps_a, uselist=uselist, eapi=eapi_a, token_class=Atom)
    deps_b = use_reduce(deps_b, uselist=uselist, eapi=eapi_b, token_class=Atom)
    strip_slots(deps_a)
    strip_slots(deps_b)
    return deps_a == deps_b
Beispiel #6
0
	def load(self):
		"""Populate the set with installed packages whose vdb RDEPEND and
		PDEPEND no longer match the corresponding ebuild's, i.e. packages
		that need a rebuild after a dependency change.

		Sub-slot ':=' dependencies are normalized to a bare ':=' on both
		sides, since vdb stores expanded slots while portdb carries
		abstract ones.
		"""
		depvars = ('RDEPEND', 'PDEPEND')

		# regexp used to match atoms using subslot operator :=
		subslot_repl_re = re.compile(r':[^[]*=')

		atoms = []
		for cpv in self._vardb.cpv_all():
			# no ebuild, no update :).
			if not self._portdb.cpv_exists(cpv):
				continue

			# USE flags used to build the ebuild and EAPI
			# (needed for Atom & use_reduce())
			use, eapi = self._vardb.aux_get(cpv, ('USE', 'EAPI'))
			usel = use.split()

			# function used to recursively process atoms in nested lists.
			def clean_subslots(depatom, usel=None):
				if isinstance(depatom, list):
					# process the nested list.
					return [clean_subslots(x, usel) for x in depatom]
				else:
					try:
						# this can be either an atom or some special operator.
						# in the latter case, we get InvalidAtom and pass it as-is.
						a = Atom(depatom)
					except InvalidAtom:
						return depatom
					else:
						# if we're processing portdb, we need to evaluate USE flag
						# dependency conditionals to make them match vdb. this
						# requires passing the list of USE flags, so we reuse it
						# as conditional for the operation as well.
						if usel is not None:
							a = a.evaluate_conditionals(usel)

						# replace slot operator := dependencies with plain :=
						# since we can't properly compare expanded slots
						# in vardb to abstract slots in portdb.
						return subslot_repl_re.sub(':=', a)

			# get all *DEPEND variables from vdb & portdb and compare them.
			# we need to do some cleaning up & expansion to make matching
			# meaningful since vdb dependencies are conditional-free.
			vdbvars = [clean_subslots(use_reduce(x, uselist=usel, eapi=eapi))
					for x in self._vardb.aux_get(cpv, depvars)]
			pdbvars = [clean_subslots(use_reduce(x, uselist=usel, eapi=eapi), usel)
					for x in self._portdb.aux_get(cpv, depvars)]

			# if dependencies don't match, trigger the rebuild.
			if vdbvars != pdbvars:
				atoms.append('=%s' % cpv)

		self._setAtoms(atoms)
Beispiel #7
0
    def _aux_parse(self, arg):
        """Flatten a conditional metadata string into a set of tokens,
        adding the name of every group in self.groups whose members
        intersect the parsed tokens."""
        try:
            tokens = use_reduce(arg, matchall=True, flat=True)
        except TypeError:  # portage-2.1.8 compat
            from portage.dep import paren_reduce
            tokens = use_reduce(paren_reduce(arg, tokenize=True), matchall=True)

        result = set(tokens)
        result.discard('||')
        # Expand group names for any group that overlaps the token set.
        result.update(name for name, members in self.groups.items()
                      if result & members)
        return result
Beispiel #8
0
def evaluate_slot_operator_equal_deps(settings, use, trees):
	"""Expand ':=' slot-operator dependencies in the package's metadata
	using slots of currently installed packages.

	@param settings: config whose configdict['pkg'] holds the metadata
	@param use: enabled USE flags passed to use_reduce()
	@param trees: trees mapping providing running/target eroot vartrees
	@return: dict mapping each dep key to its re-serialized dep string
	"""

	metadata = settings.configdict['pkg']
	eapi = metadata['EAPI']
	eapi_attrs = _get_eapi_attrs(eapi)
	running_vardb = trees[trees._running_eroot]["vartree"].dbapi
	target_vardb = trees[trees._target_eroot]["vartree"].dbapi
	vardbs = [target_vardb]
	deps = {}
	for k in Package._dep_keys:
		deps[k] = use_reduce(metadata[k],
			uselist=use, eapi=eapi, token_class=Atom)

	# Runtime deps always resolve against the target vardb.
	for k in Package._runtime_keys:
		_eval_deps(deps[k], vardbs)

	if eapi_attrs.bdepend:
		# EAPIs with BDEPEND distinguish build-host from target-host deps.
		_eval_deps(deps["BDEPEND"], [running_vardb])
		_eval_deps(deps["DEPEND"], [target_vardb])
	else:
		# Older EAPIs: DEPEND may be satisfied from either root.
		if running_vardb is not target_vardb:
			vardbs.append(running_vardb)
		_eval_deps(deps["DEPEND"], vardbs)

	result = {}
	for k, v in deps.items():
		result[k] = paren_enclose(v)

	return result
	def _eval_use_flags(self, cpv, metadata):
		"""Evaluate USE conditionals in *metadata*, in place.

		USE itself is restricted to flags declared in IUSE (sorted), and
		each key in self._pkgindex_use_evaluated_keys is reduced against
		the raw (unfiltered) USE set via the legacy paren_* pipeline.
		Keys in _vdb_use_conditional_atoms additionally get per-atom
		conditional evaluation.
		"""
		use = frozenset(metadata["USE"].split())
		raw_use = use
		# Restrict stored USE to flags actually declared in IUSE.
		iuse = set(f.lstrip("-+") for f in metadata["IUSE"].split())
		use = [f for f in use if f in iuse]
		use.sort()
		metadata["USE"] = " ".join(use)
		# Deferred import of the legacy paren_* helpers (old portage API).
		from portage.dep import paren_reduce, use_reduce, \
			paren_normalize, paren_enclose
		for k in self._pkgindex_use_evaluated_keys:
			try:
				deps = paren_reduce(metadata[k])
				deps = use_reduce(deps, uselist=raw_use)
				deps = paren_normalize(deps)
				deps = paren_enclose(deps)
			except portage.exception.InvalidDependString as e:
				writemsg("%s: %s\n" % (k, str(e)),
					noiselevel=-1)
				raise
			if k in _vdb_use_conditional_atoms:
				# Re-parse each token as an Atom to evaluate any remaining
				# USE conditionals; non-atom tokens are kept verbatim.
				v_split = []
				for x in deps.split():
					try:
						x = portage.dep.Atom(x)
					except portage.exception.InvalidAtom:
						v_split.append(x)
					else:
						v_split.append(str(x.evaluate_conditionals(raw_use)))
				deps = ' '.join(v_split)
			metadata[k] = deps
Beispiel #10
0
    def get_prunned_accept_license(self, cpv, use, lic, slot, repo):
        """Return ACCEPT_LICENSE intersected with the package's LICENSE.

        Pruning keeps the value small enough that execve() calls do not
        fail with E2BIG on an oversized environment (bug #262647).
        """
        try:
            licenses = set(use_reduce(lic, uselist=use, flat=True))
        except InvalidDependString:
            licenses = set()
        licenses.discard('||')

        accept_license = self._getPkgAcceptLicense(cpv, slot, repo)

        if accept_license:
            # Apply ACCEPT_LICENSE tokens in order; later tokens override.
            accepted = set()
            for token in accept_license:
                if token == '*':
                    accepted.update(licenses)
                elif token == '-*':
                    accepted.clear()
                elif token[:1] == '-':
                    accepted.discard(token[1:])
                elif token in licenses:
                    accepted.add(token)
            licenses = accepted

        return ' '.join(sorted(licenses))
Beispiel #11
0
	def get_prunned_accept_license(self, cpv, use, lic, slot):
		"""
		Generate a pruned version of ACCEPT_LICENSE, by intersection with
		LICENSE. This is required since otherwise ACCEPT_LICENSE might be
		too big (bigger than ARG_MAX), causing execve() calls to fail with
		E2BIG errors as in bug #262647.
		"""
		# All licenses that could apply given the package's USE flags.
		try:
			licenses = set(use_reduce(lic, uselist=use, flat=True))
		except InvalidDependString:
			licenses = set()
		licenses.discard('||')

		accept_license = self._getPkgAcceptLicense(cpv, slot)

		if accept_license:
			# Apply ACCEPT_LICENSE tokens in order; later tokens override
			# earlier ones ('*' accepts all, '-*' clears, '-x' removes x).
			acceptable_licenses = set()
			for x in accept_license:
				if x == '*':
					acceptable_licenses.update(licenses)
				elif x == '-*':
					acceptable_licenses.clear()
				elif x[:1] == '-':
					acceptable_licenses.discard(x[1:])
				elif x in licenses:
					acceptable_licenses.add(x)

			licenses = acceptable_licenses
		return ' '.join(sorted(licenses))
Beispiel #12
0
    def __getitem__(self, k):
        """Return metadata value *k*, with lazy post-processing.

        USE-conditional keys have their '?' conditionals evaluated against
        the package's enabled USE flags (local configs only) and the
        result cached back into the mapping; USE itself is computed
        lazily for unbuilt packages.
        """
        v = _PackageMetadataWrapperBase.__getitem__(self, k)
        if k in self._use_conditional_keys:
            if self._pkg.root_config.settings.local_config and "?" in v:
                try:
                    v = paren_enclose(
                        use_reduce(
                            v,
                            uselist=self._pkg.use.enabled,
                            is_valid_flag=self._pkg.iuse.is_valid_flag,
                        )
                    )
                except InvalidDependString:
                    # This error should already have been registered via
                    # self._pkg._invalid_metadata().
                    pass
                else:
                    # Cache the evaluated value for subsequent lookups.
                    self[k] = v

        elif k == "USE" and not self._pkg.built:
            if not v:
                # This is lazy because it's expensive.
                v = self._pkg._init_use()

        return v
Beispiel #13
0
    def __getitem__(self, k):
        """Return metadata value *k*, with lazy post-processing.

        USE-conditional keys are evaluated via the legacy paren_*
        pipeline (local configs only) and cached back; USE for unbuilt
        packages is computed from the porttree's doebuild settings.
        """
        v = _PackageMetadataWrapperBase.__getitem__(self, k)
        if k in self._use_conditional_keys:
            if self._pkg.root_config.settings.local_config and '?' in v:
                try:
                    v = paren_enclose(
                        paren_normalize(
                            use_reduce(paren_reduce(v),
                                       uselist=self._pkg.use.enabled)))
                except portage.exception.InvalidDependString:
                    # This error should already have been registered via
                    # self._pkg._invalid_metadata().
                    pass
                else:
                    # Cache the evaluated value for subsequent lookups.
                    self[k] = v

        elif k == 'USE' and not self._pkg.built:
            if not v:
                # This is lazy because it's expensive.
                # Compute PORTAGE_USE via doebuild settings and cache it.
                pkgsettings = self._pkg.root_config.trees[
                    'porttree'].dbapi.doebuild_settings
                pkgsettings.setcpv(self._pkg)
                v = pkgsettings["PORTAGE_USE"]
                self['USE'] = v

        return v
    def testOverlapDNF(self):
        """_overlap_dnf() should DNF-convert only the || groups that share
        a common atom (cat/B below); disjoint groups are returned in
        their original any-of form."""

        test_cases = (
            (
                '|| ( cat/A cat/B ) cat/E || ( cat/C cat/D )',
                [['||', 'cat/A', 'cat/B'], 'cat/E', ['||', 'cat/C', 'cat/D']],
            ),
            (
                '|| ( cat/A cat/B ) cat/D || ( cat/B cat/C )',
                [
                    'cat/D',
                    [
                        '||', ['cat/A', 'cat/B'], ['cat/A', 'cat/C'],
                        ['cat/B', 'cat/B'], ['cat/B', 'cat/C']
                    ]
                ],
            ),
            (
                '|| ( cat/A cat/B ) || ( cat/C cat/D )  || ( ( cat/B cat/E ) cat/F )',
                [[
                    '||', ['cat/A', 'cat/B', 'cat/E'], ['cat/A', 'cat/F'],
                    ['cat/B', 'cat/B', 'cat/E'], ['cat/B', 'cat/F']
                ], ['||', 'cat/C', 'cat/D']],
            ),
        )

        for dep_str, result in test_cases:
            self.assertEqual(
                _overlap_dnf(
                    use_reduce(dep_str, token_class=Atom, opconvert=True)),
                result)
Beispiel #15
0
	def run(self):
		"""Invoke use_reduce() with this task's stored arguments.

		On failure, re-raise InvalidDependString with the offending
		dependency string appended for easier debugging.
		"""
		args = (self.deparray, self.uselist, self.masklist,
			self.matchall, self.excludeall, self.is_src_uri, self.eapi,
			self.opconvert, self.flat, self.is_valid_flag, self.token_class)
		try:
			return use_reduce(*args)
		except InvalidDependString as e:
			raise InvalidDependString("%s: %s" % (e, self.deparray))
Beispiel #16
0
    def testUseReduce(self):
        """use_reduce() must accept well-formed dep strings and raise
        InvalidDependString on malformed ones."""
        cases = {
            '|| ( x y )': True,
            '|| x': False,
            'foo? ( x y )': True,
            'foo? ( bar? x y )': False,
            'foo? x': False,
        }

        for dep_str, valid in cases.items():
            try:
                use_reduce(paren_reduce(dep_str), matchall=True)
            except InvalidDependString:
                self.assertEqual(valid, False)
            else:
                self.assertEqual(valid, True)
Beispiel #17
0
def deps_from_depstring(depstring, use_flags=None):
    """Reduce *depstring* to a flat list of non-blocker dependencies.

    When no use flags are supplied, all conditionals are matched
    (matchall). Blocker atoms (leading '!') are dropped.
    """
    # Empty/None flag list means "match every conditional".
    matchall = not bool(use_flags)
    reduced = use_reduce(depstring, uselist=use_flags or [], matchall=matchall)
    return [dep for dep in resolve_deps_anyof(reduced)
            if not dep.startswith('!')]
	def testUseReduce(self):
		"""use_reduce() must accept well-formed dep strings and raise
		InvalidDependString on malformed ones."""

		tests = (
			('|| ( x y )',                                           True  ),
			('|| x',                                                 False ),
			('foo? ( x y )',                                         True  ),
			('foo? ( bar? x y )',                                    False ),
			('foo? x',                                               False ),
		)

		for dep_str, valid in tests:
			try:
				use_reduce(paren_reduce(dep_str), matchall=True)
			except InvalidDependString:
				self.assertEqual(valid, False)
			else:
				self.assertEqual(valid, True)
Beispiel #19
0
def find_built_slot_operator_atoms(pkg):
	"""Return a {dep_key: [atoms]} mapping of built slot-operator atoms
	found in pkg's dependency metadata; keys with no matches are omitted."""
	found = {}
	for key in Package._dep_keys:
		reduced = use_reduce(pkg._metadata[key],
			uselist=pkg.use.enabled, eapi=pkg.eapi,
			token_class=Atom)
		matches = list(_find_built_slot_operator(reduced))
		if matches:
			found[key] = matches
	return found
Beispiel #20
0
    def testDNFConvert(self):
        """dnf_convert() should expand nested any-of/all-of structures
        into disjunctive normal form, distributing conjunctions across
        || groups only where they interact."""

        test_cases = (
            (
                "|| ( A B ) || ( C D )",
                [["||", ["A", "C"], ["A", "D"], ["B", "C"], ["B", "D"]]],
            ),
            (
                "|| ( A B ) || ( B C )",
                [["||", ["A", "B"], ["A", "C"], ["B", "B"], ["B", "C"]]],
            ),
            (
                "|| ( A ( B C D ) )",
                [["||", "A", ["B", "C", "D"]]],
            ),
            (
                "|| ( A ( B C D ) ) E",
                [["||", ["E", "A"], ["E", "B", "C", "D"]]],
            ),
            (
                "|| ( A ( B C ) ) || ( D E ) F",
                [[
                    "||",
                    ["F", "A", "D"],
                    ["F", "A", "E"],
                    ["F", "B", "C", "D"],
                    ["F", "B", "C", "E"],
                ]],
            ),
            (
                "|| ( A ( B C || ( D E ) ) ( F G ) H )",
                [[
                    "||", "A", ["B", "C", "D"], ["B", "C", "E"], ["F", "G"],
                    "H"
                ]],
            ),
            (
                "|| ( A ( B C || ( D E ) ) F )",
                [["||", "A", ["B", "C", "D"], ["B", "C", "E"], "F"]],
            ),
            (
                "|| ( A ( C || ( D E ) || ( F G ) ) H )",
                [[
                    "||",
                    "A",
                    ["C", "D", "F"],
                    ["C", "D", "G"],
                    ["C", "E", "F"],
                    ["C", "E", "G"],
                    "H",
                ]],
            ),
        )

        for dep_str, result in test_cases:
            self.assertEqual(dnf_convert(use_reduce(dep_str, opconvert=True)),
                             result)
Beispiel #21
0
	def homepage_urischeme(self, **kwargs):
		"""QA check: flag HOMEPAGE entries that lack a URI scheme.

		virtual/ packages are exempt. Always returns False (non-fatal).
		"""
		ebuild = kwargs.get('ebuild').get()
		if kwargs.get('catdir') == "virtual":
			return False
		homepages = use_reduce(ebuild.metadata["HOMEPAGE"],
			matchall=True, flat=True)
		for homepage in homepages:
			if URISCHEME_RE.match(homepage) is None:
				self.qatracker.add_error(
					"HOMEPAGE.missingurischeme", ebuild.relative_path)
		return False
Beispiel #22
0
	def homepage_urischeme(self, **kwargs):
		"""QA check: flag HOMEPAGE entries that lack a URI scheme.

		virtual/ packages are exempt. Always returns False (non-fatal).
		"""
		ebuild = kwargs.get('ebuild').get()
		if kwargs.get('catdir') != "virtual":
			for homepage in use_reduce(ebuild.metadata["HOMEPAGE"],
				matchall=True,flat=True):
				if URISCHEME_RE.match(homepage) is None:
					self.qatracker.add_error(
						"HOMEPAGE.missingurischeme", ebuild.relative_path)
		return False
Beispiel #23
0
def native(settings, mydbapi=None):
    """Take a Portage settings object and turn it into an APK.

    Surprisingly less difficult than it sounds, but surprisingly more difficult
    than it appears on second glance.

    :param settings:
        A Portage settings object.

    :param mydbapi:
        A Portage DBAPI object for the package.
    """
    params = {}

    # Virtual packages have no contents to package.
    if settings['CATEGORY'] == 'virtual':
        _fatal("Won't make an APK for a virtual/ package.")
        sys.exit(-1)

    params['name'] = _maybe_xlat(settings['PN'], settings['CATEGORY'])
    # Non-default SLOTs become part of the APK name so multiple slots can
    # coexist; sub-slot (after '/') is ignored.
    if 'SLOT' in settings and not settings['SLOT'].startswith('0/') and\
       settings['SLOT'] != '0':
        slot = settings['SLOT'].split('/')[0]
        params['name'] += slot
    params['version'] = settings['PVR']  # include -rX if necessary
    params['arch'] = ARCH_MAP.get(settings['ARCH'], settings['ARCH'])
    # NOTE(review): this initial 'provides' value is unconditionally
    # overwritten by _maybe_package_provides() below — looks like dead
    # code; confirm before removing.
    params['provides'] = list()

    cpv = '%s/%s' % (settings['CATEGORY'], settings['PF'])
    if mydbapi is None or not mydbapi.cpv_exists(cpv):
        _fatal('CPV does not exist or DBAPI is missing')
        sys.exit(-1)

    desc, url = mydbapi.aux_get(cpv, ('DESCRIPTION', 'HOMEPAGE'))
    params['description'] = desc
    params['url'] = url

    # Reduce RDEPEND with the build-time USE flags; opconvert keeps ||
    # groups as nested lists so they can be detected below.
    run_deps = use_reduce(mydbapi.aux_get(cpv, ('RDEPEND', )),
                          uselist=settings['USE'],
                          opconvert=True,
                          token_class=Atom,
                          eapi=settings['EAPI'])

    # Any surviving nested list means || / conditional groups remain,
    # which this simple translation can't express.
    if any([isinstance(dep, list) for dep in run_deps]):
        run_deps = _deps_need_an_adult(params['name'], settings['PVR'],
                                       settings['EAPI'])

    params['depends'] = list(
        filterfalse(lambda x: x is None, map(_translate_dep, run_deps)))

    params['provides'] = _maybe_package_provides(settings, params['name'])

    package = Package(**params)
    out_path = os.path.join(settings.get('PKG_DIR', settings['PKGDIR']))
    # NOTE(review): APKFile.create() is called for its side effect of
    # writing the package file; the returned object is unused.
    apk = APKFile.create(package, settings['D'], out_path=out_path)

    return 0
Beispiel #24
0
    def getMissingLicenses(self, cpv, use, lic, slot, repo):
        """
		Take a LICENSE string and return a list of any licenses that the user
		may need to accept for the given package.  The returned list will not
		contain any licenses that have already been accepted.  This method
		can throw an InvalidDependString exception.

		@param cpv: The package name (for package.license support)
		@type cpv: String
		@param use: "USE" from the cpv's metadata
		@type use: String
		@param lic: "LICENSE" from the cpv's metadata
		@type lic: String
		@param slot: "SLOT" from the cpv's metadata
		@type slot: String
		@rtype: List
		@return: A list of licenses that have not been accepted.
		"""

        # All licenses that could possibly apply, ignoring conditionals.
        licenses = set(use_reduce(lic, matchall=1, flat=True))
        licenses.discard('||')

        # Apply ACCEPT_LICENSE tokens in order; later tokens override
        # earlier ones ('*' accepts all, '-*' clears, '-x' removes x).
        acceptable_licenses = set()
        for x in self._getPkgAcceptLicense(cpv, slot, repo):
            if x == '*':
                acceptable_licenses.update(licenses)
            elif x == '-*':
                acceptable_licenses.clear()
            elif x[:1] == '-':
                acceptable_licenses.discard(x[1:])
            else:
                acceptable_licenses.add(x)

        license_str = lic
        # Only split USE when conditionals are present; otherwise pass an
        # empty uselist to avoid needless evaluation.
        if "?" in license_str:
            use = use.split()
        else:
            use = []

        license_struct = use_reduce(license_str, uselist=use, opconvert=True)
        return self._getMaskedLicenses(license_struct, acceptable_licenses)
Beispiel #25
0
	def getMissingLicenses(self, cpv, use, lic, slot):
		"""
		Take a LICENSE string and return a list of any licenses that the
		user may need to accept for the given package.  The returned list
		will not contain any licenses that have already been accepted.
		This method can throw an InvalidDependString exception.

		@param cpv: The package name (for package.license support)
		@type cpv: String
		@param use: "USE" from the cpv's metadata
		@type use: String
		@param lic: "LICENSE" from the cpv's metadata
		@type lic: String
		@param slot: "SLOT" from the cpv's metadata
		@type slot: String
		@rtype: List
		@return: A list of licenses that have not been accepted.
		"""

		# All licenses that could possibly apply, ignoring conditionals.
		licenses = set(use_reduce(lic, matchall=1, flat=True))
		licenses.discard('||')

		# Apply ACCEPT_LICENSE tokens in order; later tokens override
		# earlier ones ('*' accepts all, '-*' clears, '-x' removes x).
		acceptable_licenses = set()
		for x in self._getPkgAcceptLicense(cpv, slot):
			if x == '*':
				acceptable_licenses.update(licenses)
			elif x == '-*':
				acceptable_licenses.clear()
			elif x[:1] == '-':
				acceptable_licenses.discard(x[1:])
			else:
				acceptable_licenses.add(x)

		license_str = lic
		# Only split USE when conditionals are present; otherwise pass an
		# empty uselist to avoid needless evaluation.
		if "?" in license_str:
			use = use.split()
		else:
			use = []

		license_struct = use_reduce(license_str, uselist=use, opconvert=True)
		return self._getMaskedLicenses(license_struct, acceptable_licenses)
Beispiel #26
0
def load_unpack_dependencies_configuration(repositories):
    """Read unpack_dependencies profile files for every repository.

    For each repository with profiles and each EAPI supporting automatic
    unpack dependencies, parse
    <repo>/profiles/unpack_dependencies/<eapi> into a mapping of archive
    suffix -> dependency string, then merge entries inherited from master
    repositories into each repo's result.

    @param repositories: repository config providing repos_with_profiles()
    @return: {repo_name: {eapi: {suffix: depend_string}}}
    """
    repo_dict = {}
    for repo in repositories.repos_with_profiles():
        for eapi in _supported_eapis:
            if eapi_has_automatic_unpack_dependencies(eapi):
                file_name = os.path.join(repo.location, "profiles",
                                         "unpack_dependencies", eapi)
                lines = grabfile(file_name, recursive=True)
                for line in lines:
                    elements = line.split()
                    suffix = elements[0].lower()
                    if len(elements) == 1:
                        writemsg(
                            _("--- Missing unpack dependencies for '%s' suffix in '%s'\n"
                              ) % (suffix, file_name))
                    depend = " ".join(elements[1:])
                    try:
                        use_reduce(depend, eapi=eapi)
                    except InvalidDependString as e:
                        # Format AFTER translation so gettext receives the
                        # raw msgid (the original formatted inside _(),
                        # defeating the message-catalog lookup).
                        writemsg(
                            _("--- Invalid unpack dependencies for '%s' suffix in '%s': '%s'\n"
                              ) % (suffix, file_name, e))
                    else:
                        repo_dict.setdefault(repo.name, {}).setdefault(
                            eapi, {})[suffix] = depend

    # Merge master-repo entries; a repo's own entries are applied last
    # and therefore win on conflicts.
    ret = {}
    for repo in repositories.repos_with_profiles():
        for repo_name in [x.name for x in repo.masters] + [repo.name]:
            for eapi in repo_dict.get(repo_name, {}):
                for suffix, depend in (repo_dict.get(repo_name,
                                                     {}).get(eapi,
                                                             {}).items()):
                    ret.setdefault(repo.name,
                                   {}).setdefault(eapi, {})[suffix] = depend

    return ret
    def testDNFConvert(self):
        """dnf_convert() should expand nested any-of/all-of structures
        into disjunctive normal form, distributing conjunctions across
        || groups only where they interact."""

        test_cases = (
            (
                '|| ( A B ) || ( C D )',
                [['||', ['A', 'C'], ['A', 'D'], ['B', 'C'], ['B', 'D']]],
            ),
            (
                '|| ( A B ) || ( B C )',
                [['||', ['A', 'B'], ['A', 'C'], ['B', 'B'], ['B', 'C']]],
            ),
            (
                '|| ( A ( B C D ) )',
                [['||', 'A', ['B', 'C', 'D']]],
            ),
            (
                '|| ( A ( B C D ) ) E',
                [['||', ['E', 'A'], ['E', 'B', 'C', 'D']]],
            ),
            (
                '|| ( A ( B C ) ) || ( D E ) F',
                [[
                    '||', ['F', 'A', 'D'], ['F', 'A', 'E'],
                    ['F', 'B', 'C', 'D'], ['F', 'B', 'C', 'E']
                ]],
            ),
            (
                '|| ( A ( B C || ( D E ) ) ( F G ) H )',
                [[
                    '||', 'A', ['B', 'C', 'D'], ['B', 'C', 'E'], ['F', 'G'],
                    'H'
                ]],
            ),
            (
                '|| ( A ( B C || ( D E ) ) F )',
                [['||', 'A', ['B', 'C', 'D'], ['B', 'C', 'E'], 'F']],
            ),
            (
                '|| ( A ( C || ( D E ) || ( F G ) ) H )',
                [[
                    '||', 'A', ['C', 'D', 'F'], ['C', 'D', 'G'],
                    ['C', 'E', 'F'], ['C', 'E', 'G'], 'H'
                ]],
            ),
        )

        for dep_str, result in test_cases:
            self.assertEqual(dnf_convert(use_reduce(dep_str, opconvert=True)),
                             result)
	def __setitem__(self, k, v):
		"""Store metadata value and run per-key hooks.

		Wrapped keys dispatch to a _set_<key>() handler; USE-conditional
		keys are syntax-checked with use_reduce(), and PROVIDE atoms are
		additionally validated, registering invalid metadata on errors.
		"""
		_PackageMetadataWrapperBase.__setitem__(self, k, v)
		if k in self._wrapped_keys:
			getattr(self, "_set_" + k.lower())(k, v)
		elif k in self._use_conditional_keys:
			try:
				reduced = use_reduce(paren_reduce(v), matchall=1)
			except portage.exception.InvalidDependString as e:
				self._pkg._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
			else:
				if reduced and k == 'PROVIDE':
					# Every token of PROVIDE must be a valid atom.
					for x in portage.flatten(reduced):
						if not isvalidatom(x):
							self._pkg._invalid_metadata(k + ".syntax",
								"%s: %s" % (k, x))
Beispiel #29
0
 def __setitem__(self, k, v):
     """Store metadata value and run per-key hooks.

     Wrapped keys dispatch to a _set_<key>() handler; USE-conditional
     keys are syntax-checked with use_reduce(), and PROVIDE atoms are
     additionally validated, registering invalid metadata on errors.
     """
     _PackageMetadataWrapperBase.__setitem__(self, k, v)
     if k in self._wrapped_keys:
         getattr(self, "_set_" + k.lower())(k, v)
     elif k in self._use_conditional_keys:
         try:
             reduced = use_reduce(paren_reduce(v), matchall=1)
         except portage.exception.InvalidDependString as e:
             self._pkg._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
         else:
             if reduced and k == 'PROVIDE':
                 # Every token of PROVIDE must be a valid atom.
                 for x in portage.flatten(reduced):
                     if not isvalidatom(x):
                         self._pkg._invalid_metadata(
                             k + ".syntax", "%s: %s" % (k, x))
Beispiel #30
0
	def _eval_use_flags(self, cpv, metadata):
		"""Evaluate USE conditionals in *metadata*'s dependency-like keys,
		in place.

		Each key in self._pkgindex_use_evaluated_keys is reduced against
		the package's USE flags and re-serialized; *DEPEND keys are parsed
		with Atom tokens for validation. Invalid dep strings are logged
		and re-raised.
		"""
		# Tolerate a missing USE key instead of raising KeyError.
		use = frozenset(metadata.get("USE", "").split())
		for k in self._pkgindex_use_evaluated_keys:
			# *DEPEND values get full atom validation during reduction.
			if k.endswith('DEPEND'):
				token_class = Atom
			else:
				token_class = None

			# Skip keys absent from the metadata rather than crashing
			# with KeyError on sparse entries.
			deps = metadata.get(k)
			if deps is None:
				continue
			try:
				deps = use_reduce(deps, uselist=use, token_class=token_class)
				deps = paren_enclose(deps)
			except portage.exception.InvalidDependString as e:
				writemsg("%s: %s\n" % (k, e), noiselevel=-1)
				raise
			metadata[k] = deps
Beispiel #31
0
	def _eval_use_flags(self, cpv, metadata):
		"""Evaluate USE conditionals in *metadata*'s dependency-like keys,
		in place; missing keys are skipped, invalid dep strings are
		logged and re-raised."""
		use = frozenset(metadata.get("USE", "").split())
		for k in self._pkgindex_use_evaluated_keys:
			# *DEPEND values get full atom validation during reduction.
			if k.endswith('DEPEND'):
				token_class = Atom
			else:
				token_class = None

			# Skip keys absent from the metadata (sparse entries).
			deps = metadata.get(k)
			if deps is None:
				continue
			try:
				deps = use_reduce(deps, uselist=use, token_class=token_class)
				deps = paren_enclose(deps)
			except portage.exception.InvalidDependString as e:
				writemsg("%s: %s\n" % (k, e), noiselevel=-1)
				raise
			metadata[k] = deps
Beispiel #32
0
	def testOverlapDNF(self):
		"""_overlap_dnf() should DNF-convert only the || groups that share
		a common atom (cat/B below); disjoint groups are returned in
		their original any-of form."""

		test_cases = (
			(
				'|| ( cat/A cat/B ) cat/E || ( cat/C cat/D )',
				[['||', 'cat/A', 'cat/B'], 'cat/E', ['||', 'cat/C', 'cat/D']],
			),
			(
				'|| ( cat/A cat/B ) cat/D || ( cat/B cat/C )',
				['cat/D', ['||', ['cat/A', 'cat/B'], ['cat/A', 'cat/C'], ['cat/B', 'cat/B'], ['cat/B', 'cat/C']]],
			),
			(
				'|| ( cat/A cat/B ) || ( cat/C cat/D )  || ( ( cat/B cat/E ) cat/F )',
				[['||', ['cat/A', 'cat/B', 'cat/E'], ['cat/A', 'cat/F'], ['cat/B', 'cat/B', 'cat/E'], ['cat/B', 'cat/F']], ['||', 'cat/C', 'cat/D']],
			),
		)

		for dep_str, result in test_cases:
			self.assertEqual(_overlap_dnf(use_reduce(dep_str, token_class=Atom, opconvert=True)), result)
Beispiel #33
0
	def __getitem__(self, k):
		"""Return metadata value *k*, with lazy post-processing.

		USE-conditional keys have their '?' conditionals evaluated against
		the package's enabled USE flags (local configs only) and the
		result cached back; USE itself is computed lazily for unbuilt
		packages.
		"""
		v = _PackageMetadataWrapperBase.__getitem__(self, k)
		if k in self._use_conditional_keys:
			if self._pkg.root_config.settings.local_config and '?' in v:
				try:
					v = paren_enclose(use_reduce(v, uselist=self._pkg.use.enabled, \
						is_valid_flag=self._pkg.iuse.is_valid_flag))
				except InvalidDependString:
					# This error should already have been registered via
					# self._pkg._invalid_metadata().
					pass
				else:
					# Cache the evaluated value for subsequent lookups.
					self[k] = v

		elif k == 'USE' and not self._pkg.built:
			if not v:
				# This is lazy because it's expensive.
				v = self._pkg._init_use()

		return v
Beispiel #34
0
def getDependencies(cur_overlay, catpkgs, levels=0, cur_level=0):
	"""Collect the package keys (cat/pkg) that *catpkgs* depend on.

	Builds a throwaway portdbapi pointed at *cur_overlay* and walks the
	DEPEND/RDEPEND of every matching ebuild, optionally recursing.

	@param cur_overlay: overlay object providing .root (and .name fallback)
	@param catpkgs: iterable of category/package names to start from
	@param levels: how many levels of transitive deps to follow (0 = none)
	@param cur_level: current recursion depth (internal)
	@return: set of dependency package keys (cat/pkg strings)
	"""
	cur_tree = cur_overlay.root
	try:
		with open(os.path.join(cur_tree, 'profiles/repo_name')) as f:
			cur_name = f.readline().strip()
	except FileNotFoundError:
		cur_name = cur_overlay.name
	env = os.environ.copy()
	env['PORTAGE_REPOSITORIES'] = '''
[DEFAULT]
main-repo = %s 

[%s]
location = %s
''' % (cur_name, cur_name, cur_tree)
	p = portage.portdbapi(mysettings=portage.config(env=env, config_profile_path=''))
	p.frozen = False
	mypkgs = set()
	for catpkg in list(catpkgs):
		for pkg in p.cp_list(catpkg):
			if pkg == '':
				print("No match for %s" % catpkg)
				continue
			try:
				aux = p.aux_get(pkg, ["DEPEND", "RDEPEND"])
			except PortageKeyError:
				print("Portage key error for %s" % repr(pkg))
				return mypkgs
			for dep in flatten(use_reduce(aux[0] + " " + aux[1], matchall=True)):
				# Skip blockers (!cat/pkg) and empty tokens.
				if len(dep) and dep[0] == "!":
					continue
				try:
					mypkg = dep_getkey(dep)
				except portage.exception.InvalidAtom:
					continue
				if mypkg not in mypkgs:
					mypkgs.add(mypkg)
				if levels != cur_level:
					# BUG FIX: wrap mypkg in a list. Passing the bare
					# string made the recursive call iterate over its
					# characters instead of one package name.
					mypkgs = mypkgs.union(getDependencies(
						cur_overlay, [mypkg],
						levels=levels, cur_level=cur_level + 1))
	return mypkgs
Beispiel #35
0
    def testOverlapDNF(self):
        """Verify _overlap_dnf expands only OR groups that share atoms."""
        cases = (
            # Disjoint OR groups pass through unchanged.
            (
                "|| ( cat/A cat/B ) cat/E || ( cat/C cat/D )",
                [["||", "cat/A", "cat/B"], "cat/E", ["||", "cat/C", "cat/D"]],
            ),
            # Groups sharing cat/B get combined into DNF.
            (
                "|| ( cat/A cat/B ) cat/D || ( cat/B cat/C )",
                [
                    "cat/D",
                    [
                        "||",
                        ["cat/A", "cat/B"],
                        ["cat/A", "cat/C"],
                        ["cat/B", "cat/B"],
                        ["cat/B", "cat/C"],
                    ],
                ],
            ),
            # Only the overlapping pair expands; the disjoint group stays.
            (
                "|| ( cat/A cat/B ) || ( cat/C cat/D )  || ( ( cat/B cat/E ) cat/F )",
                [
                    [
                        "||",
                        ["cat/A", "cat/B", "cat/E"],
                        ["cat/A", "cat/F"],
                        ["cat/B", "cat/B", "cat/E"],
                        ["cat/B", "cat/F"],
                    ],
                    ["||", "cat/C", "cat/D"],
                ],
            ),
        )

        for depstring, expected in cases:
            parsed = use_reduce(depstring, token_class=Atom, opconvert=True)
            self.assertEqual(_overlap_dnf(parsed), expected)
Beispiel #36
0
	def testDNFConvert(self):
		"""Verify dnf_convert() output for a range of OR structures."""
		cases = (
			(
				'|| ( A B ) || ( C D )',
				[['||', ['A', 'C'], ['A', 'D'], ['B', 'C'], ['B', 'D']]],
			),
			(
				'|| ( A B ) || ( B C )',
				[['||', ['A', 'B'], ['A', 'C'], ['B', 'B'], ['B', 'C']]],
			),
			(
				'|| ( A ( B C D ) )',
				[['||', 'A', ['B', 'C', 'D']]],
			),
			(
				'|| ( A ( B C D ) ) E',
				[['||', ['E', 'A'], ['E', 'B', 'C', 'D']]],
			),
			(
				'|| ( A ( B C ) ) || ( D E ) F',
				[['||', ['F', 'A', 'D'], ['F', 'A', 'E'], ['F', 'B', 'C', 'D'], ['F', 'B', 'C', 'E']]],
			),
			(
				'|| ( A ( B C || ( D E ) ) ( F G ) H )',
				[['||', 'A', ['B', 'C', 'D'], ['B', 'C', 'E'], ['F', 'G'], 'H']],
			),
			(
				'|| ( A ( B C || ( D E ) ) F )',
				[['||', 'A', ['B', 'C', 'D'], ['B', 'C', 'E'], 'F']],
			),
			(
				'|| ( A ( C || ( D E ) || ( F G ) ) H )',
				[['||', 'A', ['C', 'D', 'F'], ['C', 'D', 'G'], ['C', 'E', 'F'], ['C', 'E', 'G'], 'H']],
			),
		)

		for depstring, expected in cases:
			parsed = use_reduce(depstring, opconvert=True)
			self.assertEqual(dnf_convert(parsed), expected)
Beispiel #37
0
	def _eval_use_flags(self, cpv, metadata):
		"""Normalize USE and evaluate USE-conditional keys in *metadata*.

		USE is filtered down to flags listed in IUSE and stored back
		sorted. Every key in self._pkgindex_use_evaluated_keys is then
		reduced against the original (unfiltered) USE set and written
		back as a flat string.

		Raises portage.exception.InvalidDependString (after logging)
		when a value fails to parse.
		"""
		raw_use = frozenset(metadata["USE"].split())
		iuse = {flag.lstrip("-+") for flag in metadata["IUSE"].split()}
		# Keep only IUSE-listed flags, in sorted order.
		metadata["USE"] = " ".join(sorted(raw_use & iuse))

		for key in self._pkgindex_use_evaluated_keys:
			# *DEPEND values are made of atoms; other keys are plain tokens.
			token_class = Atom if key.endswith('DEPEND') else None
			try:
				flattened = paren_enclose(use_reduce(metadata[key],
					uselist=raw_use, token_class=token_class))
			except portage.exception.InvalidDependString as e:
				writemsg("%s: %s\n" % (key, str(e)),
					noiselevel=-1)
				raise
			metadata[key] = flattened
	def __getitem__(self, k):
		"""Return metadata value *k*, lazily evaluating USE conditionals
		and lazily computing USE for non-built packages.
		"""
		value = _PackageMetadataWrapperBase.__getitem__(self, k)

		if k in self._use_conditional_keys:
			settings = self._pkg.root_config.settings
			if settings.local_config and '?' in value:
				try:
					evaluated = paren_enclose(paren_normalize(use_reduce(
						paren_reduce(value),
						uselist=self._pkg.use.enabled)))
				except portage.exception.InvalidDependString:
					# This error should already have been registered via
					# self._pkg._invalid_metadata().
					pass
				else:
					value = evaluated
					self[k] = value
		elif k == 'USE' and not self._pkg.built and not value:
			# Computing USE is expensive, so defer it until requested.
			pkgsettings = self._pkg.root_config.trees[
				'porttree'].dbapi.doebuild_settings
			pkgsettings.setcpv(self._pkg)
			value = pkgsettings["PORTAGE_USE"]
			self['USE'] = value

		return value
Beispiel #39
0
	def aux_get_done(gather_result):
		"""Completion callback for the gathered aux_get()/getFetchMap()
		futures of one cpv: builds the list of FetchTask objects and
		stores it in the enclosing ``result`` future.

		NOTE(review): relies on closure variables from the enclosing
		scope outside this view (result, fetch_tasks, cpv, config,
		digests_future, repo_config, hash_filter) -- confirm against
		the enclosing function.
		"""
		# All exceptions must be consumed from gather_result before this
		# function returns, in order to avoid triggering the event loop's
		# exception handler.
		if not gather_result.cancelled():
			list(future.exception() for future in gather_result.result()
				if not future.cancelled())
		else:
			result.cancel()

		if result.cancelled():
			return

		aux_get_result, fetch_map_result = gather_result.result()
		if aux_get_result.cancelled() or fetch_map_result.cancelled():
			# Cancel result after consuming any exceptions which
			# are now irrelevant due to cancellation.
			aux_get_result.cancelled() or aux_get_result.exception()
			fetch_map_result.cancelled() or fetch_map_result.exception()
			result.cancel()
			return

		try:
			restrict, = aux_get_result.result()
		except (PortageKeyError, PortageException) as e:
			config.log_failure("%s\t\taux_get exception %s" %
				(cpv, e))
			result.set_result(fetch_tasks)
			return

		# Here we use matchnone=True to ignore conditional parts
		# of RESTRICT since they don't apply unconditionally.
		# Assume such conditionals only apply on the client side.
		try:
			restrict = frozenset(use_reduce(restrict,
				flat=True, matchnone=True))
		except PortageException as e:
			config.log_failure("%s\t\tuse_reduce exception %s" %
				(cpv, e))
			result.set_result(fetch_tasks)
			return

		# RESTRICT=fetch: nothing may be downloaded for this package.
		if "fetch" in restrict:
			result.set_result(fetch_tasks)
			return

		try:
			uri_map = fetch_map_result.result()
		except PortageException as e:
			config.log_failure("%s\t\tgetFetchMap exception %s" %
				(cpv, e))
			result.set_result(fetch_tasks)
			return

		# No distfiles at all -> nothing to fetch.
		if not uri_map:
			result.set_result(fetch_tasks)
			return

		if "mirror" in restrict:
			# RESTRICT=mirror: skip unless an exempted mirror host
			# appears in one of the mirror:// URIs.
			skip = False
			if config.restrict_mirror_exemptions is not None:
				new_uri_map = {}
				for filename, uri_tuple in uri_map.items():
					for uri in uri_tuple:
						if uri[:9] == "mirror://":
							# Extract the mirror name between "mirror://"
							# and the next "/" and test it for exemption.
							i = uri.find("/", 9)
							if i != -1 and uri[9:i].strip("/") in \
								config.restrict_mirror_exemptions:
								new_uri_map[filename] = uri_tuple
								break
				if new_uri_map:
					uri_map = new_uri_map
				else:
					skip = True
			else:
				skip = True

			if skip:
				result.set_result(fetch_tasks)
				return

		# Parse Manifest for this cp if we haven't yet.
		try:
			if digests_future.done():
				# If there's an exception then raise it.
				digests = digests_future.result()
			else:
				digests = repo_config.load_manifest(
					os.path.join(repo_config.location, cpv.cp)).\
					getTypeDigests("DIST")
		except (EnvironmentError, PortageException) as e:
			# Cache the failure so siblings don't re-parse; record a
			# failure for every distfile of this cpv.
			digests_future.done() or digests_future.set_exception(e)
			for filename in uri_map:
				config.log_failure(
					"%s\t%s\tManifest exception %s" %
					(cpv, filename, e))
				config.file_failures[filename] = cpv
			result.set_result(fetch_tasks)
			return
		else:
			digests_future.done() or digests_future.set_result(digests)

		if not digests:
			for filename in uri_map:
				config.log_failure("%s\t%s\tdigest entry missing" %
					(cpv, filename))
				config.file_failures[filename] = cpv
			result.set_result(fetch_tasks)
			return

		for filename, uri_tuple in uri_map.items():
			file_digests = digests.get(filename)
			if file_digests is None:
				config.log_failure("%s\t%s\tdigest entry missing" %
					(cpv, filename))
				config.file_failures[filename] = cpv
				continue
			# First cpv seen for a distfile becomes its owner.
			if filename in config.file_owners:
				continue
			config.file_owners[filename] = cpv

			file_digests = \
				_filter_unaccelarated_hashes(file_digests)
			if hash_filter is not None:
				file_digests = _apply_hash_filter(
					file_digests, hash_filter)

			fetch_tasks.append(FetchTask(
				cpv=cpv,
				background=True,
				digests=file_digests,
				distfile=filename,
				restrict=restrict,
				uri_tuple=uri_tuple,
				config=config))

		result.set_result(fetch_tasks)
Beispiel #40
0
def digestgen(myarchives=None, mysettings=None, myportdb=None):
    """
    Generates a digest file if missing. Fetches files if necessary.
    NOTE: myarchives and mysettings used to be positional arguments,
        so their order must be preserved for backward compatibility.

    @param myarchives: unused; retained for backward compatibility
    @param mysettings: the ebuild config (mysettings["O"] must correspond
        to the ebuild's parent directory)
    @type mysettings: config
    @param myportdb: a portdbapi instance
    @type myportdb: portdbapi
    @rtype: int
    @return: 1 on success and 0 on failure
    """
    if mysettings is None or myportdb is None:
        raise TypeError(
            "portage.digestgen(): 'mysettings' and 'myportdb' parameter are required."
        )

    try:
        # Suppress Manifest-checking of dependencies while we regenerate.
        portage._doebuild_manifest_exempt_depend += 1
        # Map each distfile name to the list of cpvs that fetch it.
        distfiles_map = {}
        fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
        for cpv in fetchlist_dict:
            try:
                for myfile in fetchlist_dict[cpv]:
                    distfiles_map.setdefault(myfile, []).append(cpv)
            except InvalidDependString as e:
                writemsg("!!! %s\n" % str(e), noiselevel=-1)
                del e
                return 0
        mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
        try:
            mf = mysettings.repositories.get_repo_for_location(mytree)
        except KeyError:
            # backward compatibility
            mytree = os.path.realpath(mytree)
            mf = mysettings.repositories.get_repo_for_location(mytree)

        repo_required_hashes = mf.manifest_required_hashes
        if repo_required_hashes is None:
            repo_required_hashes = MANIFEST2_HASH_DEFAULTS
        mf = mf.load_manifest(mysettings["O"],
                              mysettings["DISTDIR"],
                              fetchlist_dict=fetchlist_dict)

        if not mf.allow_create:
            writemsg_stdout(
                _(">>> Skipping creating Manifest for %s; "
                  "repository is configured to not use them\n") %
                mysettings["O"])
            return 1

        # Don't require all hashes since that can trigger excessive
        # fetches when sufficient digests already exist.  To ease transition
        # while Manifest 1 is being removed, only require hashes that will
        # exist before and after the transition.
        required_hash_types = set()
        required_hash_types.add("size")
        required_hash_types.update(repo_required_hashes)
        dist_hashes = mf.fhashdict.get("DIST", {})

        # To avoid accidental regeneration of digests with the incorrect
        # files (such as partially downloaded files), trigger the fetch
        # code if the file exists and it's size doesn't match the current
        # manifest entry. If there really is a legitimate reason for the
        # digest to change, `ebuild --force digest` can be used to avoid
        # triggering this code (or else the old digests can be manually
        # removed from the Manifest).
        missing_files = []
        for myfile in distfiles_map:
            myhashes = dist_hashes.get(myfile)
            if not myhashes:
                # No Manifest entry: fetch unless a non-empty local
                # copy already exists.
                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None
                if st is None or st.st_size == 0:
                    missing_files.append(myfile)
                continue
            size = myhashes.get("size")

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
                # File absent locally: fetch if the Manifest records a
                # zero size or lacks any required hash type.
                if size == 0:
                    missing_files.append(myfile)
                    continue
                if required_hash_types.difference(myhashes):
                    missing_files.append(myfile)
                    continue
            else:
                # File present: re-fetch if empty or size mismatches
                # the Manifest entry (possible partial download).
                if st.st_size == 0 or size is not None and size != st.st_size:
                    missing_files.append(myfile)
                    continue

        for myfile in missing_files:
            uris = set()
            all_restrict = set()
            for cpv in distfiles_map[myfile]:
                uris.update(myportdb.getFetchMap(cpv, mytree=mytree)[myfile])
                restrict = myportdb.aux_get(cpv, ['RESTRICT'],
                                            mytree=mytree)[0]
                # Here we ignore conditional parts of RESTRICT since
                # they don't apply unconditionally. Assume such
                # conditionals only apply on the client side where
                # digestgen() does not need to be called.
                all_restrict.update(
                    use_reduce(restrict, flat=True, matchnone=True))

                # fetch() uses CATEGORY and PF to display a message
                # when fetch restriction is triggered.
                cat, pf = catsplit(cpv)
                mysettings["CATEGORY"] = cat
                mysettings["PF"] = pf

            # fetch() uses PORTAGE_RESTRICT to control fetch
            # restriction, which is only applied to files that
            # are not fetchable via a mirror:// URI.
            mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError:
                st = None

            if not fetch({myfile: uris}, mysettings):
                # NOTE(review): cpv here is the last value left over
                # from the loop above -- confirm this is intended.
                myebuild = os.path.join(mysettings["O"],
                                        catsplit(cpv)[1] + ".ebuild")
                spawn_nofetch(myportdb, myebuild)
                writemsg(
                    _("!!! Fetch failed for %s, can't update Manifest\n") %
                    myfile,
                    noiselevel=-1)
                if myfile in dist_hashes and \
                 st is not None and st.st_size > 0:
                    # stat result is obtained before calling fetch(),
                    # since fetch may rename the existing file if the
                    # digest does not match.
                    cmd = colorize(
                        "INFORM", "ebuild --force %s manifest" %
                        os.path.basename(myebuild))
                    writemsg((_(
                        "!!! If you would like to forcefully replace the existing Manifest entry\n"
                        "!!! for %s, use the following command:\n") % myfile) +
                             "!!!    %s\n" % cmd,
                             noiselevel=-1)
                return 0

        writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
        try:
            mf.create(assumeDistHashesSometimes=True,
                      assumeDistHashesAlways=("assume-digests"
                                              in mysettings.features))
        except FileNotFound as e:
            writemsg(_("!!! File %s doesn't exist, can't update Manifest\n") %
                     e,
                     noiselevel=-1)
            return 0
        except PortagePackageException as e:
            writemsg(("!!! %s\n") % (e, ), noiselevel=-1)
            return 0
        try:
            mf.write(sign=False)
        except PermissionDenied as e:
            writemsg(_("!!! Permission Denied: %s\n") % (e, ), noiselevel=-1)
            return 0
        if "assume-digests" not in mysettings.features:
            # Report which Manifest entries were written without a
            # local distfile to hash (i.e. assumed).
            distlist = list(mf.fhashdict.get("DIST", {}))
            distlist.sort()
            auto_assumed = []
            for filename in distlist:
                if not os.path.exists(
                        os.path.join(mysettings["DISTDIR"], filename)):
                    auto_assumed.append(filename)
            if auto_assumed:
                cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
                pkgs = myportdb.cp_list(cp, mytree=mytree)
                pkgs.sort()
                writemsg_stdout("  digest.assumed" +
                                colorize("WARN",
                                         str(len(auto_assumed)).rjust(18)) +
                                "\n")
                for pkg_key in pkgs:
                    fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
                    pv = pkg_key.split("/")[1]
                    for filename in auto_assumed:
                        if filename in fetchlist:
                            writemsg_stdout("   %s::%s\n" % (pv, filename))
        return 1
    finally:
        portage._doebuild_manifest_exempt_depend -= 1
Beispiel #41
0
	def _validate_deps(self):
		"""
		Validate deps. This does not trigger USE calculation since that
		is expensive for ebuilds and therefore we want to avoid doing
		it unnecessarily (like for masked packages).

		Syntax problems are registered via self._metadata_exception()
		or self._invalid_metadata(); nothing is raised from here.
		"""
		eapi = self.metadata['EAPI']
		if self.installed:
			# Ignore EAPI.incompatible and conditionals missing
			# from IUSE for installed packages since these issues
			# aren't relevant now (re-evaluate when new EAPIs are
			# deployed).
			dep_eapi = None
			dep_valid_flag = None
		else:
			dep_eapi = eapi
			dep_valid_flag = self.iuse.is_valid_flag

		# Dependency keys contain atoms.
		for key in self._dep_keys:
			value = self.metadata.get(key)
			if not value:
				continue
			try:
				use_reduce(value, eapi=dep_eapi, matchall=True,
					is_valid_flag=dep_valid_flag, token_class=Atom)
			except InvalidDependString as e:
				self._metadata_exception(key, e)

		provide = self.metadata.get('PROVIDE')
		if provide:
			try:
				use_reduce(provide, eapi=dep_eapi, matchall=True,
					is_valid_flag=dep_valid_flag, token_class=Atom)
			except InvalidDependString as e:
				self._invalid_metadata("PROVIDE.syntax",
					_unicode_decode("%s: %s") % ('PROVIDE', e))

		for key in self._use_conditional_misc_keys:
			value = self.metadata.get(key)
			if not value:
				continue
			try:
				use_reduce(value, eapi=dep_eapi, matchall=True,
					is_valid_flag=dep_valid_flag)
			except InvalidDependString as e:
				self._metadata_exception(key, e)

		required_use = self.metadata.get('REQUIRED_USE')
		if required_use:
			if not eapi_has_required_use(eapi):
				self._invalid_metadata('EAPI.incompatible',
					"REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi)
			else:
				try:
					check_required_use(required_use, (),
						self.iuse.is_valid_flag)
				except InvalidDependString as e:
					# Force unicode format string for python-2.x safety,
					# ensuring that PortageException.__unicode__() is used
					# when necessary.
					self._invalid_metadata("REQUIRED_USE.syntax",
						_unicode_decode("%s: %s") % ('REQUIRED_USE', e))

		src_uri = self.metadata.get('SRC_URI')
		if src_uri:
			try:
				use_reduce(src_uri, is_src_uri=True, eapi=eapi, matchall=True,
					is_valid_flag=self.iuse.is_valid_flag)
			except InvalidDependString as e:
				if not self.installed:
					self._metadata_exception('SRC_URI', e)
	def _find_suggestions(self):
		"""Search the shortest dependency cycle for USE-flag changes that
		would break it.

		@return: (final_solutions, suggestions) where final_solutions
			maps a package to a set of frozensets of (flag, enabled)
			changes and suggestions is a list of human-readable
			messages, or (None, None) when there is no cycle.
		"""
		if not self.shortest_cycle:
			return None, None

		suggestions = []
		final_solutions = {}

		for pos, pkg in enumerate(self.shortest_cycle):
			# pos-1 wraps to the last element for pos == 0, closing the cycle.
			parent = self.shortest_cycle[pos-1]
			priorities = self.graph.nodes[parent][0][pkg]
			parent_atoms = self.all_parent_atoms.get(pkg)

			# NOTE(review): 'dep' stays unbound if the priority is neither
			# buildtime nor runtime -- presumably one always holds here.
			if priorities[-1].buildtime:
				dep = parent.metadata["DEPEND"]
			elif priorities[-1].runtime:
				dep = parent.metadata["RDEPEND"]

			# Find the atom through which 'parent' pulls in 'pkg'.
			for ppkg, atom in parent_atoms:
				if ppkg == parent:
					changed_parent = ppkg
					parent_atom = atom.unevaluated_atom
					break

			try:
				affecting_use = extract_affecting_use(dep, parent_atom,
					eapi=parent.metadata["EAPI"])
			except InvalidDependString:
				if not parent.installed:
					raise
				affecting_use = set()

			# Make sure we don't want to change a flag that is
			#	a) in use.mask or use.force
			#	b) changed by autounmask

			usemask, useforce = self._get_use_mask_and_force(parent)
			autounmask_changes = self._get_autounmask_changes(parent)
			untouchable_flags = frozenset(chain(usemask, useforce, autounmask_changes))

			affecting_use.difference_update(untouchable_flags)

			# If any of the flags we're going to touch is in REQUIRED_USE,
			# add all other flags in REQUIRED_USE to affecting_use, to not
			# lose any solution.
			required_use_flags = get_required_use_flags(
				parent.metadata.get("REQUIRED_USE", ""))

			if affecting_use.intersection(required_use_flags):
				# TODO: Find out exactly which REQUIRED_USE flags are
				# entangled with affecting_use. We have to limit the
				# number of flags since the number of loops is
				# exponentially related (see bug #374397).
				total_flags = set()
				total_flags.update(affecting_use, required_use_flags)
				total_flags.difference_update(untouchable_flags)
				if len(total_flags) <= 10:
					affecting_use = total_flags

			affecting_use = tuple(affecting_use)

			if not affecting_use:
				continue

			# We iterate over all possible settings of these use flags and
			# gather a set of possible changes.
			# TODO: Use the information encoded in REQUIRED_USE
			solutions = set()
			for use_state in product(("disabled", "enabled"),
				repeat=len(affecting_use)):
				current_use = set(self.depgraph._pkg_use_enabled(parent))
				for flag, state in zip(affecting_use, use_state):
					if state == "enabled":
						current_use.add(flag)
					else:
						current_use.discard(flag)
				try:
					reduced_dep = use_reduce(dep,
						uselist=current_use, flat=True)
				except InvalidDependString:
					if not parent.installed:
						raise
					reduced_dep = None

				if reduced_dep is not None and \
					parent_atom not in reduced_dep:
					# We found an assignment that removes the atom from 'dep'.
					# Make sure it doesn't conflict with REQUIRED_USE.
					required_use = parent.metadata.get("REQUIRED_USE", "")

					if check_required_use(required_use, current_use, parent.iuse.is_valid_flag):
						use = self.depgraph._pkg_use_enabled(parent)
						# Record only the flags that actually differ from
						# the current USE configuration.
						solution = set()
						for flag, state in zip(affecting_use, use_state):
							if state == "enabled" and \
								flag not in use:
								solution.add((flag, True))
							elif state == "disabled" and \
								flag in use:
								solution.add((flag, False))
						solutions.add(frozenset(solution))

			for solution in solutions:
				# Drop solutions that are supersets of smaller ones.
				ignore_solution = False
				for other_solution in solutions:
					if solution is other_solution:
						continue
					if solution.issuperset(other_solution):
						ignore_solution = True
				if ignore_solution:
					continue

				# Check if a USE change conflicts with use requirements of
				# the parents. If a requirement is hard, ignore the
				# suggestion. If the requirement is conditional, warn the
				# user that other changes might be needed.
				followup_change = False
				parent_parent_atoms = self.depgraph._dynamic_config._parent_atoms.get(changed_parent)
				for ppkg, atom in parent_parent_atoms:

					atom = atom.unevaluated_atom
					if not atom.use:
						continue

					for flag, state in solution:
						if flag in atom.use.enabled or flag in atom.use.disabled:
							ignore_solution = True
							break
						elif atom.use.conditional:
							for flags in atom.use.conditional.values():
								if flag in flags:
									followup_change = True
									break

					if ignore_solution:
						break

				if ignore_solution:
					continue

				# Format the surviving solution as a suggestion message.
				changes = []
				for flag, state in solution:
					if state:
						changes.append(colorize("red", "+"+flag))
					else:
						changes.append(colorize("blue", "-"+flag))
				msg = "- %s (Change USE: %s)\n" \
					% (parent.cpv, " ".join(changes))
				if followup_change:
					msg += " (This change might require USE changes on parent packages.)"
				suggestions.append(msg)
				final_solutions.setdefault(pkg, set()).add(solution)

		return final_solutions, suggestions
	def _find_suggestions(self):
		"""Search the shortest dependency cycle for USE-flag changes that
		would break it (older variant using a hand-rolled state counter
		instead of itertools.product).

		@return: (final_solutions, suggestions) where final_solutions
			maps a package to a set of frozensets of (flag, enabled)
			changes and suggestions is a list of human-readable
			messages, or (None, None) when there is no cycle.
		"""
		if not self.shortest_cycle:
			return None, None

		suggestions = []
		final_solutions = {}

		for pos, pkg in enumerate(self.shortest_cycle):
			# pos-1 wraps to the last element for pos == 0, closing the cycle.
			parent = self.shortest_cycle[pos-1]
			priorities = self.graph.nodes[parent][0][pkg]
			parent_atoms = self.all_parent_atoms.get(pkg)

			# NOTE(review): 'dep' stays unbound if the priority is neither
			# buildtime nor runtime -- presumably one always holds here.
			if priorities[-1].buildtime:
				dep = parent.metadata["DEPEND"]
			elif priorities[-1].runtime:
				dep = parent.metadata["RDEPEND"]

			# Find the atom through which 'parent' pulls in 'pkg'.
			for ppkg, atom in parent_atoms:
				if ppkg == parent:
					changed_parent = ppkg
					parent_atom = atom.unevaluated_atom
					break

			affecting_use = extract_affecting_use(dep, parent_atom)

			# Make sure we don't want to change a flag that is
			#	a) in use.mask or use.force
			#	b) changed by autounmask

			usemask, useforce = self._get_use_mask_and_force(parent)
			autounmask_changes = self._get_autounmask_changes(parent)
			untouchable_flags = frozenset(chain(usemask, useforce, autounmask_changes))

			affecting_use.difference_update(untouchable_flags)

			# If any of the flags we're going to touch is in REQUIRED_USE,
			# add all other flags in REQUIRED_USE to affecting_use, to not
			# lose any solution.
			required_use_flags = get_required_use_flags(parent.metadata["REQUIRED_USE"])

			if affecting_use.intersection(required_use_flags):
				affecting_use.update(required_use_flags)
				affecting_use.difference_update(untouchable_flags)

			affecting_use = tuple(affecting_use)

			if not affecting_use:
				continue

			# We iterate over all possible settings of these use flags and
			# gather a set of possible changes.
			# TODO: Use the information encoded in REQUIRED_USE
			use_state = []
			for flag in affecting_use:
				use_state.append("disabled")

			def _next_use_state(state, id=None):
				# Advance 'state' like a binary counter over
				# disabled/enabled; return False when it wraps around.
				if id is None:
					id = len(state)-1

				if id == 0 and state[0] == "enabled":
					return False

				if state[id] == "disabled":
					state[id] = "enabled"
					for i in range(id+1,len(state)):
						state[i] = "disabled"
					return True
				else:
					return _next_use_state(state, id-1)

			solutions = set()
			while(True):
				current_use = set(self.depgraph._pkg_use_enabled(parent))
				for flag, state in zip(affecting_use, use_state):
					if state == "enabled":
						current_use.add(flag)
					else:
						current_use.discard(flag)
				reduced_dep = use_reduce(dep,
					uselist=current_use, flat=True)

				if parent_atom not in reduced_dep:
					# We found an assignment that removes the atom from 'dep'.
					# Make sure it doesn't conflict with REQUIRED_USE.
					required_use = parent.metadata["REQUIRED_USE"]

					if check_required_use(required_use, current_use, parent.iuse.is_valid_flag):
						use = self.depgraph._pkg_use_enabled(parent)
						# Record only the flags that actually differ from
						# the current USE configuration.
						solution = set()
						for flag, state in zip(affecting_use, use_state):
							if state == "enabled" and \
								flag not in use:
								solution.add((flag, True))
							elif state == "disabled" and \
								flag in use:
								solution.add((flag, False))
						solutions.add(frozenset(solution))

				if not _next_use_state(use_state):
					break

			for solution in solutions:
				# Drop solutions that are supersets of smaller ones.
				ignore_solution = False
				for other_solution in solutions:
					if solution is other_solution:
						continue
					if solution.issuperset(other_solution):
						ignore_solution = True
				if ignore_solution:
					continue

				# Check if a USE change conflicts with use requirements of
				# the parents. If a requirement is hard, ignore the
				# suggestion. If the requirement is conditional, warn the
				# user that other changes might be needed.
				followup_change = False
				parent_parent_atoms = self.depgraph._dynamic_config._parent_atoms.get(changed_parent)
				for ppkg, atom in parent_parent_atoms:

					atom = atom.unevaluated_atom
					if not atom.use:
						continue

					for flag, state in solution:
						if flag in atom.use.enabled or flag in atom.use.disabled:
							ignore_solution = True
							break
						elif atom.use.conditional:
							for flags in atom.use.conditional.values():
								if flag in flags:
									followup_change = True
									break

					if ignore_solution:
						break

				if ignore_solution:
					continue

				# Format the surviving solution as a suggestion message.
				changes = []
				for flag, state in solution:
					if state:
						changes.append(colorize("red", "+"+flag))
					else:
						changes.append(colorize("blue", "-"+flag))
				msg = "- %s (Change USE: %s)\n" \
					% (parent.cpv, " ".join(changes))
				if followup_change:
					msg += " (This change might require USE changes on parent packages.)"
				suggestions.append(msg)
				final_solutions.setdefault(pkg, set()).add(solution)

		return final_solutions, suggestions
Beispiel #44
0
	def _validate_deps(self):
		"""
		Validate deps. This does not trigger USE calculation since that
		is expensive for ebuilds and therefore we want to avoid doing
		it unnecessarily (like for masked packages).

		Syntax problems are registered via self._metadata_exception() /
		self._invalid_metadata(); nothing is raised from here. Also
		populates self._validated_atoms and, for built packages,
		self._provides / self._requires.
		"""
		eapi = self.eapi
		dep_eapi = eapi
		dep_valid_flag = self.iuse.is_valid_flag
		if self.installed:
			# Ignore EAPI.incompatible and conditionals missing
			# from IUSE for installed packages since these issues
			# aren't relevant now (re-evaluate when new EAPIs are
			# deployed).
			dep_eapi = None
			dep_valid_flag = None

		validated_atoms = []
		for k in self._dep_keys:
			v = self._metadata.get(k)
			if not v:
				continue
			try:
				atoms = use_reduce(v, eapi=dep_eapi,
					matchall=True, is_valid_flag=dep_valid_flag,
					token_class=Atom, flat=True)
			except InvalidDependString as e:
				self._metadata_exception(k, e)
			else:
				validated_atoms.extend(atoms)
				if not self.built:
					# Slot-operator "built" syntax only makes sense in
					# metadata of already-built packages.
					for atom in atoms:
						if not isinstance(atom, Atom):
							continue
						if atom.slot_operator_built:
							e = InvalidDependString(
								_("Improper context for slot-operator "
								"\"built\" atom syntax: %s") %
								(atom.unevaluated_atom,))
							self._metadata_exception(k, e)

		self._validated_atoms = tuple(set(atom for atom in
			validated_atoms if isinstance(atom, Atom)))

		k = 'PROVIDE'
		v = self._metadata.get(k)
		if v:
			try:
				use_reduce(v, eapi=dep_eapi, matchall=True,
					is_valid_flag=dep_valid_flag, token_class=Atom)
			except InvalidDependString as e:
				self._invalid_metadata("PROVIDE.syntax", "%s: %s" % (k, e))

		for k in self._use_conditional_misc_keys:
			v = self._metadata.get(k)
			if not v:
				continue
			try:
				use_reduce(v, eapi=dep_eapi, matchall=True,
					is_valid_flag=dep_valid_flag)
			except InvalidDependString as e:
				self._metadata_exception(k, e)

		k = 'REQUIRED_USE'
		v = self._metadata.get(k)
		if v and not self.built:
			if not _get_eapi_attrs(eapi).required_use:
				self._invalid_metadata('EAPI.incompatible',
					"REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi)
			else:
				try:
					check_required_use(v, (),
						self.iuse.is_valid_flag, eapi=eapi)
				except InvalidDependString as e:
					self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))

		k = 'SRC_URI'
		v = self._metadata.get(k)
		if v:
			try:
				use_reduce(v, is_src_uri=True, eapi=eapi, matchall=True,
					is_valid_flag=self.iuse.is_valid_flag)
			except InvalidDependString as e:
				# Installed packages only warn implicitly; errors are
				# registered for non-installed ones.
				if not self.installed:
					self._metadata_exception(k, e)

		# Built packages carry soname PROVIDES/REQUIRES metadata.
		if self.built:
			k = 'PROVIDES'
			try:
				self._provides = frozenset(
					parse_soname_deps(self._metadata[k]))
			except InvalidData as e:
				self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))

			k = 'REQUIRES'
			try:
				self._requires = frozenset(
					parse_soname_deps(self._metadata[k]))
			except InvalidData as e:
				self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
	def run(self):
		"""Invoke use_reduce() with this test case's stored arguments."""
		args = (self.deparray, self.uselist, self.masklist,
			self.matchall, self.excludeall, self.is_src_uri,
			self.eapi, self.opconvert, self.flat,
			self.is_valid_flag, self.token_class)
		return use_reduce(*args)
Beispiel #46
0
def dep_check(
    depstring,
    mydbapi,
    mysettings,
    use="yes",
    mode=None,
    myuse=None,
    use_cache=1,
    use_binaries=0,
    myroot=None,
    trees=None,
):
    """
	Takes a depend string, parses it, and selects atoms.

	The myroot parameter is unused (use mysettings['EROOT'] instead).

	@param depstring: a raw dependency string, or a list that has already
		been processed by use_reduce()
	@param mydbapi: dbapi instance used for package matching
	@param mysettings: config instance for the relevant root
	@param use: "yes" to evaluate USE conditionals against enabled flags,
		"all" for repoman-style matchall behavior
	@param myuse: optional explicit USE flag list overriding PORTAGE_USE
	@return: [1, selected_atoms] on success, or [0, error_string] when
		parsing or virtual expansion fails
	"""
    # The caller-supplied myroot is deliberately ignored (see docstring).
    myroot = mysettings["EROOT"]
    edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
    # check_config_instance(mysettings)
    if trees is None:
        trees = globals()["db"]
    if use == "yes":
        if myuse is None:
            # default behavior
            myusesplit = mysettings["PORTAGE_USE"].split()
        else:
            myusesplit = myuse
            # We've been given useflags to use.
            # print "USE FLAGS PASSED IN."
            # print myuse
            # if "bindist" in myusesplit:
            # 	print "BINDIST is set!"
            # else:
            # 	print "BINDIST NOT set."
    else:
        # we are being run by autouse(), don't consult USE vars yet.
        # WE ALSO CANNOT USE SETTINGS
        myusesplit = []

    mymasks = set()
    useforce = set()
    if use == "all":
        # This is only for repoman, in order to constrain the use_reduce
        # matchall behavior to account for profile use.mask/force. The
        # ARCH/archlist code here may be redundant, since the profile
        # really should be handling ARCH masking/forcing itself.
        mymasks.update(mysettings.usemask)
        mymasks.update(mysettings.archlist())
        mymasks.discard(mysettings["ARCH"])
        useforce.add(mysettings["ARCH"])
        useforce.update(mysettings.useforce)
        useforce.difference_update(mymasks)

        # eapi code borrowed from _expand_new_virtuals()
    mytrees = trees[myroot]
    parent = mytrees.get("parent")
    virt_parent = mytrees.get("virt_parent")
    current_parent = None
    eapi = None
    if parent is not None:
        # A virtual parent, when present, takes precedence over the
        # real parent package.
        if virt_parent is not None:
            current_parent = virt_parent
        else:
            current_parent = parent

    if current_parent is not None:
        # Don't pass the eapi argument to use_reduce() for installed packages
        # since previous validation will have already marked them as invalid
        # when necessary and now we're more interested in evaluating
        # dependencies so that things like --depclean work as well as possible
        # in spite of partial invalidity.
        if not current_parent.installed:
            eapi = current_parent.eapi

    if isinstance(depstring, list):
        # Already parsed by the caller; use it as-is.
        mysplit = depstring
    else:
        try:
            mysplit = use_reduce(
                depstring,
                uselist=myusesplit,
                masklist=mymasks,
                matchall=(use == "all"),
                excludeall=useforce,
                opconvert=True,
                token_class=Atom,
                eapi=eapi,
            )
        except InvalidDependString as e:
            return [0, "%s" % (e,)]

    if mysplit == []:
        # dependencies were reduced to nothing
        return [1, []]

        # Recursively expand new-style virtuals so as to
        # collapse one or more levels of indirection.
    try:
        mysplit = _expand_new_virtuals(
            mysplit,
            edebug,
            mydbapi,
            mysettings,
            use=use,
            mode=mode,
            myuse=myuse,
            use_force=useforce,
            use_mask=mymasks,
            use_cache=use_cache,
            use_binaries=use_binaries,
            myroot=myroot,
            trees=trees,
        )
    except ParseError as e:
        return [0, "%s" % (e,)]

    # dep_wordreduce() returns None when it encounters an invalid token;
    # presumably it evaluates atom satisfiability against mydbapi — TODO
    # confirm against its definition.
    mysplit2 = dep_wordreduce(mysplit, mysettings, mydbapi, mode, use_cache=use_cache)
    if mysplit2 is None:
        return [0, _("Invalid token")]

    writemsg("\n\n\n", 1)
    writemsg("mysplit:  %s\n" % (mysplit), 1)
    writemsg("mysplit2: %s\n" % (mysplit2), 1)

    # Resolve any-of (||) choices down to concrete atoms.
    selected_atoms = dep_zapdeps(mysplit, mysplit2, myroot, use_binaries=use_binaries, trees=trees)

    return [1, selected_atoms]
def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
	use_cache=1, use_binaries=0, myroot="/", trees=None):
	"""Takes a depend string and parses the condition.

	Returns [1, selected_atoms] on success, or [0, error_string] when
	the dependency string cannot be parsed or resolved.
	"""
	edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
	#check_config_instance(mysettings)
	if trees is None:
		trees = globals()["db"]
	if use=="yes":
		if myuse is None:
			#default behavior
			myusesplit = mysettings["PORTAGE_USE"].split()
		else:
			myusesplit = myuse
			# We've been given useflags to use.
			#print "USE FLAGS PASSED IN."
			#print myuse
			#if "bindist" in myusesplit:
			#	print "BINDIST is set!"
			#else:
			#	print "BINDIST NOT set."
	else:
		#we are being run by autouse(), don't consult USE vars yet.
		# WE ALSO CANNOT USE SETTINGS
		myusesplit=[]

	#convert parenthesis to sublists
	try:
		mysplit = paren_reduce(depstring)
	except InvalidDependString as e:
		return [0, str(e)]

	mymasks = set()
	useforce = set()
	# ARCH is always treated as forced here, even outside "all" mode.
	useforce.add(mysettings["ARCH"])
	if use == "all":
		# This masking/forcing is only for repoman.  In other cases, relevant
		# masking/forcing should have already been applied via
		# config.regenerate().  Also, binary or installed packages may have
		# been built with flags that are now masked, and it would be
		# inconsistent to mask them now.  Additionally, myuse may consist of
		# flags from a parent package that is being merged to a $ROOT that is
		# different from the one that mysettings represents.
		mymasks.update(mysettings.usemask)
		mymasks.update(mysettings.archlist())
		mymasks.discard(mysettings["ARCH"])
		useforce.update(mysettings.useforce)
		useforce.difference_update(mymasks)
	try:
		mysplit = use_reduce(mysplit, uselist=myusesplit,
			masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
	except InvalidDependString as e:
		return [0, str(e)]

	# Do the || conversions
	mysplit = dep_opconvert(mysplit)

	if mysplit == []:
		#dependencies were reduced to nothing
		return [1,[]]

	# Recursively expand new-style virtuals so as to
	# collapse one or more levels of indirection.
	try:
		mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
			use=use, mode=mode, myuse=myuse,
			use_force=useforce, use_mask=mymasks, use_cache=use_cache,
			use_binaries=use_binaries, myroot=myroot, trees=trees)
	except ParseError as e:
		return [0, str(e)]

	# dep_wordreduce() returns None when it encounters an invalid token.
	mysplit2=mysplit[:]
	mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
	if mysplit2 is None:
		return [0, _("Invalid token")]

	writemsg("\n\n\n", 1)
	writemsg("mysplit:  %s\n" % (mysplit), 1)
	writemsg("mysplit2: %s\n" % (mysplit2), 1)

	try:
		# Resolve any-of (||) choices down to concrete atoms.
		selected_atoms = dep_zapdeps(mysplit, mysplit2, myroot,
			use_binaries=use_binaries, trees=trees)
	except InvalidAtom as e:
		if portage.dep._dep_check_strict:
			raise # This shouldn't happen.
		# dbapi.match() failed due to an invalid atom in
		# the dependencies of an installed package.
		return [0, _("Invalid atom: '%s'") % (e,)]

	return [1, selected_atoms]
Beispiel #48
0
	def __iter__(self):
		"""Yield a FetchTask for every distfile that should be fetched.

		Walks every cp / tree / cpv combination, skipping packages with
		RESTRICT=fetch (and RESTRICT=mirror unless a mirror exemption
		matches), and logs a failure for any file without a Manifest
		DIST digest entry.
		"""

		portdb = self._config.portdb
		get_repo_for_location = portdb.repositories.get_repo_for_location
		file_owners = self._config.file_owners
		file_failures = self._config.file_failures
		restrict_mirror_exemptions = self._config.restrict_mirror_exemptions

		# A transparent hash filter accepts everything, so drop it
		# entirely to skip the filtering step below.
		hash_filter = _hash_filter(
			portdb.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
		if hash_filter.transparent:
			hash_filter = None

		for cp in self._iter_every_cp():

			for tree in portdb.porttrees:

				# Reset state so the Manifest is pulled once
				# for this cp / tree combination.
				digests = None
				repo_config = get_repo_for_location(tree)

				for cpv in portdb.cp_list(cp, mytree=tree):

					try:
						restrict, = portdb.aux_get(cpv, ("RESTRICT",),
							mytree=tree)
					except (KeyError, PortageException) as e:
						self._log_failure("%s\t\taux_get exception %s" %
							(cpv, e))
						continue

					# Here we use matchnone=True to ignore conditional parts
					# of RESTRICT since they don't apply unconditionally.
					# Assume such conditionals only apply on the client side.
					try:
						restrict = frozenset(use_reduce(restrict,
							flat=True, matchnone=True))
					except PortageException as e:
						self._log_failure("%s\t\tuse_reduce exception %s" %
							(cpv, e))
						continue

					if "fetch" in restrict:
						continue

					try:
						uri_map = portdb.getFetchMap(cpv)
					except PortageException as e:
						self._log_failure("%s\t\tgetFetchMap exception %s" %
							(cpv, e))
						continue

					if not uri_map:
						continue

					if "mirror" in restrict:
						# RESTRICT=mirror: only proceed with the subset of
						# files hosted on an explicitly exempted mirror.
						skip = False
						if restrict_mirror_exemptions is not None:
							new_uri_map = {}
							for filename, uri_tuple in uri_map.items():
								for uri in uri_tuple:
									if uri[:9] == "mirror://":
										i = uri.find("/", 9)
										if i != -1 and uri[9:i].strip("/") in \
											restrict_mirror_exemptions:
											new_uri_map[filename] = uri_tuple
											break
							if new_uri_map:
								uri_map = new_uri_map
							else:
								skip = True
						else:
							skip = True

						if skip:
							continue

					# Parse Manifest for this cp if we haven't yet.
					if digests is None:
						try:
							digests = repo_config.load_manifest(
								os.path.join(repo_config.location, cp)
								).getTypeDigests("DIST")
						except (EnvironmentError, PortageException) as e:
							for filename in uri_map:
								self._log_failure(
									"%s\t%s\tManifest exception %s" %
									(cpv, filename, e))
								file_failures[filename] = cpv
							continue

					if not digests:
						for filename in uri_map:
							self._log_failure("%s\t%s\tdigest entry missing" %
								(cpv, filename))
							file_failures[filename] = cpv
						continue

					for filename, uri_tuple in uri_map.items():
						file_digests = digests.get(filename)
						if file_digests is None:
							self._log_failure("%s\t%s\tdigest entry missing" %
								(cpv, filename))
							file_failures[filename] = cpv
							continue
						if filename in file_owners:
							# The first cpv seen for a distfile owns it;
							# skip duplicates from later versions.
							continue
						file_owners[filename] = cpv

						file_digests = \
							_filter_unaccelarated_hashes(file_digests)
						if hash_filter is not None:
							file_digests = _apply_hash_filter(
								file_digests, hash_filter)

						yield FetchTask(cpv=cpv,
							background=True,
							digests=file_digests,
							distfile=filename,
							restrict=restrict,
							uri_tuple=uri_tuple,
							config=self._config)
    def _find_suggestions(self):
        """Search the shortest dependency cycle for USE flag changes that
        would break the cycle.

        For each edge of the cycle, enumerate assignments of the USE flags
        that the parent's dependency atom depends on, keeping assignments
        that drop the atom from the dependency string without violating
        REQUIRED_USE or parent-imposed USE requirements.

        @return: a tuple (final_solutions, suggestions) where
            final_solutions maps each package to a set of frozensets of
            (flag, enabled) changes and suggestions is a list of
            human-readable messages; (None, None) when no cycle is known.
        """
        if not self.shortest_cycle:
            return None, None

        suggestions = []
        final_solutions = {}

        for pos, pkg in enumerate(self.shortest_cycle):
            # pos - 1 wraps to the last element for pos == 0, closing
            # the cycle.
            parent = self.shortest_cycle[pos - 1]
            priorities = self.graph.nodes[parent][0][pkg]
            parent_atoms = self.all_parent_atoms.get(pkg)

            # NOTE(review): if the highest priority is neither buildtime
            # nor runtime, `dep` keeps its value from a previous iteration
            # (or is unbound on the first) — confirm that cannot happen.
            if priorities[-1].buildtime:
                dep = parent.metadata["DEPEND"]
            elif priorities[-1].runtime:
                dep = parent.metadata["RDEPEND"]

            # NOTE(review): assumes parent_atoms always contains an entry
            # for `parent`; otherwise changed_parent/parent_atom stay
            # unbound — verify against the caller.
            for ppkg, atom in parent_atoms:
                if ppkg == parent:
                    changed_parent = ppkg
                    parent_atom = atom.unevaluated_atom
                    break

            try:
                affecting_use = extract_affecting_use(
                    dep, parent_atom, eapi=parent.metadata["EAPI"])
            except InvalidDependString:
                if not parent.installed:
                    raise
                affecting_use = set()

            # Make sure we don't want to change a flag that is
            #	a) in use.mask or use.force
            #	b) changed by autounmask

            usemask, useforce = self._get_use_mask_and_force(parent)
            autounmask_changes = self._get_autounmask_changes(parent)
            untouchable_flags = frozenset(
                chain(usemask, useforce, autounmask_changes))

            affecting_use.difference_update(untouchable_flags)

            #If any of the flags we're going to touch is in REQUIRED_USE, add all
            #other flags in REQUIRED_USE to affecting_use, to not lose any solution.
            required_use_flags = get_required_use_flags(
                parent.metadata.get("REQUIRED_USE", ""))

            if affecting_use.intersection(required_use_flags):
                # TODO: Find out exactly which REQUIRED_USE flags are
                # entangled with affecting_use. We have to limit the
                # number of flags since the number of loops is
                # exponentially related (see bug #374397).
                total_flags = set()
                total_flags.update(affecting_use, required_use_flags)
                total_flags.difference_update(untouchable_flags)
                if len(total_flags) <= 10:
                    affecting_use = total_flags

            affecting_use = tuple(affecting_use)

            if not affecting_use:
                continue

            #We iterate over all possible settings of these use flags and gather
            #a set of possible changes
            #TODO: Use the information encoded in REQUIRED_USE
            solutions = set()
            for use_state in product(("disabled", "enabled"),
                                     repeat=len(affecting_use)):
                current_use = set(self.depgraph._pkg_use_enabled(parent))
                for flag, state in zip(affecting_use, use_state):
                    if state == "enabled":
                        current_use.add(flag)
                    else:
                        current_use.discard(flag)
                try:
                    reduced_dep = use_reduce(dep,
                                             uselist=current_use,
                                             flat=True)
                except InvalidDependString:
                    if not parent.installed:
                        raise
                    reduced_dep = None

                if reduced_dep is not None and \
                 parent_atom not in reduced_dep:
                    #We found an assignment that removes the atom from 'dep'.
                    #Make sure it doesn't conflict with REQUIRED_USE.
                    required_use = parent.metadata.get("REQUIRED_USE", "")

                    if check_required_use(required_use, current_use,
                                          parent.iuse.is_valid_flag):
                        use = self.depgraph._pkg_use_enabled(parent)
                        # Record only the flags that actually differ from
                        # the currently enabled USE.
                        solution = set()
                        for flag, state in zip(affecting_use, use_state):
                            if state == "enabled" and \
                             flag not in use:
                                solution.add((flag, True))
                            elif state == "disabled" and \
                             flag in use:
                                solution.add((flag, False))
                        solutions.add(frozenset(solution))

            for solution in solutions:
                # Discard any solution that is a strict superset of another
                # (prefer minimal change sets).
                ignore_solution = False
                for other_solution in solutions:
                    if solution is other_solution:
                        continue
                    if solution.issuperset(other_solution):
                        ignore_solution = True
                if ignore_solution:
                    continue

                #Check if a USE change conflicts with use requirements of the parents.
                #If a requirement is hard, ignore the suggestion.
                #If the requirement is conditional, warn the user that other changes might be needed.
                followup_change = False
                parent_parent_atoms = self.depgraph._dynamic_config._parent_atoms.get(
                    changed_parent)
                for ppkg, atom in parent_parent_atoms:

                    atom = atom.unevaluated_atom
                    if not atom.use:
                        continue

                    for flag, state in solution:
                        if flag in atom.use.enabled or flag in atom.use.disabled:
                            ignore_solution = True
                            break
                        elif atom.use.conditional:
                            for flags in atom.use.conditional.values():
                                if flag in flags:
                                    followup_change = True
                                    break

                    if ignore_solution:
                        break

                if ignore_solution:
                    continue

                changes = []
                for flag, state in solution:
                    if state:
                        changes.append(colorize("red", "+" + flag))
                    else:
                        changes.append(colorize("blue", "-" + flag))
                msg = "- %s (Change USE: %s)\n" \
                 % (parent.cpv, " ".join(changes))
                if followup_change:
                    msg += " (This change might require USE changes on parent packages.)"
                suggestions.append(msg)
                final_solutions.setdefault(pkg, set()).add(solution)

        return final_solutions, suggestions
Beispiel #50
0
 def without_conditionals(self):
     """Return a conditional-free view of this dependency set,
     using use_reduce() when the dep string supports it."""
     if not self._use_reducable:
         # Fall back to the generic base-class implementation.
         return PMPackageDepSet.without_conditionals.fget(self)
     reduced = use_reduce(self._depstr, self._args.puse)
     return PortageUncondAllOfDep(reduced, self._args)
Beispiel #51
0
def digestgen(myarchives=None, mysettings=None, myportdb=None):
    """
	Generates a digest file if missing. Fetches files if necessary.
	NOTE: myarchives and mysettings used to be positional arguments,
		so their order must be preserved for backward compatibility.
	@param mysettings: the ebuild config (mysettings["O"] must correspond
		to the ebuild's parent directory)
	@type mysettings: config
	@param myportdb: a portdbapi instance
	@type myportdb: portdbapi
	@rtype: int
	@return: 1 on success and 0 on failure
	"""
    if mysettings is None or myportdb is None:
        raise TypeError("portage.digestgen(): 'mysettings' and 'myportdb' parameter are required.")

    try:
        # Counter is restored in the finally block below.
        portage._doebuild_manifest_exempt_depend += 1
        # Map each distfile name to the list of cpvs that use it.
        distfiles_map = {}
        fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
        for cpv in fetchlist_dict:
            try:
                for myfile in fetchlist_dict[cpv]:
                    distfiles_map.setdefault(myfile, []).append(cpv)
            except InvalidDependString as e:
                writemsg("!!! %s\n" % str(e), noiselevel=-1)
                del e
                return 0
        mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
        try:
            mf = mysettings.repositories.get_repo_for_location(mytree)
        except KeyError:
            # backward compatibility
            mytree = os.path.realpath(mytree)
            mf = mysettings.repositories.get_repo_for_location(mytree)

        mf = mf.load_manifest(mysettings["O"], mysettings["DISTDIR"], fetchlist_dict=fetchlist_dict)

        if not mf.allow_create:
            writemsg_stdout(
                _(">>> Skipping creating Manifest for %s; " "repository is configured to not use them\n")
                % mysettings["O"]
            )
            return 1

            # Don't require all hashes since that can trigger excessive
            # fetches when sufficient digests already exist.  To ease transition
            # while Manifest 1 is being removed, only require hashes that will
            # exist before and after the transition.
        required_hash_types = set()
        required_hash_types.add("size")
        required_hash_types.add(MANIFEST2_REQUIRED_HASH)
        dist_hashes = mf.fhashdict.get("DIST", {})

        # To avoid accidental regeneration of digests with the incorrect
        # files (such as partially downloaded files), trigger the fetch
        # code if the file exists and it's size doesn't match the current
        # manifest entry. If there really is a legitimate reason for the
        # digest to change, `ebuild --force digest` can be used to avoid
        # triggering this code (or else the old digests can be manually
        # removed from the Manifest).
        missing_files = []
        for myfile in distfiles_map:
            myhashes = dist_hashes.get(myfile)
            if not myhashes:
                # No digest entry yet; fetch unless a non-empty file
                # already exists in DISTDIR.
                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None
                if st is None or st.st_size == 0:
                    missing_files.append(myfile)
                continue
            size = myhashes.get("size")

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
                if size == 0:
                    missing_files.append(myfile)
                    continue
                if required_hash_types.difference(myhashes):
                    missing_files.append(myfile)
                    continue
            else:
                # Existing file with mismatched or zero size: re-fetch.
                if st.st_size == 0 or size is not None and size != st.st_size:
                    missing_files.append(myfile)
                    continue

        if missing_files:
            for myfile in missing_files:
                # Collect all candidate URIs and RESTRICT values from
                # every cpv that references this distfile.
                uris = set()
                all_restrict = set()
                for cpv in distfiles_map[myfile]:
                    uris.update(myportdb.getFetchMap(cpv, mytree=mytree)[myfile])
                    restrict = myportdb.aux_get(cpv, ["RESTRICT"], mytree=mytree)[0]
                    # Here we ignore conditional parts of RESTRICT since
                    # they don't apply unconditionally. Assume such
                    # conditionals only apply on the client side where
                    # digestgen() does not need to be called.
                    all_restrict.update(use_reduce(restrict, flat=True, matchnone=True))

                    # fetch() uses CATEGORY and PF to display a message
                    # when fetch restriction is triggered.
                    cat, pf = catsplit(cpv)
                    mysettings["CATEGORY"] = cat
                    mysettings["PF"] = pf

                    # fetch() uses PORTAGE_RESTRICT to control fetch
                    # restriction, which is only applied to files that
                    # are not fetchable via a mirror:// URI.
                mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)

                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None

                if not fetch({myfile: uris}, mysettings):
                    # NOTE: cpv here is the last one from the loop above.
                    myebuild = os.path.join(mysettings["O"], catsplit(cpv)[1] + ".ebuild")
                    spawn_nofetch(myportdb, myebuild)
                    writemsg(_("!!! Fetch failed for %s, can't update " "Manifest\n") % myfile, noiselevel=-1)
                    if myfile in dist_hashes and st is not None and st.st_size > 0:
                        # stat result is obtained before calling fetch(),
                        # since fetch may rename the existing file if the
                        # digest does not match.
                        writemsg(
                            _(
                                "!!! If you would like to "
                                "forcefully replace the existing "
                                "Manifest entry\n!!! for %s, use "
                                "the following command:\n"
                            )
                            % myfile
                            + "!!!    "
                            + colorize("INFORM", "ebuild --force %s manifest" % os.path.basename(myebuild))
                            + "\n",
                            noiselevel=-1,
                        )
                    return 0
        writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
        try:
            mf.create(assumeDistHashesSometimes=True, assumeDistHashesAlways=("assume-digests" in mysettings.features))
        except FileNotFound as e:
            writemsg(_("!!! File %s doesn't exist, can't update " "Manifest\n") % e, noiselevel=-1)
            return 0
        except PortagePackageException as e:
            writemsg(("!!! %s\n") % (e,), noiselevel=-1)
            return 0
        try:
            mf.write(sign=False)
        except PermissionDenied as e:
            writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
            return 0
        if "assume-digests" not in mysettings.features:
            # Report digests that were assumed (distfile not present
            # locally), listing which package versions reference them.
            distlist = list(mf.fhashdict.get("DIST", {}))
            distlist.sort()
            auto_assumed = []
            for filename in distlist:
                if not os.path.exists(os.path.join(mysettings["DISTDIR"], filename)):
                    auto_assumed.append(filename)
            if auto_assumed:
                cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
                pkgs = myportdb.cp_list(cp, mytree=mytree)
                pkgs.sort()
                writemsg_stdout("  digest.assumed" + colorize("WARN", str(len(auto_assumed)).rjust(18)) + "\n")
                for pkg_key in pkgs:
                    fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
                    pv = pkg_key.split("/")[1]
                    for filename in auto_assumed:
                        if filename in fetchlist:
                            writemsg_stdout("   %s::%s\n" % (pv, filename))
        return 1
    finally:
        portage._doebuild_manifest_exempt_depend -= 1
Beispiel #52
0
	def _validate_deps(self):
		"""
		Validate deps. This does not trigger USE calculation since that
		is expensive for ebuilds and therefore we want to avoid doing
		it unnecessarily (like for masked packages).

		Records syntax problems via _metadata_exception() /
		_invalid_metadata() rather than raising, and populates
		self._validated_atoms (and, for built packages, self._provides
		and self._requires).
		"""
		eapi = self.eapi
		dep_eapi = eapi
		dep_valid_flag = self.iuse.is_valid_flag
		if self.installed:
			# Ignore EAPI.incompatible and conditionals missing
			# from IUSE for installed packages since these issues
			# aren't relevant now (re-evaluate when new EAPIs are
			# deployed).
			dep_eapi = None
			dep_valid_flag = None

		validated_atoms = []
		for k in self._dep_keys:
			v = self._metadata.get(k)
			if not v:
				continue
			try:
				# flat=True produces a plain token list instead of a
				# nested structure, so atoms can be collected directly.
				atoms = use_reduce(v, eapi=dep_eapi,
					matchall=True, is_valid_flag=dep_valid_flag,
					token_class=Atom, flat=True)
			except InvalidDependString as e:
				self._metadata_exception(k, e)
			else:
				validated_atoms.extend(atoms)
				if not self.built:
					# Slot-operator "built" syntax is only meaningful in
					# the metadata of already-built packages.
					for atom in atoms:
						if not isinstance(atom, Atom):
							continue
						if atom.slot_operator_built:
							e = InvalidDependString(
								_("Improper context for slot-operator "
								"\"built\" atom syntax: %s") %
								(atom.unevaluated_atom,))
							self._metadata_exception(k, e)

		self._validated_atoms = tuple(set(atom for atom in
			validated_atoms if isinstance(atom, Atom)))

		for k in self._use_conditional_misc_keys:
			v = self._metadata.get(k)
			if not v:
				continue
			try:
				use_reduce(v, eapi=dep_eapi, matchall=True,
					is_valid_flag=dep_valid_flag)
			except InvalidDependString as e:
				self._metadata_exception(k, e)

		k = 'REQUIRED_USE'
		v = self._metadata.get(k)
		if v and not self.built:
			if not _get_eapi_attrs(eapi).required_use:
				self._invalid_metadata('EAPI.incompatible',
					"REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi)
			else:
				try:
					# Syntax-only check: empty USE set, so no
					# requirement is actually evaluated here.
					check_required_use(v, (),
						self.iuse.is_valid_flag, eapi=eapi)
				except InvalidDependString as e:
					self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))

		k = 'SRC_URI'
		v = self._metadata.get(k)
		if v:
			try:
				use_reduce(v, is_src_uri=True, eapi=eapi, matchall=True,
					is_valid_flag=self.iuse.is_valid_flag)
			except InvalidDependString as e:
				# Installed packages don't need fetchable SRC_URI.
				if not self.installed:
					self._metadata_exception(k, e)

		if self.built:
			# Soname metadata (PROVIDES/REQUIRES) only exists for
			# built packages.
			k = 'PROVIDES'
			try:
				self._provides = frozenset(
					parse_soname_deps(self._metadata[k]))
			except InvalidData as e:
				self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))

			k = 'REQUIRES'
			try:
				self._requires = frozenset(
					parse_soname_deps(self._metadata[k]))
			except InvalidData as e:
				self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
Beispiel #53
0
    def _validate_deps(self):
        """
		Validate deps. This does not trigger USE calculation since that
		is expensive for ebuilds and therefore we want to avoid doing
		it unnecessarily (like for masked packages).

		Records problems via _metadata_exception() / _invalid_metadata()
		rather than raising.
		"""
        eapi = self.metadata['EAPI']
        dep_eapi = eapi
        dep_valid_flag = self.iuse.is_valid_flag
        if self.installed:
            # Ignore EAPI.incompatible and conditionals missing
            # from IUSE for installed packages since these issues
            # aren't relevant now (re-evaluate when new EAPIs are
            # deployed).
            dep_eapi = None
            dep_valid_flag = None

        for k in self._dep_keys:
            v = self.metadata.get(k)
            if not v:
                continue
            try:
                use_reduce(v,
                           eapi=dep_eapi,
                           matchall=True,
                           is_valid_flag=dep_valid_flag,
                           token_class=Atom)
            except InvalidDependString as e:
                self._metadata_exception(k, e)

        k = 'PROVIDE'
        v = self.metadata.get(k)
        if v:
            try:
                use_reduce(v,
                           eapi=dep_eapi,
                           matchall=True,
                           is_valid_flag=dep_valid_flag,
                           token_class=Atom)
            except InvalidDependString as e:
                self._invalid_metadata("PROVIDE.syntax",
                                       _unicode_decode("%s: %s") % (k, e))

        for k in self._use_conditional_misc_keys:
            v = self.metadata.get(k)
            if not v:
                continue
            try:
                use_reduce(v,
                           eapi=dep_eapi,
                           matchall=True,
                           is_valid_flag=dep_valid_flag)
            except InvalidDependString as e:
                self._metadata_exception(k, e)

        k = 'REQUIRED_USE'
        v = self.metadata.get(k)
        if v:
            if not eapi_has_required_use(eapi):
                self._invalid_metadata(
                    'EAPI.incompatible',
                    "REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi)
            else:
                try:
                    # Syntax-only check: empty USE set, so no
                    # requirement is actually evaluated here.
                    check_required_use(v, (), self.iuse.is_valid_flag)
                except InvalidDependString as e:
                    # Force unicode format string for python-2.x safety,
                    # ensuring that PortageException.__unicode__() is used
                    # when necessary.
                    self._invalid_metadata(k + ".syntax",
                                           _unicode_decode("%s: %s") % (k, e))

        k = 'SRC_URI'
        v = self.metadata.get(k)
        if v:
            try:
                use_reduce(v,
                           is_src_uri=True,
                           eapi=eapi,
                           matchall=True,
                           is_valid_flag=self.iuse.is_valid_flag)
            except InvalidDependString as e:
                # Installed packages don't need fetchable SRC_URI.
                if not self.installed:
                    self._metadata_exception(k, e)
Beispiel #54
0
def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
	use_cache=1, use_binaries=0, myroot="/", trees=None):
	"""
	Evaluate a *DEPEND-style dependency string against the current
	configuration and select concrete atoms to satisfy it.

	The string is reduced with use_reduce() (honoring USE flags and, for
	repoman's use="all" mode, mask/force lists), new-style virtuals are
	recursively expanded, and dep_zapdeps() picks atoms from the
	alternatives.

	@param depstring: dependency string (DEPEND/RDEPEND/... syntax)
	@param mydbapi: dbapi instance used to resolve atoms
	@param mysettings: portage config object (USE, ARCH, mask/force data)
	@param use: "yes" to evaluate with USE flags (from myuse or
		PORTAGE_USE), "all" for repoman-style matchall evaluation;
		any other value evaluates with an empty USE list
	@param mode: match mode, passed through to dep_wordreduce()
	@param myuse: explicit USE flag sequence overriding PORTAGE_USE
	@param use_cache: caching toggle forwarded to helpers
	@param use_binaries: forwarded to dep_zapdeps() to prefer binaries
	@param myroot: ROOT whose tree set in `trees` is consulted
	@param trees: dict of package trees keyed by root; defaults to the
		module-global "db"
	@return: [0, error_message] on an invalid dependency string or
		parse error, otherwise [1, selected_atoms] (selected_atoms is
		[] when the dependencies reduce to nothing)
	"""
	edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
	#check_config_instance(mysettings)
	if trees is None:
		# Fall back to the module-global tree database.
		trees = globals()["db"]
	if use=="yes":
		if myuse is None:
			#default behavior
			myusesplit = mysettings["PORTAGE_USE"].split()
		else:
			# Caller supplied an explicit USE flag list; use it verbatim.
			myusesplit = myuse
	else:
		#we are being run by autouse(), don't consult USE vars yet.
		# WE ALSO CANNOT USE SETTINGS
		myusesplit=[]

	mymasks = set()
	useforce = set()
	# NOTE(review): ARCH is force-enabled unconditionally here, even when
	# use != "all" (where excludeall/masklist matter for use_reduce) —
	# confirm this placement outside the `use == "all"` block is intended.
	useforce.add(mysettings["ARCH"])
	if use == "all":
		# This masking/forcing is only for repoman.  In other cases, relevant
		# masking/forcing should have already been applied via
		# config.regenerate().  Also, binary or installed packages may have
		# been built with flags that are now masked, and it would be
		# inconsistent to mask them now.  Additionally, myuse may consist of
		# flags from a parent package that is being merged to a $ROOT that is
		# different from the one that mysettings represents.
		mymasks.update(mysettings.usemask)
		mymasks.update(mysettings.archlist())
		# The active ARCH must never be masked.
		mymasks.discard(mysettings["ARCH"])
		useforce.update(mysettings.useforce)
		# Masks win over forces: drop any forced flag that is also masked.
		useforce.difference_update(mymasks)

	# eapi code borrowed from _expand_new_virtuals()
	mytrees = trees[myroot]
	parent = mytrees.get("parent")
	virt_parent = mytrees.get("virt_parent")
	current_parent = None
	eapi = None
	if parent is not None:
		if virt_parent is not None:
			# Prefer the virtual parent (pkg, atom) tuple's package.
			current_parent = virt_parent[0]
		else:
			current_parent = parent

	if current_parent is not None:
		# Don't pass the eapi argument to use_reduce() for installed packages
		# since previous validation will have already marked them as invalid
		# when necessary and now we're more interested in evaluating
		# dependencies so that things like --depclean work as well as possible
		# in spite of partial invalidity.
		if not current_parent.installed:
			eapi = current_parent.metadata['EAPI']

	try:
		# opconvert=True yields prefix-operator lists; token_class=Atom
		# parses each dependency token into an Atom instance.
		mysplit = use_reduce(depstring, uselist=myusesplit, masklist=mymasks, \
			matchall=(use=="all"), excludeall=useforce, opconvert=True, \
			token_class=Atom, eapi=eapi)
	except InvalidDependString as e:
		return [0, str(e)]

	if mysplit == []:
		#dependencies were reduced to nothing
		return [1,[]]

	# Recursively expand new-style virtuals so as to
	# collapse one or more levels of indirection.
	try:
		mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
			use=use, mode=mode, myuse=myuse,
			use_force=useforce, use_mask=mymasks, use_cache=use_cache,
			use_binaries=use_binaries, myroot=myroot, trees=trees)
	except ParseError as e:
		return [0, str(e)]

	# Second pass: mark which atoms are already satisfied so dep_zapdeps()
	# can prefer alternatives that need no new merges.
	mysplit2=mysplit[:]
	mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
	if mysplit2 is None:
		return [0, _("Invalid token")]

	writemsg("\n\n\n", 1)
	writemsg("mysplit:  %s\n" % (mysplit), 1)
	writemsg("mysplit2: %s\n" % (mysplit2), 1)

	selected_atoms = dep_zapdeps(mysplit, mysplit2, myroot,
		use_binaries=use_binaries, trees=trees)

	return [1, selected_atoms]