def test__eval_qa_conditionals(self):
    """
    Exercise Atom._eval_qa_conditionals() for repoman-style QA
    evaluation: conditional USE deps are expanded against the given
    use.mask / use.force lists, while the unevaluated form is kept
    reachable via unevaluated_atom.
    """
    # (input atom, use.mask flags, use.force flags, expected result)
    cases = (
        ("dev-libs/A[foo]", [], [], "dev-libs/A[foo]"),
        ("dev-libs/A[foo]", ["foo"], [], "dev-libs/A[foo]"),
        ("dev-libs/A[foo]", [], ["foo"], "dev-libs/A[foo]"),

        ("dev-libs/A:0[foo]", [], [], "dev-libs/A:0[foo]"),
        ("dev-libs/A:0[foo]", ["foo"], [], "dev-libs/A:0[foo]"),
        ("dev-libs/A:0[foo]", [], ["foo"], "dev-libs/A:0[foo]"),
        ("dev-libs/A:0[foo=]", [], ["foo"], "dev-libs/A:0[foo]"),

        ("dev-libs/A[foo,-bar]", ["foo"], ["bar"], "dev-libs/A[foo,-bar]"),
        ("dev-libs/A[-foo,bar]", ["foo", "bar"], [], "dev-libs/A[-foo,bar]"),

        ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a", "b", "c"], [],
            "dev-libs/A[a,-b,c,d,-e,-f]"),
        ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c"],
            "dev-libs/A[a,b,-c,d,-e,-f]"),
        ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d", "e", "f"], [],
            "dev-libs/A[a,b,-b,c,-c,-e,-f]"),
        ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["d", "e", "f"],
            "dev-libs/A[a,b,-b,c,-c,d,-f]"),

        ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]",
            ["a", "b", "c", "d", "e", "f"], [],
            "dev-libs/A[a(-),-b(+),c(-),-e(-),-f(+)]"),
        ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
            [], ["a", "b", "c", "d", "e", "f"],
            "dev-libs/A[a(+),b(-),-c(+),d(-),-f(-)]"),
    )

    for input_atom, masked, forced, expected in cases:
        parsed = Atom(input_atom)
        evaluated = parsed._eval_qa_conditionals(masked, forced)
        # The evaluated atom must render to the expected string, and the
        # original unevaluated form must remain accessible.
        self.assertEqual(str(evaluated), expected)
        self.assertEqual(str(evaluated.unevaluated_atom), input_atom)
def __call__(self, argv): """ @return: tuple of (stdout, stderr, returncode) """ # Python 3: # cmd, root, *args = argv cmd = argv[0] root = argv[1] args = argv[2:] warnings = [] warnings_str = '' db = self.get_db() eapi = self.settings.get('EAPI') root = normalize_path(root or os.sep).rstrip(os.sep) + os.sep if root not in db: return ('', '%s: Invalid ROOT: %s\n' % (cmd, root), 3) portdb = db[root]["porttree"].dbapi vardb = db[root]["vartree"].dbapi if cmd in ('best_version', 'has_version'): try: atom = Atom(args[0], allow_repo=False) except InvalidAtom: return ('', '%s: Invalid atom: %s\n' % (cmd, args[0]), 2) try: atom = Atom(args[0], allow_repo=False, eapi=eapi) except InvalidAtom as e: warnings.append("QA Notice: %s: %s" % (cmd, e)) use = self.settings.get('PORTAGE_BUILT_USE') if use is None: use = self.settings['PORTAGE_USE'] use = frozenset(use.split()) atom = atom.evaluate_conditionals(use) if warnings: warnings_str = self._elog('eqawarn', warnings) if cmd == 'has_version': if vardb.match(atom): returncode = 0 else: returncode = 1 return ('', warnings_str, returncode) elif cmd == 'best_version': m = best(vardb.match(atom)) return ('%s\n' % m, warnings_str, 0) else: return ('', 'Invalid command: %s\n' % cmd, 3)
def __call__(self, argv):
    """
    Answer a has_version/best_version IPC query against the vardb.

    @returns: tuple of (stdout, stderr, returncode)
    """
    cmd, root, atom_str = argv

    eapi = self.settings.get('EAPI')
    allow_repo = eapi_has_repo_deps(eapi)

    # A parse failure without EAPI restrictions is a hard error.
    try:
        atom = Atom(atom_str, allow_repo=allow_repo)
    except InvalidAtom:
        return ('', 'invalid atom: %s\n' % atom_str, 2)

    # Re-parse under the declared EAPI; failure here is only a QA issue.
    warnings = []
    try:
        atom = Atom(atom_str, allow_repo=allow_repo, eapi=eapi)
    except InvalidAtom as e:
        warnings.append(_unicode_decode("QA Notice: %s: %s") % (cmd, e))

    # Evaluate conditional USE deps against built USE when available.
    use_flags = self.settings.get('PORTAGE_BUILT_USE')
    if use_flags is None:
        use_flags = self.settings['PORTAGE_USE']
    atom = atom.evaluate_conditionals(frozenset(use_flags.split()))

    db = self._db if self._db is not None else portage.db

    warnings_str = self._elog('eqawarn', warnings) if warnings else ''

    root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
    if root not in db:
        return ('', 'invalid ROOT: %s\n' % root, 2)

    vardb = db[root]["vartree"].dbapi

    if cmd == 'has_version':
        rc = 0 if vardb.match(atom) else 1
        return ('', warnings_str, rc)
    if cmd == 'best_version':
        match = best(vardb.match(atom))
        return ('%s\n' % match, warnings_str, 0)
    return ('', 'invalid command: %s\n' % cmd, 2)
def __call__(self, argv):
    """
    Answer a has_version/best_version IPC query.

    @returns: tuple of (stdout, stderr, returncode)
    """
    # Note that $USE is passed via IPC in order to ensure that
    # we have the correct value for built/installed packages,
    # since the config class doesn't currently provide a way
    # to access built/installed $USE that would work in all
    # possible scenarios.
    cmd, root, atom, use = argv

    try:
        atom = Atom(atom)
    except InvalidAtom:
        return ('', 'invalid atom: %s\n' % atom, 2)

    atom = atom.evaluate_conditionals(frozenset(use.split()))

    db = self._db if self._db is not None else portage.db

    root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
    if root not in db:
        return ('', 'invalid ROOT: %s\n' % root, 2)

    vardb = db[root]["vartree"].dbapi

    if cmd == 'has_version':
        rc = 0 if vardb.match(atom) else 1
        return ('', '', rc)
    if cmd == 'best_version':
        match = best(vardb.match(atom))
        return ('%s\n' % match, '', 0)
    return ('', 'invalid command: %s\n' % cmd, 2)
def clean_subslots(depatom, usel=None):
    """
    Recursively strip expanded sub-slot components from slot-operator
    dependencies so vardb and portdb atoms compare equal.
    """
    # Recurse into nested dependency lists.
    if isinstance(depatom, list):
        return [clean_subslots(child, usel) for child in depatom]

    try:
        # this can be either an atom or some special operator.
        # in the latter case, we get InvalidAtom and pass it as-is.
        atom = Atom(depatom)
    except InvalidAtom:
        return depatom

    # if we're processing portdb, we need to evaluate USE flag
    # dependency conditionals to make them match vdb. this
    # requires passing the list of USE flags, so we reuse it
    # as conditional for the operation as well.
    if usel is not None:
        atom = atom.evaluate_conditionals(usel)

    # replace slot operator := dependencies with plain :=
    # since we can't properly compare expanded slots
    # in vardb to abstract slots in portdb.
    return subslot_repl_re.sub(':=', atom)
def __init__(self, **kwargs):
    """
    Initialize a Package from keyword args, validating metadata
    (SLOT, IUSE defaults) and precomputing derived attributes
    (cpv fields, slot atom, IUSE wrapper, hash key).
    """
    metadata = _PackageMetadataWrapperBase(kwargs.pop('metadata'))
    Task.__init__(self, **kwargs)
    # the SlotObject constructor assigns self.root_config from keyword
    # args; it is an instance of the _emerge.RootConfig.RootConfig class
    self.root = self.root_config.root
    self._raw_metadata = metadata
    self._metadata = _PackageMetadataWrapper(self, metadata)
    if not self.built:
        # For unbuilt packages, CHOST comes from the current config
        # rather than from recorded build-time metadata.
        self._metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
    eapi_attrs = _get_eapi_attrs(self.eapi)

    self.cpv = _pkg_str(self.cpv, metadata=self._metadata,
        settings=self.root_config.settings)
    # _pkg_str flags an unparseable SLOT via the slot_invalid attribute.
    if hasattr(self.cpv, 'slot_invalid'):
        self._invalid_metadata('SLOT.invalid',
            "SLOT: invalid value: '%s'" % self._metadata["SLOT"])
    self.cpv_split = self.cpv.cpv_split
    self.category, self.pf = portage.catsplit(self.cpv)
    self.cp = self.cpv.cp
    self.version = self.cpv.version
    self.slot = self.cpv.slot
    self.sub_slot = self.cpv.sub_slot
    self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
    # sync metadata with validated repo (may be UNKNOWN_REPO)
    self._metadata['repository'] = self.cpv.repo

    # Choose the implicit-IUSE matcher appropriate for this EAPI; for
    # built packages under iuse_effective EAPIs, recorded USE flags are
    # folded into the matcher.
    if eapi_attrs.iuse_effective:
        implicit_match = self.root_config.settings._iuse_effective_match
        if self.built:
            implicit_match = functools.partial(
                self._built_iuse_effective_match,
                implicit_match, frozenset(self._metadata['USE'].split()))
    else:
        implicit_match = self.root_config.settings._iuse_implicit_match
    usealiases = self.root_config.settings._use_manager.getUseAliases(self)
    self.iuse = self._iuse(self, self._metadata["IUSE"].split(),
        implicit_match, usealiases, self.eapi)

    # IUSE defaults (+flag/-flag) require EAPI support; only flag this
    # for packages that are not already installed.
    if (self.iuse.enabled or self.iuse.disabled) and \
        not eapi_attrs.iuse_defaults:
        if not self.installed:
            self._invalid_metadata('EAPI.incompatible',
                "IUSE contains defaults, but EAPI doesn't allow them")
    if self.inherited is None:
        self.inherited = frozenset()

    # Installed or onlydeps packages are never merged.
    if self.operation is None:
        if self.onlydeps or self.installed:
            self.operation = "nomerge"
        else:
            self.operation = "merge"

    self._hash_key = Package._gen_hash_key(cpv=self.cpv,
        installed=self.installed, onlydeps=self.onlydeps,
        operation=self.operation, repo_name=self.cpv.repo,
        root_config=self.root_config,
        type_name=self.type_name)
    self._hash_value = hash(self._hash_key)
def check_matches(atom, expected):
    """Assert that matching the atom yields exactly the expected
    package objects, identity-compared in order."""
    found = list(p.match("/", Atom(atom)))
    self.assertEqual(len(found), len(expected))
    for got, want in zip(found, expected):
        self.assertTrue(got is want)
def __call__(self, argv):
    """
    Handle an IPC query of the form (cmd, root, *args).

    Supported commands: has_version, best_version,
    master_repositories, repository_path, available_eclasses,
    eclass_path, license_path.

    @return: tuple of (stdout, stderr, returncode)
    """
    # Python 3:
    # cmd, root, *args = argv
    cmd = argv[0]
    root = argv[1]
    args = argv[2:]

    warnings = []
    warnings_str = ''

    db = self.get_db()
    eapi = self.settings.get('EAPI')

    root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
    if root not in db:
        return ('', '%s: Invalid ROOT: %s\n' % (cmd, root), 3)

    portdb = db[root]["porttree"].dbapi
    vardb = db[root]["vartree"].dbapi

    if cmd in ('best_version', 'has_version'):
        # Repo deps (::repo) are only allowed when the EAPI permits them.
        allow_repo = eapi_has_repo_deps(eapi)
        # A parse failure without EAPI restrictions is a hard error...
        try:
            atom = Atom(args[0], allow_repo=allow_repo)
        except InvalidAtom:
            return ('', '%s: Invalid atom: %s\n' % (cmd, args[0]), 2)

        # ...while failure under the declared EAPI is only a QA notice.
        try:
            atom = Atom(args[0], allow_repo=allow_repo, eapi=eapi)
        except InvalidAtom as e:
            warnings.append(_unicode_decode("QA Notice: %s: %s") % (cmd, e))

        # Prefer the built package's recorded USE for evaluating
        # conditional USE deps.
        use = self.settings.get('PORTAGE_BUILT_USE')
        if use is None:
            use = self.settings['PORTAGE_USE']

        use = frozenset(use.split())
        atom = atom.evaluate_conditionals(use)

    if warnings:
        warnings_str = self._elog('eqawarn', warnings)

    if cmd == 'has_version':
        # returncode 0 == installed match found, 1 == no match.
        if vardb.match(atom):
            returncode = 0
        else:
            returncode = 1
        return ('', warnings_str, returncode)
    elif cmd == 'best_version':
        m = best(vardb.match(atom))
        return ('%s\n' % m, warnings_str, 0)
    elif cmd in ('master_repositories', 'repository_path',
        'available_eclasses', 'eclass_path', 'license_path'):
        # Validate the repository name syntactically before lookup.
        repo = _repo_name_re.match(args[0])
        if repo is None:
            return ('', '%s: Invalid repository: %s\n' % (cmd, args[0]), 2)
        try:
            repo = portdb.repositories[args[0]]
        except KeyError:
            # Unknown (but syntactically valid) repository.
            return ('', warnings_str, 1)

        if cmd == 'master_repositories':
            return ('%s\n' % ' '.join(x.name for x in repo.masters),
                warnings_str, 0)
        elif cmd == 'repository_path':
            return ('%s\n' % repo.location, warnings_str, 0)
        elif cmd == 'available_eclasses':
            return ('%s\n' % ' '.join(sorted(repo.eclass_db.eclasses)),
                warnings_str, 0)
        elif cmd == 'eclass_path':
            try:
                eclass = repo.eclass_db.eclasses[args[1]]
            except KeyError:
                return ('', warnings_str, 1)
            return ('%s\n' % eclass.location, warnings_str, 0)
        elif cmd == 'license_path':
            # Search the repo itself first, then its masters (the list
            # is built master-first and reversed).
            paths = reversed([os.path.join(x.location, 'licenses', args[1])
                for x in list(repo.masters) + [repo]])
            for path in paths:
                if os.path.exists(path):
                    return ('%s\n' % path, warnings_str, 0)
            return ('', warnings_str, 1)
    else:
        return ('', 'Invalid command: %s\n' % cmd, 3)
def _expand_new_virtuals(
    mysplit,
    edebug,
    mydbapi,
    mysettings,
    myroot="/",
    trees=None,
    use_mask=None,
    use_force=None,
    **kwargs
):
    """
    In order to solve bug #141118, recursively expand new-style virtuals so
    as to collapse one or more levels of indirection, generating an expanded
    search space. In dep_zapdeps, new-style virtuals will be assigned zero
    cost regardless of whether or not they are currently installed. Virtual
    blockers are supported but only when the virtual expands to a single
    atom because it wouldn't necessarily make sense to block all the
    components of a compound virtual.  When more than one new-style virtual
    is matched, the matches are sorted from highest to lowest versions and
    the atom is expanded to || ( highest match ... lowest match ).

    The result is normalized in the same way as use_reduce, having a top-level
    conjuction, and no redundant nested lists.
    """
    newsplit = []
    mytrees = trees[myroot]
    portdb = mytrees["porttree"].dbapi
    pkg_use_enabled = mytrees.get("pkg_use_enabled")
    # Atoms are stored in the graph as (atom, id(atom)) tuples
    # since each atom is considered to be a unique entity. For
    # example, atoms that appear identical may behave differently
    # in USE matching, depending on their unevaluated form. Also,
    # specially generated virtual atoms may appear identical while
    # having different _orig_atom attributes.
    atom_graph = mytrees.get("atom_graph")
    parent = mytrees.get("parent")
    virt_parent = mytrees.get("virt_parent")
    graph_parent = None
    if parent is not None:
        # When recursing through a virtual's deps, the virtual package
        # itself becomes the effective parent for graph bookkeeping.
        if virt_parent is not None:
            graph_parent = virt_parent
            parent = virt_parent
        else:
            graph_parent = parent
    repoman = not mysettings.local_config
    if kwargs["use_binaries"]:
        portdb = trees[myroot]["bintree"].dbapi
    pprovideddict = mysettings.pprovideddict
    myuse = kwargs["myuse"]
    is_disjunction = mysplit and mysplit[0] == "||"
    for x in mysplit:
        if x == "||":
            newsplit.append(x)
            continue
        elif isinstance(x, list):
            # Nested list: due to use_reduce normalization, a disjunction
            # may only directly contain conjunctions and vice versa.
            assert x, "Normalization error, empty conjunction found in %s" % (
                mysplit,
            )
            if is_disjunction:
                assert (
                    x[0] != "||"
                ), "Normalization error, nested disjunction found in %s" % (mysplit,)
            else:
                assert (
                    x[0] == "||"
                ), "Normalization error, nested conjunction found in %s" % (mysplit,)
            x_exp = _expand_new_virtuals(
                x,
                edebug,
                mydbapi,
                mysettings,
                myroot=myroot,
                trees=trees,
                use_mask=use_mask,
                use_force=use_force,
                **kwargs
            )
            if is_disjunction:
                if len(x_exp) == 1:
                    x = x_exp[0]
                    if isinstance(x, list):
                        # Due to normalization, a conjunction must not be
                        # nested directly in another conjunction, so this
                        # must be a disjunction.
                        assert (
                            x and x[0] == "||"
                        ), "Normalization error, nested conjunction found in %s" % (
                            x_exp,
                        )
                        newsplit.extend(x[1:])
                    else:
                        newsplit.append(x)
                else:
                    newsplit.append(x_exp)
            else:
                newsplit.extend(x_exp)
            continue

        if not isinstance(x, Atom):
            raise ParseError(_("invalid token: '%s'") % x)

        if repoman:
            x = x._eval_qa_conditionals(use_mask, use_force)

        mykey = x.cp
        # Non-virtual atoms pass through unchanged.
        if not mykey.startswith("virtual/"):
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
            continue

        if x.blocker:
            # Virtual blockers are no longer expanded here since
            # the un-expanded virtual atom is more useful for
            # maintaining a cache of blocker atoms.
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
            continue

        if repoman or not hasattr(portdb, "match_pkgs") or pkg_use_enabled is None:
            # Limited mode (repoman / dbapi without match_pkgs): only
            # fall back to old-style PROVIDE-based virtual expansion.
            if portdb.cp_list(x.cp):
                newsplit.append(x)
            else:
                a = []
                myvartree = mytrees.get("vartree")
                if myvartree is not None:
                    mysettings._populate_treeVirtuals_if_needed(myvartree)
                mychoices = mysettings.getvirtuals().get(mykey, [])
                for y in mychoices:
                    a.append(Atom(x.replace(x.cp, y.cp, 1)))
                if not a:
                    newsplit.append(x)
                elif is_disjunction:
                    newsplit.extend(a)
                elif len(a) == 1:
                    newsplit.append(a[0])
                else:
                    newsplit.append(["||"] + a)
            continue

        pkgs = []
        # Ignore USE deps here, since otherwise we might not
        # get any matches. Choices with correct USE settings
        # will be preferred in dep_zapdeps().
        matches = portdb.match_pkgs(x.without_use)
        # Use descending order to prefer higher versions.
        matches.reverse()
        for pkg in matches:
            # only use new-style matches
            if pkg.cp.startswith("virtual/"):
                pkgs.append(pkg)

        mychoices = []
        if not pkgs and not portdb.cp_list(x.cp):
            myvartree = mytrees.get("vartree")
            if myvartree is not None:
                mysettings._populate_treeVirtuals_if_needed(myvartree)
            mychoices = mysettings.getvirtuals().get(mykey, [])

        if not (pkgs or mychoices):
            # This one couldn't be expanded as a new-style virtual. Old-style
            # virtuals have already been expanded by dep_virtual, so this one
            # is unavailable and dep_zapdeps will identify it as such. The
            # atom is not eliminated here since it may still represent a
            # dependency that needs to be satisfied.
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
            continue

        a = []
        for pkg in pkgs:
            # Build a version-specific atom for this virtual match,
            # carrying over any USE deps from the original atom.
            virt_atom = "=" + pkg.cpv
            if x.unevaluated_atom.use:
                virt_atom += str(x.unevaluated_atom.use)
                virt_atom = Atom(virt_atom)
                if parent is None:
                    if myuse is None:
                        virt_atom = virt_atom.evaluate_conditionals(
                            mysettings.get("PORTAGE_USE", "").split()
                        )
                    else:
                        virt_atom = virt_atom.evaluate_conditionals(myuse)
                else:
                    virt_atom = virt_atom.evaluate_conditionals(
                        pkg_use_enabled(parent)
                    )
            else:
                virt_atom = Atom(virt_atom)

            # Allow the depgraph to map this atom back to the
            # original, in order to avoid distortion in places
            # like display or conflict resolution code.
            virt_atom.__dict__["_orig_atom"] = x

            # According to GLEP 37, RDEPEND is the only dependency
            # type that is valid for new-style virtuals. Repoman
            # should enforce this.
            depstring = pkg._metadata["RDEPEND"]
            pkg_kwargs = kwargs.copy()
            pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
            if edebug:
                writemsg_level(
                    _("Virtual Parent: %s\n") % (pkg,),
                    noiselevel=-1,
                    level=logging.DEBUG,
                )
                writemsg_level(
                    _("Virtual Depstring: %s\n") % (depstring,),
                    noiselevel=-1,
                    level=logging.DEBUG,
                )

            # Set EAPI used for validation in dep_check() recursion.
            mytrees["virt_parent"] = pkg

            try:
                mycheck = dep_check(
                    depstring,
                    mydbapi,
                    mysettings,
                    myroot=myroot,
                    trees=trees,
                    **pkg_kwargs
                )
            finally:
                # Restore previous EAPI after recursion.
                if virt_parent is not None:
                    mytrees["virt_parent"] = virt_parent
                else:
                    del mytrees["virt_parent"]

            if not mycheck[0]:
                raise ParseError("%s: %s '%s'" % (pkg, mycheck[1], depstring))

            # Replace the original atom "x" with "virt_atom" which refers
            # to the specific version of the virtual whose deps we're
            # expanding. The virt_atom._orig_atom attribute is used
            # by depgraph to map virt_atom back to the original atom.
            # We specifically exclude the original atom "x" from the
            # the expanded output here, since otherwise it could trigger
            # incorrect dep_zapdeps behavior (see bug #597752).
            mycheck[1].append(virt_atom)
            a.append(mycheck[1])
            if atom_graph is not None:
                virt_atom_node = (virt_atom, id(virt_atom))
                atom_graph.add(virt_atom_node, graph_parent)
                atom_graph.add(pkg, virt_atom_node)
                atom_graph.add((x, id(x)), graph_parent)

        if not a and mychoices:
            # Check for a virtual package.provided match.
            for y in mychoices:
                new_atom = Atom(x.replace(x.cp, y.cp, 1))
                if match_from_list(new_atom, pprovideddict.get(new_atom.cp, [])):
                    a.append(new_atom)
                    if atom_graph is not None:
                        atom_graph.add((new_atom, id(new_atom)), graph_parent)

        if not a:
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
        elif is_disjunction:
            newsplit.extend(a)
        elif len(a) == 1:
            newsplit.extend(a[0])
        else:
            newsplit.append(["||"] + a)

    # For consistency with related functions like use_reduce, always
    # normalize the result to have a top-level conjunction.
    if is_disjunction:
        newsplit = [newsplit]

    return newsplit
def testAtom(self):
    """
    Verify Atom parsing: each case maps an atom string to its expected
    (operator, cp, version, slot, use, repo) decomposition under the
    given allow_wildcard/allow_repo settings; tests_xfail lists inputs
    that must raise InvalidAtom (or TypeError for a non-string).
    """
    # (atom, (operator, cp, version, slot, use, repo),
    #  allow_wildcard, allow_repo)
    tests = (
        ("=sys-apps/portage-2.1-r1:0[doc,a=,!b=,c?,!d?,-e]",
            ('=', 'sys-apps/portage', '2.1-r1', '0',
            '[doc,a=,!b=,c?,!d?,-e]', None), False, False),
        ("=sys-apps/portage-2.1-r1*:0[doc]",
            ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', None),
            False, False),
        ("sys-apps/portage:0[doc]",
            (None, 'sys-apps/portage', None, '0', '[doc]', None),
            False, False),
        ("sys-apps/portage:0[doc]",
            (None, 'sys-apps/portage', None, '0', '[doc]', None),
            False, False),
        ("*/*",
            (None, '*/*', None, None, None, None), True, False),
        ("=*/*-*9999*",
            ('=*', '*/*', '*9999*', None, None, None), True, False),
        ("=*/*-*9999*:0::repo_name",
            ('=*', '*/*', '*9999*', '0', None, 'repo_name'), True, True),
        ("=*/*-*_beta*",
            ('=*', '*/*', '*_beta*', None, None, None), True, False),
        ("=*/*-*_beta*:0::repo_name",
            ('=*', '*/*', '*_beta*', '0', None, 'repo_name'), True, True),
        ("sys-apps/*",
            (None, 'sys-apps/*', None, None, None, None), True, False),
        ("*/portage",
            (None, '*/portage', None, None, None, None), True, False),
        ("s*s-*/portage:1",
            (None, 's*s-*/portage', None, '1', None, None), True, False),
        ("*/po*ge:2",
            (None, '*/po*ge', None, '2', None, None), True, False),
        ("!dev-libs/A",
            (None, 'dev-libs/A', None, None, None, None), True, True),
        ("!!dev-libs/A",
            (None, 'dev-libs/A', None, None, None, None), True, True),
        ("!!dev-libs/A",
            (None, 'dev-libs/A', None, None, None, None), True, True),
        ("dev-libs/A[foo(+)]",
            (None, 'dev-libs/A', None, None, "[foo(+)]", None), True, True),
        ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
            (None, 'dev-libs/A', None, None,
            "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True),
        ("dev-libs/A:2[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
            (None, 'dev-libs/A', None, "2",
            "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True),
        ("=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]",
            ('=', 'sys-apps/portage', '2.1-r1', '0',
            '[doc,a=,!b=,c?,!d?,-e]', 'repo_name'), False, True),
        ("=sys-apps/portage-2.1-r1*:0::repo_name[doc]",
            ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]',
            'repo_name'), False, True),
        ("sys-apps/portage:0::repo_name[doc]",
            (None, 'sys-apps/portage', None, '0', '[doc]', 'repo_name'),
            False, True),
        ("*/*::repo_name",
            (None, '*/*', None, None, None, 'repo_name'), True, True),
        ("sys-apps/*::repo_name",
            (None, 'sys-apps/*', None, None, None, 'repo_name'),
            True, True),
        ("*/portage::repo_name",
            (None, '*/portage', None, None, None, 'repo_name'),
            True, True),
        ("s*s-*/portage:1::repo_name",
            (None, 's*s-*/portage', None, '1', None, 'repo_name'),
            True, True),
    )

    # (atom, allow_wildcard, allow_repo) — each must fail to parse.
    tests_xfail = (
        (Atom("sys-apps/portage"), False, False),
        ("cat/pkg[a!]", False, False),
        ("cat/pkg[!a]", False, False),
        ("cat/pkg[!a!]", False, False),
        ("cat/pkg[!a-]", False, False),
        ("cat/pkg[-a=]", False, False),
        ("cat/pkg[-a?]", False, False),
        ("cat/pkg[-a!]", False, False),
        ("cat/pkg[=a]", False, False),
        ("cat/pkg[=a=]", False, False),
        ("cat/pkg[=a?]", False, False),
        ("cat/pkg[=a!]", False, False),
        ("cat/pkg[=a-]", False, False),
        ("cat/pkg[?a]", False, False),
        ("cat/pkg[?a=]", False, False),
        ("cat/pkg[?a?]", False, False),
        ("cat/pkg[?a!]", False, False),
        ("cat/pkg[?a-]", False, False),
        ("sys-apps/portage[doc]:0", False, False),
        ("*/*", False, False),
        ("sys-apps/*", False, False),
        ("*/portage", False, False),
        ("*/**", True, False),
        ("*/portage[use]", True, False),
        ("cat/pkg[a()]", False, False),
        ("cat/pkg[a(]", False, False),
        ("cat/pkg[a)]", False, False),
        ("cat/pkg[a(,b]", False, False),
        ("cat/pkg[a),b]", False, False),
        ("cat/pkg[a(*)]", False, False),
        ("cat/pkg[a(*)]", True, False),
        ("cat/pkg[a(+-)]", False, False),
        ("cat/pkg[a()]", False, False),
        ("cat/pkg[(+)a]", False, False),
        ("cat/pkg[a=(+)]", False, False),
        ("cat/pkg[!(+)a=]", False, False),
        ("cat/pkg[!a=(+)]", False, False),
        ("cat/pkg[a?(+)]", False, False),
        ("cat/pkg[!a?(+)]", False, False),
        ("cat/pkg[!(+)a?]", False, False),
        ("cat/pkg[-(+)a]", False, False),
        ("cat/pkg[a(+),-a]", False, False),
        ("cat/pkg[a(-),-a]", False, False),
        ("cat/pkg[-a,a(+)]", False, False),
        ("cat/pkg[-a,a(-)]", False, False),
        ("cat/pkg[-a(+),a(-)]", False, False),
        ("cat/pkg[-a(-),a(+)]", False, False),
        ("sys-apps/portage[doc]::repo_name", False, False),
        ("sys-apps/portage:0[doc]::repo_name", False, False),
        ("sys-apps/portage[doc]:0::repo_name", False, False),
        ("=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]",
            False, False),
        ("=sys-apps/portage-2.1-r1*:0::repo_name[doc]", False, False),
        ("sys-apps/portage:0::repo_name[doc]", False, False),
        ("*/*::repo_name", True, False),
    )

    for atom, parts, allow_wildcard, allow_repo in tests:
        a = Atom(atom, allow_wildcard=allow_wildcard, allow_repo=allow_repo)
        op, cp, ver, slot, use, repo = parts
        self.assertEqual(op, a.operator,
            msg="Atom('%s').operator = %s == '%s'" % (atom, a.operator, op))
        self.assertEqual(cp, a.cp,
            msg="Atom('%s').cp = %s == '%s'" % (atom, a.cp, cp))
        # cpv includes the version component only when one was given.
        if ver is not None:
            cpv = "%s-%s" % (cp, ver)
        else:
            cpv = cp
        self.assertEqual(cpv, a.cpv,
            msg="Atom('%s').cpv = %s == '%s'" % (atom, a.cpv, cpv))
        self.assertEqual(slot, a.slot,
            msg="Atom('%s').slot = %s == '%s'" % (atom, a.slot, slot))
        self.assertEqual(repo, a.repo,
            msg="Atom('%s').repo == %s == '%s'" % (atom, a.repo, repo))

        if a.use:
            returned_use = str(a.use)
        else:
            returned_use = None
        self.assertEqual(use, returned_use,
            msg="Atom('%s').use = %s == '%s'" % (atom, returned_use, use))

    for atom, allow_wildcard, allow_repo in tests_xfail:
        self.assertRaisesMsg(atom, (InvalidAtom, TypeError), Atom, atom, \
            allow_wildcard=allow_wildcard, allow_repo=allow_repo)
def dep_zapdeps( unreduced, reduced, myroot, use_binaries=0, trees=None, minimize_slots=False ): """ Takes an unreduced and reduced deplist and removes satisfied dependencies. Returned deplist contains steps that must be taken to satisfy dependencies. """ if trees is None: trees = portage.db writemsg("ZapDeps -- %s\n" % (use_binaries), 2) if not reduced or unreduced == ["||"] or dep_eval(reduced): return [] if unreduced[0] != "||": unresolved = [] for x, satisfied in zip(unreduced, reduced): if isinstance(x, list): unresolved += dep_zapdeps( x, satisfied, myroot, use_binaries=use_binaries, trees=trees, minimize_slots=minimize_slots, ) elif not satisfied: unresolved.append(x) return unresolved # We're at a ( || atom ... ) type level and need to make a choice deps = unreduced[1:] satisfieds = reduced[1:] # Our preference order is for an the first item that: # a) contains all unmasked packages with the same key as installed packages # b) contains all unmasked packages # c) contains masked installed packages # d) is the first item preferred_in_graph = [] preferred_installed = preferred_in_graph preferred_any_slot = preferred_in_graph preferred_non_installed = [] unsat_use_in_graph = [] unsat_use_installed = [] unsat_use_non_installed = [] other_installed = [] other_installed_some = [] other_installed_any_slot = [] other = [] # unsat_use_* must come after preferred_non_installed # for correct ordering in cases like || ( foo[a] foo[b] ). 
choice_bins = ( preferred_in_graph, preferred_non_installed, unsat_use_in_graph, unsat_use_installed, unsat_use_non_installed, other_installed, other_installed_some, other_installed_any_slot, other, ) # Alias the trees we'll be checking availability against parent = trees[myroot].get("parent") virt_parent = trees[myroot].get("virt_parent") priority = trees[myroot].get("priority") graph_db = trees[myroot].get("graph_db") graph = trees[myroot].get("graph") pkg_use_enabled = trees[myroot].get("pkg_use_enabled") graph_interface = trees[myroot].get("graph_interface") downgrade_probe = trees[myroot].get("downgrade_probe") circular_dependency = trees[myroot].get("circular_dependency") vardb = None if "vartree" in trees[myroot]: vardb = trees[myroot]["vartree"].dbapi if use_binaries: mydbapi = trees[myroot]["bintree"].dbapi else: mydbapi = trees[myroot]["porttree"].dbapi try: mydbapi_match_pkgs = mydbapi.match_pkgs except AttributeError: def mydbapi_match_pkgs(atom): return [mydbapi._pkg_str(cpv, atom.repo) for cpv in mydbapi.match(atom)] # Sort the deps into installed, not installed but already # in the graph and other, not installed and not in the graph # and other, with values of [[required_atom], availablility] for x, satisfied in zip(deps, satisfieds): if isinstance(x, list): atoms = dep_zapdeps( x, satisfied, myroot, use_binaries=use_binaries, trees=trees, minimize_slots=minimize_slots, ) else: atoms = [x] if vardb is None: # When called by repoman, we can simply return the first choice # because dep_eval() handles preference selection. return atoms all_available = True all_use_satisfied = True all_use_unmasked = True conflict_downgrade = False installed_downgrade = False slot_atoms = collections.defaultdict(list) slot_map = {} cp_map = {} for atom in atoms: if atom.blocker: continue # It's not a downgrade if parent is replacing child. 
replacing = ( parent and graph_interface and graph_interface.will_replace_child(parent, myroot, atom) ) # Ignore USE dependencies here since we don't want USE # settings to adversely affect || preference evaluation. avail_pkg = mydbapi_match_pkgs(atom.without_use) if not avail_pkg and replacing: avail_pkg = [replacing] if avail_pkg: avail_pkg = avail_pkg[-1] # highest (ascending order) avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot)) if not avail_pkg: all_available = False all_use_satisfied = False break if not replacing and graph_db is not None and downgrade_probe is not None: slot_matches = graph_db.match_pkgs(avail_slot) if ( len(slot_matches) > 1 and avail_pkg < slot_matches[-1] and not downgrade_probe(avail_pkg) ): # If a downgrade is not desirable, then avoid a # choice that pulls in a lower version involved # in a slot conflict (bug #531656). conflict_downgrade = True if atom.use: avail_pkg_use = mydbapi_match_pkgs(atom) if not avail_pkg_use: all_use_satisfied = False if pkg_use_enabled is not None: # Check which USE flags cause the match to fail, # so we can prioritize choices that do not # require changes to use.mask or use.force # (see bug #515584). violated_atom = atom.violated_conditionals( pkg_use_enabled(avail_pkg), avail_pkg.iuse.is_valid_flag ) # Note that violated_atom.use can be None here, # since evaluation can collapse conditional USE # deps that cause the match to fail due to # missing IUSE (match uses atom.unevaluated_atom # to detect such missing IUSE). 
if violated_atom.use is not None: for flag in violated_atom.use.enabled: if flag in avail_pkg.use.mask: all_use_unmasked = False break else: for flag in violated_atom.use.disabled: if ( flag in avail_pkg.use.force and flag not in avail_pkg.use.mask ): all_use_unmasked = False break else: # highest (ascending order) avail_pkg_use = avail_pkg_use[-1] if avail_pkg_use != avail_pkg: avail_pkg = avail_pkg_use avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot)) if not replacing and downgrade_probe is not None and graph is not None: highest_in_slot = mydbapi_match_pkgs(avail_slot) highest_in_slot = highest_in_slot[-1] if highest_in_slot else None if ( avail_pkg and highest_in_slot and avail_pkg < highest_in_slot and not downgrade_probe(avail_pkg) and (highest_in_slot.installed or highest_in_slot in graph) ): installed_downgrade = True slot_map[avail_slot] = avail_pkg slot_atoms[avail_slot].append(atom) highest_cpv = cp_map.get(avail_pkg.cp) all_match_current = None all_match_previous = None if highest_cpv is not None and highest_cpv.slot == avail_pkg.slot: # If possible, make the package selection internally # consistent by choosing a package that satisfies all # atoms which match a package in the same slot. Later on, # the package version chosen here is used in the # has_upgrade/has_downgrade logic to prefer choices with # upgrades, and a package choice that is not internally # consistent will lead the has_upgrade/has_downgrade logic # to produce invalid results (see bug 600346). 
all_match_current = all( a.match(avail_pkg) for a in slot_atoms[avail_slot] ) all_match_previous = all( a.match(highest_cpv) for a in slot_atoms[avail_slot] ) if all_match_previous and not all_match_current: continue current_higher = ( highest_cpv is None or vercmp(avail_pkg.version, highest_cpv.version) > 0 ) if current_higher or (all_match_current and not all_match_previous): cp_map[avail_pkg.cp] = avail_pkg want_update = False if graph_interface is None or graph_interface.removal_action: new_slot_count = len(slot_map) else: new_slot_count = 0 for slot_atom, avail_pkg in slot_map.items(): if parent is not None and graph_interface.want_update_pkg( parent, avail_pkg ): want_update = True if not slot_atom.cp.startswith("virtual/") and not graph_db.match_pkgs( slot_atom ): new_slot_count += 1 this_choice = _dep_choice( atoms=atoms, slot_map=slot_map, cp_map=cp_map, all_available=all_available, all_installed_slots=False, new_slot_count=new_slot_count, all_in_graph=False, want_update=want_update, ) if all_available: # The "all installed" criterion is not version or slot specific. # If any version of a package is already in the graph then we # assume that it is preferred over other possible packages choices. all_installed = True for atom in set(Atom(atom.cp) for atom in atoms if not atom.blocker): # New-style virtuals have zero cost to install. if not vardb.match(atom) and not atom.startswith("virtual/"): all_installed = False break all_installed_slots = False if all_installed: all_installed_slots = True for slot_atom in slot_map: # New-style virtuals have zero cost to install. 
if not vardb.match(slot_atom) and not slot_atom.startswith( "virtual/" ): all_installed_slots = False break this_choice.all_installed_slots = all_installed_slots if graph_db is None: if all_use_satisfied: if all_installed: if all_installed_slots: preferred_installed.append(this_choice) else: preferred_any_slot.append(this_choice) else: preferred_non_installed.append(this_choice) else: if not all_use_unmasked: other.append(this_choice) elif all_installed_slots: unsat_use_installed.append(this_choice) else: unsat_use_non_installed.append(this_choice) elif conflict_downgrade or installed_downgrade: other.append(this_choice) else: all_in_graph = True for atom in atoms: # New-style virtuals have zero cost to install. if atom.blocker or atom.cp.startswith("virtual/"): continue # We check if the matched package has actually been # added to the digraph, in order to distinguish between # those packages and installed packages that may need # to be uninstalled in order to resolve blockers. if not any(pkg in graph for pkg in graph_db.match_pkgs(atom)): all_in_graph = False break this_choice.all_in_graph = all_in_graph circular_atom = None if parent and parent.onlydeps: # Check if the atom would result in a direct circular # dependency and avoid that for --onlydeps arguments # since it can defeat the purpose of --onlydeps. # This check should only be used for --onlydeps # arguments, since it can interfere with circular # dependency backtracking choices, causing the test # case for bug 756961 to fail. cpv_slot_list = [parent] for atom in atoms: if atom.blocker: continue if vardb.match(atom): # If the atom is satisfied by an installed # version then it's not a circular dep. 
continue if atom.cp != parent.cp: continue if match_from_list(atom, cpv_slot_list): circular_atom = atom break if circular_atom is None and circular_dependency is not None: for circular_child in itertools.chain( circular_dependency.get(parent, []), circular_dependency.get(virt_parent, []), ): for atom in atoms: if not atom.blocker and atom.match(circular_child): circular_atom = atom break if circular_atom is not None: break if circular_atom is not None: other.append(this_choice) else: if all_use_satisfied: if all_in_graph: preferred_in_graph.append(this_choice) elif all_installed: if all_installed_slots: preferred_installed.append(this_choice) else: preferred_any_slot.append(this_choice) else: preferred_non_installed.append(this_choice) else: if not all_use_unmasked: other.append(this_choice) elif all_in_graph: unsat_use_in_graph.append(this_choice) elif all_installed_slots: unsat_use_installed.append(this_choice) else: unsat_use_non_installed.append(this_choice) else: all_installed = True some_installed = False for atom in atoms: if not atom.blocker: if vardb.match(atom): some_installed = True else: all_installed = False if all_installed: this_choice.all_installed_slots = True other_installed.append(this_choice) elif some_installed: other_installed_some.append(this_choice) # Use Atom(atom.cp) for a somewhat "fuzzy" match, since # the whole atom may be too specific. For example, see # bug #522652, where using the whole atom leads to an # unsatisfiable choice. elif any(vardb.match(Atom(atom.cp)) for atom in atoms if not atom.blocker): other_installed_any_slot.append(this_choice) else: other.append(this_choice) # Prefer choices which contain upgrades to higher slots. This helps # for deps such as || ( foo:1 foo:2 ), where we want to prefer the # atom which matches the higher version rather than the atom furthest # to the left. Sorting is done separately for each of choice_bins, so # as not to interfere with the ordering of the bins. 
Because of the # bin separation, the main function of this code is to allow # --depclean to remove old slots (rather than to pull in new slots). for choices in choice_bins: if len(choices) < 2: continue if minimize_slots: # Prefer choices having fewer new slots. When used with DNF form, # this can eliminate unecessary packages that depclean would # ultimately eliminate (see bug 632026). Only use this behavior # when deemed necessary by the caller, since this will discard the # order specified in the ebuild, and the preferences specified # there can serve as a crucial sources of guidance (see bug 645002). # NOTE: Under some conditions, new_slot_count value may have some # variance from one calculation to the next because it depends on # the order that packages are added to the graph. This variance can # contribute to outcomes that appear to be random. Meanwhile, # the order specified in the ebuild is without variance, so it # does not have this problem. choices.sort(key=operator.attrgetter("new_slot_count")) for choice_1 in choices[1:]: cps = set(choice_1.cp_map) for choice_2 in choices: if choice_1 is choice_2: # choice_1 will not be promoted, so move on break if ( choice_1.all_installed_slots and not choice_2.all_installed_slots and not choice_2.want_update ): # promote choice_1 in front of choice_2 choices.remove(choice_1) index_2 = choices.index(choice_2) choices.insert(index_2, choice_1) break intersecting_cps = cps.intersection(choice_2.cp_map) has_upgrade = False has_downgrade = False for cp in intersecting_cps: version_1 = choice_1.cp_map[cp] version_2 = choice_2.cp_map[cp] difference = vercmp(version_1.version, version_2.version) if difference != 0: if difference > 0: has_upgrade = True else: has_downgrade = True if ( # Prefer upgrades. (has_upgrade and not has_downgrade) # Prefer choices where all packages have been pulled into # the graph, except for choices that eliminate upgrades. 
or ( choice_1.all_in_graph and not choice_2.all_in_graph and not (has_downgrade and not has_upgrade) ) ): # promote choice_1 in front of choice_2 choices.remove(choice_1) index_2 = choices.index(choice_2) choices.insert(index_2, choice_1) break for allow_masked in (False, True): for choices in choice_bins: for choice in choices: if choice.all_available or allow_masked: return choice.atoms assert False # This point should not be reachable
def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
	trees=None, use_mask=None, use_force=None, **kwargs):
	"""
	In order to solve bug #141118, recursively expand new-style virtuals so
	as to collapse one or more levels of indirection, generating an expanded
	search space. In dep_zapdeps, new-style virtuals will be assigned
	zero cost regardless of whether or not they are currently installed. Virtual
	blockers are supported but only when the virtual expands to a single
	atom because it wouldn't necessarily make sense to block all the components
	of a compound virtual.  When more than one new-style virtual is matched,
	the matches are sorted from highest to lowest versions and the atom is
	expanded to || ( highest match ... lowest match )."""
	newsplit = []
	mytrees = trees[myroot]
	portdb = mytrees["porttree"].dbapi
	atom_graph = mytrees.get("atom_graph")
	parent = mytrees.get("parent")
	virt_parent = mytrees.get("virt_parent")
	graph_parent = None
	eapi = None
	if parent is not None:
		if virt_parent is not None:
			graph_parent = virt_parent
			eapi = virt_parent[0].metadata['EAPI']
		else:
			graph_parent = parent
			eapi = parent.metadata["EAPI"]
	repoman = not mysettings.local_config
	if kwargs["use_binaries"]:
		portdb = trees[myroot]["bintree"].dbapi
	myvirtuals = mysettings.getvirtuals()
	pprovideddict = mysettings.pprovideddict
	myuse = kwargs["myuse"]
	for x in mysplit:
		if x == "||":
			newsplit.append(x)
			continue
		elif isinstance(x, list):
			# Nested dependency group: recurse.
			newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
				mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
				use_force=use_force, **kwargs))
			continue

		if not isinstance(x, Atom):
			try:
				x = Atom(x)
			except InvalidAtom:
				if portage.dep._dep_check_strict:
					raise ParseError(
						_("invalid atom: '%s'") % x)
				else:
					# Only real Atom instances are allowed past this point.
					continue
			else:
				# EAPI 0/1 forbid "!!" blockers and USE deps.
				if x.blocker and x.blocker.overlap.forbid and \
					eapi in ("0", "1") and portage.dep._dep_check_strict:
					raise ParseError(
						_("invalid atom: '%s'") % (x,))
				if x.use and eapi in ("0", "1") and \
					portage.dep._dep_check_strict:
					raise ParseError(
						_("invalid atom: '%s'") % (x,))

		if repoman and x.use and x.use.conditional:
			evaluated_atom = remove_slot(x)
			if x.slot:
				evaluated_atom += ":%s" % x.slot
			evaluated_atom += str(x.use._eval_qa_conditionals(
				use_mask, use_force))
			x = Atom(evaluated_atom)

		if not repoman:
			# multilib-portage hack: inject a lib32? USE dep on every atom
			# whose key is not listed in NO_AUTO_FLAG.
			# Fixed: default to "" instead of None -- "x not in None"
			# raises TypeError whenever NO_AUTO_FLAG is unset.
			if 'lib32' not in x and portage.dep_getkey(x) not in \
				mysettings.get("NO_AUTO_FLAG", ""):
				if ']' in x:
					x = str(x).replace(']', ',lib32?]')
				else:
					x = str(x) + '[lib32?]'
				try:
					x = portage.dep.Atom(x)
				except portage.exception.InvalidAtom:
					if portage.dep._dep_check_strict:
						raise portage.exception.ParseError(
							"invalid atom: '%s'" % x)

		if myuse is not None and isinstance(x, Atom) and x.use:
			if x.use.conditional:
				x = x.evaluate_conditionals(myuse)

		mykey = x.cp
		if not mykey.startswith("virtual/"):
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
			continue
		mychoices = myvirtuals.get(mykey, [])
		if x.blocker:
			# Virtual blockers are no longer expanded here since
			# the un-expanded virtual atom is more useful for
			# maintaining a cache of blocker atoms.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
			continue

		if repoman or not hasattr(portdb, 'match_pkgs'):
			if portdb.cp_list(x.cp):
				newsplit.append(x)
			else:
				# TODO: Add PROVIDE check for repoman.
				a = []
				for y in mychoices:
					a.append(Atom(x.replace(x.cp, y.cp, 1)))
				if not a:
					newsplit.append(x)
				elif len(a) == 1:
					newsplit.append(a[0])
				else:
					newsplit.append(['||'] + a)
			continue

		pkgs = []
		# Ignore USE deps here, since otherwise we might not
		# get any matches. Choices with correct USE settings
		# will be preferred in dep_zapdeps().
		matches = portdb.match_pkgs(x.without_use)
		# Use descending order to prefer higher versions.
		matches.reverse()
		for pkg in matches:
			# only use new-style matches
			if pkg.cp.startswith("virtual/"):
				pkgs.append(pkg)
		if not (pkgs or mychoices):
			# This one couldn't be expanded as a new-style virtual.  Old-style
			# virtuals have already been expanded by dep_virtual, so this one
			# is unavailable and dep_zapdeps will identify it as such.  The
			# atom is not eliminated here since it may still represent a
			# dependency that needs to be satisfied.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
			continue

		a = []
		for pkg in pkgs:
			virt_atom = '=' + pkg.cpv
			if x.use:
				virt_atom += str(x.use)
			virt_atom = Atom(virt_atom)
			# According to GLEP 37, RDEPEND is the only dependency
			# type that is valid for new-style virtuals. Repoman
			# should enforce this.
			depstring = pkg.metadata['RDEPEND']
			pkg_kwargs = kwargs.copy()
			pkg_kwargs["myuse"] = pkg.use.enabled
			if edebug:
				writemsg_level(_("Virtual Parent:      %s\n") \
					% (pkg,), noiselevel=-1, level=logging.DEBUG)
				writemsg_level(_("Virtual Depstring:   %s\n") \
					% (depstring,), noiselevel=-1, level=logging.DEBUG)

			# Set EAPI used for validation in dep_check() recursion.
			mytrees["virt_parent"] = (pkg, virt_atom)

			try:
				mycheck = dep_check(depstring, mydbapi, mysettings,
					myroot=myroot, trees=trees, **pkg_kwargs)
			finally:
				# Restore previous EAPI after recursion.
				if virt_parent is not None:
					mytrees["virt_parent"] = virt_parent
				else:
					del mytrees["virt_parent"]

			if not mycheck[0]:
				# Fixed: report the virtual package being expanded; the
				# previous code referenced 'y[0]', a loop variable that is
				# not bound in this loop (NameError/stale value).
				raise ParseError(
					"%s: %s '%s'" % (pkg, mycheck[1], depstring))

			# pull in the new-style virtual
			mycheck[1].append(virt_atom)
			a.append(mycheck[1])
			if atom_graph is not None:
				atom_graph.add(virt_atom, graph_parent)

		# Plain old-style virtuals.  New-style virtuals are preferred.
		if not pkgs:
			for y in mychoices:
				new_atom = Atom(x.replace(x.cp, y.cp, 1))
				matches = portdb.match(new_atom)
				# portdb is an instance of depgraph._dep_check_composite_db, so
				# USE conditionals are already evaluated.
				if matches and mykey in \
					portdb.aux_get(matches[-1], ['PROVIDE'])[0].split():
					a.append(new_atom)
					if atom_graph is not None:
						atom_graph.add(new_atom, graph_parent)

		if not a and mychoices:
			# Check for a virtual package.provided match.
			for y in mychoices:
				new_atom = Atom(x.replace(x.cp, y.cp, 1))
				if match_from_list(new_atom,
					pprovideddict.get(new_atom.cp, [])):
					a.append(new_atom)
					if atom_graph is not None:
						atom_graph.add(new_atom, graph_parent)

		if not a:
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
		elif len(a) == 1:
			newsplit.append(a[0])
		else:
			newsplit.append(['||'] + a)

	return newsplit
def __call__(self, argv):
	"""
	Dispatch a single IPC query command and return its result streams.

	@return: tuple of (stdout, stderr, returncode)
	"""
	# Python 3:
	# cmd, root, *args = argv
	cmd, root, args = argv[0], argv[1], argv[2:]

	qa_warnings = []
	warnings_str = ''

	db = self.get_db()
	eapi = self.settings.get('EAPI')

	root = normalize_path(root or os.sep).rstrip(os.sep) + os.sep
	if root not in db:
		return ('', '%s: Invalid ROOT: %s\n' % (cmd, root), 3)

	portdb = db[root]["porttree"].dbapi
	vardb = db[root]["vartree"].dbapi

	if cmd in ('best_version', 'has_version'):
		allow_repo = eapi_has_repo_deps(eapi)
		try:
			atom = Atom(args[0], allow_repo=allow_repo)
		except InvalidAtom:
			return ('', '%s: Invalid atom: %s\n' % (cmd, args[0]), 2)

		# Re-parse with EAPI validation; a failure here only produces a
		# QA warning and the EAPI-agnostic atom above is kept.
		try:
			atom = Atom(args[0], allow_repo=allow_repo, eapi=eapi)
		except InvalidAtom as e:
			qa_warnings.append("QA Notice: %s: %s" % (cmd, e))

		use = self.settings.get('PORTAGE_BUILT_USE')
		if use is None:
			use = self.settings['PORTAGE_USE']
		atom = atom.evaluate_conditionals(frozenset(use.split()))

	if qa_warnings:
		warnings_str = self._elog('eqawarn', qa_warnings)

	if cmd == 'has_version':
		returncode = 0 if vardb.match(atom) else 1
		return ('', warnings_str, returncode)

	if cmd == 'best_version':
		return ('%s\n' % best(vardb.match(atom)), warnings_str, 0)

	if cmd in ('master_repositories', 'repository_path',
		'available_eclasses', 'eclass_path', 'license_path'):
		if _repo_name_re.match(args[0]) is None:
			return ('', '%s: Invalid repository: %s\n' % (cmd, args[0]), 2)
		try:
			repo = portdb.repositories[args[0]]
		except KeyError:
			return ('', warnings_str, 1)

		if cmd == 'master_repositories':
			return ('%s\n' % ' '.join(x.name for x in repo.masters),
				warnings_str, 0)
		if cmd == 'repository_path':
			return ('%s\n' % repo.location, warnings_str, 0)
		if cmd == 'available_eclasses':
			return ('%s\n' % ' '.join(sorted(repo.eclass_db.eclasses)),
				warnings_str, 0)
		if cmd == 'eclass_path':
			try:
				eclass = repo.eclass_db.eclasses[args[1]]
			except KeyError:
				return ('', warnings_str, 1)
			return ('%s\n' % eclass.location, warnings_str, 0)
		# license_path: masters searched last-to-first, repo itself first.
		candidates = reversed([os.path.join(x.location, 'licenses', args[1])
			for x in list(repo.masters) + [repo]])
		for path in candidates:
			if os.path.exists(path):
				return ('%s\n' % path, warnings_str, 0)
		return ('', warnings_str, 1)

	return ('', 'Invalid command: %s\n' % cmd, 3)
def parse_actions(args, dbapi, cache, quiet=False, strict=False, cleanupact=(),
		dataout=sys.stdout, output=sys.stderr):
	"""Parse command-line arguments into a list of ActionSets.

	Each argument is first tried as an Action; anything that is not an
	action is treated as a package atom, expanded against *dbapi* (with a
	plain-Atom fallback for portage versions lacking wildcard support).

	@param args: iterable of raw command-line argument strings
	@param dbapi: portage dbapi used for atom expansion
	@param cache: cache object passed through to ActionSet
	@param quiet: suppress ParserWarning output (unless strict)
	@param strict: treat ParserWarning as fatal
	@param cleanupact: cleanup-action factories; currently unsupported and
		any non-empty value raises NotImplementedError.  The default was
		changed from a mutable list literal ([]) to an immutable tuple to
		avoid the shared-mutable-default pitfall; callers passing a list
		are unaffected.
	@param dataout: stream handed to Action for data output
	@param output: stream for diagnostics
	@return: list of ActionSets, or None on a fatal parse error
	"""
	out = []
	actset = ActionSet(cache=cache)

	for i, a in enumerate(args):
		if not a:
			continue
		try:
			try:
				act = Action(a, output=dataout)
			except NotAnAction:
				try:
					atom = dep_expand(a, mydb=dbapi, settings=dbapi.settings)
					if atom.startswith('null/'):
						raise InvalidAtom(atom)
				except AmbiguousPackageName as e:
					raise ParserError('ambiguous package name, matching: %s' % e)
				except InvalidAtom as e:
					# Fallback: raw atom parse; allow_wildcard is not
					# accepted by older portage (TypeError).
					try:
						try:
							atom = Atom(a, allow_wildcard=True)
						except TypeError:
							atom = Atom(a)
					except InvalidAtom as e:
						raise ParserError('invalid package atom: %s' % e)
				actset.append(atom)
			except ParserWarning:
				# NOTE(review): if Action() itself raised this warning,
				# 'act' may be unbound here -- confirm Action's contract.
				actset.append(act)
				raise
			else:
				actset.append(act)
		except ParserError as e:
			output.write('At argv[%d]=\'%s\': %s\n' % (i + 1, a, e))
			output.write('Aborting.\n')
			return None
		except ParserWarning as e:
			if not quiet or strict:
				output.write('At argv[%d]=\'%s\': %s\n' % (i + 1, a, e))
				if strict:
					output.write('Strict mode, aborting.\n')
					return None

	if actset:
		out.append(actset)

	if cleanupact:
		raise NotImplementedError(
			'Cleanup actions are currently disabled due to missing wildcard support'
		)
		# Unreachable until the raise above is removed; kept as the
		# intended implementation for when wildcard support lands.
		actset = ActionSet(cache=cache)
		for a in cleanupact:
			actset.append(a(dbapi))
		out.append(actset)

	return out
def __init__(self, **kwargs):
	"""
	Initialize a Package from raw metadata plus Task/SlotObject keyword
	arguments.  Validates the cpv/SLOT/IUSE metadata, derives the cached
	attribute set (cp, version, slot, slot_atom, iuse, ...), and computes
	the immutable hash key used for graph identity.
	"""
	# Wrap raw metadata before Task.__init__ consumes the remaining kwargs.
	metadata = _PackageMetadataWrapperBase(kwargs.pop('metadata'))
	Task.__init__(self, **kwargs)
	# The SlotObject constructor assigns self.root_config from keyword
	# args; it is an instance of the _emerge.RootConfig.RootConfig class.
	self.root = self.root_config.root
	self._raw_metadata = metadata
	self._metadata = _PackageMetadataWrapper(self, metadata)
	if not self.built:
		# Unbuilt packages inherit CHOST from the active profile settings.
		self._metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
	eapi_attrs = _get_eapi_attrs(self.eapi)

	try:
		db = self.cpv._db
	except AttributeError:
		if self.built:
			# For independence from the source ebuild repository and
			# profile implicit IUSE state, require the _db attribute
			# for built packages.
			raise
		db = self.root_config.trees['porttree'].dbapi

	# Re-validate cpv against metadata/settings; _pkg_str exposes parsed
	# fields (cp, version, slot, sub_slot, repo) used below.
	self.cpv = _pkg_str(self.cpv, metadata=self._metadata,
		settings=self.root_config.settings, db=db)
	if hasattr(self.cpv, 'slot_invalid'):
		self._invalid_metadata('SLOT.invalid',
			"SLOT: invalid value: '%s'" % self._metadata["SLOT"])
	self.cpv_split = self.cpv.cpv_split
	self.category, self.pf = portage.catsplit(self.cpv)
	self.cp = self.cpv.cp
	self.version = self.cpv.version
	self.slot = self.cpv.slot
	self.sub_slot = self.cpv.sub_slot
	self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
	# sync metadata with validated repo (may be UNKNOWN_REPO)
	self._metadata['repository'] = self.cpv.repo

	# local_config distinguishes a normal install from a repoman-style
	# check, which uses a different implicit-IUSE matcher.
	if self.root_config.settings.local_config:
		implicit_match = db._iuse_implicit_cnstr(self.cpv, self._metadata)
	else:
		implicit_match = db._repoman_iuse_implicit_cnstr(
			self.cpv, self._metadata)
	usealiases = self.root_config.settings._use_manager.getUseAliases(self)
	self.iuse = self._iuse(self, self._metadata["IUSE"].split(),
		implicit_match, usealiases, self.eapi)

	# IUSE defaults (+flag/-flag) are only legal in EAPIs that allow them;
	# installed packages are exempt from this QA check.
	if (self.iuse.enabled or self.iuse.disabled) and \
		not eapi_attrs.iuse_defaults:
		if not self.installed:
			self._invalid_metadata('EAPI.incompatible',
				"IUSE contains defaults, but EAPI doesn't allow them")
	if self.inherited is None:
		self.inherited = frozenset()

	# Default the operation: onlydeps/installed packages are not merged.
	if self.operation is None:
		if self.onlydeps or self.installed:
			self.operation = "nomerge"
		else:
			self.operation = "merge"

	# Hash key/identity is fixed at construction time.
	self._hash_key = Package._gen_hash_key(cpv=self.cpv,
		installed=self.installed, onlydeps=self.onlydeps,
		operation=self.operation, repo_name=self.cpv.repo,
		root_config=self.root_config,
		type_name=self.type_name)
	self._hash_value = hash(self._hash_key)
def create_world_atom(pkg, args_set, root_config, before_install=False):
	"""Create a new atom for the world file if one does not exist.  If the
	argument atom is precise enough to identify a specific slot then a slot
	atom will be returned. Atoms that are in the system set may also be stored
	in world since system atoms can only match one slot while world atoms can
	be greedy with respect to slots.  Unslotted system packages will not be
	stored in world.

	Returns None when nothing should be added (no matching argument atom,
	atom already in the selected set, or the package is covered by the
	system set)."""

	arg_atom = args_set.findAtomForPackage(pkg)
	if not arg_atom:
		return None
	cp = arg_atom.cp
	new_world_atom = cp
	if arg_atom.repo:
		new_world_atom += _repo_separator + arg_atom.repo
	sets = root_config.sets
	portdb = root_config.trees["porttree"].dbapi
	vardb = root_config.trees["vartree"].dbapi

	if arg_atom.repo is not None:
		repos = [arg_atom.repo]
	else:
		# Iterate over portdbapi.porttrees, since it's common to
		# tweak this attribute in order to adjust match behavior.
		repos = []
		for tree in portdb.porttrees:
			repos.append(portdb.repositories.get_name_for_location(tree))

	# Collect every SLOT available for this cp across the relevant repos;
	# KeyError/InvalidData from removed/corrupt entries are ignored.
	available_slots = set()
	for cpv in portdb.match(Atom(cp)):
		for repo in repos:
			try:
				available_slots.add(
					portdb._pkg_str(_unicode(cpv), repo).slot)
			except (KeyError, InvalidData):
				pass

	# "Slotted" means more than one slot, or a single non-"0" slot.
	slotted = len(available_slots) > 1 or \
		(len(available_slots) == 1 and "0" not in available_slots)
	if not slotted:
		# check the vdb in case this is multislot
		available_slots = set(vardb._pkg_str(cpv, None).slot \
			for cpv in vardb.match(Atom(cp)))
		slotted = len(available_slots) > 1 or \
			(len(available_slots) == 1 and "0" not in available_slots)
	if slotted and arg_atom.without_repo != cp:
		# If the user gave a specific atom, store it as a
		# slot atom in the world file.
		slot_atom = pkg.slot_atom

		# For USE=multislot, there are a couple of cases to
		# handle here:
		#
		# 1) SLOT="0", but the real SLOT spontaneously changed to some
		#    unknown value, so just record an unslotted atom.
		#
		# 2) SLOT comes from an installed package and there is no
		#    matching SLOT in the portage tree.
		#
		# Make sure that the slot atom is available in either the
		# portdb or the vardb, since otherwise the user certainly
		# doesn't want the SLOT atom recorded in the world file
		# (case 1 above).  If it's only available in the vardb,
		# the user may be trying to prevent a USE=multislot
		# package from being removed by --depclean (case 2 above).

		mydb = portdb
		if not portdb.match(slot_atom):
			# SLOT seems to come from an installed multislot package
			mydb = vardb
		# If there is no installed package matching the SLOT atom,
		# it probably changed SLOT spontaneously due to USE=multislot,
		# so just record an unslotted atom.
		if vardb.match(slot_atom) or before_install:
			# Now verify that the argument is precise
			# enough to identify a specific slot.
			matches = mydb.match(arg_atom)
			matched_slots = set()
			if before_install:
				matched_slots.add(pkg.slot)
			if mydb is vardb:
				for cpv in matches:
					matched_slots.add(mydb._pkg_str(cpv, None).slot)
			else:
				for cpv in matches:
					for repo in repos:
						try:
							matched_slots.add(
								portdb._pkg_str(_unicode(cpv), repo).slot)
						except (KeyError, InvalidData):
							pass

			# Only record a slot atom when the argument pins a single slot.
			if len(matched_slots) == 1:
				new_world_atom = slot_atom
				if arg_atom.repo:
					new_world_atom += _repo_separator + arg_atom.repo

	if new_world_atom == sets["selected"].findAtomForPackage(pkg):
		# Both atoms would be identical, so there's nothing to add.
		return None
	if not slotted and not arg_atom.repo:
		# Unlike world atoms, system atoms are not greedy for slots, so they
		# can't be safely excluded from world if they are slotted.
		system_atom = sets["system"].findAtomForPackage(pkg)
		if system_atom:
			if not system_atom.cp.startswith("virtual/"):
				return None
			# System virtuals aren't safe to exclude from world since they can
			# match multiple old-style virtuals but only one of them will be
			# pulled in by update or depclean.
			providers = portdb.settings.getvirtuals().get(system_atom.cp)
			if providers and len(providers) == 1 and \
				providers[0].cp == arg_atom.cp:
				return None
	return new_world_atom
def set_pkg_info(self, pkg, ordered):
	"""Build a PkgInfo summary for one package in the display list.

	@param pkg: _emerge.Package.Package instance
	@param ordered: bool -- True when the package occupies a real
		position in the merge order (counters only change for those)
	@rtype: PkgInfo
	Modifies self.counters.restrict_fetch,
		self.counters.restrict_fetch_satisfied
	"""
	info = PkgInfo()
	info.cp = pkg.cp
	info.ver = self.get_ver_str(pkg)
	info.slot = pkg.slot
	info.sub_slot = pkg.sub_slot
	info.repo_name = pkg.repo
	info.ordered = ordered
	info.operation = pkg.operation
	info.merge = ordered and info.operation == "merge"
	if info.operation == "merge" and not info.merge:
		info.operation = "nomerge"
	info.built = pkg.type_name != "ebuild"
	info.ebuild_path = None

	# Tally binaries/uninstalls for ordered entries only.
	if ordered:
		if info.merge:
			if pkg.type_name == "binary":
				self.counters.binary += 1
		elif info.operation == "uninstall":
			self.counters.uninst += 1

	if pkg.type_name == "ebuild":
		info.ebuild_path = self.portdb.findname(
			pkg.cpv, myrepo=info.repo_name)
		if info.ebuild_path is None:
			raise AssertionError(
				"ebuild not found for '%s'" % pkg.cpv)
		# The repository root sits three directories above the ebuild.
		repo_root = os.path.dirname(os.path.dirname(
			os.path.dirname(info.ebuild_path)))
		info.repo_path_real = repo_root
	else:
		info.repo_path_real = self.portdb.getRepositoryPath(pkg.repo)

	info.use = list(self.conf.pkg_use_enabled(pkg))

	fetch_restricted = (not pkg.built and pkg.operation == 'merge'
		and 'fetch' in pkg.restrict)
	if fetch_restricted:
		if info.ordered:
			self.counters.restrict_fetch += 1
		info.attr_display.fetch_restrict = True
		# No fetch sizes means everything needed is already present.
		if not self.portdb.getfetchsizes(
			pkg.cpv, useflags=info.use, myrepo=pkg.repo):
			info.attr_display.fetch_restrict_satisfied = True
			if info.ordered:
				self.counters.restrict_fetch_satisfied += 1
		else:
			if info.ebuild_path is not None:
				self.restrict_fetch_list[pkg] = info

	# Find the installed counterpart, preferring an exact cpv match
	# (the SLOT may have changed) over a slot-atom match.
	if self.vardb.cpv_exists(pkg.cpv):
		info.previous_pkg = self.vardb.match_pkgs(
			Atom('=' + pkg.cpv))[0]
	else:
		installed = self.vardb.match_pkgs(pkg.slot_atom)
		if installed:
			info.previous_pkg = installed[0]

	return info
def getmaskingreason(mycpv, metadata=None, settings=None,
	portdb=None, return_location=False, myrepo=None):
	"""
	Return the package.mask comment that applies to mycpv, or None when
	no matching mask entry (or no comment) is found.  When
	return_location is True, a (comment, filename) tuple is returned
	instead (or (None, None)).

	If specified, the myrepo argument is assumed to be valid. This
	should be a safe assumption since portdbapi methods always
	return valid repo names and valid "repository" metadata from
	aux_get.
	"""
	if settings is None:
		settings = portage.settings
	if portdb is None:
		portdb = portage.portdb
	mysplit = catpkgsplit(mycpv)
	if not mysplit:
		raise ValueError(_("invalid CPV: %s") % mycpv)

	if metadata is None:
		db_keys = list(portdb._aux_cache_keys)
		try:
			metadata = dict(zip(db_keys,
				portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
		except KeyError:
			# If the cpv exists, a KeyError from aux_get is tolerated
			# and metadata simply stays None.
			if not portdb.cpv_exists(mycpv):
				raise
		else:
			if myrepo is None:
				myrepo = _gen_valid_repo(metadata["repository"])

	elif myrepo is None:
		myrepo = metadata.get("repository")
		if myrepo is not None:
			myrepo = _gen_valid_repo(metadata["repository"])

	if metadata is not None and \
		not portage.eapi_is_supported(metadata["EAPI"]):
		# Return early since otherwise we might produce invalid
		# results given that the EAPI is not supported. Also,
		# metadata is mostly useless in this case since it doesn't
		# contain essential things like SLOT.
		if return_location:
			return (None, None)
		else:
			return None

	# Sometimes we can't access SLOT or repository due to corruption.
	pkg = mycpv
	try:
		pkg.slot
	except AttributeError:
		# Plain string cpv: upgrade to _pkg_str for slot/repo matching.
		pkg = _pkg_str(mycpv, metadata=metadata, repo=myrepo)

	cpv_slot_list = [pkg]

	mycp = pkg.cp

	# XXX- This is a temporary duplicate of code from the config constructor.
	# Build the ordered list of profile/overlay/user locations whose
	# package.mask files will be scanned (reversed so that user config
	# is considered first).
	locations = [os.path.join(settings["PORTDIR"], "profiles")]
	locations.extend(settings.profiles)
	for ov in settings["PORTDIR_OVERLAY"].split():
		profdir = os.path.join(normalize_path(ov), "profiles")
		if os.path.isdir(profdir):
			locations.append(profdir)
	locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
		USER_CONFIG_PATH))
	locations.reverse()

	# Group package.mask lines per source file, preserving order, so the
	# comment block immediately preceding a mask atom can be recovered.
	pmasklists = []
	for profile in locations:
		pmask_filename = os.path.join(profile, "package.mask")
		node = None
		for l, recursive_filename in grablines(pmask_filename,
			recursive=1, remember_source_file=True):
			if node is None or node[0] != recursive_filename:
				node = (recursive_filename, [])
				pmasklists.append(node)
			node[1].append(l)

	pmaskdict = settings._mask_manager._pmaskdict
	if mycp in pmaskdict:
		for x in pmaskdict[mycp]:
			if match_from_list(x, cpv_slot_list):
				x = x.without_repo
				for pmask in pmasklists:
					# comment accumulates the '#' lines seen so far;
					# comment_valid tracks the line index up to which
					# that accumulated comment still applies.
					comment = ""
					comment_valid = -1
					pmask_filename = pmask[0]
					for i in range(len(pmask[1])):
						l = pmask[1][i].strip()
						try:
							l_atom = Atom(l, allow_repo=True,
								allow_wildcard=True).without_repo
						except InvalidAtom:
							l_atom = None
						if l == "":
							# Blank line terminates the comment block.
							comment = ""
							comment_valid = -1
						elif l[0] == "#":
							comment += (l + "\n")
							comment_valid = i + 1
						elif l_atom == x:
							if comment_valid != i:
								comment = ""
							if return_location:
								return (comment, pmask_filename)
							else:
								return comment
						elif comment_valid != -1:
							# Apparently this comment applies to multiple masks, so
							# it remains valid until a blank line is encountered.
							comment_valid += 1
	if return_location:
		return (None, None)
	else:
		return None
def check(self, **kwargs):
	'''Perform profile dependant dependency checks

	@param arches:
	@param pkg: Package in which we check (object).
	@param ebuild: Ebuild which we check (object).
	@param baddepsyntax: boolean
	@param unknown_pkgs: set of tuples (type, atom.unevaluated_atom)
	@returns: dictionary
	'''
	ebuild = kwargs.get('ebuild').get()
	pkg = kwargs.get('pkg').get()

	unknown_pkgs, baddepsyntax = _depend_checks(
		ebuild, pkg, self.portdb, self.qatracker, self.repo_metadata,
		self.repo_settings.qadata)

	# Collect (keyword, groups, profile) triples for every profile of
	# every arch this ebuild's KEYWORDS touch.
	relevant_profiles = []
	for keyword, arch, groups in _gen_arches(ebuild, self.options,
		self.repo_settings, self.profiles):
		if arch not in self.profiles:
			# A missing profile will create an error further down
			# during the KEYWORDS verification.
			continue

		if self.include_arches is not None:
			if arch not in self.include_arches:
				continue

		relevant_profiles.extend(
			(keyword, groups, prof) for prof in self.profiles[arch])

	relevant_profiles.sort(key=sort_key)

	for keyword, groups, prof in relevant_profiles:
		# Only check stable profiles, plus dev/exp profiles when the
		# corresponding command-line options request them.
		is_stable_profile = prof.status == "stable"
		is_dev_profile = prof.status == "dev" and \
			self.options.include_dev
		is_exp_profile = prof.status == "exp" and \
			self.options.include_exp_profiles == 'y'
		if not (is_stable_profile or is_dev_profile or is_exp_profile):
			continue

		# Lazily build and cache a portage.config per profile sub_path.
		dep_settings = self.caches['arch'].get(prof.sub_path)
		if dep_settings is None:
			dep_settings = portage.config(
				config_profile_path=prof.abs_path,
				config_incrementals=self.repoman_incrementals,
				config_root=self.repo_settings.config_root,
				local_config=False,
				_unmatched_removal=self.options.unmatched_removal,
				env=self.env,
				repositories=self.repo_settings.repoman_settings.repositories)

			if not prof.abs_path:
				self._populate_implicit_iuse(dep_settings,
					self.repo_settings.repo_config.eclass_db.porttrees)

			dep_settings.categories = \
				self.repo_settings.repoman_settings.categories
			if self.options.without_mask:
				# Deep-copy before clearing so the shared mask manager
				# is not mutated.
				dep_settings._mask_manager_obj = \
					copy.deepcopy(dep_settings._mask_manager)
				dep_settings._mask_manager._pmaskdict.clear()
			self.caches['arch'][prof.sub_path] = dep_settings

		# Cache portdb xmatch results per (profile, keyword-groups).
		xmatch_cache_key = (prof.sub_path, tuple(groups))
		xcache = self.caches['arch_xmatch'].get(xmatch_cache_key)
		if xcache is None:
			self.portdb.melt()
			self.portdb.freeze()
			xcache = self.portdb.xcache
			xcache.update(self.caches['shared_xmatch'])
			self.caches['arch_xmatch'][xmatch_cache_key] = xcache

		# Point the shared portdb/tree at this profile's settings.
		self.repo_settings.trees[
			self.repo_settings.root]["porttree"].settings = dep_settings
		self.portdb.settings = dep_settings
		self.portdb.xcache = xcache

		dep_settings["ACCEPT_KEYWORDS"] = " ".join(groups)
		# just in case, prevent config.reset() from nuking these.
		dep_settings.backup_changes("ACCEPT_KEYWORDS")

		# This attribute is used in dbapi._match_use() to apply
		# use.stable.{mask,force} settings based on the stable
		# status of the parent package. This is required in order
		# for USE deps of unstable packages to be resolved correctly,
		# since otherwise use.stable.{mask,force} settings of
		# dependencies may conflict (see bug #456342).
		dep_settings._parent_stable = dep_settings._isStable(pkg)

		# Handle package.use*.{force,mask} calculation, for use
		# in dep_check.
		dep_settings.useforce = dep_settings._use_manager.getUseForce(
			pkg, stable=dep_settings._parent_stable)
		dep_settings.usemask = dep_settings._use_manager.getUseMask(
			pkg, stable=dep_settings._parent_stable)

		if not baddepsyntax:
			ismasked = not ebuild.archs or \
				pkg.cpv not in self.portdb.xmatch("match-visible",
				Atom("%s::%s" % (pkg.cp, self.repo_settings.repo_config.name)))
			if ismasked:
				if not self.have['pmasked']:
					self.have['pmasked'] = bool(
						dep_settings._getMaskAtom(pkg.cpv, ebuild.metadata))
				if self.options.ignore_masked:
					continue
				# we are testing deps for a masked package; give it some lee-way
				suffix = "masked"
				matchmode = "minimum-all-ignore-profile"
			else:
				suffix = ""
				matchmode = "minimum-visible"

			if not self.have['dev_keywords']:
				self.have['dev_keywords'] = \
					bool(self.dev_keywords.intersection(ebuild.keywords))

			if prof.status == "dev":
				suffix = suffix + "indev"
			elif prof.status == "exp":
				suffix = suffix + "inexp"

			for mytype in Package._dep_keys:
				mykey = "dependency.bad" + suffix
				myvalue = ebuild.metadata[mytype]
				if not myvalue:
					continue

				success, atoms = portage.dep_check(
					myvalue, self.portdb, dep_settings,
					use="all", mode=matchmode, trees=self.repo_settings.trees)
				if success:
					if atoms:
						# Don't bother with dependency.unknown for
						# cases in which *DEPEND.bad is triggered.
						for atom in atoms:
							# dep_check returns all blockers and they
							# aren't counted for *DEPEND.bad, so we
							# ignore them here.
							if not atom.blocker:
								unknown_pkgs.discard(
									(mytype, atom.unevaluated_atom))

						if not prof.sub_path:
							# old-style virtuals currently aren't
							# resolvable with empty profile, since
							# 'virtuals' mappings are unavailable
							# (it would be expensive to search
							# for PROVIDE in all ebuilds)
							atoms = [
								atom for atom in atoms if not (
									atom.cp.startswith('virtual/')
									and not self.portdb.cp_list(atom.cp))]

						# we have some unsolvable deps
						# remove ! deps, which always show up as unsatisfiable
						all_atoms = [
							str(atom.unevaluated_atom)
							for atom in atoms if not atom.blocker]

						# if we emptied out our list, continue:
						if not all_atoms:
							continue

						# Filter out duplicates.  We do this by hand (rather
						# than use a set) so the order is stable and better
						# matches the order that's in the ebuild itself.
						atoms = []
						for atom in all_atoms:
							if atom not in atoms:
								atoms.append(atom)

						if self.options.output_style in ['column']:
							self.qatracker.add_error(mykey,
								"%s: %s: %s(%s) %s"
								% (ebuild.relative_path, mytype, keyword,
									prof, repr(atoms)))
						else:
							self.qatracker.add_error(mykey,
								"%s: %s: %s(%s)\n%s"
								% (ebuild.relative_path, mytype, keyword,
									prof, pformat(atoms, indent=6)))
				else:
					# dep_check itself failed; report whatever it returned.
					if self.options.output_style in ['column']:
						self.qatracker.add_error(mykey,
							"%s: %s: %s(%s) %s"
							% (ebuild.relative_path, mytype, keyword,
								prof, repr(atoms)))
					else:
						self.qatracker.add_error(mykey,
							"%s: %s: %s(%s)\n%s"
							% (ebuild.relative_path, mytype, keyword,
								prof, pformat(atoms, indent=6)))

	# Report any dependencies that no profile could resolve, grouped by
	# dependency type.
	if not baddepsyntax and unknown_pkgs:
		type_map = {}
		for mytype, atom in unknown_pkgs:
			type_map.setdefault(mytype, set()).add(atom)
		for mytype, atoms in type_map.items():
			self.qatracker.add_error(
				"dependency.unknown", "%s: %s: %s"
				% (ebuild.relative_path, mytype,
					", ".join(sorted(atoms))))

	return False
def test_violated_conditionals(self):
	"""
	Check Atom.violated_conditionals() against expected reduced atoms.

	Each case is (atom, other_use, iuse, parent_use, expected) where
	other_use holds the flags enabled in the matched package, iuse the
	flags that package declares (drives the (+)/(-) missing-flag
	defaults), and parent_use the flags enabled in the depending
	package.  parent_use=None means conditional USE deps cannot be
	evaluated and must raise InvalidAtom (see test_cases_xfail).
	"""
	test_cases = (
		("dev-libs/A", ["foo"], ["foo"], None, "dev-libs/A"),
		("dev-libs/A[foo]", [], ["foo"], None, "dev-libs/A[foo]"),
		("dev-libs/A[foo]", ["foo"], ["foo"], None, "dev-libs/A"),
		("dev-libs/A[foo]", [], ["foo"], [], "dev-libs/A[foo]"),
		("dev-libs/A[foo]", ["foo"], ["foo"], [], "dev-libs/A"),
		("dev-libs/A:0[foo]", ["foo"], ["foo"], [], "dev-libs/A:0"),
		("dev-libs/A[foo,-bar]", [], ["foo", "bar"], None, "dev-libs/A[foo]"),
		("dev-libs/A[-foo,bar]", [], ["foo", "bar"], None, "dev-libs/A[bar]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,b=,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,!e?]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,-f]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"],
			["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"],
			["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"],
			["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"],
			["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"],
			["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"],
			["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a,!c=,-f]"),
		("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"],
			["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c(+)=]"),
		("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"],
			["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a(-),!c(-)=]"),
		("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"],
			["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a(+),!c(+)=]"),
		("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"],
			["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a(-),!c(-)=]"),
		("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"],
			["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a(+),!c(+)=]"),
		("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"],
			["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
		("dev-libs/A[a(+),b(+)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"],
			["a"], ["a"], "dev-libs/A[b(+)=,!e(+)?]"),
		("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"],
			["b"], ["b"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
		("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"],
			["c"], ["c"], "dev-libs/A[!c(+)=,!e(+)?]"),
		("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"],
			["d"], ["d"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
		("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"],
			["e"], ["e"], "dev-libs/A"),
		("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"],
			["f"], ["f"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
		#Some more test cases to trigger all remaining code paths
		("dev-libs/B[x?]", [], ["x"], ["x"], "dev-libs/B[x?]"),
		("dev-libs/B[x(+)?]", [], [], ["x"], "dev-libs/B"),
		("dev-libs/B[x(-)?]", [], [], ["x"], "dev-libs/B[x(-)?]"),
		("dev-libs/C[x=]", [], ["x"], ["x"], "dev-libs/C[x=]"),
		("dev-libs/C[x(+)=]", [], [], ["x"], "dev-libs/C"),
		("dev-libs/C[x(-)=]", [], [], ["x"], "dev-libs/C[x(-)=]"),
		("dev-libs/D[!x=]", [], ["x"], ["x"], "dev-libs/D"),
		("dev-libs/D[!x(+)=]", [], [], ["x"], "dev-libs/D[!x(+)=]"),
		("dev-libs/D[!x(-)=]", [], [], ["x"], "dev-libs/D"),
		#Missing IUSE test cases
		("dev-libs/B[x]", [], [], [], "dev-libs/B[x]"),
		("dev-libs/B[-x]", [], [], [], "dev-libs/B[-x]"),
		("dev-libs/B[x?]", [], [], [], "dev-libs/B[x?]"),
		("dev-libs/B[x=]", [], [], [], "dev-libs/B[x=]"),
		("dev-libs/B[!x=]", [], [], ["x"], "dev-libs/B[!x=]"),
		("dev-libs/B[!x?]", [], [], ["x"], "dev-libs/B[!x?]"),
	)

	test_cases_xfail = (
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [],
			["a", "b", "c", "d", "e", "f"], None),
	)

	class use_flag_validator(object):
		def __init__(self, iuse):
			self.iuse = iuse

		def is_valid_flag(self, flag):
			# Bug fix: consult the stored flag set instead of accidentally
			# closing over the enclosing loop variable `iuse`, which only
			# happened to hold the right value at call time.
			return flag in self.iuse

	for atom, other_use, iuse, parent_use, expected_violated_atom in test_cases:
		a = Atom(atom)
		validator = use_flag_validator(iuse)
		violated_atom = a.violated_conditionals(
			other_use, validator.is_valid_flag, parent_use)
		if parent_use is None:
			fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
				(atom, " ".join(other_use), " ".join(iuse), "None",
				str(violated_atom), expected_violated_atom)
		else:
			fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
				(atom, " ".join(other_use), " ".join(iuse), " ".join(parent_use),
				str(violated_atom), expected_violated_atom)
		self.assertEqual(str(violated_atom), expected_violated_atom, fail_msg)

	for atom, other_use, iuse, parent_use in test_cases_xfail:
		a = Atom(atom)
		validator = use_flag_validator(iuse)
		# Conditional USE deps with parent_use=None must raise.
		self.assertRaisesMsg(atom, InvalidAtom, \
			a.violated_conditionals, other_use, validator.is_valid_flag, parent_use)
def _read_dirVirtuals(self, profiles):
    """
    Load and stack the 'virtuals' files found in the given profiles.

    Each profile may map a virtual atom (a plain, unversioned,
    non-blocker cp atom) to a list of provider atoms.  Malformed
    entries are reported via writemsg() and skipped.  The stacked
    result is stored in self._dirVirtuals, with provider preference
    decreasing from left to right.
    """
    stacked = []
    for profile_dir in profiles:
        virtuals_file = os.path.join(profile_dir, "virtuals")
        profile_virtuals = {}
        for key, provider_tokens in grabdict(virtuals_file).items():
            # The key must parse as a simple cp atom: no version
            # operator, no slot/USE, and no blocker prefix.
            try:
                virt_atom = Atom(key)
            except InvalidAtom:
                virt_atom = None
            else:
                if virt_atom.blocker or str(virt_atom) != str(virt_atom.cp):
                    virt_atom = None
            if virt_atom is None:
                writemsg(
                    _("--- Invalid virtuals atom in %s: %s\n")
                    % (virtuals_file, key),
                    noiselevel=-1,
                )
                continue
            providers = []
            for token in provider_tokens:
                # A leading "-" marks an incremental removal; parse the
                # remainder but remember the original token.
                candidate = token[1:] if token[:1] == "-" else token
                try:
                    provider_atom = Atom(candidate)
                except InvalidAtom:
                    provider_atom = None
                else:
                    if provider_atom.blocker:
                        provider_atom = None
                if provider_atom is None:
                    writemsg(
                        _("--- Invalid atom in %s: %s\n")
                        % (virtuals_file, token),
                        noiselevel=-1,
                    )
                elif token == str(provider_atom):
                    # normal atom, so return as Atom instance
                    providers.append(provider_atom)
                else:
                    # atom has special prefix, so return as string
                    providers.append(token)
            if providers:
                profile_virtuals[virt_atom] = providers
        if profile_virtuals:
            stacked.append(profile_virtuals)
    self._dirVirtuals = stack_dictlist(stacked, incremental=True)
    # Preference for virtuals decreases from left to right.
    for virt in self._dirVirtuals:
        self._dirVirtuals[virt].reverse()
def process_submission(request):
    """
    Save and parse a stats submission.

    Validates the AUTH and PROTOCOL data, persists the raw request for
    debugging, then records the host's configuration, installed packages
    and reported sets.  Any validation problem raises
    BadRequestException with a client-readable message.
    """
    # Before continuing let's save the whole request (for debugging):
    try:
        raw_request_filename = save_request(request)
    except FileExistsException:
        raise BadRequestException("Error: Unable to save your request.")

    # Parse the request:
    try:
        data = json.loads(request.body)
    except Exception:
        error_message = "Error: Unable to parse JSON data."
        logger.warning("process_submission(): " + error_message, exc_info=True)
        raise BadRequestException(error_message)

    # Check for AUTH data:
    try:
        # Make UUIDs case-insensitive by always using lower().
        uuid = data['AUTH']['UUID'].lower()
        upload_key = data['AUTH']['PASSWD']
    except KeyError:
        error_message = "Error: Incomplete AUTH data."
        logger.info("process_submission(): " + error_message, exc_info=True)
        raise BadRequestException(error_message)

    try:
        protocol = data['PROTOCOL']
    except KeyError:
        error_message = "Error: No protocol specified."
        logger.info("process_submission(): " + error_message, exc_info=True)
        raise BadRequestException(error_message)
    # Bug fix: this check used to be an `assert`, which is silently
    # stripped when Python runs with -O; validate explicitly instead.
    # (`type(...) is not int` kept so bools are still rejected.)
    if type(protocol) is not int:
        error_message = "Error: PROTOCOL must be an integer."
        logger.info("process_submission(): " + error_message)
        raise BadRequestException(error_message)

    if protocol != CURRENT_PROTOCOL_VERSION:
        logger.info(
            "process_submission(): Unsupported protocol: %s." % (protocol),
            exc_info=True
        )
        raise BadRequestException(
            "Error: Unsupported protocol " + \
            "(only version %d is supported). " % CURRENT_PROTOCOL_VERSION + \
            "Please update your client."
        )

    lastsync = data.get('LASTSYNC')
    if lastsync:
        try:
            # FIXME: I've hardcoded the time zone here.
            # Here's why: http://bugs.python.org/issue6641 .
            lastsync = datetime.utcfromtimestamp(
                time.mktime(
                    time.strptime(lastsync, "%a, %d %b %Y %H:%M:%S +0000")
                )
            )
            lastsync = lastsync.replace(tzinfo=utc)
        except ValueError:
            error_message = "Error: Invalid date in LASTSYNC."
            logger.info("process_submission(): " + error_message, exc_info=True)
            raise BadRequestException(error_message)

    try:
        host, _ = Host.objects.get_or_create(id=uuid, upload_key=upload_key)
        host.full_clean()
    except IntegrityError:
        error_message = "Error: Invalid password."
        logger.info("process_submission(): " + error_message, exc_info=True)
        raise BadRequestException(error_message)
    except ValidationError:
        error_message = "Error: Invalid AUTH values."
        logger.info("process_submission(): " + error_message, exc_info=True)
        raise BadRequestException(error_message + " Is your password too long?")

    # Bug fix: the original called map(validate_item, ...) on these
    # lists; map() is lazy on Python 3, so the validations never ran.
    # We also default each to [] so the later .add(*...) calls don't
    # raise TypeError when a key is missing from the submission.
    # AFAIK using bulk_create here is not worth it.
    features = data.get('FEATURES') or []
    features = [Feature.objects.get_or_create(name=f)[0] for f in features]
    for item in features:
        validate_item(item)

    useflags = data.get('USE') or []
    useflags = [UseFlag.objects.get_or_create(name=u)[0] for u in useflags]
    for item in useflags:
        validate_item(item)

    keywords = data.get('ACCEPT_KEYWORDS') or []
    keywords = [Keyword.objects.get_or_create(name=k)[0] for k in keywords]
    for item in keywords:
        validate_item(item)

    mirrors = data.get('GENTOO_MIRRORS') or []
    mirrors = [MirrorServer.objects.get_or_create(url=m)[0] for m in mirrors]
    for item in mirrors:
        validate_item(item)

    lang = data.get('LANG')
    if lang:
        lang, _ = Lang.objects.get_or_create(name=lang)
        validate_item(lang)

    sync = data.get('SYNC')
    if sync:
        sync, _ = SyncServer.objects.get_or_create(url=sync)
        validate_item(sync)

    ip_addr = request.META['REMOTE_ADDR']
    fwd_addr = request.META.get('HTTP_X_FORWARDED_FOR') # TODO

    submission = Submission.objects.create(
        raw_request_filename = raw_request_filename,
        host          = host,
        country       = GeoIP().country_name(ip_addr),
        email         = data['AUTH'].get('EMAIL'),
        ip_addr       = ip_addr,
        fwd_addr      = fwd_addr,
        protocol      = protocol,
        arch          = data.get('ARCH'),
        chost         = data.get('CHOST'),
        cbuild        = data.get('CBUILD'),
        ctarget       = data.get('CTARGET'),
        platform      = data.get('PLATFORM'),
        profile       = data.get('PROFILE'),
        makeconf      = data.get('MAKECONF'),
        cflags        = data.get('CFLAGS'),
        cxxflags      = data.get('CXXFLAGS'),
        ldflags       = data.get('LDFLAGS'),
        fflags        = data.get('FFLAGS'),
        makeopts      = data.get('MAKEOPTS'),
        emergeopts    = data.get('EMERGE_DEFAULT_OPTS'),
        syncopts      = data.get('PORTAGE_RSYNC_EXTRA_OPTS'),
        acceptlicense = data.get('ACCEPT_LICENSE'),
        lang          = lang,
        sync          = sync,
        lastsync      = lastsync,
    )
    submission.features.add(*features)
    submission.mirrors.add(*mirrors)
    submission.global_use.add(*useflags)
    submission.global_keywords.add(*keywords)

    packages = data.get('PACKAGES')
    if packages:
        for package, info in packages.items():
            try:
                atom = PortageAtom(
                    "=" + package,
                    allow_wildcard=False,
                    allow_repo=True,
                )
                #assert atom.blocker == False and atom.operator == '='
                category, package_name = atom.cp.split('/')
                category, created = Category.objects.get_or_create(name=category)
                if created:
                    category.full_clean()
                package_name, created = PackageName.objects.get_or_create(name=package_name)
                if created:
                    package_name.full_clean()
                repo = atom.repo or info.get('REPO')
                if repo:
                    repo, created = Repository.objects.get_or_create(name=repo)
                    if created:
                        repo.full_clean()
                # Bug fix: the original used atom.cpv.lstrip(atom.cp), but
                # lstrip() strips a *character set*, not a prefix, and can
                # eat leading version characters.  Slice off "<cp>-" instead.
                # Also bind the model to `pkg` rather than shadowing the
                # `package` loop variable, so the error message below still
                # shows the submitted atom string.
                pkg, created = Package.objects.get_or_create(
                    category     = category,
                    package_name = package_name,
                    version      = atom.cpv[len(atom.cp) + 1:],
                    slot         = atom.slot,
                    repository   = repo,
                )
                if created:
                    pkg.full_clean()
            except (InvalidAtom, ValidationError):
                error_message = "Error: Atom '%s' failed validation." % package
                logger.info("process_submission(): " + error_message, exc_info=True)
                raise BadRequestException(error_message)

            keyword = info.get('KEYWORD')
            if keyword:
                keyword, created = Keyword.objects.get_or_create(name=keyword)
                if created:
                    keyword.full_clean()

            built_at = info.get('BUILD_TIME')
            if not built_at:
                # Sometimes clients report BUILD_TIME as ''.
                built_at = None
            else:
                built_at = datetime.utcfromtimestamp(float(built_at))
                built_at = built_at.replace(tzinfo=utc)

            build_duration = info.get('BUILD_DURATION')
            if not build_duration:
                # '' -> None
                build_duration = None

            size = info.get('SIZE')
            if not size:
                # '' -> None
                size = None

            installation, created = Installation.objects.get_or_create(
                package        = pkg,
                keyword        = keyword,
                built_at       = built_at,
                build_duration = build_duration, # TODO
                size           = size,
            )
            iuse = get_useflag_objects(info.get('IUSE'))
            pkguse = get_useflag_objects(info.get('PKGUSE'))
            use = get_useflag_objects(info.get('USE'))
            if iuse:
                installation.iuse.add(*iuse)
            if pkguse:
                installation.pkguse.add(*pkguse)
            if use:
                installation.use.add(*use)
            if created:
                installation.full_clean()
            submission.installations.add(installation)

    reported_sets = data.get('WORLDSET')
    if reported_sets:
        for set_name, entries in reported_sets.items():
            try:
                atom_set, _ = AtomSet.objects.get_or_create(
                    name  = set_name,
                    owner = submission,
                )
                for entry in entries:
                    try:
                        if entry.startswith(SET_PREFIX):
                            # Entry names another set; record the nesting.
                            subset_name = entry[len(SET_PREFIX):]
                            subset, _ = AtomSet.objects.get_or_create(
                                name  = subset_name,
                                owner = submission,
                            )
                            subset.full_clean()
                            atom_set.subsets.add(subset)
                        else:
                            patom = PortageAtom(
                                entry,
                                allow_wildcard=False,
                                allow_repo=True,
                            )
                            category, package_name = patom.cp.split('/')
                            category, _ = Category.objects.get_or_create(name=category)
                            category.full_clean()
                            package_name, _ = PackageName.objects.get_or_create(name=package_name)
                            package_name.full_clean()
                            repo = patom.repo
                            if repo:
                                repo, created = Repository.objects.get_or_create(name=repo)
                                if created:
                                    repo.full_clean()
                            atom, _ = Atom.objects.get_or_create(
                                full_atom    = entry,
                                operator     = patom.operator or '',
                                category     = category,
                                package_name = package_name,
                                # Prefix slice, not lstrip() — see above.
                                # Empty when the atom is unversioned.
                                version      = patom.cpv[len(patom.cp) + 1:],
                                slot         = patom.slot,
                                repository   = repo,
                            )
                            atom.full_clean()
                            atom_set.atoms.add(atom)
                    except (InvalidAtom, ValidationError):
                        error_message = \
                            "Error: Atom/set '%s' failed validation." % entry
                        logger.info("process_submission(): " + error_message, exc_info=True)
                        raise BadRequestException(error_message)
                atom_set.full_clean()
                submission.reported_sets.add(atom_set)
            except ValidationError:
                # Bug fix: the original referenced an undefined name
                # `selectedset` here, turning this path into a NameError.
                error_message = \
                    "Error: Selected set '%s' failed validation." \
                    % set_name
                logger.info("process_submission(): " + error_message, exc_info=True)
                raise BadRequestException(error_message)

    submission.full_clean()
    return HttpResponse("Success")
def test_violated_conditionals(self):
	"""
	Check Atom.violated_conditionals() against expected reduced atoms.

	Each case is (atom, other_use, iuse, parent_use, expected) where
	other_use holds the flags enabled in the matched package, iuse the
	flags that package declares (drives the (+)/(-) missing-flag
	defaults), and parent_use the flags enabled in the depending
	package.  parent_use=None means conditional USE deps cannot be
	evaluated and must raise InvalidAtom (see test_cases_xfail).
	"""
	test_cases = (
		("dev-libs/A", ["foo"], ["foo"], None, "dev-libs/A"),
		("dev-libs/A[foo]", [], ["foo"], None, "dev-libs/A[foo]"),
		("dev-libs/A[foo]", ["foo"], ["foo"], None, "dev-libs/A"),
		("dev-libs/A[foo]", [], ["foo"], [], "dev-libs/A[foo]"),
		("dev-libs/A[foo]", ["foo"], ["foo"], [], "dev-libs/A"),
		("dev-libs/A:0[foo]", ["foo"], ["foo"], [], "dev-libs/A:0"),
		("dev-libs/A[foo,-bar]", [], ["foo", "bar"], None, "dev-libs/A[foo]"),
		("dev-libs/A[-foo,bar]", [], ["foo", "bar"], None, "dev-libs/A[bar]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,b=,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,!e?]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"],
			["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,-f]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"],
			["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"],
			["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"],
			["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"],
			["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"],
			["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a,!c=]"),
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"],
			["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a,!c=,-f]"),
		("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"],
			["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c(+)=]"),
		("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"],
			["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a(-),!c(-)=]"),
		("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"],
			["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a(+),!c(+)=]"),
		("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"],
			["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a(-),!c(-)=]"),
		("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"],
			["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a(+),!c(+)=]"),
		("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"],
			["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
		("dev-libs/A[a(+),b(+)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"],
			["a"], ["a"], "dev-libs/A[b(+)=,!e(+)?]"),
		("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"],
			["b"], ["b"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
		("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"],
			["c"], ["c"], "dev-libs/A[!c(+)=,!e(+)?]"),
		("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"],
			["d"], ["d"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
		("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"],
			["e"], ["e"], "dev-libs/A"),
		("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"],
			["f"], ["f"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
		#Some more test cases to trigger all remaining code paths
		("dev-libs/B[x?]", [], ["x"], ["x"], "dev-libs/B[x?]"),
		("dev-libs/B[x(+)?]", [], [], ["x"], "dev-libs/B"),
		("dev-libs/B[x(-)?]", [], [], ["x"], "dev-libs/B[x(-)?]"),
		("dev-libs/C[x=]", [], ["x"], ["x"], "dev-libs/C[x=]"),
		("dev-libs/C[x(+)=]", [], [], ["x"], "dev-libs/C"),
		("dev-libs/C[x(-)=]", [], [], ["x"], "dev-libs/C[x(-)=]"),
		("dev-libs/D[!x=]", [], ["x"], ["x"], "dev-libs/D"),
		("dev-libs/D[!x(+)=]", [], [], ["x"], "dev-libs/D[!x(+)=]"),
		("dev-libs/D[!x(-)=]", [], [], ["x"], "dev-libs/D"),
		#Missing IUSE test cases
		("dev-libs/B[x]", [], [], [], "dev-libs/B[x]"),
		("dev-libs/B[-x]", [], [], [], "dev-libs/B[-x]"),
		("dev-libs/B[x?]", [], [], [], "dev-libs/B[x?]"),
		("dev-libs/B[x=]", [], [], [], "dev-libs/B[x=]"),
		("dev-libs/B[!x=]", [], [], ["x"], "dev-libs/B[!x=]"),
		("dev-libs/B[!x?]", [], [], ["x"], "dev-libs/B[!x?]"),
	)

	test_cases_xfail = (
		("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [],
			["a", "b", "c", "d", "e", "f"], None),
	)

	class use_flag_validator(object):
		def __init__(self, iuse):
			self.iuse = iuse

		def is_valid_flag(self, flag):
			# Bug fix: consult the stored flag set instead of accidentally
			# closing over the enclosing loop variable `iuse`, which only
			# happened to hold the right value at call time.
			return flag in self.iuse

	for atom, other_use, iuse, parent_use, expected_violated_atom in test_cases:
		a = Atom(atom)
		validator = use_flag_validator(iuse)
		violated_atom = a.violated_conditionals(
			other_use, validator.is_valid_flag, parent_use)
		if parent_use is None:
			fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
				(atom, " ".join(other_use), " ".join(iuse), "None",
				str(violated_atom), expected_violated_atom)
		else:
			fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
				(atom, " ".join(other_use), " ".join(iuse), " ".join(parent_use),
				str(violated_atom), expected_violated_atom)
		self.assertEqual(str(violated_atom), expected_violated_atom, fail_msg)

	for atom, other_use, iuse, parent_use in test_cases_xfail:
		a = Atom(atom)
		validator = use_flag_validator(iuse)
		# Conditional USE deps with parent_use=None must raise.
		self.assertRaisesMsg(atom, InvalidAtom, \
			a.violated_conditionals, other_use, validator.is_valid_flag, parent_use)
def _expand_new_virtuals(
    mysplit,
    edebug,
    mydbapi,
    mysettings,
    myroot="/",
    trees=None,
    use_mask=None,
    use_force=None,
    **kwargs
):
    """
    In order to solve bug #141118, recursively expand new-style virtuals so
    as to collapse one or more levels of indirection, generating an expanded
    search space. In dep_zapdeps, new-style virtuals will be assigned zero
    cost regardless of whether or not they are currently installed. Virtual
    blockers are supported but only when the virtual expands to a single
    atom because it wouldn't necessarily make sense to block all the
    components of a compound virtual.  When more than one new-style virtual
    is matched, the matches are sorted from highest to lowest versions and
    the atom is expanded to || ( highest match ... lowest match ).

    @param mysplit: a parsed dependency list (nested lists of Atom
        instances and "||" markers); returned structure mirrors it.
    @return: a new dependency list with virtual/* atoms expanded.
    @raises ParseError: for non-Atom tokens or unparsable virtual RDEPEND.
    """
    newsplit = []
    mytrees = trees[myroot]
    portdb = mytrees["porttree"].dbapi
    pkg_use_enabled = mytrees.get("pkg_use_enabled")
    # Atoms are stored in the graph as (atom, id(atom)) tuples
    # since each atom is considered to be a unique entity. For
    # example, atoms that appear identical may behave differently
    # in USE matching, depending on their unevaluated form. Also,
    # specially generated virtual atoms may appear identical while
    # having different _orig_atom attributes.
    atom_graph = mytrees.get("atom_graph")
    parent = mytrees.get("parent")
    virt_parent = mytrees.get("virt_parent")
    graph_parent = None
    if parent is not None:
        # When recursing into a virtual's RDEPEND, the virtual package
        # (virt_parent) replaces the real parent for graph edges and
        # USE evaluation.
        if virt_parent is not None:
            graph_parent = virt_parent
            parent = virt_parent
        else:
            graph_parent = parent
    # Without a local config (repoman mode), USE conditionals are
    # resolved via _eval_qa_conditionals() below instead of real USE.
    repoman = not mysettings.local_config
    if kwargs["use_binaries"]:
        portdb = trees[myroot]["bintree"].dbapi
    pprovideddict = mysettings.pprovideddict
    myuse = kwargs["myuse"]
    for x in mysplit:
        if x == "||":
            newsplit.append(x)
            continue
        elif isinstance(x, list):
            # Recurse into nested dependency groups.
            newsplit.append(
                _expand_new_virtuals(
                    x,
                    edebug,
                    mydbapi,
                    mysettings,
                    myroot=myroot,
                    trees=trees,
                    use_mask=use_mask,
                    use_force=use_force,
                    **kwargs
                )
            )
            continue

        if not isinstance(x, Atom):
            raise ParseError(_("invalid token: '%s'") % x)

        if repoman:
            x = x._eval_qa_conditionals(use_mask, use_force)

        mykey = x.cp
        if not mykey.startswith("virtual/"):
            # Not a virtual: pass through unchanged.
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
            continue

        if x.blocker:
            # Virtual blockers are no longer expanded here since
            # the un-expanded virtual atom is more useful for
            # maintaining a cache of blocker atoms.
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
            continue

        if repoman or not hasattr(portdb, "match_pkgs") or pkg_use_enabled is None:
            # Fallback path (repoman or a dbapi without match_pkgs):
            # expand only via the old-style 'virtuals' mappings.
            if portdb.cp_list(x.cp):
                newsplit.append(x)
            else:
                a = []
                myvartree = mytrees.get("vartree")
                if myvartree is not None:
                    mysettings._populate_treeVirtuals_if_needed(myvartree)
                mychoices = mysettings.getvirtuals().get(mykey, [])
                for y in mychoices:
                    a.append(Atom(x.replace(x.cp, y.cp, 1)))
                if not a:
                    newsplit.append(x)
                elif len(a) == 1:
                    newsplit.append(a[0])
                else:
                    newsplit.append(["||"] + a)
            continue

        pkgs = []
        # Ignore USE deps here, since otherwise we might not
        # get any matches. Choices with correct USE settings
        # will be preferred in dep_zapdeps().
        matches = portdb.match_pkgs(x.without_use)
        # Use descending order to prefer higher versions.
        matches.reverse()
        for pkg in matches:
            # only use new-style matches
            if pkg.cp.startswith("virtual/"):
                pkgs.append(pkg)

        mychoices = []
        if not pkgs and not portdb.cp_list(x.cp):
            # No new-style matches: fall back to old-style virtuals.
            myvartree = mytrees.get("vartree")
            if myvartree is not None:
                mysettings._populate_treeVirtuals_if_needed(myvartree)
            mychoices = mysettings.getvirtuals().get(mykey, [])

        if not (pkgs or mychoices):
            # This one couldn't be expanded as a new-style virtual. Old-style
            # virtuals have already been expanded by dep_virtual, so this one
            # is unavailable and dep_zapdeps will identify it as such. The
            # atom is not eliminated here since it may still represent a
            # dependency that needs to be satisfied.
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
            continue

        a = []
        for pkg in pkgs:
            virt_atom = "=" + pkg.cpv
            if x.unevaluated_atom.use:
                # Carry the original USE deps over to the expanded atom,
                # evaluating conditionals against the parent's USE (or
                # PORTAGE_USE when there is no parent).
                virt_atom += str(x.unevaluated_atom.use)
                virt_atom = Atom(virt_atom)
                if parent is None:
                    if myuse is None:
                        virt_atom = virt_atom.evaluate_conditionals(
                            mysettings.get("PORTAGE_USE", "").split()
                        )
                    else:
                        virt_atom = virt_atom.evaluate_conditionals(myuse)
                else:
                    virt_atom = virt_atom.evaluate_conditionals(
                        pkg_use_enabled(parent)
                    )
            else:
                virt_atom = Atom(virt_atom)

            # Allow the depgraph to map this atom back to the
            # original, in order to avoid distortion in places
            # like display or conflict resolution code.
            virt_atom.__dict__["_orig_atom"] = x

            # According to GLEP 37, RDEPEND is the only dependency
            # type that is valid for new-style virtuals. Repoman
            # should enforce this.
            depstring = pkg._metadata["RDEPEND"]
            pkg_kwargs = kwargs.copy()
            pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
            if edebug:
                writemsg_level(
                    _("Virtual Parent: %s\n") % (pkg,),
                    noiselevel=-1,
                    level=logging.DEBUG,
                )
                writemsg_level(
                    _("Virtual Depstring: %s\n") % (depstring,),
                    noiselevel=-1,
                    level=logging.DEBUG,
                )

            # Set EAPI used for validation in dep_check() recursion.
            mytrees["virt_parent"] = pkg

            try:
                mycheck = dep_check(
                    depstring,
                    mydbapi,
                    mysettings,
                    myroot=myroot,
                    trees=trees,
                    **pkg_kwargs
                )
            finally:
                # Restore previous EAPI after recursion.
                if virt_parent is not None:
                    mytrees["virt_parent"] = virt_parent
                else:
                    del mytrees["virt_parent"]

            if not mycheck[0]:
                raise ParseError("%s: %s '%s'" % (pkg, mycheck[1], depstring))

            # pull in the new-style virtual
            mycheck[1].append(virt_atom)
            a.append(mycheck[1])
            if atom_graph is not None:
                virt_atom_node = (virt_atom, id(virt_atom))
                atom_graph.add(virt_atom_node, graph_parent)
                atom_graph.add(pkg, virt_atom_node)

        if not a and mychoices:
            # Check for a virtual package.provided match.
            for y in mychoices:
                new_atom = Atom(x.replace(x.cp, y.cp, 1))
                if match_from_list(new_atom, pprovideddict.get(new_atom.cp, [])):
                    a.append(new_atom)
                    if atom_graph is not None:
                        atom_graph.add((new_atom, id(new_atom)), graph_parent)

        if not a:
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
        elif len(a) == 1:
            newsplit.append(a[0])
        else:
            newsplit.append(["||"] + a)

    return newsplit
def __call__(self, argv):
    """
    Dispatch one IPC query from the ebuild environment.

    argv[0] is the command name, argv[1] the ROOT, and the remaining
    elements are command-specific arguments.

    @return: tuple of (stdout, stderr, returncode)
    """
    cmd = argv[0]
    root = argv[1]
    args = argv[2:]

    warnings = []
    warnings_str = ""

    db = self.get_db()
    eapi = self.settings.get("EAPI")

    # Canonicalize ROOT to the trailing-slash form used as a db key.
    root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
    if root not in db:
        return ("", "%s: Invalid ROOT: %s\n" % (cmd, root), 3)

    portdb = db[root]["porttree"].dbapi
    vardb = db[root]["vartree"].dbapi

    if cmd in ("best_version", "has_version"):
        allow_repo = eapi_has_repo_deps(eapi)
        try:
            atom = Atom(args[0], allow_repo=allow_repo)
        except InvalidAtom:
            return ("", "%s: Invalid atom: %s\n" % (cmd, args[0]), 2)

        # Re-parse with EAPI validation; a failure here is only a QA
        # warning, not a hard error.
        try:
            atom = Atom(args[0], allow_repo=allow_repo, eapi=eapi)
        except InvalidAtom as e:
            warnings.append(_unicode_decode("QA Notice: %s: %s") % (cmd, e))

        use = self.settings.get("PORTAGE_BUILT_USE")
        if use is None:
            use = self.settings["PORTAGE_USE"]
        atom = atom.evaluate_conditionals(frozenset(use.split()))

        if warnings:
            warnings_str = self._elog("eqawarn", warnings)

        if cmd == "has_version":
            returncode = 0 if vardb.match(atom) else 1
            return ("", warnings_str, returncode)
        if cmd == "best_version":
            return ("%s\n" % best(vardb.match(atom)), warnings_str, 0)
    elif cmd in (
        "master_repositories",
        "repository_path",
        "available_eclasses",
        "eclass_path",
        "license_path",
    ):
        if _repo_name_re.match(args[0]) is None:
            return ("", "%s: Invalid repository: %s\n" % (cmd, args[0]), 2)
        try:
            repo = portdb.repositories[args[0]]
        except KeyError:
            return ("", warnings_str, 1)

        if cmd == "master_repositories":
            output = " ".join(x.name for x in repo.masters)
        elif cmd == "repository_path":
            output = repo.location
        elif cmd == "available_eclasses":
            output = " ".join(sorted(repo.eclass_db.eclasses))
        elif cmd == "eclass_path":
            try:
                output = repo.eclass_db.eclasses[args[1]].location
            except KeyError:
                return ("", warnings_str, 1)
        elif cmd == "license_path":
            # Search the repo itself first, then its masters (the list
            # is built master-first, so iterate it in reverse).
            candidates = [
                os.path.join(x.location, "licenses", args[1])
                for x in list(repo.masters) + [repo]
            ]
            for path in reversed(candidates):
                if os.path.exists(path):
                    return ("%s\n" % path, warnings_str, 0)
            return ("", warnings_str, 1)
        return ("%s\n" % output, warnings_str, 0)
    else:
        return ("", "Invalid command: %s\n" % cmd, 3)