def getMaskAtom(self, cpv, slot):
    """
    Return a package.mask atom that matches the given package, or None
    when no mask atom matches or a matching package.unmask atom cancels
    it. PROVIDE is not checked, so atoms will not be found for
    old-style virtuals.

    @param cpv: The package name
    @type cpv: String
    @param slot: The package's slot
    @type slot: String
    @rtype: String
    @return: A matching atom string or None if one is not found.
    """
    category_pkg = cpv_getkey(cpv)
    masks = self._pmaskdict.get(category_pkg)
    if not masks:
        return None
    candidates = ["%s:%s" % (cpv, slot)]
    unmasks = self._punmaskdict.get(category_pkg)
    for mask_atom in masks:
        if match_from_list(mask_atom, candidates):
            # A matching unmask atom cancels the mask entirely.
            if unmasks and any(
                    match_from_list(unmask, candidates) for unmask in unmasks):
                return None
            return mask_atom
    return None
def iterAtomsForPackage(self, pkg):
    """
    Yield every stored atom that matches the given package. Virtual
    arguments are matched against the package's PROVIDE metadata; this
    will raise an InvalidDependString exception if PROVIDE is invalid.
    """
    self._load()  # make sure the atoms are loaded
    targets = [pkg]
    cp = cpv_getkey(pkg.cpv)
    for candidate in self._atommap.get(cp) or ():
        if match_from_list(candidate, targets):
            yield candidate
    provides = pkg._metadata['PROVIDE']
    if not provides:
        return
    for token in provides.split():
        try:
            provided_cp = Atom(token).cp
        except InvalidAtom:
            # Skip unparseable PROVIDE entries.
            continue
        for candidate in self._atommap.get(provided_cp) or ():
            # Rewrite the stored virtual atom onto the real package's
            # category/package before matching.
            if match_from_list(candidate.replace(provided_cp, cp), targets):
                yield candidate
def _getMaskAtom(self, cpv, slot, repo, unmask_atoms=None):
    """
    Return a package.mask atom that matches the given package, or None
    when no mask atom matches or a matching unmask atom cancels it.
    PROVIDE is not checked, so atoms will not be found for old-style
    virtuals.

    @param cpv: The package name
    @type cpv: String
    @param slot: The package's slot
    @type slot: String
    @param repo: The package's repository [optional]
    @type repo: String
    @param unmask_atoms: if desired pass in self._punmaskdict.get(cp)
    @type unmask_atoms: list
    @rtype: String
    @return: A matching atom string or None if one is not found.
    """
    cp = cpv_getkey(cpv)
    masks = self._pmaskdict.get(cp)
    if not masks:
        return None
    # Build a cpv:slot[::repo] string for matching.
    pkg_str = cpv + _slot_separator + slot
    if repo and repo != Package.UNKNOWN_REPO:
        pkg_str = pkg_str + _repo_separator + repo
    candidates = [pkg_str]
    for mask_atom in masks:
        if not match_from_list(mask_atom, candidates):
            continue
        if unmask_atoms and any(
                match_from_list(unmask, candidates) for unmask in unmask_atoms):
            return None
        return mask_atom
    return None
def iterAtomsForPackage(self, pkg):
    """
    Generate all matching atoms for the given package, including matches
    made through the package's PROVIDE metadata (old-style virtuals).
    Raises InvalidDependString if PROVIDE is invalid.
    """
    pkg_list = [pkg]
    base_cp = cpv_getkey(pkg.cpv)
    self._load()  # lazily populate the atom map
    direct = self._atommap.get(base_cp)
    if direct:
        for candidate in direct:
            if match_from_list(candidate, pkg_list):
                yield candidate
    provide_str = pkg._metadata["PROVIDE"]
    if not provide_str:
        return
    for provide_token in provide_str.split():
        try:
            provided_cp = Atom(provide_token).cp
        except InvalidAtom:
            continue
        virt_atoms = self._atommap.get(provided_cp)
        if not virt_atoms:
            continue
        for candidate in virt_atoms:
            # Substitute the real cp for the virtual cp before matching.
            if match_from_list(candidate.replace(provided_cp, base_cp), pkg_list):
                yield candidate
def __contains__(self, cpv):
    """
    Return True if this object's atom matches the given cpv, False
    otherwise (including when self.atom is not a valid Atom).
    """
    if not isinstance(self.atom, Atom):
        return False
    # match_from_list returns a list of matches; coerce it to bool so
    # that direct __contains__ calls behave identically to the ``in``
    # operator (which would coerce implicitly anyway).
    return bool(match_from_list(self.atom, [cpv]))
def testEqualGlobFail(self):
    """Glob atoms crossing a version-component boundary; a TypeError
    from match_from_list is tolerated here."""
    cases = [
        ("=sys-apps/portage-2*", "sys-apps/portage-2.1"),
        ("=sys-apps/portage-2.1*", "sys-apps/portage-2.1.2"),
    ]
    for case in cases:
        atom, cpv = case
        try:
            self.assertEqual(len(match_from_list(atom, [cpv])), 1)
        except TypeError:
            # failure is ok here
            pass
def testEqualGlobFail(self):
    """Exercise =glob atoms that span a version-component boundary;
    match_from_list may raise TypeError, which is acceptable."""
    for atom, cpv in (
        ("=sys-apps/portage-2*", "sys-apps/portage-2.1"),
        ("=sys-apps/portage-2.1*", "sys-apps/portage-2.1.2"),
    ):
        try:
            matches = match_from_list(atom, [cpv])
            self.assertEqual(len(matches), 1)
        except TypeError:
            # failure is ok here
            pass
def testEqualGlobPass(self):
    """Each =glob atom must match exactly one of the given cpvs."""
    cases = (
        ("=sys-apps/portage-45*", "sys-apps/portage-045"),
        ("=sys-fs/udev-1*", "sys-fs/udev-123"),
        ("=sys-fs/udev-4*", "sys-fs/udev-456"),
    )
    # I need to look up the cvs syntax
    # ("=sys-fs/udev_cvs*","sys-fs/udev_cvs_pre4" ) ]
    for atom, cpv in cases:
        self.assertEqual(len(match_from_list(atom, [cpv])), 1)
def testEqualGlobPass(self):
    """=glob atoms that are expected to match their candidate cpv."""
    for test in [
        ("=sys-apps/portage-45*", "sys-apps/portage-045"),
        ("=sys-fs/udev-1*", "sys-fs/udev-123"),
        ("=sys-fs/udev-4*", "sys-fs/udev-456"),
    ]:
        # I need to look up the cvs syntax
        # ("=sys-fs/udev_cvs*","sys-fs/udev_cvs_pre4" ) ]
        matches = match_from_list(test[0], [test[1]])
        self.assertEqual(len(matches), 1)
def testMatch_from_list(self):
    """Table-driven checks of match_from_list() across version
    operators, globs, wildcards, slots, and USE-dependency syntax.
    Each entry is (atom, candidate list, expected matching cpvs)."""
    tests = (
        # Empty candidate list and =glob version matching (incl. the
        # leading-zero case "45*" vs "045").
        ("=sys-apps/portage-45*", [], []),
        ("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        # Blocker prefixes (! and !!) do not change what matches.
        ("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        # Exact (=), revision-tolerant (~), and range operators.
        ("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("=sys-apps/portage-045", ["sys-apps/portage-046"], []),
        ("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"]),
        ("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], []),
        ("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("<=sys-apps/portage-045", ["sys-apps/portage-046"], []),
        ("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("<sys-apps/portage-046", ["sys-apps/portage-046"], []),
        (">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        (">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
        (">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        (">sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
        # Slot dependencies against Package candidates.
        ("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"]),
        ("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], []),
        # More =glob matching.
        ("=sys-fs/udev-1*", ["sys-fs/udev-123"], ["sys-fs/udev-123"]),
        ("=sys-fs/udev-4*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        # Extended-syntax wildcards in category and/or package name.
        ("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"]),
        ("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"]),
        ("dev-libs/*", ["sys-apps/portage-2.1.2"], []),
        ("*/tar", ["sys-apps/portage-2.1.2"], []),
        ("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"]),
        ("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"]),
        # USE dependencies, including defaults [bar(+)] / [bar(-)]
        # for flags missing from the candidate's IUSE.
        ("dev-libs/A[foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-1"]),
        ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-2"]),
        ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")], []),
        ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], []),
        ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")], []),
        ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")], ["dev-libs/A-2"]),
        ("dev-libs/A[foo,bar(+)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"]),
        ("dev-libs/A[foo,bar(-)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], []),
        ("dev-libs/A[foo,-bar(-)]", [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"]),
    )
    for atom, cpv_list, expected_result in tests:
        result = []
        # match_from_list may return Package objects or plain cpv
        # strings; normalize to cpv strings for comparison.
        for pkg in match_from_list(atom, cpv_list):
            if isinstance(pkg, Package):
                result.append(pkg.cpv)
            else:
                result.append(pkg)
        self.assertEqual(result, expected_result)
def _getMaskAtom(self, cpv, slot, repo, unmask_atoms=None):
    """
    Return the first package.mask atom matching the given package, or
    None when nothing matches or a matching unmask atom cancels the
    mask. PROVIDE is not checked, so atoms will not be found for
    old-style virtuals.

    @param cpv: The package name
    @type cpv: String
    @param slot: The package's slot
    @type slot: String
    @param repo: The package's repository [optional]
    @type repo: String
    @param unmask_atoms: if desired pass in self._punmaskdict.get(cp)
    @type unmask_atoms: list
    @rtype: String
    @return: A matching atom string or None if one is not found.
    """
    # Accept either a plain cpv string or an object that already
    # carries slot/repo metadata (a _pkg_str-like instance).
    if hasattr(cpv, "slot"):
        pkg = cpv
    else:
        pkg = _pkg_str(cpv, slot=slot, repo=repo)
    masks = self._pmaskdict.get(pkg.cp)
    if not masks:
        return None
    candidates = [pkg]
    for mask_atom in masks:
        if not match_from_list(mask_atom, candidates):
            continue
        if unmask_atoms and any(
                match_from_list(unmask, candidates) for unmask in unmask_atoms):
            return None
        return mask_atom
    return None
def iterAtomsForPackage(self, pkg):
    """
    Yield every stored atom that matches the given package. This can
    raise an InvalidDependString exception if PROVIDE metadata is
    invalid.
    """
    self._load()  # make sure the atoms are loaded
    candidates = self._atommap.get(cpv_getkey(pkg.cpv))
    if not candidates:
        return
    targets = [pkg]
    for candidate in candidates:
        if match_from_list(candidate, targets):
            yield candidate
def version_blacklisted(cp, version):
    """
    Return True if the cpv formed from cp and version matches an atom in
    BLACKLIST_VERSIONS, False otherwise (including when the cpv is not
    parseable by portage).
    """
    cpv = '%s-%s' % (cp, version)
    # Check that the generated cpv can be used by portage
    if not portage.versions.catpkgsplit(cpv):
        return False
    rule = None
    for bv in BLACKLIST_VERSIONS:
        if dep.match_from_list(bv, [cpv]):
            rule = bv
            # First matching rule wins; stop scanning. This also keeps
            # the message below accurate — previously the loop variable
            # was reported after the loop finished, which named the
            # last blacklist entry rather than the matching one.
            break
    if rule:
        euscan.output.einfo("%s is blacklisted by rule %s" % (cpv, rule))
    return rule is not None
def version_blacklisted(cp, version):
    """
    Return True if the cpv formed from cp and version matches an atom in
    BLACKLIST_VERSIONS, False otherwise (including when the cpv is not
    parseable by portage).
    """
    cpv = '%s-%s' % (cp, version)
    # Check that the generated cpv can be used by portage
    if not portage.versions.catpkgsplit(cpv):
        return False
    rule = None
    for bv in BLACKLIST_VERSIONS:
        if dep.match_from_list(bv, [cpv]):
            rule = bv
            # First matching rule wins; stop scanning. This also keeps
            # the message below accurate — previously the loop variable
            # was reported after the loop finished, which named the
            # last blacklist entry rather than the matching one.
            break
    if rule:
        euscan.output.einfo("%s is blacklisted by rule %s" % (cpv, rule))
    return rule is not None
def dep_wordreduce(mydeplist, mysettings, mydbapi, mode, use_cache=1):
    "Reduces the deplist to ones and zeros"
    # Work on a shallow copy so the caller's deplist is not mutated.
    deplist = mydeplist[:]
    for mypos, token in enumerate(deplist):
        if isinstance(deplist[mypos], list):
            # recurse into nested groups
            deplist[mypos] = dep_wordreduce(deplist[mypos],
                mysettings, mydbapi, mode, use_cache=use_cache)
        elif deplist[mypos] == "||":
            # "||" markers are preserved as-is for later evaluation.
            pass
        elif token[:1] == "!":
            # Blocker atoms are treated as unsatisfied here.
            deplist[mypos] = False
        else:
            mykey = deplist[mypos].cp
            if (mysettings and mykey in mysettings.pprovideddict and
                    match_from_list(deplist[mypos], mysettings.pprovideddict[mykey])):
                # Satisfied via package.provided.
                deplist[mypos] = True
            elif mydbapi is None:
                # Assume nothing is satisfied. This forces dep_zapdeps to
                # return all of the deps that have been selected
                # (excluding those satisfied by package.provided).
                deplist[mypos] = False
            else:
                if mode:
                    x = mydbapi.xmatch(mode, deplist[mypos])
                    if mode.startswith("minimum-"):
                        # minimum-* modes return a single cpv (or falsy);
                        # normalize to a list.
                        mydep = []
                        if x:
                            mydep.append(x)
                    else:
                        mydep = x
                else:
                    mydep = mydbapi.match(deplist[mypos], use_cache=use_cache)
                if mydep != None:
                    tmp = len(mydep) >= 1
                    if deplist[mypos][0] == "!":
                        tmp = False
                    deplist[mypos] = tmp
                else:
                    # encountered invalid string
                    return None
    return deplist
def dep_wordreduce(mydeplist, mysettings, mydbapi, mode, use_cache=1):
    "Reduces the deplist to ones and zeros"
    # Operate on a shallow copy; the caller's deplist stays intact.
    deplist = mydeplist[:]
    for mypos, token in enumerate(deplist):
        if isinstance(deplist[mypos], list):
            # recurse into nested groups
            deplist[mypos] = dep_wordreduce(deplist[mypos],
                mysettings, mydbapi, mode, use_cache=use_cache)
        elif deplist[mypos] == "||":
            # "||" markers are left in place for later evaluation.
            pass
        elif token[:1] == "!":
            # Blocker atoms count as unsatisfied here.
            deplist[mypos] = False
        else:
            mykey = deplist[mypos].cp
            if (
                mysettings
                and mykey in mysettings.pprovideddict
                and match_from_list(deplist[mypos], mysettings.pprovideddict[mykey])
            ):
                # Satisfied via package.provided.
                deplist[mypos] = True
            elif mydbapi is None:
                # Assume nothing is satisfied. This forces dep_zapdeps to
                # return all of the deps that have been selected
                # (excluding those satisfied by package.provided).
                deplist[mypos] = False
            else:
                if mode:
                    x = mydbapi.xmatch(mode, deplist[mypos])
                    if mode.startswith("minimum-"):
                        # minimum-* modes return a single cpv (or falsy);
                        # normalize to a list.
                        mydep = []
                        if x:
                            mydep.append(x)
                    else:
                        mydep = x
                else:
                    mydep = mydbapi.match(deplist[mypos], use_cache=use_cache)
                if mydep != None:
                    tmp = len(mydep) >= 1
                    if deplist[mypos][0] == "!":
                        tmp = False
                    deplist[mypos] = tmp
                else:
                    # encountered invalid string
                    return None
    return deplist
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
    """
    Takes an unreduced and reduced deplist and removes satisfied dependencies.
    Returned deplist contains steps that must be taken to satisfy dependencies.
    """
    if trees is None:
        trees = portage.db
    writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
    if not reduced or unreduced == ["||"] or dep_eval(reduced):
        return []

    if unreduced[0] != "||":
        # AND group: collect the unsatisfied members, recursing into
        # any nested groups.
        unresolved = []
        for x, satisfied in zip(unreduced, reduced):
            if isinstance(x, list):
                unresolved += dep_zapdeps(x, satisfied, myroot,
                    use_binaries=use_binaries, trees=trees)
            elif not satisfied:
                unresolved.append(x)
        return unresolved

    # We're at a ( || atom ... ) type level and need to make a choice
    deps = unreduced[1:]
    satisfieds = reduced[1:]

    # Our preference order is for an the first item that:
    # a) contains all unmasked packages with the same key as installed packages
    # b) contains all unmasked packages
    # c) contains masked installed packages
    # d) is the first item

    preferred_installed = []
    preferred_in_graph = []
    preferred_any_slot = []
    preferred_non_installed = []
    unsat_use_in_graph = []
    unsat_use_installed = []
    unsat_use_non_installed = []
    other_installed = []
    other_installed_some = []
    other = []

    # unsat_use_* must come after preferred_non_installed
    # for correct ordering in cases like || ( foo[a] foo[b] ).
    choice_bins = (
        preferred_in_graph,
        preferred_installed,
        preferred_any_slot,
        preferred_non_installed,
        unsat_use_in_graph,
        unsat_use_installed,
        unsat_use_non_installed,
        other_installed,
        other_installed_some,
        other,
    )

    # Alias the trees we'll be checking availability against
    parent = trees[myroot].get("parent")
    priority = trees[myroot].get("priority")
    graph_db = trees[myroot].get("graph_db")
    graph = trees[myroot].get("graph")
    want_update_pkg = trees[myroot].get("want_update_pkg")
    vardb = None
    if "vartree" in trees[myroot]:
        vardb = trees[myroot]["vartree"].dbapi
    if use_binaries:
        mydbapi = trees[myroot]["bintree"].dbapi
    else:
        mydbapi = trees[myroot]["porttree"].dbapi

    try:
        mydbapi_match_pkgs = mydbapi.match_pkgs
    except AttributeError:
        # Fallback for dbapi implementations without match_pkgs.
        def mydbapi_match_pkgs(atom):
            return [mydbapi._pkg_str(cpv, atom.repo)
                for cpv in mydbapi.match(atom)]

    # Sort the deps into installed, not installed but already
    # in the graph and other, not installed and not in the graph
    # and other, with values of [[required_atom], availablility]
    for x, satisfied in zip(deps, satisfieds):
        if isinstance(x, list):
            atoms = dep_zapdeps(x, satisfied, myroot,
                use_binaries=use_binaries, trees=trees)
        else:
            atoms = [x]
        if vardb is None:
            # When called by repoman, we can simply return the first choice
            # because dep_eval() handles preference selection.
            return atoms

        all_available = True
        all_use_satisfied = True
        slot_map = {}
        cp_map = {}
        for atom in atoms:
            if atom.blocker:
                continue
            # Ignore USE dependencies here since we don't want USE
            # settings to adversely affect || preference evaluation.
            avail_pkg = mydbapi_match_pkgs(atom.without_use)
            if avail_pkg:
                avail_pkg = avail_pkg[-1]  # highest (ascending order)
                avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
            if not avail_pkg:
                all_available = False
                all_use_satisfied = False
                break
            if atom.use:
                avail_pkg_use = mydbapi_match_pkgs(atom)
                if not avail_pkg_use:
                    all_use_satisfied = False
                else:
                    # highest (ascending order)
                    avail_pkg_use = avail_pkg_use[-1]
                    if avail_pkg_use != avail_pkg:
                        avail_pkg = avail_pkg_use
                        avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
            slot_map[avail_slot] = avail_pkg
            highest_cpv = cp_map.get(avail_pkg.cp)
            if highest_cpv is None or vercmp(avail_pkg.version, highest_cpv.version) > 0:
                cp_map[avail_pkg.cp] = avail_pkg

        this_choice = _dep_choice(
            atoms=atoms,
            slot_map=slot_map,
            cp_map=cp_map,
            all_available=all_available,
            all_installed_slots=False
        )
        if all_available:
            # The "all installed" criterion is not version or slot specific.
            # If any version of a package is already in the graph then we
            # assume that it is preferred over other possible packages choices.
            all_installed = True
            for atom in set(Atom(atom.cp) for atom in atoms if not atom.blocker):
                # New-style virtuals have zero cost to install.
                if not vardb.match(atom) and not atom.startswith("virtual/"):
                    all_installed = False
                    break
            all_installed_slots = False
            if all_installed:
                all_installed_slots = True
                for slot_atom in slot_map:
                    # New-style virtuals have zero cost to install.
                    if not vardb.match(slot_atom) and not slot_atom.startswith("virtual/"):
                        all_installed_slots = False
                        break
            this_choice.all_installed_slots = all_installed_slots
            if graph_db is None:
                # No dependency graph available: bin purely by
                # installed/USE-satisfied status.
                if all_use_satisfied:
                    if all_installed:
                        if all_installed_slots:
                            preferred_installed.append(this_choice)
                        else:
                            preferred_any_slot.append(this_choice)
                    else:
                        preferred_non_installed.append(this_choice)
                else:
                    if all_installed_slots:
                        unsat_use_installed.append(this_choice)
                    else:
                        unsat_use_non_installed.append(this_choice)
            else:
                all_in_graph = True
                for atom in atoms:
                    # New-style virtuals have zero cost to install.
                    if atom.blocker or atom.cp.startswith("virtual/"):
                        continue
                    # We check if the matched package has actually been
                    # added to the digraph, in order to distinguish between
                    # those packages and installed packages that may need
                    # to be uninstalled in order to resolve blockers.
                    if not any(pkg in graph for pkg in graph_db.match_pkgs(atom)):
                        all_in_graph = False
                        break
                circular_atom = None
                if all_in_graph:
                    if parent is None or priority is None:
                        pass
                    elif priority.buildtime and not (priority.satisfied or priority.optional):
                        # Check if the atom would result in a direct circular
                        # dependency and try to avoid that if it seems likely
                        # to be unresolvable. This is only relevant for
                        # buildtime deps that aren't already satisfied by an
                        # installed package.
                        cpv_slot_list = [parent]
                        for atom in atoms:
                            if atom.blocker:
                                continue
                            if vardb.match(atom):
                                # If the atom is satisfied by an installed
                                # version then it's not a circular dep.
                                continue
                            if atom.cp != parent.cp:
                                continue
                            if match_from_list(atom, cpv_slot_list):
                                circular_atom = atom
                                break
                if circular_atom is not None:
                    other.append(this_choice)
                else:
                    if all_use_satisfied:
                        if all_in_graph:
                            preferred_in_graph.append(this_choice)
                        elif all_installed:
                            if all_installed_slots:
                                preferred_installed.append(this_choice)
                            elif parent is None or want_update_pkg is None:
                                preferred_any_slot.append(this_choice)
                            else:
                                # When appropriate, prefer a slot that is not
                                # installed yet for bug #478188.
                                want_update = True
                                for slot_atom, avail_pkg in slot_map.items():
                                    if avail_pkg in graph:
                                        continue
                                    # New-style virtuals have zero cost to install.
                                    if slot_atom.startswith("virtual/") or vardb.match(slot_atom):
                                        continue
                                    if not want_update_pkg(parent, avail_pkg):
                                        want_update = False
                                        break
                                if want_update:
                                    preferred_installed.append(this_choice)
                                else:
                                    preferred_any_slot.append(this_choice)
                        else:
                            preferred_non_installed.append(this_choice)
                    else:
                        if all_in_graph:
                            unsat_use_in_graph.append(this_choice)
                        elif all_installed_slots:
                            unsat_use_installed.append(this_choice)
                        else:
                            unsat_use_non_installed.append(this_choice)
        else:
            # Not all atoms are available (some are masked/missing);
            # bin by how much of the choice is already installed.
            all_installed = True
            some_installed = False
            for atom in atoms:
                if not atom.blocker:
                    if vardb.match(atom):
                        some_installed = True
                    else:
                        all_installed = False
            if all_installed:
                this_choice.all_installed_slots = True
                other_installed.append(this_choice)
            elif some_installed:
                other_installed_some.append(this_choice)
            else:
                other.append(this_choice)

    # Prefer choices which contain upgrades to higher slots. This helps
    # for deps such as || ( foo:1 foo:2 ), where we want to prefer the
    # atom which matches the higher version rather than the atom furthest
    # to the left. Sorting is done separately for each of choice_bins, so
    # as not to interfere with the ordering of the bins. Because of the
    # bin separation, the main function of this code is to allow
    # --depclean to remove old slots (rather than to pull in new slots).
    for choices in choice_bins:
        if len(choices) < 2:
            continue
        # Prefer choices with all_installed_slots for bug #480736.
        choices.sort(key=operator.attrgetter("all_installed_slots"), reverse=True)
        for choice_1 in choices[1:]:
            cps = set(choice_1.cp_map)
            for choice_2 in choices:
                if choice_1 is choice_2:
                    # choice_1 will not be promoted, so move on
                    break
                intersecting_cps = cps.intersection(choice_2.cp_map)
                if not intersecting_cps:
                    continue
                has_upgrade = False
                has_downgrade = False
                for cp in intersecting_cps:
                    version_1 = choice_1.cp_map[cp]
                    version_2 = choice_2.cp_map[cp]
                    difference = vercmp(version_1.version, version_2.version)
                    if difference != 0:
                        if difference > 0:
                            has_upgrade = True
                        else:
                            has_downgrade = True
                            break
                if has_upgrade and not has_downgrade:
                    # promote choice_1 in front of choice_2
                    choices.remove(choice_1)
                    index_2 = choices.index(choice_2)
                    choices.insert(index_2, choice_1)
                    break

    # Select the first choice in preference order, preferring fully
    # available choices before falling back to masked ones.
    for allow_masked in (False, True):
        for choices in choice_bins:
            for choice in choices:
                if choice.all_available or allow_masked:
                    return choice.atoms

    assert False  # This point should not be reachable
def _expand_new_virtuals(
    mysplit, edebug, mydbapi, mysettings, myroot="/", trees=None,
    use_mask=None, use_force=None, **kwargs
):
    """
    In order to solve bug #141118, recursively expand new-style virtuals so
    as to collapse one or more levels of indirection, generating an expanded
    search space. In dep_zapdeps, new-style virtuals will be assigned
    zero cost regardless of whether or not they are currently installed. Virtual
    blockers are supported but only when the virtual expands to a single
    atom because it wouldn't necessarily make sense to block all the components
    of a compound virtual.  When more than one new-style virtual is matched,
    the matches are sorted from highest to lowest versions and the atom is
    expanded to || ( highest match ... lowest match ).
    """
    newsplit = []
    mytrees = trees[myroot]
    portdb = mytrees["porttree"].dbapi
    pkg_use_enabled = mytrees.get("pkg_use_enabled")
    # Atoms are stored in the graph as (atom, id(atom)) tuples
    # since each atom is considered to be a unique entity. For
    # example, atoms that appear identical may behave differently
    # in USE matching, depending on their unevaluated form. Also,
    # specially generated virtual atoms may appear identical while
    # having different _orig_atom attributes.
    atom_graph = mytrees.get("atom_graph")
    parent = mytrees.get("parent")
    virt_parent = mytrees.get("virt_parent")
    graph_parent = None
    if parent is not None:
        if virt_parent is not None:
            graph_parent = virt_parent
            parent = virt_parent
        else:
            graph_parent = parent
    repoman = not mysettings.local_config
    if kwargs["use_binaries"]:
        portdb = trees[myroot]["bintree"].dbapi
    pprovideddict = mysettings.pprovideddict
    myuse = kwargs["myuse"]
    for x in mysplit:
        if x == "||":
            newsplit.append(x)
            continue
        elif isinstance(x, list):
            # Recurse into nested dependency groups.
            newsplit.append(
                _expand_new_virtuals(
                    x, edebug, mydbapi, mysettings, myroot=myroot,
                    trees=trees, use_mask=use_mask, use_force=use_force,
                    **kwargs
                )
            )
            continue

        if not isinstance(x, Atom):
            raise ParseError(_("invalid token: '%s'") % x)

        if repoman:
            x = x._eval_qa_conditionals(use_mask, use_force)

        mykey = x.cp
        if not mykey.startswith("virtual/"):
            # Non-virtual atoms pass through unchanged.
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
            continue

        if x.blocker:
            # Virtual blockers are no longer expanded here since
            # the un-expanded virtual atom is more useful for
            # maintaining a cache of blocker atoms.
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
            continue

        if repoman or not hasattr(portdb, "match_pkgs") or pkg_use_enabled is None:
            # Limited expansion path (repoman / plain dbapi): fall back
            # to the profile's virtuals mapping.
            if portdb.cp_list(x.cp):
                newsplit.append(x)
            else:
                a = []
                myvartree = mytrees.get("vartree")
                if myvartree is not None:
                    mysettings._populate_treeVirtuals_if_needed(myvartree)
                mychoices = mysettings.getvirtuals().get(mykey, [])
                for y in mychoices:
                    a.append(Atom(x.replace(x.cp, y.cp, 1)))
                if not a:
                    newsplit.append(x)
                elif len(a) == 1:
                    newsplit.append(a[0])
                else:
                    newsplit.append(["||"] + a)
            continue

        pkgs = []
        # Ignore USE deps here, since otherwise we might not
        # get any matches. Choices with correct USE settings
        # will be preferred in dep_zapdeps().
        matches = portdb.match_pkgs(x.without_use)
        # Use descending order to prefer higher versions.
        matches.reverse()
        for pkg in matches:
            # only use new-style matches
            if pkg.cp.startswith("virtual/"):
                pkgs.append(pkg)

        mychoices = []
        if not pkgs and not portdb.cp_list(x.cp):
            myvartree = mytrees.get("vartree")
            if myvartree is not None:
                mysettings._populate_treeVirtuals_if_needed(myvartree)
            mychoices = mysettings.getvirtuals().get(mykey, [])

        if not (pkgs or mychoices):
            # This one couldn't be expanded as a new-style virtual.  Old-style
            # virtuals have already been expanded by dep_virtual, so this one
            # is unavailable and dep_zapdeps will identify it as such.  The
            # atom is not eliminated here since it may still represent a
            # dependency that needs to be satisfied.
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
            continue

        a = []
        for pkg in pkgs:
            virt_atom = "=" + pkg.cpv
            if x.unevaluated_atom.use:
                virt_atom += str(x.unevaluated_atom.use)
                virt_atom = Atom(virt_atom)
                if parent is None:
                    if myuse is None:
                        virt_atom = virt_atom.evaluate_conditionals(
                            mysettings.get("PORTAGE_USE", "").split())
                    else:
                        virt_atom = virt_atom.evaluate_conditionals(myuse)
                else:
                    virt_atom = virt_atom.evaluate_conditionals(
                        pkg_use_enabled(parent))
            else:
                virt_atom = Atom(virt_atom)

            # Allow the depgraph to map this atom back to the
            # original, in order to avoid distortion in places
            # like display or conflict resolution code.
            virt_atom.__dict__["_orig_atom"] = x

            # According to GLEP 37, RDEPEND is the only dependency
            # type that is valid for new-style virtuals. Repoman
            # should enforce this.
            depstring = pkg._metadata["RDEPEND"]
            pkg_kwargs = kwargs.copy()
            pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
            if edebug:
                writemsg_level(_("Virtual Parent:      %s\n") % (pkg,),
                    noiselevel=-1, level=logging.DEBUG)
                writemsg_level(_("Virtual Depstring:   %s\n") % (depstring,),
                    noiselevel=-1, level=logging.DEBUG)

            # Set EAPI used for validation in dep_check() recursion.
            mytrees["virt_parent"] = pkg

            try:
                mycheck = dep_check(depstring, mydbapi, mysettings,
                    myroot=myroot, trees=trees, **pkg_kwargs)
            finally:
                # Restore previous EAPI after recursion.
                if virt_parent is not None:
                    mytrees["virt_parent"] = virt_parent
                else:
                    del mytrees["virt_parent"]

            if not mycheck[0]:
                raise ParseError("%s: %s '%s'" % (pkg, mycheck[1], depstring))

            # pull in the new-style virtual
            mycheck[1].append(virt_atom)
            a.append(mycheck[1])
            if atom_graph is not None:
                virt_atom_node = (virt_atom, id(virt_atom))
                atom_graph.add(virt_atom_node, graph_parent)
                atom_graph.add(pkg, virt_atom_node)

        if not a and mychoices:
            # Check for a virtual package.provided match.
            for y in mychoices:
                new_atom = Atom(x.replace(x.cp, y.cp, 1))
                if match_from_list(new_atom, pprovideddict.get(new_atom.cp, [])):
                    a.append(new_atom)
                    if atom_graph is not None:
                        atom_graph.add((new_atom, id(new_atom)), graph_parent)

        if not a:
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
        elif len(a) == 1:
            newsplit.append(a[0])
        else:
            newsplit.append(["||"] + a)

    return newsplit
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None): """ Takes an unreduced and reduced deplist and removes satisfied dependencies. Returned deplist contains steps that must be taken to satisfy dependencies. """ if trees is None: trees = portage.db writemsg("ZapDeps -- %s\n" % (use_binaries), 2) if not reduced or unreduced == ["||"] or dep_eval(reduced): return [] if unreduced[0] != "||": unresolved = [] for x, satisfied in zip(unreduced, reduced): if isinstance(x, list): unresolved += dep_zapdeps(x, satisfied, myroot, use_binaries=use_binaries, trees=trees) elif not satisfied: unresolved.append(x) return unresolved # We're at a ( || atom ... ) type level and need to make a choice deps = unreduced[1:] satisfieds = reduced[1:] # Our preference order is for an the first item that: # a) contains all unmasked packages with the same key as installed packages # b) contains all unmasked packages # c) contains masked installed packages # d) is the first item preferred_installed = [] preferred_in_graph = [] preferred_any_slot = [] preferred_non_installed = [] unsat_use_in_graph = [] unsat_use_installed = [] unsat_use_non_installed = [] other_installed = [] other_installed_some = [] other_installed_any_slot = [] other = [] # unsat_use_* must come after preferred_non_installed # for correct ordering in cases like || ( foo[a] foo[b] ). 
choice_bins = ( preferred_in_graph, preferred_installed, preferred_any_slot, preferred_non_installed, unsat_use_in_graph, unsat_use_installed, unsat_use_non_installed, other_installed, other_installed_some, other_installed_any_slot, other, ) # Alias the trees we'll be checking availability against parent = trees[myroot].get("parent") priority = trees[myroot].get("priority") graph_db = trees[myroot].get("graph_db") graph = trees[myroot].get("graph") pkg_use_enabled = trees[myroot].get("pkg_use_enabled") want_update_pkg = trees[myroot].get("want_update_pkg") downgrade_probe = trees[myroot].get("downgrade_probe") vardb = None if "vartree" in trees[myroot]: vardb = trees[myroot]["vartree"].dbapi if use_binaries: mydbapi = trees[myroot]["bintree"].dbapi else: mydbapi = trees[myroot]["porttree"].dbapi try: mydbapi_match_pkgs = mydbapi.match_pkgs except AttributeError: def mydbapi_match_pkgs(atom): return [ mydbapi._pkg_str(cpv, atom.repo) for cpv in mydbapi.match(atom) ] # Sort the deps into installed, not installed but already # in the graph and other, not installed and not in the graph # and other, with values of [[required_atom], availablility] for x, satisfied in zip(deps, satisfieds): if isinstance(x, list): atoms = dep_zapdeps(x, satisfied, myroot, use_binaries=use_binaries, trees=trees) else: atoms = [x] if vardb is None: # When called by repoman, we can simply return the first choice # because dep_eval() handles preference selection. return atoms all_available = True all_use_satisfied = True all_use_unmasked = True conflict_downgrade = False slot_map = {} cp_map = {} for atom in atoms: if atom.blocker: continue # Ignore USE dependencies here since we don't want USE # settings to adversely affect || preference evaluation. 
avail_pkg = mydbapi_match_pkgs(atom.without_use) if avail_pkg: avail_pkg = avail_pkg[-1] # highest (ascending order) avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot)) if not avail_pkg: all_available = False all_use_satisfied = False break if graph_db is not None and downgrade_probe is not None: slot_matches = graph_db.match_pkgs(avail_slot) if (len(slot_matches) > 1 and avail_pkg < slot_matches[-1] and not downgrade_probe(avail_pkg)): # If a downgrade is not desirable, then avoid a # choice that pulls in a lower version involved # in a slot conflict (bug #531656). conflict_downgrade = True if atom.use: avail_pkg_use = mydbapi_match_pkgs(atom) if not avail_pkg_use: all_use_satisfied = False if pkg_use_enabled is not None: # Check which USE flags cause the match to fail, # so we can prioritize choices that do not # require changes to use.mask or use.force # (see bug #515584). violated_atom = atom.violated_conditionals( pkg_use_enabled(avail_pkg), avail_pkg.iuse.is_valid_flag) # Note that violated_atom.use can be None here, # since evaluation can collapse conditional USE # deps that cause the match to fail due to # missing IUSE (match uses atom.unevaluated_atom # to detect such missing IUSE). 
if violated_atom.use is not None: for flag in violated_atom.use.enabled: if flag in avail_pkg.use.mask: all_use_unmasked = False break else: for flag in violated_atom.use.disabled: if flag in avail_pkg.use.force and \ flag not in avail_pkg.use.mask: all_use_unmasked = False break else: # highest (ascending order) avail_pkg_use = avail_pkg_use[-1] if avail_pkg_use != avail_pkg: avail_pkg = avail_pkg_use avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot)) slot_map[avail_slot] = avail_pkg highest_cpv = cp_map.get(avail_pkg.cp) if highest_cpv is None or \ vercmp(avail_pkg.version, highest_cpv.version) > 0: cp_map[avail_pkg.cp] = avail_pkg this_choice = _dep_choice(atoms=atoms, slot_map=slot_map, cp_map=cp_map, all_available=all_available, all_installed_slots=False) if all_available: # The "all installed" criterion is not version or slot specific. # If any version of a package is already in the graph then we # assume that it is preferred over other possible packages choices. all_installed = True for atom in set(Atom(atom.cp) for atom in atoms \ if not atom.blocker): # New-style virtuals have zero cost to install. if not vardb.match(atom) and not atom.startswith("virtual/"): all_installed = False break all_installed_slots = False if all_installed: all_installed_slots = True for slot_atom in slot_map: # New-style virtuals have zero cost to install. 
if not vardb.match(slot_atom) and \ not slot_atom.startswith("virtual/"): all_installed_slots = False break this_choice.all_installed_slots = all_installed_slots if graph_db is None: if all_use_satisfied: if all_installed: if all_installed_slots: preferred_installed.append(this_choice) else: preferred_any_slot.append(this_choice) else: preferred_non_installed.append(this_choice) else: if not all_use_unmasked: other.append(this_choice) elif all_installed_slots: unsat_use_installed.append(this_choice) else: unsat_use_non_installed.append(this_choice) elif conflict_downgrade: other.append(this_choice) else: all_in_graph = True for atom in atoms: # New-style virtuals have zero cost to install. if atom.blocker or atom.cp.startswith("virtual/"): continue # We check if the matched package has actually been # added to the digraph, in order to distinguish between # those packages and installed packages that may need # to be uninstalled in order to resolve blockers. if not any(pkg in graph for pkg in graph_db.match_pkgs(atom)): all_in_graph = False break circular_atom = None if not (parent is None or priority is None) and \ (parent.onlydeps or (all_in_graph and priority.buildtime and not (priority.satisfied or priority.optional))): # Check if the atom would result in a direct circular # dependency and try to avoid that if it seems likely # to be unresolvable. This is only relevant for # buildtime deps that aren't already satisfied by an # installed package. cpv_slot_list = [parent] for atom in atoms: if atom.blocker: continue if vardb.match(atom): # If the atom is satisfied by an installed # version then it's not a circular dep. 
continue if atom.cp != parent.cp: continue if match_from_list(atom, cpv_slot_list): circular_atom = atom break if circular_atom is not None: other.append(this_choice) else: if all_use_satisfied: if all_in_graph: preferred_in_graph.append(this_choice) elif all_installed: if all_installed_slots: preferred_installed.append(this_choice) elif parent is None or want_update_pkg is None: preferred_any_slot.append(this_choice) else: # When appropriate, prefer a slot that is not # installed yet for bug #478188. want_update = True for slot_atom, avail_pkg in slot_map.items(): if avail_pkg in graph: continue # New-style virtuals have zero cost to install. if slot_atom.startswith("virtual/") or \ vardb.match(slot_atom): continue if not want_update_pkg(parent, avail_pkg): want_update = False break if want_update: preferred_installed.append(this_choice) else: preferred_any_slot.append(this_choice) else: preferred_non_installed.append(this_choice) else: if not all_use_unmasked: other.append(this_choice) elif all_in_graph: unsat_use_in_graph.append(this_choice) elif all_installed_slots: unsat_use_installed.append(this_choice) else: unsat_use_non_installed.append(this_choice) else: all_installed = True some_installed = False for atom in atoms: if not atom.blocker: if vardb.match(atom): some_installed = True else: all_installed = False if all_installed: this_choice.all_installed_slots = True other_installed.append(this_choice) elif some_installed: other_installed_some.append(this_choice) # Use Atom(atom.cp) for a somewhat "fuzzy" match, since # the whole atom may be too specific. For example, see # bug #522652, where using the whole atom leads to an # unsatisfiable choice. elif any( vardb.match(Atom(atom.cp)) for atom in atoms if not atom.blocker): other_installed_any_slot.append(this_choice) else: other.append(this_choice) # Prefer choices which contain upgrades to higher slots. 
This helps # for deps such as || ( foo:1 foo:2 ), where we want to prefer the # atom which matches the higher version rather than the atom furthest # to the left. Sorting is done separately for each of choice_bins, so # as not to interfere with the ordering of the bins. Because of the # bin separation, the main function of this code is to allow # --depclean to remove old slots (rather than to pull in new slots). for choices in choice_bins: if len(choices) < 2: continue # Prefer choices with all_installed_slots for bug #480736. choices.sort(key=operator.attrgetter('all_installed_slots'), reverse=True) for choice_1 in choices[1:]: cps = set(choice_1.cp_map) for choice_2 in choices: if choice_1 is choice_2: # choice_1 will not be promoted, so move on break intersecting_cps = cps.intersection(choice_2.cp_map) if not intersecting_cps: continue has_upgrade = False has_downgrade = False for cp in intersecting_cps: version_1 = choice_1.cp_map[cp] version_2 = choice_2.cp_map[cp] difference = vercmp(version_1.version, version_2.version) if difference != 0: if difference > 0: has_upgrade = True else: has_downgrade = True break if has_upgrade and not has_downgrade: # promote choice_1 in front of choice_2 choices.remove(choice_1) index_2 = choices.index(choice_2) choices.insert(index_2, choice_1) break for allow_masked in (False, True): for choices in choice_bins: for choice in choices: if choice.all_available or allow_masked: return choice.atoms assert (False) # This point should not be reachable
def containsCPV(self, cpv):
	"""Return True if any loaded atom matches the given cpv."""
	self._load()
	candidate = [cpv]
	return any(match_from_list(atom, candidate) for atom in self._atoms)
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None,
	minimize_slots=False):
	"""
	Takes an unreduced and reduced deplist and removes satisfied dependencies.
	Returned deplist contains steps that must be taken to satisfy dependencies.

	@param unreduced: nested dependency structure (lists with "||" markers),
		parallel in shape to 'reduced'
	@param reduced: parallel structure of boolean satisfaction values
	@param myroot: key into 'trees' selecting the relevant ROOT
	@param use_binaries: when true, availability is checked against the
		binary package tree ("bintree") instead of the ebuild tree
	@param trees: mapping of ROOT -> dbapi/graph helpers; defaults to
		portage.db when None
	@param minimize_slots: when true, prefer choices pulling in fewer new
		slots (see bug 632026)
	@rtype: list
	@return: atoms that still must be satisfied, with || choices resolved
	"""
	if trees is None:
		trees = portage.db
	writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
	# Nothing to do if the dep list is empty or already satisfied.
	if not reduced or unreduced == ["||"] or dep_eval(reduced):
		return []

	if unreduced[0] != "||":
		# Conjunction level: recurse into sublists and keep every
		# unsatisfied leaf atom.
		unresolved = []
		for x, satisfied in zip(unreduced, reduced):
			if isinstance(x, list):
				unresolved += dep_zapdeps(x, satisfied, myroot,
					use_binaries=use_binaries, trees=trees,
					minimize_slots=minimize_slots)
			elif not satisfied:
				unresolved.append(x)
		return unresolved

	# We're at a ( || atom ... ) type level and need to make a choice
	deps = unreduced[1:]
	satisfieds = reduced[1:]

	# Our preference order is for an the first item that:
	# a) contains all unmasked packages with the same key as installed packages
	# b) contains all unmasked packages
	# c) contains masked installed packages
	# d) is the first item

	preferred_installed = []
	preferred_in_graph = []
	preferred_any_slot = []
	preferred_non_installed = []
	unsat_use_in_graph = []
	unsat_use_installed = []
	unsat_use_non_installed = []
	other_installed = []
	other_installed_some = []
	other_installed_any_slot = []
	other = []

	# unsat_use_* must come after preferred_non_installed
	# for correct ordering in cases like || ( foo[a] foo[b] ).
	# choice_bins is ordered best-first; the final selection loop below
	# walks the bins in this order.
	choice_bins = (
		preferred_in_graph,
		preferred_installed,
		preferred_any_slot,
		preferred_non_installed,
		unsat_use_in_graph,
		unsat_use_installed,
		unsat_use_non_installed,
		other_installed,
		other_installed_some,
		other_installed_any_slot,
		other,
	)

	# Alias the trees we'll be checking availability against
	parent = trees[myroot].get("parent")
	priority = trees[myroot].get("priority")
	graph_db = trees[myroot].get("graph_db")
	graph = trees[myroot].get("graph")
	pkg_use_enabled = trees[myroot].get("pkg_use_enabled")
	want_update_pkg = trees[myroot].get("want_update_pkg")
	downgrade_probe = trees[myroot].get("downgrade_probe")
	vardb = None
	if "vartree" in trees[myroot]:
		vardb = trees[myroot]["vartree"].dbapi
	if use_binaries:
		mydbapi = trees[myroot]["bintree"].dbapi
	else:
		mydbapi = trees[myroot]["porttree"].dbapi

	try:
		mydbapi_match_pkgs = mydbapi.match_pkgs
	except AttributeError:
		# Fallback for dbapi implementations without match_pkgs —
		# presumably only hit for minimal dbapi objects; TODO confirm.
		def mydbapi_match_pkgs(atom):
			return [mydbapi._pkg_str(cpv, atom.repo)
				for cpv in mydbapi.match(atom)]

	# Sort the deps into installed, not installed but already
	# in the graph and other, not installed and not in the graph
	# and other, with values of [[required_atom], availablility]
	for x, satisfied in zip(deps, satisfieds):
		if isinstance(x, list):
			atoms = dep_zapdeps(x, satisfied, myroot,
				use_binaries=use_binaries, trees=trees,
				minimize_slots=minimize_slots)
		else:
			atoms = [x]
		if vardb is None:
			# When called by repoman, we can simply return the first choice
			# because dep_eval() handles preference selection.
			return atoms

		all_available = True
		all_use_satisfied = True
		all_use_unmasked = True
		conflict_downgrade = False
		installed_downgrade = False
		slot_atoms = collections.defaultdict(list)
		slot_map = {}
		cp_map = {}
		for atom in atoms:
			if atom.blocker:
				continue

			# Ignore USE dependencies here since we don't want USE
			# settings to adversely affect || preference evaluation.
			avail_pkg = mydbapi_match_pkgs(atom.without_use)
			if avail_pkg:
				avail_pkg = avail_pkg[-1] # highest (ascending order)
				avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
			if not avail_pkg:
				all_available = False
				all_use_satisfied = False
				break

			if graph_db is not None and downgrade_probe is not None:
				slot_matches = graph_db.match_pkgs(avail_slot)
				if (len(slot_matches) > 1 and
					avail_pkg < slot_matches[-1] and
					not downgrade_probe(avail_pkg)):
					# If a downgrade is not desirable, then avoid a
					# choice that pulls in a lower version involved
					# in a slot conflict (bug #531656).
					conflict_downgrade = True

			if atom.use:
				avail_pkg_use = mydbapi_match_pkgs(atom)
				if not avail_pkg_use:
					all_use_satisfied = False

					if pkg_use_enabled is not None:
						# Check which USE flags cause the match to fail,
						# so we can prioritize choices that do not
						# require changes to use.mask or use.force
						# (see bug #515584).
						violated_atom = atom.violated_conditionals(
							pkg_use_enabled(avail_pkg),
							avail_pkg.iuse.is_valid_flag)

						# Note that violated_atom.use can be None here,
						# since evaluation can collapse conditional USE
						# deps that cause the match to fail due to
						# missing IUSE (match uses atom.unevaluated_atom
						# to detect such missing IUSE).
						if violated_atom.use is not None:
							for flag in violated_atom.use.enabled:
								if flag in avail_pkg.use.mask:
									all_use_unmasked = False
									break
							else:
								for flag in violated_atom.use.disabled:
									if flag in avail_pkg.use.force and \
										flag not in avail_pkg.use.mask:
										all_use_unmasked = False
										break
				else:
					# highest (ascending order)
					avail_pkg_use = avail_pkg_use[-1]
					if avail_pkg_use != avail_pkg:
						avail_pkg = avail_pkg_use
						avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))

			if downgrade_probe is not None and graph is not None:
				# Avoid choosing a version lower than one that is
				# already installed or in the graph for this slot.
				highest_in_slot = mydbapi_match_pkgs(avail_slot)
				highest_in_slot = (highest_in_slot[-1]
					if highest_in_slot else None)
				if (avail_pkg and highest_in_slot and
					avail_pkg < highest_in_slot and
					not downgrade_probe(avail_pkg) and
					(highest_in_slot.installed or
					highest_in_slot in graph)):
					installed_downgrade = True

			slot_map[avail_slot] = avail_pkg
			slot_atoms[avail_slot].append(atom)
			highest_cpv = cp_map.get(avail_pkg.cp)
			all_match_current = None
			all_match_previous = None
			if (highest_cpv is not None and
				highest_cpv.slot == avail_pkg.slot):
				# If possible, make the package selection internally
				# consistent by choosing a package that satisfies all
				# atoms which match a package in the same slot. Later on,
				# the package version chosen here is used in the
				# has_upgrade/has_downgrade logic to prefer choices with
				# upgrades, and a package choice that is not internally
				# consistent will lead the has_upgrade/has_downgrade logic
				# to produce invalid results (see bug 600346).
				all_match_current = all(a.match(avail_pkg)
					for a in slot_atoms[avail_slot])
				all_match_previous = all(a.match(highest_cpv)
					for a in slot_atoms[avail_slot])
				if all_match_previous and not all_match_current:
					continue

			current_higher = (highest_cpv is None or
				vercmp(avail_pkg.version, highest_cpv.version) > 0)
			if current_higher or (all_match_current and
				not all_match_previous):
				cp_map[avail_pkg.cp] = avail_pkg

		# Count slots this choice would newly introduce (non-virtual
		# slots with no match already in the graph).
		new_slot_count = (len(slot_map) if graph_db is None else
			sum(not graph_db.match_pkgs(slot_atom) for slot_atom in slot_map
			if not slot_atom.cp.startswith("virtual/")))

		this_choice = _dep_choice(atoms=atoms, slot_map=slot_map,
			cp_map=cp_map, all_available=all_available,
			all_installed_slots=False,
			new_slot_count=new_slot_count)
		if all_available:
			# The "all installed" criterion is not version or slot specific.
			# If any version of a package is already in the graph then we
			# assume that it is preferred over other possible packages choices.
			all_installed = True
			for atom in set(Atom(atom.cp) for atom in atoms \
				if not atom.blocker):
				# New-style virtuals have zero cost to install.
				if not vardb.match(atom) and not atom.startswith("virtual/"):
					all_installed = False
					break
			all_installed_slots = False
			if all_installed:
				all_installed_slots = True
				for slot_atom in slot_map:
					# New-style virtuals have zero cost to install.
					if not vardb.match(slot_atom) and \
						not slot_atom.startswith("virtual/"):
						all_installed_slots = False
						break
			this_choice.all_installed_slots = all_installed_slots
			if graph_db is None:
				if all_use_satisfied:
					if all_installed:
						if all_installed_slots:
							preferred_installed.append(this_choice)
						else:
							preferred_any_slot.append(this_choice)
					else:
						preferred_non_installed.append(this_choice)
				else:
					if not all_use_unmasked:
						other.append(this_choice)
					elif all_installed_slots:
						unsat_use_installed.append(this_choice)
					else:
						unsat_use_non_installed.append(this_choice)
			elif conflict_downgrade or installed_downgrade:
				other.append(this_choice)
			else:
				all_in_graph = True
				for atom in atoms:
					# New-style virtuals have zero cost to install.
					if atom.blocker or atom.cp.startswith("virtual/"):
						continue
					# We check if the matched package has actually been
					# added to the digraph, in order to distinguish between
					# those packages and installed packages that may need
					# to be uninstalled in order to resolve blockers.
					if not any(pkg in graph for pkg in
						graph_db.match_pkgs(atom)):
						all_in_graph = False
						break
				circular_atom = None
				if not (parent is None or priority is None) and \
					(parent.onlydeps or
					(all_in_graph and priority.buildtime and
					not (priority.satisfied or priority.optional))):
					# Check if the atom would result in a direct circular
					# dependency and try to avoid that if it seems likely
					# to be unresolvable. This is only relevant for
					# buildtime deps that aren't already satisfied by an
					# installed package.
					cpv_slot_list = [parent]
					for atom in atoms:
						if atom.blocker:
							continue
						if vardb.match(atom):
							# If the atom is satisfied by an installed
							# version then it's not a circular dep.
							continue
						if atom.cp != parent.cp:
							continue
						if match_from_list(atom, cpv_slot_list):
							circular_atom = atom
							break
				if circular_atom is not None:
					other.append(this_choice)
				else:
					if all_use_satisfied:
						if all_in_graph:
							preferred_in_graph.append(this_choice)
						elif all_installed:
							if all_installed_slots:
								preferred_installed.append(this_choice)
							elif parent is None or want_update_pkg is None:
								preferred_any_slot.append(this_choice)
							else:
								# When appropriate, prefer a slot that is not
								# installed yet for bug #478188.
								want_update = True
								for slot_atom, avail_pkg in slot_map.items():
									if avail_pkg in graph:
										continue
									# New-style virtuals have zero cost to install.
									if slot_atom.startswith("virtual/") or \
										vardb.match(slot_atom):
										continue
									if not want_update_pkg(parent, avail_pkg):
										want_update = False
										break

								if want_update:
									preferred_installed.append(this_choice)
								else:
									preferred_any_slot.append(this_choice)
						else:
							preferred_non_installed.append(this_choice)
					else:
						if not all_use_unmasked:
							other.append(this_choice)
						elif all_in_graph:
							unsat_use_in_graph.append(this_choice)
						elif all_installed_slots:
							unsat_use_installed.append(this_choice)
						else:
							unsat_use_non_installed.append(this_choice)
		else:
			all_installed = True
			some_installed = False
			for atom in atoms:
				if not atom.blocker:
					if vardb.match(atom):
						some_installed = True
					else:
						all_installed = False

			if all_installed:
				this_choice.all_installed_slots = True
				other_installed.append(this_choice)
			elif some_installed:
				other_installed_some.append(this_choice)

			# Use Atom(atom.cp) for a somewhat "fuzzy" match, since
			# the whole atom may be too specific. For example, see
			# bug #522652, where using the whole atom leads to an
			# unsatisfiable choice.
			elif any(vardb.match(Atom(atom.cp)) for atom in atoms
				if not atom.blocker):
				other_installed_any_slot.append(this_choice)
			else:
				other.append(this_choice)

	# Prefer choices which contain upgrades to higher slots. This helps
	# for deps such as || ( foo:1 foo:2 ), where we want to prefer the
	# atom which matches the higher version rather than the atom furthest
	# to the left. Sorting is done separately for each of choice_bins, so
	# as not to interfere with the ordering of the bins. Because of the
	# bin separation, the main function of this code is to allow
	# --depclean to remove old slots (rather than to pull in new slots).
	for choices in choice_bins:
		if len(choices) < 2:
			continue

		sort_keys = []
		# Prefer choices with all_installed_slots for bug #480736.
		sort_keys.append(lambda x: not x.all_installed_slots)

		if minimize_slots:
			# Prefer choices having fewer new slots. When used with DNF form,
			# this can eliminate unecessary packages that depclean would
			# ultimately eliminate (see bug 632026). Only use this behavior
			# when deemed necessary by the caller, since this will discard the
			# order specified in the ebuild, and the preferences specified
			# there can serve as a crucial sources of guidance (see bug 645002).
			# NOTE: Under some conditions, new_slot_count value may have some
			# variance from one calculation to the next because it depends on
			# the order that packages are added to the graph. This variance can
			# contribute to outcomes that appear to be random. Meanwhile,
			# the order specified in the ebuild is without variance, so it
			# does not have this problem.
			sort_keys.append(lambda x: x.new_slot_count)

		choices.sort(key=lambda x: tuple(f(x) for f in sort_keys))
		for choice_1 in choices[1:]:
			cps = set(choice_1.cp_map)
			for choice_2 in choices:
				if choice_1 is choice_2:
					# choice_1 will not be promoted, so move on
					break
				intersecting_cps = cps.intersection(choice_2.cp_map)
				if not intersecting_cps:
					continue
				has_upgrade = False
				has_downgrade = False
				for cp in intersecting_cps:
					version_1 = choice_1.cp_map[cp]
					version_2 = choice_2.cp_map[cp]
					difference = vercmp(version_1.version, version_2.version)
					if difference != 0:
						if difference > 0:
							has_upgrade = True
						else:
							has_downgrade = True
							break
				if has_upgrade and not has_downgrade:
					# promote choice_1 in front of choice_2
					choices.remove(choice_1)
					index_2 = choices.index(choice_2)
					choices.insert(index_2, choice_1)
					break

	# First pass only considers fully-available choices; the second pass
	# accepts masked ones as a last resort.
	for allow_masked in (False, True):
		for choices in choice_bins:
			for choice in choices:
				if choice.all_available or allow_masked:
					return choice.atoms

	assert(False) # This point should not be reachable
def testMatch_from_list(self):
	"""Exercise match_from_list over a table of (atom, candidates, expected)."""
	tests = (
		("=sys-apps/portage-45*", [], []),
		("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
		("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
		("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
		("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
		("=sys-apps/portage-045", ["sys-apps/portage-046"], []),
		("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"]),
		("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], []),
		("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
		("<=sys-apps/portage-045", ["sys-apps/portage-046"], []),
		("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
		("<sys-apps/portage-046", ["sys-apps/portage-046"], []),
		(">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
		(">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
		(">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
		(">sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
		("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"]),
		("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], []),
		("=sys-fs/udev-1*", ["sys-fs/udev-123"], ["sys-fs/udev-123"]),
		("=sys-fs/udev-4*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
		("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
		("*/*:0", ["sys-fs/udev-456:0"], ["sys-fs/udev-456:0"]),
		("*/*:1", ["sys-fs/udev-456:0"], []),
		("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
		("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
		("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"]),
		("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"]),
		("dev-libs/*", ["sys-apps/portage-2.1.2"], []),
		("*/tar", ["sys-apps/portage-2.1.2"], []),
		("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"]),
		("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"]),
		("dev-libs/A[foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-1"]),
		("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-2"]),
		("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")], []),
		("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], []),
		("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")], []),
		("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")], ["dev-libs/A-2"]),
		("dev-libs/A[foo,bar(+)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"]),
		("dev-libs/A[foo,bar(-)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], []),
		("dev-libs/A[foo,-bar(-)]", [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"]),
		("dev-libs/A::repo1", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo1"]),
		("dev-libs/A::repo2", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo2"]),
		("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[foo]"), Package("=dev-libs/A-1::repo2[-foo]")], []),
		("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[-foo]"), Package("=dev-libs/A-1::repo2[foo]")], ["dev-libs/A-1::repo2"]),
		("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:1::repo1"), Package("=dev-libs/A-1:2::repo2")], []),
		("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:2::repo1"), Package("=dev-libs/A-1:1::repo2[foo]")], ["dev-libs/A-1::repo2"]),
		("virtual/ffmpeg:0/53", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
		("virtual/ffmpeg:0/53=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
		("virtual/ffmpeg:0/52", [Package("=virtual/ffmpeg-0.10.3:0/53")], []),
		("virtual/ffmpeg:=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
		("virtual/ffmpeg:0=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
		("virtual/ffmpeg:*", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
		("virtual/ffmpeg:0", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
		("sys-libs/db:4.8/4.8", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
		("sys-libs/db:4.8/4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
		("sys-libs/db:4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
		("sys-libs/db:*", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
		("sys-libs/db:4.8/0", [Package("=sys-libs/db-4.8.30:4.8")], []),
		("sys-libs/db:4.8/0=", [Package("=sys-libs/db-4.8.30:4.8")], []),
	)

	for atom, candidates, expected in tests:
		matched = []
		for entry in match_from_list(atom, candidates):
			# Plain strings pass through; Package objects are rendered
			# as cpv, with repo appended when one is set.
			if not isinstance(entry, Package):
				matched.append(entry)
			elif entry.repo:
				matched.append(entry.cpv + _repo_separator + entry.repo)
			else:
				matched.append(entry.cpv)
		self.assertEqual(matched, expected)
def getmaskingreason(mycpv, metadata=None, settings=None,
	portdb=None, return_location=False, myrepo=None):
	"""
	If specified, the myrepo argument is assumed to be valid. This
	should be a safe assumption since portdbapi methods always
	return valid repo names and valid "repository" metadata from
	aux_get.

	Scans package.mask files from all profile locations for an atom
	matching mycpv and returns the comment block preceding that atom.

	@param mycpv: package cpv to look up
	@param metadata: optional pre-fetched aux metadata dict (must contain
		at least "EAPI", "SLOT" and "repository" when supplied)
	@param settings: config object; defaults to portage.settings
	@param portdb: portdbapi instance; defaults to portage.portdb
	@param return_location: when True, return a (comment, filename) tuple
		instead of just the comment
	@param myrepo: repository name to disambiguate the lookup
	@return: the mask comment (or (comment, filename)); None / (None, None)
		when no matching mask entry is found
	@raises ValueError: if mycpv is not a valid CPV
	"""
	if settings is None:
		settings = portage.settings
	if portdb is None:
		portdb = portage.portdb
	mysplit = catpkgsplit(mycpv)
	if not mysplit:
		raise ValueError(_("invalid CPV: %s") % mycpv)

	if metadata is None:
		db_keys = list(portdb._aux_cache_keys)
		try:
			metadata = dict(zip(db_keys,
				portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
		except KeyError:
			# Only propagate the failure if the package truly doesn't
			# exist; otherwise continue with metadata left as None.
			if not portdb.cpv_exists(mycpv):
				raise
		else:
			if myrepo is None:
				myrepo = _gen_valid_repo(metadata["repository"])

	elif myrepo is None:
		myrepo = metadata.get("repository")
		if myrepo is not None:
			myrepo = _gen_valid_repo(metadata["repository"])

	if metadata is not None and \
		not portage.eapi_is_supported(metadata["EAPI"]):
		# Return early since otherwise we might produce invalid
		# results given that the EAPI is not supported. Also,
		# metadata is mostly useless in this case since it doesn't
		# contain essential things like SLOT.
		if return_location:
			return (None, None)
		else:
			return None

	# Sometimes we can't access SLOT or repository due to corruption.
	pkg = mycpv
	if metadata is not None:
		pkg = "".join((mycpv, _slot_separator, metadata["SLOT"]))
	# At this point myrepo should be None, a valid name, or
	# Package.UNKNOWN_REPO which we ignore.
	if myrepo is not None and myrepo != Package.UNKNOWN_REPO:
		pkg = "".join((pkg, _repo_separator, myrepo))
	cpv_slot_list = [pkg]

	mycp = mysplit[0] + "/" + mysplit[1]

	# XXX- This is a temporary duplicate of code from the config constructor.
	locations = [os.path.join(settings["PORTDIR"], "profiles")]
	locations.extend(settings.profiles)
	for ov in settings["PORTDIR_OVERLAY"].split():
		profdir = os.path.join(normalize_path(ov), "profiles")
		if os.path.isdir(profdir):
			locations.append(profdir)
	locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
		USER_CONFIG_PATH))
	# Reverse so that the user config (highest priority) is scanned first.
	locations.reverse()
	pmasklists = []
	for profile in locations:
		pmask_filename = os.path.join(profile, "package.mask")
		node = None
		for l, recursive_filename in grablines(pmask_filename,
			recursive=1, remember_source_file=True):
			# Group lines per source file so comments are attributed
			# to the file they came from.
			if node is None or node[0] != recursive_filename:
				node = (recursive_filename, [])
				pmasklists.append(node)
			node[1].append(l)

	pmaskdict = settings._mask_manager._pmaskdict
	if mycp in pmaskdict:
		for x in pmaskdict[mycp]:
			if match_from_list(x, cpv_slot_list):
				x = x.without_repo
				for pmask in pmasklists:
					comment = ""
					# comment_valid tracks the line index up to which the
					# accumulated comment still applies (-1 = no comment).
					comment_valid = -1
					pmask_filename = pmask[0]
					for i in range(len(pmask[1])):
						l = pmask[1][i].strip()
						try:
							l_atom = Atom(l, allow_repo=True,
								allow_wildcard=True).without_repo
						except InvalidAtom:
							l_atom = None
						if l == "":
							comment = ""
							comment_valid = -1
						elif l[0] == "#":
							comment += l + "\n"
							comment_valid = i + 1
						elif l_atom == x:
							if comment_valid != i:
								comment = ""
							if return_location:
								return (comment, pmask_filename)
							else:
								return comment
						elif comment_valid != -1:
							# Apparently this comment applies to multiple masks, so
							# it remains valid until a blank line is encountered.
							comment_valid += 1
	if return_location:
		return (None, None)
	else:
		return None
def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
	trees=None, use_mask=None, use_force=None, **kwargs):
	"""
	In order to solve bug #141118, recursively expand new-style virtuals so
	as to collapse one or more levels of indirection, generating an expanded
	search space. In dep_zapdeps, new-style virtuals will be assigned
	zero cost regardless of whether or not they are currently installed. Virtual
	blockers are supported but only when the virtual expands to a single
	atom because it wouldn't necessarily make sense to block all the components
	of a compound virtual.  When more than one new-style virtual is matched,
	the matches are sorted from highest to lowest versions and the atom is
	expanded to || ( highest match ... lowest match ).

	The result is normalized in the same way as use_reduce, having a top-level
	conjuction, and no redundant nested lists.

	@param mysplit: normalized dependency structure (nested lists, "||"
		markers, Atom instances)
	@param edebug: when true, emit debug output via writemsg_level
	@param mydbapi: dbapi used for dep_check() recursion
	@param mysettings: config object
	@param myroot: key into 'trees' selecting the relevant ROOT
	@param trees: mapping of ROOT -> tree/dbapi helpers
	@param use_mask: masked USE flags (repoman QA-conditional evaluation)
	@param use_force: forced USE flags (repoman QA-conditional evaluation)
	@param kwargs: passed through to dep_check(); must contain
		"use_binaries" and "myuse"
	@return: expanded dependency structure (top-level conjunction)
	@raises ParseError: on invalid tokens or invalid virtual RDEPEND
	"""
	newsplit = []
	mytrees = trees[myroot]
	portdb = mytrees["porttree"].dbapi
	pkg_use_enabled = mytrees.get("pkg_use_enabled")
	# Atoms are stored in the graph as (atom, id(atom)) tuples
	# since each atom is considered to be a unique entity. For
	# example, atoms that appear identical may behave differently
	# in USE matching, depending on their unevaluated form. Also,
	# specially generated virtual atoms may appear identical while
	# having different _orig_atom attributes.
	atom_graph = mytrees.get("atom_graph")
	parent = mytrees.get("parent")
	virt_parent = mytrees.get("virt_parent")
	graph_parent = None
	if parent is not None:
		if virt_parent is not None:
			graph_parent = virt_parent
			parent = virt_parent
		else:
			graph_parent = parent
	repoman = not mysettings.local_config
	if kwargs["use_binaries"]:
		portdb = trees[myroot]["bintree"].dbapi
	pprovideddict = mysettings.pprovideddict
	myuse = kwargs["myuse"]
	is_disjunction = mysplit and mysplit[0] == '||'
	for x in mysplit:
		if x == "||":
			newsplit.append(x)
			continue
		elif isinstance(x, list):
			# Recurse into nested lists, enforcing the use_reduce
			# normalization invariants (no empty lists, strict
			# conjunction/disjunction alternation).
			assert x, 'Normalization error, empty conjunction found in %s' % (mysplit,)
			if is_disjunction:
				assert x[0] != '||', \
					'Normalization error, nested disjunction found in %s' % (mysplit,)
			else:
				assert x[0] == '||', \
					'Normalization error, nested conjunction found in %s' % (mysplit,)
			x_exp = _expand_new_virtuals(x, edebug, mydbapi,
				mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
				use_force=use_force, **kwargs)
			if is_disjunction:
				if len(x_exp) == 1:
					x = x_exp[0]
					if isinstance(x, list):
						# Due to normalization, a conjunction must not be
						# nested directly in another conjunction, so this
						# must be a disjunction.
						assert x and x[0] == '||', \
							'Normalization error, nested conjunction found in %s' % (x_exp,)
						newsplit.extend(x[1:])
					else:
						newsplit.append(x)
				else:
					newsplit.append(x_exp)
			else:
				newsplit.extend(x_exp)
			continue

		if not isinstance(x, Atom):
			raise ParseError(_("invalid token: '%s'") % x)

		if repoman:
			x = x._eval_qa_conditionals(use_mask, use_force)

		mykey = x.cp
		if not mykey.startswith("virtual/"):
			# Non-virtual atoms pass through unchanged.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add((x, id(x)), graph_parent)
			continue

		if x.blocker:
			# Virtual blockers are no longer expanded here since
			# the un-expanded virtual atom is more useful for
			# maintaining a cache of blocker atoms.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add((x, id(x)), graph_parent)
			continue

		if repoman or not hasattr(portdb, 'match_pkgs') or \
			pkg_use_enabled is None:
			# Limited expansion path (repoman / plain dbapi): only map
			# the virtual through PROFILE-defined virtuals.
			if portdb.cp_list(x.cp):
				newsplit.append(x)
			else:
				a = []
				myvartree = mytrees.get("vartree")
				if myvartree is not None:
					mysettings._populate_treeVirtuals_if_needed(myvartree)
				mychoices = mysettings.getvirtuals().get(mykey, [])
				for y in mychoices:
					a.append(Atom(x.replace(x.cp, y.cp, 1)))
				if not a:
					newsplit.append(x)
				elif is_disjunction:
					newsplit.extend(a)
				elif len(a) == 1:
					newsplit.append(a[0])
				else:
					newsplit.append(['||'] + a)
			continue

		pkgs = []
		# Ignore USE deps here, since otherwise we might not
		# get any matches. Choices with correct USE settings
		# will be preferred in dep_zapdeps().
		matches = portdb.match_pkgs(x.without_use)
		# Use descending order to prefer higher versions.
		matches.reverse()
		for pkg in matches:
			# only use new-style matches
			if pkg.cp.startswith("virtual/"):
				pkgs.append(pkg)

		mychoices = []
		if not pkgs and not portdb.cp_list(x.cp):
			myvartree = mytrees.get("vartree")
			if myvartree is not None:
				mysettings._populate_treeVirtuals_if_needed(myvartree)
			mychoices = mysettings.getvirtuals().get(mykey, [])

		if not (pkgs or mychoices):
			# This one couldn't be expanded as a new-style virtual. Old-style
			# virtuals have already been expanded by dep_virtual, so this one
			# is unavailable and dep_zapdeps will identify it as such. The
			# atom is not eliminated here since it may still represent a
			# dependency that needs to be satisfied.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add((x, id(x)), graph_parent)
			continue

		a = []
		for pkg in pkgs:
			virt_atom = '=' + pkg.cpv
			if x.unevaluated_atom.use:
				virt_atom += str(x.unevaluated_atom.use)
				virt_atom = Atom(virt_atom)
				if parent is None:
					if myuse is None:
						virt_atom = virt_atom.evaluate_conditionals(
							mysettings.get("PORTAGE_USE", "").split())
					else:
						virt_atom = virt_atom.evaluate_conditionals(myuse)
				else:
					virt_atom = virt_atom.evaluate_conditionals(
						pkg_use_enabled(parent))
			else:
				virt_atom = Atom(virt_atom)

			# Allow the depgraph to map this atom back to the
			# original, in order to avoid distortion in places
			# like display or conflict resolution code.
			virt_atom.__dict__['_orig_atom'] = x

			# According to GLEP 37, RDEPEND is the only dependency
			# type that is valid for new-style virtuals. Repoman
			# should enforce this.
			depstring = pkg._metadata['RDEPEND']
			pkg_kwargs = kwargs.copy()
			pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
			if edebug:
				writemsg_level(_("Virtual Parent: %s\n") \
					% (pkg,), noiselevel=-1, level=logging.DEBUG)
				writemsg_level(_("Virtual Depstring: %s\n") \
					% (depstring,), noiselevel=-1, level=logging.DEBUG)

			# Set EAPI used for validation in dep_check() recursion.
			mytrees["virt_parent"] = pkg

			try:
				mycheck = dep_check(depstring, mydbapi, mysettings,
					myroot=myroot, trees=trees, **pkg_kwargs)
			finally:
				# Restore previous EAPI after recursion.
				if virt_parent is not None:
					mytrees["virt_parent"] = virt_parent
				else:
					del mytrees["virt_parent"]

			if not mycheck[0]:
				raise ParseError("%s: %s '%s'" % \
					(pkg, mycheck[1], depstring))

			# Replace the original atom "x" with "virt_atom" which refers
			# to the specific version of the virtual whose deps we're
			# expanding. The virt_atom._orig_atom attribute is used
			# by depgraph to map virt_atom back to the original atom.
			# We specifically exclude the original atom "x" from the
			# the expanded output here, since otherwise it could trigger
			# incorrect dep_zapdeps behavior (see bug #597752).
			mycheck[1].append(virt_atom)
			a.append(mycheck[1])
			if atom_graph is not None:
				virt_atom_node = (virt_atom, id(virt_atom))
				atom_graph.add(virt_atom_node, graph_parent)
				atom_graph.add(pkg, virt_atom_node)
				atom_graph.add((x, id(x)), graph_parent)

		if not a and mychoices:
			# Check for a virtual package.provided match.
			for y in mychoices:
				new_atom = Atom(x.replace(x.cp, y.cp, 1))
				if match_from_list(new_atom,
					pprovideddict.get(new_atom.cp, [])):
					a.append(new_atom)
					if atom_graph is not None:
						atom_graph.add((new_atom, id(new_atom)),
							graph_parent)

		if not a:
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add((x, id(x)), graph_parent)
		elif is_disjunction:
			newsplit.extend(a)
		elif len(a) == 1:
			newsplit.extend(a[0])
		else:
			newsplit.append(['||'] + a)

	# For consistency with related functions like use_reduce, always
	# normalize the result to have a top-level conjunction.
	if is_disjunction:
		newsplit = [newsplit]

	return newsplit
def testMatch_from_list(self):
    """
    Table-driven check of match_from_list().

    Each case is (atom, candidates, expected): candidates are plain cpv
    strings or Package instances, and every Package that matches is
    rendered as "cpv" (or "cpv::repo" when it carries a repo) before
    being compared against the expected list.
    """
    cases = (
        ("=sys-apps/portage-45*", [], []),
        ("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("=sys-apps/portage-045", ["sys-apps/portage-046"], []),
        ("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"]),
        ("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], []),
        ("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("<=sys-apps/portage-045", ["sys-apps/portage-046"], []),
        ("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("<sys-apps/portage-046", ["sys-apps/portage-046"], []),
        (">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        (">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
        (">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        (">sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
        ("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"]),
        ("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], []),
        ("=cat/pkg-1-r1*", ["cat/pkg-1_alpha1"], []),
        # The =* glob matches only on boundaries between version parts,
        # so 1* does not match 10 (bug 560466).
        ("=cat/pkg-1.1*", ["cat/pkg-1.1-r1", "cat/pkg-1.10-r1"], ["cat/pkg-1.1-r1"]),
        ("=cat/pkg-1-r1*", ["cat/pkg-1-r11"], []),
        ("=cat/pkg-1_pre*", ["cat/pkg-1_pre1"], ["cat/pkg-1_pre1"]),
        ("=cat/pkg-1-r1*", ["cat/pkg-1-r1"], ["cat/pkg-1-r1"]),
        ("=cat/pkg-1-r11*", ["cat/pkg-1-r11"], ["cat/pkg-1-r11"]),
        ("=cat/pkg-1-r11*", ["cat/pkg-01-r11"], ["cat/pkg-01-r11"]),
        ("=cat/pkg-01-r11*", ["cat/pkg-1-r11"], ["cat/pkg-1-r11"]),
        ("=cat/pkg-01-r11*", ["cat/pkg-001-r11"], ["cat/pkg-001-r11"]),
        ("=sys-fs/udev-1*", ["sys-fs/udev-123", "sys-fs/udev-123-r1"], []),
        ("=sys-fs/udev-123*", ["sys-fs/udev-123"], ["sys-fs/udev-123"]),
        ("=sys-fs/udev-123*", ["sys-fs/udev-123-r1"], ["sys-fs/udev-123-r1"]),
        ("=sys-fs/udev-4*", ["sys-fs/udev-456", "sys-fs/udev-456-r1"], []),
        ("=sys-fs/udev-456*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("*/*:0", ["sys-fs/udev-456:0"], ["sys-fs/udev-456:0"]),
        ("*/*:1", ["sys-fs/udev-456:0"], []),
        ("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"]),
        ("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"]),
        ("dev-libs/*", ["sys-apps/portage-2.1.2"], []),
        ("*/tar", ["sys-apps/portage-2.1.2"], []),
        ("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"]),
        ("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"]),
        ("dev-libs/A[foo]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")],
            ["dev-libs/A-1"]),
        ("dev-libs/A[-foo]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A[-foo]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")],
            []),
        ("dev-libs/A[foo,bar]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")],
            []),
        ("dev-libs/A[foo,bar]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")],
            []),
        ("dev-libs/A[foo,bar]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A[foo,bar(+)]",
            [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A[foo,bar(-)]",
            [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")],
            []),
        ("dev-libs/A[foo,-bar(-)]",
            [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A::repo1",
            [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")],
            ["dev-libs/A-1::repo1"]),
        ("dev-libs/A::repo2",
            [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")],
            ["dev-libs/A-1::repo2"]),
        ("dev-libs/A::repo2[foo]",
            [Package("=dev-libs/A-1::repo1[foo]"), Package("=dev-libs/A-1::repo2[-foo]")],
            []),
        ("dev-libs/A::repo2[foo]",
            [Package("=dev-libs/A-1::repo1[-foo]"), Package("=dev-libs/A-1::repo2[foo]")],
            ["dev-libs/A-1::repo2"]),
        ("dev-libs/A:1::repo2[foo]",
            [Package("=dev-libs/A-1:1::repo1"), Package("=dev-libs/A-1:2::repo2")],
            []),
        ("dev-libs/A:1::repo2[foo]",
            [Package("=dev-libs/A-1:2::repo1"), Package("=dev-libs/A-1:1::repo2[foo]")],
            ["dev-libs/A-1::repo2"]),
        ("virtual/ffmpeg:0/53", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:0/53=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:0/52", [Package("=virtual/ffmpeg-0.10.3:0/53")], []),
        ("virtual/ffmpeg:=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:0=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:*", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:0", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("sys-libs/db:4.8/4.8", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
        ("sys-libs/db:4.8/4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
        ("sys-libs/db:4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
        ("sys-libs/db:*", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
        ("sys-libs/db:4.8/0", [Package("=sys-libs/db-4.8.30:4.8")], []),
        ("sys-libs/db:4.8/0=", [Package("=sys-libs/db-4.8.30:4.8")], []),
    )

    for atom, candidates, expected in cases:
        rendered = []
        for found in match_from_list(atom, candidates):
            if not isinstance(found, Package):
                rendered.append(found)
            elif found.repo:
                rendered.append(found.cpv + _repo_separator + found.repo)
            else:
                rendered.append(found.cpv)
        self.assertEqual(rendered, expected)
def getmaskingstatus(mycpv, settings=None, portdb=None):
    """
    Collect every reason why a package is masked.

    @param mycpv: a cpv string, or a Package instance (emerge passes the
        latter; its cpv / metadata / installed attributes are unpacked below)
    @param settings: config instance; defaults to a clone of the global
        portage.settings
    @param portdb: portdbapi used to fetch metadata when mycpv is a plain
        string; defaults to the global portage.portdb
    @rtype: list
    @return: human-readable mask-reason strings ("profile", "package.mask",
        "corruption", "EAPI <eapi>", missing license/property summaries,
        "<keyword> keyword"); an empty list means the package is unmasked
    @raise ValueError: if mycpv is not a valid cpv
    """
    if settings is None:
        settings = config(clone=portage.settings)
    if portdb is None:
        portdb = portage.portdb

    metadata = None
    installed = False
    if not isinstance(mycpv, basestring):
        # emerge passed in a Package instance
        pkg = mycpv
        mycpv = pkg.cpv
        metadata = pkg.metadata
        installed = pkg.installed

    mysplit = catpkgsplit(mycpv)
    if not mysplit:
        raise ValueError(_("invalid CPV: %s") % mycpv)
    if metadata is None:
        db_keys = list(portdb._aux_cache_keys)
        try:
            metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
        except KeyError:
            if not portdb.cpv_exists(mycpv):
                raise
            # cpv exists but aux_get failed, so the cached metadata is broken
            return ["corruption"]
        if "?" in metadata["LICENSE"]:
            # LICENSE contains USE conditionals; compute USE for this package
            settings.setcpv(mycpv, mydb=metadata)
            metadata["USE"] = settings["PORTAGE_USE"]
        else:
            metadata["USE"] = ""

    rValue = []

    # profile checking
    if settings._getProfileMaskAtom(mycpv, metadata):
        rValue.append("profile")

    # package.mask checking
    if settings._getMaskAtom(mycpv, metadata):
        rValue.append("package.mask")

    # keywords checking
    eapi = metadata["EAPI"]
    mygroups = settings._getKeywords(mycpv, metadata)
    licenses = metadata["LICENSE"]
    properties = metadata["PROPERTIES"]
    if eapi.startswith("-"):
        # strip a leading "-" marker from the EAPI before validation
        eapi = eapi[1:]
    if not eapi_is_supported(eapi):
        return ["EAPI %s" % eapi]
    elif _eapi_is_deprecated(eapi) and not installed:
        return ["EAPI %s" % eapi]
    egroups = settings.configdict["backupenv"].get(
        "ACCEPT_KEYWORDS", "").split()
    pgroups = settings["ACCEPT_KEYWORDS"].split()
    myarch = settings["ARCH"]
    if pgroups and myarch not in pgroups:
        """For operating systems other than Linux, ARCH is not necessarily a
        valid keyword."""
        myarch = pgroups[0].lstrip("~")

    cp = cpv_getkey(mycpv)
    pkgdict = settings.pkeywordsdict.get(cp)
    matches = False
    if pkgdict:
        # package.keywords-style per-package keyword additions
        cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
        for atom, pkgkeywords in pkgdict.items():
            if match_from_list(atom, cpv_slot_list):
                matches = True
                pgroups.extend(pkgkeywords)
    if matches or egroups:
        pgroups.extend(egroups)
        # apply incremental semantics: "-x" removes x, "-*" resets the set
        inc_pgroups = set()
        for x in pgroups:
            if x.startswith("-"):
                if x == "-*":
                    inc_pgroups.clear()
                else:
                    inc_pgroups.discard(x[1:])
            else:
                inc_pgroups.add(x)
        pgroups = inc_pgroups
        del inc_pgroups

    kmask = "missing"

    if '**' in pgroups:
        # "**" accepts the package regardless of its KEYWORDS
        kmask = None
    else:
        for keyword in pgroups:
            if keyword in mygroups:
                kmask = None
                break

    if kmask:
        # no accepted keyword matched; classify the kind of mismatch
        for gp in mygroups:
            if gp == "*":
                kmask = None
                break
            elif gp == "-" + myarch and myarch in pgroups:
                kmask = "-" + myarch
                break
            elif gp == "~" + myarch and myarch in pgroups:
                kmask = "~" + myarch
                break

    try:
        missing_licenses = settings._getMissingLicenses(mycpv, metadata)
        if missing_licenses:
            # report only the unaccepted license tokens plus grouping operators
            allowed_tokens = set(["||", "(", ")"])
            allowed_tokens.update(missing_licenses)
            license_split = licenses.split()
            license_split = [x for x in license_split \
                if x in allowed_tokens]
            msg = license_split[:]
            msg.append("license(s)")
            rValue.append(" ".join(msg))
    except portage.exception.InvalidDependString as e:
        rValue.append("LICENSE: " + str(e))

    try:
        missing_properties = settings._getMissingProperties(mycpv, metadata)
        if missing_properties:
            # report only the unaccepted property tokens plus grouping operators
            allowed_tokens = set(["||", "(", ")"])
            allowed_tokens.update(missing_properties)
            properties_split = properties.split()
            properties_split = [x for x in properties_split \
                if x in allowed_tokens]
            msg = properties_split[:]
            msg.append("properties")
            rValue.append(" ".join(msg))
    except portage.exception.InvalidDependString as e:
        rValue.append("PROPERTIES: " + str(e))

    # Only show KEYWORDS masks for installed packages
    # if they're not masked for any other reason.
    if kmask and (not installed or not rValue):
        rValue.append(kmask + " keyword")

    return rValue
def testMatch_from_list(self):
    """
    Run match_from_list() over a table of (atom, candidates, expected)
    cases.

    Candidates are plain cpv strings or Package instances; each matching
    Package is rendered as "cpv" (or "cpv::repo" when it has a repo set)
    before being compared against the expected list.
    """
    cases = (
        ("=sys-apps/portage-45*", [], []),
        ("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("=sys-apps/portage-045", ["sys-apps/portage-046"], []),
        ("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"]),
        ("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], []),
        ("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("<=sys-apps/portage-045", ["sys-apps/portage-046"], []),
        ("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("<sys-apps/portage-046", ["sys-apps/portage-046"], []),
        (">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        (">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
        (">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        (">sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
        ("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"]),
        ("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], []),
        ("=cat/pkg-1-r1*", ["cat/pkg-1_alpha1"], []),
        # The =* glob matches only on boundaries between version parts,
        # so 1* does not match 10 (bug 560466).
        ("=cat/pkg-1.1*", ["cat/pkg-1.1-r1", "cat/pkg-1.10-r1"], ["cat/pkg-1.1-r1"]),
        ("=cat/pkg-1-r1*", ["cat/pkg-1-r11"], []),
        ("=cat/pkg-1_pre*", ["cat/pkg-1_pre1"], ["cat/pkg-1_pre1"]),
        ("=cat/pkg-1-r1*", ["cat/pkg-1-r1"], ["cat/pkg-1-r1"]),
        ("=cat/pkg-1-r11*", ["cat/pkg-1-r11"], ["cat/pkg-1-r11"]),
        ("=cat/pkg-1-r11*", ["cat/pkg-01-r11"], ["cat/pkg-01-r11"]),
        ("=cat/pkg-01-r11*", ["cat/pkg-1-r11"], ["cat/pkg-1-r11"]),
        ("=cat/pkg-01-r11*", ["cat/pkg-001-r11"], ["cat/pkg-001-r11"]),
        ("=sys-fs/udev-1*", ["sys-fs/udev-123", "sys-fs/udev-123-r1"], []),
        ("=sys-fs/udev-123*", ["sys-fs/udev-123"], ["sys-fs/udev-123"]),
        ("=sys-fs/udev-123*", ["sys-fs/udev-123-r1"], ["sys-fs/udev-123-r1"]),
        ("=sys-fs/udev-4*", ["sys-fs/udev-456", "sys-fs/udev-456-r1"], []),
        ("=sys-fs/udev-456*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("*/*:0", ["sys-fs/udev-456:0"], ["sys-fs/udev-456:0"]),
        ("*/*:1", ["sys-fs/udev-456:0"], []),
        ("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"]),
        ("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"]),
        ("dev-libs/*", ["sys-apps/portage-2.1.2"], []),
        ("*/tar", ["sys-apps/portage-2.1.2"], []),
        ("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"]),
        ("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"]),
        ("dev-libs/A[foo]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")],
            ["dev-libs/A-1"]),
        ("dev-libs/A[-foo]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A[-foo]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")],
            []),
        ("dev-libs/A[foo,bar]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")],
            []),
        ("dev-libs/A[foo,bar]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")],
            []),
        ("dev-libs/A[foo,bar]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A[foo,bar(+)]",
            [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A[foo,bar(-)]",
            [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")],
            []),
        ("dev-libs/A[foo,-bar(-)]",
            [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A::repo1",
            [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")],
            ["dev-libs/A-1::repo1"]),
        ("dev-libs/A::repo2",
            [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")],
            ["dev-libs/A-1::repo2"]),
        ("dev-libs/A::repo2[foo]",
            [Package("=dev-libs/A-1::repo1[foo]"), Package("=dev-libs/A-1::repo2[-foo]")],
            []),
        ("dev-libs/A::repo2[foo]",
            [Package("=dev-libs/A-1::repo1[-foo]"), Package("=dev-libs/A-1::repo2[foo]")],
            ["dev-libs/A-1::repo2"]),
        ("dev-libs/A:1::repo2[foo]",
            [Package("=dev-libs/A-1:1::repo1"), Package("=dev-libs/A-1:2::repo2")],
            []),
        ("dev-libs/A:1::repo2[foo]",
            [Package("=dev-libs/A-1:2::repo1"), Package("=dev-libs/A-1:1::repo2[foo]")],
            ["dev-libs/A-1::repo2"]),
        ("virtual/ffmpeg:0/53", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:0/53=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:0/52", [Package("=virtual/ffmpeg-0.10.3:0/53")], []),
        ("virtual/ffmpeg:=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:0=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:*", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:0", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("sys-libs/db:4.8/4.8", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
        ("sys-libs/db:4.8/4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
        ("sys-libs/db:4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
        ("sys-libs/db:*", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
        ("sys-libs/db:4.8/0", [Package("=sys-libs/db-4.8.30:4.8")], []),
        ("sys-libs/db:4.8/0=", [Package("=sys-libs/db-4.8.30:4.8")], []),
    )

    for atom, candidates, expected in cases:
        observed = []
        for hit in match_from_list(atom, candidates):
            if not isinstance(hit, Package):
                observed.append(hit)
            elif hit.repo:
                observed.append(hit.cpv + _repo_separator + hit.repo)
            else:
                observed.append(hit.cpv)
        self.assertEqual(observed, expected)
def testMatch_from_list(self):
    """
    Table-driven test of match_from_list().

    Each case is (atom, candidates, expected): candidates are plain cpv
    strings or Package instances, and matching Package entries are
    reduced to their cpv strings before comparison.
    """
    cases = (
        ("=sys-apps/portage-45*", [], []),
        ("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("=sys-apps/portage-045", ["sys-apps/portage-046"], []),
        ("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"]),
        ("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], []),
        ("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("<=sys-apps/portage-045", ["sys-apps/portage-046"], []),
        ("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("<sys-apps/portage-046", ["sys-apps/portage-046"], []),
        (">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        (">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
        (">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        (">sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
        ("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"]),
        ("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], []),
        ("=sys-fs/udev-1*", ["sys-fs/udev-123"], ["sys-fs/udev-123"]),
        ("=sys-fs/udev-4*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"]),
        ("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"]),
        ("dev-libs/*", ["sys-apps/portage-2.1.2"], []),
        ("*/tar", ["sys-apps/portage-2.1.2"], []),
        ("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"]),
        ("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"]),
        ("dev-libs/A[foo]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")],
            ["dev-libs/A-1"]),
        ("dev-libs/A[-foo]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A[-foo]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")],
            []),
        ("dev-libs/A[foo,bar]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")],
            []),
        ("dev-libs/A[foo,bar]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")],
            []),
        ("dev-libs/A[foo,bar]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A[foo,bar(+)]",
            [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A[foo,bar(-)]",
            [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")],
            []),
        ("dev-libs/A[foo,-bar(-)]",
            [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")],
            ["dev-libs/A-2"]),
    )

    for atom, candidates, expected in cases:
        # Package matches carry a cpv attribute; bare strings pass through.
        observed = [
            hit.cpv if isinstance(hit, Package) else hit
            for hit in match_from_list(atom, candidates)
        ]
        self.assertEqual(observed, expected)
def testMatch_from_list(self):
    """
    Table-driven test of match_from_list().

    Each case is (atom, candidates, expected): candidates are plain cpv
    strings or Package instances, and every matching Package is rendered
    as "cpv" (or "cpv::repo" when it has a repo set) before comparison.
    """
    cases = (
        ("=sys-apps/portage-45*", [], []),
        ("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("=sys-apps/portage-045", ["sys-apps/portage-046"], []),
        ("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"]),
        ("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], []),
        ("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("<=sys-apps/portage-045", ["sys-apps/portage-046"], []),
        ("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        ("<sys-apps/portage-046", ["sys-apps/portage-046"], []),
        (">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        (">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
        (">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
        (">sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
        ("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"]),
        ("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], []),
        ("=sys-fs/udev-1*", ["sys-fs/udev-123"], ["sys-fs/udev-123"]),
        ("=sys-fs/udev-4*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("*/*:0", ["sys-fs/udev-456:0"], ["sys-fs/udev-456:0"]),
        ("*/*:1", ["sys-fs/udev-456:0"], []),
        ("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
        ("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"]),
        ("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"]),
        ("dev-libs/*", ["sys-apps/portage-2.1.2"], []),
        ("*/tar", ["sys-apps/portage-2.1.2"], []),
        ("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"]),
        ("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"]),
        ("dev-libs/A[foo]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")],
            ["dev-libs/A-1"]),
        ("dev-libs/A[-foo]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A[-foo]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")],
            []),
        ("dev-libs/A[foo,bar]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")],
            []),
        ("dev-libs/A[foo,bar]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")],
            []),
        ("dev-libs/A[foo,bar]",
            [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A[foo,bar(+)]",
            [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A[foo,bar(-)]",
            [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")],
            []),
        ("dev-libs/A[foo,-bar(-)]",
            [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")],
            ["dev-libs/A-2"]),
        ("dev-libs/A::repo1",
            [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")],
            ["dev-libs/A-1::repo1"]),
        ("dev-libs/A::repo2",
            [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")],
            ["dev-libs/A-1::repo2"]),
        ("dev-libs/A::repo2[foo]",
            [Package("=dev-libs/A-1::repo1[foo]"), Package("=dev-libs/A-1::repo2[-foo]")],
            []),
        ("dev-libs/A::repo2[foo]",
            [Package("=dev-libs/A-1::repo1[-foo]"), Package("=dev-libs/A-1::repo2[foo]")],
            ["dev-libs/A-1::repo2"]),
        ("dev-libs/A:1::repo2[foo]",
            [Package("=dev-libs/A-1:1::repo1"), Package("=dev-libs/A-1:2::repo2")],
            []),
        ("dev-libs/A:1::repo2[foo]",
            [Package("=dev-libs/A-1:2::repo1"), Package("=dev-libs/A-1:1::repo2[foo]")],
            ["dev-libs/A-1::repo2"]),
        ("virtual/ffmpeg:0/53", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:0/53=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:0/52", [Package("=virtual/ffmpeg-0.10.3:0/53")], []),
        ("virtual/ffmpeg:=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:0=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:*", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("virtual/ffmpeg:0", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
        ("sys-libs/db:4.8/4.8", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
        ("sys-libs/db:4.8/4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
        ("sys-libs/db:4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
        ("sys-libs/db:*", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
        ("sys-libs/db:4.8/0", [Package("=sys-libs/db-4.8.30:4.8")], []),
        ("sys-libs/db:4.8/0=", [Package("=sys-libs/db-4.8.30:4.8")], []),
    )

    for atom, candidates, expected in cases:
        observed = []
        for hit in match_from_list(atom, candidates):
            if not isinstance(hit, Package):
                observed.append(hit)
            elif hit.repo:
                observed.append(hit.cpv + _repo_separator + hit.repo)
            else:
                observed.append(hit.cpv)
        self.assertEqual(observed, expected)
def getmaskingstatus(mycpv, settings=None, portdb=None):
    """
    Collect every reason why a package is masked.

    @param mycpv: a cpv string, or a Package instance (emerge passes the
        latter; its cpv / metadata / installed attributes are unpacked below)
    @param settings: config instance; defaults to a clone of the global
        portage.settings
    @param portdb: portdbapi used to fetch metadata when mycpv is a plain
        string; defaults to the global portage.portdb
    @rtype: list
    @return: human-readable mask-reason strings ("profile", "package.mask",
        "corruption", "EAPI <eapi>", missing license/property summaries,
        "<keyword> keyword"); an empty list means the package is unmasked
    @raise ValueError: if mycpv is not a valid cpv
    """
    if settings is None:
        settings = config(clone=portage.settings)
    if portdb is None:
        portdb = portage.portdb

    metadata = None
    installed = False
    if not isinstance(mycpv, basestring):
        # emerge passed in a Package instance
        pkg = mycpv
        mycpv = pkg.cpv
        metadata = pkg.metadata
        installed = pkg.installed

    mysplit = catpkgsplit(mycpv)
    if not mysplit:
        raise ValueError(_("invalid CPV: %s") % mycpv)
    if metadata is None:
        db_keys = list(portdb._aux_cache_keys)
        try:
            metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
        except KeyError:
            if not portdb.cpv_exists(mycpv):
                raise
            # cpv exists but aux_get failed, so the cached metadata is broken
            return ["corruption"]
        if "?" in metadata["LICENSE"]:
            # LICENSE contains USE conditionals; compute USE for this package
            settings.setcpv(mycpv, mydb=metadata)
            metadata["USE"] = settings["PORTAGE_USE"]
        else:
            metadata["USE"] = ""

    rValue = []

    # profile checking
    if settings._getProfileMaskAtom(mycpv, metadata):
        rValue.append("profile")

    # package.mask checking
    if settings._getMaskAtom(mycpv, metadata):
        rValue.append("package.mask")

    # keywords checking
    eapi = metadata["EAPI"]
    mygroups = settings._getKeywords(mycpv, metadata)
    licenses = metadata["LICENSE"]
    properties = metadata["PROPERTIES"]
    if eapi.startswith("-"):
        # strip a leading "-" marker from the EAPI before validation
        eapi = eapi[1:]
    if not eapi_is_supported(eapi):
        return ["EAPI %s" % eapi]
    elif _eapi_is_deprecated(eapi) and not installed:
        return ["EAPI %s" % eapi]
    egroups = settings.configdict["backupenv"].get(
        "ACCEPT_KEYWORDS", "").split()
    pgroups = settings["ACCEPT_KEYWORDS"].split()
    myarch = settings["ARCH"]
    if pgroups and myarch not in pgroups:
        """For operating systems other than Linux, ARCH is not necessarily a
        valid keyword."""
        myarch = pgroups[0].lstrip("~")

    cp = cpv_getkey(mycpv)
    pkgdict = settings.pkeywordsdict.get(cp)
    matches = False
    if pkgdict:
        # package.keywords-style per-package keyword additions
        cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
        for atom, pkgkeywords in pkgdict.items():
            if match_from_list(atom, cpv_slot_list):
                matches = True
                pgroups.extend(pkgkeywords)
    if matches or egroups:
        pgroups.extend(egroups)
        # apply incremental semantics: "-x" removes x, "-*" resets the set
        inc_pgroups = set()
        for x in pgroups:
            if x.startswith("-"):
                if x == "-*":
                    inc_pgroups.clear()
                else:
                    inc_pgroups.discard(x[1:])
            else:
                inc_pgroups.add(x)
        pgroups = inc_pgroups
        del inc_pgroups

    kmask = "missing"

    if '**' in pgroups:
        # "**" accepts the package regardless of its KEYWORDS
        kmask = None
    else:
        for keyword in pgroups:
            if keyword in mygroups:
                kmask = None
                break

    if kmask:
        # no accepted keyword matched; classify the kind of mismatch
        for gp in mygroups:
            if gp == "*":
                kmask = None
                break
            elif gp == "-" + myarch and myarch in pgroups:
                kmask = "-" + myarch
                break
            elif gp == "~" + myarch and myarch in pgroups:
                kmask = "~" + myarch
                break

    try:
        missing_licenses = settings._getMissingLicenses(mycpv, metadata)
        if missing_licenses:
            # report only the unaccepted license tokens plus grouping operators
            allowed_tokens = set(["||", "(", ")"])
            allowed_tokens.update(missing_licenses)
            license_split = licenses.split()
            license_split = [x for x in license_split \
                if x in allowed_tokens]
            msg = license_split[:]
            msg.append("license(s)")
            rValue.append(" ".join(msg))
    except portage.exception.InvalidDependString as e:
        rValue.append("LICENSE: "+str(e))

    try:
        missing_properties = settings._getMissingProperties(mycpv, metadata)
        if missing_properties:
            # report only the unaccepted property tokens plus grouping operators
            allowed_tokens = set(["||", "(", ")"])
            allowed_tokens.update(missing_properties)
            properties_split = properties.split()
            properties_split = [x for x in properties_split \
                if x in allowed_tokens]
            msg = properties_split[:]
            msg.append("properties")
            rValue.append(" ".join(msg))
    except portage.exception.InvalidDependString as e:
        rValue.append("PROPERTIES: "+str(e))

    # Only show KEYWORDS masks for installed packages
    # if they're not masked for any other reason.
    if kmask and (not installed or not rValue):
        rValue.append(kmask+" keyword")

    return rValue
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None,
	minimize_slots=False):
	"""
	Takes an unreduced and reduced deplist and removes satisfied dependencies.
	Returned deplist contains steps that must be taken to satisfy dependencies.

	For || groups this selects a single choice, ranked by the preference
	bins defined below; within each bin, choices may be re-ordered to
	prefer upgrades and (optionally) fewer new slots.

	@param unreduced: the original dependency structure (nested lists)
	@param reduced: parallel structure of satisfied-state booleans
	@param myroot: root key into trees
	@param use_binaries: when true, check availability against the bintree
		instead of the porttree
	@param trees: dict of dbapi trees plus resolver state entries
		(parent, graph_db, graph, pkg_use_enabled, ...); defaults to
		portage.db
	@param minimize_slots: when true, sort each choice bin to prefer
		choices having fewer new slots (see bug 632026)
	"""
	if trees is None:
		trees = portage.db
	writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
	if not reduced or unreduced == ["||"] or dep_eval(reduced):
		return []

	if unreduced[0] != "||":
		# AND level: recurse into sub-lists and keep unsatisfied atoms.
		unresolved = []
		for x, satisfied in zip(unreduced, reduced):
			if isinstance(x, list):
				unresolved += dep_zapdeps(x, satisfied, myroot,
					use_binaries=use_binaries, trees=trees,
					minimize_slots=minimize_slots)
			elif not satisfied:
				unresolved.append(x)
		return unresolved

	# We're at a ( || atom ... ) type level and need to make a choice
	deps = unreduced[1:]
	satisfieds = reduced[1:]

	# Our preference order is for an the first item that:
	# a) contains all unmasked packages with the same key as installed packages
	# b) contains all unmasked packages
	# c) contains masked installed packages
	# d) is the first item

	preferred_in_graph = []
	# NOTE: preferred_installed and preferred_any_slot are aliases of
	# preferred_in_graph here, so appends to any of the three land in the
	# same bin.
	preferred_installed = preferred_in_graph
	preferred_any_slot = preferred_in_graph
	preferred_non_installed = []
	unsat_use_in_graph = []
	unsat_use_installed = []
	unsat_use_non_installed = []
	other_installed = []
	other_installed_some = []
	other_installed_any_slot = []
	other = []

	# unsat_use_* must come after preferred_non_installed
	# for correct ordering in cases like || ( foo[a] foo[b] ).
	choice_bins = (
		preferred_in_graph,
		preferred_non_installed,
		unsat_use_in_graph,
		unsat_use_installed,
		unsat_use_non_installed,
		other_installed,
		other_installed_some,
		other_installed_any_slot,
		other,
	)

	# Alias the trees we'll be checking availability against
	parent = trees[myroot].get("parent")
	virt_parent = trees[myroot].get("virt_parent")
	priority = trees[myroot].get("priority")
	graph_db = trees[myroot].get("graph_db")
	graph = trees[myroot].get("graph")
	pkg_use_enabled = trees[myroot].get("pkg_use_enabled")
	graph_interface = trees[myroot].get("graph_interface")
	downgrade_probe = trees[myroot].get("downgrade_probe")
	circular_dependency = trees[myroot].get("circular_dependency")
	vardb = None
	if "vartree" in trees[myroot]:
		vardb = trees[myroot]["vartree"].dbapi
	if use_binaries:
		mydbapi = trees[myroot]["bintree"].dbapi
	else:
		mydbapi = trees[myroot]["porttree"].dbapi

	try:
		mydbapi_match_pkgs = mydbapi.match_pkgs
	except AttributeError:
		# Fallback for dbapi implementations lacking match_pkgs: wrap
		# plain match() results as _pkg_str instances.
		def mydbapi_match_pkgs(atom):
			return [mydbapi._pkg_str(cpv, atom.repo)
				for cpv in mydbapi.match(atom)]

	# Sort the deps into installed, not installed but already
	# in the graph and other, not installed and not in the graph
	# and other, with values of [[required_atom], availablility]
	for x, satisfied in zip(deps, satisfieds):
		if isinstance(x, list):
			atoms = dep_zapdeps(x, satisfied, myroot,
				use_binaries=use_binaries, trees=trees,
				minimize_slots=minimize_slots)
		else:
			atoms = [x]
		if vardb is None:
			# When called by repoman, we can simply return the first choice
			# because dep_eval() handles preference selection.
			return atoms

		all_available = True
		all_use_satisfied = True
		all_use_unmasked = True
		conflict_downgrade = False
		installed_downgrade = False
		slot_atoms = collections.defaultdict(list)
		slot_map = {}
		cp_map = {}
		for atom in atoms:
			if atom.blocker:
				continue

			# It's not a downgrade if parent is replacing child.
			replacing = (parent and graph_interface and
				graph_interface.will_replace_child(parent, myroot, atom))
			# Ignore USE dependencies here since we don't want USE
			# settings to adversely affect || preference evaluation.
			avail_pkg = mydbapi_match_pkgs(atom.without_use)
			if not avail_pkg and replacing:
				avail_pkg = [replacing]
			if avail_pkg:
				avail_pkg = avail_pkg[-1] # highest (ascending order)
				avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
			if not avail_pkg:
				all_available = False
				all_use_satisfied = False
				break

			if not replacing and graph_db is not None and downgrade_probe is not None:
				slot_matches = graph_db.match_pkgs(avail_slot)
				if (len(slot_matches) > 1 and
					avail_pkg < slot_matches[-1] and
					not downgrade_probe(avail_pkg)):
					# If a downgrade is not desirable, then avoid a
					# choice that pulls in a lower version involved
					# in a slot conflict (bug #531656).
					conflict_downgrade = True

			if atom.use:
				avail_pkg_use = mydbapi_match_pkgs(atom)
				if not avail_pkg_use:
					all_use_satisfied = False

					if pkg_use_enabled is not None:
						# Check which USE flags cause the match to fail,
						# so we can prioritize choices that do not
						# require changes to use.mask or use.force
						# (see bug #515584).
						violated_atom = atom.violated_conditionals(
							pkg_use_enabled(avail_pkg),
							avail_pkg.iuse.is_valid_flag)

						# Note that violated_atom.use can be None here,
						# since evaluation can collapse conditional USE
						# deps that cause the match to fail due to
						# missing IUSE (match uses atom.unevaluated_atom
						# to detect such missing IUSE).
						if violated_atom.use is not None:
							for flag in violated_atom.use.enabled:
								if flag in avail_pkg.use.mask:
									all_use_unmasked = False
									break
							else:
								for flag in violated_atom.use.disabled:
									if flag in avail_pkg.use.force and \
										flag not in avail_pkg.use.mask:
										all_use_unmasked = False
										break
				else:
					# highest (ascending order)
					avail_pkg_use = avail_pkg_use[-1]
					if avail_pkg_use != avail_pkg:
						avail_pkg = avail_pkg_use
					avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))

			if not replacing and downgrade_probe is not None and graph is not None:
				highest_in_slot = mydbapi_match_pkgs(avail_slot)
				highest_in_slot = (highest_in_slot[-1]
					if highest_in_slot else None)
				if (avail_pkg and highest_in_slot and
					avail_pkg < highest_in_slot and
					not downgrade_probe(avail_pkg) and
					(highest_in_slot.installed or
					highest_in_slot in graph)):
					installed_downgrade = True

			slot_map[avail_slot] = avail_pkg
			slot_atoms[avail_slot].append(atom)
			highest_cpv = cp_map.get(avail_pkg.cp)
			all_match_current = None
			all_match_previous = None
			if (highest_cpv is not None and
				highest_cpv.slot == avail_pkg.slot):
				# If possible, make the package selection internally
				# consistent by choosing a package that satisfies all
				# atoms which match a package in the same slot. Later on,
				# the package version chosen here is used in the
				# has_upgrade/has_downgrade logic to prefer choices with
				# upgrades, and a package choice that is not internally
				# consistent will lead the has_upgrade/has_downgrade logic
				# to produce invalid results (see bug 600346).
				all_match_current = all(a.match(avail_pkg)
					for a in slot_atoms[avail_slot])
				all_match_previous = all(a.match(highest_cpv)
					for a in slot_atoms[avail_slot])
				if all_match_previous and not all_match_current:
					continue

			current_higher = (highest_cpv is None or
				vercmp(avail_pkg.version, highest_cpv.version) > 0)

			if current_higher or (all_match_current and not all_match_previous):
				cp_map[avail_pkg.cp] = avail_pkg

		want_update = False
		if graph_interface is None or graph_interface.removal_action:
			new_slot_count = len(slot_map)
		else:
			new_slot_count = 0
			for slot_atom, avail_pkg in slot_map.items():
				if parent is not None and graph_interface.want_update_pkg(
					parent, avail_pkg):
					want_update = True
				if (not slot_atom.cp.startswith("virtual/")
					and not graph_db.match_pkgs(slot_atom)):
					new_slot_count += 1

		this_choice = _dep_choice(atoms=atoms, slot_map=slot_map,
			cp_map=cp_map, all_available=all_available,
			all_installed_slots=False,
			new_slot_count=new_slot_count,
			all_in_graph=False,
			want_update=want_update)
		if all_available:
			# The "all installed" criterion is not version or slot specific.
			# If any version of a package is already in the graph then we
			# assume that it is preferred over other possible packages choices.
			all_installed = True
			for atom in set(Atom(atom.cp) for atom in atoms
				if not atom.blocker):
				# New-style virtuals have zero cost to install.
				if not vardb.match(atom) and not atom.startswith("virtual/"):
					all_installed = False
					break

			all_installed_slots = False
			if all_installed:
				all_installed_slots = True
				for slot_atom in slot_map:
					# New-style virtuals have zero cost to install.
					if not vardb.match(slot_atom) and \
						not slot_atom.startswith("virtual/"):
						all_installed_slots = False
						break

			this_choice.all_installed_slots = all_installed_slots

			if graph_db is None:
				if all_use_satisfied:
					if all_installed:
						if all_installed_slots:
							preferred_installed.append(this_choice)
						else:
							preferred_any_slot.append(this_choice)
					else:
						preferred_non_installed.append(this_choice)
				else:
					if not all_use_unmasked:
						other.append(this_choice)
					elif all_installed_slots:
						unsat_use_installed.append(this_choice)
					else:
						unsat_use_non_installed.append(this_choice)
			elif conflict_downgrade or installed_downgrade:
				other.append(this_choice)
			else:
				all_in_graph = True
				for atom in atoms:
					# New-style virtuals have zero cost to install.
					if atom.blocker or atom.cp.startswith("virtual/"):
						continue
					# We check if the matched package has actually been
					# added to the digraph, in order to distinguish between
					# those packages and installed packages that may need
					# to be uninstalled in order to resolve blockers.
					if not any(pkg in graph for pkg in
						graph_db.match_pkgs(atom)):
						all_in_graph = False
						break
				this_choice.all_in_graph = all_in_graph

				circular_atom = None
				if parent and parent.onlydeps:
					# Check if the atom would result in a direct circular
					# dependency and avoid that for --onlydeps arguments
					# since it can defeat the purpose of --onlydeps.
					# This check should only be used for --onlydeps
					# arguments, since it can interfere with circular
					# dependency backtracking choices, causing the test
					# case for bug 756961 to fail.
					cpv_slot_list = [parent]
					for atom in atoms:
						if atom.blocker:
							continue
						if vardb.match(atom):
							# If the atom is satisfied by an installed
							# version then it's not a circular dep.
							continue
						if atom.cp != parent.cp:
							continue
						if match_from_list(atom, cpv_slot_list):
							circular_atom = atom
							break
				if circular_atom is None and circular_dependency is not None:
					for circular_child in itertools.chain(
						circular_dependency.get(parent, []),
						circular_dependency.get(virt_parent, [])):
						for atom in atoms:
							if not atom.blocker and atom.match(circular_child):
								circular_atom = atom
								break
						if circular_atom is not None:
							break

				if circular_atom is not None:
					other.append(this_choice)
				else:
					if all_use_satisfied:
						if all_in_graph:
							preferred_in_graph.append(this_choice)
						elif all_installed:
							if all_installed_slots:
								preferred_installed.append(this_choice)
							else:
								preferred_any_slot.append(this_choice)
						else:
							preferred_non_installed.append(this_choice)
					else:
						if not all_use_unmasked:
							other.append(this_choice)
						elif all_in_graph:
							unsat_use_in_graph.append(this_choice)
						elif all_installed_slots:
							unsat_use_installed.append(this_choice)
						else:
							unsat_use_non_installed.append(this_choice)
		else:
			# Not all packages are available; classify by how much of the
			# choice is already installed.
			all_installed = True
			some_installed = False
			for atom in atoms:
				if not atom.blocker:
					if vardb.match(atom):
						some_installed = True
					else:
						all_installed = False

			if all_installed:
				this_choice.all_installed_slots = True
				other_installed.append(this_choice)
			elif some_installed:
				other_installed_some.append(this_choice)

			# Use Atom(atom.cp) for a somewhat "fuzzy" match, since
			# the whole atom may be too specific. For example, see
			# bug #522652, where using the whole atom leads to an
			# unsatisfiable choice.
			elif any(vardb.match(Atom(atom.cp)) for atom in atoms
				if not atom.blocker):
				other_installed_any_slot.append(this_choice)
			else:
				other.append(this_choice)

	# Prefer choices which contain upgrades to higher slots. This helps
	# for deps such as || ( foo:1 foo:2 ), where we want to prefer the
	# atom which matches the higher version rather than the atom furthest
	# to the left. Sorting is done separately for each of choice_bins, so
	# as not to interfere with the ordering of the bins. Because of the
	# bin separation, the main function of this code is to allow
	# --depclean to remove old slots (rather than to pull in new slots).
	for choices in choice_bins:
		if len(choices) < 2:
			continue

		if minimize_slots:
			# Prefer choices having fewer new slots. When used with DNF form,
			# this can eliminate unecessary packages that depclean would
			# ultimately eliminate (see bug 632026). Only use this behavior
			# when deemed necessary by the caller, since this will discard the
			# order specified in the ebuild, and the preferences specified
			# there can serve as a crucial sources of guidance (see bug 645002).
			# NOTE: Under some conditions, new_slot_count value may have some
			# variance from one calculation to the next because it depends on
			# the order that packages are added to the graph. This variance can
			# contribute to outcomes that appear to be random. Meanwhile,
			# the order specified in the ebuild is without variance, so it
			# does not have this problem.
			choices.sort(key=operator.attrgetter('new_slot_count'))

		for choice_1 in choices[1:]:
			cps = set(choice_1.cp_map)
			for choice_2 in choices:
				if choice_1 is choice_2:
					# choice_1 will not be promoted, so move on
					break
				if (
					# Prefer choices where all_installed_slots is True, except
					# in cases where we want to upgrade to a new slot as in
					# bug 706278. Don't compare new_slot_count here since that
					# would aggressively override the preference order defined
					# in the ebuild, breaking the test case for bug 645002.
					(choice_1.all_installed_slots and
					not choice_2.all_installed_slots and
					not choice_2.want_update)):
					# promote choice_1 in front of choice_2
					choices.remove(choice_1)
					index_2 = choices.index(choice_2)
					choices.insert(index_2, choice_1)
					break

				intersecting_cps = cps.intersection(choice_2.cp_map)
				has_upgrade = False
				has_downgrade = False
				for cp in intersecting_cps:
					version_1 = choice_1.cp_map[cp]
					version_2 = choice_2.cp_map[cp]
					difference = vercmp(version_1.version, version_2.version)
					if difference != 0:
						if difference > 0:
							has_upgrade = True
						else:
							has_downgrade = True

				if (
					# Prefer upgrades.
					(has_upgrade and not has_downgrade)

					# Prefer choices where all packages have been pulled into
					# the graph, except for choices that eliminate upgrades.
					or (choice_1.all_in_graph and not choice_2.all_in_graph
					and not (has_downgrade and not has_upgrade))):
					# promote choice_1 in front of choice_2
					choices.remove(choice_1)
					index_2 = choices.index(choice_2)
					choices.insert(index_2, choice_1)
					break

	# Return the atoms of the first acceptable choice: first pass requires
	# all packages available, second pass allows masked choices.
	for allow_masked in (False, True):
		for choices in choice_bins:
			for choice in choices:
				if choice.all_available or allow_masked:
					return choice.atoms

	assert False # This point should not be reachable
def getmaskingreason(mycpv, metadata=None, settings=None,
	portdb=None, return_location=False, myrepo=None):
	"""
	Scan package.mask files for the atom masking mycpv and return the
	comment block preceding that atom, or None when no matching masked
	atom (with comment) is found.

	If specified, the myrepo argument is assumed to be valid. This
	should be a safe assumption since portdbapi methods always
	return valid repo names and valid "repository" metadata from
	aux_get.

	@param mycpv: package CPV string
	@param metadata: optional metadata dict; fetched via portdb.aux_get
		when not supplied
	@param settings: config instance, defaults to portage.settings
	@param portdb: portdbapi instance, defaults to portage.portdb
	@param return_location: when True, return a (comment, filename)
		tuple instead of just the comment
	@param myrepo: optional repository name used for aux_get and for
		building the cpv:slot::repo match string
	"""
	if settings is None:
		settings = portage.settings
	if portdb is None:
		portdb = portage.portdb
	mysplit = catpkgsplit(mycpv)
	if not mysplit:
		raise ValueError(_("invalid CPV: %s") % mycpv)
	if metadata is None:
		db_keys = list(portdb._aux_cache_keys)
		try:
			metadata = dict(zip(db_keys,
				portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
		except KeyError:
			if not portdb.cpv_exists(mycpv):
				raise
		else:
			if myrepo is None:
				myrepo = _gen_valid_repo(metadata["repository"])
	elif myrepo is None:
		myrepo = metadata.get("repository")
		if myrepo is not None:
			myrepo = _gen_valid_repo(metadata["repository"])

	if metadata is not None and \
		not portage.eapi_is_supported(metadata["EAPI"]):
		# Return early since otherwise we might produce invalid
		# results given that the EAPI is not supported. Also,
		# metadata is mostly useless in this case since it doesn't
		# contain essential things like SLOT.
		if return_location:
			return (None, None)
		else:
			return None

	# Sometimes we can't access SLOT or repository due to corruption.
	pkg = mycpv
	if metadata is not None:
		pkg = "".join((mycpv, _slot_separator, metadata["SLOT"]))
	# At this point myrepo should be None, a valid name, or
	# Package.UNKNOWN_REPO which we ignore.
	if myrepo is not None and myrepo != Package.UNKNOWN_REPO:
		pkg = "".join((pkg, _repo_separator, myrepo))
	cpv_slot_list = [pkg]

	mycp = mysplit[0] + "/" + mysplit[1]

	# XXX- This is a temporary duplicate of code from the config constructor.
	locations = [os.path.join(settings["PORTDIR"], "profiles")]
	locations.extend(settings.profiles)
	for ov in settings["PORTDIR_OVERLAY"].split():
		profdir = os.path.join(normalize_path(ov), "profiles")
		if os.path.isdir(profdir):
			locations.append(profdir)
	locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
		USER_CONFIG_PATH))
	locations.reverse()
	pmasklists = []
	for profile in locations:
		pmask_filename = os.path.join(profile, "package.mask")
		pmasklists.append(
			(pmask_filename, grablines(pmask_filename, recursive=1)))

	pmaskdict = settings._mask_manager._pmaskdict
	if mycp in pmaskdict:
		for x in pmaskdict[mycp]:
			if match_from_list(x, cpv_slot_list):
				# Compare without repo, since file lines are parsed the
				# same way below.
				x = x.without_repo
				for pmask in pmasklists:
					comment = ""
					# comment_valid tracks the line index up to which the
					# accumulated comment still applies (-1 = no comment).
					comment_valid = -1
					pmask_filename = pmask[0]
					for i in range(len(pmask[1])):
						l = pmask[1][i].strip()
						try:
							l_atom = Atom(l, allow_repo=True,
								allow_wildcard=True).without_repo
						except InvalidAtom:
							l_atom = None
						if l == "":
							# Blank line terminates the current comment.
							comment = ""
							comment_valid = -1
						elif l[0] == "#":
							comment += (l + "\n")
							comment_valid = i + 1
						elif l_atom == x:
							if comment_valid != i:
								comment = ""
							if return_location:
								return (comment, pmask_filename)
							else:
								return comment
						elif comment_valid != -1:
							# Apparently this comment applies to multiple masks, so
							# it remains valid until a blank line is encountered.
							comment_valid += 1
	if return_location:
		return (None, None)
	else:
		return None
def _match(self, pkg):
	"""Return True if pkg satisfies self._atom.

	The cpv portion is checked first; the SLOT comparison is deferred
	because it requires metadata.
	"""
	if not match_from_list(self._atom, [pkg._cpv]):
		return False
	wanted_slot = self._atom.slot
	# An atom without a slot restriction matches any slot.
	if not wanted_slot:
		return True
	return wanted_slot == pkg.slot
def getmaskingreason(mycpv, metadata=None, settings=None,
	portdb=None, return_location=False, myrepo=None):
	"""
	Scan package.mask files (per-repository masters chain, profiles, and
	the user config) for the atom masking mycpv and return the comment
	block preceding that atom, or None when none is found.

	If specified, the myrepo argument is assumed to be valid. This
	should be a safe assumption since portdbapi methods always
	return valid repo names and valid "repository" metadata from
	aux_get.

	@param mycpv: package CPV string (or a _pkg_str-like object with
		slot/repo attributes)
	@param metadata: optional metadata dict; fetched via portdb.aux_get
		when not supplied
	@param settings: config instance, defaults to portage.settings
	@param portdb: portdbapi instance, defaults to portage.portdb
	@param return_location: when True, return a (comment, filename)
		tuple instead of just the comment
	@param myrepo: optional repository name used for aux_get and for
		constructing the _pkg_str
	"""
	if settings is None:
		settings = portage.settings
	if portdb is None:
		portdb = portage.portdb
	mysplit = catpkgsplit(mycpv)
	if not mysplit:
		raise ValueError(_("invalid CPV: %s") % mycpv)
	if metadata is None:
		db_keys = list(portdb._aux_cache_keys)
		try:
			metadata = dict(
				zip(db_keys, portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
		except KeyError:
			if not portdb.cpv_exists(mycpv):
				raise
		else:
			if myrepo is None:
				myrepo = _gen_valid_repo(metadata["repository"])
	elif myrepo is None:
		myrepo = metadata.get("repository")
		if myrepo is not None:
			myrepo = _gen_valid_repo(metadata["repository"])

	if metadata is not None and not portage.eapi_is_supported(
		metadata["EAPI"]):
		# Return early since otherwise we might produce invalid
		# results given that the EAPI is not supported. Also,
		# metadata is mostly useless in this case since it doesn't
		# contain essential things like SLOT.
		if return_location:
			return (None, None)
		return None

	# Sometimes we can't access SLOT or repository due to corruption.
	pkg = mycpv
	try:
		pkg.slot
	except AttributeError:
		# mycpv is a plain string; wrap it so that slot/repo/cp are
		# available below.
		pkg = _pkg_str(mycpv, metadata=metadata, repo=myrepo)

	cpv_slot_list = [pkg]

	mycp = pkg.cp

	locations = []
	if pkg.repo in settings.repositories:
		for repo in settings.repositories[pkg.repo].masters + (
			settings.repositories[pkg.repo],):
			locations.append(os.path.join(repo.location, "profiles"))
	locations.extend(settings.profiles)
	locations.append(
		os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH))
	locations.reverse()
	pmasklists = []
	for profile in locations:
		pmask_filename = os.path.join(profile, "package.mask")
		node = None
		# Group grabbed lines by their source file so comments are
		# attributed to the file they came from.
		for l, recursive_filename in grablines(pmask_filename,
			recursive=1, remember_source_file=True):
			if node is None or node[0] != recursive_filename:
				node = (recursive_filename, [])
				pmasklists.append(node)
			node[1].append(l)

	pmaskdict = settings._mask_manager._pmaskdict
	if mycp in pmaskdict:
		for x in pmaskdict[mycp]:
			if match_from_list(x, cpv_slot_list):
				# Compare without repo, matching the parsing of file
				# lines below.
				x = x.without_repo
				for pmask in pmasklists:
					comment = ""
					# comment_valid tracks the line index up to which the
					# accumulated comment still applies (-1 = no comment).
					comment_valid = -1
					pmask_filename = pmask[0]
					for i in range(len(pmask[1])):
						l = pmask[1][i].strip()
						try:
							l_atom = Atom(l, allow_repo=True,
								allow_wildcard=True).without_repo
						except InvalidAtom:
							l_atom = None
						if l == "":
							# Blank line terminates the current comment.
							comment = ""
							comment_valid = -1
						elif l[0] == "#":
							comment += l + "\n"
							comment_valid = i + 1
						elif l_atom == x:
							if comment_valid != i:
								comment = ""
							if return_location:
								return (comment, pmask_filename)
							return comment
						elif comment_valid != -1:
							# Apparently this comment applies to multiple masks, so
							# it remains valid until a blank line is encountered.
							comment_valid += 1
	if return_location:
		return (None, None)
	return None
def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
	trees=None, use_mask=None, use_force=None, **kwargs):
	"""
	In order to solve bug #141118, recursively expand new-style virtuals so
	as to collapse one or more levels of indirection, generating an expanded
	search space. In dep_zapdeps, new-style virtuals will be assigned
	zero cost regardless of whether or not they are currently installed. Virtual
	blockers are supported but only when the virtual expands to a single
	atom because it wouldn't necessarily make sense to block all the components
	of a compound virtual.  When more than one new-style virtual is matched,
	the matches are sorted from highest to lowest versions and the atom is
	expanded to || ( highest match ... lowest match ).

	@param mysplit: parsed dependency structure (nested lists of Atoms
		and "||" markers)
	@param edebug: when true, emit debug messages for each virtual parent
	@param mydbapi: dbapi passed through to recursive dep_check() calls
	@param mysettings: config instance providing virtuals and
		package.provided data
	@param trees: dict of trees keyed by root; also carries resolver
		state (parent, virt_parent, atom_graph, pkg_use_enabled)
	@param use_mask, use_force: USE masks/forces for repoman QA
		conditional evaluation
	@return: a new dependency structure with virtual atoms expanded
	"""
	newsplit = []
	mytrees = trees[myroot]
	portdb = mytrees["porttree"].dbapi
	pkg_use_enabled = mytrees.get("pkg_use_enabled")
	atom_graph = mytrees.get("atom_graph")
	parent = mytrees.get("parent")
	virt_parent = mytrees.get("virt_parent")
	graph_parent = None
	eapi = None
	if parent is not None:
		if virt_parent is not None:
			graph_parent = virt_parent
			eapi = virt_parent[0].metadata['EAPI']
		else:
			graph_parent = parent
			eapi = parent.metadata["EAPI"]
	repoman = not mysettings.local_config
	if kwargs["use_binaries"]:
		portdb = trees[myroot]["bintree"].dbapi
	myvirtuals = mysettings.getvirtuals()
	pprovideddict = mysettings.pprovideddict
	myuse = kwargs["myuse"]
	for x in mysplit:
		if x == "||":
			newsplit.append(x)
			continue
		elif isinstance(x, list):
			# Recurse into nested dependency groups.
			newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
				mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
				use_force=use_force, **kwargs))
			continue

		if not isinstance(x, Atom):
			raise ParseError(
				_("invalid token: '%s'") % x)

		if repoman:
			x = x._eval_qa_conditionals(use_mask, use_force)

		mykey = x.cp
		if not mykey.startswith("virtual/"):
			# Non-virtual atoms pass through unchanged.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
			continue
		mychoices = myvirtuals.get(mykey, [])
		if x.blocker:
			# Virtual blockers are no longer expanded here since
			# the un-expanded virtual atom is more useful for
			# maintaining a cache of blocker atoms.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
			continue

		if repoman or not hasattr(portdb, 'match_pkgs') or \
			pkg_use_enabled is None:
			if portdb.cp_list(x.cp):
				newsplit.append(x)
			else:
				# TODO: Add PROVIDE check for repoman.
				a = []
				for y in mychoices:
					a.append(Atom(x.replace(x.cp, y.cp, 1)))
				if not a:
					newsplit.append(x)
				elif len(a) == 1:
					newsplit.append(a[0])
				else:
					newsplit.append(['||'] + a)
			continue

		pkgs = []
		# Ignore USE deps here, since otherwise we might not
		# get any matches. Choices with correct USE settings
		# will be preferred in dep_zapdeps().
		matches = portdb.match_pkgs(x.without_use)
		# Use descending order to prefer higher versions.
		matches.reverse()
		for pkg in matches:
			# only use new-style matches
			if pkg.cp.startswith("virtual/"):
				pkgs.append(pkg)
		if not (pkgs or mychoices):
			# This one couldn't be expanded as a new-style virtual. Old-style
			# virtuals have already been expanded by dep_virtual, so this one
			# is unavailable and dep_zapdeps will identify it as such. The
			# atom is not eliminated here since it may still represent a
			# dependency that needs to be satisfied.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
			continue

		a = []
		for pkg in pkgs:
			virt_atom = '=' + pkg.cpv
			if x.use:
				virt_atom += str(x.use)
			virt_atom = Atom(virt_atom)
			# According to GLEP 37, RDEPEND is the only dependency
			# type that is valid for new-style virtuals. Repoman
			# should enforce this.
			depstring = pkg.metadata['RDEPEND']
			pkg_kwargs = kwargs.copy()
			pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
			if edebug:
				writemsg_level(_("Virtual Parent: %s\n") \
					% (pkg,), noiselevel=-1, level=logging.DEBUG)
				writemsg_level(_("Virtual Depstring: %s\n") \
					% (depstring,), noiselevel=-1, level=logging.DEBUG)

			# Set EAPI used for validation in dep_check() recursion.
			mytrees["virt_parent"] = (pkg, virt_atom)

			try:
				mycheck = dep_check(depstring, mydbapi, mysettings,
					myroot=myroot, trees=trees, **pkg_kwargs)
			finally:
				# Restore previous EAPI after recursion.
				if virt_parent is not None:
					mytrees["virt_parent"] = virt_parent
				else:
					del mytrees["virt_parent"]

			if not mycheck[0]:
				raise ParseError("%s: %s '%s'" % \
					(pkg, mycheck[1], depstring))

			# pull in the new-style virtual
			mycheck[1].append(virt_atom)
			a.append(mycheck[1])
			if atom_graph is not None:
				atom_graph.add(virt_atom, graph_parent)
		# Plain old-style virtuals. New-style virtuals are preferred.
		if not pkgs:
			for y in mychoices:
				new_atom = Atom(x.replace(x.cp, y.cp, 1))
				matches = portdb.match(new_atom)
				# portdb is an instance of depgraph._dep_check_composite_db, so
				# USE conditionals are already evaluated.
				if matches and mykey in \
					portdb.aux_get(matches[-1], ['PROVIDE'])[0].split():
					a.append(new_atom)
					if atom_graph is not None:
						atom_graph.add(new_atom, graph_parent)

		if not a and mychoices:
			# Check for a virtual package.provided match.
			for y in mychoices:
				new_atom = Atom(x.replace(x.cp, y.cp, 1))
				if match_from_list(new_atom,
					pprovideddict.get(new_atom.cp, [])):
					a.append(new_atom)
					if atom_graph is not None:
						atom_graph.add(new_atom, graph_parent)

		if not a:
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
		elif len(a) == 1:
			newsplit.append(a[0])
		else:
			newsplit.append(['||'] + a)

	return newsplit
def __call__(self, depgraph, mylist, favorites=None, verbosity=None):
	"""The main operation to format and display the resolver output.

	@param depgraph: dependency graph
	@param mylist: list of packages being processed
	@param favorites: list, defaults to []
	@param verbosity: verbose level, defaults to None
	@return: os.EX_OK
	Modifies self.conf, self.myfetchlist, self.portdb, self.vardb,
	self.pkgsettings, self.verboseadd, self.oldlp, self.newlp,
	self.print_msg,
	"""
	if favorites is None:
		favorites = []
	self.conf = _DisplayConfig(depgraph, mylist, favorites, verbosity)
	mylist = self.get_display_list(self.conf.mylist)
	# files to fetch list - avoids counting a same file twice
	# in size display (verbose mode)
	self.myfetchlist = set()

	# Use this set to detect when all the "repoadd" strings are "[0]"
	# and disable the entire repo display in this case.
	repoadd_set = set()

	for mylist_index in range(len(mylist)):
		pkg, depth, ordered = mylist[mylist_index]
		self.portdb = self.conf.trees[pkg.root]["porttree"].dbapi
		self.vardb = self.conf.trees[pkg.root]["vartree"].dbapi
		self.pkgsettings = self.conf.pkgsettings[pkg.root]
		self.indent = " " * depth

		if isinstance(pkg, Blocker):
			if self._blockers(pkg, fetch_symbol=" "):
				continue
		else:
			pkg_info = self.set_pkg_info(pkg, ordered)
			addl, pkg_info.oldbest, myinslotlist = \
				self._get_installed_best(pkg, pkg_info)
			self.verboseadd = ""
			self.repoadd = None
			self._display_use(pkg, pkg_info.oldbest, myinslotlist)
			self.recheck_hidden(pkg)
			if self.conf.verbosity == 3:
				self.verbose_size(pkg, repoadd_set, pkg_info)

			pkg_info.cp = pkg.cp
			pkg_info.ver = self.get_ver_str(pkg)

			# Column widths for the legacy fixed-width layout.
			self.oldlp = self.conf.columnwidth - 30
			self.newlp = self.oldlp - 30
			pkg_info.oldbest = self.convert_myoldbest(pkg_info.oldbest)
			pkg_info.system, pkg_info.world = \
				self.check_system_world(pkg)
			addl = self.set_interactive(pkg, pkg_info.ordered, addl)

			if self.include_mask_str():
				addl += self.gen_mask_str(pkg)

			if pkg.root != "/":
				# Cross-root (ROOT != /) packages get a "to <root>" note.
				if pkg_info.oldbest:
					pkg_info.oldbest += " "
				if self.conf.columns:
					myprint = self._set_non_root_columns(
						addl, pkg_info, pkg)
				else:
					if not pkg_info.merge:
						addl = self.empty_space_in_brackets()
						myprint = "[%s%s] " % (
							self.pkgprint(pkg_info.operation.ljust(13),
							pkg_info), addl,
						)
					else:
						myprint = "[%s %s] " % (
							self.pkgprint(pkg.type_name, pkg_info), addl)
					myprint += self.indent + \
						self.pkgprint(pkg.cpv, pkg_info) + " " + \
						pkg_info.oldbest + darkgreen("to " + pkg.root)
			else:
				if self.conf.columns:
					myprint = self._set_root_columns(
						addl, pkg_info, pkg)
				else:
					myprint = self._set_no_columns(
						pkg, pkg_info, addl)

			if self.conf.columns and pkg.operation == "uninstall":
				continue
			self.print_msg.append((myprint, self.verboseadd, self.repoadd))

			# Warn about a mid-list portage upgrade, which makes emerge
			# stop and reload itself before resuming the merge list.
			if not self.conf.tree_display \
				and not self.conf.no_restart \
				and pkg.root == self.conf.running_root.root \
				and match_from_list(PORTAGE_PACKAGE_ATOM, [pkg]) \
				and not self.conf.quiet:
				if not self.vardb.cpv_exists(pkg.cpv) or \
					'9999' in pkg.cpv or \
					'git' in pkg.inherited or \
					'git-2' in pkg.inherited:
					if mylist_index < len(mylist) - 1:
						self.print_msg.append(
							colorize(
								"WARN",
								"*** Portage will stop merging "
								"at this point and reload itself,"
							)
						)
						self.print_msg.append(
							colorize("WARN", " then resume the merge.")
						)

	show_repos = repoadd_set and repoadd_set != set(["0"])

	# now finally print out the messages
	self.print_messages(show_repos)
	self.print_blockers()
	if self.conf.verbosity == 3:
		self.print_verbose(show_repos)
	if self.conf.changelog:
		self.print_changelog()

	return os.EX_OK
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
	"""
	Takes an unreduced and reduced deplist and removes satisfied dependencies.
	Returned deplist contains steps that must be taken to satisfy dependencies.

	For || groups this selects a single choice, ranked by the preference
	bins defined below; within each bin, choices containing upgrades may
	be promoted ahead of choices containing only downgrades.

	@param unreduced: the original dependency structure (nested lists)
	@param reduced: parallel structure of satisfied-state booleans
	@param myroot: root key into trees
	@param use_binaries: when true, check availability against the bintree
		instead of the porttree
	@param trees: dict of dbapi trees plus resolver state entries
		(parent, priority, graph_db); defaults to portage.db
	"""
	if trees is None:
		trees = portage.db
	writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
	if not reduced or unreduced == ["||"] or dep_eval(reduced):
		return []

	if unreduced[0] != "||":
		# AND level: recurse into sub-lists and keep unsatisfied atoms.
		unresolved = []
		for x, satisfied in zip(unreduced, reduced):
			if isinstance(x, list):
				unresolved += dep_zapdeps(x, satisfied, myroot,
					use_binaries=use_binaries, trees=trees)
			elif not satisfied:
				unresolved.append(x)
		return unresolved

	# We're at a ( || atom ... ) type level and need to make a choice
	deps = unreduced[1:]
	satisfieds = reduced[1:]

	# Our preference order is for an the first item that:
	# a) contains all unmasked packages with the same key as installed packages
	# b) contains all unmasked packages
	# c) contains masked installed packages
	# d) is the first item

	preferred_installed = []
	preferred_in_graph = []
	preferred_any_slot = []
	preferred_non_installed = []
	unsat_use_in_graph = []
	unsat_use_installed = []
	unsat_use_non_installed = []
	other = []

	# unsat_use_* must come after preferred_non_installed
	# for correct ordering in cases like || ( foo[a] foo[b] ).
	choice_bins = (
		preferred_in_graph,
		preferred_installed,
		preferred_any_slot,
		preferred_non_installed,
		unsat_use_in_graph,
		unsat_use_installed,
		unsat_use_non_installed,
		other,
	)

	# Alias the trees we'll be checking availability against
	parent = trees[myroot].get("parent")
	priority = trees[myroot].get("priority")
	graph_db = trees[myroot].get("graph_db")
	vardb = None
	if "vartree" in trees[myroot]:
		vardb = trees[myroot]["vartree"].dbapi
	if use_binaries:
		mydbapi = trees[myroot]["bintree"].dbapi
	else:
		mydbapi = trees[myroot]["porttree"].dbapi

	# Sort the deps into installed, not installed but already
	# in the graph and other, not installed and not in the graph
	# and other, with values of [[required_atom], availablility]
	for x, satisfied in zip(deps, satisfieds):
		if isinstance(x, list):
			atoms = dep_zapdeps(x, satisfied, myroot,
				use_binaries=use_binaries, trees=trees)
		else:
			atoms = [x]
		if vardb is None:
			# When called by repoman, we can simply return the first choice
			# because dep_eval() handles preference selection.
			return atoms

		all_available = True
		all_use_satisfied = True
		slot_map = {}
		cp_map = {}
		for atom in atoms:
			if atom.blocker:
				continue
			# Ignore USE dependencies here since we don't want USE
			# settings to adversely affect || preference evaluation.
			avail_pkg = mydbapi.match(atom.without_use)
			if avail_pkg:
				avail_pkg = avail_pkg[-1] # highest (ascending order)
				avail_slot = Atom("%s:%s" % (atom.cp,
					mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
			if not avail_pkg:
				all_available = False
				all_use_satisfied = False
				break

			if atom.use:
				avail_pkg_use = mydbapi.match(atom)
				if not avail_pkg_use:
					all_use_satisfied = False
				else:
					# highest (ascending order)
					avail_pkg_use = avail_pkg_use[-1]
					if avail_pkg_use != avail_pkg:
						avail_pkg = avail_pkg_use
						avail_slot = Atom("%s:%s" % (atom.cp,
							mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))

			slot_map[avail_slot] = avail_pkg
			pkg_cp = cpv_getkey(avail_pkg)
			highest_cpv = cp_map.get(pkg_cp)
			if highest_cpv is None or \
				pkgcmp(catpkgsplit(avail_pkg)[1:],
				catpkgsplit(highest_cpv)[1:]) > 0:
				cp_map[pkg_cp] = avail_pkg

		# Choices are plain tuples here (no _dep_choice class in this
		# variant).
		this_choice = (atoms, slot_map, cp_map, all_available)
		if all_available:
			# The "all installed" criterion is not version or slot specific.
			# If any version of a package is already in the graph then we
			# assume that it is preferred over other possible packages choices.
			all_installed = True
			for atom in set(Atom(atom.cp) for atom in atoms \
				if not atom.blocker):
				# New-style virtuals have zero cost to install.
				if not vardb.match(atom) and not atom.startswith("virtual/"):
					all_installed = False
					break
			all_installed_slots = False
			if all_installed:
				all_installed_slots = True
				for slot_atom in slot_map:
					# New-style virtuals have zero cost to install.
					if not vardb.match(slot_atom) and \
						not slot_atom.startswith("virtual/"):
						all_installed_slots = False
						break
			if graph_db is None:
				if all_use_satisfied:
					if all_installed:
						if all_installed_slots:
							preferred_installed.append(this_choice)
						else:
							preferred_any_slot.append(this_choice)
					else:
						preferred_non_installed.append(this_choice)
				else:
					if all_installed_slots:
						unsat_use_installed.append(this_choice)
					else:
						unsat_use_non_installed.append(this_choice)
			else:
				all_in_graph = True
				for slot_atom in slot_map:
					# New-style virtuals have zero cost to install.
					if not graph_db.match(slot_atom) and \
						not slot_atom.startswith("virtual/"):
						all_in_graph = False
						break
				circular_atom = None
				if all_in_graph:
					if parent is None or priority is None:
						pass
					elif priority.buildtime:
						# Check if the atom would result in a direct circular
						# dependency and try to avoid that if it seems likely
						# to be unresolvable. This is only relevant for
						# buildtime deps that aren't already satisfied by an
						# installed package.
						cpv_slot_list = [parent]
						for atom in atoms:
							if atom.blocker:
								continue
							if vardb.match(atom):
								# If the atom is satisfied by an installed
								# version then it's not a circular dep.
								continue
							if atom.cp != parent.cp:
								continue
							if match_from_list(atom, cpv_slot_list):
								circular_atom = atom
								break
				if circular_atom is not None:
					other.append(this_choice)
				else:
					if all_use_satisfied:
						if all_in_graph:
							preferred_in_graph.append(this_choice)
						elif all_installed:
							if all_installed_slots:
								preferred_installed.append(this_choice)
							else:
								preferred_any_slot.append(this_choice)
						else:
							preferred_non_installed.append(this_choice)
					else:
						if all_in_graph:
							unsat_use_in_graph.append(this_choice)
						elif all_installed_slots:
							unsat_use_installed.append(this_choice)
						else:
							unsat_use_non_installed.append(this_choice)
		else:
			other.append(this_choice)

	# Prefer choices which contain upgrades to higher slots. This helps
	# for deps such as || ( foo:1 foo:2 ), where we want to prefer the
	# atom which matches the higher version rather than the atom furthest
	# to the left. Sorting is done separately for each of choice_bins, so
	# as not to interfere with the ordering of the bins. Because of the
	# bin separation, the main function of this code is to allow
	# --depclean to remove old slots (rather than to pull in new slots).
	for choices in choice_bins:
		if len(choices) < 2:
			continue
		for choice_1 in choices[1:]:
			atoms_1, slot_map_1, cp_map_1, all_available_1 = choice_1
			cps = set(cp_map_1)
			for choice_2 in choices:
				if choice_1 is choice_2:
					# choice_1 will not be promoted, so move on
					break
				atoms_2, slot_map_2, cp_map_2, all_available_2 = choice_2
				intersecting_cps = cps.intersection(cp_map_2)
				if not intersecting_cps:
					continue
				has_upgrade = False
				has_downgrade = False
				for cp in intersecting_cps:
					version_1 = cp_map_1[cp]
					version_2 = cp_map_2[cp]
					difference = pkgcmp(catpkgsplit(version_1)[1:],
						catpkgsplit(version_2)[1:])
					if difference != 0:
						if difference > 0:
							has_upgrade = True
						else:
							has_downgrade = True
							break
				if has_upgrade and not has_downgrade:
					# promote choice_1 in front of choice_2
					choices.remove(choice_1)
					index_2 = choices.index(choice_2)
					choices.insert(index_2, choice_1)
					break

	# Return the atoms of the first acceptable choice: first pass requires
	# all packages available, second pass allows masked choices.
	for allow_masked in (False, True):
		for choices in choice_bins:
			for atoms, slot_map, cp_map, all_available in choices:
				if all_available or allow_masked:
					return atoms

	assert(False) # This point should not be reachable
def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
	trees=None, use_mask=None, use_force=None, **kwargs):
	"""
	In order to solve bug #141118, recursively expand new-style virtuals so
	as to collapse one or more levels of indirection, generating an expanded
	search space. In dep_zapdeps, new-style virtuals will be assigned
	zero cost regardless of whether or not they are currently installed. Virtual
	blockers are supported but only when the virtual expands to a single
	atom because it wouldn't necessarily make sense to block all the components
	of a compound virtual.  When more than one new-style virtual is matched,
	the matches are sorted from highest to lowest versions and the atom is
	expanded to || ( highest match ... lowest match ).

	@param mysplit: a parsed dependency structure (nested lists of atoms,
		"||" markers and sub-lists), as produced by paren_reduce/use_reduce
	@param edebug: when true, emit debug output for each expanded virtual
	@param mydbapi: dbapi instance passed through to dep_check() recursion
	@param mysettings: config instance (virtuals, package.provided, USE)
	@param use_mask: use.mask flags (repoman QA-conditional evaluation)
	@param use_force: use.force flags (repoman QA-conditional evaluation)
	@rtype: list
	@return: a new dependency structure with new-style virtuals expanded
	@raise ParseError: on invalid atoms (strict mode) or when a virtual's
		RDEPEND fails to parse in the dep_check() recursion
	"""
	newsplit = []
	mytrees = trees[myroot]
	portdb = mytrees["porttree"].dbapi
	atom_graph = mytrees.get("atom_graph")
	parent = mytrees.get("parent")
	virt_parent = mytrees.get("virt_parent")
	graph_parent = None
	eapi = None
	if parent is not None:
		# When recursing through a virtual, validation uses the EAPI of
		# the virtual's parent package rather than the original parent.
		if virt_parent is not None:
			graph_parent = virt_parent
			eapi = virt_parent[0].metadata['EAPI']
		else:
			graph_parent = parent
			eapi = parent.metadata["EAPI"]
	repoman = not mysettings.local_config
	if kwargs["use_binaries"]:
		portdb = trees[myroot]["bintree"].dbapi
	myvirtuals = mysettings.getvirtuals()
	pprovideddict = mysettings.pprovideddict
	myuse = kwargs["myuse"]
	for x in mysplit:
		if x == "||":
			newsplit.append(x)
			continue
		elif isinstance(x, list):
			# Recurse into nested dependency groups.
			newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
				mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
				use_force=use_force, **kwargs))
			continue

		if not isinstance(x, Atom):
			try:
				x = Atom(x)
			except InvalidAtom:
				if portage.dep._dep_check_strict:
					raise ParseError(
						_("invalid atom: '%s'") % x)
				else:
					# Only real Atom instances are allowed past this point.
					continue
		else:
			# EAPI 0/1 forbid "!!" blockers and USE deps.
			if x.blocker and x.blocker.overlap.forbid and \
				eapi in ("0", "1") and portage.dep._dep_check_strict:
				raise ParseError(
					_("invalid atom: '%s'") % (x,))
			if x.use and eapi in ("0", "1") and \
				portage.dep._dep_check_strict:
				raise ParseError(
					_("invalid atom: '%s'") % (x,))

		if repoman and x.use and x.use.conditional:
			evaluated_atom = remove_slot(x)
			if x.slot:
				evaluated_atom += ":%s" % x.slot
			evaluated_atom += str(x.use._eval_qa_conditionals(
				use_mask, use_force))
			x = Atom(evaluated_atom)

		if not repoman:
			# Local multilib patch: append a lib32? USE-conditional to
			# atoms that don't already carry it and whose key is not
			# excluded via NO_AUTO_FLAG.  Default to "" instead of None:
			# "key not in None" raises TypeError whenever NO_AUTO_FLAG
			# is unset in the config.
			if 'lib32' not in x and portage.dep_getkey(x) not in \
				mysettings.get("NO_AUTO_FLAG", ""):
				if ']' in x:
					x = str(x).replace(']', ',lib32?]')
				else:
					x = str(x) + '[lib32?]'
				try:
					x = portage.dep.Atom(x)
				except portage.exception.InvalidAtom:
					if portage.dep._dep_check_strict:
						raise portage.exception.ParseError(
							"invalid atom: '%s'" % x)

		if myuse is not None and isinstance(x, Atom) and x.use:
			if x.use.conditional:
				x = x.evaluate_conditionals(myuse)

		mykey = x.cp
		if not mykey.startswith("virtual/"):
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
			continue
		mychoices = myvirtuals.get(mykey, [])
		if x.blocker:
			# Virtual blockers are no longer expanded here since
			# the un-expanded virtual atom is more useful for
			# maintaining a cache of blocker atoms.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
			continue

		if repoman or not hasattr(portdb, 'match_pkgs'):
			if portdb.cp_list(x.cp):
				newsplit.append(x)
			else:
				# TODO: Add PROVIDE check for repoman.
				a = []
				for y in mychoices:
					a.append(Atom(x.replace(x.cp, y.cp, 1)))
				if not a:
					newsplit.append(x)
				elif len(a) == 1:
					newsplit.append(a[0])
				else:
					newsplit.append(['||'] + a)
			continue

		pkgs = []
		# Ignore USE deps here, since otherwise we might not
		# get any matches. Choices with correct USE settings
		# will be preferred in dep_zapdeps().
		matches = portdb.match_pkgs(x.without_use)
		# Use descending order to prefer higher versions.
		matches.reverse()
		for pkg in matches:
			# only use new-style matches
			if pkg.cp.startswith("virtual/"):
				pkgs.append(pkg)
		if not (pkgs or mychoices):
			# This one couldn't be expanded as a new-style virtual.  Old-style
			# virtuals have already been expanded by dep_virtual, so this one
			# is unavailable and dep_zapdeps will identify it as such.  The
			# atom is not eliminated here since it may still represent a
			# dependency that needs to be satisfied.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
			continue

		a = []
		for pkg in pkgs:
			virt_atom = '=' + pkg.cpv
			if x.use:
				virt_atom += str(x.use)
			virt_atom = Atom(virt_atom)
			# According to GLEP 37, RDEPEND is the only dependency
			# type that is valid for new-style virtuals.  Repoman
			# should enforce this.
			depstring = pkg.metadata['RDEPEND']
			pkg_kwargs = kwargs.copy()
			pkg_kwargs["myuse"] = pkg.use.enabled
			if edebug:
				writemsg_level(_("Virtual Parent: %s\n") \
					% (pkg,), noiselevel=-1, level=logging.DEBUG)
				writemsg_level(_("Virtual Depstring: %s\n") \
					% (depstring,), noiselevel=-1, level=logging.DEBUG)

			# Set EAPI used for validation in dep_check() recursion.
			mytrees["virt_parent"] = (pkg, virt_atom)
			try:
				mycheck = dep_check(depstring, mydbapi, mysettings,
					myroot=myroot, trees=trees, **pkg_kwargs)
			finally:
				# Restore previous EAPI after recursion.
				if virt_parent is not None:
					mytrees["virt_parent"] = virt_parent
				else:
					del mytrees["virt_parent"]

			if not mycheck[0]:
				# Bug fix: the error message previously referenced y[0],
				# but y is not bound in this loop (NameError at best, a
				# stale atom from an earlier loop at worst); pkg is the
				# virtual whose RDEPEND failed to parse.
				raise ParseError(
					"%s: %s '%s'" % (pkg, mycheck[1], depstring))
			# pull in the new-style virtual
			mycheck[1].append(virt_atom)
			a.append(mycheck[1])
			if atom_graph is not None:
				atom_graph.add(virt_atom, graph_parent)
		# Plain old-style virtuals.  New-style virtuals are preferred.
		if not pkgs:
			for y in mychoices:
				new_atom = Atom(x.replace(x.cp, y.cp, 1))
				matches = portdb.match(new_atom)
				# portdb is an instance of depgraph._dep_check_composite_db, so
				# USE conditionals are already evaluated.
				if matches and mykey in \
					portdb.aux_get(matches[-1], ['PROVIDE'])[0].split():
					a.append(new_atom)
					if atom_graph is not None:
						atom_graph.add(new_atom, graph_parent)

		if not a and mychoices:
			# Check for a virtual package.provided match.
			for y in mychoices:
				new_atom = Atom(x.replace(x.cp, y.cp, 1))
				if match_from_list(new_atom,
					pprovideddict.get(new_atom.cp, [])):
					a.append(new_atom)
					if atom_graph is not None:
						atom_graph.add(new_atom, graph_parent)

		if not a:
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
		elif len(a) == 1:
			newsplit.append(a[0])
		else:
			newsplit.append(['||'] + a)

	return newsplit
def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False):
	"""
	Look up the package.mask comment block that explains why mycpv is masked.

	NOTE(review): a second, nearly identical definition of getmaskingreason
	appears later in this file and shadows this one at import time (the later
	copy reads settings.pmaskdict instead of settings._mask_manager._pmaskdict)
	-- confirm which copy is intended and drop the other.

	@param mycpv: package cpv to look up (e.g. "app-misc/foo-1.0")
	@param metadata: optional metadata dict; only SLOT is used here.  When
		None, it is fetched from portdb (and may remain None on corruption).
	@param settings: config instance; defaults to the global portage.settings
	@param portdb: portdbapi instance; defaults to the global portage.portdb
	@param return_location: when True, return (comment, filename) instead of
		just the comment string
	@return: the mask comment string, or (comment, filename) when
		return_location is True; None (or (None, None)) when no matching
		mask entry with a comment is found
	@raise ValueError: if mycpv is not a valid CPV
	"""
	if settings is None:
		settings = portage.settings
	if portdb is None:
		portdb = portage.portdb
	mysplit = catpkgsplit(mycpv)
	if not mysplit:
		raise ValueError(_("invalid CPV: %s") % mycpv)
	if metadata is None:
		db_keys = list(portdb._aux_cache_keys)
		try:
			metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
		except KeyError:
			# cpv exists but its metadata is unreadable; fall through
			# with metadata still None.
			if not portdb.cpv_exists(mycpv):
				raise
	if metadata is None:
		# Can't access SLOT due to corruption.
		cpv_slot_list = [mycpv]
	else:
		cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
	mycp=mysplit[0]+"/"+mysplit[1]

	# XXX- This is a temporary duplicate of code from the config constructor.
	# Build the list of profile/overlay/user locations that can contribute a
	# package.mask file, most specific (user config) first after reverse().
	locations = [os.path.join(settings["PORTDIR"], "profiles")]
	locations.extend(settings.profiles)
	for ov in settings["PORTDIR_OVERLAY"].split():
		profdir = os.path.join(normalize_path(ov), "profiles")
		if os.path.isdir(profdir):
			locations.append(profdir)
	locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
		USER_CONFIG_PATH))
	locations.reverse()
	# Pair each location with the raw lines of its package.mask file.
	pmasklists = [(x, grablines(os.path.join(x, "package.mask"), recursive=1)) for x in locations]

	pmaskdict = settings._mask_manager._pmaskdict
	if mycp in pmaskdict:
		for x in pmaskdict[mycp]:
			if match_from_list(x, cpv_slot_list):
				# Found a matching mask atom; now scan the raw files for the
				# comment block that immediately precedes that atom.
				for pmask in pmasklists:
					comment = ""
					# comment_valid tracks the next line index the
					# accumulated comment still applies to (-1 = none).
					comment_valid = -1
					pmask_filename = os.path.join(pmask[0], "package.mask")
					for i in range(len(pmask[1])):
						l = pmask[1][i].strip()
						if l == "":
							# Blank line terminates the current comment block.
							comment = ""
							comment_valid = -1
						elif l[0] == "#":
							comment += (l+"\n")
							comment_valid = i + 1
						elif l == x:
							if comment_valid != i:
								# The comment block was interrupted; it does
								# not belong to this atom.
								comment = ""
							if return_location:
								return (comment, pmask_filename)
							else:
								return comment
						elif comment_valid != -1:
							# Apparently this comment applies to multiple masks, so
							# it remains valid until a blank line is encountered.
							comment_valid += 1
	if return_location:
		return (None, None)
	else:
		return None
def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False):
	"""
	Return the package.mask comment explaining why mycpv is masked, or None
	when no matching mask entry is found.  When return_location is True a
	(comment, filename) tuple is returned instead (or (None, None)).
	Raises ValueError for an invalid CPV.
	"""
	if settings is None:
		settings = portage.settings
	if portdb is None:
		portdb = portage.portdb

	cpv_parts = catpkgsplit(mycpv)
	if not cpv_parts:
		raise ValueError(_("invalid CPV: %s") % mycpv)

	if metadata is None:
		db_keys = list(portdb._aux_cache_keys)
		try:
			metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
		except KeyError:
			if not portdb.cpv_exists(mycpv):
				raise

	if metadata is None:
		# SLOT is unreadable (corrupt metadata); match on the bare cpv.
		cpv_slot_list = [mycpv]
	else:
		cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
	mycp = cpv_parts[0] + "/" + cpv_parts[1]

	# XXX- This is a temporary duplicate of code from the config constructor.
	# Collect every directory that may hold a package.mask file.
	locations = [os.path.join(settings["PORTDIR"], "profiles")]
	locations.extend(settings.profiles)
	for overlay in settings["PORTDIR_OVERLAY"].split():
		profile_dir = os.path.join(normalize_path(overlay), "profiles")
		if os.path.isdir(profile_dir):
			locations.append(profile_dir)
	locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
		USER_CONFIG_PATH))
	locations.reverse()
	pmasklists = [(loc, grablines(os.path.join(loc, "package.mask"),
		recursive=1)) for loc in locations]

	if mycp in settings.pmaskdict:
		for mask_atom in settings.pmaskdict[mycp]:
			if not match_from_list(mask_atom, cpv_slot_list):
				continue
			# A mask atom matches; locate its preceding comment block in
			# the raw package.mask files.
			for location, lines in pmasklists:
				comment = ""
				# Index of the next line the accumulated comment still
				# applies to; -1 means no active comment block.
				comment_valid = -1
				pmask_filename = os.path.join(location, "package.mask")
				for i, raw_line in enumerate(lines):
					line = raw_line.strip()
					if line == "":
						comment = ""
						comment_valid = -1
					elif line[0] == "#":
						comment += (line + "\n")
						comment_valid = i + 1
					elif line == mask_atom:
						if comment_valid != i:
							# Comment block was interrupted by another
							# atom; it does not describe this one.
							comment = ""
						if return_location:
							return (comment, pmask_filename)
						return comment
					elif comment_valid != -1:
						# Apparently this comment applies to multiple
						# masks, so it remains valid until a blank line
						# is encountered.
						comment_valid += 1
	if return_location:
		return (None, None)
	return None