def cpv_inject(self, mycpv, metadata=None):
    """
    Adds a cpv to the list of available packages. See the
    exclusive_slots constructor parameter for behavior with
    respect to SLOT metadata.
    @param mycpv: cpv for the package to inject
    @type mycpv: str
    @param metadata: dictionary of raw metadata for aux_get() calls
    @type metadata: dict
    """
    # Any derived caches are stale once the package set changes.
    self._clear_cache()
    # Promote plain strings to _pkg_str so .cp/.slot are available.
    try:
        mycp = mycpv.cp
    except AttributeError:
        mycp = None
    try:
        myslot = mycpv.slot
    except AttributeError:
        myslot = None
    if mycp is None or \
        (myslot is None and metadata is not None and metadata.get('SLOT')):
        if metadata is None:
            mycpv = _pkg_str(mycpv)
        else:
            mycpv = _pkg_str(mycpv, metadata=metadata,
                settings=self.settings)
        mycp = mycpv.cp
        try:
            myslot = mycpv.slot
        except AttributeError:
            pass
    self.cpvdict[mycpv] = metadata
    if not self._exclusive_slots:
        myslot = None
    if myslot and mycp in self.cpdict:
        # If necessary, remove another package in the same SLOT.
        for cpv in self.cpdict[mycp]:
            if mycpv != cpv:
                try:
                    other_slot = cpv.slot
                except AttributeError:
                    pass
                else:
                    if myslot == other_slot:
                        self.cpv_remove(cpv)
                        break
    cp_list = self.cpdict.get(mycp)
    if cp_list is None:
        cp_list = []
        self.cpdict[mycp] = cp_list
    # Re-append so an injected duplicate moves to the end of the list.
    try:
        cp_list.remove(mycpv)
    except ValueError:
        pass
    cp_list.append(mycpv)
def testUpdateDbentryBlockerTestCase(self):
    """
    Avoid creating self-blockers for bug #367215.

    Each case is (update_cmd, parent, input_str, output_str): a pkgmove
    command, the package whose dependency string is being rewritten, the
    raw dependency string, and the expected result. When the parent IS
    the move destination, rewriting the blocker would create a
    self-blocker, so the atom must be left unchanged.
    """
    cases = (
        (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
            _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
            " !dev-libs/A ", " !dev-libs/A "),
        (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
            _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
            " !dev-libs/A ", " !dev-libs/B "),
        (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
            _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
            " !dev-libs/A:0 ", " !dev-libs/A:0 "),
        (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
            _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
            " !dev-libs/A:0 ", " !dev-libs/B:0 "),
        (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
            _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
            " !>=dev-libs/A-1:0 ", " !>=dev-libs/B-1:0 "),
        (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
            _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
            " !>=dev-libs/A-1:0 ", " !>=dev-libs/A-1:0 "),
        (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
            _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
            " !>=dev-libs/A-1 ", " !>=dev-libs/B-1 "),
        (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
            _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
            " !>=dev-libs/A-1 ", " !>=dev-libs/A-1 "),
    )
    for update_cmd, parent, input_str, output_str in cases:
        result = update_dbentry(update_cmd, input_str, parent=parent)
        self.assertEqual(result, output_str)
def cp_all(self, sort=True):
    """
    Returns an ordered iterator instead of a list, so that search
    results can be displayed incrementally.

    Generalized with a backward-compatible ``sort`` keyword for
    consistency with the sibling cp_all(sort=True) implementation:
    callers that do not need sorted output can skip the sort.

    @param sort: if True (the default, preserving previous behavior),
        yield category/package names in sorted order
    @type sort: bool
    @rtype: iterator
    @return: iterator over category/package name strings
    """
    if self._cp_map is not None:
        return iter(sorted(self._cp_map)) if sort else iter(self._cp_map)
    delta_data = self._vardb._cache_delta.loadRace()
    if delta_data is None:
        # No usable cache delta; fall back to a full walk of the vardb.
        return self._iter_cp_all()
    self._vardb._cache_delta.applyDelta(delta_data)
    # Build and memoize the cp -> [cpv, ...] map from the aux cache.
    self._cp_map = cp_map = {}
    for cpv in self._vardb._aux_cache["packages"]:
        try:
            cpv = _pkg_str(cpv)
        except InvalidData:
            # Skip malformed entries rather than failing the whole walk.
            continue
        cp_list = cp_map.get(cpv.cp)
        if cp_list is None:
            cp_list = []
            cp_map[cpv.cp] = cp_list
        cp_list.append(cpv)
    return iter(sorted(self._cp_map)) if sort else iter(self._cp_map)
def cp_all(self, sort=True):
    """
    Return an ordered iterator instead of a list, so that search
    results can be displayed incrementally.
    """
    def _key_iter(mapping):
        # Iterate category/package keys, sorted only when requested.
        return iter(sorted(mapping)) if sort else iter(mapping)

    if self._cp_map is not None:
        return _key_iter(self._cp_map)
    delta_data = self._vardb._cache_delta.loadRace()
    if delta_data is None:
        return self._iter_cp_all()
    self._vardb._cache_delta.applyDelta(delta_data)
    # Populate and memoize the cp -> [cpv, ...] mapping.
    self._cp_map = cp_map = {}
    for raw_cpv in self._vardb._aux_cache["packages"]:
        try:
            pkg = _pkg_str(raw_cpv)
        except InvalidData:
            continue
        cp_map.setdefault(pkg.cp, []).append(pkg)
    return _key_iter(self._cp_map)
def getUseMask(self, pkg=None, stable=None):
    """
    Return the frozenset of masked USE flags that apply to pkg,
    stacking global, repository and per-package use.mask (and, for
    stable keywords, use.stable.mask) entries.
    @param pkg: package (or plain dep string) to compute the mask for;
        if None, only the global use.mask stack is returned
    @param stable: whether to apply stable mask entries; if None it is
        derived via self._isStable(pkg)
    """
    if pkg is None:
        return frozenset(stack_lists(
            self._usemask_list, incremental=True))

    slot = None
    cp = getattr(pkg, "cp", None)
    if cp is None:
        # Plain dep string: parse out slot/repo and build a _pkg_str.
        slot = dep_getslot(pkg)
        repo = dep_getrepo(pkg)
        pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
        cp = pkg.cp

    if stable is None:
        stable = self._isStable(pkg)

    usemask = []

    if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
        # Master repos first so the package's own repo takes precedence.
        repos = []
        try:
            repos.extend(repo.name for repo in
                self.repositories[pkg.repo].masters)
        except KeyError:
            pass
        repos.append(pkg.repo)
        for repo in repos:
            usemask.append(self._repo_usemask_dict.get(repo, {}))
            if stable:
                usemask.append(self._repo_usestablemask_dict.get(repo, {}))
            cpdict = self._repo_pusemask_dict.get(repo, {}).get(cp)
            if cpdict:
                pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
                if pkg_usemask:
                    usemask.extend(pkg_usemask)
            if stable:
                cpdict = self._repo_pusestablemask_dict.get(repo, {}).get(cp)
                if cpdict:
                    pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
                    if pkg_usemask:
                        usemask.extend(pkg_usemask)

    # Profile-level use.mask / package.use.mask stacks, in profile order.
    for i, pusemask_dict in enumerate(self._pusemask_list):
        if self._usemask_list[i]:
            usemask.append(self._usemask_list[i])
        if stable and self._usestablemask_list[i]:
            usemask.append(self._usestablemask_list[i])
        cpdict = pusemask_dict.get(cp)
        if cpdict:
            pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
            if pkg_usemask:
                usemask.extend(pkg_usemask)
        if stable:
            cpdict = self._pusestablemask_list[i].get(cp)
            if cpdict:
                pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
                if pkg_usemask:
                    usemask.extend(pkg_usemask)

    return frozenset(stack_lists(usemask, incremental=True))
def check(self, **kwargs):
    """
    Verify that the binary package index (Packages) agrees with the
    packages actually present in the binhost tree.
    @param onProgress: optional progress callback, called as
        onProgress(maxval, current)
    @return: (True, None) on success, or (False, [error strings])
    """
    onProgress = kwargs.get('onProgress', None)
    bintree = self._bintree
    # Force reindex in case pkgdir-index-trusted is enabled.
    bintree._populate_local(reindex=True)
    bintree.populated = True
    _instance_key = bintree.dbapi._instance_key
    cpv_all = self._bintree.dbapi.cpv_all()
    cpv_all.sort()
    maxval = len(cpv_all)
    if onProgress:
        onProgress(maxval, 0)
    pkgindex = self._pkgindex
    missing = []
    stale = []
    metadata = {}
    # Index entries with no matching binary package are stale.
    for d in pkgindex.packages:
        cpv = _pkg_str(d["CPV"], metadata=d,
            settings=bintree.settings)
        d["CPV"] = cpv
        metadata[_instance_key(cpv)] = d
        if not bintree.dbapi.cpv_exists(cpv):
            stale.append(cpv)
    # Binary packages with no (current) index entry are missing.
    for i, cpv in enumerate(cpv_all):
        d = metadata.get(_instance_key(cpv))
        if not d or self._need_update(cpv, d):
            missing.append(cpv)
        if onProgress:
            onProgress(maxval, i+1)
    errors = ["'%s' is not in Packages" % cpv for cpv in missing]
    for cpv in stale:
        errors.append("'%s' is not in the repository" % cpv)
    if errors:
        return (False, errors)
    return (True, None)
def __init__(self, **kwargs):
    """
    Initialize a Package task from raw metadata. Pops 'metadata' from
    kwargs, validates cpv/SLOT/IUSE, and precomputes the derived
    attributes (cp, version, slot_atom, iuse, hash key) used throughout
    the resolver.
    """
    metadata = _PackageMetadataWrapperBase(kwargs.pop('metadata'))
    Task.__init__(self, **kwargs)
    # the SlotObject constructor assigns self.root_config from keyword args
    # and is an instance of a '_emerge.RootConfig.RootConfig class
    self.root = self.root_config.root
    self._raw_metadata = metadata
    self._metadata = _PackageMetadataWrapper(self, metadata)
    if not self.built:
        # Ebuild packages inherit CHOST from the current config.
        self._metadata['CHOST'] = self.root_config.settings.get(
            'CHOST', '')
    eapi_attrs = _get_eapi_attrs(self.eapi)

    self.cpv = _pkg_str(self.cpv, metadata=self._metadata,
        settings=self.root_config.settings)
    if hasattr(self.cpv, 'slot_invalid'):
        self._invalid_metadata(
            'SLOT.invalid',
            "SLOT: invalid value: '%s'" % self._metadata["SLOT"])
    self.cpv_split = self.cpv.cpv_split
    self.category, self.pf = portage.catsplit(self.cpv)
    self.cp = self.cpv.cp
    self.version = self.cpv.version
    self.slot = self.cpv.slot
    self.sub_slot = self.cpv.sub_slot
    self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
    # sync metadata with validated repo (may be UNKNOWN_REPO)
    self._metadata['repository'] = self.cpv.repo

    # EAPIs with IUSE_EFFECTIVE use a different implicit-IUSE matcher.
    if eapi_attrs.iuse_effective:
        implicit_match = self.root_config.settings._iuse_effective_match
    else:
        implicit_match = self.root_config.settings._iuse_implicit_match
    usealiases = self.root_config.settings._use_manager.getUseAliases(self)
    self.iuse = self._iuse(self, self._metadata["IUSE"].split(),
        implicit_match, usealiases, self.eapi)

    if (self.iuse.enabled or self.iuse.disabled) and \
        not eapi_attrs.iuse_defaults:
        if not self.installed:
            self._invalid_metadata(
                'EAPI.incompatible',
                "IUSE contains defaults, but EAPI doesn't allow them")
    if self.inherited is None:
        self.inherited = frozenset()

    if self.operation is None:
        if self.onlydeps or self.installed:
            self.operation = "nomerge"
        else:
            self.operation = "merge"

    # Hash key/value are precomputed since Package instances are used
    # heavily as dict keys and set members.
    self._hash_key = Package._gen_hash_key(cpv=self.cpv,
        installed=self.installed, onlydeps=self.onlydeps,
        operation=self.operation, repo_name=self.cpv.repo,
        root_config=self.root_config, type_name=self.type_name)
    self._hash_value = hash(self._hash_key)
def getUseMask(self, pkg=None, stable=None):
    """
    Return the frozenset of masked USE flags applicable to pkg,
    stacking global, repository and per-package use.mask entries
    (plus use.stable.mask entries when stable is in effect).
    @param pkg: package (or plain dep string) to compute the mask for;
        if None, only the global use.mask stack is returned
    @param stable: whether stable mask entries apply; derived from
        self._isStable(pkg) when None
    """
    if pkg is None:
        return frozenset(stack_lists(
            self._usemask_list, incremental=True))

    slot = None
    cp = getattr(pkg, "cp", None)
    if cp is None:
        # Plain dep string: extract slot/repo and wrap in _pkg_str.
        slot = dep_getslot(pkg)
        repo = dep_getrepo(pkg)
        pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
        cp = pkg.cp

    if stable is None:
        stable = self._isStable(pkg)

    usemask = []

    if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
        # Masters first so the package's own repo wins on conflicts.
        repos = []
        try:
            repos.extend(repo.name for repo in
                self.repositories[pkg.repo].masters)
        except KeyError:
            pass
        repos.append(pkg.repo)
        for repo in repos:
            usemask.append(self._repo_usemask_dict.get(repo, {}))
            if stable:
                usemask.append(self._repo_usestablemask_dict.get(repo, {}))
            cpdict = self._repo_pusemask_dict.get(repo, {}).get(cp)
            if cpdict:
                pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
                if pkg_usemask:
                    usemask.extend(pkg_usemask)
            if stable:
                cpdict = self._repo_pusestablemask_dict.get(repo, {}).get(cp)
                if cpdict:
                    pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
                    if pkg_usemask:
                        usemask.extend(pkg_usemask)

    # Profile-level stacks, in profile inheritance order.
    for i, pusemask_dict in enumerate(self._pusemask_list):
        if self._usemask_list[i]:
            usemask.append(self._usemask_list[i])
        if stable and self._usestablemask_list[i]:
            usemask.append(self._usestablemask_list[i])
        cpdict = pusemask_dict.get(cp)
        if cpdict:
            pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
            if pkg_usemask:
                usemask.extend(pkg_usemask)
        if stable:
            cpdict = self._pusestablemask_list[i].get(cp)
            if cpdict:
                pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
                if pkg_usemask:
                    usemask.extend(pkg_usemask)

    return frozenset(stack_lists(usemask, incremental=True))
def getMaskAtom(self, cpv, slot, repo):
    """
    Take a package and return a matching package.mask atom, or None if no
    such atom exists or it has been cancelled by package.unmask. PROVIDE
    is not checked, so atoms will not be found for old-style virtuals.

    @param cpv: The package name
    @type cpv: String
    @param slot: The package's slot
    @type slot: String
    @param repo: The package's repository [optional]
    @type repo: String
    @rtype: String
    @return: A matching atom string or None if one is not found.
    """
    # Promote plain cpv strings so slot/repo-aware matching works.
    if hasattr(cpv, "slot"):
        pkg = cpv
    else:
        pkg = _pkg_str(cpv, slot=slot, repo=repo)
    return self._getMaskAtom(pkg, slot, repo,
        self._punmaskdict.get(pkg.cp))
def __iter__(self):
    """Generator that walks the cache directory tree and yields one
    _pkg_str per regular file entry found."""
    prefix_len = len(self.location) + 1
    pending = [(0, self.location)]
    while pending:
        depth, current = pending.pop()
        try:
            entries = os.listdir(current)
        except OSError as e:
            # A directory that vanished is tolerable; anything else is not.
            if e.errno == errno.ENOENT:
                continue
            raise
        for entry in entries:
            full_path = os.path.join(current, entry)
            try:
                st = os.lstat(full_path)
            except OSError:
                # Cache entry disappeared.
                continue
            if not stat.S_ISDIR(st.st_mode):
                try:
                    yield _pkg_str(full_path[prefix_len:])
                except InvalidData:
                    pass
                continue
            # Only recurse 1 deep, in order to avoid iteration over
            # entries from another nested cache instance. This can
            # happen if the user nests an overlay inside
            # /usr/portage/local as in bug #302764.
            if depth < 1:
                pending.append((depth + 1, full_path))
def __init__(self, **kwargs):
    """
    Initialize a Package task from raw metadata. Pops 'metadata' from
    kwargs, validates cpv/SLOT/IUSE, and precomputes derived attributes
    (cp, version, slot_atom, iuse, hash key) used by the resolver.
    """
    metadata = _PackageMetadataWrapperBase(kwargs.pop('metadata'))
    Task.__init__(self, **kwargs)
    # the SlotObject constructor assigns self.root_config from keyword args
    # and is an instance of a '_emerge.RootConfig.RootConfig class
    self.root = self.root_config.root
    self._raw_metadata = metadata
    self._metadata = _PackageMetadataWrapper(self, metadata)
    if not self.built:
        # Ebuild packages inherit CHOST from the current config.
        self._metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
    eapi_attrs = _get_eapi_attrs(self.eapi)

    self.cpv = _pkg_str(self.cpv, metadata=self._metadata,
        settings=self.root_config.settings)
    if hasattr(self.cpv, 'slot_invalid'):
        self._invalid_metadata('SLOT.invalid',
            "SLOT: invalid value: '%s'" % self._metadata["SLOT"])
    self.cpv_split = self.cpv.cpv_split
    self.category, self.pf = portage.catsplit(self.cpv)
    self.cp = self.cpv.cp
    self.version = self.cpv.version
    self.slot = self.cpv.slot
    self.sub_slot = self.cpv.sub_slot
    self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
    # sync metadata with validated repo (may be UNKNOWN_REPO)
    self._metadata['repository'] = self.cpv.repo

    if eapi_attrs.iuse_effective:
        implicit_match = self.root_config.settings._iuse_effective_match
        if self.built:
            # For built packages, restrict the effective-IUSE match to
            # the USE recorded at build time.
            implicit_match = functools.partial(
                self._built_iuse_effective_match,
                implicit_match, frozenset(self._metadata['USE'].split()))
    else:
        implicit_match = self.root_config.settings._iuse_implicit_match
    usealiases = self.root_config.settings._use_manager.getUseAliases(self)
    self.iuse = self._iuse(self, self._metadata["IUSE"].split(),
        implicit_match, usealiases, self.eapi)

    if (self.iuse.enabled or self.iuse.disabled) and \
        not eapi_attrs.iuse_defaults:
        if not self.installed:
            self._invalid_metadata('EAPI.incompatible',
                "IUSE contains defaults, but EAPI doesn't allow them")
    if self.inherited is None:
        self.inherited = frozenset()

    if self.operation is None:
        if self.onlydeps or self.installed:
            self.operation = "nomerge"
        else:
            self.operation = "merge"

    # Hash key/value are precomputed since Package instances are used
    # heavily as dict keys and set members.
    self._hash_key = Package._gen_hash_key(cpv=self.cpv,
        installed=self.installed, onlydeps=self.onlydeps,
        operation=self.operation, repo_name=self.cpv.repo,
        root_config=self.root_config, type_name=self.type_name)
    self._hash_value = hash(self._hash_key)
def getUseAliases(self, pkg):
    """
    Return a dict mapping each real USE flag to its list of aliases,
    collected from use.aliases / package.use.aliases in the package's
    repository and its masters. Duplicated aliases (an alias already
    assigned to a different real flag) are warned about and skipped.
    @param pkg: package (or plain dep string) to resolve aliases for
    @rtype: dict
    """
    if hasattr(pkg, "eapi") and not eapi_has_use_aliases(pkg.eapi):
        return {}

    cp = getattr(pkg, "cp", None)
    if cp is None:
        # Plain dep string: extract slot/repo and wrap in _pkg_str.
        slot = dep_getslot(pkg)
        repo = dep_getrepo(pkg)
        pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
        cp = pkg.cp

    usealiases = {}

    if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
        # Masters first so the package's own repo is processed last.
        repos = []
        try:
            repos.extend(repo.name for repo in
                self.repositories[pkg.repo].masters)
        except KeyError:
            pass
        repos.append(pkg.repo)
        for repo in repos:
            usealiases_dict = self._repo_usealiases_dict.get(repo, {})
            for real_flag, aliases in usealiases_dict.items():
                for alias in aliases:
                    # Reject an alias already bound to another real flag.
                    if any(alias in v for k, v in usealiases.items()
                            if k != real_flag):
                        writemsg(
                            _("--- Duplicated USE flag alias for '%s%s%s': '%s'\n"
                            ) % (pkg.cpv, _repo_separator, pkg.repo, alias),
                            noiselevel=-1,
                        )
                    else:
                        usealiases.setdefault(real_flag, []).append(alias)
            cp_usealiases_dict = self._repo_pusealiases_dict.get(
                repo, {}).get(cp)
            if cp_usealiases_dict:
                usealiases_dict_list = ordered_by_atom_specificity(
                    cp_usealiases_dict, pkg)
                for usealiases_dict in usealiases_dict_list:
                    for real_flag, aliases in usealiases_dict.items():
                        for alias in aliases:
                            if any(alias in v for k, v in usealiases.items()
                                    if k != real_flag):
                                writemsg(
                                    _("--- Duplicated USE flag alias for '%s%s%s': '%s'\n"
                                    ) % (pkg.cpv, _repo_separator, pkg.repo, alias),
                                    noiselevel=-1,
                                )
                            else:
                                usealiases.setdefault(real_flag, []).append(alias)

    return usealiases
def getUseAliases(self, pkg):
    """
    Return a dict mapping each real USE flag to its list of aliases,
    collected from use.aliases / package.use.aliases in the package's
    repository and its masters (preferring use_aliases_masters when
    configured). Duplicated aliases are warned about and skipped.
    @param pkg: package (or plain dep string) to resolve aliases for
    @rtype: dict
    """
    if hasattr(pkg, "eapi") and not eapi_has_use_aliases(pkg.eapi):
        return {}

    cp = getattr(pkg, "cp", None)
    if cp is None:
        # Plain dep string: extract slot/repo and wrap in _pkg_str.
        slot = dep_getslot(pkg)
        repo = dep_getrepo(pkg)
        pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
        cp = pkg.cp

    usealiases = {}

    if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
        repos = []
        try:
            # use_aliases_masters overrides the regular masters list
            # when it is configured for this repo.
            if self.repositories[pkg.repo].use_aliases_masters is not None:
                masters = self.repositories[pkg.repo].use_aliases_masters
            else:
                masters = self.repositories[pkg.repo].masters
            repos.extend(repo.name for repo in masters)
        except KeyError:
            pass
        repos.append(pkg.repo)
        for repo in repos:
            usealiases_dict = self._repo_usealiases_dict.get(repo, {})
            for real_flag, aliases in usealiases_dict.items():
                for alias in aliases:
                    # Reject an alias already bound to another real flag.
                    if any(alias in v for k, v in usealiases.items()
                            if k != real_flag):
                        writemsg(
                            _("--- Duplicated USE flag alias for '%s%s%s': '%s'\n")
                            % (pkg.cpv, _repo_separator, pkg.repo, alias),
                            noiselevel=-1,
                        )
                    else:
                        usealiases.setdefault(real_flag, []).append(alias)
            cp_usealiases_dict = self._repo_pusealiases_dict.get(repo, {}).get(cp)
            if cp_usealiases_dict:
                usealiases_dict_list = ordered_by_atom_specificity(cp_usealiases_dict, pkg)
                for usealiases_dict in usealiases_dict_list:
                    for real_flag, aliases in usealiases_dict.items():
                        for alias in aliases:
                            if any(alias in v for k, v in usealiases.items()
                                    if k != real_flag):
                                writemsg(
                                    _("--- Duplicated USE flag alias for '%s%s%s': '%s'\n")
                                    % (pkg.cpv, _repo_separator, pkg.repo, alias),
                                    noiselevel=-1,
                                )
                            else:
                                usealiases.setdefault(real_flag, []).append(alias)

    return usealiases
def getPKeywords(self, cpv, slot, repo, global_accept_keywords):
    """Gets any package.keywords settings for cp for the given
    cpv, slot and repo

    @param cpv: The package name (for package.keywords support)
    @type cpv: String
    @param slot: The 'SLOT' key from the raw package metadata
    @type slot: String
    @param repo: The package's repository [optional]
    @type repo: String
    @param global_accept_keywords: The current value of ACCEPT_KEYWORDS
    @type global_accept_keywords: String
    @rtype: List
    @return: list of KEYWORDS that have been accepted
    """
    pgroups = global_accept_keywords.split()
    # Promote a plain cpv string so slot/repo-aware matching works.
    try:
        cpv.slot
    except AttributeError:
        cpv = _pkg_str(cpv, slot=slot, repo=repo)
    cp = cpv.cp

    unmaskgroups = []
    if self._p_accept_keywords:
        # An empty package.accept_keywords entry means "accept the
        # unstable variant of every currently accepted keyword".
        accept_keywords_defaults = tuple('~' + keyword for keyword in \
            pgroups if keyword[:1] not in "~-")
        for d in self._p_accept_keywords:
            cpdict = d.get(cp)
            if cpdict:
                pkg_accept_keywords = \
                    ordered_by_atom_specificity(cpdict, cpv)
                if pkg_accept_keywords:
                    for x in pkg_accept_keywords:
                        if not x:
                            x = accept_keywords_defaults
                        unmaskgroups.extend(x)

    pkgdict = self.pkeywordsdict.get(cp)
    if pkgdict:
        pkg_accept_keywords = \
            ordered_by_atom_specificity(pkgdict, cpv)
        if pkg_accept_keywords:
            for x in pkg_accept_keywords:
                unmaskgroups.extend(x)
    return unmaskgroups
def getKeywords(self, cpv, slot, keywords, repo):
    """Stack the package's own KEYWORDS with any applicable profile
    package.keywords entries and return the incremental result."""
    # Promote plain cpv strings so atom matching can use slot/repo.
    pkg = cpv if hasattr(cpv, "slot") else _pkg_str(cpv, slot=slot, repo=repo)
    cp = pkg.cp
    # Drop "-*" from the ebuild's own KEYWORDS before stacking.
    stacked = [[kw for kw in keywords.split() if kw != "-*"]]
    for pkeywords_dict in self._pkeywords_list:
        cpdict = pkeywords_dict.get(cp)
        if not cpdict:
            continue
        matches = ordered_by_atom_specificity(cpdict, pkg)
        if matches:
            stacked.extend(matches)
    return stack_lists(stacked, incremental=True)
def _getPkgAcceptLicense(self, cpv, slot, repo):
    """
    Get an ACCEPT_LICENSE list, accounting for package.license.
    """
    cpdict = self._plicensedict.get(cpv_getkey(cpv))
    if not cpdict:
        # No package.license entries for this cp: global list applies.
        return self._accept_license
    if not hasattr(cpv, "slot"):
        cpv = _pkg_str(cpv, slot=slot, repo=repo)
    plicence_list = ordered_by_atom_specificity(cpdict, cpv)
    if not plicence_list:
        return self._accept_license
    # Copy before extending so the shared global list stays untouched.
    accept_license = list(self._accept_license)
    for entry in plicence_list:
        accept_license.extend(entry)
    return accept_license
def getPUSE(self, pkg):
    """Return the space-separated package.use flags applicable to pkg,
    or the empty string when none apply."""
    cp = getattr(pkg, "cp", None)
    if cp is None:
        # Plain dep string: extract slot/repo and wrap in _pkg_str.
        slot = dep_getslot(pkg)
        repo = dep_getrepo(pkg)
        pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
        cp = pkg.cp
    cpdict = self._pusedict.get(cp)
    if not cpdict:
        return ""
    puse_matches = ordered_by_atom_specificity(cpdict, pkg)
    if not puse_matches:
        return ""
    flags = []
    for match in puse_matches:
        flags.extend(match)
    return " ".join(flags)
def __init__(self, atom):
    """Build a package-like object from an atom, for matching purposes."""
    atom = Atom(atom, allow_repo=True)
    self.cp = atom.cp
    slot = atom.slot
    if atom.sub_slot:
        slot = "%s/%s" % (slot, atom.sub_slot)
    if not slot:
        # Fall back to slot "0" when the atom carries no slot at all.
        slot = "0"
    self.cpv = _pkg_str(atom.cpv, slot=slot, repo=atom.repo)
    self.cpv_split = catpkgsplit(self.cpv)
    self.slot = self.cpv.slot
    self.sub_slot = self.cpv.sub_slot
    self.repo = atom.repo
    # USE conditionals: enabled flags and required (IUSE) flags.
    enabled = atom.use.enabled if atom.use else []
    required = atom.use.required if atom.use else []
    self.use = self._use_class(enabled)
    self.iuse = self._iuse_class(required)
def __init__(self, atom):
    """Construct a minimal package-like wrapper around an atom so it can
    participate in package matching."""
    parsed = Atom(atom, allow_repo=True)
    self.cp = parsed.cp
    # Compose slot/sub_slot, defaulting to "0" when the atom has none.
    slot = parsed.slot
    if parsed.sub_slot:
        slot = "%s/%s" % (slot, parsed.sub_slot)
    slot = slot or '0'
    self.cpv = _pkg_str(parsed.cpv, slot=slot, repo=parsed.repo)
    self.cpv_split = catpkgsplit(self.cpv)
    self.slot = self.cpv.slot
    self.sub_slot = self.cpv.sub_slot
    self.repo = parsed.repo
    if parsed.use:
        self.use = self._use_class(parsed.use.enabled)
        self.iuse = self._iuse_class(parsed.use.required)
    else:
        self.use = self._use_class([])
        self.iuse = self._iuse_class([])
def perform_global_updates(mycpv, aux_dict, mydb, myupdates):
    """Apply the relevant repo's move/slotmove commands to one package's
    dependency metadata, writing any changes back via aux_update."""
    try:
        pkg = _pkg_str(mycpv, metadata=aux_dict, settings=mydb.settings)
    except InvalidData:
        return
    # Restrict to the dependency keys that update commands can affect.
    aux_dict = {k: aux_dict[k] for k in Package._dep_keys}
    # Prefer the package's own repo commands, then the DEFAULT set.
    if pkg.repo in myupdates:
        mycommands = myupdates[pkg.repo]
    elif 'DEFAULT' in myupdates:
        mycommands = myupdates['DEFAULT']
    else:
        return
    if not mycommands:
        return
    updates = update_dbentries(mycommands, aux_dict, parent=pkg)
    if updates:
        mydb.aux_update(mycpv, updates)
def _getMaskAtom(self, cpv, slot, repo, unmask_atoms=None):
    """
    Take a package and return a matching package.mask atom, or None if no
    such atom exists or it has been cancelled by package.unmask. PROVIDE
    is not checked, so atoms will not be found for old-style virtuals.

    @param cpv: The package name
    @type cpv: String
    @param slot: The package's slot
    @type slot: String
    @param repo: The package's repository [optional]
    @type repo: String
    @param unmask_atoms: if desired pass in self._punmaskdict.get(cp)
    @type unmask_atoms: list
    @rtype: String
    @return: A matching atom string or None if one is not found.
    """
    pkg = cpv if hasattr(cpv, "slot") else _pkg_str(cpv, slot=slot, repo=repo)
    mask_atoms = self._pmaskdict.get(pkg.cp)
    if not mask_atoms:
        return None
    pkg_list = [pkg]
    for mask_atom in mask_atoms:
        if not match_from_list(mask_atom, pkg_list):
            continue
        # First matching mask atom wins, unless an unmask cancels it.
        if unmask_atoms and any(
                match_from_list(unmask_atom, pkg_list)
                for unmask_atom in unmask_atoms):
            return None
        return mask_atom
    return None
def _unmerge_display(root_config, myopts, unmerge_action,
    unmerge_files, clean_delay=1, ordered=0,
    writemsg_level=portage.util.writemsg_level):
    """
    Returns a tuple of (returncode, pkgmap) where returncode is
    os.EX_OK if no errors occur, and 1 otherwise.

    @param root_config: RootConfig for the target root
    @param myopts: parsed emerge options mapping
    @param unmerge_action: one of "unmerge", "prune", "clean"
    @param unmerge_files: package arguments given on the command line
    @param clean_delay: NOTE(review): accepted but unused in this
        function — presumably consumed by the caller; verify
    @param ordered: when true, preserve selection order (required for
        --depclean/--prune dependency ordering)
    @param writemsg_level: output function, injectable for testing
    """
    quiet = "--quiet" in myopts
    settings = root_config.settings
    sets = root_config.sets
    vartree = root_config.trees["vartree"]
    candidate_catpkgs = []
    global_unmerge = 0
    out = portage.output.EOutput()
    pkg_cache = {}
    db_keys = list(vartree.dbapi._aux_cache_keys)

    def _pkg(cpv):
        # Memoized construction of installed Package instances.
        pkg = pkg_cache.get(cpv)
        if pkg is None:
            pkg = Package(built=True, cpv=cpv, installed=True,
                metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
                operation="uninstall", root_config=root_config,
                type_name="installed")
            pkg_cache[cpv] = pkg
        return pkg

    vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
    try:
        # At least the parent needs to exist for the lock file.
        portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:
        pass
    vdb_lock = None
    try:
        if os.access(vdb_path, os.W_OK):
            vartree.dbapi.lock()
            vdb_lock = True

        realsyslist = []
        sys_virt_map = {}
        for x in sets["system"].getAtoms():
            for atom in expand_new_virt(vartree.dbapi, x):
                if not atom.blocker:
                    realsyslist.append(atom)
                    if atom.cp != x.cp:
                        sys_virt_map[atom.cp] = x.cp

        syslist = []
        for x in realsyslist:
            mycp = x.cp
            # Since Gentoo stopped using old-style virtuals in
            # 2011, typically it's possible to avoid getvirtuals()
            # calls entirely. It will not be triggered here by
            # new-style virtuals since those are expanded to
            # non-virtual atoms above by expand_new_virt().
            if mycp.startswith("virtual/") and \
                mycp in settings.getvirtuals():
                providers = []
                for provider in settings.getvirtuals()[mycp]:
                    if vartree.dbapi.match(provider):
                        providers.append(provider)
                if len(providers) == 1:
                    syslist.extend(providers)
            else:
                syslist.append(mycp)
        syslist = frozenset(syslist)

        if not unmerge_files:
            if unmerge_action == "unmerge":
                print()
                print(bold("emerge unmerge") + " can only be used with specific package names")
                print()
                return 1, {}
            else:
                global_unmerge = 1

        localtree = vartree
        # process all arguments and add all
        # valid db entries to candidate_catpkgs
        if global_unmerge:
            if not unmerge_files:
                candidate_catpkgs.extend(vartree.dbapi.cp_all())
        else:
            #we've got command-line arguments
            if not unmerge_files:
                print("\nNo packages to unmerge have been provided.\n")
                return 1, {}
            for x in unmerge_files:
                arg_parts = x.split('/')
                if x[0] not in [".", "/"] and \
                    arg_parts[-1][-7:] != ".ebuild":
                    #possible cat/pkg or dep; treat as such
                    candidate_catpkgs.append(x)
                elif unmerge_action in ["prune", "clean"]:
                    print("\n!!! Prune and clean do not accept individual" + \
                        " ebuilds as arguments;\n skipping.\n")
                    continue
                else:
                    # it appears that the user is specifying an installed
                    # ebuild and we're in "unmerge" mode, so it's ok.
                    if not os.path.exists(x):
                        print("\n!!! The path '" + x + "' doesn't exist.\n")
                        return 1, {}

                    absx = os.path.abspath(x)
                    sp_absx = absx.split("/")
                    if sp_absx[-1][-7:] == ".ebuild":
                        del sp_absx[-1]
                        absx = "/".join(sp_absx)

                    sp_absx_len = len(sp_absx)

                    vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)

                    sp_vdb = vdb_path.split("/")
                    sp_vdb_len = len(sp_vdb)

                    if not os.path.exists(absx + "/CONTENTS"):
                        print("!!! Not a valid db dir: " + str(absx))
                        return 1, {}

                    if sp_absx_len <= sp_vdb_len:
                        # The Path is shorter... so it can't be inside the vdb.
                        print(sp_absx)
                        print(absx)
                        print("\n!!!", x, "cannot be inside " + \
                            vdb_path + "; aborting.\n")
                        return 1, {}

                    for idx in range(0, sp_vdb_len):
                        if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
                            print(sp_absx)
                            print(absx)
                            print("\n!!!", x, "is not inside " + \
                                vdb_path + "; aborting.\n")
                            return 1, {}

                    print("=" + "/".join(sp_absx[sp_vdb_len:]))
                    candidate_catpkgs.append(
                        "=" + "/".join(sp_absx[sp_vdb_len:]))

        newline = ""
        if (not "--quiet" in myopts):
            newline = "\n"
        if settings["ROOT"] != "/":
            writemsg_level(darkgreen(newline + \
                ">>> Using system located in ROOT tree %s\n" % \
                settings["ROOT"]))

        if (("--pretend" in myopts) or ("--ask" in myopts)) and \
            not ("--quiet" in myopts):
            writemsg_level(darkgreen(newline + \
                ">>> These are the packages that would be unmerged:\n"))

        # Preservation of order is required for --depclean and --prune so
        # that dependencies are respected. Use all_selected to eliminate
        # duplicate packages since the same package may be selected by
        # multiple atoms.
        pkgmap = []
        all_selected = set()
        for x in candidate_catpkgs:
            # cycle through all our candidate deps and determine
            # what will and will not get unmerged
            try:
                mymatch = vartree.dbapi.match(x)
            except portage.exception.AmbiguousPackageName as errpkgs:
                print("\n\n!!! The short ebuild name \"" + \
                    x + "\" is ambiguous. Please specify")
                print("!!! one of the following fully-qualified " + \
                    "ebuild names instead:\n")
                for i in errpkgs[0]:
                    print(" " + green(i))
                print()
                sys.exit(1)

            if not mymatch and x[0] not in "<>=~":
                mymatch = localtree.dep_match(x)
            if not mymatch:
                portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
                    (x.replace("null/", ""), unmerge_action), noiselevel=-1)
                continue

            pkgmap.append(
                {"protected": set(), "selected": set(), "omitted": set()})
            mykey = len(pkgmap) - 1
            if unmerge_action == "unmerge":
                for y in mymatch:
                    if y not in all_selected:
                        pkgmap[mykey]["selected"].add(y)
                        all_selected.add(y)
            elif unmerge_action == "prune":
                if len(mymatch) == 1:
                    continue
                # Keep only the best version; select the rest for removal.
                best_version = mymatch[0]
                best_slot = vartree.getslot(best_version)
                best_counter = vartree.dbapi.cpv_counter(best_version)
                for mypkg in mymatch[1:]:
                    myslot = vartree.getslot(mypkg)
                    mycounter = vartree.dbapi.cpv_counter(mypkg)
                    if (myslot == best_slot and mycounter > best_counter) or \
                        mypkg == portage.best([mypkg, best_version]):
                        if myslot == best_slot:
                            if mycounter < best_counter:
                                # On slot collision, keep the one with the
                                # highest counter since it is the most
                                # recently installed.
                                continue
                        best_version = mypkg
                        best_slot = myslot
                        best_counter = mycounter
                pkgmap[mykey]["protected"].add(best_version)
                pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
                    if mypkg != best_version and mypkg not in all_selected)
                all_selected.update(pkgmap[mykey]["selected"])
            else:
                # unmerge_action == "clean"
                slotmap = {}
                for mypkg in mymatch:
                    if unmerge_action == "clean":
                        myslot = localtree.getslot(mypkg)
                    else:
                        # since we're pruning, we don't care about slots
                        # and put all the pkgs in together
                        myslot = 0
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

                for mypkg in vartree.dbapi.cp_list(
                    portage.cpv_getkey(mymatch[0])):
                    myslot = vartree.getslot(mypkg)
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

                for myslot in slotmap:
                    counterkeys = list(slotmap[myslot])
                    if not counterkeys:
                        continue
                    counterkeys.sort()
                    pkgmap[mykey]["protected"].add(
                        slotmap[myslot][counterkeys[-1]])
                    del counterkeys[-1]

                    for counter in counterkeys[:]:
                        mypkg = slotmap[myslot][counter]
                        if mypkg not in mymatch:
                            counterkeys.remove(counter)
                            pkgmap[mykey]["protected"].add(
                                slotmap[myslot][counter])

                    #be pretty and get them in order of merge:
                    for ckey in counterkeys:
                        mypkg = slotmap[myslot][ckey]
                        if mypkg not in all_selected:
                            pkgmap[mykey]["selected"].add(mypkg)
                            all_selected.add(mypkg)
                    # ok, now the last-merged package
                    # is protected, and the rest are selected
        numselected = len(all_selected)
        if global_unmerge and not numselected:
            portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
            return 1, {}

        if not numselected:
            portage.writemsg_stdout(
                "\n>>> No packages selected for removal by " + \
                unmerge_action + "\n")
            return 1, {}
    finally:
        if vdb_lock:
            vartree.dbapi.flush_cache()
            vartree.dbapi.unlock()

    # generate a list of package sets that are directly or indirectly listed in "selected",
    # as there is no persistent list of "installed" sets
    installed_sets = ["selected"]
    stop = False
    pos = 0
    while not stop:
        stop = True
        pos = len(installed_sets)
        for s in installed_sets[pos - 1:]:
            if s not in sets:
                continue
            candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
            if candidates:
                stop = False
                installed_sets += candidates
    installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
    del stop, pos

    # we don't want to unmerge packages that are still listed in user-editable package sets
    # listed in "world" as they would be remerged on the next update of "world" or the
    # relevant package sets.
    unknown_sets = set()
    for cp in range(len(pkgmap)):
        for cpv in pkgmap[cp]["selected"].copy():
            try:
                pkg = _pkg(cpv)
            except KeyError:
                # It could have been uninstalled
                # by a concurrent process.
                continue

            if unmerge_action != "clean" and root_config.root == "/":
                # Refuse to unmerge portage itself or the running Python.
                skip_pkg = False
                if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
                    msg = ("Not unmerging package %s since there is no valid reason "
                        "for Portage to unmerge itself.") % (pkg.cpv,)
                    skip_pkg = True
                elif vartree.dbapi._dblink(cpv).isowner(portage._python_interpreter):
                    msg = ("Not unmerging package %s since there is no valid reason "
                        "for Portage to unmerge currently used Python interpreter.") % (pkg.cpv,)
                    skip_pkg = True
                if skip_pkg:
                    for line in textwrap.wrap(msg, 75):
                        out.eerror(line)
                    # adjust pkgmap so the display output is correct
                    pkgmap[cp]["selected"].remove(cpv)
                    all_selected.remove(cpv)
                    pkgmap[cp]["protected"].add(cpv)
                    continue

            parents = []
            for s in installed_sets:
                # skip sets that the user requested to unmerge, and skip world
                # user-selected set, since the package will be removed from
                # that set later on.
                if s in root_config.setconfig.active or s == "selected":
                    continue

                if s not in sets:
                    if s in unknown_sets:
                        continue
                    unknown_sets.add(s)
                    out = portage.output.EOutput()
                    out.eerror(("Unknown set '@%s' in %s%s") % \
                        (s, root_config.settings['EROOT'],
                        portage.const.WORLD_SETS_FILE))
                    continue

                # only check instances of EditablePackageSet as other classes are generally used for
                # special purposes and can be ignored here (and are usually generated dynamically, so the
                # user can't do much about them anyway)
                if isinstance(sets[s], EditablePackageSet):

                    # This is derived from a snippet of code in the
                    # depgraph._iter_atoms_for_pkg() method.
                    for atom in sets[s].iterAtomsForPackage(pkg):
                        inst_matches = vartree.dbapi.match(atom)
                        inst_matches.reverse()  # descending order
                        higher_slot = None
                        for inst_cpv in inst_matches:
                            try:
                                inst_pkg = _pkg(inst_cpv)
                            except KeyError:
                                # It could have been uninstalled
                                # by a concurrent process.
                                continue

                            if inst_pkg.cp != atom.cp:
                                continue
                            if pkg >= inst_pkg:
                                # This is descending order, and we're not
                                # interested in any versions <= pkg given.
                                break
                            if pkg.slot_atom != inst_pkg.slot_atom:
                                higher_slot = inst_pkg
                                break
                        if higher_slot is None:
                            parents.append(s)
                            break
            if parents:
                print(colorize("WARN", "Package %s is going to be unmerged," % cpv))
                print(colorize("WARN", "but still listed in the following package sets:"))
                print(" %s\n" % ", ".join(parents))

    del installed_sets

    numselected = len(all_selected)
    if not numselected:
        writemsg_level(
            "\n>>> No packages selected for removal by " + \
            unmerge_action + "\n")
        return 1, {}

    # Unmerge order only matters in some cases
    if not ordered:
        unordered = {}
        for d in pkgmap:
            selected = d["selected"]
            if not selected:
                continue
            cp = portage.cpv_getkey(next(iter(selected)))
            cp_dict = unordered.get(cp)
            if cp_dict is None:
                cp_dict = {}
                unordered[cp] = cp_dict
                for k in d:
                    cp_dict[k] = set()
            for k, v in d.items():
                cp_dict[k].update(v)
        pkgmap = [unordered[cp] for cp in sorted(unordered)]

    for x in range(len(pkgmap)):
        selected = pkgmap[x]["selected"]
        if not selected:
            continue
        for mytype, mylist in pkgmap[x].items():
            if mytype == "selected":
                continue
            mylist.difference_update(all_selected)
        cp = portage.cpv_getkey(next(iter(selected)))
        for y in localtree.dep_match(cp):
            if y not in pkgmap[x]["omitted"] and \
                y not in pkgmap[x]["selected"] and \
                y not in pkgmap[x]["protected"] and \
                y not in all_selected:
                pkgmap[x]["omitted"].add(y)
        if global_unmerge and not pkgmap[x]["selected"]:
            #avoid cluttering the preview printout with stuff that isn't getting unmerged
            continue
        if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
            virt_cp = sys_virt_map.get(cp)
            if virt_cp is None:
                cp_info = "'%s'" % (cp,)
            else:
                cp_info = "'%s' (%s)" % (cp, virt_cp)
            writemsg_level(colorize("BAD", "\n\n!!! " + \
                "%s is part of your system profile.\n" % (cp_info,)),
                level=logging.WARNING, noiselevel=-1)
            writemsg_level(colorize("WARN", "!!! Unmerging it may " + \
                "be damaging to your system.\n\n"),
                level=logging.WARNING, noiselevel=-1)
        if not quiet:
            writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
        else:
            writemsg_level(bold(cp) + ": ", noiselevel=-1)
        for mytype in ["selected", "protected", "omitted"]:
            if not quiet:
                writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
            if pkgmap[x][mytype]:
                sorted_pkgs = []
                for mypkg in pkgmap[x][mytype]:
                    try:
                        sorted_pkgs.append(mypkg.cpv)
                    except AttributeError:
                        sorted_pkgs.append(_pkg_str(mypkg))
                sorted_pkgs.sort(key=cpv_sort_key())
                for mypkg in sorted_pkgs:
                    if mytype == "selected":
                        writemsg_level(
                            colorize("UNMERGE_WARN", mypkg.version + " "),
                            noiselevel=-1)
                    else:
                        writemsg_level(
                            colorize("GOOD", mypkg.version + " "),
                            noiselevel=-1)
            else:
                writemsg_level("none ", noiselevel=-1)
            if not quiet:
                writemsg_level("\n", noiselevel=-1)
        if quiet:
            writemsg_level("\n", noiselevel=-1)

    writemsg_level("\nAll selected packages: %s\n" %
        " ".join(all_selected), noiselevel=-1)

    writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
        " packages are slated for removal.\n")
    writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
        " and " + colorize("GOOD", "'omitted'") + \
        " packages will not be removed.\n\n")

    return os.EX_OK, pkgmap
def check(self, **kwargs):
    """Scan the tree for packages affected by pending repository
    "updates" entries (pkgmoves and slotmoves) and for packages whose
    stored metadata is outdated with respect to those updates.

    @param onProgress: optional callback invoked as onProgress(maxval, cur);
        (0, 0) is used while progress is indeterminate
    @rtype: tuple
    @return: (True, None) when nothing needs fixing, otherwise
        (False, [error strings]) describing the required moves/updates
    """
    onProgress = kwargs.get('onProgress', None)
    allupdates, errors = self._grab_global_updates()
    # Matching packages and moving them is relatively fast, so the
    # progress bar is updated in indeterminate mode.
    match = self._tree.dbapi.match
    aux_get = self._tree.dbapi.aux_get
    pkg_str = self._tree.dbapi._pkg_str
    settings = self._tree.dbapi.settings
    if onProgress:
        onProgress(0, 0)
    for repo, updates in allupdates.items():
        if repo == 'DEFAULT':
            continue
        if not updates:
            continue

        def repo_match(repository):
            # An update from the master repo also applies to any
            # repository that has no update entries of its own.
            return repository == repo or \
                (repo == self._master_repo and \
                repository not in allupdates)

        # The loop index was unused here, so iterate directly.
        for update_cmd in updates:
            if update_cmd[0] == "move":
                origcp, newcp = update_cmd[1:]
                for cpv in match(origcp):
                    try:
                        cpv = pkg_str(cpv, origcp.repo)
                    except (KeyError, InvalidData):
                        continue
                    if repo_match(cpv.repo):
                        errors.append("'%s' moved to '%s'" % (cpv, newcp))
            elif update_cmd[0] == "slotmove":
                pkg, origslot, newslot = update_cmd[1:]
                atom = pkg.with_slot(origslot)
                for cpv in match(atom):
                    try:
                        cpv = pkg_str(cpv, atom.repo)
                    except (KeyError, InvalidData):
                        continue
                    if repo_match(cpv.repo):
                        errors.append("'%s' slot moved from '%s' to '%s'" % \
                            (cpv, origslot, newslot))

    if onProgress:
        onProgress(0, 0)

    # Searching for updates in all the metadata is relatively slow, so this
    # is where the progress bar comes out of indeterminate mode.
    cpv_all = self._tree.dbapi.cpv_all()
    cpv_all.sort()
    maxval = len(cpv_all)
    meta_keys = self._update_keys + self._portdb._pkg_str_aux_keys
    if onProgress:
        onProgress(maxval, 0)
    for i, cpv in enumerate(cpv_all):
        try:
            metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
        except KeyError:
            continue
        try:
            pkg = _pkg_str(cpv, metadata=metadata, settings=settings)
        except InvalidData:
            continue
        metadata = dict((k, metadata[k]) for k in self._update_keys)
        try:
            updates = allupdates[pkg.repo]
        except KeyError:
            # Fall back to updates that apply to every repository.
            try:
                updates = allupdates['DEFAULT']
            except KeyError:
                continue
        if not updates:
            continue
        metadata_updates = \
            portage.update_dbentries(updates, metadata, parent=pkg)
        if metadata_updates:
            errors.append("'%s' has outdated metadata" % cpv)
        if onProgress:
            onProgress(maxval, i + 1)

    if errors:
        return (False, errors)
    return (True, None)
def pkg_desc_index_line_format(cp, pkgs, desc):
    """Serialize one package-description index entry as a single line
    of the form "<cp> <ver> <ver> ...: <desc>\\n".
    """
    version_list = " ".join(_pkg_str(cpv).version for cpv in pkgs)
    return "%s %s: %s\n" % (cp, version_list, desc)
def _unmerge_display(root_config, myopts, unmerge_action, unmerge_files, clean_delay=1, ordered=0, writemsg_level=portage.util.writemsg_level):
    """
    Resolve the unmerge arguments into a pkgmap and display a preview of
    what would be unmerged, protected, and omitted.

    Returns a tuple of (returncode, pkgmap) where returncode is
    os.EX_OK if no errors occur, and 1 otherwise.

    @param root_config: RootConfig for the target root
    @param myopts: parsed emerge options (dict-like)
    @param unmerge_action: one of "unmerge", "rage-clean", "prune", "clean"
    @param unmerge_files: command-line package arguments
    @param clean_delay: unused here; kept for interface compatibility
    @param ordered: when true, preserve/produce dependency-respecting order
    @param writemsg_level: output function (injectable for testing)
    """
    quiet = "--quiet" in myopts
    settings = root_config.settings
    sets = root_config.sets
    vartree = root_config.trees["vartree"]
    candidate_catpkgs = []
    global_unmerge = 0
    out = portage.output.EOutput()
    pkg_cache = {}
    db_keys = list(vartree.dbapi._aux_cache_keys)

    def _pkg(cpv):
        # Memoized construction of installed-Package instances; raises
        # KeyError (from aux_get) if the cpv vanished concurrently.
        pkg = pkg_cache.get(cpv)
        if pkg is None:
            pkg = Package(built=True, cpv=cpv, installed=True,
                metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
                operation="uninstall", root_config=root_config,
                type_name="installed")
            pkg_cache[cpv] = pkg
        return pkg

    vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
    try:
        # At least the parent needs to exist for the lock file.
        portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:
        pass
    vdb_lock = None
    try:
        if os.access(vdb_path, os.W_OK):
            vartree.dbapi.lock()
            vdb_lock = True

        realsyslist = []
        sys_virt_map = {}
        for x in sets["system"].getAtoms():
            for atom in expand_new_virt(vartree.dbapi, x):
                if not atom.blocker:
                    realsyslist.append(atom)
                    if atom.cp != x.cp:
                        sys_virt_map[atom.cp] = x.cp

        syslist = []
        for x in realsyslist:
            mycp = x.cp
            # Since Gentoo stopped using old-style virtuals in
            # 2011, typically it's possible to avoid getvirtuals()
            # calls entirely. It will not be triggered here by
            # new-style virtuals since those are expanded to
            # non-virtual atoms above by expand_new_virt().
            if mycp.startswith("virtual/") and \
                mycp in settings.getvirtuals():
                providers = []
                for provider in settings.getvirtuals()[mycp]:
                    if vartree.dbapi.match(provider):
                        providers.append(provider)
                # Only treat the provider as part of @system when it is
                # unambiguous (exactly one installed provider).
                if len(providers) == 1:
                    syslist.extend(providers)
            else:
                syslist.append(mycp)
        syslist = frozenset(syslist)

        if not unmerge_files:
            if unmerge_action in ["rage-clean", "unmerge"]:
                print()
                print(bold("emerge %s" % unmerge_action) +
                    " can only be used with specific package names")
                print()
                return 1, {}

            global_unmerge = 1

        # process all arguments and add all
        # valid db entries to candidate_catpkgs
        if global_unmerge:
            if not unmerge_files:
                candidate_catpkgs.extend(vartree.dbapi.cp_all())
        else:
            #we've got command-line arguments
            if not unmerge_files:
                print("\nNo packages to %s have been provided.\n" %
                    unmerge_action)
                return 1, {}
            for x in unmerge_files:
                arg_parts = x.split('/')
                if x[0] not in [".", "/"] and \
                    arg_parts[-1][-7:] != ".ebuild":
                    #possible cat/pkg or dep; treat as such
                    candidate_catpkgs.append(x)
                elif unmerge_action in ["prune", "clean"]:
                    print("\n!!! Prune and clean do not accept individual" + \
                        " ebuilds as arguments;\n    skipping.\n")
                    continue
                else:
                    # it appears that the user is specifying an installed
                    # ebuild and we're in "unmerge" mode, so it's ok.
                    if not os.path.exists(x):
                        print("\n!!! The path '" + x + "' doesn't exist.\n")
                        return 1, {}

                    absx = os.path.abspath(x)
                    sp_absx = absx.split("/")
                    if sp_absx[-1][-7:] == ".ebuild":
                        del sp_absx[-1]
                        absx = "/".join(sp_absx)

                    sp_absx_len = len(sp_absx)

                    vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)

                    sp_vdb = vdb_path.split("/")
                    sp_vdb_len = len(sp_vdb)

                    if not os.path.exists(absx + "/CONTENTS"):
                        print("!!! Not a valid db dir: " + str(absx))
                        return 1, {}

                    if sp_absx_len <= sp_vdb_len:
                        # The Path is shorter... so it can't be inside the vdb.
                        print(sp_absx)
                        print(absx)
                        print("\n!!!", x, "cannot be inside "+ \
                            vdb_path+"; aborting.\n")
                        return 1, {}

                    for idx in range(0, sp_vdb_len):
                        if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
                            print(sp_absx)
                            print(absx)
                            print("\n!!!", x, "is not inside "+\
                                vdb_path+"; aborting.\n")
                            return 1, {}

                    print("=" + "/".join(sp_absx[sp_vdb_len:]))
                    candidate_catpkgs.append(
                        "=" + "/".join(sp_absx[sp_vdb_len:]))

        newline = ""
        if not quiet:
            newline = "\n"
        if settings["ROOT"] != "/":
            writemsg_level(darkgreen(newline+ \
                ">>> Using system located in ROOT tree %s\n" % \
                settings["ROOT"]))

        if ("--pretend" in myopts or "--ask" in myopts) and not quiet:
            writemsg_level(darkgreen(newline+\
                ">>> These are the packages that would be unmerged:\n"))

        # Preservation of order is required for --depclean and --prune so
        # that dependencies are respected. Use all_selected to eliminate
        # duplicate packages since the same package may be selected by
        # multiple atoms.
        pkgmap = []
        all_selected = set()
        for x in candidate_catpkgs:
            # cycle through all our candidate deps and determine
            # what will and will not get unmerged
            try:
                mymatch = vartree.dbapi.match(x)
            except portage.exception.AmbiguousPackageName as errpkgs:
                print("\n\n!!! The short ebuild name \"" + \
                    x + "\" is ambiguous.  Please specify")
                print("!!! one of the following fully-qualified " + \
                    "ebuild names instead:\n")
                for i in errpkgs[0]:
                    print("    " + green(i))
                print()
                sys.exit(1)

            if not mymatch and x[0] not in "<>=~":
                mymatch = vartree.dep_match(x)
            if not mymatch:
                portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
                    (x.replace("null/", ""), unmerge_action), noiselevel=-1)
                continue

            pkgmap.append(
                {"protected": set(), "selected": set(), "omitted": set()})
            mykey = len(pkgmap) - 1
            if unmerge_action in ["rage-clean", "unmerge"]:
                for y in mymatch:
                    if y not in all_selected:
                        pkgmap[mykey]["selected"].add(y)
                        all_selected.add(y)
            elif unmerge_action == "prune":
                if len(mymatch) == 1:
                    continue
                # Determine the single best version to keep; everything
                # else in this cp gets selected for removal.
                best_version = mymatch[0]
                best_slot = vartree.getslot(best_version)
                best_counter = vartree.dbapi.cpv_counter(best_version)
                for mypkg in mymatch[1:]:
                    myslot = vartree.getslot(mypkg)
                    mycounter = vartree.dbapi.cpv_counter(mypkg)
                    if (myslot == best_slot and mycounter > best_counter) or \
                        mypkg == portage.best([mypkg, best_version]):
                        if myslot == best_slot:
                            if mycounter < best_counter:
                                # On slot collision, keep the one with the
                                # highest counter since it is the most
                                # recently installed.
                                continue
                        best_version = mypkg
                        best_slot = myslot
                        best_counter = mycounter
                pkgmap[mykey]["protected"].add(best_version)
                pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
                    if mypkg != best_version and mypkg not in all_selected)
                all_selected.update(pkgmap[mykey]["selected"])
            else:
                # unmerge_action == "clean"
                slotmap = {}
                for mypkg in mymatch:
                    if unmerge_action == "clean":
                        myslot = vartree.getslot(mypkg)
                    else:
                        # since we're pruning, we don't care about slots
                        # and put all the pkgs in together
                        myslot = 0
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

                for mypkg in vartree.dbapi.cp_list(
                    portage.cpv_getkey(mymatch[0])):
                    myslot = vartree.getslot(mypkg)
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

                for myslot in slotmap:
                    counterkeys = list(slotmap[myslot])
                    if not counterkeys:
                        continue
                    counterkeys.sort()
                    # The highest counter in each slot is protected.
                    pkgmap[mykey]["protected"].add(
                        slotmap[myslot][counterkeys[-1]])
                    del counterkeys[-1]

                    for counter in counterkeys[:]:
                        mypkg = slotmap[myslot][counter]
                        if mypkg not in mymatch:
                            counterkeys.remove(counter)
                            pkgmap[mykey]["protected"].add(
                                slotmap[myslot][counter])

                    #be pretty and get them in order of merge:
                    for ckey in counterkeys:
                        mypkg = slotmap[myslot][ckey]
                        if mypkg not in all_selected:
                            pkgmap[mykey]["selected"].add(mypkg)
                            all_selected.add(mypkg)
                    # ok, now the last-merged package
                    # is protected, and the rest are selected
        numselected = len(all_selected)
        if global_unmerge and not numselected:
            portage.writemsg_stdout(
                "\n>>> No outdated packages were found on your system.\n")
            return 1, {}

        if not numselected:
            portage.writemsg_stdout(
                "\n>>> No packages selected for removal by " + \
                unmerge_action + "\n")
            return 1, {}
    finally:
        if vdb_lock:
            vartree.dbapi.flush_cache()
            vartree.dbapi.unlock()

    # generate a list of package sets that are directly or indirectly listed in "selected",
    # as there is no persistent list of "installed" sets
    installed_sets = ["selected"]
    stop = False
    pos = 0
    while not stop:
        stop = True
        pos = len(installed_sets)
        for s in installed_sets[pos - 1:]:
            if s not in sets:
                continue
            candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
            if candidates:
                stop = False
                installed_sets += candidates
    installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
    del stop, pos

    # we don't want to unmerge packages that are still listed in user-editable package sets
    # listed in "world" as they would be remerged on the next update of "world" or the
    # relevant package sets.
    unknown_sets = set()
    for cp in range(len(pkgmap)):
        for cpv in pkgmap[cp]["selected"].copy():
            try:
                pkg = _pkg(cpv)
            except KeyError:
                # It could have been uninstalled
                # by a concurrent process.
                continue

            if unmerge_action != "clean" and root_config.root == "/":
                skip_pkg = False
                if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
                    msg = ("Not unmerging package %s "
                        "since there is no valid reason for Portage to "
                        "%s itself.") % (pkg.cpv, unmerge_action)
                    skip_pkg = True
                elif vartree.dbapi._dblink(cpv).isowner(
                    portage._python_interpreter):
                    msg = ("Not unmerging package %s since there is no valid "
                        "reason for Portage to %s currently used Python "
                        "interpreter.") % (pkg.cpv, unmerge_action)
                    skip_pkg = True
                if skip_pkg:
                    for line in textwrap.wrap(msg, 75):
                        out.eerror(line)
                    # adjust pkgmap so the display output is correct
                    pkgmap[cp]["selected"].remove(cpv)
                    all_selected.remove(cpv)
                    pkgmap[cp]["protected"].add(cpv)
                    continue

            parents = []
            for s in installed_sets:
                # skip sets that the user requested to unmerge, and skip world
                # user-selected set, since the package will be removed from
                # that set later on.
                if s in root_config.setconfig.active or s == "selected":
                    continue

                if s not in sets:
                    if s in unknown_sets:
                        continue
                    unknown_sets.add(s)
                    out = portage.output.EOutput()
                    out.eerror(("Unknown set '@%s' in %s%s") % \
                        (s, root_config.settings['EROOT'],
                        portage.const.WORLD_SETS_FILE))
                    continue

                # only check instances of EditablePackageSet as other classes are generally used for
                # special purposes and can be ignored here (and are usually generated dynamically, so the
                # user can't do much about them anyway)
                if isinstance(sets[s], EditablePackageSet):

                    # This is derived from a snippet of code in the
                    # depgraph._iter_atoms_for_pkg() method.
                    for atom in sets[s].iterAtomsForPackage(pkg):
                        inst_matches = vartree.dbapi.match(atom)
                        inst_matches.reverse() # descending order
                        higher_slot = None
                        for inst_cpv in inst_matches:
                            try:
                                inst_pkg = _pkg(inst_cpv)
                            except KeyError:
                                # It could have been uninstalled
                                # by a concurrent process.
                                continue

                            if inst_pkg.cp != atom.cp:
                                continue
                            if pkg >= inst_pkg:
                                # This is descending order, and we're not
                                # interested in any versions <= pkg given.
                                break
                            if pkg.slot_atom != inst_pkg.slot_atom:
                                higher_slot = inst_pkg
                                break
                        if higher_slot is None:
                            parents.append(s)
                            break
            if parents:
                print(colorize("WARN", "Package %s is going to be unmerged," % cpv))
                print(colorize("WARN", "but still listed in the following package sets:"))
                print("    %s\n" % ", ".join(parents))

    del installed_sets

    numselected = len(all_selected)
    if not numselected:
        writemsg_level(
            "\n>>> No packages selected for removal by " + \
            unmerge_action + "\n")
        return 1, {}

    # Unmerge order only matters in some cases
    if not ordered:
        unordered = {}
        for d in pkgmap:
            selected = d["selected"]
            if not selected:
                continue
            cp = portage.cpv_getkey(next(iter(selected)))
            cp_dict = unordered.get(cp)
            if cp_dict is None:
                cp_dict = {}
                unordered[cp] = cp_dict
                for k in d:
                    cp_dict[k] = set()
            for k, v in d.items():
                cp_dict[k].update(v)
        pkgmap = [unordered[cp] for cp in sorted(unordered)]

    # Sort each set of selected packages
    if ordered:
        for pkg in pkgmap:
            pkg["selected"] = sorted(pkg["selected"], key=cpv_sort_key())

    for x in range(len(pkgmap)):
        selected = pkgmap[x]["selected"]
        if not selected:
            continue
        for mytype, mylist in pkgmap[x].items():
            if mytype == "selected":
                continue
            mylist.difference_update(all_selected)
        cp = portage.cpv_getkey(next(iter(selected)))
        for y in vartree.dep_match(cp):
            if y not in pkgmap[x]["omitted"] and \
                y not in pkgmap[x]["selected"] and \
                y not in pkgmap[x]["protected"] and \
                y not in all_selected:
                pkgmap[x]["omitted"].add(y)
        if global_unmerge and not pkgmap[x]["selected"]:
            #avoid cluttering the preview printout with stuff that isn't getting unmerged
            continue
        if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
            virt_cp = sys_virt_map.get(cp)
            if virt_cp is None:
                cp_info = "'%s'" % (cp,)
            else:
                cp_info = "'%s' (%s)" % (cp, virt_cp)
            writemsg_level(colorize("BAD","\n\n!!! " + \
                "%s is part of your system profile.\n" % (cp_info,)),
                level=logging.WARNING, noiselevel=-1)
            writemsg_level(colorize("WARN","!!! Unmerging it may " + \
                "be damaging to your system.\n\n"),
                level=logging.WARNING, noiselevel=-1)

        if not quiet:
            writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
        else:
            writemsg_level(bold(cp) + ": ", noiselevel=-1)
        for mytype in ["selected", "protected", "omitted"]:
            if not quiet:
                writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
            if pkgmap[x][mytype]:
                sorted_pkgs = []
                for mypkg in pkgmap[x][mytype]:
                    try:
                        sorted_pkgs.append(mypkg.cpv)
                    except AttributeError:
                        sorted_pkgs.append(_pkg_str(mypkg))
                sorted_pkgs.sort(key=cpv_sort_key())
                for mypkg in sorted_pkgs:
                    if mytype == "selected":
                        writemsg_level(
                            colorize("UNMERGE_WARN", mypkg.version + " "),
                            noiselevel=-1)
                    else:
                        writemsg_level(
                            colorize("GOOD", mypkg.version + " "),
                            noiselevel=-1)
            else:
                writemsg_level("none ", noiselevel=-1)
            if not quiet:
                writemsg_level("\n", noiselevel=-1)
        if quiet:
            writemsg_level("\n", noiselevel=-1)

    writemsg_level("\nAll selected packages: %s\n" %
        " ".join('=%s' % x for x in all_selected), noiselevel=-1)

    writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
        " packages are slated for removal.\n")
    writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
        " and " + colorize("GOOD", "'omitted'") + \
        " packages will not be removed.\n\n")

    return os.EX_OK, pkgmap
def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
    """Return the list of reasons why a package is masked.

    @param mycpv: cpv string or a Package instance
    @param settings: portage config instance used for mask/keyword lookups
    @param portdb: portdbapi used to fetch metadata when not supplied
    @param myrepo: optional repository name for aux_get
    @rtype: list
    @return: list of _MaskReason instances (empty when unmasked); a
        corrupt entry yields [_MaskReason("corruption", "corruption")]
    """
    metadata = None
    installed = False
    if not isinstance(mycpv, str):
        # emerge passed in a Package instance
        pkg = mycpv
        mycpv = pkg.cpv
        metadata = pkg._metadata
        installed = pkg.installed

    if metadata is None:
        db_keys = list(portdb._aux_cache_keys)
        try:
            metadata = dict(zip(db_keys,
                portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
        except KeyError:
            if not portdb.cpv_exists(mycpv):
                raise
            return [_MaskReason("corruption", "corruption")]
        if "?" in metadata["LICENSE"]:
            # LICENSE contains USE conditionals, so USE must be
            # calculated in order to evaluate it.
            settings.setcpv(mycpv, mydb=metadata)
            metadata["USE"] = settings["PORTAGE_USE"]
        else:
            metadata["USE"] = ""

    try:
        mycpv.slot
    except AttributeError:
        try:
            mycpv = _pkg_str(mycpv, metadata=metadata, settings=settings)
        except portage.exception.InvalidData:
            raise ValueError(_("invalid CPV: %s") % mycpv)

    rValue = []

    # package.mask checking
    if settings._getMaskAtom(mycpv, metadata):
        rValue.append(_MaskReason("package.mask", "package.mask",
            _UnmaskHint("p_mask", None)))

    # keywords checking
    eapi = metadata["EAPI"]
    mygroups = settings._getKeywords(mycpv, metadata)
    licenses = metadata["LICENSE"]
    properties = metadata["PROPERTIES"]
    if not eapi_is_supported(eapi):
        return [_MaskReason("EAPI", "EAPI %s" % eapi)]
    if _eapi_is_deprecated(eapi) and not installed:
        return [_MaskReason("EAPI", "EAPI %s" % eapi)]
    egroups = settings.configdict["backupenv"].get(
        "ACCEPT_KEYWORDS", "").split()
    global_accept_keywords = settings.get("ACCEPT_KEYWORDS", "")
    pgroups = global_accept_keywords.split()
    myarch = settings["ARCH"]
    if pgroups and myarch not in pgroups:
        # For operating systems other than Linux, ARCH is not
        # necessarily a valid keyword.
        myarch = pgroups[0].lstrip("~")

    # NOTE: This logic is copied from KeywordsManager.getMissingKeywords().
    unmaskgroups = settings._keywords_manager.getPKeywords(mycpv,
        metadata["SLOT"], metadata["repository"], global_accept_keywords)
    pgroups.extend(unmaskgroups)
    if unmaskgroups or egroups:
        pgroups = settings._keywords_manager._getEgroups(egroups, pgroups)
    else:
        pgroups = set(pgroups)

    kmask = "missing"
    kmask_hint = None

    if '**' in pgroups:
        # '**' accepts anything, including empty KEYWORDS.
        kmask = None
    else:
        for keyword in pgroups:
            if keyword in mygroups:
                kmask = None
                break

    if kmask:
        for gp in mygroups:
            if gp == "*":
                kmask = None
                break
            elif gp == "~*":
                # '~*' in KEYWORDS matches any accepted unstable keyword.
                for x in pgroups:
                    if x[:1] == "~":
                        kmask = None
                        break
                if kmask is None:
                    break
            elif gp == "-" + myarch and myarch in pgroups:
                kmask = "-" + myarch
                break
            elif gp == "~" + myarch and myarch in pgroups:
                kmask = "~" + myarch
                kmask_hint = _UnmaskHint("unstable keyword", kmask)
                break

    if kmask == "missing":
        kmask_hint = _UnmaskHint("unstable keyword", "**")

    try:
        missing_licenses = settings._getMissingLicenses(mycpv, metadata)
        if missing_licenses:
            # Show only the tokens of LICENSE that are actually missing.
            allowed_tokens = set(["||", "(", ")"])
            allowed_tokens.update(missing_licenses)
            license_split = licenses.split()
            license_split = [x for x in license_split \
                if x in allowed_tokens]
            msg = license_split[:]
            msg.append("license(s)")
            rValue.append(_MaskReason("LICENSE", " ".join(msg),
                _UnmaskHint("license", set(missing_licenses))))
    except portage.exception.InvalidDependString as e:
        rValue.append(_MaskReason("invalid", "LICENSE: " + str(e)))

    try:
        missing_properties = settings._getMissingProperties(mycpv, metadata)
        if missing_properties:
            allowed_tokens = set(["||", "(", ")"])
            allowed_tokens.update(missing_properties)
            properties_split = properties.split()
            properties_split = [x for x in properties_split \
                if x in allowed_tokens]
            msg = properties_split[:]
            msg.append("properties")
            rValue.append(_MaskReason("PROPERTIES", " ".join(msg)))
    except portage.exception.InvalidDependString as e:
        rValue.append(_MaskReason("invalid", "PROPERTIES: " + str(e)))

    try:
        missing_restricts = settings._getMissingRestrict(mycpv, metadata)
        if missing_restricts:
            msg = list(missing_restricts)
            msg.append("in RESTRICT")
            rValue.append(_MaskReason("RESTRICT", " ".join(msg)))
    except InvalidDependString as e:
        rValue.append(_MaskReason("invalid", "RESTRICT: %s" % (e,)))

    # Only show KEYWORDS masks for installed packages
    # if they're not masked for any other reason.
    if kmask and (not installed or not rValue):
        rValue.append(_MaskReason("KEYWORDS",
            kmask + " keyword", unmask_hint=kmask_hint))
    return rValue
def fix(self, **kwargs):
    """Rebuild stale/missing entries in the binary package index.

    Counts stale and missing entries without the lock first; only when
    something needs fixing is the pkgindex lock taken, the counts redone
    under the lock, and the index rewritten.

    @param onProgress: optional callback invoked as onProgress(maxval, cur)
    @rtype: tuple
    @return: (True, None)
    """
    onProgress = kwargs.get('onProgress', None)
    bintree = self._bintree
    # Force reindex in case pkgdir-index-trusted is enabled.
    bintree._populate_local(reindex=True)
    bintree.populated = True
    _instance_key = bintree.dbapi._instance_key
    cpv_all = self._bintree.dbapi.cpv_all()
    cpv_all.sort()
    maxval = 0
    if onProgress:
        onProgress(maxval, 0)
    pkgindex = self._pkgindex
    missing = []
    stale = []
    metadata = {}
    # First pass (no lock): index entries keyed by package instance;
    # entries whose package no longer exists are stale.
    for d in pkgindex.packages:
        cpv = _pkg_str(d["CPV"], metadata=d,
            settings=bintree.settings)
        d["CPV"] = cpv
        metadata[_instance_key(cpv)] = d
        if not bintree.dbapi.cpv_exists(cpv):
            stale.append(cpv)
    # Packages with no index entry (or an outdated one) are missing.
    for cpv in cpv_all:
        d = metadata.get(_instance_key(cpv))
        if not d or self._need_update(cpv, d):
            missing.append(cpv)
    if missing or stale:
        from portage import locks
        pkgindex_lock = locks.lockfile(
            self._pkgindex_file, wantnewlockfile=1)
        try:
            # Repopulate with lock held. If _populate_local returns
            # data then use that, since _load_pkgindex would return
            # stale data in this case.
            self._pkgindex = pkgindex = (bintree._populate_local() or
                bintree._load_pkgindex())
            cpv_all = self._bintree.dbapi.cpv_all()
            cpv_all.sort()

            # Recount stale/missing packages, with lock held.
            missing = []
            stale = []
            metadata = {}
            for d in pkgindex.packages:
                cpv = _pkg_str(d["CPV"], metadata=d,
                    settings=bintree.settings)
                d["CPV"] = cpv
                metadata[_instance_key(cpv)] = d
                if not bintree.dbapi.cpv_exists(cpv):
                    stale.append(cpv)
            for cpv in cpv_all:
                d = metadata.get(_instance_key(cpv))
                if not d or self._need_update(cpv, d):
                    missing.append(cpv)

            maxval = len(missing)
            for i, cpv in enumerate(missing):
                d = bintree._pkgindex_entry(cpv)
                try:
                    bintree._eval_use_flags(cpv, d)
                except portage.exception.InvalidDependString:
                    writemsg("!!! Invalid binary package: '%s'\n" % \
                        bintree.getname(cpv), noiselevel=-1)
                else:
                    metadata[_instance_key(cpv)] = d

                if onProgress:
                    onProgress(maxval, i+1)

            for cpv in stale:
                del metadata[_instance_key(cpv)]

            # We've updated the pkgindex, so set it to
            # repopulate when necessary.
            bintree.populated = False

            del pkgindex.packages[:]
            pkgindex.packages.extend(metadata.values())
            bintree._update_pkgindex_header(self._pkgindex.header)
            bintree._pkgindex_write(self._pkgindex)

        finally:
            locks.unlockfile(pkgindex_lock)

    if onProgress:
        if maxval == 0:
            maxval = 1
        onProgress(maxval, maxval)
    return (True, None)
def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False, myrepo=None):
    """
    If specified, the myrepo argument is assumed to be valid. This
    should be a safe assumption since portdbapi methods always
    return valid repo names and valid "repository" metadata from
    aux_get.

    Scans package.mask files (repository masters first, then profiles,
    then user config) and returns the comment block that precedes the
    atom masking mycpv, or None when no mask (or no comment) applies.
    When return_location is true, a (comment, filename) tuple is
    returned instead.
    """
    if settings is None:
        settings = portage.settings
    if portdb is None:
        portdb = portage.portdb
    mysplit = catpkgsplit(mycpv)
    if not mysplit:
        raise ValueError(_("invalid CPV: %s") % mycpv)

    if metadata is None:
        db_keys = list(portdb._aux_cache_keys)
        try:
            metadata = dict(
                zip(db_keys, portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
        except KeyError:
            if not portdb.cpv_exists(mycpv):
                raise
        else:
            if myrepo is None:
                myrepo = _gen_valid_repo(metadata["repository"])
    elif myrepo is None:
        myrepo = metadata.get("repository")
        if myrepo is not None:
            myrepo = _gen_valid_repo(metadata["repository"])

    if metadata is not None and not portage.eapi_is_supported(
            metadata["EAPI"]):
        # Return early since otherwise we might produce invalid
        # results given that the EAPI is not supported. Also,
        # metadata is mostly useless in this case since it doesn't
        # contain essential things like SLOT.
        if return_location:
            return (None, None)
        return None

    # Sometimes we can't access SLOT or repository due to corruption.
    pkg = mycpv
    try:
        pkg.slot
    except AttributeError:
        pkg = _pkg_str(mycpv, metadata=metadata, repo=myrepo)

    cpv_slot_list = [pkg]

    mycp = pkg.cp

    locations = []
    if pkg.repo in settings.repositories:
        for repo in settings.repositories[pkg.repo].masters + (
                settings.repositories[pkg.repo],):
            locations.append(os.path.join(repo.location, "profiles"))
    locations.extend(settings.profiles)
    locations.append(
        os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH))
    # Reverse so that the most specific locations are scanned last and
    # their comments win.
    locations.reverse()
    pmasklists = []
    for profile in locations:
        pmask_filename = os.path.join(profile, "package.mask")
        node = None
        for l, recursive_filename in grablines(pmask_filename,
                recursive=1, remember_source_file=True):
            if node is None or node[0] != recursive_filename:
                node = (recursive_filename, [])
                pmasklists.append(node)
            node[1].append(l)

    pmaskdict = settings._mask_manager._pmaskdict
    if mycp in pmaskdict:
        for x in pmaskdict[mycp]:
            if match_from_list(x, cpv_slot_list):
                x = x.without_repo
                for pmask in pmasklists:
                    comment = ""
                    comment_valid = -1
                    pmask_filename = pmask[0]
                    for i in range(len(pmask[1])):
                        l = pmask[1][i].strip()
                        try:
                            l_atom = Atom(l, allow_repo=True,
                                allow_wildcard=True).without_repo
                        except InvalidAtom:
                            l_atom = None
                        if l == "":
                            # Blank line ends the current comment block.
                            comment = ""
                            comment_valid = -1
                        elif l[0] == "#":
                            comment += l + "\n"
                            comment_valid = i + 1
                        elif l_atom == x:
                            if comment_valid != i:
                                comment = ""
                            if return_location:
                                return (comment, pmask_filename)
                            return comment
                        elif comment_valid != -1:
                            # Apparently this comment applies to multiple masks, so
                            # it remains valid until a blank line is encountered.
                            comment_valid += 1
    if return_location:
        return (None, None)
    return None
def __init__(self, **kwargs):
    """Initialize a Package from keyword args; 'metadata' is popped and
    wrapped, and the remaining kwargs go to the Task/SlotObject
    constructor. Invalid SLOT or disallowed IUSE defaults are recorded
    via _invalid_metadata rather than raising.
    """
    metadata = _PackageMetadataWrapperBase(kwargs.pop("metadata"))
    Task.__init__(self, **kwargs)
    # the SlotObject constructor assigns self.root_config from keyword args
    # and is an instance of a '_emerge.RootConfig.RootConfig' class
    self.root = self.root_config.root
    self._raw_metadata = metadata
    self._metadata = _PackageMetadataWrapper(self, metadata)
    if not self.built:
        # CHOST comes from the active profile for source packages.
        self._metadata["CHOST"] = self.root_config.settings.get("CHOST", "")
    eapi_attrs = _get_eapi_attrs(self.eapi)

    try:
        db = self.cpv._db
    except AttributeError:
        if self.built:
            # For independence from the source ebuild repository and
            # profile implicit IUSE state, require the _db attribute
            # for built packages.
            raise
        db = self.root_config.trees["porttree"].dbapi

    self.cpv = _pkg_str(
        self.cpv, metadata=self._metadata, settings=self.root_config.settings, db=db
    )
    if hasattr(self.cpv, "slot_invalid"):
        self._invalid_metadata(
            "SLOT.invalid", "SLOT: invalid value: '%s'" % self._metadata["SLOT"]
        )
    # Cache frequently used attributes derived from the validated cpv.
    self.cpv_split = self.cpv.cpv_split
    self.category, self.pf = portage.catsplit(self.cpv)
    self.cp = self.cpv.cp
    self.version = self.cpv.version
    self.slot = self.cpv.slot
    self.sub_slot = self.cpv.sub_slot
    self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
    # sync metadata with validated repo (may be UNKNOWN_REPO)
    self._metadata["repository"] = self.cpv.repo

    implicit_match = db._iuse_implicit_cnstr(self.cpv, self._metadata)
    self.iuse = self._iuse(
        self, self._metadata["IUSE"].split(), implicit_match, self.eapi
    )

    if (self.iuse.enabled or self.iuse.disabled) and not eapi_attrs.iuse_defaults:
        if not self.installed:
            self._invalid_metadata(
                "EAPI.incompatible",
                "IUSE contains defaults, but EAPI doesn't allow them",
            )
    if self.inherited is None:
        self.inherited = frozenset()

    if self.operation is None:
        if self.onlydeps or self.installed:
            self.operation = "nomerge"
        else:
            self.operation = "merge"

    # Identity used for hashing/equality of Package instances.
    self._hash_key = Package._gen_hash_key(
        cpv=self.cpv,
        installed=self.installed,
        onlydeps=self.onlydeps,
        operation=self.operation,
        repo_name=self.cpv.repo,
        root_config=self.root_config,
        type_name=self.type_name,
    )
    self._hash_value = hash(self._hash_key)
def getUseForce(self, pkg=None):
    """Return the set of forced USE flags that apply to pkg.

    With pkg=None, returns the global use.force stack. Otherwise
    repository-level use.force/package.use.force (masters first) and
    profile-level entries are stacked, including the *.stable.force
    variants when the package's keywords are stable.

    @param pkg: Package instance, cpv string, or None for the global set
    @rtype: frozenset
    """
    if pkg is None:
        return frozenset(stack_lists(
            self._useforce_list, incremental=True))

    cp = getattr(pkg, "cp", None)
    if cp is None:
        # pkg is a plain cpv string; promote it to _pkg_str so that
        # atom matching below can use slot/repo.
        slot = dep_getslot(pkg)
        repo = dep_getrepo(pkg)
        pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
        cp = pkg.cp

    try:
        stable = pkg.stable
    except AttributeError:
        # KEYWORDS is unavailable (prior to "depend" phase)
        stable = False

    useforce = []

    if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
        repos = []
        try:
            repos.extend(repo.name for repo in
                self.repositories[pkg.repo].masters)
        except KeyError:
            pass
        repos.append(pkg.repo)
        for repo in repos:
            useforce.append(self._repo_useforce_dict.get(repo, {}))
            if stable:
                useforce.append(self._repo_usestableforce_dict.get(repo, {}))
            cpdict = self._repo_puseforce_dict.get(repo, {}).get(cp)
            if cpdict:
                pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
                if pkg_useforce:
                    useforce.extend(pkg_useforce)
            if stable:
                cpdict = self._repo_pusestableforce_dict.get(repo, {}).get(cp)
                if cpdict:
                    pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
                    if pkg_useforce:
                        useforce.extend(pkg_useforce)

    for i, puseforce_dict in enumerate(self._puseforce_list):
        if self._useforce_list[i]:
            useforce.append(self._useforce_list[i])
        if stable and self._usestableforce_list[i]:
            useforce.append(self._usestableforce_list[i])
        cpdict = puseforce_dict.get(cp)
        if cpdict:
            pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
            if pkg_useforce:
                useforce.extend(pkg_useforce)
        if stable:
            cpdict = self._pusestableforce_list[i].get(cp)
            if cpdict:
                pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
                if pkg_useforce:
                    useforce.extend(pkg_useforce)

    return frozenset(stack_lists(useforce, incremental=True))
def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False, myrepo=None):
    """
    If specified, the myrepo argument is assumed to be valid. This
    should be a safe assumption since portdbapi methods always
    return valid repo names and valid "repository" metadata from
    aux_get.

    Scans package.mask files (repository masters first, then profiles,
    then user config) and returns the comment block preceding the atom
    that masks mycpv, or None when no mask (or no comment) applies.
    When return_location is true, a (comment, filename) tuple is
    returned instead.
    """
    if settings is None:
        settings = portage.settings
    if portdb is None:
        portdb = portage.portdb
    mysplit = catpkgsplit(mycpv)
    if not mysplit:
        raise ValueError(_("invalid CPV: %s") % mycpv)

    if metadata is None:
        db_keys = list(portdb._aux_cache_keys)
        try:
            metadata = dict(zip(db_keys,
                portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
        except KeyError:
            if not portdb.cpv_exists(mycpv):
                raise
        else:
            if myrepo is None:
                myrepo = _gen_valid_repo(metadata["repository"])
    elif myrepo is None:
        myrepo = metadata.get("repository")
        if myrepo is not None:
            myrepo = _gen_valid_repo(metadata["repository"])

    if metadata is not None and \
        not portage.eapi_is_supported(metadata["EAPI"]):
        # Return early since otherwise we might produce invalid
        # results given that the EAPI is not supported. Also,
        # metadata is mostly useless in this case since it doesn't
        # contain essential things like SLOT.
        if return_location:
            return (None, None)
        else:
            return None

    # Sometimes we can't access SLOT or repository due to corruption.
    pkg = mycpv
    try:
        pkg.slot
    except AttributeError:
        pkg = _pkg_str(mycpv, metadata=metadata, repo=myrepo)

    cpv_slot_list = [pkg]

    mycp = pkg.cp

    locations = []
    if pkg.repo in settings.repositories:
        for repo in settings.repositories[pkg.repo].masters + (settings.repositories[pkg.repo],):
            locations.append(os.path.join(repo.location, "profiles"))
    locations.extend(settings.profiles)
    locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
        USER_CONFIG_PATH))
    # Reverse so that the most specific locations are scanned last and
    # their comments win.
    locations.reverse()
    pmasklists = []
    for profile in locations:
        pmask_filename = os.path.join(profile, "package.mask")
        node = None
        for l, recursive_filename in grablines(pmask_filename,
            recursive=1, remember_source_file=True):
            if node is None or node[0] != recursive_filename:
                node = (recursive_filename, [])
                pmasklists.append(node)
            node[1].append(l)

    pmaskdict = settings._mask_manager._pmaskdict
    if mycp in pmaskdict:
        for x in pmaskdict[mycp]:
            if match_from_list(x, cpv_slot_list):
                x = x.without_repo
                for pmask in pmasklists:
                    comment = ""
                    comment_valid = -1
                    pmask_filename = pmask[0]
                    for i in range(len(pmask[1])):
                        l = pmask[1][i].strip()
                        try:
                            l_atom = Atom(l, allow_repo=True,
                                allow_wildcard=True).without_repo
                        except InvalidAtom:
                            l_atom = None
                        if l == "":
                            # Blank line ends the current comment block.
                            comment = ""
                            comment_valid = -1
                        elif l[0] == "#":
                            comment += (l+"\n")
                            comment_valid = i + 1
                        elif l_atom == x:
                            if comment_valid != i:
                                comment = ""
                            if return_location:
                                return (comment, pmask_filename)
                            else:
                                return comment
                        elif comment_valid != -1:
                            # Apparently this comment applies to multiple masks, so
                            # it remains valid until a blank line is encountered.
                            comment_valid += 1
    if return_location:
        return (None, None)
    else:
        return None
def check(self, **kwargs):
    """
    Scan the tree for installed packages and metadata affected by
    pending package moves/slotmoves and return a list of error
    strings describing each affected item.

    @param onProgress: optional callback invoked as
        onProgress(maxval, curval) to report progress
    @return: list of error message strings
    """
    onProgress = kwargs.get('onProgress', None)
    allupdates, errors = self._grab_global_updates()
    # Matching packages and moving them is relatively fast, so the
    # progress bar is updated in indeterminate mode.
    match = self._tree.dbapi.match
    aux_get = self._tree.dbapi.aux_get
    pkg_str = self._tree.dbapi._pkg_str
    settings = self._tree.dbapi.settings
    if onProgress:
        onProgress(0, 0)
    for repo, updates in allupdates.items():
        if repo == 'DEFAULT':
            continue
        if not updates:
            continue

        def repo_match(repository):
            # An update applies to packages from its own repo, and to
            # packages from repos without updates when this repo is
            # the master.
            return repository == repo or \
                (repo == self._master_repo and \
                repository not in allupdates)

        # NOTE: original code used "for i, update_cmd in
        # enumerate(updates)" with an unused index; iterate directly.
        for update_cmd in updates:
            if update_cmd[0] == "move":
                origcp, newcp = update_cmd[1:]
                for cpv in match(origcp):
                    try:
                        cpv = pkg_str(cpv, origcp.repo)
                    except (KeyError, InvalidData):
                        continue
                    if repo_match(cpv.repo):
                        errors.append("'%s' moved to '%s'" % (cpv, newcp))
            elif update_cmd[0] == "slotmove":
                pkg, origslot, newslot = update_cmd[1:]
                atom = pkg.with_slot(origslot)
                for cpv in match(atom):
                    try:
                        cpv = pkg_str(cpv, atom.repo)
                    except (KeyError, InvalidData):
                        continue
                    if repo_match(cpv.repo):
                        errors.append("'%s' slot moved from '%s' to '%s'" % \
                            (cpv, origslot, newslot))
        if onProgress:
            onProgress(0, 0)

    # Searching for updates in all the metadata is relatively slow, so this
    # is where the progress bar comes out of indeterminate mode.
    cpv_all = self._tree.dbapi.cpv_all()
    cpv_all.sort()
    maxval = len(cpv_all)
    meta_keys = self._update_keys + self._portdb._pkg_str_aux_keys
    if onProgress:
        onProgress(maxval, 0)
    for i, cpv in enumerate(cpv_all):
        try:
            metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
        except KeyError:
            continue
        try:
            pkg = _pkg_str(cpv, metadata=metadata, settings=settings)
        except InvalidData:
            continue
        # Restrict metadata to the keys update_dbentries operates on.
        metadata = dict((k, metadata[k]) for k in self._update_keys)
        try:
            updates = allupdates[pkg.repo]
        except KeyError:
            try:
                updates = allupdates['DEFAULT']
            except KeyError:
                continue
        if not updates:
            continue
        metadata_updates = \
            portage.update_dbentries(updates, metadata, parent=pkg)
        if metadata_updates:
            errors.append("'%s' has outdated metadata" % cpv)
        if onProgress:
            onProgress(maxval, i+1)
    return errors
def getUseForce(self, pkg=None):
    """
    Return the frozenset of forced USE flags that apply to pkg.

    With pkg=None, return the globally forced flags (profile
    use.force stack only). Otherwise layer, in increasing priority:
    repository-level force files (masters first), per-package repo
    entries, then the profile stack and its per-package entries,
    including the stable variants when the package is stable.
    """
    if pkg is None:
        # No package context: stack only the profile use.force lists.
        return frozenset(stack_lists(self._useforce_list, incremental=True))

    cp = getattr(pkg, "cp", None)
    if cp is None:
        # pkg is a plain dep string; wrap it so .cp is available.
        pkg = _pkg_str(remove_slot(pkg),
            slot=dep_getslot(pkg), repo=dep_getrepo(pkg))
        cp = pkg.cp

    try:
        is_stable = pkg.stable
    except AttributeError:
        # KEYWORDS is unavailable (prior to "depend" phase)
        is_stable = False

    layers = []

    def _extend_matched(cpdict):
        # Append per-package entries matched against pkg, ordered so
        # that more specific atoms come last (highest priority).
        if cpdict:
            matched = ordered_by_atom_specificity(cpdict, pkg)
            if matched:
                layers.extend(matched)

    if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
        repo_names = []
        try:
            repo_names.extend(master.name
                for master in self.repositories[pkg.repo].masters)
        except KeyError:
            pass
        repo_names.append(pkg.repo)
        for repo_name in repo_names:
            layers.append(self._repo_useforce_dict.get(repo_name, {}))
            if is_stable:
                layers.append(
                    self._repo_usestableforce_dict.get(repo_name, {}))
            _extend_matched(
                self._repo_puseforce_dict.get(repo_name, {}).get(cp))
            if is_stable:
                _extend_matched(
                    self._repo_pusestableforce_dict.get(
                        repo_name, {}).get(cp))

    for idx, puseforce_dict in enumerate(self._puseforce_list):
        if self._useforce_list[idx]:
            layers.append(self._useforce_list[idx])
        if is_stable and self._usestableforce_list[idx]:
            layers.append(self._usestableforce_list[idx])
        _extend_matched(puseforce_dict.get(cp))
        if is_stable:
            _extend_matched(self._pusestableforce_list[idx].get(cp))

    return frozenset(stack_lists(layers, incremental=True))
def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
    """
    Return a list of _MaskReason instances describing why the given
    package is masked, or an empty list when it is not masked.

    @param mycpv: cpv string or a Package instance (as passed by emerge)
    @param settings: config instance used for mask/keyword/license checks
    @param portdb: portdbapi used to fetch metadata when none is supplied
    @param myrepo: optional repository name for aux_get lookups
    @raise ValueError: if mycpv is not a valid CPV
    """
    metadata = None
    installed = False
    if not isinstance(mycpv, basestring):
        # emerge passed in a Package instance
        pkg = mycpv
        mycpv = pkg.cpv
        metadata = pkg._metadata
        installed = pkg.installed

    if metadata is None:
        db_keys = list(portdb._aux_cache_keys)
        try:
            metadata = dict(zip(db_keys,
                portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
        except KeyError:
            if not portdb.cpv_exists(mycpv):
                raise
            # The cpv exists but its metadata is unreadable.
            return [_MaskReason("corruption", "corruption")]
        if "?" in metadata["LICENSE"]:
            # LICENSE contains USE conditionals, so compute USE for
            # this cpv before evaluating it.
            settings.setcpv(mycpv, mydb=metadata)
            metadata["USE"] = settings["PORTAGE_USE"]
        else:
            metadata["USE"] = ""

    try:
        mycpv.slot
    except AttributeError:
        # Plain string cpv; wrap it so slot/repo attributes exist.
        try:
            mycpv = _pkg_str(mycpv, metadata=metadata, settings=settings)
        except portage.exception.InvalidData:
            raise ValueError(_("invalid CPV: %s") % mycpv)

    rValue = []

    # package.mask checking
    if settings._getMaskAtom(mycpv, metadata):
        rValue.append(_MaskReason("package.mask", "package.mask",
            _UnmaskHint("p_mask", None)))

    # keywords checking
    eapi = metadata["EAPI"]
    mygroups = settings._getKeywords(mycpv, metadata)
    licenses = metadata["LICENSE"]
    properties = metadata["PROPERTIES"]
    # NOTE: the original also read metadata["RESTRICT"] into an unused
    # local; the RESTRICT check below goes through settings instead.
    if not eapi_is_supported(eapi):
        return [_MaskReason("EAPI", "EAPI %s" % eapi)]
    elif _eapi_is_deprecated(eapi) and not installed:
        return [_MaskReason("EAPI", "EAPI %s" % eapi)]
    egroups = settings.configdict["backupenv"].get(
        "ACCEPT_KEYWORDS", "").split()
    global_accept_keywords = settings.get("ACCEPT_KEYWORDS", "")
    pgroups = global_accept_keywords.split()
    myarch = settings["ARCH"]
    if pgroups and myarch not in pgroups:
        # For operating systems other than Linux, ARCH is not
        # necessarily a valid keyword.
        myarch = pgroups[0].lstrip("~")

    # NOTE: This logic is copied from KeywordsManager.getMissingKeywords().
    unmaskgroups = settings._keywords_manager.getPKeywords(mycpv,
        metadata["SLOT"], metadata["repository"], global_accept_keywords)
    pgroups.extend(unmaskgroups)
    if unmaskgroups or egroups:
        pgroups = settings._keywords_manager._getEgroups(egroups, pgroups)
    else:
        pgroups = set(pgroups)

    kmask = "missing"
    kmask_hint = None

    if '**' in pgroups:
        # "**" accepts any keyword (including none).
        kmask = None
    else:
        for keyword in pgroups:
            if keyword in mygroups:
                kmask = None
                break

    if kmask:
        for gp in mygroups:
            if gp == "*":
                kmask = None
                break
            elif gp == "~*":
                # "~*" matches when any unstable keyword is accepted.
                for x in pgroups:
                    if x[:1] == "~":
                        kmask = None
                        break
                if kmask is None:
                    break
            elif gp == "-" + myarch and myarch in pgroups:
                kmask = "-" + myarch
                break
            elif gp == "~" + myarch and myarch in pgroups:
                kmask = "~" + myarch
                kmask_hint = _UnmaskHint("unstable keyword", kmask)
                break

    if kmask == "missing":
        kmask_hint = _UnmaskHint("unstable keyword", "**")

    try:
        missing_licenses = settings._getMissingLicenses(mycpv, metadata)
        if missing_licenses:
            # Keep only the structure tokens and the missing licenses,
            # so the message shows how the missing ones relate.
            allowed_tokens = set(["||", "(", ")"])
            allowed_tokens.update(missing_licenses)
            license_split = licenses.split()
            license_split = [x for x in license_split \
                if x in allowed_tokens]
            msg = license_split[:]
            msg.append("license(s)")
            rValue.append(_MaskReason("LICENSE", " ".join(msg),
                _UnmaskHint("license", set(missing_licenses))))
    except portage.exception.InvalidDependString as e:
        rValue.append(_MaskReason("invalid", "LICENSE: " + str(e)))

    try:
        missing_properties = settings._getMissingProperties(mycpv, metadata)
        if missing_properties:
            allowed_tokens = set(["||", "(", ")"])
            allowed_tokens.update(missing_properties)
            properties_split = properties.split()
            properties_split = [x for x in properties_split \
                if x in allowed_tokens]
            msg = properties_split[:]
            msg.append("properties")
            rValue.append(_MaskReason("PROPERTIES", " ".join(msg)))
    except portage.exception.InvalidDependString as e:
        rValue.append(_MaskReason("invalid", "PROPERTIES: " + str(e)))

    try:
        missing_restricts = settings._getMissingRestrict(mycpv, metadata)
        if missing_restricts:
            msg = list(missing_restricts)
            msg.append("in RESTRICT")
            rValue.append(_MaskReason("RESTRICT", " ".join(msg)))
    except InvalidDependString as e:
        rValue.append(_MaskReason("invalid", "RESTRICT: %s" % (e,)))

    # Only show KEYWORDS masks for installed packages
    # if they're not masked for any other reason.
    if kmask and (not installed or not rValue):
        rValue.append(_MaskReason("KEYWORDS",
            kmask + " keyword", unmask_hint=kmask_hint))

    return rValue
def fix(self, **kwargs):
    """
    Regenerate stale or outdated entries in the binary package index.

    First scans without a lock to decide whether any work is needed;
    if so, re-scans with the pkgindex lock held (for a consistent
    view), rebuilds missing/outdated entries, drops stale ones, and
    writes the index back.

    @param onProgress: optional callback invoked as
        onProgress(maxval, curval) to report progress
    @return: (True, None) tuple as expected by the emaint framework
    """
    onProgress = kwargs.get('onProgress', None)
    bintree = self._bintree
    _instance_key = bintree.dbapi._instance_key

    def scan(pkgindex, cpv_all):
        # Build the {instance_key: entry} map from the index, recording
        # index entries whose package no longer exists (stale) and
        # packages whose entry is absent or outdated (missing).
        missing = []
        stale = []
        metadata = {}
        for d in pkgindex.packages:
            cpv = _pkg_str(d["CPV"], metadata=d,
                settings=bintree.settings)
            d["CPV"] = cpv
            metadata[_instance_key(cpv)] = d
            if not bintree.dbapi.cpv_exists(cpv):
                stale.append(cpv)
        for cpv in cpv_all:
            d = metadata.get(_instance_key(cpv))
            if not d or self._need_update(cpv, d):
                missing.append(cpv)
        return missing, stale, metadata

    cpv_all = self._bintree.dbapi.cpv_all()
    cpv_all.sort()
    maxval = 0
    if onProgress:
        onProgress(maxval, 0)
    # Unlocked pre-scan: only take the lock when there is work to do.
    missing, stale, metadata = scan(self._pkgindex, cpv_all)

    if missing or stale:
        from portage import locks
        pkgindex_lock = locks.lockfile(
            self._pkgindex_file, wantnewlockfile=1)
        try:
            # Repopulate with lock held, then recount stale/missing so
            # we act on a consistent view of the index.
            bintree._populate()
            cpv_all = self._bintree.dbapi.cpv_all()
            cpv_all.sort()
            pkgindex = bintree._load_pkgindex()
            self._pkgindex = pkgindex
            missing, stale, metadata = scan(pkgindex, cpv_all)

            maxval = len(missing)
            for i, cpv in enumerate(missing):
                d = bintree._pkgindex_entry(cpv)
                try:
                    bintree._eval_use_flags(cpv, d)
                except portage.exception.InvalidDependString:
                    writemsg("!!! Invalid binary package: '%s'\n" % \
                        bintree.getname(cpv), noiselevel=-1)
                else:
                    metadata[_instance_key(cpv)] = d
                if onProgress:
                    onProgress(maxval, i+1)

            for cpv in stale:
                del metadata[_instance_key(cpv)]

            # We've updated the pkgindex, so set it to
            # repopulate when necessary.
            bintree.populated = False

            del pkgindex.packages[:]
            pkgindex.packages.extend(metadata.values())
            bintree._pkgindex_write(self._pkgindex)
        finally:
            locks.unlockfile(pkgindex_lock)

    if onProgress:
        if maxval == 0:
            maxval = 1
        onProgress(maxval, maxval)
    return (True, None)
def __init__(self, **kwargs):
    """
    Initialize a Package instance from keyword arguments.

    Consumes a 'metadata' keyword argument (raw metadata mapping) and
    passes the remaining keyword arguments to the Task/SlotObject
    constructor, then derives cpv/slot/IUSE related attributes from
    the metadata.
    """
    metadata = _PackageMetadataWrapperBase(kwargs.pop('metadata'))
    Task.__init__(self, **kwargs)
    # The SlotObject constructor assigns self.root_config from keyword
    # args; it is an _emerge.RootConfig.RootConfig instance.
    self.root = self.root_config.root
    self._raw_metadata = metadata
    self._metadata = _PackageMetadataWrapper(self, metadata)
    if not self.built:
        # For unbuilt packages, CHOST comes from the current settings.
        self._metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
    eapi_attrs = _get_eapi_attrs(self.eapi)

    try:
        db = self.cpv._db
    except AttributeError:
        if self.built:
            # For independence from the source ebuild repository and
            # profile implicit IUSE state, require the _db attribute
            # for built packages.
            raise
        db = self.root_config.trees['porttree'].dbapi

    # Replace the plain cpv with a validated _pkg_str carrying
    # slot/repo/version attributes.
    self.cpv = _pkg_str(self.cpv, metadata=self._metadata,
        settings=self.root_config.settings, db=db)
    if hasattr(self.cpv, 'slot_invalid'):
        self._invalid_metadata('SLOT.invalid',
            "SLOT: invalid value: '%s'" % self._metadata["SLOT"])
    # Cache commonly used cpv-derived attributes on the Package itself.
    self.cpv_split = self.cpv.cpv_split
    self.category, self.pf = portage.catsplit(self.cpv)
    self.cp = self.cpv.cp
    self.version = self.cpv.version
    self.slot = self.cpv.slot
    self.sub_slot = self.cpv.sub_slot
    self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
    # sync metadata with validated repo (may be UNKNOWN_REPO)
    self._metadata['repository'] = self.cpv.repo

    if self.root_config.settings.local_config:
        implicit_match = db._iuse_implicit_cnstr(self.cpv, self._metadata)
    else:
        implicit_match = db._repoman_iuse_implicit_cnstr(self.cpv, self._metadata)
    usealiases = self.root_config.settings._use_manager.getUseAliases(self)
    self.iuse = self._iuse(self, self._metadata["IUSE"].split(),
        implicit_match, usealiases, self.eapi)

    if (self.iuse.enabled or self.iuse.disabled) and \
        not eapi_attrs.iuse_defaults:
        if not self.installed:
            self._invalid_metadata('EAPI.incompatible',
                "IUSE contains defaults, but EAPI doesn't allow them")
    if self.inherited is None:
        self.inherited = frozenset()

    if self.operation is None:
        # Installed or deps-only packages have nothing to merge.
        if self.onlydeps or self.installed:
            self.operation = "nomerge"
        else:
            self.operation = "merge"

    self._hash_key = Package._gen_hash_key(cpv=self.cpv,
        installed=self.installed, onlydeps=self.onlydeps,
        operation=self.operation, repo_name=self.cpv.repo,
        root_config=self.root_config,
        type_name=self.type_name)
    self._hash_value = hash(self._hash_key)