def get_package_domain(self, pkg):
    """Get domain object with altered settings from matching package.env entries."""
    # return the cached per-pkg domain if one was already built
    cached = getattr(pkg, '_domain', None)
    if cached is not None:
        return cached

    # gather every package.env file whose restriction matches this pkg
    env_files = [
        path
        for restrict, paths in self.pkg_env
        if restrict.match(pkg)
        for path in paths
    ]
    if not env_files:
        # nothing matched; the main domain applies unchanged
        return self

    # overlay each matching file onto a copy of the pristine settings
    pkg_settings = dict(self._settings.orig.items())
    for conf_path in env_files:
        PortageConfig.load_make_conf(
            pkg_settings, conf_path, allow_sourcing=True,
            allow_recurse=False, incrementals=True)

    # TODO: Improve pkg domain vs main domain proxying, e.g. static
    # jitted attrs should always be generated and pulled from the main
    # domain obj; however, currently each pkg domain instance gets its
    # own copy so values collapsed on the pkg domain instance aren't
    # propagated back to the main domain leading to regen per pkg if
    # requested.
    pkg_domain = copy.copy(self)
    pkg_domain._settings = ProtectedDict(pkg_settings)

    # reset jitted attrs so they can pull the updated settings
    for jit_attr in dir(self):
        if jit_attr.startswith('_jit_reset_'):
            setattr(pkg_domain, jit_attr, None)

    # cache the altered domain on the pkg obj to avoid rebuilding it
    object.__setattr__(pkg, "_domain", pkg_domain)
    return pkg_domain
def __setitem__(self, cpv, values):
    """set a cpv to values

    This shouldn't be overridden in derived classes since it handles the
    readonly checks.

    :param cpv: cat/pkg-ver key to store
    :param values: mapping of metadata key -> value to serialize
    :raises errors.ReadOnly: if the cache is not writable
    """
    if self.readonly:
        raise errors.ReadOnly()
    d = ProtectedDict(values)

    if self.cleanse_keys:
        # snapshot the keys: deleting from d while iterating its live
        # key view would invalidate the iterator
        for k in list(d.keys()):
            if not d[k]:
                del d[k]
        # check d, not values: an empty _eclasses_ entry may have just
        # been cleansed from d, and reading it back would raise KeyError
        if "_eclasses_" in d:
            d["_eclasses_"] = self.deconstruct_eclasses(d["_eclasses_"])
    elif "_eclasses_" in values:
        d["_eclasses_"] = self.deconstruct_eclasses(d["_eclasses_"])

    d[self._chf_key] = self._chf_serializer(d.pop('_chf_'))
    self._setitem(cpv, d)
    self._sync_if_needed(True)
def __setitem__(self, cpv, values):
    """set a cpv to values

    This shouldn't be overridden in derived classes since it handles the
    readonly checks.

    :param cpv: cat/pkg-ver key to store
    :param values: mapping of metadata key -> value to serialize
    :raises errors.ReadOnly: if the cache is not writable
    """
    if self.readonly:
        raise errors.ReadOnly()
    d = ProtectedDict(values)

    if self.cleanse_keys:
        # materialize the keys first: the original iterated d.iterkeys()
        # while deleting, which breaks iteration mid-loop
        for k in list(d.keys()):
            if not d[k]:
                del d[k]
        # check d, not values: an empty _eclasses_ entry may have just
        # been cleansed from d, and reading it back would raise KeyError
        if "_eclasses_" in d:
            d["_eclasses_"] = self.deconstruct_eclasses(d["_eclasses_"])
    elif "_eclasses_" in values:
        d["_eclasses_"] = self.deconstruct_eclasses(d["_eclasses_"])

    d[self._chf_key] = self._chf_serializer(d.pop('_chf_'))
    self._setitem(cpv, d)
    self._sync_if_needed(True)
def _setitem(self, cpv, values):
    """Serialize *values* for *cpv* to a temp file, then rename it into place.

    :param cpv: cat/pkg-ver key being written
    :param values: mapping of metadata key -> value
    :raises errors.CacheCorruption: if the temp file cannot be created,
        or the final rename fails
    """
    values = ProtectedDict(values)

    # hack. proper solution is to make this a __setitem__ override, since
    # template.__setitem__ serializes _eclasses_, then we reconstruct it.
    eclasses = values.pop('_eclasses_', None)
    if eclasses is not None:
        eclasses = self.reconstruct_eclasses(cpv, eclasses)
        values["INHERITED"] = ' '.join(eclasses)

    s = cpv.rfind('/')
    fp = pjoin(
        self.location, cpv[:s],
        f'.update.{os.getpid()}.{cpv[s+1:]}')
    try:
        myf = open(fp, "w")
    except FileNotFoundError:
        # parent category dir is missing; create it and retry once
        try:
            self._ensure_dirs(cpv)
            myf = open(fp, "w")
        except EnvironmentError as e:
            raise errors.CacheCorruption(cpv, e) from e
    except EnvironmentError as e:
        raise errors.CacheCorruption(cpv, e) from e

    # use a context manager so the fd isn't leaked if a write() raises
    with myf:
        count = 0
        for idx, key in self.hardcoded_auxdbkeys_order:
            myf.write("%s%s" % ("\n" * (idx - count), values.get(key, "")))
            count = idx
        myf.write("\n" * (self.magic_line_count - count))

    self._set_mtime(fp, values, eclasses)

    # update written, now we move it
    new_fp = pjoin(self.location, cpv)
    try:
        os.rename(fp, new_fp)
    except EnvironmentError as e:
        os.remove(fp)
        raise errors.CacheCorruption(cpv, e) from e
def _setitem(self, cpv, values):
    """Write *values* for *cpv* to a temp file, then atomically rename it."""
    values = ProtectedDict(values)

    # hack. proper solution is to make this a __setitem__ override, since
    # template.__setitem__ serializes _eclasses_, then we reconstruct it.
    eclasses = values.pop('_eclasses_', None)
    if eclasses is not None:
        eclasses = self.reconstruct_eclasses(cpv, eclasses)
        values["INHERITED"] = ' '.join(eclasses)

    sep = cpv.rfind("/")
    tmp_fp = pjoin(
        self.location, cpv[:sep],
        ".update.%i.%s" % (os.getpid(), cpv[sep+1:]))
    try:
        handle = open(tmp_fp, "w")
    except EnvironmentError as e:
        if errno.ENOENT != e.errno:
            raise_from(errors.CacheCorruption(cpv, e))
        # the category dir didn't exist yet; create it and retry once
        try:
            self._ensure_dirs(cpv)
            handle = open(tmp_fp, "w")
        except EnvironmentError as e:
            raise_from(errors.CacheCorruption(cpv, e))

    # emit each key at its hardcoded line offset, padding with newlines
    written = 0
    for idx, key in self.hardcoded_auxdbkeys_order:
        handle.write("%s%s" % ("\n" * (idx - written), values.get(key, "")))
        written = idx
    handle.write("\n" * (self.magic_line_count - written))
    handle.close()
    self._set_mtime(tmp_fp, values, eclasses)

    # finished file gets swapped into its final location
    new_fp = pjoin(self.location, cpv)
    try:
        os.rename(tmp_fp, new_fp)
    except EnvironmentError as e:
        os.remove(tmp_fp)
        raise_from(errors.CacheCorruption(cpv, e))
def _setitem(self, cpv, values): values = ProtectedDict(values) # hack. proper solution is to make this a __setitem__ override, since # template.__setitem__ serializes _eclasses_, then we reconstruct it. eclasses = values.pop('_eclasses_', None) if eclasses is not None: eclasses = self.reconstruct_eclasses(cpv, eclasses) values["INHERITED"] = ' '.join(eclasses) s = cpv.rfind("/") fp = pjoin( self.location, cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:])) try: myf = open(fp, "w") except EnvironmentError, e: if errno.ENOENT != e.errno: raise_from(errors.CacheCorruption(cpv, e)) try: self._ensure_dirs(cpv) myf = open(fp,"w") except EnvironmentError, e: raise_from(errors.CacheCorruption(cpv, e))
def __init__(self, profile, repos, vdb, name=None, root='/',
             config_dir='/etc/portage', prefix='/', *,
             fetcher, **settings):
    """Initialize the domain from profile, repo, and make.conf data."""
    # plain attribute wiring
    self.name = name
    self.config_dir = config_dir
    self.prefix = prefix
    self.profile = profile
    self.fetcher = fetcher
    self.__repos = repos
    self.__vdb = vdb

    # derived values
    self.root = settings["ROOT"] = root
    self.ebuild_hook_dir = pjoin(self.config_dir, 'env')

    # prevent critical variables from being changed in make.conf
    blocked = self.profile.profile_only_variables.intersection(settings.keys())
    for var in blocked:
        del settings[var]

    # Protect original settings from being overridden so matching
    # package.env settings can be overlaid properly.
    self._settings = ProtectedDict(settings)
def __init__(self, profile, repositories, vdb, name=None, root='/',
             prefix='/', incrementals=const.incrementals, triggers=(),
             **settings):
    """Build a domain: stack profile + make.conf settings, configure repos.

    :param profile: profile object supplying defaults, masks, and use data
    :param repositories: raw (unconfigured) package repositories
    :param vdb: installed-package (vdb) repositories
    :param name: optional domain name
    :param root: filesystem root the domain operates on
    :param prefix: installation prefix
    :param incrementals: variable names treated as incremental stacks
    :param triggers: lazy trigger refs stored for later instantiation
    :param settings: remaining make.conf-style key/value settings
    """
    # voodoo, unfortunately (so it goes)
    # break this up into chunks once it's stabilized (most of code
    # here has already, but still more to add)
    self._triggers = triggers
    self.name = name

    # prevent critical variables from being changed in make.conf
    for k in profile.profile_only_variables.intersection(settings.keys()):
        del settings[k]

    if 'CHOST' in settings and 'CBUILD' not in settings:
        settings['CBUILD'] = settings['CHOST']

    # if unset, MAKEOPTS defaults to CPU thread count
    if 'MAKEOPTS' not in settings:
        settings['MAKEOPTS'] = '-j%i' % cpu_count()

    # map out sectionname -> config manager immediately.
    repositories_collapsed = [r.collapse() for r in repositories]
    repositories = [r.instantiate() for r in repositories_collapsed]

    self.fetcher = settings.pop("fetcher")
    self.default_licenses_manager = OverlayedLicenses(*repositories)

    vdb_collapsed = [r.collapse() for r in vdb]
    vdb = [r.instantiate() for r in vdb_collapsed]
    self.repos_raw = {
        collapsed.name: repo for (collapsed, repo) in izip(
            repositories_collapsed, repositories)}
    self.repos_raw.update(
        (collapsed.name, repo) for (collapsed, repo) in izip(
            vdb_collapsed, vdb))
    # drop any unnamed section (collapsed.name may be None)
    self.repos_raw.pop(None, None)

    if profile.provides_repo is not None:
        self.repos_raw['package.provided'] = profile.provides_repo
        vdb.append(profile.provides_repo)

    self.profile = profile
    pkg_masks, pkg_unmasks, pkg_keywords, pkg_licenses = [], [], [], []
    pkg_use, self.bashrcs = [], []

    self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

    # parse each package.* config file into its accumulator list
    for key, val, action in (
            ("package.mask", pkg_masks, parse_match),
            ("package.unmask", pkg_unmasks, parse_match),
            ("package.keywords", pkg_keywords, package_keywords_splitter),
            ("package.accept_keywords", pkg_keywords, package_keywords_splitter),
            ("package.license", pkg_licenses, package_keywords_splitter),
            ("package.use", pkg_use, package_keywords_splitter),
            ("package.env", self.bashrcs, package_env_splitter),
            ):
        for fp in settings.pop(key, ()):
            try:
                if key == "package.env":
                    base = self.ebuild_hook_dir
                    if base is None:
                        base = os.path.dirname(fp)
                    action = partial(action, base)
                # fp may be a directory; scan it, skipping non-regular
                # files and hidden path components
                for fs_obj in iter_scan(fp, follow_symlinks=True):
                    if not fs_obj.is_reg or '/.' in fs_obj.location:
                        continue
                    val.extend(
                        action(x) for x in
                        iter_read_bash(fs_obj.location, allow_line_cont=True))
            except EnvironmentError as e:
                if e.errno == errno.ENOENT:
                    raise MissingFile(fp, key)
                raise_from(Failure("failed reading '%s': %s" % (fp, e)))
            except ValueError as e:
                raise_from(Failure("failed reading '%s': %s" % (fp, e)))

    # normalize string-valued incrementals into tuples
    for x in incrementals:
        if isinstance(settings.get(x), basestring):
            settings[x] = tuple(settings[x].split())

    # roughly... all incremental stacks should be interpreted left -> right
    # as such we start with the profile settings, and append ours onto it.
    for k, v in profile.default_env.iteritems():
        if k not in settings:
            settings[k] = v
            continue
        if k in incrementals:
            settings[k] = v + tuple(settings[k])

    # next we finalize incrementals.
    for incremental in incrementals:
        # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
        # negations currently so that pkg iuse induced enablings can be
        # disabled by negations. For example, think of the profile doing
        # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
        # skipped because negations are required for license filtering.
        if incremental not in settings or incremental in ("USE", "ACCEPT_LICENSE"):
            continue
        s = set()
        incremental_expansion(
            s, settings[incremental],
            'While expanding %s ' % (incremental,))
        settings[incremental] = tuple(s)

    # use is collapsed; now stack use_expand.
    use = settings['USE'] = set(optimize_incrementals(
        list(settings.get('USE', ())) + os.environ.get('USE', '').split()))

    self._extend_use_for_features(use, settings.get("FEATURES", ()))

    # expand each USE_EXPAND var into prefixed use flags (e.g. video_cards_*)
    for u in profile.use_expand:
        v = settings.get(u)
        if v is None:
            continue
        u2 = u.lower()+"_"
        use.update(u2 + x for x in v.split())

    if 'ACCEPT_KEYWORDS' not in settings:
        raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                      "or user config")
    s = set()
    default_keywords = []
    incremental_expansion(
        s, settings['ACCEPT_KEYWORDS'],
        'while expanding ACCEPT_KEYWORDS')
    default_keywords.extend(s)
    settings['ACCEPT_KEYWORDS'] = set(default_keywords)

    self.use = use

    if "ARCH" not in settings:
        raise Failure(
            "No ARCH setting detected from profile, or user config")

    self.arch = self.stable_arch = settings["ARCH"]
    self.unstable_arch = "~%s" % self.arch

    # ~amd64 -> [amd64, ~amd64]
    for x in default_keywords[:]:
        if x.startswith("~"):
            default_keywords.append(x.lstrip("~"))
    default_keywords = unstable_unique(default_keywords + [self.arch])

    accept_keywords = pkg_keywords + list(profile.accept_keywords)
    vfilters = [self.make_keywords_filter(
        self.arch, default_keywords, accept_keywords, profile.keywords,
        incremental="package.keywords" in incrementals)]

    del default_keywords, accept_keywords

    # we can finally close that fricking
    # "DISALLOW NON FOSS LICENSES" bug via this >:)
    master_license = []
    master_license.extend(settings.get('ACCEPT_LICENSE', ()))
    if master_license or pkg_licenses:
        vfilters.append(self.make_license_filter(master_license, pkg_licenses))

    del master_license

    # if it's made it this far...
    self.root = settings["ROOT"] = root
    self.prefix = prefix
    self.settings = ProtectedDict(settings)

    for data in self.settings.get('bashrc', ()):
        source = local_source(data)
        # this is currently local-only so a path check is ok
        # TODO make this more general
        if not os.path.exists(source.path):
            raise Failure(
                'user-specified bashrc %r does not exist' % (data,))
        self.bashrcs.append((packages.AlwaysTrue, source))

    # stack use stuff first, then profile.
    self.enabled_use = ChunkedDataDict()
    self.enabled_use.add_bare_global(*split_negations(self.use))
    self.enabled_use.merge(profile.pkg_use)
    self.enabled_use.update_from_stream(
        chunked_data(k, *split_negations(v)) for k, v in pkg_use)

    # build both the regular and stable_ variants of forced/disabled use
    for attr in ('', 'stable_'):
        c = ChunkedDataDict()
        c.merge(getattr(profile, attr + 'forced_use'))
        c.add_bare_global((), (self.arch,))
        setattr(self, attr + 'forced_use', c)

        c = ChunkedDataDict()
        c.merge(getattr(profile, attr + 'masked_use'))
        setattr(self, attr + 'disabled_use', c)

    self.repos = []
    self.vdb = []
    self.repos_configured = {}
    self.repos_configured_filtered = {}

    rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

    profile_masks = profile._incremental_masks()
    profile_unmasks = profile._incremental_unmasks()
    repo_masks = {r.repo_id: r._visibility_limiters() for r in repositories}

    # configure each repo, applying visibility filtering to non-vdb repos
    for l, repos, filtered in ((self.repos, repositories, True),
                               (self.vdb, vdb, False)):
        for repo in repos:
            if not repo.configured:
                # resolve each declared configurable into its value
                pargs = [repo]
                try:
                    for x in repo.configurables:
                        if x == "domain":
                            pargs.append(self)
                        elif x == "settings":
                            pargs.append(settings)
                        elif x == "profile":
                            pargs.append(profile)
                        else:
                            pargs.append(getattr(self, x))
                except AttributeError as ae:
                    raise_from(Failure("failed configuring repo '%s': "
                                       "configurable missing: %s" % (repo, ae)))
                wrapped_repo = repo.configure(*pargs)
            else:
                wrapped_repo = repo
            key = rev_names.get(repo)
            self.repos_configured[key] = wrapped_repo
            if filtered:
                config = getattr(repo, 'config', None)
                masters = getattr(config, 'masters', ())
                if masters is None:
                    # tough cookies. If a user has an overlay, no masters
                    # defined, we're not applying the portdir masks.
                    # we do this both since that's annoying, and since
                    # frankly there isn't any good course of action.
                    masters = ()
                global_masks = [repo_masks.get(master, [(), ()])
                                for master in masters]
                global_masks.append(repo_masks[repo.repo_id])
                global_masks.extend(profile_masks)
                masks = set()
                for neg, pos in global_masks:
                    masks.difference_update(neg)
                    masks.update(pos)
                masks.update(pkg_masks)
                unmasks = set(chain(pkg_unmasks, *profile_unmasks))
                filtered = generate_filter(masks, unmasks, *vfilters)
            if filtered:
                wrapped_repo = visibility.filterTree(wrapped_repo, filtered, True)
            self.repos_configured_filtered[key] = wrapped_repo
            l.append(wrapped_repo)

    # matches "<USE_EXPAND name>_<flag>" with an optional +/- prefix;
    # longest names first so overlapping prefixes resolve correctly
    self.use_expand_re = re.compile(
        "^(?:[+-])?(%s)_(.*)$" %
        "|".join(x.lower() for x in sorted(profile.use_expand, reverse=True)))
class domain(config_domain):
    """Domain: a profile + repos + settings bound into one usable config."""

    # XXX ouch, verify this crap and add defaults and stuff
    _types = {
        'profile': 'ref:profile', 'fetcher': 'ref:fetcher',
        'repositories': 'lazy_refs:repo', 'vdb': 'lazy_refs:repo',
        'name': 'str', 'triggers': 'lazy_refs:trigger',
    }
    for _thing in list(const.incrementals) + ['bashrc']:
        _types[_thing] = 'list'
    for _thing in ('package.mask', 'package.keywords', 'package.license',
                   'package.use', 'package.unmask', 'package.env',
                   'package.accept_keywords'):
        _types[_thing] = 'list'
    for _thing in ('root', 'CHOST', 'CBUILD', 'CTARGET', 'CFLAGS', 'PATH',
                   'PORTAGE_TMPDIR', 'DISTCC_PATH', 'DISTCC_DIR', 'CCACHE_DIR'):
        _types[_thing] = 'str'

    # TODO this is missing defaults
    pkgcore_config_type = ConfigHint(
        _types, typename='domain',
        required=['repositories', 'profile', 'vdb', 'fetcher', 'name'],
        allow_unknowns=True)

    del _types, _thing

    def __init__(self, profile, repositories, vdb, name=None, root='/',
                 prefix='/', incrementals=const.incrementals, triggers=(),
                 **settings):
        """Stack profile + make.conf settings and configure all repos."""
        # voodoo, unfortunately (so it goes)
        # break this up into chunks once it's stabilized (most of code
        # here has already, but still more to add)
        self._triggers = triggers
        self.name = name

        # prevent critical variables from being changed in make.conf
        for k in profile.profile_only_variables.intersection(settings.keys()):
            del settings[k]

        if 'CHOST' in settings and 'CBUILD' not in settings:
            settings['CBUILD'] = settings['CHOST']

        # if unset, MAKEOPTS defaults to CPU thread count
        if 'MAKEOPTS' not in settings:
            settings['MAKEOPTS'] = '-j%i' % cpu_count()

        # map out sectionname -> config manager immediately.
        repositories_collapsed = [r.collapse() for r in repositories]
        repositories = [r.instantiate() for r in repositories_collapsed]

        self.fetcher = settings.pop("fetcher")
        self.default_licenses_manager = OverlayedLicenses(*repositories)

        vdb_collapsed = [r.collapse() for r in vdb]
        vdb = [r.instantiate() for r in vdb_collapsed]
        self.repos_raw = {
            collapsed.name: repo for (collapsed, repo) in izip(
                repositories_collapsed, repositories)}
        self.repos_raw.update(
            (collapsed.name, repo) for (collapsed, repo) in izip(
                vdb_collapsed, vdb))
        # drop any unnamed section (collapsed.name may be None)
        self.repos_raw.pop(None, None)

        if profile.provides_repo is not None:
            self.repos_raw['package.provided'] = profile.provides_repo
            vdb.append(profile.provides_repo)

        self.profile = profile
        pkg_masks, pkg_unmasks, pkg_keywords, pkg_licenses = [], [], [], []
        pkg_use, self.bashrcs = [], []

        self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

        # parse each package.* config file into its accumulator list
        for key, val, action in (
                ("package.mask", pkg_masks, parse_match),
                ("package.unmask", pkg_unmasks, parse_match),
                ("package.keywords", pkg_keywords, package_keywords_splitter),
                ("package.accept_keywords", pkg_keywords, package_keywords_splitter),
                ("package.license", pkg_licenses, package_keywords_splitter),
                ("package.use", pkg_use, package_keywords_splitter),
                ("package.env", self.bashrcs, package_env_splitter),
                ):
            for fp in settings.pop(key, ()):
                try:
                    if key == "package.env":
                        base = self.ebuild_hook_dir
                        if base is None:
                            base = os.path.dirname(fp)
                        action = partial(action, base)
                    # fp may be a directory; scan it, skipping non-regular
                    # files and hidden path components
                    for fs_obj in iter_scan(fp, follow_symlinks=True):
                        if not fs_obj.is_reg or '/.' in fs_obj.location:
                            continue
                        val.extend(
                            action(x) for x in
                            iter_read_bash(fs_obj.location, allow_line_cont=True))
                except EnvironmentError as e:
                    if e.errno == errno.ENOENT:
                        raise MissingFile(fp, key)
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))
                except ValueError as e:
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))

        # normalize string-valued incrementals into tuples
        for x in incrementals:
            if isinstance(settings.get(x), basestring):
                settings[x] = tuple(settings[x].split())

        # roughly... all incremental stacks should be interpreted left -> right
        # as such we start with the profile settings, and append ours onto it.
        for k, v in profile.default_env.iteritems():
            if k not in settings:
                settings[k] = v
                continue
            if k in incrementals:
                settings[k] = v + tuple(settings[k])

        # next we finalize incrementals.
        for incremental in incrementals:
            # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
            # negations currently so that pkg iuse induced enablings can be
            # disabled by negations. For example, think of the profile doing
            # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
            # skipped because negations are required for license filtering.
            if incremental not in settings or incremental in ("USE", "ACCEPT_LICENSE"):
                continue
            s = set()
            incremental_expansion(
                s, settings[incremental],
                'While expanding %s ' % (incremental,))
            settings[incremental] = tuple(s)

        # use is collapsed; now stack use_expand.
        use = settings['USE'] = set(optimize_incrementals(
            list(settings.get('USE', ())) + os.environ.get('USE', '').split()))

        self._extend_use_for_features(use, settings.get("FEATURES", ()))

        # expand each USE_EXPAND var into prefixed use flags
        for u in profile.use_expand:
            v = settings.get(u)
            if v is None:
                continue
            u2 = u.lower()+"_"
            use.update(u2 + x for x in v.split())

        if 'ACCEPT_KEYWORDS' not in settings:
            raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                          "or user config")
        s = set()
        default_keywords = []
        incremental_expansion(
            s, settings['ACCEPT_KEYWORDS'],
            'while expanding ACCEPT_KEYWORDS')
        default_keywords.extend(s)
        settings['ACCEPT_KEYWORDS'] = set(default_keywords)

        self.use = use

        if "ARCH" not in settings:
            raise Failure(
                "No ARCH setting detected from profile, or user config")

        self.arch = self.stable_arch = settings["ARCH"]
        self.unstable_arch = "~%s" % self.arch

        # ~amd64 -> [amd64, ~amd64]
        for x in default_keywords[:]:
            if x.startswith("~"):
                default_keywords.append(x.lstrip("~"))
        default_keywords = unstable_unique(default_keywords + [self.arch])

        accept_keywords = pkg_keywords + list(profile.accept_keywords)
        vfilters = [self.make_keywords_filter(
            self.arch, default_keywords, accept_keywords, profile.keywords,
            incremental="package.keywords" in incrementals)]

        del default_keywords, accept_keywords

        # we can finally close that fricking
        # "DISALLOW NON FOSS LICENSES" bug via this >:)
        master_license = []
        master_license.extend(settings.get('ACCEPT_LICENSE', ()))
        if master_license or pkg_licenses:
            vfilters.append(self.make_license_filter(master_license, pkg_licenses))

        del master_license

        # if it's made it this far...
        self.root = settings["ROOT"] = root
        self.prefix = prefix
        self.settings = ProtectedDict(settings)

        for data in self.settings.get('bashrc', ()):
            source = local_source(data)
            # this is currently local-only so a path check is ok
            # TODO make this more general
            if not os.path.exists(source.path):
                raise Failure(
                    'user-specified bashrc %r does not exist' % (data,))
            self.bashrcs.append((packages.AlwaysTrue, source))

        # stack use stuff first, then profile.
        self.enabled_use = ChunkedDataDict()
        self.enabled_use.add_bare_global(*split_negations(self.use))
        self.enabled_use.merge(profile.pkg_use)
        self.enabled_use.update_from_stream(
            chunked_data(k, *split_negations(v)) for k, v in pkg_use)

        # build both the regular and stable_ variants of forced/disabled use
        for attr in ('', 'stable_'):
            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'forced_use'))
            c.add_bare_global((), (self.arch,))
            setattr(self, attr + 'forced_use', c)

            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'masked_use'))
            setattr(self, attr + 'disabled_use', c)

        self.repos = []
        self.vdb = []
        self.repos_configured = {}
        self.repos_configured_filtered = {}

        rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

        profile_masks = profile._incremental_masks()
        profile_unmasks = profile._incremental_unmasks()
        repo_masks = {r.repo_id: r._visibility_limiters() for r in repositories}

        # configure each repo, applying visibility filtering to non-vdb repos
        for l, repos, filtered in ((self.repos, repositories, True),
                                   (self.vdb, vdb, False)):
            for repo in repos:
                if not repo.configured:
                    # resolve each declared configurable into its value
                    pargs = [repo]
                    try:
                        for x in repo.configurables:
                            if x == "domain":
                                pargs.append(self)
                            elif x == "settings":
                                pargs.append(settings)
                            elif x == "profile":
                                pargs.append(profile)
                            else:
                                pargs.append(getattr(self, x))
                    except AttributeError as ae:
                        raise_from(Failure("failed configuring repo '%s': "
                                           "configurable missing: %s" % (repo, ae)))
                    wrapped_repo = repo.configure(*pargs)
                else:
                    wrapped_repo = repo
                key = rev_names.get(repo)
                self.repos_configured[key] = wrapped_repo
                if filtered:
                    config = getattr(repo, 'config', None)
                    masters = getattr(config, 'masters', ())
                    if masters is None:
                        # tough cookies. If a user has an overlay, no masters
                        # defined, we're not applying the portdir masks.
                        # we do this both since that's annoying, and since
                        # frankly there isn't any good course of action.
                        masters = ()
                    global_masks = [repo_masks.get(master, [(), ()])
                                    for master in masters]
                    global_masks.append(repo_masks[repo.repo_id])
                    global_masks.extend(profile_masks)
                    masks = set()
                    for neg, pos in global_masks:
                        masks.difference_update(neg)
                        masks.update(pos)
                    masks.update(pkg_masks)
                    unmasks = set(chain(pkg_unmasks, *profile_unmasks))
                    filtered = generate_filter(masks, unmasks, *vfilters)
                if filtered:
                    wrapped_repo = visibility.filterTree(wrapped_repo, filtered, True)
                self.repos_configured_filtered[key] = wrapped_repo
                l.append(wrapped_repo)

        # matches "<USE_EXPAND name>_<flag>" with an optional +/- prefix;
        # longest names first so overlapping prefixes resolve correctly
        self.use_expand_re = re.compile(
            "^(?:[+-])?(%s)_(.*)$" %
            "|".join(x.lower() for x in sorted(profile.use_expand, reverse=True)))

    def _extend_use_for_features(self, use_settings, features):
        """Mutate *use_settings* in place based on FEATURES entries."""
        # hackish implementation; if test is on, flip on the flag
        if "test" in features:
            use_settings.add("test")

        if "prefix" in features or "force-prefix" in features:
            use_settings.add("prefix")

    def make_license_filter(self, master_license, pkg_licenses):
        """Generates a restrict that matches iff the licenses are allowed."""
        return delegate(partial(
            self.apply_license_filter, master_license, pkg_licenses))

    def apply_license_filter(self, master_licenses, pkg_licenses, pkg, mode):
        """Determine if a package's license is allowed."""
        # note we're not honoring mode; it's always match.
        # reason is that of not turning on use flags to get acceptable license
        # pairs, maybe change this down the line?
        matched_pkg_licenses = []
        for atom, licenses in pkg_licenses:
            if atom.match(pkg):
                matched_pkg_licenses += licenses

        raw_accepted_licenses = master_licenses + matched_pkg_licenses
        license_manager = getattr(pkg.repo, 'licenses', self.default_licenses_manager)

        # accept the pkg if any one DNF clause of its LICENSE is fully accepted
        for and_pair in pkg.license.dnf_solutions():
            accepted = incremental_expansion_license(
                and_pair, license_manager.groups,
                raw_accepted_licenses,
                msg_prefix="while checking ACCEPT_LICENSE for %s" % (pkg,))
            if accepted.issuperset(and_pair):
                return True
        return False

    def make_keywords_filter(self, arch, default_keys, accept_keywords,
                             profile_keywords, incremental=False):
        """Generates a restrict that matches iff the keywords are allowed."""
        if not accept_keywords and not profile_keywords:
            return packages.PackageRestriction(
                "keywords", values.ContainmentMatch(*default_keys))

        if "~" + arch.lstrip("~") not in default_keys:
            # stable; thus empty entries == ~arch
            unstable = "~" + arch
            def f(r, v):
                if not v:
                    return r, unstable
                return r, v
            data = collapsed_restrict_to_data(
                ((packages.AlwaysTrue, default_keys),),
                (f(*i) for i in accept_keywords))
        else:
            if incremental:
                f = collapsed_restrict_to_data
            else:
                f = non_incremental_collapsed_restrict_to_data
            data = f(((packages.AlwaysTrue, default_keys),), accept_keywords)

        if incremental:
            raise NotImplementedError(self.incremental_apply_keywords_filter)
            #f = self.incremental_apply_keywords_filter
        else:
            f = self.apply_keywords_filter
        return delegate(partial(f, data, profile_keywords))

    @staticmethod
    def incremental_apply_keywords_filter(data, pkg, mode):
        # note we ignore mode; keywords aren't influenced by conditionals.
        # note also, we're not using a restriction here. this is faster.
        allowed = data.pull_data(pkg)
        return any(True for x in pkg.keywords if x in allowed)

    @staticmethod
    def apply_keywords_filter(data, profile_keywords, pkg, mode):
        # note we ignore mode; keywords aren't influenced by conditionals.
        # note also, we're not using a restriction here. this is faster.
        pkg_keywords = pkg.keywords
        for atom, keywords in profile_keywords:
            if atom.match(pkg):
                pkg_keywords += keywords
        allowed = data.pull_data(pkg)
        if '**' in allowed:
            return True
        if "*" in allowed:
            # '*' accepts any stable keyword (no '-' or '~' prefix)
            for k in pkg_keywords:
                if k[0] not in "-~":
                    return True
        if "~*" in allowed:
            # '~*' accepts any unstable keyword
            for k in pkg_keywords:
                if k[0] == "~":
                    return True
        return any(True for x in pkg_keywords if x in allowed)

    def split_use_expand_flags(self, use_stream):
        """Split a flag stream into plain use flags and USE_EXPAND flags."""
        matcher = self.use_expand_re.match
        stream = ((matcher(x), x) for x in use_stream)
        flags, ue_flags = predicate_split(bool, stream, itemgetter(0))
        return map(itemgetter(1), flags), [(x[0].groups(), x[1]) for x in ue_flags]

    def get_package_use_unconfigured(self, pkg, for_metadata=True):
        """Determine use flags for a given package.

        Roughly, this should result in the following, evaluated l->r: non
        USE_EXPAND; profiles, pkg iuse, global configuration, package.use
        configuration, commandline?

        stack profiles + pkg iuse; split it into use and use_expanded use;
        do global configuration + package.use configuration overriding of
        non-use_expand use if global configuration has a setting for
        use_expand.

        Args:
            pkg: package object
            for_metadata (bool): if True, we're doing use flag retrieval for
                metadata generation; otherwise, we're just requesting the raw use flags

        Returns:
            Three groups of use flags for the package in the following order:
            immutable flags, enabled flags, and disabled flags.
        """
        pre_defaults = [x[1:] for x in pkg.iuse if x[0] == '+']
        if pre_defaults:
            pre_defaults, ue_flags = self.split_use_expand_flags(pre_defaults)
            # only honor USE_EXPAND defaults whose var isn't set globally
            pre_defaults.extend(
                x[1] for x in ue_flags
                if x[0][0].upper() not in self.settings)

        # use the stable_ variants when the pkg is keyworded stable for this
        # arch and unstable keywords aren't globally accepted
        attr = 'stable_' if self.stable_arch in pkg.keywords \
            and self.unstable_arch not in self.settings['ACCEPT_KEYWORDS'] else ''
        disabled = getattr(self, attr + 'disabled_use').pull_data(pkg)
        immutable = getattr(self, attr + 'forced_use').pull_data(pkg)

        # lock the configurable use flags to only what's in IUSE, and what's forced
        # from the profiles (things like userland_GNU and arch)
        enabled = self.enabled_use.pull_data(pkg, pre_defaults=pre_defaults)

        # support globs for USE_EXPAND vars
        use_globs = [u for u in enabled if u.endswith('*')]
        enabled_use_globs = []
        for glob in use_globs:
            for u in pkg.iuse_stripped:
                if u.startswith(glob[:-1]):
                    enabled_use_globs.append(u)
        enabled.difference_update(use_globs)
        enabled.update(enabled_use_globs)

        if for_metadata:
            preserves = pkg.iuse_stripped
            enabled.intersection_update(preserves)
            enabled.update(immutable)
            enabled.difference_update(disabled)

        return immutable, enabled, disabled

    def get_package_use_buildable(self, pkg):
        """Return the use flags a pkg build should see beyond its metadata."""
        # isolate just what isn't exposed for metadata- anything non-IUSE
        # this brings in actual use flags the ebuild shouldn't see, but that's
        # a future enhancement to be done when USE_EXPAND is kept separate from
        # mainline USE in this code.
        metadata_use = self.get_package_use_unconfigured(pkg, for_metadata=True)[1]
        raw_use = self.get_package_use_unconfigured(pkg, for_metadata=False)[1]
        enabled = raw_use.difference(metadata_use)

        enabled.update(pkg.use)
        return enabled

    def get_package_bashrcs(self, pkg):
        """Yield every bashrc source that applies to *pkg*, in stacking order."""
        for source in self.profile.bashrcs:
            yield source
        for restrict, source in self.bashrcs:
            if restrict.match(pkg):
                yield source
        if not self.ebuild_hook_dir:
            return
        # matching portage behaviour... it's whacked.
        base = pjoin(self.ebuild_hook_dir, pkg.category)
        for fp in (pkg.package, "%s:%s" % (pkg.package, pkg.slot),
                   getattr(pkg, "P", "nonexistent"), getattr(pkg, "PF", "nonexistent")):
            fp = pjoin(base, fp)
            if os.path.exists(fp):
                yield local_source(fp)

    def _mk_nonconfig_triggers(self):
        return ebuild_generate_triggers(self)

    def _get_tempspace(self):
        # PORTAGE_TMPDIR is the parent; builds go under its 'portage' subdir
        path = self.settings.get("PORTAGE_TMPDIR", None)
        if path is not None:
            path = pjoin(path, 'portage')
        return path

    @klass.jit_attr
    def ebuild_repos(self):
        """Group of all ebuild repos bound with configuration data."""
        return util.RepositoryGroup(
            x for x in self.repos
            if isinstance(x.raw_repo, ebuild_repo._ConfiguredTree))

    @klass.jit_attr
    def ebuild_repos_raw(self):
        """Group of all ebuild repos without filtering."""
        return util.RepositoryGroup(
            x for x in self.repos_configured.itervalues()
            if isinstance(x.raw_repo, ebuild_repo._UnconfiguredTree))

    @klass.jit_attr
    def binary_repos(self):
        """Group of all binary repos bound with configuration data."""
        return util.RepositoryGroup(
            x for x in self.repos
            if isinstance(x.raw_repo, binary_repo.ConfiguredBinpkgTree))

    @klass.jit_attr
    def binary_repos_raw(self):
        """Group of all binary repos without filtering."""
        return util.RepositoryGroup(
            x for x in self.repos_configured.itervalues()
            if isinstance(x.raw_repo, binary_repo.tree))

    # multiplexed repos
    all_ebuild_repos = klass.alias_attr("ebuild_repos.combined")
    all_raw_ebuild_repos = klass.alias_attr("ebuild_repos_raw.combined")
    all_binary_repos = klass.alias_attr("binary_repos.combined")
    all_raw_binary_repos = klass.alias_attr("binary_repos_raw.combined")

    def repo_containing_ebuild(self, path):
        """Determine if an ebuild is in a repo.

        Note that this will only return a repo if the ebuild is properly placed
        in the proper category/PN directory structure.

        Args:
            path (str): path to ebuild file

        Returns:
            configured ebuild repo object if a matching repo is found,
            otherwise None.
        """
        ebuild_path = os.path.abspath(path)
        if not (os.path.isfile(ebuild_path) and ebuild_path.endswith('.ebuild')):
            raise ValueError("'%s' is not an ebuild" % path)

        # repo root is three levels up: <repo>/<category>/<PN>/<ebuild>
        repo_path = os.path.abspath(os.path.join(
            ebuild_path, os.pardir, os.pardir, os.pardir))
        for repo in self.ebuild_repos:
            if repo.location == repo_path:
                return repo
        return None
class domain(pkgcore.config.domain.domain):
    """Configuration domain: binds a profile, repos, vdb and user settings.

    Instantiated from config sections of typename 'domain'; wraps repos
    with visibility filtering derived from masks/keywords/licenses.
    """

    # XXX ouch, verify this crap and add defaults and stuff
    # config-section type hints: name -> expected value type
    _types = {
        'profile': 'ref:profile', 'fetcher': 'ref:fetcher',
        'repositories': 'lazy_refs:repo', 'vdb': 'lazy_refs:repo',
        'name': 'str', 'triggers': 'lazy_refs:trigger',
    }
    for _thing in list(const.incrementals) + ['bashrc']:
        _types[_thing] = 'list'
    for _thing in [
            'package.mask', 'package.keywords', 'package.license',
            'package.use', 'package.unmask', 'package.env',
            'package.accept_keywords']:
        _types[_thing] = 'list'
    for _thing in [
            'root', 'CHOST', 'CBUILD', 'CTARGET', 'CFLAGS', 'PATH',
            'PORTAGE_TMPDIR', 'DISTCC_PATH', 'DISTCC_DIR', 'CCACHE_DIR']:
        _types[_thing] = 'str'

    # TODO this is missing defaults
    pkgcore_config_type = ConfigHint(
        _types, typename='domain',
        required=['repositories', 'profile', 'vdb', 'fetcher', 'name'],
        allow_unknowns=True)

    del _types, _thing

    def __init__(self, profile, repositories, vdb, name=None,
                 root='/', prefix='/', incrementals=const.incrementals,
                 triggers=(), **settings):
        """Build the domain from a profile, repo/vdb refs and user settings.

        :param profile: profile object supplying defaults/masks/keywords
        :param repositories: lazy refs to package repos to bind
        :param vdb: lazy refs to installed-package (vdb) repos
        :param settings: remaining make.conf-style key/value settings
        """
        # voodoo, unfortunately (so it goes)
        # break this up into chunks once it's stabilized (most of code
        # here has already, but still more to add)
        self._triggers = triggers

        # prevent critical variables from being changed by the user in
        # make.conf
        for k in set(profile.profile_only_variables).intersection(
                settings.keys()):
            del settings[k]

        if 'CHOST' in settings and 'CBUILD' not in settings:
            settings['CBUILD'] = settings['CHOST']

        # map out sectionname -> config manager immediately.
        repositories_collapsed = [r.collapse() for r in repositories]
        repositories = [r.instantiate() for r in repositories_collapsed]

        self.fetcher = settings.pop("fetcher")
        self.default_licenses_manager = OverlayedLicenses(*repositories)

        vdb_collapsed = [r.collapse() for r in vdb]
        vdb = [r.instantiate() for r in vdb_collapsed]
        self.repos_raw = {
            collapsed.name: repo for (collapsed, repo) in izip(
                repositories_collapsed, repositories)}
        self.repos_raw.update(
            (collapsed.name, repo) for (collapsed, repo) in izip(
                vdb_collapsed, vdb))
        # drop any section that had no name
        self.repos_raw.pop(None, None)
        if profile.provides_repo is not None:
            self.repos_raw['package.provided'] = profile.provides_repo
            vdb.append(profile.provides_repo)

        self.profile = profile

        pkg_maskers, pkg_unmaskers, pkg_keywords, pkg_licenses = \
            [], [], [], []
        pkg_use, self.bashrcs = [], []

        self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

        # parse the user's package.* config files; each entry is scanned
        # recursively (dirs allowed), dotfiles skipped
        for key, val, action in (
                ("package.mask", pkg_maskers, parse_match),
                ("package.unmask", pkg_unmaskers, parse_match),
                ("package.keywords", pkg_keywords, package_keywords_splitter),
                ("package.accept_keywords", pkg_keywords,
                 package_keywords_splitter),
                ("package.license", pkg_licenses, package_keywords_splitter),
                ("package.use", pkg_use, package_keywords_splitter),
                ("package.env", self.bashrcs, package_env_splitter),
                ):
            for fp in settings.pop(key, ()):
                try:
                    if key == "package.env":
                        # package.env entries are resolved relative to the
                        # hook dir (or the config file's own dir)
                        base = self.ebuild_hook_dir
                        if base is None:
                            base = os.path.dirname(fp)
                        action = partial(action, base)
                    for fs_obj in iter_scan(fp, follow_symlinks=True):
                        if not fs_obj.is_reg or '/.' in fs_obj.location:
                            continue
                        val.extend(
                            action(x)
                            for x in iter_read_bash(fs_obj.location))
                except EnvironmentError as e:
                    if e.errno == errno.ENOENT:
                        raise MissingFile(fp, key)
                    raise_from(Failure(
                        "failed reading '%s': %s" % (fp, e)))
                except ValueError as e:
                    raise_from(Failure(
                        "failed reading '%s': %s" % (fp, e)))

        self.name = name
        settings.setdefault("PKGCORE_DOMAIN", name)
        # normalize incrementals supplied as whitespace-separated strings
        for x in incrementals:
            if isinstance(settings.get(x), basestring):
                settings[x] = tuple(settings[x].split())

        # roughly... all incremental stacks should be interpreted left ->
        # right as such we start with the profile settings, and append
        # ours onto it.
        for k, v in profile.default_env.iteritems():
            if k not in settings:
                settings[k] = v
                continue
            if k in incrementals:
                settings[k] = v + tuple(settings[k])

        # next we finalize incrementals.
        for incremental in incrementals:
            # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
            # negations currently so that pkg iuse induced enablings can be
            # disabled by negations. For example, think of the profile
            # doing USE=-cdr for brasero w/ IUSE=+cdr. Similarly,
            # ACCEPT_LICENSE is skipped because negations are required for
            # license filtering.
            if incremental not in settings or incremental in (
                    "USE", "ACCEPT_LICENSE"):
                continue
            s = set()
            incremental_expansion(
                s, settings[incremental],
                'While expanding %s ' % (incremental,))
            settings[incremental] = tuple(s)

        # use is collapsed; now stack use_expand.
        use = settings['USE'] = set(
            optimize_incrementals(settings.get("USE", ())))

        self._extend_use_for_features(use, settings.get("FEATURES", ()))

        self.use_expand = frozenset(profile.use_expand)
        self.use_expand_hidden = frozenset(profile.use_expand_hidden)
        # fold USE_EXPAND vars (e.g. VIDEO_CARDS="x") into USE as
        # lowercased prefixed flags (video_cards_x)
        for u in profile.use_expand:
            v = settings.get(u)
            if v is None:
                continue
            u2 = u.lower() + "_"
            use.update(u2 + x for x in v.split())

        if not 'ACCEPT_KEYWORDS' in settings:
            raise Failure("No ACCEPT_KEYWORDS setting detected from "
                          "profile, or user config")
        s = set()
        default_keywords = []
        incremental_expansion(
            s, settings['ACCEPT_KEYWORDS'],
            'while expanding ACCEPT_KEYWORDS')
        default_keywords.extend(s)
        settings['ACCEPT_KEYWORDS'] = set(default_keywords)

        self.use = use

        if "ARCH" not in settings:
            raise Failure(
                "No ARCH setting detected from profile, or user config")

        self.arch = self.stable_arch = settings["ARCH"]
        self.unstable_arch = "~%s" % self.arch

        # ~amd64 -> [amd64, ~amd64]
        for x in default_keywords[:]:
            if x.startswith("~"):
                default_keywords.append(x.lstrip("~"))
        default_keywords = unstable_unique(default_keywords + [self.arch])

        accept_keywords = pkg_keywords + list(profile.accept_keywords)
        vfilters = [self.make_keywords_filter(
            self.arch, default_keywords, accept_keywords, profile.keywords,
            incremental="package.keywords" in incrementals)]

        del default_keywords, accept_keywords

        # we can finally close that fricking
        # "DISALLOW NON FOSS LICENSES" bug via this >:)
        master_license = []
        master_license.extend(settings.get('ACCEPT_LICENSE', ()))
        if master_license or pkg_licenses:
            vfilters.append(
                self.make_license_filter(master_license, pkg_licenses))

        del master_license

        # if it's made it this far...
        self.root = settings["ROOT"] = root
        self.prefix = prefix
        self.settings = ProtectedDict(settings)

        for data in self.settings.get('bashrc', ()):
            source = local_source(data)
            # this is currently local-only so a path check is ok
            # TODO make this more general
            if not os.path.exists(source.path):
                raise Failure(
                    'user-specified bashrc %r does not exist' % (data,))
            self.bashrcs.append((packages.AlwaysTrue, source))

        # stack use stuff first, then profile.
        self.enabled_use = ChunkedDataDict()
        self.enabled_use.add_bare_global(*split_negations(self.use))
        self.enabled_use.merge(profile.pkg_use)
        self.enabled_use.update_from_stream(
            chunked_data(k, *split_negations(v)) for k, v in pkg_use)

        # build both the regular and stable_ variants of the
        # forced/disabled USE mappings
        for attr in ('', 'stable_'):
            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'forced_use'))
            c.add_bare_global((), (self.arch,))
            setattr(self, attr + 'forced_use', c)

            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'masked_use'))
            setattr(self, attr + 'disabled_use', c)

        self.repos = []
        self.vdb = []
        self.repos_configured = {}
        self.repos_configured_filtered = {}

        rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

        profile_masks = profile._incremental_masks()
        profile_unmasks = profile._incremental_unmasks()
        repo_masks = {
            r.repo_id: r._visibility_limiters() for r in repositories}

        # wrap each repo with configuration, and (for non-vdb repos) a
        # visibility filter built from masks/unmasks + keyword/license
        # filters
        for l, repos, filtered in (
                (self.repos, repositories, True),
                (self.vdb, vdb, False)):
            for repo in repos:
                if not repo.configured:
                    pargs = [repo]
                    try:
                        for x in repo.configurables:
                            if x == "domain":
                                pargs.append(self)
                            elif x == "settings":
                                pargs.append(settings)
                            elif x == "profile":
                                pargs.append(profile)
                            else:
                                pargs.append(getattr(self, x))
                    except AttributeError as ae:
                        raise_from(Failure(
                            "failed configuring repo '%s': "
                            "configurable missing: %s" % (repo, ae)))
                    wrapped_repo = repo.configure(*pargs)
                else:
                    wrapped_repo = repo
                key = rev_names.get(repo)
                self.repos_configured[key] = wrapped_repo
                if filtered:
                    config = getattr(repo, 'config', None)
                    masters = getattr(config, 'masters', ())
                    if masters is None:
                        # tough cookies.  If a user has an overlay, no
                        # masters defined, we're not applying the portdir
                        # masks. we do this both since that's annoying,
                        # and since frankly there isn't any good course of
                        # action.
                        masters = ()
                    masks = [repo_masks.get(master, [(), ()])
                             for master in masters]
                    masks.append(repo_masks[repo.repo_id])
                    masks.extend(profile_masks)
                    mask_atoms = set()
                    # later (neg, pos) pairs override earlier ones
                    for neg, pos in masks:
                        mask_atoms.difference_update(neg)
                        mask_atoms.update(pos)
                    mask_atoms.update(pkg_maskers)
                    unmask_atoms = set(
                        chain(pkg_unmaskers, *profile_unmasks))
                    filtered = self.generate_filter(
                        generate_masking_restrict(mask_atoms),
                        generate_unmasking_restrict(unmask_atoms),
                        *vfilters)
                if filtered:
                    wrapped_repo = visibility.filterTree(
                        wrapped_repo, filtered, True)
                self.repos_configured_filtered[key] = wrapped_repo
                l.append(wrapped_repo)

        if profile.virtuals:
            l = [x for x in
                 (getattr(v, 'old_style_virtuals', None) for v in self.vdb)
                 if x is not None]
            profile_repo = profile.make_virtuals_repo(
                multiplex.tree(*repositories), *l)
            self.repos_raw["profile virtuals"] = profile_repo
            self.repos_configured_filtered["profile virtuals"] = profile_repo
            self.repos_configured["profile virtuals"] = profile_repo
            self.repos = [profile_repo] + self.repos

        # longest-match-first alternation for USE_EXPAND flag splitting
        self.use_expand_re = re.compile(
            "^(?:[+-])?(%s)_(.*)$" %
            "|".join(x.lower() for x in sorted(self.use_expand,
                                               reverse=True)))

    def _extend_use_for_features(self, use_settings, features):
        # hackish implementation; if test is on, flip on the flag
        if "test" in features:
            use_settings.add("test")

        if "prefix" in features or "force-prefix" in features:
            use_settings.add("prefix")

    def generate_filter(self, masking, unmasking, *extra):
        # note that we ignore unmasking if masking isn't specified.
        # no point, mainly
        r = ()
        if masking:
            if unmasking:
                r = (packages.OrRestriction(
                    masking, unmasking, disable_inst_caching=True),)
            else:
                r = (masking,)
        vfilter = packages.AndRestriction(
            disable_inst_caching=True, finalize=True, *(r + extra))
        return vfilter

    def make_license_filter(self, master_license, pkg_licenses):
        """Generates a restrict that matches iff the licenses are allowed."""
        return delegate(partial(
            self.apply_license_filter, master_license, pkg_licenses))

    def apply_license_filter(self, master_licenses, pkg_licenses, pkg, mode):
        """Determine if a package's license is allowed."""
        # note we're not honoring mode; it's always match.
        # reason is that of not turning on use flags to get acceptable
        # license pairs, maybe change this down the line?
        matched_pkg_licenses = []
        for atom, licenses in pkg_licenses:
            if atom.match(pkg):
                matched_pkg_licenses += licenses

        raw_accepted_licenses = master_licenses + matched_pkg_licenses
        license_manager = getattr(
            pkg.repo, 'licenses', self.default_licenses_manager)

        # the package is acceptable if any DNF clause of its LICENSE is
        # fully covered by the accepted set
        for and_pair in pkg.license.dnf_solutions():
            accepted = incremental_expansion_license(
                and_pair, license_manager.groups, raw_accepted_licenses,
                msg_prefix="while checking ACCEPT_LICENSE for %s" % (pkg,))
            if accepted.issuperset(and_pair):
                return True
        return False

    def make_keywords_filter(self, arch, default_keys, accept_keywords,
                             profile_keywords, incremental=False):
        """Generates a restrict that matches iff the keywords are allowed."""
        if not accept_keywords and not profile_keywords:
            return packages.PackageRestriction(
                "keywords", values.ContainmentMatch(*default_keys))

        if "~" + arch.lstrip("~") not in default_keys:
            # stable; thus empty entries == ~arch
            unstable = "~" + arch

            def f(r, v):
                if not v:
                    return r, unstable
                return r, v
            data = collapsed_restrict_to_data(
                ((packages.AlwaysTrue, default_keys),),
                (f(*i) for i in accept_keywords))
        else:
            if incremental:
                f = collapsed_restrict_to_data
            else:
                f = non_incremental_collapsed_restrict_to_data
            data = f(((packages.AlwaysTrue, default_keys),),
                     accept_keywords)

        if incremental:
            raise NotImplementedError(
                self.incremental_apply_keywords_filter)
            #f = self.incremental_apply_keywords_filter
        else:
            f = self.apply_keywords_filter
        return delegate(partial(f, data, profile_keywords))

    @staticmethod
    def incremental_apply_keywords_filter(data, pkg, mode):
        # note we ignore mode; keywords aren't influenced by conditionals.
        # note also, we're not using a restriction here.  this is faster.
        allowed = data.pull_data(pkg)
        return any(True for x in pkg.keywords if x in allowed)

    @staticmethod
    def apply_keywords_filter(data, profile_keywords, pkg, mode):
        # note we ignore mode; keywords aren't influenced by conditionals.
        # note also, we're not using a restriction here.  this is faster.
        pkg_keywords = pkg.keywords
        for atom, keywords in profile_keywords:
            if atom.match(pkg):
                pkg_keywords += keywords
        allowed = data.pull_data(pkg)
        # '**' accepts anything; '*' accepts any non-masked stable keyword;
        # '~*' accepts any unstable keyword
        if '**' in allowed:
            return True
        if "*" in allowed:
            for k in pkg_keywords:
                if k[0] not in "-~":
                    return True
        if "~*" in allowed:
            for k in pkg_keywords:
                if k[0] == "~":
                    return True
        return any(True for x in pkg_keywords if x in allowed)

    def split_use_expand_flags(self, use_stream):
        # partition a USE stream into plain flags and USE_EXPAND flags
        # (returned as ((expand_var, value), raw_flag) pairs)
        matcher = self.use_expand_re.match
        stream = ((matcher(x), x) for x in use_stream)
        flags, ue_flags = predicate_split(bool, stream, itemgetter(0))
        return map(itemgetter(1), flags), \
            [(x[0].groups(), x[1]) for x in ue_flags]

    def get_package_use_unconfigured(self, pkg, for_metadata=True):
        """Determine a package's USE state: (immutable, enabled, disabled).

        :param for_metadata: when True, enabled flags are restricted to
            the pkg's IUSE (plus forced flags) as required for metadata
            regeneration.
        """
        # roughly, this alog should result in the following, evaluated
        # l->r non USE_EXPAND; profiles, pkg iuse, global configuration,
        # package.use configuration, commandline?
        # stack profiles + pkg iuse; split it into use and use_expanded
        # use; do global configuration + package.use configuration
        # overriding of non-use_expand use
        # if global configuration has a setting for use_expand,

        pre_defaults = [x[1:] for x in pkg.iuse if x[0] == '+']
        if pre_defaults:
            pre_defaults, ue_flags = self.split_use_expand_flags(
                pre_defaults)
            pre_defaults.extend(
                x[1] for x in ue_flags
                if x[0][0].upper() not in self.settings)

        # use the stable_ variants when the pkg is keyworded stable for
        # this arch and ~arch isn't globally accepted
        attr = 'stable_' if self.stable_arch in pkg.keywords \
            and self.unstable_arch not in \
            self.settings['ACCEPT_KEYWORDS'] else ''
        disabled = getattr(self, attr + 'disabled_use').pull_data(pkg)
        immutable = getattr(self, attr + 'forced_use').pull_data(pkg)

        # lock the configurable use flags to only what's in IUSE, and
        # what's forced from the profiles (things like userland_GNU and
        # arch)
        enabled = self.enabled_use.pull_data(pkg, pre_defaults=pre_defaults)

        # support globs for USE_EXPAND vars
        use_globs = [u for u in enabled if u.endswith('*')]
        enabled_use_globs = []
        for glob in use_globs:
            for u in pkg.iuse:
                if u.startswith(glob[:-1]):
                    enabled_use_globs.append(u)
        enabled.difference_update(use_globs)
        enabled.update(enabled_use_globs)

        if for_metadata:
            preserves = set(x.lstrip('-+') for x in pkg.iuse)
            enabled.intersection_update(preserves)
            enabled.update(immutable)
            enabled.difference_update(disabled)

        return immutable, enabled, disabled

    def get_package_use_buildable(self, pkg):
        # isolate just what isn't exposed for metadata- anything non-IUSE
        # this brings in actual use flags the ebuild shouldn't see, but
        # that's a future enhancement to be done when USE_EXPAND is kept
        # separate from mainline USE in this code.
        metadata_use = self.get_package_use_unconfigured(
            pkg, for_metadata=True)[1]
        raw_use = self.get_package_use_unconfigured(
            pkg, for_metadata=False)[1]
        enabled = raw_use.difference(metadata_use)
        enabled.update(pkg.use)
        return enabled

    def get_package_bashrcs(self, pkg):
        # yields bashrc sources in stacking order: profile, user config
        # (restriction-matched), then ebuild hook dir lookups
        for source in self.profile.bashrcs:
            yield source
        for restrict, source in self.bashrcs:
            if restrict.match(pkg):
                yield source
        if not self.ebuild_hook_dir:
            return
        # matching portage behaviour... it's whacked.
        base = pjoin(self.ebuild_hook_dir, pkg.category)
        for fp in (pkg.package, "%s:%s" % (pkg.package, pkg.slot),
                   getattr(pkg, "P", "nonexistent"),
                   getattr(pkg, "PF", "nonexistent")):
            fp = pjoin(base, fp)
            if os.path.exists(fp):
                yield local_source(fp)

    def _mk_nonconfig_triggers(self):
        # triggers that apply regardless of configuration
        return ebuild_generate_triggers(self)

    def _get_tempspace(self):
        # build-time temp dir: $PORTAGE_TMPDIR/portage, or None when unset
        path = self.settings.get("PORTAGE_TMPDIR", None)
        if path is not None:
            path = pjoin(path, 'portage')
        return path
    def __init__(self, profile, repositories, vdb, name=None,
                 root='/', prefix='/', incrementals=const.incrementals,
                 triggers=(), **settings):
        """Build the domain from a profile, repo/vdb refs and user settings.

        NOTE(review): this method appears verbatim a second time in this
        file; the copies should be deduplicated.

        :param profile: profile object supplying defaults/masks/keywords
        :param repositories: lazy refs to package repos to bind
        :param vdb: lazy refs to installed-package (vdb) repos
        :param settings: remaining make.conf-style key/value settings
        """
        # voodoo, unfortunately (so it goes)
        # break this up into chunks once it's stabilized (most of code
        # here has already, but still more to add)
        self._triggers = triggers

        # prevent critical variables from being changed by the user in
        # make.conf
        for k in set(profile.profile_only_variables).intersection(
                settings.keys()):
            del settings[k]

        if 'CHOST' in settings and 'CBUILD' not in settings:
            settings['CBUILD'] = settings['CHOST']

        # map out sectionname -> config manager immediately.
        repositories_collapsed = [r.collapse() for r in repositories]
        repositories = [r.instantiate() for r in repositories_collapsed]

        self.fetcher = settings.pop("fetcher")
        self.default_licenses_manager = OverlayedLicenses(*repositories)

        vdb_collapsed = [r.collapse() for r in vdb]
        vdb = [r.instantiate() for r in vdb_collapsed]
        self.repos_raw = {
            collapsed.name: repo for (collapsed, repo) in izip(
                repositories_collapsed, repositories)}
        self.repos_raw.update(
            (collapsed.name, repo) for (collapsed, repo) in izip(
                vdb_collapsed, vdb))
        # drop any section that had no name
        self.repos_raw.pop(None, None)
        if profile.provides_repo is not None:
            self.repos_raw['package.provided'] = profile.provides_repo
            vdb.append(profile.provides_repo)

        self.profile = profile

        pkg_maskers, pkg_unmaskers, pkg_keywords, pkg_licenses = \
            [], [], [], []
        pkg_use, self.bashrcs = [], []

        self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

        # parse the user's package.* config files; each entry is scanned
        # recursively (dirs allowed), dotfiles skipped
        for key, val, action in (
                ("package.mask", pkg_maskers, parse_match),
                ("package.unmask", pkg_unmaskers, parse_match),
                ("package.keywords", pkg_keywords, package_keywords_splitter),
                ("package.accept_keywords", pkg_keywords,
                 package_keywords_splitter),
                ("package.license", pkg_licenses, package_keywords_splitter),
                ("package.use", pkg_use, package_keywords_splitter),
                ("package.env", self.bashrcs, package_env_splitter),
                ):
            for fp in settings.pop(key, ()):
                try:
                    if key == "package.env":
                        # package.env entries are resolved relative to the
                        # hook dir (or the config file's own dir)
                        base = self.ebuild_hook_dir
                        if base is None:
                            base = os.path.dirname(fp)
                        action = partial(action, base)
                    for fs_obj in iter_scan(fp, follow_symlinks=True):
                        if not fs_obj.is_reg or '/.' in fs_obj.location:
                            continue
                        val.extend(
                            action(x)
                            for x in iter_read_bash(fs_obj.location))
                except EnvironmentError as e:
                    if e.errno == errno.ENOENT:
                        raise MissingFile(fp, key)
                    raise_from(Failure(
                        "failed reading '%s': %s" % (fp, e)))
                except ValueError as e:
                    raise_from(Failure(
                        "failed reading '%s': %s" % (fp, e)))

        self.name = name
        settings.setdefault("PKGCORE_DOMAIN", name)
        # normalize incrementals supplied as whitespace-separated strings
        for x in incrementals:
            if isinstance(settings.get(x), basestring):
                settings[x] = tuple(settings[x].split())

        # roughly... all incremental stacks should be interpreted left ->
        # right as such we start with the profile settings, and append
        # ours onto it.
        for k, v in profile.default_env.iteritems():
            if k not in settings:
                settings[k] = v
                continue
            if k in incrementals:
                settings[k] = v + tuple(settings[k])

        # next we finalize incrementals.
        for incremental in incrementals:
            # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
            # negations currently so that pkg iuse induced enablings can be
            # disabled by negations. For example, think of the profile
            # doing USE=-cdr for brasero w/ IUSE=+cdr. Similarly,
            # ACCEPT_LICENSE is skipped because negations are required for
            # license filtering.
            if incremental not in settings or incremental in (
                    "USE", "ACCEPT_LICENSE"):
                continue
            s = set()
            incremental_expansion(
                s, settings[incremental],
                'While expanding %s ' % (incremental,))
            settings[incremental] = tuple(s)

        # use is collapsed; now stack use_expand.
        use = settings['USE'] = set(
            optimize_incrementals(settings.get("USE", ())))

        self._extend_use_for_features(use, settings.get("FEATURES", ()))

        self.use_expand = frozenset(profile.use_expand)
        self.use_expand_hidden = frozenset(profile.use_expand_hidden)
        # fold USE_EXPAND vars (e.g. VIDEO_CARDS="x") into USE as
        # lowercased prefixed flags (video_cards_x)
        for u in profile.use_expand:
            v = settings.get(u)
            if v is None:
                continue
            u2 = u.lower() + "_"
            use.update(u2 + x for x in v.split())

        if not 'ACCEPT_KEYWORDS' in settings:
            raise Failure("No ACCEPT_KEYWORDS setting detected from "
                          "profile, or user config")
        s = set()
        default_keywords = []
        incremental_expansion(
            s, settings['ACCEPT_KEYWORDS'],
            'while expanding ACCEPT_KEYWORDS')
        default_keywords.extend(s)
        settings['ACCEPT_KEYWORDS'] = set(default_keywords)

        self.use = use

        if "ARCH" not in settings:
            raise Failure(
                "No ARCH setting detected from profile, or user config")

        self.arch = self.stable_arch = settings["ARCH"]
        self.unstable_arch = "~%s" % self.arch

        # ~amd64 -> [amd64, ~amd64]
        for x in default_keywords[:]:
            if x.startswith("~"):
                default_keywords.append(x.lstrip("~"))
        default_keywords = unstable_unique(default_keywords + [self.arch])

        accept_keywords = pkg_keywords + list(profile.accept_keywords)
        vfilters = [self.make_keywords_filter(
            self.arch, default_keywords, accept_keywords, profile.keywords,
            incremental="package.keywords" in incrementals)]

        del default_keywords, accept_keywords

        # we can finally close that fricking
        # "DISALLOW NON FOSS LICENSES" bug via this >:)
        master_license = []
        master_license.extend(settings.get('ACCEPT_LICENSE', ()))
        if master_license or pkg_licenses:
            vfilters.append(
                self.make_license_filter(master_license, pkg_licenses))

        del master_license

        # if it's made it this far...
        self.root = settings["ROOT"] = root
        self.prefix = prefix
        self.settings = ProtectedDict(settings)

        for data in self.settings.get('bashrc', ()):
            source = local_source(data)
            # this is currently local-only so a path check is ok
            # TODO make this more general
            if not os.path.exists(source.path):
                raise Failure(
                    'user-specified bashrc %r does not exist' % (data,))
            self.bashrcs.append((packages.AlwaysTrue, source))

        # stack use stuff first, then profile.
        self.enabled_use = ChunkedDataDict()
        self.enabled_use.add_bare_global(*split_negations(self.use))
        self.enabled_use.merge(profile.pkg_use)
        self.enabled_use.update_from_stream(
            chunked_data(k, *split_negations(v)) for k, v in pkg_use)

        # build both the regular and stable_ variants of the
        # forced/disabled USE mappings
        for attr in ('', 'stable_'):
            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'forced_use'))
            c.add_bare_global((), (self.arch,))
            setattr(self, attr + 'forced_use', c)

            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'masked_use'))
            setattr(self, attr + 'disabled_use', c)

        self.repos = []
        self.vdb = []
        self.repos_configured = {}
        self.repos_configured_filtered = {}

        rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

        profile_masks = profile._incremental_masks()
        profile_unmasks = profile._incremental_unmasks()
        repo_masks = {
            r.repo_id: r._visibility_limiters() for r in repositories}

        # wrap each repo with configuration, and (for non-vdb repos) a
        # visibility filter built from masks/unmasks + keyword/license
        # filters
        for l, repos, filtered in (
                (self.repos, repositories, True),
                (self.vdb, vdb, False)):
            for repo in repos:
                if not repo.configured:
                    pargs = [repo]
                    try:
                        for x in repo.configurables:
                            if x == "domain":
                                pargs.append(self)
                            elif x == "settings":
                                pargs.append(settings)
                            elif x == "profile":
                                pargs.append(profile)
                            else:
                                pargs.append(getattr(self, x))
                    except AttributeError as ae:
                        raise_from(Failure(
                            "failed configuring repo '%s': "
                            "configurable missing: %s" % (repo, ae)))
                    wrapped_repo = repo.configure(*pargs)
                else:
                    wrapped_repo = repo
                key = rev_names.get(repo)
                self.repos_configured[key] = wrapped_repo
                if filtered:
                    config = getattr(repo, 'config', None)
                    masters = getattr(config, 'masters', ())
                    if masters is None:
                        # tough cookies.  If a user has an overlay, no
                        # masters defined, we're not applying the portdir
                        # masks. we do this both since that's annoying,
                        # and since frankly there isn't any good course of
                        # action.
                        masters = ()
                    masks = [repo_masks.get(master, [(), ()])
                             for master in masters]
                    masks.append(repo_masks[repo.repo_id])
                    masks.extend(profile_masks)
                    mask_atoms = set()
                    # later (neg, pos) pairs override earlier ones
                    for neg, pos in masks:
                        mask_atoms.difference_update(neg)
                        mask_atoms.update(pos)
                    mask_atoms.update(pkg_maskers)
                    unmask_atoms = set(
                        chain(pkg_unmaskers, *profile_unmasks))
                    filtered = self.generate_filter(
                        generate_masking_restrict(mask_atoms),
                        generate_unmasking_restrict(unmask_atoms),
                        *vfilters)
                if filtered:
                    wrapped_repo = visibility.filterTree(
                        wrapped_repo, filtered, True)
                self.repos_configured_filtered[key] = wrapped_repo
                l.append(wrapped_repo)

        if profile.virtuals:
            l = [x for x in
                 (getattr(v, 'old_style_virtuals', None) for v in self.vdb)
                 if x is not None]
            profile_repo = profile.make_virtuals_repo(
                multiplex.tree(*repositories), *l)
            self.repos_raw["profile virtuals"] = profile_repo
            self.repos_configured_filtered["profile virtuals"] = profile_repo
            self.repos_configured["profile virtuals"] = profile_repo
            self.repos = [profile_repo] + self.repos

        # longest-match-first alternation for USE_EXPAND flag splitting
        self.use_expand_re = re.compile(
            "^(?:[+-])?(%s)_(.*)$" %
            "|".join(x.lower() for x in sorted(self.use_expand,
                                               reverse=True)))
def spawn_get_output(mycommand, spawn_type=None, raw_exit_code=False,
                     collect_fds=(1,), fd_pipes=None, split_lines=True,
                     **keywords):
    """Call spawn, collecting the output to fd's specified in collect_fds list.

    :param spawn_type: the passed in function to call- typically
        :func:`spawn_bash`, :func:`spawn`, :func:`spawn_sandbox`, or
        :func:`spawn_fakeroot`.  Defaults to :func:`spawn`.
    :param raw_exit_code: if True, return the raw waitpid status instead of
        the processed exit code.
    :param split_lines: if True, return the output as a list of lines;
        otherwise as a single string.
    :return: [exit_code, collected_output]
    """
    if spawn_type is None:
        spawn_type = spawn

    pr, pw = None, None
    if fd_pipes is None:
        fd_pipes = {0: 0}
    else:
        # shield the caller's mapping from the mutations below
        fd_pipes = ProtectedDict(fd_pipes)
    try:
        pr, pw = os.pipe()
        for x in collect_fds:
            fd_pipes[x] = pw
        keywords["returnpid"] = True
        mypid = spawn_type(mycommand, fd_pipes=fd_pipes, **keywords)
        # close our copy of the write end now; otherwise the read below
        # would never see EOF.
        os.close(pw)
        pw = None

        if not isinstance(mypid, (list, tuple)):
            raise ExecutionFailure()

        fd = os.fdopen(pr, "r")
        # ownership of pr transferred to the file object; clear it so the
        # finally clause can't close the fd number a second time after
        # fd.close() (the fd number may have been reused by then).  Same
        # guard spawn_fakeroot uses for rd_fd.
        pr = None
        try:
            if not split_lines:
                mydata = fd.read()
            else:
                mydata = fd.readlines()
        finally:
            fd.close()

        retval = os.waitpid(mypid[0], 0)[1]
        cleanup_pids(mypid)
        if raw_exit_code:
            return [retval, mydata]
        return [process_exit_code(retval), mydata]
    finally:
        # best-effort cleanup of whichever pipe ends we still own
        if pr is not None:
            try:
                os.close(pr)
            except OSError:
                pass
        if pw is not None:
            try:
                os.close(pw)
            except OSError:
                pass
def spawn_fakeroot(mycommand, save_file, env=None, name=None,
                   returnpid=False, **keywords):
    """spawn a process via fakeroot

    refer to the fakeroot manpage for specifics of using fakeroot

    :param save_file: path used by faked to persist its fake-ownership
        state; loaded if it already exists.
    :param returnpid: when True, the spawned pids (including the faked
        daemon's) are returned instead of being reaped here.
    """
    if env is None:
        env = {}
    else:
        # shield the caller's env mapping from the mutations below
        env = ProtectedDict(env)

    if name is None:
        name = "fakeroot %s" % mycommand

    args = [
        FAKED_PATH,
        "--unknown-is-real", "--foreground", "--save-file", save_file]

    # pipe for reading faked's "key:pid" handshake line
    rd_fd, wr_fd = os.pipe()
    daemon_fd_pipes = {1: wr_fd, 2: wr_fd}
    if os.path.exists(save_file):
        args.append("--load")
        daemon_fd_pipes[0] = os.open(save_file, os.O_RDONLY)
    else:
        daemon_fd_pipes[0] = os.open("/dev/null", os.O_RDONLY)

    pids = None
    pids = spawn(args, fd_pipes=daemon_fd_pipes, returnpid=True)
    try:
        try:
            rd_f = os.fdopen(rd_fd)
            line = rd_f.readline()
            rd_f.close()
            # fd now closed via the file object; prevent a double close below
            rd_fd = None
        except:
            # bare except is deliberate: reap the daemon on any failure,
            # then re-raise unchanged
            cleanup_pids(pids)
            raise
    finally:
        # close whatever fds we still own (rd_fd is None if handed off)
        for x in (rd_fd, wr_fd, daemon_fd_pipes[0]):
            if x is not None:
                try:
                    os.close(x)
                except OSError:
                    pass

    line = line.strip()
    try:
        fakekey, fakepid = map(int, line.split(":"))
    except ValueError:
        raise ExecutionFailure("output from faked was unparsable- %s" % line)

    # by now we have our very own daemonized faked.  yay.
    env["FAKEROOTKEY"] = str(fakekey)
    # prepend the fakeroot preload lib, preserving any existing LD_PRELOAD
    paths = [LIBFAKEROOT_PATH] + env.get("LD_PRELOAD", "").split(":")
    env["LD_PRELOAD"] = ":".join(x for x in paths if x)

    try:
        ret = spawn(
            mycommand, name=name, env=env, returnpid=returnpid,
            **keywords)
        if returnpid:
            return ret + [fakepid] + pids
        return ret
    finally:
        if not returnpid:
            cleanup_pids([fakepid] + pids)
    # (continuation of a listdir-style helper whose def-line is above this
    # view; it filters directory entries by a stat-mode check `scheck`)
    pjf = pjoin
    if followSymlinks:
        # stat follows symlinks; ENOENT (broken links) swallowed
        return [x for x in os.listdir(path)
                if stat_swallow_enoent(pjf(path, x), scheck)]
    lstat = os.lstat
    return [x for x in os.listdir(path)
            if scheck(lstat(pjf(path, x)).st_mode)]

# we store this outside the function to ensure that
# the strings used are reused, thus avoiding unneeded
# allocations
# maps stat S_IF* mode constants to filetype name strings
d_type_mapping = ProtectedDict({
    S_IFREG: "file",
    S_IFDIR: "directory",
    S_IFLNK: "symlink",
    S_IFCHR: "chardev",
    S_IFBLK: "block",
    S_IFSOCK: "socket",
    S_IFIFO: "fifo",
})

def readdir(path):
    """
    Given a directory, return a list of (filename, filetype)
    see :py:data:`d_type_mappings` for the translation used

    :param path: path of a directory to scan
    :return: list of (filename, filetype)
    """
    # (body continues beyond this view)
    pjf = pjoin
def spawn_fakeroot(mycommand, save_file, env=None, name=None,
                   returnpid=False, **keywords):
    """spawn a process via fakeroot

    refer to the fakeroot manpage for specifics of using fakeroot

    NOTE(review): this function appears verbatim a second time in this
    file; the copies should be deduplicated.
    """
    if env is None:
        env = {}
    else:
        # shield the caller's env mapping from the mutations below
        env = ProtectedDict(env)

    if name is None:
        name = "fakeroot %s" % mycommand

    args = [
        FAKED_PATH,
        "--unknown-is-real", "--foreground", "--save-file", save_file]

    # pipe for reading faked's "key:pid" handshake line
    rd_fd, wr_fd = os.pipe()
    daemon_fd_pipes = {1: wr_fd, 2: wr_fd}
    if os.path.exists(save_file):
        args.append("--load")
        daemon_fd_pipes[0] = os.open(save_file, os.O_RDONLY)
    else:
        daemon_fd_pipes[0] = os.open("/dev/null", os.O_RDONLY)

    pids = None
    pids = spawn(args, fd_pipes=daemon_fd_pipes, returnpid=True)
    try:
        try:
            rd_f = os.fdopen(rd_fd)
            line = rd_f.readline()
            rd_f.close()
            # fd now closed via the file object; prevent a double close below
            rd_fd = None
        except:
            # bare except is deliberate: reap the daemon on any failure,
            # then re-raise unchanged
            cleanup_pids(pids)
            raise
    finally:
        # close whatever fds we still own (rd_fd is None if handed off)
        for x in (rd_fd, wr_fd, daemon_fd_pipes[0]):
            if x is not None:
                try:
                    os.close(x)
                except OSError:
                    pass

    line = line.strip()
    try:
        fakekey, fakepid = map(int, line.split(":"))
    except ValueError:
        raise ExecutionFailure("output from faked was unparsable- %s" % line)

    # by now we have our very own daemonized faked.  yay.
    env["FAKEROOTKEY"] = str(fakekey)
    # prepend the fakeroot preload lib, preserving any existing LD_PRELOAD
    paths = [LIBFAKEROOT_PATH] + env.get("LD_PRELOAD", "").split(":")
    env["LD_PRELOAD"] = ":".join(x for x in paths if x)

    try:
        ret = spawn(
            mycommand, name=name, env=env, returnpid=returnpid,
            **keywords)
        if returnpid:
            return ret + [fakepid] + pids
        return ret
    finally:
        if not returnpid:
            cleanup_pids([fakepid] + pids)
def read_bash_dict(bash_source, vars_dict=None, sourcing_command=None):
    """
    read bash source, yielding a dict of vars

    :param bash_source: either a file to read from
        or a string holding the filename to open
    :param vars_dict: initial 'env' for the sourcing.
        Is protected from modification.
    :type vars_dict: dict or None
    :param sourcing_command: controls whether a source command exists.
        If one does and is encountered, then this func is called.
    :type sourcing_command: callable
    :raise BashParseError: thrown if invalid syntax is encountered.
    :return: dict representing the resultant env if bash executed the source.
    """

    # quite possibly I'm missing something here, but the original
    # portage_util getconfig/varexpand seemed like it only went
    # halfway. The shlex posix mode *should* cover everything.

    if vars_dict is not None:
        # protect the caller's dict; collapsed result is pulled from .new
        d, protected = ProtectedDict(vars_dict), True
    else:
        d, protected = {}, False

    close = False
    infile = None
    if isinstance(bash_source, basestring):
        f = open(bash_source, "r")
        close = True
        infile = bash_source
    else:
        f = bash_source
    try:
        # constructed inside the try so a parser failure can't leak the
        # file we opened above
        s = bash_parser(f, sourcing_command=sourcing_command, env=d,
                        infile=infile)
        try:
            while True:
                key = s.get_token()
                if key is None:
                    break
                elif key.isspace():
                    # we specifically have to check this, since we're
                    # screwing with the whitespace filters below to
                    # detect empty assigns
                    continue
                eq = s.get_token()
                if eq != '=':
                    raise BashParseError(
                        bash_source, s.lineno,
                        "got token %r, was expecting '='" % eq)
                val = s.get_token()
                if val is None:
                    val = ''
                # look ahead to see if we just got an empty assign.
                next_tok = s.get_token()
                if next_tok == '=':
                    # ... we did.
                    # leftmost insertions, thus reversed ordering
                    s.push_token(next_tok)
                    s.push_token(val)
                    val = ''
                else:
                    s.push_token(next_tok)
                d[key] = val
        except ValueError as e:
            # 'as e' form for py3 compat and consistency with the rest of
            # this file (was the obsolete 'except ValueError, e')
            raise_from(BashParseError(bash_source, s.lineno, str(e)))
    finally:
        if close and f is not None:
            f.close()

    if protected:
        d = d.new
    return d