def __init__(self, masked_use=None, stable_masked_use=None, forced_use=None,
             stable_forced_use=None, provides=None, iuse_effective=(),
             masks=(), unmasks=(), arch='x86', name='none'):
    """Fake profile object for tests.

    Each ``*_use`` mapping is {atom string: iterable of USE flags, where a
    leading ``-`` marks a negation}; ``masks``/``unmasks`` are iterables of
    atom strings.
    """
    # Mutable default arguments ({} / []) replaced with None/() sentinels so
    # state is never shared between instances.
    if masked_use is None:
        masked_use = {}
    if stable_masked_use is None:
        stable_masked_use = {}
    if forced_use is None:
        forced_use = {}
    if stable_forced_use is None:
        stable_forced_use = {}
    if provides is None:
        provides = {}

    self.provides_repo = SimpleTree(provides)

    self.masked_use = ChunkedDataDict()
    self.masked_use.update_from_stream(
        chunked_data(atom(k), *split_negations(v))
        for k, v in masked_use.iteritems())
    self.masked_use.freeze()

    self.stable_masked_use = ChunkedDataDict()
    self.stable_masked_use.update_from_stream(
        chunked_data(atom(k), *split_negations(v))
        for k, v in stable_masked_use.iteritems())
    self.stable_masked_use.freeze()

    self.forced_use = ChunkedDataDict()
    self.forced_use.update_from_stream(
        chunked_data(atom(k), *split_negations(v))
        for k, v in forced_use.iteritems())
    self.forced_use.freeze()

    self.stable_forced_use = ChunkedDataDict()
    self.stable_forced_use.update_from_stream(
        chunked_data(atom(k), *split_negations(v))
        for k, v in stable_forced_use.iteritems())
    self.stable_forced_use.freeze()

    self.masks = tuple(map(atom, masks))
    self.unmasks = tuple(map(atom, unmasks))
    self.iuse_effective = tuple(iuse_effective)
    self.arch = arch
    self.name = name
def test_bad_value(self):
    """A negation with no value attached must raise ValueError."""
    for bad in ('-', 'a b c - d f e'):
        with pytest.raises(ValueError):
            split_negations(bad.split())
def __init__(self, masked_use=None, stable_masked_use=None, forced_use=None,
             stable_forced_use=None, pkg_use=None, provides=None,
             iuse_effective=(), use=(), masks=(), unmasks=(),
             arch='x86', name='none'):
    """Fake profile object for tests.

    Each ``*_use`` mapping is {atom string: iterable of USE flags, where a
    leading ``-`` marks a negation}; ``masks``/``unmasks`` are iterables of
    atom strings.
    """
    def _frozen_chunks(mapping):
        # build a frozen ChunkedDataDict from {atom string: USE iterable};
        # extracted helper removes the five-fold copy/paste of this pattern
        d = ChunkedDataDict()
        d.update_from_stream(
            chunked_data(atom(k), *split_negations(v))
            for k, v in mapping.items())
        d.freeze()
        return d

    # Mutable default arguments ({} / []) replaced with None/() sentinels so
    # state is never shared between instances.
    self.provides_repo = SimpleTree({} if provides is None else provides)
    self.masked_use = _frozen_chunks({} if masked_use is None else masked_use)
    self.stable_masked_use = _frozen_chunks(
        {} if stable_masked_use is None else stable_masked_use)
    self.forced_use = _frozen_chunks({} if forced_use is None else forced_use)
    self.stable_forced_use = _frozen_chunks(
        {} if stable_forced_use is None else stable_forced_use)
    self.pkg_use = _frozen_chunks({} if pkg_use is None else pkg_use)

    self.masks = tuple(map(atom, masks))
    self.unmasks = tuple(map(atom, unmasks))
    self.iuse_effective = set(iuse_effective)
    self.use = set(use)
    self.key = arch
    self.name = name
    vfilter = domain.generate_filter(self.masks, self.unmasks)
    self.visible = vfilter.match
def enabled_use(self):
    """Return the frozen, combined USE configuration.

    Stacks the bare global USE state, the profile's per-package USE, and
    this object's per-package USE stream, in that order.
    """
    combined = ChunkedDataDict()
    disabled, enabled = split_negations(self.use)
    combined.add_bare_global(disabled, enabled)
    combined.merge(self.profile.pkg_use)
    stream = (chunked_data(k, *v) for k, v in self.pkg_use)
    combined.update_from_stream(stream)
    combined.freeze()
    return combined
def pkg_provided(self, data):
    """Split package.provided entries into (negated, provided) CPV tuples."""
    def _parse_cpv(entry):
        try:
            return cpv.versioned_CPV(entry)
        except cpv.InvalidCPV:
            # NOTE(review): invalid entries log a warning and convert to
            # None — presumably dropped downstream; confirm against callers
            logger.warning(f'invalid package.provided entry: {entry!r}')

    return split_negations(data, _parse_cpv)
def _parse_use(self, data):
    """Build a frozen ChunkedDataDict of bare global USE settings from *data*."""
    chunks = misc.ChunkedDataDict()
    disabled, enabled = split_negations(data)
    # only register a global entry when there is actually something to apply
    if disabled or enabled:
        chunks.add_bare_global(disabled, enabled)
    chunks.freeze()
    return chunks
def pkg_provided(self, data):
    """Split package.provided entries into (negated, provided) CPV tuples."""
    def _parse_cpv(entry):
        try:
            return cpv.VersionedCPV(entry)
        except cpv.InvalidCPV:
            # NOTE(review): invalid entries log an error and convert to
            # None — presumably dropped downstream; confirm against callers
            logger.error(f'invalid package.provided entry: {entry!r}')

    # each element of *data* is a tuple whose first field is the entry text
    entries = (item[0] for item in data)
    return split_negations(entries, _parse_cpv)
def test_sequences(self):
    """Exercise split_negations over empty, bad, all-neg, all-pos, and mixed input."""
    nums = tuple(map(str, xrange(100)))
    # empty input -> two empty tuples
    self.assertEqual(split_negations(""), (tuple(), tuple()))
    # a bare "-" (negation without a value) raises ValueError
    with self.assertRaises(ValueError):
        split_negations("a b c - d f e".split())
    # all negations land in the first tuple
    self.assertEqual(
        split_negations("-" + str(x) for x in xrange(100)),
        (nums, tuple()))
    # all positives land in the second tuple
    self.assertEqual(
        split_negations(str(x) for x in xrange(100)),
        (tuple(), nums))
    # interleaved negations and positives are partitioned
    pairs = (("-" + str(x), str(x)) for x in xrange(100))
    self.assertEqual(split_negations(chain.from_iterable(pairs)), (nums, nums))
    # the optional converter callable is applied to every value
    pairs = (("-" + str(x), str(x)) for x in xrange(100))
    self.assertEqual(
        split_negations(chain.from_iterable(pairs), int),
        (tuple(xrange(100)), tuple(xrange(100))))
def test_sequences(self):
    """split_negations: empty, error, all-neg, all-pos, mixed, converter cases."""
    empty = tuple()
    # empty input
    self.assertEqual(split_negations(''), (empty, empty))
    # no-value negation should raise a ValueError
    with self.assertRaises(ValueError):
        split_negations('a b c - d f e'.split())
    # all negs
    self.assertEqual(
        split_negations('-' + str(x) for x in xrange(100)),
        (tuple(map(str, xrange(100))), empty))
    # all pos
    self.assertEqual(
        split_negations(str(x) for x in xrange(100)),
        (empty, tuple(map(str, xrange(100)))))
    # both, interleaved
    mixed = chain.from_iterable(('-' + str(x), str(x)) for x in xrange(100))
    self.assertEqual(
        split_negations(mixed),
        (tuple(map(str, xrange(100))), tuple(map(str, xrange(100)))))
    # converter applied to each extracted value
    mixed = chain.from_iterable(('-' + str(x), str(x)) for x in xrange(100))
    self.assertEqual(
        split_negations(mixed, int),
        (tuple(xrange(100)), tuple(xrange(100))))
def _parse_package_use(self, data):
    """Parse package.use-style lines into an immutable {pkg key: payload} map."""
    grouped = defaultdict(list)
    # split the data down ordered cat/pkg lines
    for line in data:
        tokens = line.split()
        pkg_atom = self.eapi_atom(tokens[0])
        # an atom with no trailing USE flags is malformed
        if len(tokens) == 1:
            raise Exception("malformed line, missing USE flag(s): %r" % (line,))
        grouped[pkg_atom.key].append(
            chunked_data(pkg_atom, *split_negations(tokens[1:])))
    return ImmutableDict(
        (key, _build_cp_atom_payload(entries, atom(key)))
        for key, entries in grouped.iteritems())
def _parse_package_use(self, data):
    """Turn package.use lines into an immutable per-package payload mapping."""
    per_key = defaultdict(list)
    # split the data down ordered cat/pkg lines
    for line in data:
        parts = line.split()
        parsed = self.eapi_atom(parts[0])
        flags = parts[1:]
        if not flags:
            # atom without any USE flags is malformed
            raise Exception("malformed line, missing USE flag(s): %r" % (line, ))
        per_key[parsed.key].append(chunked_data(parsed, *split_negations(flags)))
    return ImmutableDict(
        (key, _build_cp_atom_payload(vals, atom(key)))
        for key, vals in per_key.iteritems())
def _parse_package_use(self, data):
    """Parse package.use lines, logging and skipping malformed entries."""
    pkg_map = defaultdict(list)
    # split the data down ordered cat/pkg lines
    for line in data:
        tokens = line.split()
        try:
            pkg_atom = self.eapi_atom(tokens[0])
        except ebuild_errors.MalformedAtom as e:
            # bad atoms are reported but don't abort the whole parse
            logger.warning(e)
            continue
        if len(tokens) == 1:
            logger.warning(f"malformed line, missing USE flag(s): {line!r}")
            continue
        pkg_map[pkg_atom.key].append(
            misc.chunked_data(pkg_atom, *split_negations(tokens[1:])))
    return ImmutableDict(
        (key, misc._build_cp_atom_payload(entries, atom(key)))
        for key, entries in pkg_map.items())
def _parse_package_use(self, data):
    """Parse annotated package.use lines; report errors with file/line context."""
    collected = defaultdict(list)
    # each entry carries the raw line plus its origin (lineno, relative path)
    for line, lineno, relpath in data:
        fields = line.split()
        try:
            pkg_atom = self.eapi_atom(fields[0])
        except ebuild_errors.MalformedAtom as e:
            logger.error(f'{relpath!r}, line {lineno}: parsing error: {e}')
            continue
        use_flags = fields[1:]
        if not use_flags:
            logger.error(f'{relpath!r}, line {lineno}: missing USE flag(s): {line!r}')
            continue
        collected[pkg_atom.key].append(
            misc.chunked_data(pkg_atom, *split_negations(use_flags)))
    return ImmutableDict(
        (key, misc._build_cp_atom_payload(entries, atom(key)))
        for key, entries in collected.items())
def test_negs(self):
    """All-negation input lands entirely in the first tuple."""
    expected = tuple(str(i) for i in range(100))
    negated = ('-' + str(i) for i in range(100))
    assert split_negations(negated) == (expected, ())
def test_empty(self):
    """Empty input yields a pair of empty tuples."""
    assert split_negations('') == ((), ())
def config_main(options, out, err):
    """Report stale or redundant entries in the domain's package.* config files.

    Scans the domain's pkg_* attribute streams (masks, unmasks, keywords,
    licenses, USE, env) and writes grouped findings — unavailable packages,
    uninstalled packages, unnecessary entries, and duplicate/conflicting/
    unknown USE flags — to *out*.
    """
    domain = options.domain
    installed_repos = domain.all_installed_repos
    all_repos_raw = domain.all_repos_raw
    all_ebuild_repos = domain.all_ebuild_repos
    # proxy to create custom, unfiltered repos
    unfiltered_repos = _UnfilteredRepos(domain)

    def iter_restrict(iterable):
        # drop entries matching the user-supplied exclusion restriction
        for x in iterable:
            restrict = x[0]
            if (options.exclude_restrict is None
                    or not options.exclude_restrict.match(restrict)):
                yield restrict, list(x)

    domain_attrs = (
        'pkg_masks', 'pkg_unmasks', 'pkg_keywords', 'pkg_accept_keywords',
        'pkg_licenses', 'pkg_use', 'pkg_env',
    )

    attrs = {}
    for name in domain_attrs:
        # call jitted attr funcs directly to provide debug data
        func = getattr(domain_cls, name).function
        # filter excluded, matching restricts from the data stream
        attrs[name] = iter_restrict(func(domain, debug=True))

    changes = defaultdict(lambda: defaultdict(list))
    for name, iterable in attrs.items():
        for restrict, item in iterable:
            # debug entries end with (line, lineno, path); pop() order reverses that
            path, lineno, line = item.pop(), item.pop(), item.pop()
            if not all_repos_raw.match(restrict):
                changes['unavailable'][path].append(
                    (line, lineno, str(restrict)))
                continue
            if not installed_repos.match(restrict):
                changes['uninstalled'][path].append(
                    (line, lineno, str(restrict)))
            if name in unfiltered_repos:
                filtered_pkgs = all_ebuild_repos.match(restrict)
                unfiltered_pkgs = unfiltered_repos[name].match(restrict)
                # an entry is unnecessary when removing the filter changes
                # nothing about the matched package set
                if filtered_pkgs == unfiltered_pkgs:
                    changes[f'unnecessary_{name}'][path].append(
                        (line, lineno, str(restrict)))
            elif name == 'pkg_use':
                atom, use = item
                # find duplicates
                use_sets = [set(), set()]
                disabled, enabled = use_sets
                duplicates = set()
                for i, data in enumerate(split_negations(use)):
                    for u in data:
                        if u in use_sets[i]:
                            duplicates.add(u)
                        use_sets[i].add(u)
                if duplicates:
                    changes['duplicate_use'][path].append(
                        (line, lineno, ', '.join(duplicates)))
                # find conflicts
                conflicting = enabled & disabled
                if conflicting:
                    changes['conflicting_use'][path].append(
                        (line, lineno, ', '.join(conflicting)))
                # find unknowns
                pkgs = all_repos_raw.match(atom)
                available = {u for pkg in pkgs for u in pkg.iuse_stripped}
                unknown = (disabled - available) | (enabled - available)
                if unknown:
                    changes['unknown_use'][path].append(
                        (line, lineno, ', '.join(unknown)))

    # human-readable section headers for each finding category
    type_mapping = {
        'unavailable': 'Unavailable package(s)',
        'uninstalled': 'Uninstalled package(s)',
        'unnecessary_pkg_masks': 'Unnecessary mask(s)',
        'unnecessary_pkg_unmasks': 'Unnecessary unmask(s)',
        'unnecessary_pkg_accept_keywords': 'Unnecessary accept keywords(s)',
        'unnecessary_pkg_keywords': 'Unnecessary keywords(s)',
        'duplicate_use': 'Duplicate use flag(s)',
        'conflicting_use': 'Conflicting use flag(s)',
        'unknown_use': 'Nonexistent use flag(s)',
    }

    for t, paths in changes.items():
        out.write(f"{type_mapping[t]}:")
        for path, data in paths.items():
            out.write(f"{path}:", prefix=" ")
            for line, lineno, values in data:
                out.write(f"{values} -- line {lineno}: {line!r}", prefix=" ")
            out.write()
def __call__(self, namespace, out, err):
    """Attach the merged profile USE configuration to *namespace* and chain up."""
    merged = ChunkedDataDict()
    disabled, enabled = split_negations(namespace.profile.use)
    merged.add_bare_global(disabled, enabled)
    merged.merge(namespace.profile.pkg_use)
    namespace.use = merged
    super().__call__(namespace, out, err)
def test_converter(self):
    """The optional converter is applied to every extracted value."""
    mixed = chain.from_iterable(('-' + str(i), str(i)) for i in range(100))
    expected = tuple(range(100))
    assert split_negations(mixed, int) == (expected, expected)
def test_neg_pos(self):
    """Mixed input is partitioned into (negated, plain) tuples."""
    mixed = chain.from_iterable(('-' + str(i), str(i)) for i in range(100))
    expected = tuple(str(i) for i in range(100))
    assert split_negations(mixed) == (expected, expected)
def unmasks(self, data):
    """Split package.unmask entries into (negated, unmasked) atom tuples."""
    parse = self.eapi_atom
    return split_negations(data, parse)
def pkg_use(self, data, debug=False):
    """Return per-package USE data; raw entries when *debug* is set."""
    if debug:
        return tuple(data)
    # deduplicate flags (keeping order) before splitting negations
    return tuple(
        (entry[0], split_negations(stable_unique(entry[1])))
        for entry in data)
def test_pos(self):
    """Input without negations lands entirely in the second tuple."""
    plain = (str(i) for i in range(100))
    expected = tuple(str(i) for i in range(100))
    assert split_negations(plain) == ((), expected)
def __init__(self, profile, repositories, vdb, name=None,
             root='/', prefix='/', incrementals=const.incrementals,
             triggers=(), **settings):
    """Build a configured domain from a profile, repos, a vdb, and settings.

    Collapses/instantiates the repo and vdb configs, reads the package.*
    config files, finalizes incremental variables, stacks USE state, and
    wraps each repo with keyword/license/visibility filters.
    """
    # voodoo, unfortunately (so it goes)
    # break this up into chunks once it's stabilized (most of code
    # here has already, but still more to add)
    self._triggers = triggers
    self.name = name

    # prevent critical variables from being changed in make.conf
    for k in profile.profile_only_variables.intersection(settings.keys()):
        del settings[k]

    if 'CHOST' in settings and 'CBUILD' not in settings:
        settings['CBUILD'] = settings['CHOST']

    # if unset, MAKEOPTS defaults to CPU thread count
    if 'MAKEOPTS' not in settings:
        settings['MAKEOPTS'] = '-j%i' % cpu_count()

    # map out sectionname -> config manager immediately.
    repositories_collapsed = [r.collapse() for r in repositories]
    repositories = [r.instantiate() for r in repositories_collapsed]
    self.fetcher = settings.pop("fetcher")
    self.default_licenses_manager = OverlayedLicenses(*repositories)
    vdb_collapsed = [r.collapse() for r in vdb]
    vdb = [r.instantiate() for r in vdb_collapsed]
    self.repos_raw = {
        collapsed.name: repo for (collapsed, repo) in izip(
            repositories_collapsed, repositories)}
    self.repos_raw.update(
        (collapsed.name, repo) for (collapsed, repo) in izip(
            vdb_collapsed, vdb))
    self.repos_raw.pop(None, None)

    if profile.provides_repo is not None:
        self.repos_raw['package.provided'] = profile.provides_repo
        vdb.append(profile.provides_repo)

    self.profile = profile
    pkg_masks, pkg_unmasks, pkg_keywords, pkg_licenses = [], [], [], []
    pkg_use, self.bashrcs = [], []
    self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

    # read each package.* config file into its accumulator via its parser
    for key, val, action in (
            ("package.mask", pkg_masks, parse_match),
            ("package.unmask", pkg_unmasks, parse_match),
            ("package.keywords", pkg_keywords, package_keywords_splitter),
            ("package.accept_keywords", pkg_keywords, package_keywords_splitter),
            ("package.license", pkg_licenses, package_keywords_splitter),
            ("package.use", pkg_use, package_keywords_splitter),
            ("package.env", self.bashrcs, package_env_splitter),
            ):
        for fp in settings.pop(key, ()):
            try:
                if key == "package.env":
                    base = self.ebuild_hook_dir
                    if base is None:
                        base = os.path.dirname(fp)
                    action = partial(action, base)
                for fs_obj in iter_scan(fp, follow_symlinks=True):
                    # skip non-regular files and hidden path components
                    if not fs_obj.is_reg or '/.' in fs_obj.location:
                        continue
                    val.extend(
                        action(x) for x in
                        iter_read_bash(fs_obj.location, allow_line_cont=True))
            except EnvironmentError as e:
                if e.errno == errno.ENOENT:
                    raise MissingFile(fp, key)
                raise_from(Failure("failed reading '%s': %s" % (fp, e)))
            except ValueError as e:
                raise_from(Failure("failed reading '%s': %s" % (fp, e)))

    # normalize incrementals supplied as whitespace-separated strings
    for x in incrementals:
        if isinstance(settings.get(x), basestring):
            settings[x] = tuple(settings[x].split())

    # roughly... all incremental stacks should be interpreted left -> right
    # as such we start with the profile settings, and append ours onto it.
    for k, v in profile.default_env.iteritems():
        if k not in settings:
            settings[k] = v
            continue
        if k in incrementals:
            settings[k] = v + tuple(settings[k])

    # next we finalize incrementals.
    for incremental in incrementals:
        # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
        # negations currently so that pkg iuse induced enablings can be
        # disabled by negations. For example, think of the profile doing
        # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
        # skipped because negations are required for license filtering.
        if incremental not in settings or incremental in (
                "USE", "ACCEPT_LICENSE"):
            continue
        s = set()
        incremental_expansion(s, settings[incremental],
                              'While expanding %s ' % (incremental, ))
        settings[incremental] = tuple(s)

    # use is collapsed; now stack use_expand.
    use = settings['USE'] = set(
        optimize_incrementals(
            list(settings.get('USE', ())) + os.environ.get('USE', '').split()))

    self._extend_use_for_features(use, settings.get("FEATURES", ()))

    for u in profile.use_expand:
        v = settings.get(u)
        if v is None:
            continue
        u2 = u.lower() + "_"
        use.update(u2 + x for x in v.split())

    if 'ACCEPT_KEYWORDS' not in settings:
        raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                      "or user config")
    s = set()
    default_keywords = []
    incremental_expansion(s, settings['ACCEPT_KEYWORDS'],
                          'while expanding ACCEPT_KEYWORDS')
    default_keywords.extend(s)
    settings['ACCEPT_KEYWORDS'] = set(default_keywords)

    self.use = use

    if "ARCH" not in settings:
        raise Failure(
            "No ARCH setting detected from profile, or user config")

    self.arch = self.stable_arch = settings["ARCH"]
    self.unstable_arch = "~%s" % self.arch

    # ~amd64 -> [amd64, ~amd64]
    for x in default_keywords[:]:
        if x.startswith("~"):
            default_keywords.append(x.lstrip("~"))
    default_keywords = unstable_unique(default_keywords + [self.arch])

    accept_keywords = pkg_keywords + list(profile.accept_keywords)
    vfilters = [
        self.make_keywords_filter(
            self.arch, default_keywords, accept_keywords, profile.keywords,
            incremental="package.keywords" in incrementals)
    ]

    del default_keywords, accept_keywords

    # we can finally close that fricking
    # "DISALLOW NON FOSS LICENSES" bug via this >:)
    master_license = []
    master_license.extend(settings.get('ACCEPT_LICENSE', ()))
    if master_license or pkg_licenses:
        vfilters.append(
            self.make_license_filter(master_license, pkg_licenses))

    del master_license

    # if it's made it this far...
    self.root = settings["ROOT"] = root
    self.prefix = prefix
    self.settings = ProtectedDict(settings)

    for data in self.settings.get('bashrc', ()):
        source = local_source(data)
        # this is currently local-only so a path check is ok
        # TODO make this more general
        if not os.path.exists(source.path):
            raise Failure('user-specified bashrc %r does not exist' %
                          (data, ))
        self.bashrcs.append((packages.AlwaysTrue, source))

    # stack use stuff first, then profile.
    self.enabled_use = ChunkedDataDict()
    self.enabled_use.add_bare_global(*split_negations(self.use))
    self.enabled_use.merge(profile.pkg_use)
    self.enabled_use.update_from_stream(
        chunked_data(k, *split_negations(v)) for k, v in pkg_use)

    for attr in ('', 'stable_'):
        c = ChunkedDataDict()
        c.merge(getattr(profile, attr + 'forced_use'))
        c.add_bare_global((), (self.arch, ))
        setattr(self, attr + 'forced_use', c)

        c = ChunkedDataDict()
        c.merge(getattr(profile, attr + 'masked_use'))
        setattr(self, attr + 'disabled_use', c)

    self.repos = []
    self.vdb = []
    self.repos_configured = {}
    self.repos_configured_filtered = {}

    rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

    profile_masks = profile._incremental_masks()
    profile_unmasks = profile._incremental_unmasks()
    repo_masks = {
        r.repo_id: r._visibility_limiters() for r in repositories
    }

    for l, repos, filtered in ((self.repos, repositories, True),
                               (self.vdb, vdb, False)):
        for repo in repos:
            if not repo.configured:
                pargs = [repo]
                try:
                    for x in repo.configurables:
                        if x == "domain":
                            pargs.append(self)
                        elif x == "settings":
                            pargs.append(settings)
                        elif x == "profile":
                            pargs.append(profile)
                        else:
                            pargs.append(getattr(self, x))
                except AttributeError as ae:
                    raise_from(
                        Failure("failed configuring repo '%s': "
                                "configurable missing: %s" % (repo, ae)))
                wrapped_repo = repo.configure(*pargs)
            else:
                wrapped_repo = repo
            key = rev_names.get(repo)
            self.repos_configured[key] = wrapped_repo
            if filtered:
                config = getattr(repo, 'config', None)
                masters = getattr(config, 'masters', ())
                if masters is None:
                    # tough cookies. If a user has an overlay, no masters
                    # defined, we're not applying the portdir masks.
                    # we do this both since that's annoying, and since
                    # frankly there isn't any good course of action.
                    masters = ()
                global_masks = [
                    repo_masks.get(master, [(), ()]) for master in masters
                ]
                global_masks.append(repo_masks[repo.repo_id])
                global_masks.extend(profile_masks)
                masks = set()
                for neg, pos in global_masks:
                    masks.difference_update(neg)
                    masks.update(pos)
                masks.update(pkg_masks)
                unmasks = set(chain(pkg_unmasks, *profile_unmasks))
                filtered = generate_filter(masks, unmasks, *vfilters)
            if filtered:
                wrapped_repo = visibility.filterTree(
                    wrapped_repo, filtered, True)
            self.repos_configured_filtered[key] = wrapped_repo
            l.append(wrapped_repo)

    self.use_expand_re = re.compile(
        "^(?:[+-])?(%s)_(.*)$" % "|".join(x.lower()
                                          for x in sorted(profile.use_expand,
                                                          reverse=True)))
def pkg_provided(self, data):
    """Split package.provided entries into (negated, provided) versioned CPVs."""
    parse = cpv.versioned_CPV
    return split_negations(data, parse)
def __init__(self, profile, repositories, vdb, name=None,
             root='/', prefix='/', incrementals=const.incrementals,
             triggers=(), **settings):
    """Build a configured domain from a profile, repos, a vdb, and settings.

    Instantiates the collapsed repo/vdb configs, loads the package.* config
    files, finalizes incremental variables, stacks the USE configuration,
    and wraps each repo in keyword/license/visibility filters.
    """
    # voodoo, unfortunately (so it goes)
    # break this up into chunks once it's stabilized (most of code
    # here has already, but still more to add)
    self._triggers = triggers
    self.name = name

    # prevent critical variables from being changed in make.conf
    for k in profile.profile_only_variables.intersection(settings.keys()):
        del settings[k]

    if 'CHOST' in settings and 'CBUILD' not in settings:
        settings['CBUILD'] = settings['CHOST']

    # if unset, MAKEOPTS defaults to CPU thread count
    if 'MAKEOPTS' not in settings:
        settings['MAKEOPTS'] = '-j%i' % cpu_count()

    # map out sectionname -> config manager immediately.
    repositories_collapsed = [r.collapse() for r in repositories]
    repositories = [r.instantiate() for r in repositories_collapsed]
    self.fetcher = settings.pop("fetcher")
    self.default_licenses_manager = OverlayedLicenses(*repositories)
    vdb_collapsed = [r.collapse() for r in vdb]
    vdb = [r.instantiate() for r in vdb_collapsed]
    self.repos_raw = {
        collapsed.name: repo for (collapsed, repo) in izip(
            repositories_collapsed, repositories)}
    self.repos_raw.update(
        (collapsed.name, repo) for (collapsed, repo) in izip(
            vdb_collapsed, vdb))
    self.repos_raw.pop(None, None)

    if profile.provides_repo is not None:
        self.repos_raw['package.provided'] = profile.provides_repo
        vdb.append(profile.provides_repo)

    self.profile = profile
    pkg_masks, pkg_unmasks, pkg_keywords, pkg_licenses = [], [], [], []
    pkg_use, self.bashrcs = [], []
    self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

    # slurp each package.* config file through its line parser into its
    # accumulator list
    for key, val, action in (
            ("package.mask", pkg_masks, parse_match),
            ("package.unmask", pkg_unmasks, parse_match),
            ("package.keywords", pkg_keywords, package_keywords_splitter),
            ("package.accept_keywords", pkg_keywords, package_keywords_splitter),
            ("package.license", pkg_licenses, package_keywords_splitter),
            ("package.use", pkg_use, package_keywords_splitter),
            ("package.env", self.bashrcs, package_env_splitter),
            ):
        for fp in settings.pop(key, ()):
            try:
                if key == "package.env":
                    base = self.ebuild_hook_dir
                    if base is None:
                        base = os.path.dirname(fp)
                    action = partial(action, base)
                for fs_obj in iter_scan(fp, follow_symlinks=True):
                    # skip non-regular files and hidden path components
                    if not fs_obj.is_reg or '/.' in fs_obj.location:
                        continue
                    val.extend(
                        action(x) for x in
                        iter_read_bash(fs_obj.location, allow_line_cont=True))
            except EnvironmentError as e:
                if e.errno == errno.ENOENT:
                    raise MissingFile(fp, key)
                raise_from(Failure("failed reading '%s': %s" % (fp, e)))
            except ValueError as e:
                raise_from(Failure("failed reading '%s': %s" % (fp, e)))

    # normalize incrementals supplied as whitespace-separated strings
    for x in incrementals:
        if isinstance(settings.get(x), basestring):
            settings[x] = tuple(settings[x].split())

    # roughly... all incremental stacks should be interpreted left -> right
    # as such we start with the profile settings, and append ours onto it.
    for k, v in profile.default_env.iteritems():
        if k not in settings:
            settings[k] = v
            continue
        if k in incrementals:
            settings[k] = v + tuple(settings[k])

    # next we finalize incrementals.
    for incremental in incrementals:
        # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
        # negations currently so that pkg iuse induced enablings can be
        # disabled by negations. For example, think of the profile doing
        # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
        # skipped because negations are required for license filtering.
        if incremental not in settings or incremental in ("USE", "ACCEPT_LICENSE"):
            continue
        s = set()
        incremental_expansion(
            s, settings[incremental],
            'While expanding %s ' % (incremental,))
        settings[incremental] = tuple(s)

    # use is collapsed; now stack use_expand.
    use = settings['USE'] = set(optimize_incrementals(
        list(settings.get('USE', ())) + os.environ.get('USE', '').split()))

    self._extend_use_for_features(use, settings.get("FEATURES", ()))

    for u in profile.use_expand:
        v = settings.get(u)
        if v is None:
            continue
        u2 = u.lower()+"_"
        use.update(u2 + x for x in v.split())

    if 'ACCEPT_KEYWORDS' not in settings:
        raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                      "or user config")
    s = set()
    default_keywords = []
    incremental_expansion(
        s, settings['ACCEPT_KEYWORDS'],
        'while expanding ACCEPT_KEYWORDS')
    default_keywords.extend(s)
    settings['ACCEPT_KEYWORDS'] = set(default_keywords)

    self.use = use

    if "ARCH" not in settings:
        raise Failure(
            "No ARCH setting detected from profile, or user config")

    self.arch = self.stable_arch = settings["ARCH"]
    self.unstable_arch = "~%s" % self.arch

    # ~amd64 -> [amd64, ~amd64]
    for x in default_keywords[:]:
        if x.startswith("~"):
            default_keywords.append(x.lstrip("~"))
    default_keywords = unstable_unique(default_keywords + [self.arch])

    accept_keywords = pkg_keywords + list(profile.accept_keywords)
    vfilters = [self.make_keywords_filter(
        self.arch, default_keywords, accept_keywords, profile.keywords,
        incremental="package.keywords" in incrementals)]

    del default_keywords, accept_keywords

    # we can finally close that fricking
    # "DISALLOW NON FOSS LICENSES" bug via this >:)
    master_license = []
    master_license.extend(settings.get('ACCEPT_LICENSE', ()))
    if master_license or pkg_licenses:
        vfilters.append(self.make_license_filter(master_license, pkg_licenses))

    del master_license

    # if it's made it this far...
    self.root = settings["ROOT"] = root
    self.prefix = prefix
    self.settings = ProtectedDict(settings)

    for data in self.settings.get('bashrc', ()):
        source = local_source(data)
        # this is currently local-only so a path check is ok
        # TODO make this more general
        if not os.path.exists(source.path):
            raise Failure(
                'user-specified bashrc %r does not exist' % (data,))
        self.bashrcs.append((packages.AlwaysTrue, source))

    # stack use stuff first, then profile.
    self.enabled_use = ChunkedDataDict()
    self.enabled_use.add_bare_global(*split_negations(self.use))
    self.enabled_use.merge(profile.pkg_use)
    self.enabled_use.update_from_stream(
        chunked_data(k, *split_negations(v)) for k, v in pkg_use)

    for attr in ('', 'stable_'):
        c = ChunkedDataDict()
        c.merge(getattr(profile, attr + 'forced_use'))
        c.add_bare_global((), (self.arch,))
        setattr(self, attr + 'forced_use', c)

        c = ChunkedDataDict()
        c.merge(getattr(profile, attr + 'masked_use'))
        setattr(self, attr + 'disabled_use', c)

    self.repos = []
    self.vdb = []
    self.repos_configured = {}
    self.repos_configured_filtered = {}

    rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

    profile_masks = profile._incremental_masks()
    profile_unmasks = profile._incremental_unmasks()
    repo_masks = {r.repo_id: r._visibility_limiters() for r in repositories}

    for l, repos, filtered in ((self.repos, repositories, True),
                               (self.vdb, vdb, False)):
        for repo in repos:
            if not repo.configured:
                pargs = [repo]
                try:
                    for x in repo.configurables:
                        if x == "domain":
                            pargs.append(self)
                        elif x == "settings":
                            pargs.append(settings)
                        elif x == "profile":
                            pargs.append(profile)
                        else:
                            pargs.append(getattr(self, x))
                except AttributeError as ae:
                    raise_from(Failure("failed configuring repo '%s': "
                                       "configurable missing: %s" % (repo, ae)))
                wrapped_repo = repo.configure(*pargs)
            else:
                wrapped_repo = repo
            key = rev_names.get(repo)
            self.repos_configured[key] = wrapped_repo
            if filtered:
                config = getattr(repo, 'config', None)
                masters = getattr(config, 'masters', ())
                if masters is None:
                    # tough cookies. If a user has an overlay, no masters
                    # defined, we're not applying the portdir masks.
                    # we do this both since that's annoying, and since
                    # frankly there isn't any good course of action.
                    masters = ()
                global_masks = [repo_masks.get(master, [(), ()])
                                for master in masters]
                global_masks.append(repo_masks[repo.repo_id])
                global_masks.extend(profile_masks)
                masks = set()
                for neg, pos in global_masks:
                    masks.difference_update(neg)
                    masks.update(pos)
                masks.update(pkg_masks)
                unmasks = set(chain(pkg_unmasks, *profile_unmasks))
                filtered = generate_filter(masks, unmasks, *vfilters)
            if filtered:
                wrapped_repo = visibility.filterTree(wrapped_repo, filtered, True)
            self.repos_configured_filtered[key] = wrapped_repo
            l.append(wrapped_repo)

    self.use_expand_re = re.compile(
        "^(?:[+-])?(%s)_(.*)$" %
        "|".join(x.lower() for x in sorted(profile.use_expand, reverse=True)))