def __init__(self, profile, repositories, vdb, name=None,
             root='/', prefix='/', incrementals=const.incrementals,
             triggers=(), **settings):
    # voodoo, unfortunately (so it goes)
    # break this up into chunks once it's stabilized (most of code
    # here has already, but still more to add)
    self._triggers = triggers
    self.name = name

    # prevent critical variables from being changed in make.conf
    for k in profile.profile_only_variables.intersection(settings.keys()):
        del settings[k]

    if 'CHOST' in settings and 'CBUILD' not in settings:
        settings['CBUILD'] = settings['CHOST']

    # if unset, MAKEOPTS defaults to CPU thread count
    if 'MAKEOPTS' not in settings:
        settings['MAKEOPTS'] = '-j%i' % cpu_count()

    # map out sectionname -> config manager immediately.
    repositories_collapsed = [r.collapse() for r in repositories]
    repositories = [r.instantiate() for r in repositories_collapsed]

    self.fetcher = settings.pop("fetcher")
    self.default_licenses_manager = OverlayedLicenses(*repositories)

    vdb_collapsed = [r.collapse() for r in vdb]
    vdb = [r.instantiate() for r in vdb_collapsed]
    self.repos_raw = {
        collapsed.name: repo for (collapsed, repo) in izip(
            repositories_collapsed, repositories)}
    self.repos_raw.update(
        (collapsed.name, repo) for (collapsed, repo) in izip(
            vdb_collapsed, vdb))
    self.repos_raw.pop(None, None)

    if profile.provides_repo is not None:
        self.repos_raw['package.provided'] = profile.provides_repo
        vdb.append(profile.provides_repo)

    self.profile = profile

    pkg_masks, pkg_unmasks, pkg_keywords, pkg_licenses = [], [], [], []
    pkg_use, self.bashrcs = [], []

    self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

    for key, val, action in (
            ("package.mask", pkg_masks, parse_match),
            ("package.unmask", pkg_unmasks, parse_match),
            ("package.keywords", pkg_keywords, package_keywords_splitter),
            ("package.accept_keywords", pkg_keywords, package_keywords_splitter),
            ("package.license", pkg_licenses, package_keywords_splitter),
            ("package.use", pkg_use, package_keywords_splitter),
            ("package.env", self.bashrcs, package_env_splitter),
            ):
        for fp in settings.pop(key, ()):
            try:
                if key == "package.env":
                    base = self.ebuild_hook_dir
                    if base is None:
                        base = os.path.dirname(fp)
                    action = partial(action, base)
                for fs_obj in iter_scan(fp, follow_symlinks=True):
                    if not fs_obj.is_reg or '/.' in fs_obj.location:
                        continue
                    val.extend(
                        action(x) for x in
                        iter_read_bash(fs_obj.location, allow_line_cont=True))
            except EnvironmentError as e:
                if e.errno == errno.ENOENT:
                    raise MissingFile(fp, key)
                raise_from(Failure("failed reading '%s': %s" % (fp, e)))
            except ValueError as e:
                raise_from(Failure("failed reading '%s': %s" % (fp, e)))

    for x in incrementals:
        if isinstance(settings.get(x), basestring):
            settings[x] = tuple(settings[x].split())

    # roughly... all incremental stacks should be interpreted left -> right
    # as such we start with the profile settings, and append ours onto it.
    for k, v in profile.default_env.iteritems():
        if k not in settings:
            settings[k] = v
            continue
        if k in incrementals:
            settings[k] = v + tuple(settings[k])

    # next we finalize incrementals.
    for incremental in incrementals:
        # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
        # negations currently so that pkg iuse induced enablings can be
        # disabled by negations. For example, think of the profile doing
        # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
        # skipped because negations are required for license filtering.
        if incremental not in settings or incremental in ("USE", "ACCEPT_LICENSE"):
            continue
        s = set()
        incremental_expansion(
            s, settings[incremental],
            'While expanding %s ' % (incremental,))
        settings[incremental] = tuple(s)

    # use is collapsed; now stack use_expand.
    use = settings['USE'] = set(optimize_incrementals(
        list(settings.get('USE', ())) + os.environ.get('USE', '').split()))

    self._extend_use_for_features(use, settings.get("FEATURES", ()))

    for u in profile.use_expand:
        v = settings.get(u)
        if v is None:
            continue
        u2 = u.lower() + "_"
        use.update(u2 + x for x in v.split())

    if 'ACCEPT_KEYWORDS' not in settings:
        raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                      "or user config")
    s = set()
    default_keywords = []
    incremental_expansion(
        s, settings['ACCEPT_KEYWORDS'],
        'while expanding ACCEPT_KEYWORDS')
    default_keywords.extend(s)
    settings['ACCEPT_KEYWORDS'] = set(default_keywords)

    self.use = use

    if "ARCH" not in settings:
        raise Failure(
            "No ARCH setting detected from profile, or user config")

    self.arch = self.stable_arch = settings["ARCH"]
    self.unstable_arch = "~%s" % self.arch

    # ~amd64 -> [amd64, ~amd64]
    for x in default_keywords[:]:
        if x.startswith("~"):
            default_keywords.append(x.lstrip("~"))
    default_keywords = unstable_unique(default_keywords + [self.arch])

    accept_keywords = pkg_keywords + list(profile.accept_keywords)
    vfilters = [self.make_keywords_filter(
        self.arch, default_keywords, accept_keywords, profile.keywords,
        incremental="package.keywords" in incrementals)]

    del default_keywords, accept_keywords

    # we can finally close that fricking
    # "DISALLOW NON FOSS LICENSES" bug via this >:)
    master_license = []
    master_license.extend(settings.get('ACCEPT_LICENSE', ()))
    if master_license or pkg_licenses:
        vfilters.append(self.make_license_filter(master_license, pkg_licenses))

    del master_license

    # if it's made it this far...
    self.root = settings["ROOT"] = root
    self.prefix = prefix
    self.settings = ProtectedDict(settings)

    for data in self.settings.get('bashrc', ()):
        source = local_source(data)
        # this is currently local-only so a path check is ok
        # TODO make this more general
        if not os.path.exists(source.path):
            raise Failure(
                'user-specified bashrc %r does not exist' % (data,))
        self.bashrcs.append((packages.AlwaysTrue, source))

    # stack use stuff first, then profile.
    self.enabled_use = ChunkedDataDict()
    self.enabled_use.add_bare_global(*split_negations(self.use))
    self.enabled_use.merge(profile.pkg_use)
    self.enabled_use.update_from_stream(
        chunked_data(k, *split_negations(v)) for k, v in pkg_use)

    for attr in ('', 'stable_'):
        c = ChunkedDataDict()
        c.merge(getattr(profile, attr + 'forced_use'))
        c.add_bare_global((), (self.arch,))
        setattr(self, attr + 'forced_use', c)

        c = ChunkedDataDict()
        c.merge(getattr(profile, attr + 'masked_use'))
        setattr(self, attr + 'disabled_use', c)

    self.repos = []
    self.vdb = []
    self.repos_configured = {}
    self.repos_configured_filtered = {}

    rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

    profile_masks = profile._incremental_masks()
    profile_unmasks = profile._incremental_unmasks()
    repo_masks = {r.repo_id: r._visibility_limiters() for r in repositories}

    for l, repos, filtered in ((self.repos, repositories, True),
                               (self.vdb, vdb, False)):
        for repo in repos:
            if not repo.configured:
                pargs = [repo]
                try:
                    for x in repo.configurables:
                        if x == "domain":
                            pargs.append(self)
                        elif x == "settings":
                            pargs.append(settings)
                        elif x == "profile":
                            pargs.append(profile)
                        else:
                            pargs.append(getattr(self, x))
                except AttributeError as ae:
                    raise_from(Failure("failed configuring repo '%s': "
                                       "configurable missing: %s" % (repo, ae)))
                wrapped_repo = repo.configure(*pargs)
            else:
                wrapped_repo = repo
            key = rev_names.get(repo)
            self.repos_configured[key] = wrapped_repo
            if filtered:
                config = getattr(repo, 'config', None)
                masters = getattr(config, 'masters', ())
                if masters is None:
                    # tough cookies. If a user has an overlay, no masters
                    # defined, we're not applying the portdir masks.
                    # we do this both since that's annoying, and since
                    # frankly there isn't any good course of action.
                    masters = ()
                global_masks = [repo_masks.get(master, [(), ()]) for master in masters]
                global_masks.append(repo_masks[repo.repo_id])
                global_masks.extend(profile_masks)
                masks = set()
                for neg, pos in global_masks:
                    masks.difference_update(neg)
                    masks.update(pos)
                masks.update(pkg_masks)
                unmasks = set(chain(pkg_unmasks, *profile_unmasks))
                filtered = generate_filter(masks, unmasks, *vfilters)
            if filtered:
                wrapped_repo = visibility.filterTree(wrapped_repo, filtered, True)
            self.repos_configured_filtered[key] = wrapped_repo
            l.append(wrapped_repo)

    self.use_expand_re = re.compile(
        "^(?:[+-])?(%s)_(.*)$" %
        "|".join(x.lower() for x in sorted(profile.use_expand, reverse=True)))
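# Hedged sketch, not pkgcore's implementation: the incremental handling above
# (profile defaults first, then make.conf, expanded left to right with
# negations) behaves roughly like the standalone helper below. The name
# stack_incrementals and the exact "-*" semantics are illustrative
# assumptions, not pkgcore API.
def stack_incrementals(*layers):
    """Collapse incremental layers (e.g. profile, then make.conf) left to right."""
    result = set()
    for layer in layers:
        for token in layer:
            if token == "-*":
                result.clear()             # wipe everything accumulated so far
            elif token.startswith("-"):
                result.discard(token[1:])  # a negation removes a prior token
            else:
                result.add(token)
    return result

# e.g. profile USE followed by make.conf USE:
# stack_incrementals(["X", "alsa", "cdr"], ["-cdr", "vim-syntax"])
# -> {"X", "alsa", "vim-syntax"}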
def __init__(self, profile, repositories, vdb, name=None,
             root='/', prefix='/', incrementals=const.incrementals,
             triggers=(), **settings):
    # voodoo, unfortunately (so it goes)
    # break this up into chunks once it's stabilized (most of code
    # here has already, but still more to add)
    self._triggers = triggers

    # prevent critical variables from being changed by the user in make.conf
    for k in set(profile.profile_only_variables).intersection(settings.keys()):
        del settings[k]

    if 'CHOST' in settings and 'CBUILD' not in settings:
        settings['CBUILD'] = settings['CHOST']

    # map out sectionname -> config manager immediately.
    repositories_collapsed = [r.collapse() for r in repositories]
    repositories = [r.instantiate() for r in repositories_collapsed]

    self.fetcher = settings.pop("fetcher")
    self.default_licenses_manager = OverlayedLicenses(*repositories)

    vdb_collapsed = [r.collapse() for r in vdb]
    vdb = [r.instantiate() for r in vdb_collapsed]
    self.repos_raw = {
        collapsed.name: repo for (collapsed, repo) in izip(
            repositories_collapsed, repositories)}
    self.repos_raw.update(
        (collapsed.name, repo) for (collapsed, repo) in izip(
            vdb_collapsed, vdb))
    self.repos_raw.pop(None, None)

    if profile.provides_repo is not None:
        self.repos_raw['package.provided'] = profile.provides_repo
        vdb.append(profile.provides_repo)

    self.profile = profile

    pkg_maskers, pkg_unmaskers, pkg_keywords, pkg_licenses = [], [], [], []
    pkg_use, self.bashrcs = [], []

    self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

    for key, val, action in (
            ("package.mask", pkg_maskers, parse_match),
            ("package.unmask", pkg_unmaskers, parse_match),
            ("package.keywords", pkg_keywords, package_keywords_splitter),
            ("package.accept_keywords", pkg_keywords, package_keywords_splitter),
            ("package.license", pkg_licenses, package_keywords_splitter),
            ("package.use", pkg_use, package_keywords_splitter),
            ("package.env", self.bashrcs, package_env_splitter),
            ):
        for fp in settings.pop(key, ()):
            try:
                if key == "package.env":
                    base = self.ebuild_hook_dir
                    if base is None:
                        base = os.path.dirname(fp)
                    action = partial(action, base)
                for fs_obj in iter_scan(fp, follow_symlinks=True):
                    if not fs_obj.is_reg or '/.' in fs_obj.location:
                        continue
                    val.extend(
                        action(x) for x in iter_read_bash(fs_obj.location))
            except EnvironmentError as e:
                if e.errno == errno.ENOENT:
                    raise MissingFile(fp, key)
                raise_from(Failure("failed reading '%s': %s" % (fp, e)))
            except ValueError as e:
                raise_from(Failure("failed reading '%s': %s" % (fp, e)))

    self.name = name
    settings.setdefault("PKGCORE_DOMAIN", name)

    for x in incrementals:
        if isinstance(settings.get(x), basestring):
            settings[x] = tuple(settings[x].split())

    # roughly... all incremental stacks should be interpreted left -> right
    # as such we start with the profile settings, and append ours onto it.
    for k, v in profile.default_env.iteritems():
        if k not in settings:
            settings[k] = v
            continue
        if k in incrementals:
            settings[k] = v + tuple(settings[k])

    # next we finalize incrementals.
    for incremental in incrementals:
        # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
        # negations currently so that pkg iuse induced enablings can be
        # disabled by negations. For example, think of the profile doing
        # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
        # skipped because negations are required for license filtering.
        if incremental not in settings or incremental in ("USE", "ACCEPT_LICENSE"):
            continue
        s = set()
        incremental_expansion(
            s, settings[incremental],
            'While expanding %s ' % (incremental,))
        settings[incremental] = tuple(s)

    # use is collapsed; now stack use_expand.
    use = settings['USE'] = set(optimize_incrementals(settings.get("USE", ())))

    self._extend_use_for_features(use, settings.get("FEATURES", ()))

    self.use_expand = frozenset(profile.use_expand)
    self.use_expand_hidden = frozenset(profile.use_expand_hidden)
    for u in profile.use_expand:
        v = settings.get(u)
        if v is None:
            continue
        u2 = u.lower() + "_"
        use.update(u2 + x for x in v.split())

    if 'ACCEPT_KEYWORDS' not in settings:
        raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                      "or user config")
    s = set()
    default_keywords = []
    incremental_expansion(
        s, settings['ACCEPT_KEYWORDS'],
        'while expanding ACCEPT_KEYWORDS')
    default_keywords.extend(s)
    settings['ACCEPT_KEYWORDS'] = set(default_keywords)

    self.use = use

    if "ARCH" not in settings:
        raise Failure(
            "No ARCH setting detected from profile, or user config")

    self.arch = self.stable_arch = settings["ARCH"]
    self.unstable_arch = "~%s" % self.arch

    # ~amd64 -> [amd64, ~amd64]
    for x in default_keywords[:]:
        if x.startswith("~"):
            default_keywords.append(x.lstrip("~"))
    default_keywords = unstable_unique(default_keywords + [self.arch])

    accept_keywords = pkg_keywords + list(profile.accept_keywords)
    vfilters = [self.make_keywords_filter(
        self.arch, default_keywords, accept_keywords, profile.keywords,
        incremental="package.keywords" in incrementals)]

    del default_keywords, accept_keywords

    # we can finally close that fricking
    # "DISALLOW NON FOSS LICENSES" bug via this >:)
    master_license = []
    master_license.extend(settings.get('ACCEPT_LICENSE', ()))
    if master_license or pkg_licenses:
        vfilters.append(self.make_license_filter(master_license, pkg_licenses))

    del master_license

    # if it's made it this far...
    self.root = settings["ROOT"] = root
    self.prefix = prefix
    self.settings = ProtectedDict(settings)

    for data in self.settings.get('bashrc', ()):
        source = local_source(data)
        # this is currently local-only so a path check is ok
        # TODO make this more general
        if not os.path.exists(source.path):
            raise Failure(
                'user-specified bashrc %r does not exist' % (data,))
        self.bashrcs.append((packages.AlwaysTrue, source))

    # stack use stuff first, then profile.
    self.enabled_use = ChunkedDataDict()
    self.enabled_use.add_bare_global(*split_negations(self.use))
    self.enabled_use.merge(profile.pkg_use)
    self.enabled_use.update_from_stream(
        chunked_data(k, *split_negations(v)) for k, v in pkg_use)

    for attr in ('', 'stable_'):
        c = ChunkedDataDict()
        c.merge(getattr(profile, attr + 'forced_use'))
        c.add_bare_global((), (self.arch,))
        setattr(self, attr + 'forced_use', c)

        c = ChunkedDataDict()
        c.merge(getattr(profile, attr + 'masked_use'))
        setattr(self, attr + 'disabled_use', c)

    self.repos = []
    self.vdb = []
    self.repos_configured = {}
    self.repos_configured_filtered = {}

    rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

    profile_masks = profile._incremental_masks()
    profile_unmasks = profile._incremental_unmasks()
    repo_masks = {r.repo_id: r._visibility_limiters() for r in repositories}

    for l, repos, filtered in ((self.repos, repositories, True),
                               (self.vdb, vdb, False)):
        for repo in repos:
            if not repo.configured:
                pargs = [repo]
                try:
                    for x in repo.configurables:
                        if x == "domain":
                            pargs.append(self)
                        elif x == "settings":
                            pargs.append(settings)
                        elif x == "profile":
                            pargs.append(profile)
                        else:
                            pargs.append(getattr(self, x))
                except AttributeError as ae:
                    raise_from(Failure("failed configuring repo '%s': "
                                       "configurable missing: %s" % (repo, ae)))
                wrapped_repo = repo.configure(*pargs)
            else:
                wrapped_repo = repo
            key = rev_names.get(repo)
            self.repos_configured[key] = wrapped_repo
            if filtered:
                config = getattr(repo, 'config', None)
                masters = getattr(config, 'masters', ())
                if masters is None:
                    # tough cookies. If a user has an overlay, no masters
                    # defined, we're not applying the portdir masks.
                    # we do this both since that's annoying, and since
                    # frankly there isn't any good course of action.
                    masters = ()
                masks = [repo_masks.get(master, [(), ()]) for master in masters]
                masks.append(repo_masks[repo.repo_id])
                masks.extend(profile_masks)
                mask_atoms = set()
                for neg, pos in masks:
                    mask_atoms.difference_update(neg)
                    mask_atoms.update(pos)
                mask_atoms.update(pkg_maskers)
                unmask_atoms = set(chain(pkg_unmaskers, *profile_unmasks))
                filtered = self.generate_filter(
                    generate_masking_restrict(mask_atoms),
                    generate_unmasking_restrict(unmask_atoms),
                    *vfilters)
            if filtered:
                wrapped_repo = visibility.filterTree(wrapped_repo, filtered, True)
            self.repos_configured_filtered[key] = wrapped_repo
            l.append(wrapped_repo)

    if profile.virtuals:
        l = [x for x in
             (getattr(v, 'old_style_virtuals', None) for v in self.vdb)
             if x is not None]
        profile_repo = profile.make_virtuals_repo(
            multiplex.tree(*repositories), *l)
        self.repos_raw["profile virtuals"] = profile_repo
        self.repos_configured_filtered["profile virtuals"] = profile_repo
        self.repos_configured["profile virtuals"] = profile_repo
        self.repos = [profile_repo] + self.repos

    self.use_expand_re = re.compile(
        "^(?:[+-])?(%s)_(.*)$" %
        "|".join(x.lower() for x in sorted(self.use_expand, reverse=True)))
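# Hedged sketch of the keyword acceptance encoded by the vfilters above: a
# package is visible when its KEYWORDS intersect the accepted set, where
# accepting "~arch" also implies accepting "arch" (the default_keywords
# expansion in the constructor). accepts_keywords is an illustrative helper,
# not the restriction object returned by make_keywords_filter().
def accepts_keywords(pkg_keywords, accept_keywords):
    accepted = set(accept_keywords)
    for kw in list(accepted):
        if kw.startswith("~"):
            accepted.add(kw.lstrip("~"))   # ~arch acceptance implies the stable arch
    return bool(accepted.intersection(pkg_keywords))

# accepts_keywords(["~amd64"], ["amd64"])   -> False (stable-only config)
# accepts_keywords(["~amd64"], ["~amd64"])  -> True
# accepts_keywords(["amd64"],  ["~amd64"])  -> True (~amd64 implies amd64)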
def __init__(self, *args, arches_addon=None):
    super().__init__(*args)
    target_repo = self.options.target_repo

    self.official_arches = target_repo.known_arches
    self.desired_arches = getattr(self.options, 'arches', None)
    if self.desired_arches is None or self.options.selected_arches is None:
        # copy it to be safe
        self.desired_arches = set(self.official_arches)

    self.global_insoluble = set()
    profile_filters = defaultdict(list)
    chunked_data_cache = {}
    cached_profiles = defaultdict(dict)

    if self.options.cache['profiles']:
        for repo in target_repo.trees:
            cache_file = self.cache_file(repo)
            # add profiles-base -> repo mapping to ease storage procedure
            cached_profiles[repo.config.profiles_base]['repo'] = repo
            try:
                with open(cache_file, 'rb') as f:
                    cache = pickle.load(f)
                if cache.version == self.cache.version:
                    cached_profiles[repo.config.profiles_base].update(cache)
                else:
                    logger.debug(
                        'forcing %s profile cache regen '
                        'due to outdated version', repo.repo_id)
                    os.remove(cache_file)
            except FileNotFoundError:
                pass
            except (AttributeError, EOFError, ImportError, IndexError) as e:
                logger.debug('forcing %s profile cache regen: %s', repo.repo_id, e)
                os.remove(cache_file)

    for k in sorted(self.desired_arches):
        if k.lstrip("~") not in self.desired_arches:
            continue
        stable_key = k.lstrip("~")
        unstable_key = "~" + stable_key
        stable_r = packages.PackageRestriction(
            "keywords", values.ContainmentMatch2((stable_key,)))
        unstable_r = packages.PackageRestriction(
            "keywords", values.ContainmentMatch2((stable_key, unstable_key)))

        default_masked_use = tuple(
            set(x for x in self.official_arches if x != stable_key))

        for profile_obj, profile in self.options.arch_profiles.get(k, []):
            files = self.profile_data.get(profile, None)
            try:
                cached_profile = cached_profiles[profile.base][profile.path]
                if files != cached_profile['files']:
                    # force refresh of outdated cache entry
                    raise KeyError

                masks = cached_profile['masks']
                unmasks = cached_profile['unmasks']
                immutable_flags = cached_profile['immutable_flags']
                stable_immutable_flags = cached_profile['stable_immutable_flags']
                enabled_flags = cached_profile['enabled_flags']
                stable_enabled_flags = cached_profile['stable_enabled_flags']
                pkg_use = cached_profile['pkg_use']
                iuse_effective = cached_profile['iuse_effective']
                use = cached_profile['use']
                provides_repo = cached_profile['provides_repo']
            except KeyError:
                logger.debug('profile regen: %s', profile.path)
                try:
                    masks = profile_obj.masks
                    unmasks = profile_obj.unmasks

                    immutable_flags = profile_obj.masked_use.clone(unfreeze=True)
                    immutable_flags.add_bare_global((), default_masked_use)
                    immutable_flags.optimize(cache=chunked_data_cache)
                    immutable_flags.freeze()

                    stable_immutable_flags = profile_obj.stable_masked_use.clone(unfreeze=True)
                    stable_immutable_flags.add_bare_global((), default_masked_use)
                    stable_immutable_flags.optimize(cache=chunked_data_cache)
                    stable_immutable_flags.freeze()

                    enabled_flags = profile_obj.forced_use.clone(unfreeze=True)
                    enabled_flags.add_bare_global((), (stable_key,))
                    enabled_flags.optimize(cache=chunked_data_cache)
                    enabled_flags.freeze()

                    stable_enabled_flags = profile_obj.stable_forced_use.clone(unfreeze=True)
                    stable_enabled_flags.add_bare_global((), (stable_key,))
                    stable_enabled_flags.optimize(cache=chunked_data_cache)
                    stable_enabled_flags.freeze()

                    pkg_use = profile_obj.pkg_use
                    iuse_effective = profile_obj.iuse_effective
                    provides_repo = profile_obj.provides_repo

                    # finalize enabled USE flags
                    use = set()
                    misc.incremental_expansion(use, profile_obj.use, 'while expanding USE')
                    use = frozenset(use)
                except profiles_mod.ProfileError:
                    # unsupported EAPI or other issue, profile checks will catch this
                    continue

                if self.options.cache['profiles']:
                    cached_profiles[profile.base]['update'] = True
                    cached_profiles[profile.base][profile.path] = {
                        'files': files,
                        'masks': masks,
                        'unmasks': unmasks,
                        'immutable_flags': immutable_flags,
                        'stable_immutable_flags': stable_immutable_flags,
                        'enabled_flags': enabled_flags,
                        'stable_enabled_flags': stable_enabled_flags,
                        'pkg_use': pkg_use,
                        'iuse_effective': iuse_effective,
                        'use': use,
                        'provides_repo': provides_repo,
                    }

            # used to interlink stable/unstable lookups so that if
            # unstable says it's not visible, stable doesn't try;
            # if stable says something is visible, unstable doesn't try.
            stable_cache = set()
            unstable_insoluble = ProtectedSet(self.global_insoluble)

            # a few notes: for the filter, ensure keywords is last, on the
            # off chance a non-metadata based restrict forgoes having to
            # access the metadata.
            # note that the cache/insoluble are inversely paired;
            # stable cache is usable for unstable, but not vice versa.
            # unstable insoluble is usable for stable, but not vice versa.
            vfilter = domain.generate_filter(target_repo.pkg_masks | masks, unmasks)
            profile_filters[stable_key].append(ProfileData(
                profile.path, stable_key, provides_repo,
                packages.AndRestriction(vfilter, stable_r),
                iuse_effective, use, pkg_use,
                stable_immutable_flags, stable_enabled_flags,
                stable_cache, ProtectedSet(unstable_insoluble),
                profile.status, profile.deprecated))
            profile_filters[unstable_key].append(ProfileData(
                profile.path, unstable_key, provides_repo,
                packages.AndRestriction(vfilter, unstable_r),
                iuse_effective, use, pkg_use,
                immutable_flags, enabled_flags,
                ProtectedSet(stable_cache), unstable_insoluble,
                profile.status, profile.deprecated))

    # dump updated profile filters
    for k, v in cached_profiles.items():
        if v.pop('update', False):
            repo = v.pop('repo')
            cache_file = self.cache_file(repo)
            try:
                os.makedirs(os.path.dirname(cache_file), exist_ok=True)
                with open(cache_file, 'wb+') as f:
                    pickle.dump(_ProfilesCache(
                        cached_profiles[repo.config.profiles_base]), f)
            except IOError as e:
                msg = (f'failed dumping {repo.repo_id} profiles cache: '
                       f'{cache_file!r}: {e.strerror}')
                raise UserException(msg)

    profile_evaluate_dict = {}
    for key, profile_list in profile_filters.items():
        similar = profile_evaluate_dict[key] = []
        for profile in profile_list:
            for existing in similar:
                if (existing[0].masked_use == profile.masked_use and
                        existing[0].forced_use == profile.forced_use):
                    existing.append(profile)
                    break
            else:
                similar.append([profile])

    self.profile_evaluate_dict = profile_evaluate_dict
    self.profile_filters = profile_filters
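# Hedged sketch of the final grouping step above: profiles whose masked/forced
# USE configurations match are bucketed together so later checks can evaluate
# one representative per bucket instead of every profile. group_equivalent and
# the (masked_use, forced_use) equality key are illustrative assumptions about
# what the existing[0] comparison achieves, not pkgcheck API.
def group_equivalent(profiles):
    buckets = []
    for profile in profiles:
        for bucket in buckets:
            if (bucket[0].masked_use == profile.masked_use and
                    bucket[0].forced_use == profile.forced_use):
                bucket.append(profile)   # identical USE forcing/masking -> same bucket
                break
        else:
            buckets.append([profile])    # first profile seen with this configuration
    return buckets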
def settings(self):
    settings = self._settings

    if 'CHOST' in settings and 'CBUILD' not in settings:
        settings['CBUILD'] = settings['CHOST']

    # if unset, MAKEOPTS defaults to CPU thread count
    if 'MAKEOPTS' not in settings:
        settings['MAKEOPTS'] = '-j%i' % cpu_count()

    # reformat env.d and make.conf incrementals
    system_profile_settings = {}
    for x in const.incrementals:
        system_profile_val = self.system_profile.get(x, ())
        make_conf_val = settings.get(x, ())
        if isinstance(system_profile_val, str):
            system_profile_val = tuple(system_profile_val.split())
        if isinstance(make_conf_val, str):
            make_conf_val = tuple(make_conf_val.split())
        system_profile_settings[x] = system_profile_val
        settings[x] = make_conf_val

    # roughly... all incremental stacks should be interpreted left -> right
    # as such we start with the env.d settings, append profile settings,
    # and finally append make.conf settings onto that.
    for k, v in self.profile.default_env.items():
        if k not in settings:
            settings[k] = v
            continue
        if k in const.incrementals:
            settings[k] = system_profile_settings[k] + v + settings[k]

    # next we finalize incrementals.
    for incremental in const.incrementals:
        # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
        # negations currently so that pkg iuse induced enablings can be
        # disabled by negations. For example, think of the profile doing
        # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
        # skipped because negations are required for license filtering.
        if incremental not in settings or incremental in ("USE", "ACCEPT_LICENSE"):
            continue
        s = set()
        incremental_expansion(
            s, settings[incremental],
            f'while expanding {incremental}')
        settings[incremental] = tuple(s)

    if 'ACCEPT_KEYWORDS' not in settings:
        raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                      "or user config")
    s = set()
    default_keywords = []
    incremental_expansion(
        s, settings['ACCEPT_KEYWORDS'],
        'while expanding ACCEPT_KEYWORDS')
    default_keywords.extend(s)
    settings['ACCEPT_KEYWORDS'] = set(default_keywords)

    # pull trigger options from the env
    self._triggers = GenerateTriggers(self, settings)

    return ImmutableDict(settings)
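# Hedged sketch of the stacking order the settings property builds for
# incremental variables: env.d values first, then profile defaults, then
# make.conf, with incremental_expansion() collapsing the combined sequence
# afterwards. stacked_incremental is an illustrative helper, not part of the
# class above.
def stacked_incremental(env_d, profile_default, make_conf):
    return tuple(env_d) + tuple(profile_default) + tuple(make_conf)

# e.g. USE: stacked_incremental(("alsa",), ("X", "-alsa"), ("vim-syntax",))
# -> ("alsa", "X", "-alsa", "vim-syntax"); expansion then yields {"X", "vim-syntax"}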