def test_unstable_unique(self):
    self.common_check(sequences.unstable_unique)
    uc = UnhashableComplex
    res = sequences.unstable_unique([uc(1, 0), uc(0, 1), uc(1, 0)])
    # sortable
    self.assertEqual(
        sorted(sequences.unstable_unique([[1, 2], [1, 3], [1, 2], [1, 3]])),
        [[1, 2], [1, 3]])
    self.assertTrue(
        res == [uc(1, 0), uc(0, 1)] or res == [uc(0, 1), uc(1, 0)], res)
    self.assertEqual(sorted(sequences.unstable_unique(self._generator())),
                     sorted(xrange(6)))
def test_unstable_unique(self):
    self.common_check(sequences.unstable_unique)
    uc = UnhashableComplex
    res = sequences.unstable_unique([uc(1, 0), uc(0, 1), uc(1, 0)])
    # sortable
    assert sorted(sequences.unstable_unique(
        [[1, 2], [1, 3], [1, 2], [1, 3]])) == [[1, 2], [1, 3]]
    assert res == [uc(1, 0), uc(0, 1)] or res == [uc(0, 1), uc(1, 0)]
    assert sorted(sequences.unstable_unique(self._generator())) == sorted(range(6))
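# A minimal, hedged sketch (not from the snakeoil sources) of the contract the
# tests above exercise: unstable_unique() deduplicates hashable items, items
# that are merely sortable, and generator input alike, but guarantees nothing
# about output order.
from snakeoil.sequences import unstable_unique

assert sorted(unstable_unique([3, 1, 3, 2, 1])) == [1, 2, 3]          # hashable
assert sorted(unstable_unique([[1], [2], [1]])) == [[1], [2]]         # unhashable, sortable
assert sorted(unstable_unique(x % 3 for x in range(9))) == [0, 1, 2]  # generator input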
import sys
from textwrap import dedent

from pkgcore.plugin import get_plugins
from snakeoil.sequences import unstable_unique

from pkgcheck import plugins


def main(f=sys.stdout, **kwargs):
    def out(s, **kwargs):
        print(s, file=f, **kwargs)

    def _rst_header(char, text, newline=True):
        if newline:
            out('\n', end='')
        out(text)
        out(char * len(text))

    # add module docstring to output doc
    if __doc__ is not None:
        out(__doc__.strip())

    reporters = sorted(unstable_unique(get_plugins('reporter', plugins)),
                       key=lambda x: x.__name__)

    _rst_header('=', 'Reporters', newline=False)
    for reporter in reporters:
        if reporter.__doc__ is not None:
            try:
                summary, explanation = reporter.__doc__.split('\n', 1)
            except ValueError:
                summary = reporter.__doc__
                explanation = None
        else:
            summary = None

        out('\n{}'.format(reporter.__name__))
        if summary:
            out('\t' + ' '.join(dedent(summary).strip().split('\n')))
        if explanation:
            out('\n\t' + '\n\t'.join(dedent(explanation).strip().split('\n')))
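# Hedged, standalone illustration of the summary/explanation split used by
# main() above, run on a made-up docstring instead of a real reporter class.
from textwrap import dedent

doc = """Short one-line summary.

    Longer explanation spread
    over several lines.
"""
summary, explanation = doc.split('\n', 1)
print('\t' + ' '.join(dedent(summary).strip().split('\n')))
print('\n\t' + '\n\t'.join(dedent(explanation).strip().split('\n')))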
def get_data(self, repo, options):
    data = {}
    pos = 0
    for pos, pkg in enumerate(repo):
        for license in unstable_unique(iflatten_instance(pkg.license)):
            data.setdefault(license, 0)
            data[license] += 1
    return data, pos + 1
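# Hedged, runnable sketch of the tally pattern in get_data() above; FakePkg
# and the license strings are invented, and iflatten_instance is assumed to
# live in snakeoil.sequences (as in current snakeoil releases).
from collections import namedtuple
from snakeoil.sequences import iflatten_instance, unstable_unique

FakePkg = namedtuple('FakePkg', 'license')
repo = [FakePkg(('GPL-2',)), FakePkg(('GPL-2', ('MIT',))), FakePkg(('BSD',))]

data = {}
pos = 0
for pos, pkg in enumerate(repo):
    for license in unstable_unique(iflatten_instance(pkg.license)):
        data.setdefault(license, 0)
        data[license] += 1
print(data, pos + 1)  # {'GPL-2': 2, 'MIT': 1, 'BSD': 1} 3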
def _choices(sections):
    """Return an iterable of name: location mappings for available repos.

    If a repo doesn't have a proper location just the name is returned.
    """
    for repo_name, repo in sorted(unstable_unique(sections.items())):
        repo_name = getattr(repo, 'repo_id', repo_name)
        if hasattr(repo, 'location'):
            yield f"{repo_name}:{repo.location}"
        else:
            yield repo_name
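# Hedged usage sketch for _choices() above, fed a hypothetical sections
# mapping; FakeRepo is a made-up stand-in for a configured repo object, and
# the function plus its unstable_unique import are assumed to be in scope.
class FakeRepo:
    repo_id = 'gentoo'
    location = '/var/db/repos/gentoo'

print(list(_choices({'gentoo': FakeRepo()})))
# ['gentoo:/var/db/repos/gentoo']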
def __init__(self, profile, repositories, vdb, name=None,
             root='/', prefix='/', incrementals=const.incrementals,
             triggers=(), **settings):
    # voodoo, unfortunately (so it goes)
    # break this up into chunks once it's stabilized (most of code
    # here has already, but still more to add)
    self._triggers = triggers
    self.name = name

    # prevent critical variables from being changed in make.conf
    for k in profile.profile_only_variables.intersection(settings.keys()):
        del settings[k]

    if 'CHOST' in settings and 'CBUILD' not in settings:
        settings['CBUILD'] = settings['CHOST']

    # if unset, MAKEOPTS defaults to CPU thread count
    if 'MAKEOPTS' not in settings:
        settings['MAKEOPTS'] = '-j%i' % cpu_count()

    # map out sectionname -> config manager immediately.
    repositories_collapsed = [r.collapse() for r in repositories]
    repositories = [r.instantiate() for r in repositories_collapsed]

    self.fetcher = settings.pop("fetcher")
    self.default_licenses_manager = OverlayedLicenses(*repositories)

    vdb_collapsed = [r.collapse() for r in vdb]
    vdb = [r.instantiate() for r in vdb_collapsed]
    self.repos_raw = {
        collapsed.name: repo for (collapsed, repo) in izip(
            repositories_collapsed, repositories)}
    self.repos_raw.update(
        (collapsed.name, repo) for (collapsed, repo) in izip(
            vdb_collapsed, vdb))
    self.repos_raw.pop(None, None)

    if profile.provides_repo is not None:
        self.repos_raw['package.provided'] = profile.provides_repo
        vdb.append(profile.provides_repo)

    self.profile = profile
    pkg_masks, pkg_unmasks, pkg_keywords, pkg_licenses = [], [], [], []
    pkg_use, self.bashrcs = [], []

    self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

    for key, val, action in (
            ("package.mask", pkg_masks, parse_match),
            ("package.unmask", pkg_unmasks, parse_match),
            ("package.keywords", pkg_keywords, package_keywords_splitter),
            ("package.accept_keywords", pkg_keywords, package_keywords_splitter),
            ("package.license", pkg_licenses, package_keywords_splitter),
            ("package.use", pkg_use, package_keywords_splitter),
            ("package.env", self.bashrcs, package_env_splitter),
            ):
        for fp in settings.pop(key, ()):
            try:
                if key == "package.env":
                    base = self.ebuild_hook_dir
                    if base is None:
                        base = os.path.dirname(fp)
                    action = partial(action, base)
                for fs_obj in iter_scan(fp, follow_symlinks=True):
                    if not fs_obj.is_reg or '/.' in fs_obj.location:
                        continue
                    val.extend(
                        action(x) for x in
                        iter_read_bash(fs_obj.location, allow_line_cont=True))
            except EnvironmentError as e:
                if e.errno == errno.ENOENT:
                    raise MissingFile(fp, key)
                raise_from(Failure("failed reading '%s': %s" % (fp, e)))
            except ValueError as e:
                raise_from(Failure("failed reading '%s': %s" % (fp, e)))

    for x in incrementals:
        if isinstance(settings.get(x), basestring):
            settings[x] = tuple(settings[x].split())

    # roughly... all incremental stacks should be interpreted left -> right
    # as such we start with the profile settings, and append ours onto it.
    for k, v in profile.default_env.iteritems():
        if k not in settings:
            settings[k] = v
            continue
        if k in incrementals:
            settings[k] = v + tuple(settings[k])

    # next we finalize incrementals.
    for incremental in incrementals:
        # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
        # negations currently so that pkg iuse induced enablings can be
        # disabled by negations. For example, think of the profile doing
        # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
        # skipped because negations are required for license filtering.
        if incremental not in settings or incremental in ("USE", "ACCEPT_LICENSE"):
            continue
        s = set()
        incremental_expansion(
            s, settings[incremental],
            'While expanding %s ' % (incremental,))
        settings[incremental] = tuple(s)

    # use is collapsed; now stack use_expand.
    use = settings['USE'] = set(optimize_incrementals(
        list(settings.get('USE', ())) + os.environ.get('USE', '').split()))

    self._extend_use_for_features(use, settings.get("FEATURES", ()))

    for u in profile.use_expand:
        v = settings.get(u)
        if v is None:
            continue
        u2 = u.lower() + "_"
        use.update(u2 + x for x in v.split())

    if 'ACCEPT_KEYWORDS' not in settings:
        raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                      "or user config")
    s = set()
    default_keywords = []
    incremental_expansion(
        s, settings['ACCEPT_KEYWORDS'],
        'while expanding ACCEPT_KEYWORDS')
    default_keywords.extend(s)
    settings['ACCEPT_KEYWORDS'] = set(default_keywords)

    self.use = use

    if "ARCH" not in settings:
        raise Failure(
            "No ARCH setting detected from profile, or user config")

    self.arch = self.stable_arch = settings["ARCH"]
    self.unstable_arch = "~%s" % self.arch

    # ~amd64 -> [amd64, ~amd64]
    for x in default_keywords[:]:
        if x.startswith("~"):
            default_keywords.append(x.lstrip("~"))
    default_keywords = unstable_unique(default_keywords + [self.arch])

    accept_keywords = pkg_keywords + list(profile.accept_keywords)
    vfilters = [self.make_keywords_filter(
        self.arch, default_keywords, accept_keywords, profile.keywords,
        incremental="package.keywords" in incrementals)]

    del default_keywords, accept_keywords

    # we can finally close that fricking
    # "DISALLOW NON FOSS LICENSES" bug via this >:)
    master_license = []
    master_license.extend(settings.get('ACCEPT_LICENSE', ()))
    if master_license or pkg_licenses:
        vfilters.append(self.make_license_filter(master_license, pkg_licenses))

    del master_license

    # if it's made it this far...
    self.root = settings["ROOT"] = root
    self.prefix = prefix
    self.settings = ProtectedDict(settings)

    for data in self.settings.get('bashrc', ()):
        source = local_source(data)
        # this is currently local-only so a path check is ok
        # TODO make this more general
        if not os.path.exists(source.path):
            raise Failure(
                'user-specified bashrc %r does not exist' % (data,))
        self.bashrcs.append((packages.AlwaysTrue, source))

    # stack use stuff first, then profile.
    self.enabled_use = ChunkedDataDict()
    self.enabled_use.add_bare_global(*split_negations(self.use))
    self.enabled_use.merge(profile.pkg_use)
    self.enabled_use.update_from_stream(
        chunked_data(k, *split_negations(v)) for k, v in pkg_use)

    for attr in ('', 'stable_'):
        c = ChunkedDataDict()
        c.merge(getattr(profile, attr + 'forced_use'))
        c.add_bare_global((), (self.arch,))
        setattr(self, attr + 'forced_use', c)

        c = ChunkedDataDict()
        c.merge(getattr(profile, attr + 'masked_use'))
        setattr(self, attr + 'disabled_use', c)

    self.repos = []
    self.vdb = []
    self.repos_configured = {}
    self.repos_configured_filtered = {}

    rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

    profile_masks = profile._incremental_masks()
    profile_unmasks = profile._incremental_unmasks()
    repo_masks = {r.repo_id: r._visibility_limiters() for r in repositories}

    for l, repos, filtered in ((self.repos, repositories, True),
                               (self.vdb, vdb, False)):
        for repo in repos:
            if not repo.configured:
                pargs = [repo]
                try:
                    for x in repo.configurables:
                        if x == "domain":
                            pargs.append(self)
                        elif x == "settings":
                            pargs.append(settings)
                        elif x == "profile":
                            pargs.append(profile)
                        else:
                            pargs.append(getattr(self, x))
                except AttributeError as ae:
                    raise_from(Failure("failed configuring repo '%s': "
                                       "configurable missing: %s" % (repo, ae)))
                wrapped_repo = repo.configure(*pargs)
            else:
                wrapped_repo = repo
            key = rev_names.get(repo)
            self.repos_configured[key] = wrapped_repo
            if filtered:
                config = getattr(repo, 'config', None)
                masters = getattr(config, 'masters', ())
                if masters is None:
                    # tough cookies. If a user has an overlay, no masters
                    # defined, we're not applying the portdir masks.
                    # we do this both since that's annoying, and since
                    # frankly there isn't any good course of action.
                    masters = ()
                global_masks = [repo_masks.get(master, [(), ()])
                                for master in masters]
                global_masks.append(repo_masks[repo.repo_id])
                global_masks.extend(profile_masks)
                masks = set()
                for neg, pos in global_masks:
                    masks.difference_update(neg)
                    masks.update(pos)
                masks.update(pkg_masks)
                unmasks = set(chain(pkg_unmasks, *profile_unmasks))
                filtered = generate_filter(masks, unmasks, *vfilters)
            if filtered:
                wrapped_repo = visibility.filterTree(wrapped_repo, filtered, True)
            self.repos_configured_filtered[key] = wrapped_repo
            l.append(wrapped_repo)

    self.use_expand_re = re.compile(
        "^(?:[+-])?(%s)_(.*)$" %
        "|".join(x.lower() for x in sorted(profile.use_expand, reverse=True)))
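# Hedged, standalone sketch of the keyword-expansion step in __init__ above:
# each ~arch keyword implies its stable form, then unstable_unique() drops
# duplicates (order unspecified) after the native arch is appended.
from snakeoil.sequences import unstable_unique

arch = 'amd64'
default_keywords = ['~amd64', '~arm64']
for x in default_keywords[:]:
    if x.startswith('~'):
        default_keywords.append(x.lstrip('~'))
default_keywords = unstable_unique(default_keywords + [arch])
print(sorted(default_keywords))  # ['amd64', 'arm64', '~amd64', '~arm64']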
from textwrap import dedent

from pkgcore.plugin import get_plugins
from snakeoil.sequences import unstable_unique

from pkgcheck import plugins


def _rst_header(char, text, newline=True):
    if newline:
        print('\n', end='')
    print(text)
    print(char * len(text))


reporters = sorted(unstable_unique(get_plugins('reporter', plugins)),
                   key=lambda x: x.__name__)

_rst_header('=', 'Reporters', newline=False)
for reporter in reporters:
    if reporter.__doc__ is not None:
        try:
            summary, explanation = reporter.__doc__.split('\n', 1)
        except ValueError:
            summary = reporter.__doc__
            explanation = None
    else:
        summary = None

    print('\n{}'.format(reporter.__name__))
from collections import defaultdict
from textwrap import dedent

from pkgcore.plugin import get_plugins
from snakeoil.sequences import unstable_unique

from pkgcheck import plugins


def _rst_header(char, text, newline=True):
    if newline:
        print('\n', end='')
    print(text)
    print(char * len(text))


checks = sorted(unstable_unique(get_plugins('check', plugins)),
                key=lambda x: x.__name__)
d = defaultdict(set)
for check in checks:
    d[check.scope].add(check)

_rst_header('=', 'Checks', newline=False)
scopes = ('version', 'package', 'category', 'repository')
for scope in reversed(sorted(d)):
    _rst_header('-', scopes[scope].capitalize() + ' scope')
    checks = sorted(d[scope], key=lambda x: x.__name__)
    for check in checks:
        if check.__doc__ is not None:
            try:
                summary, explanation = check.__doc__.split('\n', 1)
            except ValueError:
                summary = check.__doc__
                explanation = None
        else:
            summary = None

        print('\n{}'.format(check.__name__))
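# Hedged, self-contained sketch of the scope-grouping above, with invented
# check classes and scope integers in place of real pkgcheck plugins.
from collections import defaultdict

class PkgCheck:
    scope = 1  # hypothetical package-scope id

class CatCheck:
    scope = 2  # hypothetical category-scope id

d = defaultdict(set)
for check in (PkgCheck, CatCheck, PkgCheck):
    d[check.scope].add(check)

scopes = ('version', 'package', 'category', 'repository')
for scope in reversed(sorted(d)):
    print(scopes[scope].capitalize() + ' scope:',
          sorted(x.__name__ for x in d[scope]))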
def check_args(parser, namespace):
    # XXX hack...
    namespace.checks = sorted(unstable_unique(
        get_plugins('check', plugins)),
        key=lambda x: x.__name__)

    if any((namespace.list_keywords, namespace.list_checks, namespace.list_reporters)):
        # no need to check any other args
        return

    cwd = abspath(os.getcwd())
    if namespace.suite is None:
        # No suite explicitly specified. Use the repo to guess the suite.
        if namespace.target_repo is None:
            # Not specified either. Try to find a repo our cwd is in.
            # The use of a dict here is a hack to deal with one
            # repo having multiple names in the configuration.
            candidates = {}
            for name, suite in namespace.config.pkgcheck_suite.iteritems():
                repo = suite.target_repo
                if repo is None:
                    continue
                repo_base = getattr(repo, 'location', None)
                if repo_base is not None and cwd.startswith(repo_base):
                    candidates[repo] = name
            if len(candidates) == 1:
                namespace.guessed_suite = True
                namespace.target_repo = tuple(candidates)[0]
        if namespace.target_repo is not None:
            # We have a repo, now find a suite matching it.
            candidates = list(
                suite for suite in namespace.config.pkgcheck_suite.itervalues()
                if suite.target_repo is namespace.target_repo)
            if len(candidates) == 1:
                namespace.guessed_suite = True
                namespace.suite = candidates[0]
        if namespace.suite is None:
            # If we have multiple candidates or no candidates we
            # fall back to the default suite.
            namespace.suite = namespace.config.get_default('pkgcheck_suite')
            namespace.default_suite = namespace.suite is not None
    if namespace.suite is not None:
        # We have a suite. Lift defaults from it for values that
        # were not set explicitly:
        if namespace.checkset is None:
            namespace.checkset = namespace.suite.checkset
        # If we were called with no atoms we want to force
        # cwd-based detection.
        if namespace.target_repo is None:
            if namespace.targets:
                namespace.target_repo = namespace.suite.target_repo
            elif namespace.suite.target_repo is not None:
                # No atoms were passed in, so we want to guess
                # what to scan based on cwd below. That only makes
                # sense if we are inside the target repo. We still
                # want to pick the suite's target repo if we are
                # inside it, in case there is more than one repo
                # definition with a base that contains our dir.
                repo_base = getattr(namespace.suite.target_repo, 'location', None)
                if repo_base is not None and cwd.startswith(repo_base):
                    namespace.target_repo = namespace.suite.target_repo
    if namespace.target_repo is None:
        # We have no target repo (not explicitly passed, not from a suite, not
        # from an earlier guess at the target_repo) so try to guess one.
        if len(namespace.targets) == 1 and os.path.exists(namespace.targets[0]):
            target_dir = namespace.targets[0]
        else:
            target_dir = cwd
        target_repo = None
        for name, repo in namespace.config.repo.iteritems():
            repo_base = getattr(repo, 'location', None)
            if repo_base is not None and target_dir in repo:
                target_repo = repo
        if target_repo is None:
            parser.error(
                'no target repo specified and '
                'current directory is not inside a known repo')
        namespace.target_repo = target_repo

    if namespace.reporter is None:
        namespace.reporter = namespace.config.get_default(
            'pkgcheck_reporter_factory')
        if namespace.reporter is None:
            namespace.reporter = get_plugin('reporter', plugins)
        if namespace.reporter is None:
            parser.error(
                'no config defined reporter found, nor any default '
                'plugin based reporters')
    else:
        func = namespace.config.pkgcheck_reporter_factory.get(namespace.reporter)
        if func is None:
            func = list(base.Whitelist([namespace.reporter]).filter(
                get_plugins('reporter', plugins)))
            if not func:
                parser.error(
                    "no reporter matches %r (available: %s)" % (
                        namespace.reporter,
                        ', '.join(sorted(x.__name__ for x in
                                         get_plugins('reporter', plugins)))
                    )
                )
            elif len(func) > 1:
                parser.error(
                    "--reporter %r matched multiple reporters, "
                    "must match one. %r" % (
                        namespace.reporter,
                        tuple(sorted("%s.%s" % (x.__module__, x.__name__)
                                     for x in func))
                    )
                )
            func = func[0]
        namespace.reporter = func

    # search_repo is a multiplex of target_repo and its masters, make sure
    # they're configured properly in metadata/layout.conf. This is used for
    # things like visibility checks (it is passed to the checkers in "start").
    namespace.search_repo = multiplex.tree(*namespace.target_repo.trees)

    namespace.repo_bases = [abspath(repo.location)
                            for repo in reversed(namespace.target_repo.trees)]

    if namespace.targets:
        limiters = []
        repo = namespace.target_repo

        # read targets from stdin
        if len(namespace.targets) == 1 and namespace.targets[0] == '-':
            namespace.targets = [x.strip() for x in sys.stdin.readlines()
                                 if x.strip() != '']
            # reassign stdin to allow interactivity (currently only works for unix)
            sys.stdin = open('/dev/tty')

        for target in namespace.targets:
            try:
                limiters.append(parserestrict.parse_match(target))
            except parserestrict.ParseError as e:
                if os.path.exists(target):
                    try:
                        limiters.append(repo.path_restrict(target))
                    except ValueError as e:
                        parser.error(e)
                else:
                    parser.error(e)
        namespace.limiters = limiters
    else:
        repo_base = getattr(namespace.target_repo, 'location', None)
        if not repo_base:
            parser.error(
                'Either specify a target repo that is not multi-tree or '
                'one or more extended atoms to scan '
                '("*" for the entire repo).')
        if cwd not in namespace.target_repo:
            namespace.limiters = [packages.AlwaysTrue]
        else:
            namespace.limiters = [packages.AndRestriction(
                *namespace.target_repo.path_restrict(cwd))]

    if namespace.checkset is None:
        namespace.checkset = namespace.config.get_default('pkgcheck_checkset')
    if namespace.checkset is not None:
        namespace.checks = list(namespace.checkset.filter(namespace.checks))

    disabled_checks, enabled_checks = ((), ())
    if namespace.selected_checks is not None:
        disabled_checks, enabled_checks = namespace.selected_checks

    if enabled_checks:
        whitelist = base.Whitelist(enabled_checks)
        namespace.checks = list(whitelist.filter(namespace.checks))

    if disabled_checks:
        blacklist = base.Blacklist(disabled_checks)
        namespace.checks = list(blacklist.filter(namespace.checks))

    if not namespace.checks:
        parser.error('no active checks')

    namespace.addons = set()

    def add_addon(addon):
        if addon not in namespace.addons:
            namespace.addons.add(addon)
            for dep in addon.required_addons:
                add_addon(dep)
    for check in namespace.checks:
        add_addon(check)
    try:
        for addon in namespace.addons:
            addon.check_args(parser, namespace)
    except argparse.ArgumentError as e:
        if namespace.debug:
            raise
        parser.error(str(e))
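# Hedged, self-contained sketch of the recursive addon resolution that
# check_args() performs above; the addon/check classes are invented.
class AddonA:
    required_addons = ()

class AddonB:
    required_addons = (AddonA,)

class SomeCheck:
    required_addons = (AddonB,)

addons = set()

def add_addon(addon):
    if addon not in addons:
        addons.add(addon)
        for dep in addon.required_addons:
            add_addon(dep)

add_addon(SomeCheck)
print(sorted(a.__name__ for a in addons))  # ['AddonA', 'AddonB', 'SomeCheck']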
def add_addon(addon, addon_set):
    if addon not in addon_set:
        addon_set.add(addon)
        for dep in addon.required_addons:
            add_addon(dep, addon_set)


all_addons = set()
scan.plugin = scan.add_argument_group('plugin options')
for check in get_plugins('check', plugins):
    add_addon(check, all_addons)
for addon in all_addons:
    addon.mangle_argparser(scan)

# XXX hack...
_known_checks = tuple(
    sorted(unstable_unique(get_plugins('check', plugins)),
           key=lambda x: x.__name__))
_known_keywords = tuple(
    sorted(unstable_unique(
        chain.from_iterable(check.known_results for check in _known_checks)),
        key=lambda x: x.__name__))


@scan.bind_final_check
def _validate_args(parser, namespace):
    namespace.enabled_checks = list(_known_checks)
    namespace.enabled_keywords = list(_known_keywords)

    cwd = abspath(os.getcwd())

    if namespace.suite is None:
        # No suite explicitly specified. Use the repo to guess the suite.
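# Hedged sketch of the _known_keywords construction above, over made-up check
# classes whose known_results overlap; unstable_unique() collapses the
# duplicates before the stable name sort.
from itertools import chain
from snakeoil.sequences import unstable_unique

class ResultA:
    pass

class ResultB:
    pass

class Check1:
    known_results = (ResultA,)

class Check2:
    known_results = (ResultA, ResultB)

_known_checks = (Check1, Check2)
_known_keywords = tuple(sorted(
    unstable_unique(chain.from_iterable(
        c.known_results for c in _known_checks)),
    key=lambda x: x.__name__))
print([k.__name__ for k in _known_keywords])  # ['ResultA', 'ResultB']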