Example #1
File: ops.py Project: veelai/pkgcore
def merge_contents(cset, offset=None, callback=None):

    """
    merge a :class:`pkgcore.fs.contents.contentsSet` instance to the livefs

    :param cset: :class:`pkgcore.fs.contents.contentsSet` instance
    :param offset: if not None, offset to prefix all locations with.
        Think of it as target dir.
    :param callback: callable to report each entry being merged; given a single arg,
        the fs object being merged.
    :raise EnvironmentError: Thrown for permission failures.
    """

    if callback is None:
        callback = lambda obj: None

    ensure_perms = get_plugin("fs_ops.ensure_perms")
    copyfile = get_plugin("fs_ops.copyfile")
    mkdir = get_plugin("fs_ops.mkdir")

    if not isinstance(cset, contents.contentsSet):
        raise TypeError("cset must be a contentsSet, got %r" % (cset,))

    if offset is not None:
        if os.path.exists(offset):
            if not os.path.isdir(offset):
                raise TypeError("offset must be a dir, or not exist: %s" % offset)
        else:
            mkdir(fs.fsDir(offset, strict=False))
        iterate = partial(contents.offset_rewriter, offset.rstrip(os.path.sep))
    else:
        iterate = iter

    d = list(iterate(cset.iterdirs()))
    d.sort()
    for x in d:
        callback(x)

        try:
            # we pass in the stat ourselves, using stat instead of the
            # lstat gen_obj uses internally; this is the equivalent of
            # "dereference that link"
            obj = gen_obj(x.location, stat=os.stat(x.location))
            if not fs.isdir(obj):
                raise Exception(
                    "%s exists and needs to be a dir, but is a %s" %
                    (x.location, obj))
            ensure_perms(x, obj)
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            try:
                # we do this form to catch dangling symlinks
                mkdir(x)
            except OSError as oe:
                if oe.errno != errno.EEXIST:
                    raise
                os.unlink(x.location)
                mkdir(x)
            ensure_perms(x)
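
A usage sketch (not from the projects above; contentsSet and fsDir follow the pkgcore APIs already used in the snippet, but the paths and callback are hypothetical):

from pkgcore.fs import contents, fs

# merge a one-directory contents set under a hypothetical image offset
cset = contents.contentsSet([fs.fsDir('/usr/share/doc/mypkg', strict=False)])
merge_contents(cset, offset='/tmp/image',
               callback=lambda obj: print('merging', obj.location))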
Example #2
File: ops.py Project: vapier/pkgcore
def default_mkdir(d):
    """
    mkdir for a fsDir object

    :param d: :class:`pkgcore.fs.fs.fsDir` instance
    :return: true if success, else an exception is thrown
    :raise EnvironmentError: if can't complete
    """
    if not d.mode:
        mode = 0o777
    else:
        mode = d.mode
    os.mkdir(d.location, mode)
    get_plugin("fs_ops.ensure_perms")(d)
    return True
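
A minimal call sketch (hypothetical path and mode; fsDir keyword arguments as used in Example #1):

d = fs.fsDir('/tmp/newdir', mode=0o755, strict=False)
default_mkdir(d)  # falls back to mode 0o777 when d.mode is unset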
Example #3
File: ops.py Project: chutz/pkgcore
def default_mkdir(d):
    """
    mkdir for a fsDir object

    :param d: :class:`pkgcore.fs.fs.fsDir` instance
    :return: true if success, else an exception is thrown
    :raise EnvironmentError: if can't complete
    """
    if not d.mode:
        mode = 0o777
    else:
        mode = d.mode
    os.mkdir(d.location, mode)
    get_plugin("fs_ops.ensure_perms")(d)
    return True
Example #4
File: ops.py Project: vapier/pkgcore
def default_copyfile(obj, mkdirs=False):
    """
    copy a :class:`pkgcore.fs.fs.fsBase` to its stated location.

    :param obj: :class:`pkgcore.fs.fs.fsBase` instance, exempting :class:`fsDir`
    :return: true if success, else an exception is thrown
    :raise EnvironmentError: permission errors

    """

    existent = False
    ensure_perms = get_plugin("fs_ops.ensure_perms")
    if not fs.isfs_obj(obj):
        raise TypeError("obj must be fsBase derivative: %r" % obj)
    elif fs.isdir(obj):
        raise TypeError("obj must not be a fsDir instance: %r" % obj)

    try:
        existing = gen_obj(obj.location)
        if fs.isdir(existing):
            raise CannotOverwrite(obj, existing)
        existent = True
    except OSError as oe:
        # verify the parent dir is there at least
        basefp = os.path.dirname(obj.location)
        if basefp.strip(os.path.sep) and not os.path.exists(basefp):
            if mkdirs:
                if not ensure_dirs(basefp, mode=0o750, minimal=True):
                    raise FailedCopy(obj, str(oe))
Example #5
File: ops.py Project: chutz/pkgcore
def default_copyfile(obj, mkdirs=False):
    """
    copy a :class:`pkgcore.fs.fs.fsBase` to its stated location.

    :param obj: :class:`pkgcore.fs.fs.fsBase` instance, exempting :class:`fsDir`
    :return: true if success, else an exception is thrown
    :raise EnvironmentError: permission errors

    """

    existent = False
    ensure_perms = get_plugin("fs_ops.ensure_perms")
    if not fs.isfs_obj(obj):
        raise TypeError("obj must be fsBase derivative: %r" % obj)
    elif fs.isdir(obj):
        raise TypeError("obj must not be a fsDir instance: %r" % obj)

    try:
        existing = gen_obj(obj.location)
        if fs.isdir(existing):
            raise CannotOverwrite(obj, existing)
        existent = True
    except OSError as oe:
        # verify the parent dir is there at least
        basefp = os.path.dirname(obj.location)
        if basefp.strip(os.path.sep) and not os.path.exists(basefp):
            if mkdirs:
                if not ensure_dirs(basefp, mode=0o750, minimal=True):
                    raise FailedCopy(obj, str(oe))
Example #6
    def trigger(self, engine, cset):
        op = self.format_op
        op = getattr(op, 'install_op', op)
        op.setup_workdir()
        merge_contents = get_plugin("fs_ops.merge_contents")
        merge_cset = cset
        if engine.offset != '/':
            merge_cset = cset.change_offset(engine.offset, '/')
        merge_contents(merge_cset, offset=op.env["D"])

        # ok.  they're on disk.
        # now to avoid going back to the binpkg, we rewrite
        # the data_source for files to the on disk location.
        # we can update in place also, since we're not changing the mapping.

        # this rewrites the data_source to the ${D} loc.
        d = op.env["D"]
        fi = (x.change_attributes(
            data=local_source(pjoin(d, x.location.lstrip('/'))))
              for x in merge_cset.iterfiles())

        if engine.offset:
            # we're using merge_cset above, which has the final offset loc
            # pruned; this is required for the merge, however, we're updating
            # the cset so we have to insert the final offset back in.
            # wrap the iter, iow.
            fi = offset_rewriter(engine.offset, fi)

        cset.update(contentsSet(fi))

        # we *probably* should change the csets class at some point
        # since it no longer needs to be tar, but that's for another day.
        engine.replace_cset('new_cset', cset)
Example #7
File: repository.py Project: chutz/pkgcore
    def trigger(self, engine, cset):
        op = self.format_op
        op = getattr(op, 'install_op', op)
        op.setup_workdir()
        merge_contents = get_plugin("fs_ops.merge_contents")
        merge_cset = cset
        if engine.offset != '/':
            merge_cset = cset.change_offset(engine.offset, '/')
        merge_contents(merge_cset, offset=op.env["D"])

        # ok.  they're on disk.
        # now to avoid going back to the binpkg, we rewrite
        # the data_source for files to the on disk location.
        # we can update in place also, since we're not changing the mapping.

        # this rewrites the data_source to the ${D} loc.
        d = op.env["D"]
        fi = (x.change_attributes(data=local_source(
                pjoin(d, x.location.lstrip('/'))))
                for x in merge_cset.iterfiles())

        if engine.offset:
            # we're using merge_cset above, which has the final offset loc
            # pruned; this is required for the merge, however, we're updating
            # the cset so we have to insert the final offset back in.
            # wrap the iter, iow.
            fi = offset_rewriter(engine.offset, fi)

        cset.update(contentsSet(fi))

        # we *probably* should change the csets class at some point
        # since it no longer needs to be tar, but that's for another day.
        engine.replace_cset('new_cset', cset)
Example #8
File: ops.py Project: radhermit/pkgcore
def default_copyfile(obj, mkdirs=False):
    """
    copy a :class:`pkgcore.fs.fs.fsBase` to its stated location.

    :param obj: :class:`pkgcore.fs.fs.fsBase` instance, exempting :class:`fsDir`
    :return: true if success, else an exception is thrown
    :raise EnvironmentError: permission errors

    """

    existent = False
    ensure_perms = get_plugin("fs_ops.ensure_perms")
    if not fs.isfs_obj(obj):
        raise TypeError(f'obj must be fsBase derivative: {obj!r}')
    elif fs.isdir(obj):
        raise TypeError(f'obj must not be a fsDir instance: {obj!r}')

    try:
        existing = gen_obj(obj.location)
        if fs.isdir(existing):
            raise CannotOverwrite(obj, existing)
        existent = True
    except OSError as oe:
        # verify the parent dir is there at least
        basefp = os.path.dirname(obj.location)
        if basefp.strip(os.path.sep) and not os.path.exists(basefp):
            if mkdirs:
                if not ensure_dirs(basefp, mode=0o750, minimal=True):
                    raise FailedCopy(obj, str(oe))
            else:
                raise
        existent = False

    if not existent:
        fp = obj.location
    else:
        fp = existent_fp = obj.location + "#new"

    if fs.isreg(obj):
        obj.data.transfer_to_path(fp)
    elif fs.issym(obj):
        os.symlink(obj.target, fp)
    elif fs.isfifo(obj):
        os.mkfifo(fp)
    elif fs.isdev(obj):
        dev = os.makedev(obj.major, obj.minor)
        os.mknod(fp, obj.mode, dev)
    else:
        ret = spawn([CP_BINARY, "-Rp", obj.location, fp])
        if ret != 0:
            raise FailedCopy(obj, f'got {ret} from {CP_BINARY} -Rp')

    ensure_perms(obj.change_attributes(location=fp))

    if existent:
        os.rename(existent_fp, obj.location)
    return True
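
A hedged usage sketch (hypothetical location and target; fs.fsSymlink is assumed to be the symlink type handled by the issym() branch above, taking the link target as shown):

sym = fs.fsSymlink('/tmp/image/usr/bin/vi', target='vim', strict=False)
default_copyfile(sym, mkdirs=True)  # missing parent dirs are created since mkdirs=True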
Example #9
 def _test_plug(self):
     import mod_testplug
     assert plugin.get_plugin('spork', mod_testplug) is None
     plugins = list(plugin.get_plugins('plugtest', mod_testplug))
     assert len(plugins) == 2, plugins
     plugin.get_plugin('plugtest', mod_testplug)
     assert 'HighPlug' == \
         plugin.get_plugin('plugtest', mod_testplug).__class__.__name__
     with open(pjoin(self.packdir, plugin.CACHE_FILENAME)) as f:
         lines = f.readlines()
     assert len(lines) == 3
     assert plugin.CACHE_HEADER + "\n" == lines[0]
     lines.pop(0)
     lines.sort()
     mtime = int(os.path.getmtime(pjoin(self.packdir, 'plug2.py')))
     assert f'plug2:{mtime}:\n' == lines[0]
     mtime = int(os.path.getmtime(pjoin(self.packdir, 'plug.py')))
     assert (
         f'plug:{mtime}:plugtest,7,1:plugtest,1,tests.test_plugin.LowPlug:plugtest,0,0\n'
         == lines[1])
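
The cache assertions above imply one 'name:mtime:entry:...' line per plugin module; a sketch of parsing such a line (format inferred from the test data, not from pkgcore's own parser):

def parse_cache_line(line):
    # e.g. 'plug:1234:plugtest,7,1:plugtest,1,tests.test_plugin.LowPlug'
    name, mtime, *entries = line.rstrip('\n').split(':')
    return name, int(mtime), [tuple(e.split(',')) for e in entries]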
Example #10
 def _test_plug(self):
     import mod_testplug
     self.assertIdentical(None, plugin.get_plugin('spork', mod_testplug))
     plugins = list(plugin.get_plugins('plugtest', mod_testplug))
     self.assertEqual(2, len(plugins), plugins)
     plugin.get_plugin('plugtest', mod_testplug)
     self.assertEqual(
         'HighPlug',
         plugin.get_plugin('plugtest', mod_testplug).__class__.__name__)
     lines = list(open(os.path.join(self.packdir, plugin.CACHE_FILENAME)))
     self.assertEqual(3, len(lines))
     self.assertEqual(plugin.CACHE_HEADER + "\n", lines[0])
     lines.pop(0)
     lines.sort()
     mtime = int(os.path.getmtime(os.path.join(self.packdir, 'plug2.py')))
     self.assertEqual('plug2:%s:\n' % (mtime,), lines[0])
     mtime = int(os.path.getmtime(os.path.join(self.packdir, 'plug.py')))
     self.assertEqual(
         'plug:%s:plugtest,7,1:plugtest,0,pkgcore.test.test_plugin.LowPlug:plugtest,0,0\n'
          % (mtime,),
         lines[1])
Example #11
 def _test_plug(self):
     import mod_testplug
     self.assertIdentical(None, plugin.get_plugin('spork', mod_testplug))
     plugins = list(plugin.get_plugins('plugtest', mod_testplug))
     self.assertEqual(2, len(plugins), plugins)
     plugin.get_plugin('plugtest', mod_testplug)
     self.assertEqual(
         'HighPlug',
         plugin.get_plugin('plugtest', mod_testplug).__class__.__name__)
     with open(pjoin(self.packdir, plugin.CACHE_FILENAME)) as f:
         lines = f.readlines()
     self.assertEqual(3, len(lines))
     self.assertEqual(plugin.CACHE_HEADER + "\n", lines[0])
     lines.pop(0)
     lines.sort()
     mtime = int(os.path.getmtime(pjoin(self.packdir, 'plug2.py')))
     self.assertEqual('plug2:%s:\n' % (mtime, ), lines[0])
     mtime = int(os.path.getmtime(pjoin(self.packdir, 'plug.py')))
     self.assertEqual(
         'plug:%s:plugtest,7,1:plugtest,1,pkgcore.test.test_plugin.LowPlug:plugtest,0,0\n'
         % (mtime, ), lines[1])
Example #12
 def _test_priority_caching(self):
     import mod_testplug
     list(plugin.get_plugins('spork', mod_testplug))
     sys.modules.pop('mod_testplug.plug', None)
     sys.modules.pop('mod_testplug.plug2', None)
     sys.modules.pop('mod_testplug.plug3', None)
     sys.modules.pop('mod_testplug.plug4', None)
     sys.modules.pop('mod_testplug.plug5', None)
     sys.modules.pop('mod_testplug.plug6', None)
     best_plug = plugin.get_plugin('plugtest', mod_testplug)
     from mod_testplug import plug
     assert plug.high_plug == best_plug
     # Extra messages since getting all of sys.modules printed is annoying.
     assert 'mod_testplug.plug' in sys.modules, 'plug not loaded'
     assert 'mod_testplug.plug2' not in sys.modules, 'plug2 loaded'
     assert 'mod_testplug.plug3' not in sys.modules, 'plug3 loaded'
     assert 'mod_testplug.plug4' in sys.modules, 'plug4 not loaded'
      assert 'mod_testplug.plug5' in sys.modules, 'plug5 not loaded'
     assert 'mod_testplug.plug6' not in sys.modules, 'plug6 loaded'
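
A sketch of the lazy loading this test exercises (comments restate the assertions above; get_plugin imports only the modules whose cached priorities make them candidates):

best = plugin.get_plugin('plugtest', mod_testplug)
# plug, plug4 and plug5 end up in sys.modules as priority candidates;
# plug2, plug3 and plug6 are never imported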
Example #13
def _validate_args(parser, namespace):
    namespace.enabled_checks = list(_known_checks)
    namespace.enabled_keywords = list(_known_keywords)
    cwd = abspath(os.getcwd())

    if namespace.suite is None:
        # No suite explicitly specified. Use the repo to guess the suite.
        if namespace.target_repo is None:
            # Not specified either. Try to find a repo our cwd is in.
            # The use of a dict here is a hack to deal with one
            # repo having multiple names in the configuration.
            candidates = {}
            for name, suite in namespace.config.pkgcheck_suite.items():
                repo = suite.target_repo
                if repo is None:
                    continue
                repo_base = getattr(repo, 'location', None)
                if repo_base is not None and cwd.startswith(repo_base):
                    candidates[repo] = name
            if len(candidates) == 1:
                namespace.guessed_suite = True
                namespace.target_repo = tuple(candidates)[0]
        if namespace.target_repo is not None:
            # We have a repo, now find a suite matching it.
            candidates = list(
                suite for suite in namespace.config.pkgcheck_suite.values()
                if suite.target_repo is namespace.target_repo)
            if len(candidates) == 1:
                namespace.guessed_suite = True
                namespace.suite = candidates[0]
        if namespace.suite is None:
            # If we have multiple candidates or no candidates we
            # fall back to the default suite.
            namespace.suite = namespace.config.get_default('pkgcheck_suite')
            namespace.default_suite = namespace.suite is not None
    if namespace.suite is not None:
        # We have a suite. Lift defaults from it for values that
        # were not set explicitly:
        if namespace.checkset is None:
            namespace.checkset = namespace.suite.checkset
        # If we were called with no atoms we want to force
        # cwd-based detection.
        if namespace.target_repo is None:
            if namespace.targets:
                namespace.target_repo = namespace.suite.target_repo
            elif namespace.suite.target_repo is not None:
                # No atoms were passed in, so we want to guess
                # what to scan based on cwd below. That only makes
                # sense if we are inside the target repo. We still
                # want to pick the suite's target repo if we are
                # inside it, in case there is more than one repo
                # definition with a base that contains our dir.
                repo_base = getattr(namespace.suite.target_repo, 'location',
                                    None)
                if repo_base is not None and cwd.startswith(repo_base):
                    namespace.target_repo = namespace.suite.target_repo

    if namespace.target_repo is None:
        # We have no target repo (not explicitly passed, not from a suite, not
        # from an earlier guess at the target_repo) so try to guess one.
        target_repo = None
        target_dir = cwd

        # pull a target directory from target args if they're path-based
        if namespace.targets and os.path.exists(namespace.targets[0]):
            target = namespace.targets[0]
            if os.path.isfile(target):
                target = os.path.dirname(target)
            target_dir = target

        # determine target repo from the target directory
        for repo in namespace.domain.ebuild_repos_raw:
            if target_dir in repo:
                target_repo = repo
                break

        # fallback to the default repo
        if target_repo is None:
            target_repo = namespace.config.get_default('repo')

        namespace.target_repo = target_repo

    # use filtered repo if filtering is enabled
    if namespace.filtered:
        namespace.target_repo = namespace.domain.ebuild_repos[str(
            namespace.target_repo)]

    if namespace.reporter is None:
        namespace.reporter = namespace.config.get_default(
            'pkgcheck_reporter_factory')
        if namespace.reporter is None:
            namespace.reporter = get_plugin('reporter', plugins)
        if namespace.reporter is None:
            parser.error('no config defined reporter found, nor any default '
                         'plugin based reporters')
    else:
        func = namespace.config.pkgcheck_reporter_factory.get(
            namespace.reporter)
        if func is None:
            func = list(
                base.Whitelist([namespace.reporter
                                ]).filter(get_plugins('reporter', plugins)))
            if not func:
                available = ', '.join(
                    sorted(x.__name__
                           for x in get_plugins('reporter', plugins)))
                parser.error(f"no reporter matches {namespace.reporter!r} "
                             f"(available: {available})")
            elif len(func) > 1:
                reporters = tuple(
                    sorted(f"{x.__module__}.{x.__name__}" for x in func))
                parser.error(
                    f"reporter {namespace.reporter!r} matched multiple reporters, "
                    f"must match one. {reporters!r}")
            func = func[0]
        namespace.reporter = func

    # search_repo is a multiplex of target_repo and its masters, make sure
    # they're configured properly in metadata/layout.conf. This is used for
    # things like visibility checks (it is passed to the checkers in "start").
    namespace.search_repo = multiplex.tree(*namespace.target_repo.trees)

    namespace.repo_bases = [
        abspath(repo.location)
        for repo in reversed(namespace.target_repo.trees)
    ]

    namespace.default_target = None
    if namespace.targets:
        repo = namespace.target_repo

        # read targets from stdin in a non-blocking manner
        if len(namespace.targets) == 1 and namespace.targets[0] == '-':

            def stdin():
                while True:
                    line = sys.stdin.readline()
                    if not line:
                        break
                    yield line.rstrip()

            namespace.targets = stdin()

        def limiters():
            for target in namespace.targets:
                try:
                    yield parserestrict.parse_match(target)
                except parserestrict.ParseError as e:
                    if os.path.exists(target):
                        try:
                            yield repo.path_restrict(target)
                        except ValueError as e:
                            parser.error(e)
                    else:
                        parser.error(e)

        namespace.limiters = limiters()
    else:
        repo_base = getattr(namespace.target_repo, 'location', None)
        if not repo_base:
            parser.error(
                'Either specify a target repo that is not multi-tree or '
                'one or more extended atoms to scan '
                '("*" for the entire repo).')
        if cwd not in namespace.target_repo:
            namespace.limiters = [packages.AlwaysTrue]
        else:
            namespace.limiters = [
                packages.AndRestriction(
                    *namespace.target_repo.path_restrict(cwd))
            ]
            namespace.default_target = cwd

    if namespace.checkset is None:
        namespace.checkset = namespace.config.get_default('pkgcheck_checkset')
    if namespace.checkset is not None:
        namespace.enabled_checks = list(
            namespace.checkset.filter(namespace.enabled_checks))

    if namespace.selected_scopes is not None:
        disabled_scopes, enabled_scopes = namespace.selected_scopes

        # validate selected scopes
        selected_scopes = set(disabled_scopes + enabled_scopes)
        unknown_scopes = selected_scopes - set(base.known_scopes.keys())
        if unknown_scopes:
            parser.error('unknown scope%s: %s (available scopes: %s)' %
                         (_pl(unknown_scopes), ', '.join(unknown_scopes),
                          ', '.join(base.known_scopes.keys())))

        # convert scopes to keyword lists
        disabled_keywords = [
            k.__name__ for s in disabled_scopes for k in _known_keywords
            if k.threshold == base.known_scopes[s]
        ]
        enabled_keywords = [
            k.__name__ for s in enabled_scopes for k in _known_keywords
            if k.threshold == base.known_scopes[s]
        ]

        # filter outputted keywords
        namespace.enabled_keywords = base.filter_update(
            namespace.enabled_keywords, enabled_keywords, disabled_keywords)

    if namespace.selected_keywords is not None:
        disabled_keywords, enabled_keywords = namespace.selected_keywords

        errors = (x.__name__ for x in _known_keywords
                  if issubclass(x, base.Error))
        warnings = (x.__name__ for x in _known_keywords
                    if issubclass(x, base.Warning))

        alias_map = {'errors': errors, 'warnings': warnings}
        replace_aliases = lambda x: alias_map.get(x, [x])

        # expand keyword aliases to keyword lists
        disabled_keywords = list(
            chain.from_iterable(map(replace_aliases, disabled_keywords)))
        enabled_keywords = list(
            chain.from_iterable(map(replace_aliases, enabled_keywords)))

        # validate selected keywords
        selected_keywords = set(disabled_keywords + enabled_keywords)
        available_keywords = set(x.__name__ for x in _known_keywords)
        unknown_keywords = selected_keywords - available_keywords
        if unknown_keywords:
            parser.error(
                "unknown keyword%s: %s (use 'pkgcheck show --keywords' to show valid keywords)"
                % (_pl(unknown_keywords), ', '.join(unknown_keywords)))

        # filter outputted keywords
        namespace.enabled_keywords = base.filter_update(
            namespace.enabled_keywords, enabled_keywords, disabled_keywords)

    namespace.filtered_keywords = set(namespace.enabled_keywords)
    if namespace.filtered_keywords == set(_known_keywords):
        namespace.filtered_keywords = None

    disabled_checks, enabled_checks = ((), ())
    if namespace.selected_checks is not None:
        disabled_checks, enabled_checks = namespace.selected_checks
        # validate selected checks
        selected_checks = set(disabled_checks + enabled_checks)
        available_checks = set(x.__name__ for x in _known_checks)
        unknown_checks = selected_checks - available_checks
        if unknown_checks:
            parser.error(
                "unknown check%s: %r (use 'pkgcheck show --checks' to show valid checks)"
                % (_pl(unknown_checks), ', '.join(unknown_checks)))
    elif namespace.filtered_keywords is not None:
        # enable checks based on enabled keyword -> check mapping
        enabled_checks = []
        for check in _known_checks:
            if namespace.filtered_keywords.intersection(check.known_results):
                enabled_checks.append(check.__name__)

    # filter checks to run
    if enabled_checks:
        whitelist = base.Whitelist(enabled_checks)
        namespace.enabled_checks = list(
            whitelist.filter(namespace.enabled_checks))
    if disabled_checks:
        blacklist = base.Blacklist(disabled_checks)
        namespace.enabled_checks = list(
            blacklist.filter(namespace.enabled_checks))

    if not namespace.enabled_checks:
        parser.error('no active checks')

    namespace.addons = set()

    for check in namespace.enabled_checks:
        add_addon(check, namespace.addons)
    try:
        for addon in namespace.addons:
            addon.check_args(parser, namespace)
    except argparse.ArgumentError as e:
        if namespace.debug:
            raise
        parser.error(str(e))
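
The stdin handling above wraps input in a generator so targets are consumed lazily; a standalone sketch of the same pattern (a close variant that also skips blank lines):

import sys

def read_targets():
    # yield one stripped target per line of stdin, stopping at EOF
    for line in sys.stdin:
        target = line.rstrip()
        if target:
            yield target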
Example #14
File: ops.py Project: radhermit/pkgcore
def merge_contents(cset, offset=None, callback=None):

    """
    merge a :class:`pkgcore.fs.contents.contentsSet` instance to the livefs

    :param cset: :class:`pkgcore.fs.contents.contentsSet` instance
    :param offset: if not None, offset to prefix all locations with.
        Think of it as target dir.
    :param callback: callable to report each entry being merged; given a single arg,
        the fs object being merged.
    :raise EnvironmentError: Thrown for permission failures.
    """

    if callback is None:
        callback = lambda obj: None

    ensure_perms = get_plugin("fs_ops.ensure_perms")
    copyfile = get_plugin("fs_ops.copyfile")
    mkdir = get_plugin("fs_ops.mkdir")

    if not isinstance(cset, contents.contentsSet):
        raise TypeError(f'cset must be a contentsSet, got {cset!r}')

    if offset is not None:
        if os.path.exists(offset):
            if not os.path.isdir(offset):
                raise TypeError(f'offset must be a dir, or not exist: {offset}')
        else:
            mkdir(fs.fsDir(offset, strict=False))
        iterate = partial(contents.offset_rewriter, offset.rstrip(os.path.sep))
    else:
        iterate = iter

    d = list(iterate(cset.iterdirs()))
    d.sort()
    for x in d:
        callback(x)

        try:
            # we pass in the stat ourselves, using stat instead of the
            # lstat gen_obj uses internally; this is the equivalent of
            # "dereference that link"
            obj = gen_obj(x.location, stat=os.stat(x.location))
            if not fs.isdir(obj):
                # according to the spec, dirs can't be merged over files
                # that aren't dirs or symlinks to dirs
                raise CannotOverwrite(x.location, obj)
            ensure_perms(x, obj)
        except FileNotFoundError:
            try:
                # we do this form to catch dangling symlinks
                mkdir(x)
            except FileExistsError:
                os.unlink(x.location)
                mkdir(x)
            ensure_perms(x)
    del d

    # might look odd, but what this does is minimize the try/except cost
    # to one time, assuming everything behaves, rather than per item.
    i = iterate(cset.iterdirs(invert=True))
    merged_inodes = {}
    while True:
        try:
            for x in i:
                callback(x)

                if x.is_reg:
                    key = (x.dev, x.inode)
                    # This logic could be made smarter- instead of
                    # blindly trying candidates, we could inspect the st_dev
                    # of the final location.  This however can be broken by
                    # overlayfs's potentially.  Brute force is in use either
                    # way.
                    candidates = merged_inodes.setdefault(key, [])
                    if any(target._can_be_hardlinked(x) and do_link(target, x)
                            for target in candidates):
                        continue
                    candidates.append(x)

                copyfile(x, mkdirs=True)

            break
        except CannotOverwrite as cf:
            if not fs.issym(x):
                raise

            # by this time, all directories should've been merged.
            # thus we can check the target
            try:
                if not fs.isdir(gen_obj(pjoin(x.location, x.target))):
                    raise
            except OSError:
                raise cf
    return True
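
The hardlink preservation above keys regular files by (st_dev, st_ino); a condensed standalone sketch of the idea using plain os calls instead of pkgcore's fs objects (function names are illustrative):

import os

def merge_preserving_hardlinks(paths, link, copy):
    """Re-link files that shared an inode at the source; copy the rest."""
    seen = {}  # (st_dev, st_ino) -> first merged path
    for p in paths:
        st = os.stat(p)
        key = (st.st_dev, st.st_ino)
        if key in seen:
            link(seen[key], p)  # recreate the hardlink relationship
        else:
            seen[key] = p
            copy(p)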
Example #15
File: ops.py Project: chutz/pkgcore
def merge_contents(cset, offset=None, callback=None):
    """
    merge a :class:`pkgcore.fs.contents.contentsSet` instance to the livefs

    :param cset: :class:`pkgcore.fs.contents.contentsSet` instance
    :param offset: if not None, offset to prefix all locations with.
        Think of it as target dir.
    :param callback: callable to report each entry being merged; given a single arg,
        the fs object being merged.
    :raise EnvironmentError: Thrown for permission failures.
    """

    if callback is None:
        callback = lambda obj: None

    ensure_perms = get_plugin("fs_ops.ensure_perms")
    copyfile = get_plugin("fs_ops.copyfile")
    mkdir = get_plugin("fs_ops.mkdir")

    if not isinstance(cset, contents.contentsSet):
        raise TypeError("cset must be a contentsSet, got %r" % (cset, ))

    if offset is not None:
        if os.path.exists(offset):
            if not os.path.isdir(offset):
                raise TypeError("offset must be a dir, or not exist: %s" %
                                offset)
        else:
            mkdir(fs.fsDir(offset, strict=False))
        iterate = partial(contents.offset_rewriter, offset.rstrip(os.path.sep))
    else:
        iterate = iter

    d = list(iterate(cset.iterdirs()))
    d.sort()
    for x in d:
        callback(x)

        try:
            # we pass in the stat ourselves, using stat instead of the
            # lstat gen_obj uses internally; this is the equivalent of
            # "dereference that link"
            obj = gen_obj(x.location, stat=os.stat(x.location))
            if not fs.isdir(obj):
                raise Exception(
                    "%s exists and needs to be a dir, but is a %s" %
                    (x.location, obj))
            ensure_perms(x, obj)
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            try:
                # we do this form to catch dangling symlinks
                mkdir(x)
            except OSError as oe:
                if oe.errno != errno.EEXIST:
                    raise
                os.unlink(x.location)
                mkdir(x)
            ensure_perms(x)
    del d

    # might look odd, but what this does is minimize the try/except cost
    # to one time, assuming everything behaves, rather than per item.
    i = iterate(cset.iterdirs(invert=True))
    merged_inodes = {}
    while True:
        try:
            for x in i:
                callback(x)

                if x.is_reg:
                    key = (x.dev, x.inode)
                    link_target = merged_inodes.get(key)
                    if link_target is not None and \
                        link_target._can_be_hardlinked(x):
                        if do_link(link_target, x):
                            continue
                        # TODO: should notify that hardlinking failed.
                    merged_inodes.setdefault(key, x)

                copyfile(x, mkdirs=True)
            break
        except CannotOverwrite as cf:
            if not fs.issym(x):
                raise

            # by this time, all directories should've been merged.
            # thus we can check the target
            try:
                if not fs.isdir(gen_obj(pjoin(x.location, x.target))):
                    raise
            except OSError:
                raise cf
    return True
Example #16
    def check_values(self, values, args):
        values, args = commandline.OptionParser.check_values(
            self, values, args)
        # XXX hack...
        values.checks = sorted(lists.unstable_unique(
            get_plugins('check', plugins)),
            key=lambda x: x.__name__)
        if values.list_checks or values.list_reporters:
            if values.list_reporters == values.list_checks:
                raise optparse.OptionValueError("--list-checks and "
                    "--list-reporters are mutually exclusive options- "
                    "one or the other.")
            return values, ()
        cwd = None
        if values.suite is None:
            # No suite explicitly specified. Use the repo to guess the suite.
            if values.target_repo is None:
                # Not specified either. Try to find a repo our cwd is in.
                cwd = os.getcwd()
                # The use of a dict here is a hack to deal with one
                # repo having multiple names in the configuration.
                candidates = {}
                for name, suite in values.config.pcheck_suite.iteritems():
                    repo = suite.target_repo
                    if repo is None:
                        continue
                    repo_base = getattr(repo, 'base', None)
                    if repo_base is not None and cwd.startswith(repo_base):
                        candidates[repo] = name
                if len(candidates) == 1:
                    values.guessed_suite = True
                    values.target_repo = tuple(candidates)[0]
            if values.target_repo is not None:
                # We have a repo, now find a suite matching it.
                candidates = list(
                    suite for suite in values.config.pcheck_suite.itervalues()
                    if suite.target_repo is values.target_repo)
                if len(candidates) == 1:
                    values.guessed_suite = True
                    values.suite = candidates[0]
            if values.suite is None:
                # If we have multiple candidates or no candidates we
                # fall back to the default suite.
                values.suite = values.config.get_default('pcheck_suite')
                values.default_suite = values.suite is not None
        if values.suite is not None:
            # We have a suite. Lift defaults from it for values that
            # were not set explicitly:
            if values.checkset is None:
                values.checkset = values.suite.checkset
            if values.src_repo is None:
                values.src_repo = values.suite.src_repo
            # If we were called with no atoms we want to force
            # cwd-based detection.
            if values.target_repo is None:
                if args:
                    values.target_repo = values.suite.target_repo
                elif values.suite.target_repo is not None:
                    # No atoms were passed in, so we want to guess
                    # what to scan based on cwd below. That only makes
                    # sense if we are inside the target repo. We still
                    # want to pick the suite's target repo if we are
                    # inside it, in case there is more than one repo
                    # definition with a base that contains our dir.
                    if cwd is None:
                        cwd = os.getcwd()
                    repo_base = getattr(values.suite.target_repo, 'base', None)
                    if repo_base is not None and cwd.startswith(repo_base):
                        values.target_repo = values.suite.target_repo
        if values.target_repo is None:
            # We have no target repo (not explicitly passed, not from
            # a suite, not from an earlier guess at the target_repo).
            # Try to guess one from cwd:
            if cwd is None:
                cwd = os.getcwd()
            candidates = {}
            for name, repo in values.config.repo.iteritems():
                repo_base = getattr(repo, 'base', None)
                if repo_base is not None and cwd.startswith(repo_base):
                    candidates[repo] = name
            if not candidates:
                self.error(
                    'No target repo specified on commandline or suite and '
                    'current directory is not inside a known repo.')
            elif len(candidates) > 1:
                self.error(
                    'Found multiple matches when guessing repo based on '
                    'current directory (%s). Specify a repo on the '
                    'commandline or suite or remove some repos from your '
                    'configuration.' % (
                        ', '.join(str(repo) for repo in candidates),))
            values.target_repo = tuple(candidates)[0]

        if values.reporter is None:
            values.reporter = values.config.get_default(
                'pcheck_reporter_factory')
            if values.reporter is None:
                values.reporter = get_plugin('reporter', plugins)
            if values.reporter is None:
                self.error('no config defined reporter found, nor any default '
                    'plugin based reporters')
        else:
            func = values.config.pcheck_reporter_factory.get(values.reporter)
            if func is None:
                func = list(base.Whitelist([values.reporter]).filter(
                    get_plugins('reporter', plugins)))
                if not func:
                    self.error("no reporter matches %r\n"
                        "please see --list-reporter for a list of "
                        "valid reporters" % values.reporter)
                elif len(func) > 1:
                    self.error("--reporter %r matched multiple reporters, "
                        "must match one. %r" %
                            (values.reporter,
                                tuple(sorted("%s.%s" %
                                    (x.__module__, x.__name__)
                                    for x in func))
                            )
                    )
                func = func[0]
            values.reporter = func
        if values.src_repo is None:
            values.src_repo = values.target_repo
            values.search_repo = values.target_repo
        else:
            values.search_repo = multiplex.tree(values.target_repo,
                                                values.src_repo)

        # TODO improve this to deal with a multiplex repo.
        for repo in set((values.src_repo, values.target_repo)):
            if isinstance(repo, repository.UnconfiguredTree):
                values.repo_bases.append(osutils.abspath(repo.base))

        if args:
            values.limiters = lists.stable_unique(map(
                    parserestrict.parse_match, args))
        else:
            repo_base = getattr(values.target_repo, 'base', None)
            if not repo_base:
                self.error(
                    'Either specify a target repo that is not multi-tree or '
                    'one or more extended atoms to scan '
                    '("*" for the entire repo).')
            cwd = osutils.abspath(os.getcwd())
            repo_base = osutils.abspath(repo_base)
            if not cwd.startswith(repo_base):
                self.error(
                    'Working dir (%s) is not inside target repo (%s). Fix '
                    'that or specify one or more extended atoms to scan.' % (
                        cwd, repo_base))
            bits = list(p for p in cwd[len(repo_base):].split(os.sep) if p)
            if not bits:
                values.limiters = [packages.AlwaysTrue]
            elif len(bits) == 1:
                values.limiters = [packages.PackageRestriction(
                        'category', StrExactMatch(bits[0]))]
            else:
                values.limiters = [packages.AndRestriction(
                        packages.PackageRestriction(
                            'category', StrExactMatch(bits[0])),
                        packages.PackageRestriction(
                            'package', StrExactMatch(bits[1])))]

        if values.checkset is None:
            values.checkset = values.config.get_default('pcheck_checkset')
        if values.checkset is not None:
            values.checks = list(values.checkset.filter(values.checks))

        if values.checks_to_run:
            whitelist = base.Whitelist(values.checks_to_run)
            values.checks = list(whitelist.filter(values.checks))

        if values.checks_to_disable:
            blacklist = base.Blacklist(values.checks_to_disable)
            values.checks = list(blacklist.filter(values.checks))

        if not values.checks:
            self.error('No active checks')

        values.addons = set()
        def add_addon(addon):
            if addon not in values.addons:
                values.addons.add(addon)
                for dep in addon.required_addons:
                    add_addon(dep)
        for check in values.checks:
            add_addon(check)
        try:
            for addon in values.addons:
                addon.check_values(values)
        except optparse.OptionValueError as e:
            if values.debug:
                raise
            self.error(str(e))
Example #17
 def trigger(self, engine, merging_cset):
     op = get_plugin('fs_ops.merge_contents')
     return op(merging_cset, callback=engine.observer.installing_fs_obj)
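
A hedged sketch of an observer compatible with this wiring (the installing_fs_obj attribute name comes from the snippet; the class itself is illustrative):

class PrintObserver:
    # matches the single-argument callback contract merge_contents expects
    def installing_fs_obj(self, obj):
        print('installing', obj.location)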
Example #18
File: ops.py Project: vapier/pkgcore
def merge_contents(cset, offset=None, callback=None):

    """
    merge a :class:`pkgcore.fs.contents.contentsSet` instance to the livefs

    :param cset: :class:`pkgcore.fs.contents.contentsSet` instance
    :param offset: if not None, offset to prefix all locations with.
        Think of it as target dir.
    :param callback: callable to report each entry being merged; given a single arg,
        the fs object being merged.
    :raise EnvironmentError: Thrown for permission failures.
    """

    if callback is None:
        callback = lambda obj: None

    ensure_perms = get_plugin("fs_ops.ensure_perms")
    copyfile = get_plugin("fs_ops.copyfile")
    mkdir = get_plugin("fs_ops.mkdir")

    if not isinstance(cset, contents.contentsSet):
        raise TypeError("cset must be a contentsSet, got %r" % (cset,))

    if offset is not None:
        if os.path.exists(offset):
            if not os.path.isdir(offset):
                raise TypeError("offset must be a dir, or not exist: %s" % offset)
        else:
            mkdir(fs.fsDir(offset, strict=False))
        iterate = partial(contents.offset_rewriter, offset.rstrip(os.path.sep))
    else:
        iterate = iter

    d = list(iterate(cset.iterdirs()))
    d.sort()
    for x in d:
        callback(x)

        try:
            # we pass in the stat ourselves, using stat instead of the
            # lstat gen_obj uses internally; this is the equivalent of
            # "dereference that link"
            obj = gen_obj(x.location, stat=os.stat(x.location))
            if not fs.isdir(obj):
                raise Exception(
                    "%s exists and needs to be a dir, but is a %s" %
                    (x.location, obj))
            ensure_perms(x, obj)
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            try:
                # we do this form to catch dangling symlinks
                mkdir(x)
            except OSError as oe:
                if oe.errno != errno.EEXIST:
                    raise
                os.unlink(x.location)
                mkdir(x)
            ensure_perms(x)
    del d

    # might look odd, but what this does is minimize the try/except cost
    # to one time, assuming everything behaves, rather than per item.
    i = iterate(cset.iterdirs(invert=True))
    merged_inodes = {}
    while True:
        try:
            for x in i:
                callback(x)

                if x.is_reg:
                    key = (x.dev, x.inode)
                    link_target = merged_inodes.get(key)
                    if link_target is not None and \
                        link_target._can_be_hardlinked(x):
                        if do_link(link_target, x):
                            continue
                        # TODO: should notify that hardlinking failed.
                    merged_inodes.setdefault(key, x)

                copyfile(x, mkdirs=True)
            break
        except CannotOverwrite as cf:
            if not fs.issym(x):
                raise

            # by this time, all directories should've been merged.
            # thus we can check the target
            try:
                if not fs.isdir(gen_obj(pjoin(x.location, x.target))):
                    raise
            except OSError:
                raise cf
    return True
Example #19
 def trigger(self, engine, unmerging_cset):
     op = get_plugin('fs_ops.unmerge_contents')
     return op(unmerging_cset, callback=engine.observer.removing_fs_obj)
Example #20
def check_args(parser, namespace):
    # XXX hack...
    namespace.checks = sorted(unstable_unique(
        get_plugins('check', plugins)),
        key=lambda x: x.__name__)

    if any((namespace.list_keywords, namespace.list_checks, namespace.list_reporters)):
        # no need to check any other args
        return

    cwd = abspath(os.getcwd())
    if namespace.suite is None:
        # No suite explicitly specified. Use the repo to guess the suite.
        if namespace.target_repo is None:
            # Not specified either. Try to find a repo our cwd is in.
            # The use of a dict here is a hack to deal with one
            # repo having multiple names in the configuration.
            candidates = {}
            for name, suite in namespace.config.pkgcheck_suite.iteritems():
                repo = suite.target_repo
                if repo is None:
                    continue
                repo_base = getattr(repo, 'location', None)
                if repo_base is not None and cwd.startswith(repo_base):
                    candidates[repo] = name
            if len(candidates) == 1:
                namespace.guessed_suite = True
                namespace.target_repo = tuple(candidates)[0]
        if namespace.target_repo is not None:
            # We have a repo, now find a suite matching it.
            candidates = list(
                suite for suite in namespace.config.pkgcheck_suite.itervalues()
                if suite.target_repo is namespace.target_repo)
            if len(candidates) == 1:
                namespace.guessed_suite = True
                namespace.suite = candidates[0]
        if namespace.suite is None:
            # If we have multiple candidates or no candidates we
            # fall back to the default suite.
            namespace.suite = namespace.config.get_default('pkgcheck_suite')
            namespace.default_suite = namespace.suite is not None
    if namespace.suite is not None:
        # We have a suite. Lift defaults from it for values that
        # were not set explicitly:
        if namespace.checkset is None:
            namespace.checkset = namespace.suite.checkset
        # If we were called with no atoms we want to force
        # cwd-based detection.
        if namespace.target_repo is None:
            if namespace.targets:
                namespace.target_repo = namespace.suite.target_repo
            elif namespace.suite.target_repo is not None:
                # No atoms were passed in, so we want to guess
                # what to scan based on cwd below. That only makes
                # sense if we are inside the target repo. We still
                # want to pick the suite's target repo if we are
                # inside it, in case there is more than one repo
                # definition with a base that contains our dir.
                repo_base = getattr(namespace.suite.target_repo, 'location', None)
                if repo_base is not None and cwd.startswith(repo_base):
                    namespace.target_repo = namespace.suite.target_repo
    if namespace.target_repo is None:
        # We have no target repo (not explicitly passed, not from a suite, not
        # from an earlier guess at the target_repo) so try to guess one.
        if len(namespace.targets) == 1 and os.path.exists(namespace.targets[0]):
            target_dir = namespace.targets[0]
        else:
            target_dir = cwd
        target_repo = None
        for name, repo in namespace.config.repo.iteritems():
            repo_base = getattr(repo, 'location', None)
            if repo_base is not None and target_dir in repo:
                target_repo = repo
        if target_repo is None:
            parser.error(
                'no target repo specified and '
                'current directory is not inside a known repo')
        namespace.target_repo = target_repo

    if namespace.reporter is None:
        namespace.reporter = namespace.config.get_default(
            'pkgcheck_reporter_factory')
        if namespace.reporter is None:
            namespace.reporter = get_plugin('reporter', plugins)
        if namespace.reporter is None:
            parser.error(
                'no config defined reporter found, nor any default '
                'plugin based reporters')
    else:
        func = namespace.config.pkgcheck_reporter_factory.get(namespace.reporter)
        if func is None:
            func = list(base.Whitelist([namespace.reporter]).filter(
                get_plugins('reporter', plugins)))
            if not func:
                parser.error(
                    "no reporter matches %r (available: %s)" % (
                        namespace.reporter,
                        ', '.join(sorted(x.__name__ for x in get_plugins('reporter', plugins)))
                    )
                )
            elif len(func) > 1:
                parser.error(
                    "--reporter %r matched multiple reporters, "
                    "must match one. %r" % (
                        namespace.reporter,
                        tuple(sorted("%s.%s" % (x.__module__, x.__name__) for x in func))
                    )
                )
            func = func[0]
        namespace.reporter = func

    # search_repo is a multiplex of target_repo and its masters, make sure
    # they're configured properly in metadata/layout.conf. This is used for
    # things like visibility checks (it is passed to the checkers in "start").
    namespace.search_repo = multiplex.tree(*namespace.target_repo.trees)

    namespace.repo_bases = [abspath(repo.location) for repo in reversed(namespace.target_repo.trees)]

    if namespace.targets:
        limiters = []
        repo = namespace.target_repo

        # read targets from stdin
        if len(namespace.targets) == 1 and namespace.targets[0] == '-':
            namespace.targets = [x.strip() for x in sys.stdin.readlines() if x.strip() != '']
            # reassign stdin to allow interactivity (currently only works for unix)
            sys.stdin = open('/dev/tty')

        for target in namespace.targets:
            try:
                limiters.append(parserestrict.parse_match(target))
            except parserestrict.ParseError as e:
                if os.path.exists(target):
                    try:
                        limiters.append(repo.path_restrict(target))
                    except ValueError as e:
                        parser.error(e)
                else:
                    parser.error(e)
        namespace.limiters = limiters
    else:
        repo_base = getattr(namespace.target_repo, 'location', None)
        if not repo_base:
            parser.error(
                'Either specify a target repo that is not multi-tree or '
                'one or more extended atoms to scan '
                '("*" for the entire repo).')
        if cwd not in namespace.target_repo:
            namespace.limiters = [packages.AlwaysTrue]
        else:
            namespace.limiters = [packages.AndRestriction(*namespace.target_repo.path_restrict(cwd))]

    if namespace.checkset is None:
        namespace.checkset = namespace.config.get_default('pkgcheck_checkset')
    if namespace.checkset is not None:
        namespace.checks = list(namespace.checkset.filter(namespace.checks))

    disabled_checks, enabled_checks = ((), ())
    if namespace.selected_checks is not None:
        disabled_checks, enabled_checks = namespace.selected_checks

    if enabled_checks:
        whitelist = base.Whitelist(enabled_checks)
        namespace.checks = list(whitelist.filter(namespace.checks))

    if disabled_checks:
        blacklist = base.Blacklist(disabled_checks)
        namespace.checks = list(blacklist.filter(namespace.checks))

    if not namespace.checks:
        parser.error('no active checks')

    namespace.addons = set()

    def add_addon(addon):
        if addon not in namespace.addons:
            namespace.addons.add(addon)
            for dep in addon.required_addons:
                add_addon(dep)
    for check in namespace.checks:
        add_addon(check)
    try:
        for addon in namespace.addons:
            addon.check_args(parser, namespace)
    except argparse.ArgumentError as e:
        if namespace.debug:
            raise
        parser.error(str(e))
Example #21
def check_args(parser, namespace):
    # XXX hack...
    namespace.checks = sorted(unstable_unique(get_plugins('check', plugins)),
                              key=lambda x: x.__name__)

    if any((namespace.list_keywords, namespace.list_checks,
            namespace.list_reporters)):
        # no need to check any other args
        return

    cwd = abspath(os.getcwd())
    if namespace.suite is None:
        # No suite explicitly specified. Use the repo to guess the suite.
        if namespace.target_repo is None:
            # Not specified either. Try to find a repo our cwd is in.
            # The use of a dict here is a hack to deal with one
            # repo having multiple names in the configuration.
            candidates = {}
            for name, suite in namespace.config.pkgcheck_suite.iteritems():
                repo = suite.target_repo
                if repo is None:
                    continue
                repo_base = getattr(repo, 'location', None)
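                # note: plain string prefix match, so /repo would also match
                # a cwd under /repo-extra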
                if repo_base is not None and cwd.startswith(repo_base):
                    candidates[repo] = name
            if len(candidates) == 1:
                namespace.guessed_suite = True
                namespace.target_repo = tuple(candidates)[0]
        if namespace.target_repo is not None:
            # We have a repo, now find a suite matching it.
            candidates = list(
                suite
                for suite in namespace.config.pkgcheck_suite.itervalues()
                if suite.target_repo is namespace.target_repo)
            if len(candidates) == 1:
                namespace.guessed_suite = True
                namespace.suite = candidates[0]
        if namespace.suite is None:
            # If we have multiple candidates or no candidates, we
            # fall back to the default suite.
            namespace.suite = namespace.config.get_default('pkgcheck_suite')
            namespace.default_suite = namespace.suite is not None
    if namespace.suite is not None:
        # We have a suite. Lift defaults from it for values that
        # were not set explicitly:
        if namespace.checkset is None:
            namespace.checkset = namespace.suite.checkset
        # If we were called with no atoms, we want to force
        # cwd-based detection.
        if namespace.target_repo is None:
            if namespace.targets:
                namespace.target_repo = namespace.suite.target_repo
            elif namespace.suite.target_repo is not None:
                # No atoms were passed in, so we want to guess
                # what to scan based on cwd below. That only makes
                # sense if we are inside the target repo. We still
                # want to pick the suite's target repo if we are
                # inside it, in case there is more than one repo
                # definition with a base that contains our dir.
                repo_base = getattr(namespace.suite.target_repo, 'location',
                                    None)
                if repo_base is not None and cwd.startswith(repo_base):
                    namespace.target_repo = namespace.suite.target_repo
    if namespace.target_repo is None:
        # We have no target repo (not explicitly passed, not from a suite, not
        # from an earlier guess at the target_repo), so try to guess one.
        if len(namespace.targets) == 1 and os.path.exists(
                namespace.targets[0]):
            target_dir = namespace.targets[0]
        else:
            target_dir = cwd
        target_repo = None
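        # last match wins if several configured repos contain target_dir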
        for name, repo in namespace.config.repo.iteritems():
            repo_base = getattr(repo, 'location', None)
            if repo_base is not None and target_dir in repo:
                target_repo = repo
        if target_repo is None:
            parser.error('no target repo specified and '
                         'current directory is not inside a known repo')
        namespace.target_repo = target_repo

    if namespace.reporter is None:
        namespace.reporter = namespace.config.get_default(
            'pkgcheck_reporter_factory')
        if namespace.reporter is None:
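            # no config default either; fall back to a plugin-provided reporter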
            namespace.reporter = get_plugin('reporter', plugins)
        if namespace.reporter is None:
            parser.error('no config-defined reporter found, nor any default '
                         'plugin-based reporters')
    else:
        func = namespace.config.pkgcheck_reporter_factory.get(
            namespace.reporter)
        if func is None:
            func = list(
                base.Whitelist([namespace.reporter
                                ]).filter(get_plugins('reporter', plugins)))
            if not func:
                parser.error(
                    "no reporter matches %r (available: %s)" %
                    (namespace.reporter, ', '.join(
                        sorted(x.__name__
                               for x in get_plugins('reporter', plugins)))))
            elif len(func) > 1:
                parser.error("--reporter %r matched multiple reporters, "
                             "must match one. %r" %
                             (namespace.reporter,
                              tuple(
                                  sorted("%s.%s" % (x.__module__, x.__name__)
                                         for x in func))))
            func = func[0]
        namespace.reporter = func

    # search_repo is a multiplex of target_repo and its masters; make sure
    # they're configured properly in metadata/layout.conf. This is used for
    # things like visibility checks (it is passed to the checkers in "start").
    namespace.search_repo = multiplex.tree(*namespace.target_repo.trees)

    namespace.repo_bases = [
        abspath(repo.location)
        for repo in reversed(namespace.target_repo.trees)
    ]

    if namespace.targets:
        limiters = []
        repo = namespace.target_repo

        # read targets from stdin
        if len(namespace.targets) == 1 and namespace.targets[0] == '-':
            namespace.targets = [
                x.strip() for x in sys.stdin.readlines() if x.strip() != ''
            ]
            # reassign stdin to allow interactivity (currently Unix-only, via /dev/tty)
            sys.stdin = open('/dev/tty')

        for target in namespace.targets:
            try:
                limiters.append(parserestrict.parse_match(target))
            except parserestrict.ParseError as e:
                if os.path.exists(target):
                    try:
                        limiters.append(repo.path_restrict(target))
                    except ValueError as e:
                        parser.error(e)
                else:
                    parser.error(e)
        namespace.limiters = limiters
    else:
        repo_base = getattr(namespace.target_repo, 'location', None)
        if not repo_base:
            parser.error(
                'Either specify a target repo that is not multi-tree or '
                'one or more extended atoms to scan '
                '("*" for the entire repo).')
        if cwd not in namespace.target_repo:
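            # cwd is outside the repo, so scan the entire tree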
            namespace.limiters = [packages.AlwaysTrue]
        else:
            namespace.limiters = [
                packages.AndRestriction(
                    *namespace.target_repo.path_restrict(cwd))
            ]

    if namespace.checkset is None:
        namespace.checkset = namespace.config.get_default('pkgcheck_checkset')
    if namespace.checkset is not None:
        namespace.checks = list(namespace.checkset.filter(namespace.checks))

    disabled_checks, enabled_checks = ((), ())
    if namespace.selected_checks is not None:
        disabled_checks, enabled_checks = namespace.selected_checks

    if enabled_checks:
        whitelist = base.Whitelist(enabled_checks)
        namespace.checks = list(whitelist.filter(namespace.checks))

    if disabled_checks:
        blacklist = base.Blacklist(disabled_checks)
        namespace.checks = list(blacklist.filter(namespace.checks))

    if not namespace.checks:
        parser.error('no active checks')

    namespace.addons = set()

    def add_addon(addon):
        if addon not in namespace.addons:
            namespace.addons.add(addon)
            for dep in addon.required_addons:
                add_addon(dep)

    for check in namespace.checks:
        add_addon(check)
    try:
        for addon in namespace.addons:
            addon.check_args(parser, namespace)
    except argparse.ArgumentError as e:
        if namespace.debug:
            raise
        parser.error(str(e))
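
The add_addon() walk above computes the transitive closure of each check's required_addons before their check_args() hooks run. The same pattern in isolation, with hypothetical stand-in classes (the real addon classes live in pkgcheck; these three are illustrative only):

# self-contained sketch of the addon dependency walk used above
class GitAddon(object):
    required_addons = ()

class ProfileAddon(object):
    required_addons = (GitAddon,)

class VisibilityCheck(object):
    required_addons = (ProfileAddon,)

def addon_closure(checks):
    addons = set()
    def add_addon(addon):
        if addon not in addons:
            addons.add(addon)
            for dep in addon.required_addons:
                add_addon(dep)
    for check in checks:
        add_addon(check)
    return addons

# yields {VisibilityCheck, ProfileAddon, GitAddon}
print(addon_closure([VisibilityCheck]))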