Ejemplo n.º 1
0
def gen_config_protect_filter(offset, extra_protects=(), extra_disables=()):
    """Build a value restriction matching CONFIG_PROTECT'd paths under offset.

    Reads CONFIG_PROTECT / CONFIG_PROTECT_MASK from etc/env.d (plus any
    extras passed in) and returns a restriction matching protected paths
    while excluding masked ones.
    """
    collapsed_d, inc, colon = collapse_envd(pjoin(offset, "etc/env.d"))
    collapsed_d.setdefault("CONFIG_PROTECT", []).extend(extra_protects)
    collapsed_d.setdefault("CONFIG_PROTECT_MASK", []).extend(extra_disables)

    # /etc is always protected; glob-match each protected dir as a prefix.
    protected = set(stable_unique(collapsed_d["CONFIG_PROTECT"] + ["/etc"]))
    matchers = [
        values.StrGlobMatch(normpath(path).rstrip("/") + "/")
        for path in protected
    ]
    restrict = matchers[0] if len(matchers) == 1 else values.OrRestriction(*matchers)

    masked = stable_unique(collapsed_d["CONFIG_PROTECT_MASK"])
    if masked:
        if len(masked) == 1:
            neg_restrict = values.StrGlobMatch(
                normpath(masked[0]).rstrip("/") + "/", negate=True)
        else:
            # negated OR: match only paths under none of the masked dirs
            neg_restrict = values.OrRestriction(
                negate=True,
                *[values.StrGlobMatch(normpath(path).rstrip("/") + "/")
                  for path in set(masked)])
        restrict = values.AndRestriction(restrict, neg_restrict)
    return restrict
Ejemplo n.º 2
0
def rewrite_lafile(handle, filename):
    """Rewrite a libtool .la file's dependency_libs into -l/-L/-R form.

    :param handle: file-like object for the .la file to parse.
    :param filename: path substituted into the rendered template.
    :return: tuple of (modified, content); ``(False, None)`` when no rewrite
        is needed, else ``(True, new_file_content)``.
    :raise UnknownData: if an entry in dependency_libs isn't recognized.
    """
    data = parse_lafile(handle)
    raw_dep_libs = data.get("dependency_libs", False)
    if not raw_dep_libs:
        # nothing to rewrite
        return False, None

    original_libs = raw_dep_libs.split()
    rpaths, libs, libladirs, inherited_flags = [], [], [], []
    original_inherited_flags = data.get("inherited_linker_flags", [])

    for item in stable_unique(original_libs):
        if item.startswith("-l"):
            libs.append(item)
        elif item.endswith(".la"):
            base = basename(item)
            if base.startswith("lib"):
                # convert to -l; punt .la, and 'lib' prefix
                libs.append("-l" + base[3:-3])
                libladirs.append("-L" + dirname(item))
            else:
                libs.append(item)
        elif item.startswith("-L"):
            # this is heinous, but is what the script did.
            item = x11_sub(item)
            item = local_sub(item)
            item = pkgconfig1_sub(item)
            item = pkgconfig2_sub(item)
            libladirs.append(item)
        elif item.startswith("-R"):
            rpaths.append(item)
        elif flags_match(item):
            # NOTE(review): inherited_flags starts empty and is only appended
            # inside this branch, so it can never become truthy -- every flag
            # lands in libs.  Looks like this was meant to test
            # original_inherited_flags; preserved as-is pending confirmation.
            if inherited_flags:
                inherited_flags.append(item)
            else:
                libs.append(item)
        else:
            raise UnknownData(raw_dep_libs, item)
    libs = stable_unique(rpaths + libladirs + libs)
    inherited_flags = stable_unique(inherited_flags)
    if libs == original_libs and inherited_flags == original_inherited_flags:
        # already canonical; leave the file untouched
        return False, None

    # must be prefixed with a space
    data["dependency_libs"] = ' ' + (' '.join(libs))
    if inherited_flags:
        # must be prefixed with a space
        # NOTE(review): written under "inherited_flags" although read from
        # "inherited_linker_flags" above -- TODO confirm intended key.
        data["inherited_flags"] = ' ' + (' '.join(inherited_flags))
    # .items() instead of py2-only .iteritems(): identical output on py2,
    # and keeps the function working under py3.
    content = "\n".join("%s='%s'" % (k, v)
                        for k, v in sorted(data.items()))
    return True, template % {"content": content, "file": filename}
Ejemplo n.º 3
0
def gen_collision_ignore_filter(offset, extra_ignores=()):
    """Build a restriction matching paths collision checks should ignore.

    Patterns come from COLLISION_IGNORE in etc/env.d plus *extra_ignores*
    and the standard */.keep entries; existing directories are expanded to
    match their contents via a trailing "/*".
    """
    collapsed_d, inc, colon = collapse_envd(pjoin(offset, "etc/env.d"))
    ignored = collapsed_d.setdefault("COLLISION_IGNORE", [])
    ignored.extend(extra_ignores)
    ignored.extend(["*/.keep", "*/.keep_*"])

    ignored = stable_unique(ignored)
    for i, x in enumerate(ignored):
        if not x.endswith("/*") and os.path.isdir(x):
            # bugfix: rstrip the entry itself, not the list -- the original
            # `ignored.rstrip("/")` raised AttributeError (lists have no rstrip)
            ignored[i] = x.rstrip("/") + "/*"
    ignored = [values.StrRegex(fnmatch.translate(x)) for x in stable_unique(ignored)]
    if len(ignored) == 1:
        return ignored[0]
    return values.OrRestriction(*ignored)
Ejemplo n.º 4
0
def rewrite_lafile(handle, filename):
    """Rewrite a libtool .la file's dependency_libs into -l/-L/-R form.

    :param handle: file-like object for the .la file to parse.
    :param filename: path substituted into the rendered template.
    :return: tuple of (modified, content); ``(False, None)`` when no rewrite
        is needed, else ``(True, new_file_content)``.
    :raise UnknownData: if an entry in dependency_libs isn't recognized.
    """
    data = parse_lafile(handle)
    raw_dep_libs = data.get("dependency_libs", False)
    if not raw_dep_libs:
        # nothing to rewrite
        return False, None

    original_libs = raw_dep_libs.split()
    rpaths, libs, libladirs, inherited_flags = [], [], [], []
    original_inherited_flags = data.get("inherited_linker_flags", [])

    for item in stable_unique(original_libs):
        if item.startswith("-l"):
            libs.append(item)
        elif item.endswith(".la"):
            base = basename(item)
            if base.startswith("lib"):
                # convert to -l; punt .la, and 'lib' prefix
                libs.append("-l" + base[3:-3])
                libladirs.append("-L" + dirname(item))
            else:
                libs.append(item)
        elif item.startswith("-L"):
            # this is heinous, but is what the script did.
            # (indentation normalized: the original body was over-indented)
            item = x11_sub(item)
            item = local_sub(item)
            item = pkgconfig1_sub(item)
            item = pkgconfig2_sub(item)
            libladirs.append(item)
        elif item.startswith("-R"):
            rpaths.append(item)
        elif flags_match(item):
            # NOTE(review): inherited_flags starts empty and is only appended
            # here, so it can never become truthy -- every flag lands in libs.
            # Possibly meant original_inherited_flags; preserved as-is.
            if inherited_flags:
                inherited_flags.append(item)
            else:
                libs.append(item)
        else:
            raise UnknownData(raw_dep_libs, item)
    libs = stable_unique(rpaths + libladirs + libs)
    inherited_flags = stable_unique(inherited_flags)
    if libs == original_libs and inherited_flags == original_inherited_flags:
        # already canonical; leave the file untouched
        return False, None

    # must be prefixed with a space
    data["dependency_libs"] = ' ' + (' '.join(libs))
    if inherited_flags:
        # must be prefixed with a space
        data["inherited_flags"] = ' ' + (' '.join(inherited_flags))
    # .items() instead of py2-only .iteritems(): identical output on py2,
    # and keeps the function working under py3.
    content = "\n".join("%s='%s'" % (k, v) for k, v in sorted(data.items()))
    return True, template % {"content": content, "file": filename}
Ejemplo n.º 5
0
 def __init__(self, pkg, attr, keyword, profile, horked):
     """Result recording a pkg's attr/keyword/profile plus offending values.

     The *horked* values are stringified and de-duplicated (order
     preserved) into ``self.potentials``.
     """
     base.Result.__init__(self)
     self._store_cpv(pkg)
     self.keyword = keyword
     self.attr = attr
     self.profile = profile
     self.potentials = tuple(map(str, stable_unique(horked)))
Ejemplo n.º 6
0
def gen_collision_ignore_filter(offset, extra_ignores=()):
    """Build a restriction matching paths collision checks should ignore.

    Patterns come from COLLISION_IGNORE in etc/env.d plus *extra_ignores*
    and the standard */.keep entries; existing directories are expanded to
    match their contents via a trailing "/*".
    """
    collapsed_d, inc, colon = collapse_envd(pjoin(offset, "etc/env.d"))
    ignored = collapsed_d.setdefault("COLLISION_IGNORE", [])
    ignored.extend(extra_ignores)
    ignored.extend(["*/.keep", "*/.keep_*"])

    ignored = stable_unique(ignored)
    for i, x in enumerate(ignored):
        if not x.endswith("/*") and os.path.isdir(x):
            # bugfix: rstrip the entry itself, not the list -- the original
            # `ignored.rstrip("/")` raised AttributeError (lists have no rstrip)
            ignored[i] = x.rstrip("/") + "/*"
    ignored = [
        values.StrRegex(fnmatch.translate(x)) for x in stable_unique(ignored)
    ]
    if len(ignored) == 1:
        return ignored[0]
    return values.OrRestriction(*ignored)
Ejemplo n.º 7
0
 def test_extend_path(self):
     """mod_testplug.__path__ should equal its unique dirs on sys.path."""
     import mod_testplug
     candidate_dirs = [d for d in sys.path if os.path.isdir(d)]
     expected = lists.stable_unique(
         os.path.join(d, 'mod_testplug') for d in candidate_dirs)
     # third arg: symmetric difference shown on failure for easier debugging
     self.assertEqual(
         expected, mod_testplug.__path__,
         set(expected) ^ set(mod_testplug.__path__))
Ejemplo n.º 8
0
 def test_extend_path(self):
     """mod_testplug.__path__ should equal its unique dirs on sys.path."""
     import mod_testplug
     candidate_dirs = [d for d in sys.path if os.path.isdir(d)]
     expected = lists.stable_unique(
         pjoin(d, 'mod_testplug') for d in candidate_dirs)
     # third arg: symmetric difference shown on failure for easier debugging
     self.assertEqual(
         expected, mod_testplug.__path__,
         set(expected) ^ set(mod_testplug.__path__))
Ejemplo n.º 9
0
def main(options, out, err):
    """Update caches."""
    packages = options.packages
    if not packages:
        # default to pkgcore's own plugin package when none were requested
        from pkgcore import plugins
        packages = options.packages = [plugins]
    for pkg in lists.stable_unique(packages):
        out.write('Updating cache for %s...' % (pkg.__name__,))
        plugin.initialize_cache(pkg, force=True)
Ejemplo n.º 10
0
def gen_config_protect_filter(offset, extra_protects=(), extra_disables=()):
    """Build a value restriction matching CONFIG_PROTECT'd paths under offset.

    CONFIG_PROTECT / CONFIG_PROTECT_MASK come from etc/env.d plus the extra
    sequences passed in; the result matches protected paths while excluding
    masked ones.
    """
    collapsed_d, inc, colon = collapse_envd(pjoin(offset, "etc/env.d"))
    collapsed_d.setdefault("CONFIG_PROTECT", []).extend(extra_protects)
    collapsed_d.setdefault("CONFIG_PROTECT_MASK", []).extend(extra_disables)

    # /etc is always protected; each dir matches as a trailing-slash glob.
    globs = [values.StrGlobMatch(normpath(p).rstrip("/") + "/")
             for p in set(stable_unique(collapsed_d["CONFIG_PROTECT"] + ["/etc"]))]
    if len(globs) == 1:
        restriction = globs[0]
    else:
        restriction = values.OrRestriction(*globs)
    masked = stable_unique(collapsed_d["CONFIG_PROTECT_MASK"])
    if masked:
        if len(masked) == 1:
            neg = values.StrGlobMatch(normpath(masked[0]).rstrip("/") + "/",
                                      negate=True)
        else:
            # negated OR: match only paths under none of the masked dirs
            neg = values.OrRestriction(
                negate=True,
                *[values.StrGlobMatch(normpath(p).rstrip("/") + "/")
                  for p in set(masked)])
        restriction = values.AndRestriction(restriction, neg)
    return restriction
Ejemplo n.º 11
0
def main(options, out, err):
    """Resolve and merge/unmerge the requested targets.

    Top-level driver: builds the output formatter, short-circuits for
    --unmerge and --clean, turns CLI tokens/sets into atoms, runs the
    dependency resolver, then fetches/builds/installs (or removes) each
    planned operation, updating the world file as it goes.

    :param options: parsed commandline namespace (config, domain, targets...).
    :param out: formatted stdout wrapper.
    :param err: stderr wrapper.
    :return: 0/None on success, non-zero on failure.
    """
    config = options.config
    if options.debug:
        resolver.plan.limiters.add(None)

    domain = options.domain
    livefs_repos = domain.all_livefs_repos
    # world_set is None'd for --oneshot so no world updates happen below.
    world_set = world_list = options.world
    if options.oneshot:
        world_set = None

    formatter = options.formatter(
        out=out,
        err=err,
        unstable_arch=domain.unstable_arch,
        domain_settings=domain.settings,
        use_expand=domain.profile.use_expand,
        use_expand_hidden=domain.profile.use_expand_hidden,
        pkg_get_use=domain.get_package_use_unconfigured,
        world_list=world_list,
        verbose=options.verbose,
        livefs_repos=livefs_repos,
        distdir=domain.fetcher.get_storage_path(),
        quiet_repo_display=options.quiet_repo_display)

    # This mode does not care about sets and packages so bypass all that.
    if options.unmerge:
        if not options.oneshot:
            if world_set is None:
                err.write(
                    "Disable world updating via --oneshot, or fix your configuration"
                )
                return 1
        try:
            unmerge(out, err, livefs_repos, options.targets, options,
                    formatter, world_set)
        except (parserestrict.ParseError, Failure) as e:
            out.error(str(e))
            return 1
        return

    source_repos = domain.source_repositories
    installed_repos = domain.installed_repositories

    # Narrow/reorder the source repos according to binary/source preferences.
    if options.usepkgonly:
        source_repos = source_repos.change_repos(
            x for x in source_repos
            if getattr(x, 'repository_type', None) != 'source')
    elif options.usepkg:
        repo_types = [(getattr(x, 'repository_type', None) == 'built', x)
                      for x in source_repos]
        source_repos = source_repos.change_repos(
            [x[1] for x in repo_types if x[0]] +
            [x[1] for x in repo_types if not x[0]])
    elif options.source_only:
        source_repos = source_repos.change_repos(
            x for x in source_repos
            if getattr(x, 'repository_type', None) == 'source')

    # Collect atoms from any requested package sets.
    atoms = []
    for setname, pkgset in options.set:
        if pkgset is None:
            return 1
        l = list(pkgset)
        if not l:
            out.write("skipping set %s: set is empty, nothing to update" %
                      setname)
        else:
            atoms.extend(l)

    # Parse each CLI token into an atom; tokens may also name sets.
    for token in options.targets:
        try:
            a = parse_atom(token,
                           source_repos.combined,
                           livefs_repos,
                           return_none=True)
        except parserestrict.ParseError as e:
            out.error(str(e))
            return 1
        if a is None:
            if token in config.pkgset:
                out.error('No package matches %r, but there is a set with '
                          'that name. Use -s to specify a set.' % (token, ))
                return 2
            # NOTE(review): this branch looks inverted -- "ignoring it" is
            # printed when ignore_failures is NOT set, and -1 is returned
            # when it IS set.  Preserved as-is; confirm intended logic.
            elif not options.ignore_failures:
                out.error('No matches for %r; ignoring it' % token)
            else:
                return -1
        else:
            atoms.append(a)

    if not atoms and not options.newuse:
        out.error('No targets specified; nothing to do')
        return 1

    atoms = stable_unique(atoms)

    if (not options.set or options.clean) and not options.oneshot:
        if world_set is None:
            err.write(
                "Disable world updating via --oneshot, or fix your configuration"
            )
            return 1

    # Pick the resolver flavor: upgrade vs minimal-change install.
    if options.upgrade:
        resolver_kls = resolver.upgrade_resolver
    else:
        resolver_kls = resolver.min_install_resolver

    extra_kwargs = {}
    if options.empty:
        extra_kwargs['resolver_cls'] = resolver.empty_tree_merge_plan
    if options.debug:
        extra_kwargs['debug'] = True

    # XXX: This should recurse on deep
    if options.newuse:
        out.write(out.bold, ' * ', out.reset, 'Scanning for changed USE...')
        out.title('Scanning for changed USE...')
        for inst_pkg in installed_repos.itermatch(OrRestriction(*atoms)):
            src_pkgs = source_repos.match(inst_pkg.versioned_atom)
            if src_pkgs:
                src_pkg = max(src_pkgs)
                inst_iuse = set(use.lstrip("+-") for use in inst_pkg.iuse)
                src_iuse = set(use.lstrip("+-") for use in src_pkg.iuse)
                inst_flags = inst_iuse.intersection(inst_pkg.use)
                src_flags = src_iuse.intersection(src_pkg.use)
                if inst_flags.symmetric_difference(src_flags) or \
                   inst_pkg.iuse.symmetric_difference(src_pkg.iuse):
                    atoms.append(src_pkg.unversioned_atom)

#    left intentionally in place for ease of debugging.
#    from guppy import hpy
#    hp = hpy()
#    hp.setrelheap()

    resolver_inst = resolver_kls(
        installed_repos.repositories,
        source_repos.repositories,
        verify_vdb=options.deep,
        nodeps=options.nodeps,
        drop_cycles=options.ignore_cycles,
        force_replace=options.replace,
        process_built_depends=options.with_built_depends,
        **extra_kwargs)

    if options.preload_vdb_state:
        out.write(out.bold, ' * ', out.reset, 'Preloading vdb... ')
        vdb_time = time()
        resolver_inst.load_vdb_state()
        vdb_time = time() - vdb_time
    else:
        vdb_time = 0.0

    # Run resolution; on failure, optionally drop the failing restrict and
    # retry (--ignore-failures).
    failures = []
    resolve_time = time()
    out.title('Resolving...')
    out.write(out.bold, ' * ', out.reset, 'Resolving...')
    ret = resolver_inst.add_atoms(atoms, finalize=True)
    while ret:
        out.error('resolution failed')
        restrict = ret[0][0]
        just_failures = reduce_to_failures(ret[1])
        display_failures(out, just_failures, debug=options.debug)
        failures.append(restrict)
        if not options.ignore_failures:
            break
        out.write("restarting resolution")
        atoms = [x for x in atoms if x != restrict]
        resolver_inst.reset()
        ret = resolver_inst.add_atoms(atoms, finalize=True)
    resolve_time = time() - resolve_time

    if options.debug:
        out.write(out.bold, " * ", out.reset,
                  "resolution took %.2f seconds" % resolve_time)

    if failures:
        out.write()
        out.write('Failures encountered:')
        for restrict in failures:
            out.error("failed '%s'" % (restrict, ))
            out.write('potentials:')
            match_count = 0
            for r in repo_utils.get_raw_repos(source_repos.repositories):
                l = r.match(restrict)
                if l:
                    out.write("repo %s: [ %s ]" %
                              (r, ", ".join(str(x) for x in l)))
                    match_count += len(l)
            if not match_count:
                out.write("No matches found in %s" %
                          (source_repos.repositories, ))
            out.write()
            if not options.ignore_failures:
                return 1

    resolver_inst.free_caches()

    # --clean: remove everything installed that the resolved plan doesn't use.
    if options.clean:
        out.write(out.bold, ' * ', out.reset, 'Packages to be removed:')
        vset = set(installed_repos.combined)
        len_vset = len(vset)
        vset.difference_update(x.pkg
                               for x in resolver_inst.state.iter_ops(True))
        wipes = sorted(x for x in vset if x.package_is_real)
        for x in wipes:
            out.write("Remove %s" % x)
        out.write()
        if wipes:
            out.write("removing %i packages of %i installed, %0.2f%%." %
                      (len(wipes), len_vset, 100 *
                       (len(wipes) / float(len_vset))))
        else:
            out.write("no packages to remove")
        if options.pretend:
            return 0
        if options.ask:
            if not formatter.ask("Do you wish to proceed?",
                                 default_answer=False):
                return 1
            out.write()
        repo_obs = observer.repo_observer(observer.formatter_output(out),
                                          not options.debug)
        do_unmerge(options, out, err, installed_repos.combined, wipes,
                   world_set, repo_obs)
        return 0

    if options.debug:
        out.write()
        out.write(out.bold, ' * ', out.reset, 'debug: all ops')
        out.first_prefix.append(" ")
        plan_len = len(str(len(resolver_inst.state.plan)))
        for pos, op in enumerate(resolver_inst.state.plan):
            out.write(str(pos + 1).rjust(plan_len), ': ', str(op))
        out.first_prefix.pop()
        out.write(out.bold, ' * ', out.reset, 'debug: end all ops')
        out.write()

    changes = resolver_inst.state.ops(only_real=True)

    build_obs = observer.build_observer(observer.formatter_output(out),
                                        not options.debug)
    repo_obs = observer.repo_observer(observer.formatter_output(out),
                                      not options.debug)

    if options.debug:
        out.write(out.bold, " * ", out.reset, "running sanity checks")
        start_time = time()
    if not changes.run_sanity_checks(domain, build_obs):
        out.error("sanity checks failed.  please resolve them and try again.")
        return 1
    if options.debug:
        out.write(
            out.bold, " * ", out.reset,
            "finished sanity checks in %.2f seconds" % (time() - start_time))
        out.write()

    if options.ask or options.pretend:
        for op in changes:
            formatter.format(op)
        formatter.end()

    if vdb_time:
        out.write(out.bold, 'Took %.2f' % (vdb_time, ), out.reset,
                  ' seconds to preload vdb state')
    if not changes:
        out.write("Nothing to merge.")
        return

    if options.pretend:
        if options.verbose:
            out.write(
                out.bold, ' * ', out.reset,
                "resolver plan required %i ops (%.2f seconds)\n" %
                (len(resolver_inst.state.plan), resolve_time))
        return

    if (options.ask
            and not formatter.ask("Would you like to merge these packages?")):
        return

    change_count = len(changes)

    # Main merge loop: for each op fetch, build, then install/replace/remove;
    # `cleanup` holds callbacks run at the start of the next iteration.
    # left in place for ease of debugging.
    cleanup = []
    try:
        for count, op in enumerate(changes):
            for func in cleanup:
                func()

            cleanup = []

            out.write("\nProcessing %i of %i: %s" %
                      (count + 1, change_count, op.pkg.cpvstr))
            out.title("%i/%i: %s" % (count + 1, change_count, op.pkg.cpvstr))
            if op.desc != "remove":
                cleanup = [op.pkg.release_cached_data]

                if not options.fetchonly and options.debug:
                    out.write("Forcing a clean of workdir")

                pkg_ops = domain.pkg_operations(op.pkg, observer=build_obs)
                out.write("\n%i files required-" % len(op.pkg.fetchables))
                try:
                    ret = pkg_ops.run_if_supported("fetch", or_return=True)
                except IGNORED_EXCEPTIONS:
                    raise
                except Exception as e:
                    ret = e
                if ret is not True:
                    if ret is False:
                        ret = None
                    commandline.dump_error(
                        out, ret,
                        "\nfetching failed for %s" % (op.pkg.cpvstr, ))
                    if not options.ignore_failures:
                        return 1
                    continue
                if options.fetchonly:
                    continue

                buildop = pkg_ops.run_if_supported("build", or_return=None)
                pkg = op.pkg
                if buildop is not None:
                    out.write("building %s" % (op.pkg.cpvstr, ))
                    result = False
                    try:
                        result = buildop.finalize()
                    except format.errors as e:
                        out.error("caught exception building %s: % s" %
                                  (op.pkg.cpvstr, e))
                    else:
                        if result is False:
                            out.error("failed building %s" % (op.pkg.cpvstr, ))
                    if result is False:
                        if not options.ignore_failures:
                            return 1
                        continue
                    pkg = result
                    cleanup.append(pkg.release_cached_data)
                    pkg_ops = domain.pkg_operations(pkg, observer=build_obs)
                    cleanup.append(buildop.cleanup)

                cleanup.append(partial(pkg_ops.run_if_supported, "cleanup"))
                pkg = pkg_ops.run_if_supported("localize", or_return=pkg)
                # wipe this to ensure we don't inadvertantly use it further down;
                # we aren't resetting it after localizing, so could have the wrong
                # set of ops.
                del pkg_ops

                out.write()
                if op.desc == "replace":
                    if op.old_pkg == pkg:
                        out.write(">>> Reinstalling %s" % (pkg.cpvstr))
                    else:
                        out.write(">>> Replacing %s with %s" %
                                  (op.old_pkg.cpvstr, pkg.cpvstr))
                    i = domain.replace_pkg(op.old_pkg, pkg, repo_obs)
                    cleanup.append(op.old_pkg.release_cached_data)
                else:
                    out.write(">>> Installing %s" % (pkg.cpvstr, ))
                    i = domain.install_pkg(pkg, repo_obs)

                # force this explicitly- can hold onto a helluva lot more
                # then we would like.
            else:
                out.write(">>> Removing %s" % op.pkg.cpvstr)
                i = domain.uninstall_pkg(op.pkg, repo_obs)
            try:
                ret = i.finish()
            except merge_errors.BlockModification as e:
                out.error("Failed to merge %s: %s" % (op.pkg, e))
                if not options.ignore_failures:
                    return 1
                continue

            # while this does get handled through each loop, wipe it now; we don't need
            # that data, thus we punt it now to keep memory down.
            # for safety sake, we let the next pass trigger a release also-
            # mainly to protect against any code following triggering reloads
            # basically, be protective

            if world_set is not None:
                if op.desc == "remove":
                    out.write('>>> Removing %s from world file' %
                              op.pkg.cpvstr)
                    removal_pkg = slotatom_if_slotted(source_repos.combined,
                                                      op.pkg.versioned_atom)
                    update_worldset(world_set, removal_pkg, remove=True)
                elif not options.oneshot and any(
                        x.match(op.pkg) for x in atoms):
                    if not options.upgrade:
                        out.write('>>> Adding %s to world file' %
                                  op.pkg.cpvstr)
                        add_pkg = slotatom_if_slotted(source_repos.combined,
                                                      op.pkg.versioned_atom)
                        update_worldset(world_set, add_pkg)

#    again... left in place for ease of debugging.
#    except KeyboardInterrupt:
#        import pdb;pdb.set_trace()
#    else:
#        import pdb;pdb.set_trace()
    finally:
        pass

    # the final run from the loop above doesn't invoke cleanups;
    # we could ignore it, but better to run it to ensure nothing is inadvertantly
    # held on the way out of this function.
    # makes heappy analysis easier if we're careful about it.
    for func in cleanup:
        func()

    # and wipe the reference to the functions to allow things to fall out of
    # memory.
    cleanup = []

    out.write("finished")
    return 0
Ejemplo n.º 12
0
                out.error(
                    'No package matches %r, but there is a set with '
                    'that name. Use -s to specify a set.' % (token,))
                return 2
            elif not options.ignore_failures:
                out.error('No matches for %r; ignoring it' % token)
            else:
                return -1
        else:
            atoms.append(a)

    if not atoms and not options.newuse:
        out.error('No targets specified; nothing to do')
        return 1

    atoms = lists.stable_unique(atoms)

    if (not options.set or options.clean) and not options.oneshot:
        if world_set is None:
            err.write("Disable world updating via --oneshot, or fix your "
                "configuration")
            return 1

    if options.upgrade:
        resolver_kls = resolver.upgrade_resolver
    else:
        resolver_kls = resolver.min_install_resolver

    extra_kwargs = {}
    if options.empty:
        extra_kwargs['resolver_cls'] = resolver.empty_tree_merge_plan
Ejemplo n.º 13
0
def package_keywords_splitter(val):
    """Split an "<atom> <keyword>..." line into (restriction, keywords)."""
    chunks = val.split()
    return parse_match(chunks[0]), tuple(stable_unique(chunks[1:]))
Ejemplo n.º 14
0
    def check_values(self, values, args):
        """Validate parsed options and fill in defaults from config/suite/cwd.

        Resolves the active checks, target/source repos (guessing from the
        current directory when unspecified), the reporter, the package
        limiters derived from args or cwd, and the addons the checks need.

        :param values: optparse values object (mutated in place).
        :param args: remaining positional args (extended-atom strings).
        :return: (values, args) tuple expected by optparse.
        """
        values, args = commandline.OptionParser.check_values(
            self, values, args)
        # XXX hack...
        values.checks = sorted(lists.unstable_unique(
            get_plugins('check', plugins)),
            key=lambda x:x.__name__)
        if values.list_checks or values.list_reporters:
            if values.list_reporters == values.list_checks:
                raise optparse.OptionValueError("--list-checks and "
                    "--list-reporters are mutually exclusive options- "
                    "one or the other.")
            return values, ()
        cwd = None
        if values.suite is None:
            # No suite explicitly specified. Use the repo to guess the suite.
            if values.target_repo is None:
                # Not specified either. Try to find a repo our cwd is in.
                cwd = os.getcwd()
                # The use of a dict here is a hack to deal with one
                # repo having multiple names in the configuration.
                candidates = {}
                for name, suite in values.config.pcheck_suite.iteritems():
                    repo = suite.target_repo
                    if repo is None:
                        continue
                    repo_base = getattr(repo, 'base', None)
                    if repo_base is not None and cwd.startswith(repo_base):
                        candidates[repo] = name
                if len(candidates) == 1:
                    values.guessed_suite = True
                    values.target_repo = tuple(candidates)[0]
            if values.target_repo is not None:
                # We have a repo, now find a suite matching it.
                candidates = list(
                    suite for suite in values.config.pcheck_suite.itervalues()
                    if suite.target_repo is values.target_repo)
                if len(candidates) == 1:
                    values.guessed_suite = True
                    values.suite = candidates[0]
            if values.suite is None:
                # If we have multiple candidates or no candidates we
                # fall back to the default suite.
                values.suite = values.config.get_default('pcheck_suite')
                values.default_suite = values.suite is not None
        if values.suite is not None:
            # We have a suite. Lift defaults from it for values that
            # were not set explicitly:
            if values.checkset is None:
                values.checkset = values.suite.checkset
            if values.src_repo is None:
                values.src_repo = values.suite.src_repo
            # If we were called with no atoms we want to force
            # cwd-based detection.
            if values.target_repo is None:
                if args:
                    values.target_repo = values.suite.target_repo
                elif values.suite.target_repo is not None:
                    # No atoms were passed in, so we want to guess
                    # what to scan based on cwd below. That only makes
                    # sense if we are inside the target repo. We still
                    # want to pick the suite's target repo if we are
                    # inside it, in case there is more than one repo
                    # definition with a base that contains our dir.
                    if cwd is None:
                        cwd = os.getcwd()
                    repo_base = getattr(values.suite.target_repo, 'base', None)
                    if repo_base is not None and cwd.startswith(repo_base):
                        values.target_repo = values.suite.target_repo
        if values.target_repo is None:
            # We have no target repo (not explicitly passed, not from
            # a suite, not from an earlier guess at the target_repo).
            # Try to guess one from cwd:
            if cwd is None:
                cwd = os.getcwd()
            candidates = {}
            for name, repo in values.config.repo.iteritems():
                repo_base = getattr(repo, 'base', None)
                if repo_base is not None and cwd.startswith(repo_base):
                    candidates[repo] = name
            if not candidates:
                self.error(
                    'No target repo specified on commandline or suite and '
                    'current directory is not inside a known repo.')
            elif len(candidates) > 1:
                self.error(
                    'Found multiple matches when guessing repo based on '
                    'current directory (%s). Specify a repo on the '
                    'commandline or suite or remove some repos from your '
                    'configuration.' % (
                        ', '.join(str(repo) for repo in candidates),))
            values.target_repo = tuple(candidates)[0]

        # Resolve the reporter: config default, plugin default, or by name.
        if values.reporter is None:
            values.reporter = values.config.get_default(
                'pcheck_reporter_factory')
            if values.reporter is None:
                values.reporter = get_plugin('reporter', plugins)
            if values.reporter is None:
                self.error('no config defined reporter found, nor any default '
                    'plugin based reporters')
        else:
            func = values.config.pcheck_reporter_factory.get(values.reporter)
            if func is None:
                func = list(base.Whitelist([values.reporter]).filter(
                    get_plugins('reporter', plugins)))
                if not func:
                    self.error("no reporter matches %r\n"
                        "please see --list-reporter for a list of "
                        "valid reporters" % values.reporter)
                elif len(func) > 1:
                    self.error("--reporter %r matched multiple reporters, "
                        "must match one. %r" %
                            (values.reporter,
                                tuple(sorted("%s.%s" %
                                    (x.__module__, x.__name__)
                                    for x in func))
                            )
                    )
                func = func[0]
            values.reporter = func
        if values.src_repo is None:
            values.src_repo = values.target_repo
            values.search_repo = values.target_repo
        else:
            values.search_repo = multiplex.tree(values.target_repo,
                                                values.src_repo)

        # TODO improve this to deal with a multiplex repo.
        for repo in set((values.src_repo, values.target_repo)):
            if isinstance(repo, repository.UnconfiguredTree):
                values.repo_bases.append(osutils.abspath(repo.base))

        # Build limiters from args, or from cwd's position inside the repo.
        if args:
            values.limiters = lists.stable_unique(map(
                    parserestrict.parse_match, args))
        else:
            repo_base = getattr(values.target_repo, 'base', None)
            if not repo_base:
                self.error(
                    'Either specify a target repo that is not multi-tree or '
                    'one or more extended atoms to scan '
                    '("*" for the entire repo).')
            cwd = osutils.abspath(os.getcwd())
            repo_base = osutils.abspath(repo_base)
            if not cwd.startswith(repo_base):
                self.error(
                    'Working dir (%s) is not inside target repo (%s). Fix '
                    'that or specify one or more extended atoms to scan.' % (
                        cwd, repo_base))
            bits = list(p for p in cwd[len(repo_base):].split(os.sep) if p)
            if not bits:
                values.limiters = [packages.AlwaysTrue]
            elif len(bits) == 1:
                values.limiters = [packages.PackageRestriction(
                        'category', StrExactMatch(bits[0]))]
            else:
                values.limiters = [packages.AndRestriction(
                        packages.PackageRestriction(
                            'category', StrExactMatch(bits[0])),
                        packages.PackageRestriction(
                            'package', StrExactMatch(bits[1])))]

        # Narrow the check list via checkset, then explicit white/blacklists.
        if values.checkset is None:
            values.checkset = values.config.get_default('pcheck_checkset')
        if values.checkset is not None:
            values.checks = list(values.checkset.filter(values.checks))

        if values.checks_to_run:
            whitelist = base.Whitelist(values.checks_to_run)
            values.checks = list(whitelist.filter(values.checks))

        if values.checks_to_disable:
            blacklist = base.Blacklist(values.checks_to_disable)
            values.checks = list(blacklist.filter(values.checks))

        if not values.checks:
            self.error('No active checks')

        # Transitively collect the addons required by the active checks.
        values.addons = set()
        def add_addon(addon):
            if addon not in values.addons:
                values.addons.add(addon)
                for dep in addon.required_addons:
                    add_addon(dep)
        for check in values.checks:
            add_addon(check)
        try:
            for addon in values.addons:
                addon.check_values(values)
        except optparse.OptionValueError, e:
            if values.debug:
                raise
            self.error(str(e))
Ejemplo n.º 15
0
def package_keywords_splitter(val):
    """Split a package.keywords-style line.

    The first whitespace-separated token is parsed into a package match
    restriction; the remaining tokens become a duplicate-free keyword tuple
    (first occurrence order preserved).
    """
    tokens = val.split()
    keywords = tuple(stable_unique(tokens[1:]))
    return parse_match(tokens[0]), keywords
Ejemplo n.º 16
0
def main(options, out, err):
    """Regenerate the plugin cache for every requested package."""
    packages = lists.stable_unique(options.packages)
    for package in packages:
        message = 'Updating cache for %s...' % (package.__name__,)
        out.write(message)
        plugin.initialize_cache(package, force=True)
Ejemplo n.º 17
0
def main(options, out, err):
    """Resolve the requested targets/sets and merge, unmerge or clean packages.

    :param options: parsed commandline values (config, domain, targets, sets,
        formatter factory, and the various mode flags used below).
    :param out: formatter used for normal output.
    :param err: raw stream used for error output.
    :return: 0 on success, a non-zero int on failure; ``None`` on some early
        exits (e.g. after a successful unmerge or when there is nothing to
        merge).
    """
    config = options.config
    if options.debug:
        resolver.plan.limiters.add(None)

    domain = options.domain
    livefs_repos = domain.all_livefs_repos
    world_set = world_list = options.world
    if options.oneshot:
        # --oneshot means the world file must never be modified.
        world_set = None

    formatter = options.formatter(
        out=out, err=err,
        unstable_arch=domain.unstable_arch,
        domain_settings=domain.settings,
        use_expand=domain.profile.use_expand,
        use_expand_hidden=domain.profile.use_expand_hidden,
        pkg_get_use=domain.get_package_use_unconfigured,
        world_list=world_list,
        verbose=options.verbose,
        livefs_repos=livefs_repos,
        distdir=domain.fetcher.get_storage_path(),
        quiet_repo_display=options.quiet_repo_display)

    # This mode does not care about sets and packages so bypass all that.
    if options.unmerge:
        if not options.oneshot:
            if world_set is None:
                err.write("Disable world updating via --oneshot, "
                          "or fix your configuration")
                return 1
        try:
            unmerge(out, err, livefs_repos, options.targets, options, formatter, world_set)
        except (parserestrict.ParseError, Failure) as e:
            out.error(str(e))
            return 1
        return

    # Pick the repos packages may be sourced from, per the binary/source flags.
    source_repos = domain.source_repos
    installed_repos = domain.installed_repos

    if options.usepkgonly:
        source_repos = domain.binary_repos
    elif options.usepkg:
        source_repos = domain.binary_repos + domain.ebuild_repos
    elif options.source_only:
        source_repos = domain.ebuild_repos

    # Expand the requested sets into concrete atoms.
    atoms = []
    for setname, pkgset in options.sets:
        if pkgset is None:
            return 1
        l = list(pkgset)
        if not l:
            out.write("skipping set %s: set is empty, nothing to update" % setname)
        else:
            atoms.extend(l)

    # Resolve each commandline target token against the source repos.
    for token, restriction in options.targets:
        try:
            matches = parse_target(restriction, source_repos.combined, livefs_repos, return_none=True)
        except parserestrict.ParseError as e:
            out.error(str(e))
            return 1
        if matches is None:
            if token in config.pkgset:
                out.error(
                    "No package matches '%s', but there is a set with "
                    'that name. Use @set to specify a set.' % (token,))
                return 2
            # NOTE(review): these two branches look inverted — the "ignoring
            # it" message is printed when failures are NOT ignored, while
            # --ignore-failures aborts with -1. Verify intent.
            elif not options.ignore_failures:
                out.error("No matches for '%s'; ignoring it" % (token,))
            else:
                return -1
        else:
            atoms.extend(matches)

    if not atoms and not options.newuse:
        out.error('No targets specified; nothing to do')
        return 1

    atoms = stable_unique(atoms)

    if options.clean and not options.oneshot:
        if world_set is None:
            err.write("Disable world updating via --oneshot, or fix your configuration")
            return 1

    # Choose the resolver strategy: upgrade to newest vs. minimal install.
    if options.upgrade:
        resolver_kls = resolver.upgrade_resolver
    else:
        resolver_kls = resolver.min_install_resolver

    extra_kwargs = {}
    if options.empty:
        extra_kwargs['resolver_cls'] = resolver.empty_tree_merge_plan
    if options.debug:
        extra_kwargs['debug'] = True

    # XXX: This should recurse on deep
    if options.newuse:
        # --newuse: queue rebuilds for installed pkgs whose effective USE or
        # IUSE differs from the best matching source package.
        out.write(out.bold, ' * ', out.reset, 'Scanning for changed USE...')
        out.title('Scanning for changed USE...')
        for inst_pkg in installed_repos.itermatch(OrRestriction(*atoms)):
            src_pkgs = source_repos.match(inst_pkg.versioned_atom)
            if src_pkgs:
                src_pkg = max(src_pkgs)
                inst_iuse = inst_pkg.iuse_stripped
                src_iuse = src_pkg.iuse_stripped
                inst_flags = inst_iuse.intersection(inst_pkg.use)
                src_flags = src_iuse.intersection(src_pkg.use)
                if inst_flags.symmetric_difference(src_flags) or \
                   inst_iuse.symmetric_difference(src_iuse):
                    atoms.append(src_pkg.unversioned_atom)

#    left intentionally in place for ease of debugging.
#    from guppy import hpy
#    hp = hpy()
#    hp.setrelheap()

    resolver_inst = resolver_kls(
        vdbs=installed_repos.repos, dbs=source_repos.repos,
        verify_vdb=options.deep, nodeps=options.nodeps,
        drop_cycles=options.ignore_cycles, force_replace=options.replace,
        process_built_depends=options.with_bdeps, **extra_kwargs)

    if options.preload_vdb_state:
        out.write(out.bold, ' * ', out.reset, 'Preloading vdb... ')
        vdb_time = time()
        resolver_inst.load_vdb_state()
        vdb_time = time() - vdb_time
    else:
        vdb_time = 0.0

    # Run resolution; on failure, optionally drop the failing atom and retry
    # when --ignore-failures is in effect.
    failures = []
    resolve_time = time()
    out.title('Resolving...')
    out.write(out.bold, ' * ', out.reset, 'Resolving...')
    ret = resolver_inst.add_atoms(atoms, finalize=True)
    while ret:
        out.error('resolution failed')
        restrict = ret[0][0]
        just_failures = reduce_to_failures(ret[1])
        display_failures(out, just_failures, debug=options.debug)
        failures.append(restrict)
        if not options.ignore_failures:
            break
        out.write("restarting resolution")
        atoms = [x for x in atoms if x != restrict]
        resolver_inst.reset()
        ret = resolver_inst.add_atoms(atoms, finalize=True)
    resolve_time = time() - resolve_time

    if options.debug:
        out.write(out.bold, " * ", out.reset, "resolution took %.2f seconds" % resolve_time)

    if failures:
        # Show what each failed restriction could have matched, per raw repo.
        out.write()
        out.write('Failures encountered:')
        for restrict in failures:
            out.error("failed '%s'" % (restrict,))
            out.write('potentials:')
            match_count = 0
            for r in repo_utils.get_raw_repos(source_repos.repos):
                l = r.match(restrict)
                if l:
                    out.write(
                        "repo %s: [ %s ]" % (r, ", ".join(str(x) for x in l)))
                    match_count += len(l)
            if not match_count:
                out.write("No matches found in %s" % (source_repos.repos,))
            out.write()
            if not options.ignore_failures:
                return 1

    resolver_inst.free_caches()

    if options.clean:
        # --clean: everything installed but not in the resolved plan gets
        # removed (real packages only).
        out.write(out.bold, ' * ', out.reset, 'Packages to be removed:')
        vset = set(installed_repos.combined)
        len_vset = len(vset)
        vset.difference_update(x.pkg for x in resolver_inst.state.iter_ops(True))
        wipes = sorted(x for x in vset if x.package_is_real)
        for x in wipes:
            out.write("Remove %s" % x)
        out.write()
        if wipes:
            out.write("removing %i packages of %i installed, %0.2f%%." %
                      (len(wipes), len_vset, 100*(len(wipes)/float(len_vset))))
        else:
            out.write("no packages to remove")
        if options.pretend:
            return 0
        if options.ask:
            if not formatter.ask("Do you wish to proceed?", default_answer=False):
                return 1
            out.write()
        repo_obs = observer.repo_observer(observer.formatter_output(out), not options.debug)
        do_unmerge(options, out, err, installed_repos.combined, wipes, world_set, repo_obs)
        return 0

    if options.debug:
        out.write()
        out.write(out.bold, ' * ', out.reset, 'debug: all ops')
        out.first_prefix.append(" ")
        plan_len = len(str(len(resolver_inst.state.plan)))
        for pos, op in enumerate(resolver_inst.state.plan):
            out.write(str(pos + 1).rjust(plan_len), ': ', str(op))
        out.first_prefix.pop()
        out.write(out.bold, ' * ', out.reset, 'debug: end all ops')
        out.write()

    changes = resolver_inst.state.ops(only_real=True)

    build_obs = observer.build_observer(observer.formatter_output(out), not options.debug)
    repo_obs = observer.repo_observer(observer.formatter_output(out), not options.debug)

    if options.ask or options.pretend:
        for op in changes:
            formatter.format(op)
        formatter.end()

    if vdb_time:
        out.write(out.bold, 'Took %.2f' % (vdb_time,), out.reset,
                  ' seconds to preload vdb state')
    if not changes:
        out.write("Nothing to merge.")
        return

    if options.pretend:
        if options.verbose:
            out.write(
                out.bold, ' * ', out.reset,
                "resolver plan required %i ops (%.2f seconds)\n" %
                (len(resolver_inst.state.plan), resolve_time))
        return

    if (options.ask and not formatter.ask("Would you like to merge these packages?")):
        return

    if options.debug:
        out.write(out.bold, " * ", out.reset, "running sanity checks")
        start_time = time()
    if not changes.run_sanity_checks(domain, build_obs):
        out.error("sanity checks failed.  please resolve them and try again.")
        return 1
    if options.debug:
        out.write(
            out.bold, " * ", out.reset,
            "finished sanity checks in %.2f seconds" % (time() - start_time))
        out.write()

    change_count = len(changes)

    # left in place for ease of debugging.
    # `cleanup` holds callbacks from the previous iteration; they are run at
    # the top of the next loop pass (and once more after the loop) to release
    # cached package data promptly and keep memory down.
    cleanup = []
    try:
        for count, op in enumerate(changes):
            for func in cleanup:
                func()

            cleanup = []

            out.write("\nProcessing %i of %i: %s" % (count + 1, change_count, op.pkg.cpvstr))
            out.title("%i/%i: %s" % (count + 1, change_count, op.pkg.cpvstr))
            if op.desc != "remove":
                # install/replace path: fetch, (maybe) build, localize, merge.
                cleanup = [op.pkg.release_cached_data]

                if not options.fetchonly and options.debug:
                    out.write("Forcing a clean of workdir")

                pkg_ops = domain.pkg_operations(op.pkg, observer=build_obs)
                out.write("\n%i files required-" % len(op.pkg.fetchables))
                try:
                    ret = pkg_ops.run_if_supported("fetch", or_return=True)
                except IGNORED_EXCEPTIONS:
                    raise
                except Exception as e:
                    ret = e
                if ret is not True:
                    if ret is False:
                        ret = None
                    commandline.dump_error(out, ret, "\nfetching failed for %s" % (op.pkg.cpvstr,))
                    if not options.ignore_failures:
                        return 1
                    continue
                if options.fetchonly:
                    continue

                buildop = pkg_ops.run_if_supported("build", or_return=None)
                pkg = op.pkg
                if buildop is not None:
                    out.write("building %s" % (op.pkg.cpvstr,))
                    result = False
                    try:
                        result = buildop.finalize()
                    # NOTE(review): "% s" below looks like a typo for "%s";
                    # the space flag is a no-op for %s, so output is unchanged.
                    except format.errors as e:
                        out.error("caught exception building %s: % s" % (op.pkg.cpvstr, e))
                    else:
                        if result is False:
                            out.error("failed building %s" % (op.pkg.cpvstr,))
                    if result is False:
                        if not options.ignore_failures:
                            return 1
                        continue
                    # finalize() returned the built package; operate on it
                    # from here on.
                    pkg = result
                    cleanup.append(pkg.release_cached_data)
                    pkg_ops = domain.pkg_operations(pkg, observer=build_obs)
                    cleanup.append(buildop.cleanup)

                cleanup.append(partial(pkg_ops.run_if_supported, "cleanup"))
                pkg = pkg_ops.run_if_supported("localize", or_return=pkg)
                # wipe this to ensure we don't inadvertantly use it further down;
                # we aren't resetting it after localizing, so could have the wrong
                # set of ops.
                del pkg_ops

                out.write()
                if op.desc == "replace":
                    if op.old_pkg == pkg:
                        out.write(">>> Reinstalling %s" % (pkg.cpvstr))
                    else:
                        out.write(">>> Replacing %s with %s" % (
                            op.old_pkg.cpvstr, pkg.cpvstr))
                    i = domain.replace_pkg(op.old_pkg, pkg, repo_obs)
                    cleanup.append(op.old_pkg.release_cached_data)
                else:
                    out.write(">>> Installing %s" % (pkg.cpvstr,))
                    i = domain.install_pkg(pkg, repo_obs)

                # force this explicitly- can hold onto a helluva lot more
                # then we would like.
            else:
                out.write(">>> Removing %s" % op.pkg.cpvstr)
                i = domain.uninstall_pkg(op.pkg, repo_obs)
            try:
                ret = i.finish()
            except merge_errors.BlockModification as e:
                out.error("Failed to merge %s: %s" % (op.pkg, e))
                if not options.ignore_failures:
                    return 1
                continue

            # while this does get handled through each loop, wipe it now; we don't need
            # that data, thus we punt it now to keep memory down.
            # for safety sake, we let the next pass trigger a release also-
            # mainly to protect against any code following triggering reloads
            # basically, be protective

            if world_set is not None:
                # Keep the world file in sync with what was just merged/removed.
                if op.desc == "remove":
                    out.write('>>> Removing %s from world file' % op.pkg.cpvstr)
                    removal_pkg = slotatom_if_slotted(source_repos.combined, op.pkg.versioned_atom)
                    update_worldset(world_set, removal_pkg, remove=True)
                elif not options.oneshot and any(x.match(op.pkg) for x in atoms):
                    if not options.upgrade:
                        out.write('>>> Adding %s to world file' % op.pkg.cpvstr)
                        add_pkg = slotatom_if_slotted(source_repos.combined, op.pkg.versioned_atom)
                        update_worldset(world_set, add_pkg)


#    again... left in place for ease of debugging.
#    except KeyboardInterrupt:
#        import pdb;pdb.set_trace()
#    else:
#        import pdb;pdb.set_trace()
    finally:
        pass

    # the final run from the loop above doesn't invoke cleanups;
    # we could ignore it, but better to run it to ensure nothing is
    # inadvertantly held on the way out of this function.
    # makes heappy analysis easier if we're careful about it.
    for func in cleanup:
        func()

    # and wipe the reference to the functions to allow things to fall out of
    # memory.
    cleanup = []

    out.write("finished")
    return 0
Ejemplo n.º 18
0
def main(options, out, err):
    """Update caches."""
    unique_packages = lists.stable_unique(options.packages)
    for package in unique_packages:
        out.write('Updating cache for %s...' % (package.__name__, ))
        plugin.initialize_cache(package, force=True)