Example #1
File: pclean.py Project: floppym/pkgcore
def main(options, out, err):
    if options.debug:
        out.write('starting scanning distdir %s...' % options.distdir)
    files = set(basename(file) for file in listdir_files(options.distdir))

    if options.debug:
        out.write('scanning repo...')

    pfiles = set()
    for pkg in options.repo.itermatch(options.restrict, sorter=sorted):
        try:
            pfiles.update(fetchable.filename for fetchable in
                        iflatten_instance(pkg.fetchables, fetchable_kls))
        except ParseChksumError as e:
            err.write("got corruption error '%s', with package %s " %
                (e, pkg.cpvstr))
            if options.ignore_failures:
                err.write("skipping...")
                err.write()
            else:
                err.write("aborting...")
                return 1
        except Exception as e:
            err.write("got error '%s', parsing package %s in repo '%s'" %
                (e, pkg.cpvstr, pkg.repo))
            raise

    d = options.distdir
    for file in (files - pfiles):
        out.write(pjoin(d, file))
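
All of these examples lean on the same helper: listdir_files, which in pkgcore comes from the snakeoil.osutils utility library and returns the names of non-directory entries directly under a single directory (no recursion). A minimal sketch of the basic call pattern, assuming snakeoil is installed and the directory exists:

from snakeoil.osutils import listdir_files, pjoin

base = '/etc/env.d'  # any existing directory
# listdir_files yields bare file names, so full paths are rebuilt with pjoin;
# a missing directory raises OSError (ENOENT), hence the guards seen below.
for name in listdir_files(base):
    print(pjoin(base, name))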
Example #2
 def licenses(self):
     """Return the set of all defined licenses in a repo."""
     try:
         content = listdir_files(self.licenses_dir)
     except EnvironmentError:
         content = ()
     return frozenset(content)
Example #3
File: triggers.py Project: chutz/pkgcore
    def regen(self, binary, basepath):
        ignores = ("dir", "dir.old")
        try:
            files = listdir_files(basepath)
        except OSError as oe:
            if oe.errno == errno.ENOENT:
                return
            raise

        if self.should_skip_directory(basepath, files):
            return

        # wipe old indexes.
        for x in set(ignores).intersection(files):
            os.remove(pjoin(basepath, x))

        index = pjoin(basepath, 'dir')
        for x in files:
            if x in ignores or x.startswith("."):
                continue

            ret, data = spawn.spawn_get_output(
                [binary, '--quiet', pjoin(basepath, x), '--dir-file', index],
                collect_fds=(1,2), split_lines=False)

            if not data or "already exists" in data or \
                    "warning: no info dir entry" in data:
                continue
            yield pjoin(basepath, x)
Example #4
File: glsa.py Project: radhermit/pkgcore
 def iter_vulnerabilities(self):
     """generator yielding each GLSA restriction"""
     for path in self.paths:
         for fn in listdir_files(path):
             # glsa-1234-12.xml
             if not (fn.startswith("glsa-") and fn.endswith(".xml")):
                 continue
             # This verifies the filename is of the correct syntax.
             try:
                 [int(x) for x in fn[5:-4].split("-")]
             except ValueError:
                 continue
             root = etree.parse(pjoin(path, fn))
             glsa_node = root.getroot()
             if glsa_node.tag != 'glsa':
                 raise ValueError("glsa without glsa rootnode")
             for affected in root.findall('affected'):
                 for pkg in affected.findall('package'):
                     try:
                         pkgname = str(pkg.get('name')).strip()
                         pkg_vuln_restrict = \
                             self.generate_intersects_from_pkg_node(
                                 pkg, tag="glsa(%s)" % fn[5:-4])
                         if pkg_vuln_restrict is None:
                             continue
                         pkgatom = atom.atom(pkgname)
                         yield fn[5:-4], pkgname, pkgatom, pkg_vuln_restrict
                     except (TypeError, ValueError) as v:
                         # thrown from cpv.
                         logger.warning(
                             "invalid glsa- %s, package %s: error %s",
                             fn, pkgname, v)
                         del v
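
A note on the fn[5:-4] slice above: it strips the "glsa-" prefix and ".xml" suffix, so the int() conversion only sees the numeric id. A quick illustration with a made-up file name:

fn = "glsa-200501-02.xml"                    # hypothetical GLSA file name
glsa_id = fn[5:-4]                           # -> "200501-02"
print([int(x) for x in glsa_id.split("-")])  # [200501, 2]: passes the check
# A name like "glsa-foo.xml" would raise ValueError here and be skipped.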
Example #5
def add_sets(config, root, portage_base_dir):
    config["world"] = basics.AutoConfigSection({
        "class": "pkgcore.pkgsets.filelist.WorldFile",
        "location": pjoin(root, const.WORLD_FILE)})
    config["system"] = basics.AutoConfigSection({
        "class": "pkgcore.pkgsets.system.SystemSet",
        "profile": "profile"})
    config["installed"] = basics.AutoConfigSection({
        "class": "pkgcore.pkgsets.installed.Installed",
        "vdb": "vdb"})
    config["versioned-installed"] = basics.AutoConfigSection({
        "class": "pkgcore.pkgsets.installed.VersionedInstalled",
        "vdb": "vdb"})

    set_fp = pjoin(portage_base_dir, "sets")
    try:
        for setname in listdir_files(set_fp):
            # Potential for name clashes here, those will just make
            # the set not show up in config.
            if setname in ("system", "world"):
                logger.warning(
                    "user defined set %s is disallowed; ignoring" %
                    pjoin(set_fp, setname))
                continue
            config[setname] = basics.AutoConfigSection({
                "class": "pkgcore.pkgsets.filelist.FileList",
                "location": pjoin(set_fp, setname)})
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
Example #6
 def _get_versions(self, catpkg):
     cppath = pjoin(self.base, catpkg[0], catpkg[1])
     pkg = catpkg[-1] + "-"
     lp = len(pkg)
     extension = self.extension
     ext_len = -len(extension)
     try:
         ret = tuple(x[lp:ext_len] for x in listdir_files(cppath)
                     if x[ext_len:] == extension and x[:lp] == pkg)
         if any(('scm' in x or '-try' in x) for x in ret):
             if not self.ignore_paludis_versioning:
                 for x in ret:
                     if 'scm' in x:
                         raise ebuild_errors.InvalidCPV(
                             "%s/%s-%s has nonstandard -scm "
                             "version component" % (catpkg + (x,)))
                     elif 'try' in x:
                         raise ebuild_errors.InvalidCPV(
                             "%s/%s-%s has nonstandard -try "
                             "version component" % (catpkg + (x,)))
                 raise AssertionError('unreachable codepoint was reached')
             return tuple(x for x in ret
                          if ('scm' not in x and 'try' not in x))
         return ret
     except EnvironmentError as e:
         raise_from(KeyError(
             "failed fetching versions for package %s: %s" %
             (pjoin(self.base, '/'.join(catpkg)), str(e))))
Example #7
File: triggers.py Project: veelai/pkgcore
def collapse_envd(base):
    collapsed_d = {}
    try:
        env_d_files = sorted(listdir_files(base))
    except OSError as oe:
        if oe.errno != errno.ENOENT:
            raise
Example #8
File: triggers.py Project: vapier/pkgcore
    def trigger(self, engine, existing_cset, install_cset):
        # hackish, but it works.
        protected_filter = gen_config_protect_filter(
            engine.offset, self.extra_protects, self.extra_disables).match
        ignore_filter = gen_collision_ignore_filter(engine.offset).match
        protected = {}

        for x in existing_cset.iterfiles():
            if not ignore_filter(x.location) and protected_filter(x.location):
                replacement = install_cset[x]
                if not simple_chksum_compare(replacement, x):
                    protected.setdefault(
                        pjoin(engine.offset,
                              os.path.dirname(x.location).lstrip(os.path.sep)),
                        []).append((os.path.basename(replacement.location),
                                    replacement))

        for dir_loc, entries in protected.iteritems():
            updates = {x[0]: [] for x in entries}
            try:
                existing = sorted(x for x in listdir_files(dir_loc)
                                  if x.startswith("._cfg"))
            except OSError as oe:
                if oe.errno != errno.ENOENT:
                    raise
                # this shouldn't occur.
                continue

            for x in existing:
                try:
                    # ._cfg0000_filename
                    count = int(x[5:9])
                    if x[9] != "_":
                        raise ValueError
                    fn = x[10:]
                except (ValueError, IndexError):
                    continue
                if fn in updates:
                    updates[fn].append((count, fn))

            # now we rename.
            for fname, entry in entries:
                # check for any updates with the same chksums.
                count = 0
                for cfg_count, cfg_fname in updates[fname]:
                    if simple_chksum_compare(livefs.gen_obj(
                            pjoin(dir_loc, cfg_fname)), entry):
                        count = cfg_count
                        break
                    count = max(count, cfg_count + 1)
                try:
                    install_cset.remove(entry)
                except KeyError:
                    # this shouldn't occur...
                    continue
                new_fn = pjoin(dir_loc, "._cfg%04i_%s" % (count, fname))
                new_entry = entry.change_attributes(location=new_fn)
                install_cset.add(new_entry)
                self.renames[new_entry] = entry
            del updates
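
The "._cfg%04i_%s" names written at the end mirror the CONFIG_PROTECT naming that the loop over existing files parses: a four-digit counter in characters 5-8, an underscore, then the protected file name. A standalone sketch of that parse, using a hypothetical helper:

def parse_cfg_name(name):
    """Hypothetical helper: '._cfg0007_resolv.conf' -> (7, 'resolv.conf')."""
    try:
        count = int(name[5:9])   # four-digit counter after '._cfg'
        if name[9] != "_":
            raise ValueError
        return count, name[10:]  # remainder is the original file name
    except (ValueError, IndexError):
        return None              # malformed entries are ignored above

print(parse_cfg_name("._cfg0007_resolv.conf"))  # (7, 'resolv.conf')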
Example #9
File: ebd.py Project: chutz/pkgcore
 def _reload_state(self):
     try:
         self.__set_stage_state__([x[1:]
             for x in listdir_files(self.builddir) if x.startswith(".")])
     except EnvironmentError as e:
         if e.errno not in (errno.ENOTDIR, errno.ENOENT):
             raise
Example #10
    def _add_sets(self):
        self["world"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.filelist.WorldFile",
            "location": pjoin(self.root, econst.WORLD_FILE.lstrip('/'))})
        self["system"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.system.SystemSet",
            "profile": "profile"})
        self["installed"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.installed.Installed",
            "vdb": "vdb"})
        self["versioned-installed"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.installed.VersionedInstalled",
            "vdb": "vdb"})

        set_fp = pjoin(self.dir, "sets")
        try:
            for setname in listdir_files(set_fp):
                # Potential for name clashes here, those will just make
                # the set not show up in config.
                if setname in ("system", "world"):
                    logger.warning(
                        "user defined set %r is disallowed; ignoring",
                        pjoin(set_fp, setname))
                    continue
                self[setname] = basics.AutoConfigSection({
                    "class": "pkgcore.pkgsets.filelist.FileList",
                    "location": pjoin(set_fp, setname)})
        except FileNotFoundError:
            pass
Example #11
def _scan_directory(path):
    files = []
    for x in listdir_files(path):
        match = valid_updates_re.match(x)
        if match is not None:
            files.append(((match.group(2), match.group(1)), x))
    files.sort(key=itemgetter(0))
    return [x[1] for x in files]
Example #12
File: repo_objs.py Project: veelai/pkgcore
 def raw_use_expand_desc(self):
     base = pjoin(self.profiles_base, 'desc')
     try:
         targets = sorted(listdir_files(base))
     except EnvironmentError as e:
         if e.errno != errno.ENOENT:
             raise
         return ()
Example #13
File: triggers.py Project: veelai/pkgcore
 def regen(self, binary, basepath):
     ignores = ("dir", "dir.old")
     try:
         files = listdir_files(basepath)
     except OSError as oe:
         if oe.errno == errno.ENOENT:
             return
         raise
Example #14
 def _load_eclasses(self):
     """Force an update of the internal view of on disk/remote eclasses."""
     ec = {}
     eclass_len = len(".eclass")
     try:
         files = listdir_files(self.eclassdir)
     except EnvironmentError as e:
         if e.errno not in (errno.ENOENT, errno.ENOTDIR):
             raise
         return ImmutableDict()
Example #15
File: triggers.py Project: vapier/pkgcore
def collapse_envd(base):
    collapsed_d = {}
    try:
        env_d_files = sorted(listdir_files(base))
    except OSError as oe:
        if oe.errno != errno.ENOENT:
            raise
    else:
        for x in env_d_files:
            if x.endswith(".bak") or x.endswith("~") or x.startswith("._cfg") \
                    or len(x) <= 2 or not x[0:2].isdigit():
                continue
            d = read_bash_dict(pjoin(base, x))
            # inefficient, but works.
            for k, v in d.iteritems():
                collapsed_d.setdefault(k, []).append(v)
            del d

    loc_incrementals = set(incrementals)
    loc_colon_parsed = set(colon_parsed)

    # split out env.d defined incrementals..
    # update incrementals *and* colon parsed for colon_separated;
    # incrementals on its own is space separated.

    for x in collapsed_d.pop("COLON_SEPARATED", []):
        v = x.split()
        if v:
            loc_colon_parsed.update(v)

    loc_incrementals.update(loc_colon_parsed)

    # now space.
    for x in collapsed_d.pop("SPACE_SEPARATED", []):
        v = x.split()
        if v:
            loc_incrementals.update(v)

    # now reinterpret.
    for k, v in collapsed_d.iteritems():
        if k not in loc_incrementals:
            collapsed_d[k] = v[-1]
            continue
        if k in loc_colon_parsed:
            collapsed_d[k] = filter(None, iflatten_instance(
                x.split(':') for x in v))
        else:
            collapsed_d[k] = filter(None, iflatten_instance(
                x.split() for x in v))

    return collapsed_d, loc_incrementals, loc_colon_parsed
Example #16
 def _get_versions(self, catpkg):
     cppath = pjoin(self.base, catpkg[0], catpkg[1])
     pkg = f'{catpkg[-1]}-'
     lp = len(pkg)
     extension = self.extension
     ext_len = -len(extension)
     try:
         return tuple(
             x[lp:ext_len] for x in listdir_files(cppath)
             if x[ext_len:] == extension and x[:lp] == pkg)
     except EnvironmentError as e:
         raise KeyError(
             "failed fetching versions for package %s: %s" %
             (pjoin(self.base, '/'.join(catpkg)), str(e))) from e
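
In both _get_versions variants, the slicing peels a "<package>-" prefix and the repository's file extension off each name, leaving just the version component. Illustrated with assumed values:

pkg, extension = 'foo-', '.tbz2'   # assumed package prefix and binpkg extension
lp, ext_len = len(pkg), -len(extension)
files = ['foo-1.2.3.tbz2', 'foo-2.0.tbz2', 'bar-1.0.tbz2', 'foo.lockfile']
print(tuple(x[lp:ext_len] for x in files
            if x[ext_len:] == extension and x[:lp] == pkg))
# -> ('1.2.3', '2.0')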
Example #17
 def _load_eclasses(self):
     """Force an update of the internal view of on disk/remote eclasses."""
     ec = {}
     eclass_len = len(".eclass")
     try:
         files = listdir_files(self.eclassdir)
     except (FileNotFoundError, NotADirectoryError):
         return ImmutableDict()
     for y in files:
         if not y.endswith(".eclass"):
             continue
         ys = y[:-eclass_len]
         ec[sys.intern(ys)] = LazilyHashedPath(
             pjoin(self.eclassdir, y), eclassdir=self.eclassdir)
     return ImmutableDict(ec)
Example #18
def parse_moves(location):
    pjoin = os.path.join

    # schwartzian comparison, convert it into YYYY-QQ
    def get_key(fname):
        return tuple(reversed(fname.split('-')))

    moves = {}
    for update_file in sorted(listdir_files(location), key=get_key):
        for line in iter_read_bash(pjoin(location, update_file)):
            line = line.split()
            if line[0] != 'move':
                continue
            moves[atom(line[1])] = atom(line[2])
    return moves
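
The get_key helper is the "schwartzian" trick the comment mentions: update files are named like "1Q-2020", and reversing the pieces sorts them by year first, then quarter. A quick check:

def get_key(fname):
    return tuple(reversed(fname.split('-')))

names = ['2Q-2020', '4Q-2019', '1Q-2020']   # assumed update-file names
# keys become ('2020', '2Q'), ('2019', '4Q'), ('2020', '1Q')
print(sorted(names, key=get_key))           # ['4Q-2019', '1Q-2020', '2Q-2020']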
Example #19
 def _load_eclasses(self):
     """Force an update of the internal view of on disk/remote eclasses."""
     ec = {}
     eclass_len = len(".eclass")
     try:
         files = listdir_files(self.eclassdir)
     except EnvironmentError as e:
         if e.errno not in (errno.ENOENT, errno.ENOTDIR):
             raise
         return ImmutableDict()
     for y in files:
         if not y.endswith(".eclass"):
             continue
         ys = y[:-eclass_len]
         ec[intern(ys)] = LazilyHashedPath(pjoin(self.eclassdir, y),
                                           eclassdir=self.eclassdir)
     return ImmutableDict(ec)
Example #20
    def raw_use_expand_desc(self):
        base = pjoin(self.profiles_base, 'desc')
        try:
            targets = sorted(listdir_files(base))
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise
            return ()

        def f():
            for use_group in targets:
                group = use_group.split('.', 1)[0] + "_"
                def converter(key):
                    return (packages.AlwaysTrue, group + key)
                for blah in self._split_use_desc_file('desc/%s' % use_group, converter):
                    yield blah
        return tuple(f())
Example #21
    def raw_use_expand_desc(self):
        """USE_EXPAND settings for the repo."""
        base = pjoin(self.profiles_base, 'desc')
        try:
            targets = sorted(listdir_files(base))
        except FileNotFoundError:
            return ()

        def f():
            for use_group in targets:
                group = use_group.split('.', 1)[0] + "_"

                def converter(key):
                    return (packages.AlwaysTrue, group + key)

                for x in self._split_use_desc_file(f'desc/{use_group}', converter):
                    yield x

        return tuple(f())
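
The group prefix built in f() follows Gentoo's USE_EXPAND naming: each file under profiles/desc names a group, and every key it contains is emitted with that group prepended (for example, an elibc.desc entry "glibc" becomes elibc_glibc). A sketch of just the prefixing step, with hypothetical file names:

targets = ['elibc.desc', 'video_cards.desc']   # assumed desc/ file names
for use_group in targets:
    group = use_group.split('.', 1)[0] + "_"
    print(group)   # 'elibc_', 'video_cards_': prepended to each key in the file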
Example #22
    def _get_packages(self, category):
        cpath = pjoin(self.base, category.lstrip(os.path.sep))
        l = set()
        d = {}
        lext = len(self.extension)
        bad = False
        try:
            for x in listdir_files(cpath):
                # don't use lstat; symlinks may exist
                if (x.endswith(".lockfile") or
                        not x[-lext:].lower() == self.extension or
                        x.startswith(".tmp.")):
                    continue
                pv = x[:-lext]
                try:
                    pkg = versioned_CPV(category+"/"+pv)
                except InvalidCPV:
                    bad = True
                if bad or not pkg.fullver:
                    if '-scm' in pv:
                        bad = 'scm'
                    elif '-try' in pv:
                        bad = 'try'
                    else:
                        raise InvalidCPV(
                            "%s/%s: no version component" %
                            (category, pv))
                    if self.ignore_paludis_versioning:
                        bad = False
                        continue
                    raise InvalidCPV(
                        "%s/%s: -%s version component is "
                        "not standard." % (category, pv, bad))
                l.add(pkg.package)
                d.setdefault((category, pkg.package), []).append(pkg.fullver)
        except EnvironmentError as e:
            raise_from(KeyError(
                "failed fetching packages for category %s: %s" %
                (pjoin(self.base, category.lstrip(os.path.sep)), str(e))))

        self._versions_tmp_cache.update(d)
        return tuple(l)
Example #23
File: repository.py Project: chutz/pkgcore
    def _get_packages(self, category):
        cpath = pjoin(self.base, category.lstrip(os.path.sep))
        l = set()
        d = {}
        lext = len(self.extension)
        bad = False
        try:
            for x in listdir_files(cpath):
                # don't use lstat; symlinks may exist
                if (x.endswith(".lockfile")
                        or not x[-lext:].lower() == self.extension
                        or x.startswith(".tmp.")):
                    continue
                pv = x[:-lext]
                try:
                    pkg = versioned_CPV(category + "/" + pv)
                except InvalidCPV:
                    bad = True
                if bad or not pkg.fullver:
                    if '-scm' in pv:
                        bad = 'scm'
                    elif '-try' in pv:
                        bad = 'try'
                    else:
                        raise InvalidCPV("%s/%s: no version component" %
                                         (category, pv))
                    if self.ignore_paludis_versioning:
                        bad = False
                        continue
                    raise InvalidCPV("%s/%s: -%s version component is "
                                     "not standard." % (category, pv, bad))
                l.add(pkg.package)
                d.setdefault((category, pkg.package), []).append(pkg.fullver)
        except EnvironmentError as e:
            raise_from(KeyError("failed fetching packages for category %s: %s" % \
            (pjoin(self.base, category.lstrip(os.path.sep)), str(e))))

        self._versions_tmp_cache.update(d)
        return tuple(l)
Example #24
File: glsa.py Project: filmor/pkgcore
 def iter_vulnerabilities(self):
     """generator yielding each GLSA restriction"""
     for path in self.paths:
         for fn in listdir_files(path):
             # glsa-1234-12.xml
             if not (fn.startswith("glsa-") and fn.endswith(".xml")):
                 logger.warning(f'invalid glsa file name: {fn!r}')
                 continue
             # This verifies the filename is of the correct syntax.
             try:
                 [int(x) for x in fn[5:-4].split("-")]
             except ValueError:
                 logger.warning(f'invalid glsa file name: {fn!r}')
                 continue
             root = etree.parse(pjoin(path, fn))
             glsa_node = root.getroot()
             if glsa_node.tag != 'glsa':
                 logger.warning(f'glsa file without glsa root node: {fn!r}')
                 continue
             for affected in root.findall('affected'):
                 for pkg in affected.findall('package'):
                     try:
                         pkgname = str(pkg.get('name')).strip()
                         pkg_vuln_restrict = \
                             self.generate_intersects_from_pkg_node(
                                 pkg, tag="glsa(%s)" % fn[5:-4])
                         if pkg_vuln_restrict is None:
                             continue
                         pkgatom = atom.atom(pkgname)
                         yield fn[5:-4], pkgname, pkgatom, pkg_vuln_restrict
                     except (TypeError, ValueError) as e:
                         # thrown from cpv.
                         logger.warning(
                             f"invalid glsa file {fn!r}, package {pkgname}: {e}"
                         )
                     except IGNORED_EXCEPTIONS:
                         raise
                     except Exception as e:
                         logger.warning(f"invalid glsa file {fn!r}: {e}")
Example #25
    config["world"] = basics.AutoConfigSection({
        "class":
        "pkgcore.pkgsets.filelist.WorldFile",
        "location":
        pjoin(root, const.WORLD_FILE)
    })
    config["system"] = basics.AutoConfigSection({
        "class": "pkgcore.pkgsets.system.SystemSet",
        "profile": "profile"
    })
    config["installed"] = basics.AutoConfigSection({
        "class": "pkgcore.pkgsets.installed.Installed",
        "vdb": "vdb"
    })
    config["versioned-installed"] = basics.AutoConfigSection({
        "class": "pkgcore.pkgsets.installed.VersionedInstalled",
        "vdb": "vdb"
    })

    set_fp = pjoin(portage_base_dir, "sets")
    try:
        for setname in listdir_files(set_fp):
            # Potential for name clashes here, those will just make
            # the set not show up in config.
            if setname in ("system", "world"):
                logger.warning("user defined set %s is disallowed; ignoring" %
                               pjoin(set_fp, setname))
                continue
            config[setname] = basics.AutoConfigSection({
                "class":
                "pkgcore.pkgsets.filelist.FileList",
                "location":
                pjoin(set_fp, setname)
            })
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
Example #26
    def _add_sets(self):
        self["world"] = basics.AutoConfigSection({
            "class":
            "pkgcore.pkgsets.filelist.WorldFile",
            "location":
            pjoin(self.root, econst.WORLD_FILE.lstrip('/'))
        })
        self["system"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.system.SystemSet",
            "profile": "profile"
        })
        self["installed"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.installed.Installed",
            "vdb": "vdb"
        })
        self["versioned-installed"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.installed.VersionedInstalled",
            "vdb": "vdb"
        })

        set_fp = pjoin(self.dir, "sets")
        try:
            for setname in listdir_files(set_fp):
                # Potential for name clashes here, those will just make
                # the set not show up in config.
                if setname in ("system", "world"):
                    logger.warning(
                        "user defined set %r is disallowed; ignoring",
                        pjoin(set_fp, setname))
                    continue
                self[setname] = basics.AutoConfigSection({
                    "class":
                    "pkgcore.pkgsets.filelist.FileList",
                    "location":
                    pjoin(set_fp, setname)
                })
        except FileNotFoundError:
            pass
Example #27
    def _get_packages(self, category):
        cpath = pjoin(self.base, category.lstrip(os.path.sep))
        l = set()
        d = {}
        lext = len(self.extension)
        bad = False
        try:
            for x in listdir_files(cpath):
                # don't use lstat; symlinks may exist
                if (x.endswith(".lockfile")
                        or not x[-lext:].lower() == self.extension
                        or x.startswith(".tmp.")):
                    continue
                pv = x[:-lext]
                pkg = VersionedCPV(f'{category}/{pv}')
                l.add(pkg.package)
                d.setdefault((category, pkg.package), []).append(pkg.fullver)
        except EnvironmentError as e:
            raise KeyError(
                "failed fetching packages for category %s: %s" % (pjoin(
                    self.base, category.lstrip(os.path.sep)), str(e))) from e

        self._versions_tmp_cache.update(d)
        return tuple(l)
Example #28
    def _get_packages(self, category):
        cpath = pjoin(self.base, category.lstrip(os.path.sep))
        l = set()
        d = {}
        lext = len(self.extension)
        bad = False
        try:
            for x in listdir_files(cpath):
                # don't use lstat; symlinks may exist
                if (x.endswith(".lockfile") or
                        not x[-lext:].lower() == self.extension or
                        x.startswith(".tmp.")):
                    continue
                pv = x[:-lext]
                pkg = versioned_CPV(f'{category}/{pv}')
                l.add(pkg.package)
                d.setdefault((category, pkg.package), []).append(pkg.fullver)
        except EnvironmentError as e:
            raise KeyError(
                "failed fetching packages for category %s: %s" %
                (pjoin(self.base, category.lstrip(os.path.sep)), str(e))) from e

        self._versions_tmp_cache.update(d)
        return tuple(l)
Example #29
def initialize_cache(package, force=False):
    """Determine available plugins in a package.

    Writes cache files if they are stale and writing is possible.
    """
    # package plugin cache, see above.
    package_cache = defaultdict(set)
    modpath = os.path.dirname(package.__file__)
    modlist = listdir_files(modpath)
    stored_cache_name = pjoin(modpath, CACHE_FILENAME)
    stored_cache = _read_cache_file(package, stored_cache_name)

    if force:
        _clean_old_caches(modpath)

    # Directory cache, mapping modulename to
    # (mtime, set([keys]))
    modlist = set(x for x in modlist
                  if os.path.splitext(x)[1] == '.py' and x != '__init__.py')

    cache_stale = False
    # Hunt for modules.
    actual_cache = defaultdict(set)
    mtime_cache = mappings.defaultdictkey(lambda x: int(os.path.getmtime(x)))
    for modfullname in sorted(modlist):
        modname = os.path.splitext(modfullname)[0]
        # It is an actual module. Check if its cache entry is valid.
        mtime = mtime_cache[pjoin(modpath, modfullname)]
        vals = stored_cache.get((modname, mtime))
        if vals is None or force:
            # Cache entry is stale.
            logger.debug('stale because of %s: actual %s != stored %s',
                         modname, mtime,
                         stored_cache.get(modname, (0, ()))[0])
            cache_stale = True
            entries = []
            qualname = '.'.join((package.__name__, modname))
            module = import_module(qualname)
            registry = getattr(module, PLUGIN_ATTR, {})
            vals = set()
            for key, plugs in registry.iteritems():
                for idx, plug_name in enumerate(plugs):
                    if isinstance(plug_name, basestring):
                        plug = _process_plugin(
                            package, _plugin_data(key, 0, qualname, plug_name))
                    else:
                        plug = plug_name
                    if plug is None:
                        # import failure, ignore it, error already logged
                        continue
                    priority = getattr(plug, 'priority', 0)
                    if not isinstance(priority, int):
                        logger.error(
                            "ignoring plugin %s: has a non integer priority: %s",
                            plug, priority)
                        continue
                    if plug_name is plug:
                        # this means it's an object, rather than a string; store
                        # the offset.
                        plug_name = idx
                    data = _plugin_data(key, priority, qualname, plug_name)
                    vals.add(data)
        actual_cache[(modname, mtime)] = vals
        for data in vals:
            package_cache[data.key].add(data)
    if force or set(stored_cache) != set(actual_cache):
        logger.debug('updating cache %r for new plugins', stored_cache_name)
        _write_cache_file(stored_cache_name, actual_cache)

    return mappings.ImmutableDict(
        (k, sort_plugs(v)) for k, v in package_cache.iteritems())
Example #30
File: plugin.py Project: ferringb/pkgcore
def initialize_cache(package, force=False, cache_dir=None):
    """Determine available plugins in a package.

    Writes cache files if they are stale and writing is possible.
    """
    modpath = os.path.dirname(package.__file__)
    pkgpath = os.path.dirname(os.path.dirname(modpath))
    uid = gid = -1
    mode = 0o755

    if cache_dir is None:
        if not force:
            # use user-generated caches if they exist, fallback to module cache
            if os.path.exists(pjoin(const.USER_CACHE_PATH, CACHE_FILENAME)):
                cache_dir = const.USER_CACHE_PATH
            elif os.path.exists(pjoin(const.SYSTEM_CACHE_PATH,
                                      CACHE_FILENAME)):
                cache_dir = const.SYSTEM_CACHE_PATH
                uid = os_data.portage_uid
                gid = os_data.portage_gid
                mode = 0o775
            else:
                cache_dir = modpath
        else:
            # generate module cache when running from git repo, otherwise create system/user cache
            if pkgpath == sys.path[0]:
                cache_dir = modpath
            elif os_data.uid in (os_data.root_uid, os_data.portage_uid):
                cache_dir = const.SYSTEM_CACHE_PATH
                uid = os_data.portage_uid
                gid = os_data.portage_gid
                mode = 0o775
            else:
                cache_dir = const.USER_CACHE_PATH

    # put pkgcore consumer plugins (e.g. pkgcheck) inside pkgcore cache dir
    if cache_dir in (const.SYSTEM_CACHE_PATH, const.USER_CACHE_PATH):
        chunks = package.__name__.split('.', 1)
        if chunks[0] != os.path.basename(cache_dir):
            cache_dir = pjoin(cache_dir, chunks[0])

    # package plugin cache, see above.
    package_cache = defaultdict(set)
    stored_cache_name = pjoin(cache_dir, CACHE_FILENAME)
    stored_cache = _read_cache_file(package, stored_cache_name)

    if force:
        _clean_old_caches(cache_dir)

    # Directory cache, mapping modulename to
    # (mtime, set([keys]))
    modlist = listdir_files(modpath)
    modlist = set(x for x in modlist
                  if os.path.splitext(x)[1] == '.py' and x != '__init__.py')

    cache_stale = False
    # Hunt for modules.
    actual_cache = defaultdict(set)
    mtime_cache = mappings.defaultdictkey(lambda x: int(os.path.getmtime(x)))
    for modfullname in sorted(modlist):
        modname = os.path.splitext(modfullname)[0]
        # It is an actual module. Check if its cache entry is valid.
        mtime = mtime_cache[pjoin(modpath, modfullname)]
        vals = stored_cache.get((modname, mtime))
        if vals is None or force:
            # Cache entry is stale.
            logger.debug('stale because of %s: actual %s != stored %s',
                         modname, mtime,
                         stored_cache.get(modname, (0, ()))[0])
            cache_stale = True
            entries = []
            qualname = '.'.join((package.__name__, modname))
            module = import_module(qualname)
            registry = getattr(module, PLUGIN_ATTR, {})
            vals = set()
            for key, plugs in registry.items():
                for idx, plug_name in enumerate(plugs):
                    if isinstance(plug_name, str):
                        plug = _process_plugin(
                            package, _plugin_data(key, 0, qualname, plug_name))
                    else:
                        plug = plug_name
                    if plug is None:
                        # import failure, ignore it, error already logged
                        continue
                    priority = getattr(plug, 'priority', 0)
                    if not isinstance(priority, int):
                        logger.error(
                            "ignoring plugin %s: has a non integer priority: %s",
                            plug, priority)
                        continue
                    if plug_name is plug:
                        # this means it's an object, rather than a string; store
                        # the offset.
                        plug_name = idx
                    data = _plugin_data(key, priority, qualname, plug_name)
                    vals.add(data)
        actual_cache[(modname, mtime)] = vals
        for data in vals:
            package_cache[data.key].add(data)
    if force or set(stored_cache) != set(actual_cache):
        logger.debug('updating cache %r for new plugins', stored_cache_name)
        ensure_dirs(cache_dir, uid=uid, gid=gid, mode=mode)
        _write_cache_file(stored_cache_name, actual_cache, uid=uid, gid=gid)

    return mappings.ImmutableDict(
        (k, sort_plugs(v)) for k, v in package_cache.items())
Example #31
 def licenses(self):
     try:
         content = listdir_files(self.licenses_dir)
     except EnvironmentError:
         content = ()
     return frozenset(content)
Example #32
File: plugin.py Project: neko259/pkgcore
def initialize_cache(package, force=False):
    """Determine available plugins in a package.

    Writes cache files if they are stale and writing is possible.
    """
    # package plugin cache, see above.
    package_cache = defaultdict(set)
    modpath = os.path.dirname(package.__file__)
    modlist = listdir_files(modpath)
    stored_cache_name = pjoin(modpath, CACHE_FILENAME)
    stored_cache = _read_cache_file(package, stored_cache_name)

    if force:
        _clean_old_caches(modpath)

    # Directory cache, mapping modulename to
    # (mtime, set([keys]))
    modlist = set(x for x in modlist if os.path.splitext(x)[1] == ".py" and x != "__init__.py")

    cache_stale = False
    # Hunt for modules.
    actual_cache = defaultdict(set)
    mtime_cache = mappings.defaultdictkey(lambda x: int(os.path.getmtime(x)))
    for modfullname in sorted(modlist):
        modname = os.path.splitext(modfullname)[0]
        # It is an actual module. Check if its cache entry is valid.
        mtime = mtime_cache[pjoin(modpath, modfullname)]
        vals = stored_cache.get((modname, mtime))
        if vals is None or force:
            # Cache entry is stale.
            logger.debug(
                "stale because of %s: actual %s != stored %s", modname, mtime, stored_cache.get(modname, (0, ()))[0]
            )
            cache_stale = True
            entries = []
            qualname = ".".join((package.__name__, modname))
            try:
                module = import_module(qualname)
            except ImportError:
                # This is a serious problem, but if we blow up
                # here we cripple pkgcore entirely which may make
                # fixing the problem impossible. So be noisy but
                # try to continue.
                logger.exception("plugin import failed for %s processing %s", package.__name__, modname)
                continue

            registry = getattr(module, PLUGIN_ATTR, {})
            vals = set()
            for key, plugs in registry.iteritems():
                for idx, plug_name in enumerate(plugs):
                    if isinstance(plug_name, basestring):
                        plug = _process_plugin(package, _plugin_data(key, 0, qualname, plug_name))
                    else:
                        plug = plug_name
                    if plug is None:
                        # import failure, ignore it, error already logged
                        continue
                    priority = getattr(plug, "priority", 0)
                    if not isinstance(priority, int):
                        logger.error("ignoring plugin %s: has a non integer priority: %s", plug, priority)
                        continue
                    if plug_name is plug:
                        # this means it's an object, rather than a string; store
                        # the offset.
                        plug_name = idx
                    data = _plugin_data(key, priority, qualname, plug_name)
                    vals.add(data)
        actual_cache[(modname, mtime)] = vals
        for data in vals:
            package_cache[data.key].add(data)
    if force or set(stored_cache) != set(actual_cache):
        logger.debug("updating cache %r for new plugins", stored_cache_name)
        _write_cache_file(stored_cache_name, actual_cache)

    return mappings.ImmutableDict((k, sort_plugs(v)) for k, v in package_cache.iteritems())
Example #33
File: repo_objs.py Project: chutz/pkgcore
 def licenses(self):
     try:
         content = listdir_files(self.licenses_dir)
     except EnvironmentError:
         content = ()
     return frozenset(content)
Example #34
    def trigger(self, engine, existing_cset, install_cset):
        # hackish, but it works.
        protected_filter = gen_config_protect_filter(engine.offset,
                                                     self.extra_protects,
                                                     self.extra_disables).match
        ignore_filter = gen_collision_ignore_filter(engine.offset).match
        protected = {}

        for x in existing_cset.iterfiles():
            if not ignore_filter(x.location) and protected_filter(x.location):
                replacement = install_cset[x]
                if not simple_chksum_compare(replacement, x):
                    protected.setdefault(
                        pjoin(engine.offset,
                              os.path.dirname(x.location).lstrip(os.path.sep)),
                        []).append((os.path.basename(replacement.location),
                                    replacement))

        for dir_loc, entries in protected.iteritems():
            updates = {x[0]: [] for x in entries}
            try:
                existing = sorted(x for x in listdir_files(dir_loc)
                                  if x.startswith("._cfg"))
            except OSError as oe:
                if oe.errno != errno.ENOENT:
                    raise
                # this shouldn't occur.
                continue

            for x in existing:
                try:
                    # ._cfg0000_filename
                    count = int(x[5:9])
                    if x[9] != "_":
                        raise ValueError
                    fn = x[10:]
                except (ValueError, IndexError):
                    continue
                if fn in updates:
                    updates[fn].append((count, fn))

            # now we rename.
            for fname, entry in entries:
                # check for any updates with the same chksums.
                count = 0
                for cfg_count, cfg_fname in updates[fname]:
                    if simple_chksum_compare(
                            livefs.gen_obj(pjoin(dir_loc, cfg_fname)), entry):
                        count = cfg_count
                        break
                    count = max(count, cfg_count + 1)
                try:
                    install_cset.remove(entry)
                except KeyError:
                    # this shouldn't occur...
                    continue
                new_fn = pjoin(dir_loc, "._cfg%04i_%s" % (count, fname))
                new_entry = entry.change_attributes(location=new_fn)
                install_cset.add(new_entry)
                self.renames[new_entry] = entry
            del updates
Example #35
    def _cmd_implementation_digests(self, domain, matches, observer,
                                    mirrors=False, force=False):
        manifest_config = self.repo.config.manifests
        if manifest_config.disabled:
            observer.info(f"repo {self.repo.repo_id} has manifests disabled")
            return
        required_chksums = set(manifest_config.required_hashes)
        write_chksums = manifest_config.hashes
        distdir = domain.fetcher.distdir
        ret = set()

        for key_query in sorted(set(match.unversioned_atom for match in matches)):
            pkgs = self.repo.match(key_query)

            # check for pkgs masked by bad metadata
            bad_metadata = self.repo._masked.match(key_query)
            if bad_metadata:
                for pkg in bad_metadata:
                    e = pkg.data
                    error_str = f"{pkg.cpvstr}: {e.msg(verbosity=observer.verbosity)}"
                    observer.error(error_str)
                    ret.add(key_query)
                continue

            # Check for bad ebuilds -- mismatched or invalid PNs won't be
            # matched by regular restrictions so they will otherwise be
            # ignored.
            ebuilds = {
                x for x in listdir_files(pjoin(self.repo.location, str(key_query)))
                if x.endswith('.ebuild')
            }
            unknown_ebuilds = ebuilds.difference(os.path.basename(x.path) for x in pkgs)
            if unknown_ebuilds:
                error_str = (
                    f"{key_query}: invalid ebuild{_pl(unknown_ebuilds)}: "
                    f"{', '.join(unknown_ebuilds)}"
                )
                observer.error(error_str)
                ret.add(key_query)
                continue

            # empty package dir
            if not pkgs:
                continue

            manifest = pkgs[0].manifest

            # all pkgdir fetchables
            pkgdir_fetchables = {}
            for pkg in pkgs:
                pkgdir_fetchables.update({
                    fetchable.filename: fetchable for fetchable in
                    iflatten_instance(pkg._get_attr['fetchables'](
                        pkg, allow_missing_checksums=True,
                        skip_default_mirrors=(not mirrors)),
                        fetch.fetchable)
                    })

            # fetchables targeted for (re-)manifest generation
            fetchables = {}
            chksum_set = set(write_chksums)
            for filename, fetchable in pkgdir_fetchables.items():
                if force or not required_chksums.issubset(fetchable.chksums):
                    fetchable.chksums = {
                        k: v for k, v in fetchable.chksums.items() if k in chksum_set}
                    fetchables[filename] = fetchable

            # Manifest file is current and not forcing a refresh
            manifest_current = set(manifest.distfiles.keys()) == set(pkgdir_fetchables.keys())
            if manifest_config.thin and not fetchables and manifest_current:
                # Manifest files aren't necessary with thin manifests and no distfiles
                if os.path.exists(manifest.path) and not pkgdir_fetchables:
                    try:
                        os.remove(manifest.path)
                    except OSError:
                        observer.error(
                            f"failed removing old manifest: {key_query}::{self.repo.repo_id}")
                        ret.add(key_query)
                continue

            pkg_ops = domain.pkg_operations(pkgs[0], observer=observer)
            if not pkg_ops.supports("fetch"):
                observer.error(f"pkg {pkg} doesn't support fetching, can't generate manifest")
                ret.add(key_query)
                continue

            # fetch distfiles
            if not pkg_ops.fetch(list(fetchables.values()), observer):
                ret.add(key_query)
                continue

            # calculate checksums for fetched distfiles
            try:
                for fetchable in fetchables.values():
                    chksums = chksum.get_chksums(
                        pjoin(distdir, fetchable.filename), *write_chksums)
                    fetchable.chksums = dict(zip(write_chksums, chksums))
            except chksum.MissingChksumHandler as e:
                observer.error(f'failed generating chksum: {e}')
                ret.add(key_query)
                break

            if key_query not in ret:
                fetchables.update(pkgdir_fetchables)
                observer.info(f"generating manifest: {key_query}::{self.repo.repo_id}")
                manifest.update(sorted(fetchables.values()), chfs=write_chksums)

        return ret
Example #36
    def _cmd_implementation_digests(self,
                                    domain,
                                    matches,
                                    observer,
                                    mirrors=False,
                                    force=False):
        manifest_config = self.repo.config.manifests
        if manifest_config.disabled:
            observer.info(f"repo {self.repo.repo_id} has manifests disabled")
            return
        required_chksums = set(manifest_config.required_hashes)
        write_chksums = manifest_config.hashes
        distdir = domain.fetcher.distdir
        ret = set()

        for key_query in sorted(
                set(match.unversioned_atom for match in matches)):
            pkgs = self.repo.match(key_query)

            # check for pkgs masked by bad metadata
            bad_metadata = self.repo._masked.match(key_query)
            if bad_metadata:
                for pkg in bad_metadata:
                    e = pkg.data
                    error_str = f"{pkg.cpvstr}: {e.msg(verbosity=observer.verbosity)}"
                    observer.error(error_str)
                    ret.add(key_query)
                continue

            # Check for bad ebuilds -- mismatched or invalid PNs won't be
            # matched by regular restrictions so they will otherwise be
            # ignored.
            ebuilds = {
                x
                for x in listdir_files(
                    pjoin(self.repo.location, str(key_query)))
                if x.endswith('.ebuild')
            }
            unknown_ebuilds = ebuilds.difference(
                os.path.basename(x.path) for x in pkgs)
            if unknown_ebuilds:
                error_str = (
                    f"{key_query}: invalid ebuild{_pl(unknown_ebuilds)}: "
                    f"{', '.join(unknown_ebuilds)}")
                observer.error(error_str)
                ret.add(key_query)
                continue

            # empty package dir
            if not pkgs:
                continue

            manifest = pkgs[0].manifest

            # all pkgdir fetchables
            pkgdir_fetchables = {}
            for pkg in pkgs:
                pkgdir_fetchables.update({
                    fetchable.filename: fetchable
                    for fetchable in iflatten_instance(
                        pkg._get_attr['fetchables']
                        (pkg,
                         allow_missing_checksums=True,
                         skip_default_mirrors=(not mirrors)), fetch.fetchable)
                })

            # fetchables targeted for (re-)manifest generation
            fetchables = {}
            chksum_set = set(write_chksums)
            for filename, fetchable in pkgdir_fetchables.items():
                if force or not required_chksums.issubset(fetchable.chksums):
                    fetchable.chksums = {
                        k: v
                        for k, v in fetchable.chksums.items()
                        if k in chksum_set
                    }
                    fetchables[filename] = fetchable

            # Manifest file is current and not forcing a refresh
            manifest_current = set(manifest.distfiles.keys()) == set(
                pkgdir_fetchables.keys())
            if manifest_config.thin and not fetchables and manifest_current:
                # Manifest files aren't necessary with thin manifests and no distfiles
                if os.path.exists(manifest.path) and not pkgdir_fetchables:
                    try:
                        os.remove(manifest.path)
                    except OSError:
                        observer.error(
                            f"failed removing old manifest: {key_query}::{self.repo.repo_id}"
                        )
                        ret.add(key_query)
                continue

            pkg_ops = domain.pkg_operations(pkgs[0], observer=observer)
            if not pkg_ops.supports("fetch"):
                observer.error(
                    f"pkg {pkg} doesn't support fetching, can't generate manifest"
                )
                ret.add(key_query)
                continue

            # fetch distfiles
            if not pkg_ops.fetch(list(fetchables.values()), observer):
                ret.add(key_query)
                continue

            # calculate checksums for fetched distfiles
            try:
                for fetchable in fetchables.values():
                    chksums = chksum.get_chksums(
                        pjoin(distdir, fetchable.filename), *write_chksums)
                    fetchable.chksums = dict(zip(write_chksums, chksums))
            except chksum.MissingChksumHandler as e:
                observer.error(f'failed generating chksum: {e}')
                ret.add(key_query)
                break

            if key_query not in ret:
                fetchables.update(pkgdir_fetchables)
                observer.info(
                    f"generating manifest: {key_query}::{self.repo.repo_id}")
                manifest.update(sorted(fetchables.values()),
                                chfs=write_chksums)

        return ret
Example #37
File: pclean.py Project: ferringb/pkgcore
def _dist_validate_args(parser, namespace):
    distdir = namespace.domain.distdir
    repo = namespace.repo
    if repo is None:
        repo = multiplex.tree(
            *get_virtual_repos(namespace.domain.source_repos, False))

    all_dist_files = set(os.path.basename(f) for f in listdir_files(distdir))
    target_files = set()
    installed_dist = set()
    exists_dist = set()
    excludes_dist = set()
    restricted_dist = set()

    # exclude distfiles used by installed packages -- note that this uses the
    # distfiles attr with USE settings bound to it
    if namespace.exclude_installed:
        for pkg in namespace.domain.all_installed_repos:
            installed_dist.update(iflatten_instance(pkg.distfiles))

    # exclude distfiles for existing ebuilds or fetch restrictions
    if namespace.exclude_fetch_restricted or (namespace.exclude_exists
                                              and not namespace.restrict):
        for pkg in repo:
            exists_dist.update(
                iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))
            if 'fetch' in pkg.restrict:
                restricted_dist.update(
                    iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))

    # exclude distfiles from specified restrictions
    if namespace.exclude_restrict:
        for pkg in repo.itermatch(namespace.exclude_restrict, sorter=sorted):
            excludes_dist.update(
                iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))

    # determine dist files for custom restrict targets
    if namespace.restrict:
        target_dist = defaultdict(lambda: defaultdict(set))
        for pkg in repo.itermatch(namespace.restrict, sorter=sorted):
            s = set(iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))
            target_dist[pkg.unversioned_atom][pkg].update(s)
            if namespace.exclude_exists:
                exists_dist.update(s)

        extra_regex_prefixes = defaultdict(set)
        pkg_regex_prefixes = set()
        for catpn, pkgs in target_dist.items():
            pn_regex = r'\W'.join(re.split(r'\W', catpn.package))
            pkg_regex = re.compile(
                r'(%s)(\W\w+)+([\W?(0-9)+])*(\W\w+)*(\.\w+)*' % pn_regex,
                re.IGNORECASE)
            pkg_regex_prefixes.add(pn_regex)
            for pkg, files in pkgs.items():
                files = sorted(files)
                for f in files:
                    if (pkg_regex.match(f)
                            or (extra_regex_prefixes and re.match(
                                r'(%s)([\W?(0-9)+])+(\W\w+)*(\.\w+)+' %
                                '|'.join(extra_regex_prefixes[catpn]), f))):
                        continue
                    else:
                        pieces = re.split(r'([\W?(0-9)+])+(\W\w+)*(\.\w+)+', f)
                        if pieces[-1] == '':
                            pieces.pop()
                        if len(pieces) > 1:
                            extra_regex_prefixes[catpn].add(pieces[0])

        if target_dist:
            regexes = []
            # build regexes to match distfiles for older ebuilds no longer in the tree
            if pkg_regex_prefixes:
                pkg_regex_prefixes_str = '|'.join(sorted(pkg_regex_prefixes))
                regexes.append(
                    re.compile(r'(%s)(\W\w+)+([\W?(0-9)+])*(\W\w+)*(\.\w+)*' %
                               (pkg_regex_prefixes_str, )))
            if extra_regex_prefixes:
                extra_regex_prefixes_str = '|'.join(
                    sorted(
                        chain.from_iterable(
                            v for k, v in extra_regex_prefixes.items())))
                regexes.append(
                    re.compile(r'(%s)([\W?(0-9)+])+(\W\w+)*(\.\w+)+' %
                               (extra_regex_prefixes_str, )))

            if regexes:
                for f in all_dist_files:
                    if any(r.match(f) for r in regexes):
                        target_files.add(f)
    else:
        target_files = all_dist_files

    # exclude files tagged for saving
    saving_files = installed_dist | exists_dist | excludes_dist | restricted_dist
    target_files.difference_update(saving_files)

    targets = (pjoin(distdir, f)
               for f in sorted(all_dist_files.intersection(target_files)))
    removal_func = partial(os.remove)
    namespace.remove = ((removal_func, f)
                        for f in filter(namespace.file_filters.run, targets))
Example #38
File: plugin.py Project: chutz/pkgcore
def initialize_cache(package, force=False):
    """Determine available plugins in a package.

    Writes cache files if they are stale and writing is possible.
    """
    # package plugin cache, see above.
    package_cache = collections.defaultdict(set)
    seen_modnames = set()
    for path in package.__path__:
        # Check if the path actually exists first.
        try:
            modlist = listdir_files(path)
        except OSError as e:
            if e.errno not in (errno.ENOENT, errno.ENOTDIR):
                raise
            continue
        stored_cache_name = pjoin(path, CACHE_FILENAME)
        stored_cache = _read_cache_file(package, stored_cache_name)

        if force:
            _clean_old_caches(path)

        # Directory cache, mapping modulename to
        # (mtime, set([keys]))
        modlist = set(
            x for x in modlist
            if os.path.splitext(x)[1] == '.py' and x != '__init__.py')
        modlist.difference_update(seen_modnames)

        cache_stale = False
        # Hunt for modules.
        actual_cache = collections.defaultdict(set)
        mtime_cache = mappings.defaultdictkey(
            lambda x: int(os.path.getmtime(x)))
        for modfullname in sorted(modlist):
            modname = os.path.splitext(modfullname)[0]
            # It is an actual module. Check if its cache entry is valid.
            mtime = mtime_cache[pjoin(path, modfullname)]
            vals = stored_cache.get((modname, mtime))
            if vals is None or force:
                # Cache entry is stale.
                logger.debug('stale because of %s: actual %s != stored %s',
                             modname, mtime,
                             stored_cache.get(modname, (0, ()))[0])
                cache_stale = True
                entries = []
                qualname = '.'.join((package.__name__, modname))
                try:
                    module = modules.load_module(qualname)
                except modules.FailedImport:
                    # This is a serious problem, but if we blow up
                    # here we cripple pkgcore entirely which may make
                    # fixing the problem impossible. So be noisy but
                    # try to continue.
                    logger.exception(
                        'plugin import failed for %s processing %s',
                        package.__name__, modname)
                    continue

                registry = getattr(module, PLUGIN_ATTR, {})
                vals = set()
                for key, plugs in registry.iteritems():
                    for idx, plug_name in enumerate(plugs):
                        if isinstance(plug_name, basestring):
                            plug = _process_plugin(
                                package,
                                _plugin_data(key, 0, qualname, plug_name))
                        else:
                            plug = plug_name
                        if plug is None:
                            # import failure, ignore it, error already logged
                            continue
                        priority = getattr(plug, 'priority', 0)
                        if not isinstance(priority, int):
                            logger.error(
                                "ignoring plugin %s: has a non integer priority: %s",
                                plug, priority)
                            continue
                        if plug_name is plug:
                            # this means it's an object, rather than a string; store
                            # the offset.
                            plug_name = idx
                        data = _plugin_data(key, priority, qualname, plug_name)
                        vals.add(data)
            actual_cache[(modname, mtime)] = vals
            seen_modnames.add(modfullname)
            for data in vals:
                package_cache[data.key].add(data)
        if force or set(stored_cache) != set(actual_cache):
            logger.debug('updating cache %r for new plugins',
                         stored_cache_name)
            _write_cache_file(stored_cache_name, actual_cache)

    return mappings.ImmutableDict(
        (k, sort_plugs(v)) for k, v in package_cache.iteritems())