Code example #1
File: ondisk.py Project: neko259/pkgcore
 def _internal_load_key(self, path, key):
     key = self._metadata_rewrites.get(key, key)
     if key == "contents":
         data = ContentsFile(pjoin(path, "CONTENTS"), mutable=True)
     elif key == "environment":
         fp = pjoin(path, key)
         if not os.path.exists(fp + ".bz2"):
             if not os.path.exists(fp):
                 # icky.
                 raise KeyError("environment: no environment file found")
             data = data_source.local_source(fp)
         else:
             data = data_source.bz2_source(fp + ".bz2")
     elif key == "ebuild":
         fp = pjoin(path, os.path.basename(path.rstrip(os.path.sep)) + ".ebuild")
         data = data_source.local_source(fp)
     elif key == 'repo':
         # try both, for portage/paludis compatibility.
         data = readfile(pjoin(path, 'repository'), True)
         if data is None:
             data = readfile(pjoin(path, 'REPOSITORY'), True)
             if data is None:
                 raise KeyError(key)
     else:
         data = readfile(pjoin(path, key), True)
         if data is None:
             raise KeyError((path, key))
     return data
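
Note on the "environment" branch above: the lookup prefers the bzip2-compressed file and only falls back to the plain one. Below is a minimal stdlib-only sketch of that same lookup order; file names follow the example, and pkgcore's data_source wrappers are replaced with plain byte reads for illustration.

    import bz2
    import os

    def read_environment(path):
        # Prefer the compressed copy, mirroring the branch above.
        fp = os.path.join(path, "environment")
        if os.path.exists(fp + ".bz2"):
            with bz2.open(fp + ".bz2", "rb") as f:
                return f.read()
        if os.path.exists(fp):
            with open(fp, "rb") as f:
                return f.read()
        raise KeyError("environment: no environment file found")
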
Code example #2
    def trigger(self, engine, cset):
        op = self.format_op
        op = getattr(op, 'install_op', op)
        op.setup_workdir()
        merge_cset = cset
        if engine.offset != '/':
            merge_cset = cset.change_offset(engine.offset, '/')
        merge_contents(merge_cset, offset=op.env["D"])

        # ok.  they're on disk.
        # now to avoid going back to the binpkg, we rewrite
        # the data_source for files to the on disk location.
        # we can update in place also, since we're not changing the mapping.

        # this rewrites the data_source to the ${D} loc.
        d = op.env["D"]
        fi = (x.change_attributes(
            data=local_source(pjoin(d, x.location.lstrip('/'))))
              for x in merge_cset.iterfiles())

        if engine.offset:
            # we're using merge_cset above, which has the final offset loc
            # pruned; this is required for the merge, however, we're updating
            # the cset so we have to insert the final offset back in.
            # wrap the iter, iow.
            fi = offset_rewriter(engine.offset, fi)

        cset.update(contentsSet(fi))

        # we *probably* should change the csets class at some point
        # since it no longer needs to be tar, but that's for another day.
        engine.replace_cset('new_cset', cset)
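
The offset handling above strips the merge offset for merge_contents and then reinserts it before updating the cset. A sketch of that prefix round-trip on plain path strings; change_offset here is a hypothetical stand-in for the cset/offset_rewriter machinery, not pkgcore API.

    from os.path import join

    def change_offset(path, old, new):
        # re-root path from the old prefix onto the new one
        assert path.startswith(old)
        return join(new, path[len(old):].lstrip('/'))

    p = '/opt/prefix/usr/bin/tool'
    stripped = change_offset(p, '/opt/prefix', '/')         # '/usr/bin/tool'
    restored = change_offset(stripped, '/', '/opt/prefix')  # original path again
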
Code example #3
    def test_transfer_to_path(self):
        data = self._mk_data()
        reader = self.get_obj(data=data)
        writer = data_source.local_source(pjoin(self.dir, 'transfer_to_path'), mutable=True)

        reader.transfer_to_path(writer.path)

        self.assertContents(reader, writer)
Code example #4
File: domain.py Project: houseofsuns/pkgcore
def package_env_splitter(basedir, val):
    val = val.split()
    if len(val) == 1:
        raise ValueError(
            "package.env files require atoms followed by env file names, got %s"
            % val)
    return parse_match(val[0]), tuple(
        local_source(pjoin(basedir, env_file)) for env_file in val[1:])
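
The splitter expects each package.env line to be an atom followed by one or more env file names. A stdlib-only sketch of the same parsing rule; parse_match and local_source are left out, and the paths are illustrative.

    import os

    def split_package_env_line(basedir, line):
        tokens = line.split()
        if len(tokens) < 2:
            raise ValueError(
                "package.env files require atoms followed by env file names, "
                "got %s" % tokens)
        # first token is the atom; the rest resolve relative to basedir
        return tokens[0], tuple(os.path.join(basedir, f) for f in tokens[1:])

    # split_package_env_line("/etc/portage/env", "dev-util/foo debug.conf")
    # -> ("dev-util/foo", ("/etc/portage/env/debug.conf",))
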
Code example #5
File: livefs.py Project: shen390s/pkgcore
def gen_obj(path,
            stat=None,
            chksum_handlers=None,
            real_location=None,
            stat_func=os.lstat,
            **overrides):
    """
    given a fs path, and an optional stat, create an appropriate fs obj.

    :param stat: stat object to reuse if available
    :param real_location: real path to the object if path is the desired
        location, rather than the existent location.
    :raise KeyError: if no obj type matches the stat checks
    :return: :obj:`pkgcore.fs.fs.fsBase` derivative
    """

    if real_location is None:
        real_location = path
    if stat is None:
        try:
            stat = stat_func(real_location)
        except EnvironmentError as e:
            if stat_func == os.lstat or e.errno != errno.ENOENT:
                raise
            stat = os.lstat(real_location)

    mode = stat.st_mode
    d = {
        "mtime": stat.st_mtime,
        "mode": S_IMODE(mode),
        "uid": stat.st_uid,
        "gid": stat.st_gid
    }
    if S_ISREG(mode):
        d["size"] = stat.st_size
        d["data"] = local_source(real_location)
        d["dev"] = stat.st_dev
        d["inode"] = stat.st_ino
        if chksum_handlers is not None:
            d["chf_types"] = chksum_handlers
        d.update(overrides)
        return fsFile(path, **d)

    d.update(overrides)
    if S_ISDIR(mode):
        return fsDir(path, **d)
    elif S_ISLNK(mode):
        d["target"] = os.readlink(real_location)
        return fsSymlink(path, **d)
    elif S_ISFIFO(mode):
        return fsFifo(path, **d)
    else:
        major, minor = get_major_minor(stat)
        d["minor"] = minor
        d["major"] = major
        d["mode"] = mode
        return fsDev(path, **d)
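
The heart of gen_obj is the dispatch on stat.st_mode. A stdlib-only sketch of the same classification without the fsBase object construction; classify is an illustrative name, not pkgcore API.

    import os
    from stat import S_IMODE, S_ISDIR, S_ISFIFO, S_ISLNK, S_ISREG

    def classify(path):
        st = os.lstat(path)
        mode = st.st_mode
        if S_ISREG(mode):
            return ("file", S_IMODE(mode), st.st_size)
        if S_ISDIR(mode):
            return ("dir", S_IMODE(mode), None)
        if S_ISLNK(mode):
            return ("symlink", S_IMODE(mode), os.readlink(path))
        if S_ISFIFO(mode):
            return ("fifo", S_IMODE(mode), None)
        # anything else falls through to a device node, as in the else branch
        return ("device", mode, None)
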
Code example #6
File: test_ebuild_src.py Project: shen390s/pkgcore
        def _path(self, cpv, eapi_str):
            ebuild = pjoin(str(tmpdir), "temp-0.ebuild")
            with open(ebuild, 'w') as f:
                f.write(textwrap.dedent(f'''\
                    # Copyright
                    # License

                    EAPI={eapi_str}'''))
            return local_source(str(ebuild))
Code example #7
File: test_ebuild_src.py Project: radhermit/pkgcore
        def _path(self, cpv, eapi_str):
            ebuild = pjoin(str(tmpdir), "temp-0.ebuild")
            with open(ebuild, 'w') as f:
                f.write(textwrap.dedent(f'''\
                    # Copyright
                    # License

                    EAPI={eapi_str}'''))
            return local_source(str(ebuild))
Code example #8
File: ebuild_built.py Project: filmor/pkgcore
 def __pull_metadata(self, key):
     if key == "contents":
         return self.scan_contents(self.image_root)
     elif key == "environment":
         return local_source(self.environment_path)
     else:
         try:
             return getattr(self.pkg, key)
         except AttributeError as e:
             raise KeyError(key) from e
Code example #9
File: ebuild_built.py Project: chutz/pkgcore
 def __pull_metadata(self, key):
     if key == "contents":
         return self.scan_contents(self.image_root)
     elif key == "environment":
         return local_source(self.environment_path)
     else:
         try:
             return getattr(self.pkg, key)
         except AttributeError:
             raise_from(KeyError(key))
Code example #10
File: engine.py Project: floppym/pkgcore
    def get_writable_fsobj(self, fsobj, prefer_reuse=True, empty=False):

        path = source = None
        if fsobj:
            source = fsobj.data
            if source.mutable:
                return fsobj
            if self.allow_reuse and prefer_reuse:
                path = source.path

                # XXX: this should be doing abspath fs intersection probably,
                # although the paths generated are from triggers/engine- still.

                if path is not None and not path.startswith(self.tempdir):
                    # the fsobj pathway isn't in temp space; force a transfer.
                    path = None

            if path:
                # ok, it's tempspace, and reusable.
                obj = data_source.local_source(path, True,
                    encoding=source.encoding)

                if empty:
                    obj.bytes_fileobj(True).truncate(0)
                return obj

        # clone it into tempspace; it's required we control the tempspace,
        # so this function is safe in our usage.
        fd, path = tempfile.mkstemp(prefix='merge-engine-', dir=self.tempdir)

        # XXX: annoying quirk of python, we don't want append mode, so 'a+'
        # isn't viable; 'w' would truncate the file, so data_source uses r+.
        # this however doesn't allow us to state "create if missing"
        # so we create it ourselves.  Annoying, but so it goes.
        # just touch the filepath.
        open(path, 'w').close()
        new_source = data_source.local_source(path, True,
            encoding=getattr(fsobj, 'encoding', None))

        if source and not empty:
            data_source.transfer(source.bytes_fsobj(), new_source.bytes_fsobj(True))
        return new_source
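
The XXX comment above comes down to open-mode semantics: 'w' truncates, 'a+' appends, and 'r+' neither truncates nor creates, so the file must be touched first. A stdlib sketch of the quirk and the workaround, using a throwaway temp path:

    import os
    import tempfile

    path = os.path.join(tempfile.mkdtemp(), "demo")

    # 'r+' refuses to create a missing file:
    try:
        open(path, "r+")
    except FileNotFoundError:
        open(path, "w").close()  # so touch it first, as the code above does

    with open(path, "r+") as f:  # now r+ opens without truncating
        f.write("data")
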
Code example #11
 def get_obj(self, data="foonani", mutable=False, test_creation=False):
     self.fp = pjoin(self.dir, "localsource.test")
     mode = None
     if not test_creation:
         if isinstance(data, bytes):
             mode = 'wb'
         elif mode is None:
             mode = 'w'
         with open(self.fp, mode) as f:
             f.write(data)
     return data_source.local_source(self.fp, mutable=mutable)
Code example #12
File: test_ops.py Project: veelai/pkgcore
 def test_it(self):
     src = pjoin(self.dir, "copy_test_src")
     dest = pjoin(self.dir, "copy_test_dest")
     open(src, "w").writelines("asdf\n" for i in xrange(10))
     kwds = {"mtime":10321, "uid":os.getuid(), "gid":os.getgid(),
             "mode":0664, "data":local_source(src), "dev":None,
             "inode":None}
     o = fs.fsFile(dest, **kwds)
     self.assertTrue(ops.default_copyfile(o))
     self.assertEqual("asdf\n" * 10, open(dest, "r").read())
     self.verify(o, kwds, os.stat(o.location))
Code example #13
 def get_obj(self, data="foonani", mutable=False, test_creation=False):
     self.fp = os.path.join(self.dir, "localsource.test")
     f = None
     if not test_creation:
         if compatibility.is_py3k:
             if isinstance(data, bytes):
                 f = open(self.fp, 'wb')
         if f is None:
             f = open(self.fp, "w")
         f.write(data)
         f.close()
     return data_source.local_source(self.fp, mutable=mutable)
Code example #14
 def get_obj(self, data="foonani", mutable=False, test_creation=False):
     self.fp = pjoin(self.dir, "localsource.test")
     f = None
     if not test_creation:
         if compatibility.is_py3k:
             if isinstance(data, bytes):
                 f = open(self.fp, 'wb')
         if f is None:
             f = open(self.fp, "w")
         f.write(data)
         f.close()
     return data_source.local_source(self.fp, mutable=mutable)
Code example #15
File: test_ops.py Project: chutz/pkgcore
 def test_it(self):
     src = pjoin(self.dir, "copy_test_src")
     dest = pjoin(self.dir, "copy_test_dest")
     with open(src, "w") as f:
         f.writelines("asdf\n" for i in range(10))
     kwds = {"mtime": 10321, "uid": os.getuid(), "gid": os.getgid(),
             "mode": 0o664, "data": local_source(src), "dev": None,
             "inode": None}
     o = fs.fsFile(dest, **kwds)
     self.assertTrue(ops.default_copyfile(o))
     with open(dest, "r") as f:
         self.assertEqual("asdf\n" * 10, f.read())
     self.verify(o, kwds, os.stat(o.location))
Code example #16
File: livefs.py Project: den4ix/pkgcore
def gen_obj(path, stat=None, chksum_handlers=None, real_location=None,
            stat_func=os.lstat, **overrides):
    """
    given a fs path, and an optional stat, create an appropriate fs obj.

    :param stat: stat object to reuse if available
    :param real_location: real path to the object if path is the desired
        location, rather than the existent location.
    :raise KeyError: if no obj type matches the stat checks
    :return: :obj:`pkgcore.fs.fs.fsBase` derivative
    """

    if real_location is None:
        real_location = path
    if stat is None:
        try:
            stat = stat_func(real_location)
        except EnvironmentError as e:
            if stat_func == os.lstat or e.errno != errno.ENOENT:
                raise
            stat = os.lstat(real_location)

    mode = stat.st_mode
    d = {"mtime":stat.st_mtime, "mode":S_IMODE(mode),
         "uid":stat.st_uid, "gid":stat.st_gid}
    if S_ISREG(mode):
        d["size"] = stat.st_size
        d["data"] = local_source(real_location)
        d["dev"] = stat.st_dev
        d["inode"] = stat.st_ino
        if chksum_handlers is not None:
            d["chf_types"] = chksum_handlers
        d.update(overrides)
        return fsFile(path, **d)

    d.update(overrides)
    if S_ISDIR(mode):
        return fsDir(path, **d)
    elif S_ISLNK(mode):
        d["target"] = os.readlink(real_location)
        return fsSymlink(path, **d)
    elif S_ISFIFO(mode):
        return fsFifo(path, **d)
    else:
        major, minor = get_major_minor(stat)
        d["minor"] = minor
        d["major"] = major
        d["mode"] = mode
        return fsDev(path, **d)
Code example #17
    def test_transfer_to_path(self):
        data = self._mk_data()
        reader = self.get_obj(data=data)
        if isinstance(reader, data_source.bz2_source):
            writer = data_source.bz2_source(pjoin(self.dir,
                                                  'transfer_to_path'),
                                            mutable=True)
        else:
            writer = data_source.local_source(pjoin(self.dir,
                                                    'transfer_to_path'),
                                              mutable=True)

        reader.transfer_to_path(writer.path)

        self.assertContents(reader, writer)
Code example #18
File: domain.py Project: den4ix/pkgcore
 def get_package_bashrcs(self, pkg):
     for source in self.profile.bashrcs:
         yield source
     for restrict, source in self.bashrcs:
         if restrict.match(pkg):
             yield source
     if not self.ebuild_hook_dir:
         return
     # matching portage behaviour... it's whacked.
     base = pjoin(self.ebuild_hook_dir, pkg.category)
     for fp in (pkg.package, "%s:%s" % (pkg.package, pkg.slot),
                getattr(pkg, "P", "nonexistent"), getattr(pkg, "PF", "nonexistent")):
         fp = pjoin(base, fp)
         if os.path.exists(fp):
             yield local_source(fp)
Code example #19
File: domain.py Project: chutz/pkgcore
 def get_package_bashrcs(self, pkg):
     for source in self.profile.bashrcs:
         yield source
     for restrict, source in self.bashrcs:
         if restrict.match(pkg):
             yield source
     if not self.ebuild_hook_dir:
         return
     # matching portage behaviour... it's whacked.
     base = pjoin(self.ebuild_hook_dir, pkg.category)
     for fp in (pkg.package, "%s:%s" % (pkg.package, pkg.slot),
                getattr(pkg, "P",
                        "nonexistent"), getattr(pkg, "PF", "nonexistent")):
         fp = pjoin(base, fp)
         if os.path.exists(fp):
             yield local_source(fp)
Code example #20
File: fs.py Project: chutz/pkgcore
    def __init__(self, location, chksums=None, data=None, **kwds):
        """
        :param chksums: dict of checksums, key chksum_type: val hash val.
            See :obj:`snakeoil.chksum`.
        """
        assert 'data_source' not in kwds
        if data is None:
            data = local_source(location)
        kwds["data"] = data

        if chksums is None:
            # this can be problematic offhand if the file is modified
            # but chksum not triggered
            chf_types = kwds.pop("chf_types", None)
            if chf_types is None:
                chf_types = tuple(get_handlers())
            chksums = _LazyChksums(chf_types, self._chksum_callback)
        kwds["chksums"] = chksums
        fsBase.__init__(self, location, **kwds)
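
_LazyChksums defers hashing until a checksum is actually requested. A minimal sketch of that lazy-mapping pattern; this is just the idea behind it, not pkgcore's implementation.

    class LazyChksums(dict):
        def __init__(self, chf_types, compute):
            super().__init__()
            self._types = frozenset(chf_types)
            self._compute = compute  # callable: chf_type -> checksum value

        def __missing__(self, key):
            # compute on first access, then cache like a normal dict entry
            if key not in self._types:
                raise KeyError(key)
            self[key] = value = self._compute(key)
            return value
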
Code example #21
File: domain.py Project: ulm/pkgcore
 def get_package_bashrcs(self, pkg):
     for source in self.profile.bashrcs:
         yield source
     for source in self.bashrcs:
         yield source
     if not os.path.exists(self.ebuild_hook_dir):
         return
     # matching portage behavior... it's whacked.
     base = pjoin(self.ebuild_hook_dir, pkg.category)
     dirs = (
         pkg.package,
         f"{pkg.package}:{pkg.slot}",
         getattr(pkg, "P", None),
         getattr(pkg, "PF", None),
     )
     for fp in filter(None, dirs):
         fp = pjoin(base, fp)
         if os.path.exists(fp):
             yield local_source(fp)
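
The hook-dir loop above checks four candidate names per package, in portage's lookup order. A small sketch that only lists those candidate paths; the names and values are illustrative.

    def bashrc_candidates(base, pkg_name, slot, P, PF):
        names = (pkg_name, f"{pkg_name}:{slot}", P, PF)
        return [f"{base}/{n}" for n in names if n]

    # bashrc_candidates("/etc/portage/env/dev-util", "foo", "0", "foo-1.0", "foo-1.0-r1")
    # -> ['.../foo', '.../foo:0', '.../foo-1.0', '.../foo-1.0-r1']
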
Code example #22
File: domain.py Project: den4ix/pkgcore
def package_env_splitter(basedir, val):
    val = val.split()
    return parse_match(val[0]), local_source(pjoin(basedir, val[1]))
Code example #23
File: domain.py Project: den4ix/pkgcore
    def __init__(self, profile, repositories, vdb, name=None,
                 root='/', prefix='/', incrementals=const.incrementals,
                 triggers=(), **settings):
        # voodoo, unfortunately (so it goes)
        # break this up into chunks once it's stabilized (most of code
        # here has already, but still more to add)
        self._triggers = triggers
        self.name = name

        # prevent critical variables from being changed in make.conf
        for k in profile.profile_only_variables.intersection(settings.keys()):
            del settings[k]

        if 'CHOST' in settings and 'CBUILD' not in settings:
            settings['CBUILD'] = settings['CHOST']

        # if unset, MAKEOPTS defaults to CPU thread count
        if 'MAKEOPTS' not in settings:
            settings['MAKEOPTS'] = '-j%i' % cpu_count()

        # map out sectionname -> config manager immediately.
        repositories_collapsed = [r.collapse() for r in repositories]
        repositories = [r.instantiate() for r in repositories_collapsed]

        self.fetcher = settings.pop("fetcher")

        self.default_licenses_manager = OverlayedLicenses(*repositories)
        vdb_collapsed = [r.collapse() for r in vdb]
        vdb = [r.instantiate() for r in vdb_collapsed]
        self.repos_raw = {
            collapsed.name: repo for (collapsed, repo) in izip(
                repositories_collapsed, repositories)}
        self.repos_raw.update(
            (collapsed.name, repo) for (collapsed, repo) in izip(
                vdb_collapsed, vdb))
        self.repos_raw.pop(None, None)
        if profile.provides_repo is not None:
            self.repos_raw['package.provided'] = profile.provides_repo
            vdb.append(profile.provides_repo)

        self.profile = profile
        pkg_masks, pkg_unmasks, pkg_keywords, pkg_licenses = [], [], [], []
        pkg_use, self.bashrcs = [], []

        self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

        for key, val, action in (
            ("package.mask", pkg_masks, parse_match),
            ("package.unmask", pkg_unmasks, parse_match),
            ("package.keywords", pkg_keywords, package_keywords_splitter),
            ("package.accept_keywords", pkg_keywords, package_keywords_splitter),
            ("package.license", pkg_licenses, package_keywords_splitter),
            ("package.use", pkg_use, package_keywords_splitter),
            ("package.env", self.bashrcs, package_env_splitter),
            ):

            for fp in settings.pop(key, ()):
                try:
                    if key == "package.env":
                        base = self.ebuild_hook_dir
                        if base is None:
                            base = os.path.dirname(fp)
                        action = partial(action, base)
                    for fs_obj in iter_scan(fp, follow_symlinks=True):
                        if not fs_obj.is_reg or '/.' in fs_obj.location:
                            continue
                        val.extend(
                            action(x) for x in
                            iter_read_bash(fs_obj.location, allow_line_cont=True))
                except EnvironmentError as e:
                    if e.errno == errno.ENOENT:
                        raise MissingFile(fp, key)
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))
                except ValueError as e:
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))

        for x in incrementals:
            if isinstance(settings.get(x), basestring):
                settings[x] = tuple(settings[x].split())

        # roughly... all incremental stacks should be interpreted left -> right
        # as such we start with the profile settings, and append ours onto it.
        for k, v in profile.default_env.iteritems():
            if k not in settings:
                settings[k] = v
                continue
            if k in incrementals:
                settings[k] = v + tuple(settings[k])

        # next we finalize incrementals.
        for incremental in incrementals:
            # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
            # negations currently so that pkg iuse induced enablings can be
            # disabled by negations. For example, think of the profile doing
            # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
            # skipped because negations are required for license filtering.
            if incremental not in settings or incremental in ("USE", "ACCEPT_LICENSE"):
                continue
            s = set()
            incremental_expansion(
                s, settings[incremental],
                'While expanding %s ' % (incremental,))
            settings[incremental] = tuple(s)

        # use is collapsed; now stack use_expand.
        use = settings['USE'] = set(optimize_incrementals(
            list(settings.get('USE', ())) + os.environ.get('USE', '').split()))

        self._extend_use_for_features(use, settings.get("FEATURES", ()))

        for u in profile.use_expand:
            v = settings.get(u)
            if v is None:
                continue
            u2 = u.lower()+"_"
            use.update(u2 + x for x in v.split())

        if 'ACCEPT_KEYWORDS' not in settings:
            raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                          "or user config")
        s = set()
        default_keywords = []
        incremental_expansion(
            s, settings['ACCEPT_KEYWORDS'],
            'while expanding ACCEPT_KEYWORDS')
        default_keywords.extend(s)
        settings['ACCEPT_KEYWORDS'] = set(default_keywords)

        self.use = use

        if "ARCH" not in settings:
            raise Failure(
                "No ARCH setting detected from profile, or user config")

        self.arch = self.stable_arch = settings["ARCH"]
        self.unstable_arch = "~%s" % self.arch

        # ~amd64 -> [amd64, ~amd64]
        for x in default_keywords[:]:
            if x.startswith("~"):
                default_keywords.append(x.lstrip("~"))
        default_keywords = unstable_unique(default_keywords + [self.arch])

        accept_keywords = pkg_keywords + list(profile.accept_keywords)
        vfilters = [self.make_keywords_filter(
            self.arch, default_keywords, accept_keywords, profile.keywords,
            incremental="package.keywords" in incrementals)]

        del default_keywords, accept_keywords

        # we can finally close that fricking
        # "DISALLOW NON FOSS LICENSES" bug via this >:)
        master_license = []
        master_license.extend(settings.get('ACCEPT_LICENSE', ()))
        if master_license or pkg_licenses:
            vfilters.append(self.make_license_filter(master_license, pkg_licenses))

        del master_license

        # if it's made it this far...

        self.root = settings["ROOT"] = root
        self.prefix = prefix
        self.settings = ProtectedDict(settings)

        for data in self.settings.get('bashrc', ()):
            source = local_source(data)
            # this is currently local-only so a path check is ok
            # TODO make this more general
            if not os.path.exists(source.path):
                raise Failure(
                    'user-specified bashrc %r does not exist' % (data,))
            self.bashrcs.append((packages.AlwaysTrue, source))

        # stack use stuff first, then profile.
        self.enabled_use = ChunkedDataDict()
        self.enabled_use.add_bare_global(*split_negations(self.use))
        self.enabled_use.merge(profile.pkg_use)
        self.enabled_use.update_from_stream(
            chunked_data(k, *split_negations(v)) for k, v in pkg_use)

        for attr in ('', 'stable_'):
            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'forced_use'))
            c.add_bare_global((), (self.arch,))
            setattr(self, attr + 'forced_use', c)

            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'masked_use'))
            setattr(self, attr + 'disabled_use', c)

        self.repos = []
        self.vdb = []
        self.repos_configured = {}
        self.repos_configured_filtered = {}

        rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

        profile_masks = profile._incremental_masks()
        profile_unmasks = profile._incremental_unmasks()
        repo_masks = {r.repo_id: r._visibility_limiters() for r in repositories}

        for l, repos, filtered in ((self.repos, repositories, True),
                                   (self.vdb, vdb, False)):
            for repo in repos:
                if not repo.configured:
                    pargs = [repo]
                    try:
                        for x in repo.configurables:
                            if x == "domain":
                                pargs.append(self)
                            elif x == "settings":
                                pargs.append(settings)
                            elif x == "profile":
                                pargs.append(profile)
                            else:
                                pargs.append(getattr(self, x))
                    except AttributeError as ae:
                        raise_from(Failure("failed configuring repo '%s': "
                                           "configurable missing: %s" % (repo, ae)))
                    wrapped_repo = repo.configure(*pargs)
                else:
                    wrapped_repo = repo
                key = rev_names.get(repo)
                self.repos_configured[key] = wrapped_repo
                if filtered:
                    config = getattr(repo, 'config', None)
                    masters = getattr(config, 'masters', ())
                    if masters is None:
                        # tough cookies.  If a user has an overlay, no masters
                        # defined, we're not applying the portdir masks.
                        # we do this both since that's annoying, and since
                        # frankly there isn't any good course of action.
                        masters = ()
                    global_masks = [repo_masks.get(master, [(), ()]) for master in masters]
                    global_masks.append(repo_masks[repo.repo_id])
                    global_masks.extend(profile_masks)
                    masks = set()
                    for neg, pos in global_masks:
                        masks.difference_update(neg)
                        masks.update(pos)
                    masks.update(pkg_masks)
                    unmasks = set(chain(pkg_unmasks, *profile_unmasks))
                    filtered = generate_filter(masks, unmasks, *vfilters)
                if filtered:
                    wrapped_repo = visibility.filterTree(wrapped_repo, filtered, True)
                self.repos_configured_filtered[key] = wrapped_repo
                l.append(wrapped_repo)

        self.use_expand_re = re.compile(
            "^(?:[+-])?(%s)_(.*)$" %
            "|".join(x.lower() for x in sorted(profile.use_expand, reverse=True)))
Code example #24
File: livefs.py Project: veelai/pkgcore
    if real_location is None:
        real_location = path
    if stat is None:
        try:
            stat = stat_func(real_location)
        except EnvironmentError as e:
            if stat_func == os.lstat or e.errno != errno.ENOENT:
                raise
            stat = os.lstat(real_location)

    mode = stat.st_mode
    d = {"mtime":stat.st_mtime, "mode":S_IMODE(mode),
         "uid":stat.st_uid, "gid":stat.st_gid}
    if S_ISREG(mode):
        d["size"] = stat.st_size
        d["data"] = local_source(real_location)
        d["dev"] = stat.st_dev
        d["inode"] = stat.st_ino
        if chksum_handlers is not None:
            d["chf_types"] = chksum_handlers
        d.update(overrides)
        return fsFile(path, **d)

    d.update(overrides)
    if S_ISDIR(mode):
        return fsDir(path, **d)
    elif S_ISLNK(mode):
        d["target"] = os.readlink(real_location)
        return fsSymlink(path, **d)
    elif S_ISFIFO(mode):
        return fsFifo(path, **d)
    else:
        major, minor = get_major_minor(stat)
        d["minor"] = minor
        d["major"] = major
        d["mode"] = mode
        return fsDev(path, **d)
Code example #25
File: eclass_cache.py Project: neko259/pkgcore
 def get_eclass(self, eclass):
     o = self.eclasses.get(eclass)
     if o is None:
         return None
     return local_source(o.path)
Code example #26
 def bashrc(self):
     path = pjoin(self.path, "profile.bashrc")
     if os.path.exists(path):
         return local_source(path)
     return None
Code example #27
File: domain.py Project: chutz/pkgcore
def package_env_splitter(basedir, val):
    val = val.split()
    return parse_match(val[0]), local_source(pjoin(basedir, val[1]))
Code example #28
File: test_defaults.py Project: chutz/snakeoil
 def test_data_source_check(self):
     self.assertEqual(self.chf(local_source(self.fn)), self.expected_long)
     self.assertEqual(
         self.chf(data_source(fileutils.readfile_ascii(self.fn))), self.expected_long)
Code example #29
File: repository.py Project: shen390s/pkgcore
 def _get_ebuild_src(self, pkg):
     return local_source(self._get_ebuild_path(pkg), encoding='utf8')
Code example #30
File: domain.py Project: ulm/pkgcore
 def bashrcs(self):
     files = sorted_scan(pjoin(self.config_dir, 'bashrc'), follow_symlinks=True)
     return tuple(local_source(x) for x in files)
Code example #31
File: test_digest.py Project: pombreda/pkgcore
class TestManifestDataSource(TestManifest):
    convert_source = staticmethod(lambda x: local_source(x))
Code example #32
 def test_data_source_check(self):
     assert self.chf(local_source(self.fn)) == self.expected_long
     assert self.chf(data_source(fileutils.readfile_ascii(
         self.fn))) == self.expected_long
Code example #33
File: domain.py Project: chutz/pkgcore
    def __init__(self,
                 profile,
                 repositories,
                 vdb,
                 name=None,
                 root='/',
                 prefix='/',
                 incrementals=const.incrementals,
                 triggers=(),
                 **settings):
        # voodoo, unfortunately (so it goes)
        # break this up into chunks once it's stabilized (most of code
        # here has already, but still more to add)
        self._triggers = triggers

        # prevent critical variables from being changed by the user in make.conf
        for k in set(profile.profile_only_variables).intersection(
                settings.keys()):
            del settings[k]

        if 'CHOST' in settings and 'CBUILD' not in settings:
            settings['CBUILD'] = settings['CHOST']

        # map out sectionname -> config manager immediately.
        repositories_collapsed = [r.collapse() for r in repositories]
        repositories = [r.instantiate() for r in repositories_collapsed]

        self.fetcher = settings.pop("fetcher")

        self.default_licenses_manager = OverlayedLicenses(*repositories)
        vdb_collapsed = [r.collapse() for r in vdb]
        vdb = [r.instantiate() for r in vdb_collapsed]
        self.repos_raw = {
            collapsed.name: repo
            for (collapsed, repo) in izip(repositories_collapsed, repositories)
        }
        self.repos_raw.update(
            (collapsed.name, repo)
            for (collapsed, repo) in izip(vdb_collapsed, vdb))
        self.repos_raw.pop(None, None)
        if profile.provides_repo is not None:
            self.repos_raw['package.provided'] = profile.provides_repo
            vdb.append(profile.provides_repo)

        self.profile = profile
        pkg_maskers, pkg_unmaskers, pkg_keywords, pkg_licenses = [], [], [], []
        pkg_use, self.bashrcs = [], []

        self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

        for key, val, action in (
            ("package.mask", pkg_maskers, parse_match),
            ("package.unmask", pkg_unmaskers, parse_match),
            ("package.keywords", pkg_keywords, package_keywords_splitter),
            ("package.accept_keywords", pkg_keywords,
             package_keywords_splitter),
            ("package.license", pkg_licenses, package_keywords_splitter),
            ("package.use", pkg_use, package_keywords_splitter),
            ("package.env", self.bashrcs, package_env_splitter),
        ):

            for fp in settings.pop(key, ()):
                try:
                    if key == "package.env":
                        base = self.ebuild_hook_dir
                        if base is None:
                            base = os.path.dirname(fp)
                        action = partial(action, base)
                    for fs_obj in iter_scan(fp, follow_symlinks=True):
                        if not fs_obj.is_reg or '/.' in fs_obj.location:
                            continue
                        val.extend(
                            action(x) for x in iter_read_bash(fs_obj.location))
                except EnvironmentError as e:
                    if e.errno == errno.ENOENT:
                        raise MissingFile(fp, key)
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))
                except ValueError as e:
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))

        self.name = name
        settings.setdefault("PKGCORE_DOMAIN", name)
        for x in incrementals:
            if isinstance(settings.get(x), basestring):
                settings[x] = tuple(settings[x].split())

        # roughly... all incremental stacks should be interpreted left -> right
        # as such we start with the profile settings, and append ours onto it.
        for k, v in profile.default_env.iteritems():
            if k not in settings:
                settings[k] = v
                continue
            if k in incrementals:
                settings[k] = v + tuple(settings[k])

        # next we finalize incrementals.
        for incremental in incrementals:
            # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
            # negations currently so that pkg iuse induced enablings can be
            # disabled by negations. For example, think of the profile doing
            # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
            # skipped because negations are required for license filtering.
            if incremental not in settings or incremental in (
                    "USE", "ACCEPT_LICENSE"):
                continue
            s = set()
            incremental_expansion(s, settings[incremental],
                                  'While expanding %s ' % (incremental, ))
            settings[incremental] = tuple(s)

        # use is collapsed; now stack use_expand.
        use = settings['USE'] = set(
            optimize_incrementals(settings.get("USE", ())))

        self._extend_use_for_features(use, settings.get("FEATURES", ()))

        self.use_expand = frozenset(profile.use_expand)
        self.use_expand_hidden = frozenset(profile.use_expand_hidden)
        for u in profile.use_expand:
            v = settings.get(u)
            if v is None:
                continue
            u2 = u.lower() + "_"
            use.update(u2 + x for x in v.split())

        if 'ACCEPT_KEYWORDS' not in settings:
            raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                          "or user config")
        s = set()
        default_keywords = []
        incremental_expansion(s, settings['ACCEPT_KEYWORDS'],
                              'while expanding ACCEPT_KEYWORDS')
        default_keywords.extend(s)
        settings['ACCEPT_KEYWORDS'] = set(default_keywords)

        self.use = use

        if "ARCH" not in settings:
            raise Failure(
                "No ARCH setting detected from profile, or user config")

        self.arch = self.stable_arch = settings["ARCH"]
        self.unstable_arch = "~%s" % self.arch

        # ~amd64 -> [amd64, ~amd64]
        for x in default_keywords[:]:
            if x.startswith("~"):
                default_keywords.append(x.lstrip("~"))
        default_keywords = unstable_unique(default_keywords + [self.arch])

        accept_keywords = pkg_keywords + list(profile.accept_keywords)
        vfilters = [
            self.make_keywords_filter(self.arch,
                                      default_keywords,
                                      accept_keywords,
                                      profile.keywords,
                                      incremental="package.keywords"
                                      in incrementals)
        ]

        del default_keywords, accept_keywords

        # we can finally close that fricking
        # "DISALLOW NON FOSS LICENSES" bug via this >:)
        master_license = []
        master_license.extend(settings.get('ACCEPT_LICENSE', ()))
        if master_license or pkg_licenses:
            vfilters.append(
                self.make_license_filter(master_license, pkg_licenses))

        del master_license

        # if it's made it this far...

        self.root = settings["ROOT"] = root
        self.prefix = prefix
        self.settings = ProtectedDict(settings)

        for data in self.settings.get('bashrc', ()):
            source = local_source(data)
            # this is currently local-only so a path check is ok
            # TODO make this more general
            if not os.path.exists(source.path):
                raise Failure('user-specified bashrc %r does not exist' %
                              (data, ))
            self.bashrcs.append((packages.AlwaysTrue, source))

        # stack use stuff first, then profile.
        self.enabled_use = ChunkedDataDict()
        self.enabled_use.add_bare_global(*split_negations(self.use))
        self.enabled_use.merge(profile.pkg_use)
        self.enabled_use.update_from_stream(
            chunked_data(k, *split_negations(v)) for k, v in pkg_use)

        for attr in ('', 'stable_'):
            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'forced_use'))
            c.add_bare_global((), (self.arch, ))
            setattr(self, attr + 'forced_use', c)

            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'masked_use'))
            setattr(self, attr + 'disabled_use', c)

        self.repos = []
        self.vdb = []
        self.repos_configured = {}
        self.repos_configured_filtered = {}

        rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

        profile_masks = profile._incremental_masks()
        profile_unmasks = profile._incremental_unmasks()
        repo_masks = {
            r.repo_id: r._visibility_limiters()
            for r in repositories
        }

        for l, repos, filtered in ((self.repos, repositories, True),
                                   (self.vdb, vdb, False)):
            for repo in repos:
                if not repo.configured:
                    pargs = [repo]
                    try:
                        for x in repo.configurables:
                            if x == "domain":
                                pargs.append(self)
                            elif x == "settings":
                                pargs.append(settings)
                            elif x == "profile":
                                pargs.append(profile)
                            else:
                                pargs.append(getattr(self, x))
                    except AttributeError as ae:
                        raise_from(
                            Failure("failed configuring repo '%s': "
                                    "configurable missing: %s" % (repo, ae)))
                    wrapped_repo = repo.configure(*pargs)
                else:
                    wrapped_repo = repo
                key = rev_names.get(repo)
                self.repos_configured[key] = wrapped_repo
                if filtered:
                    config = getattr(repo, 'config', None)
                    masters = getattr(config, 'masters', ())
                    if masters is None:
                        # tough cookies.  If a user has an overlay, no masters
                        # defined, we're not applying the portdir masks.
                        # we do this both since that's annoying, and since
                        # frankly there isn't any good course of action.
                        masters = ()
                    masks = [
                        repo_masks.get(master, [(), ()]) for master in masters
                    ]
                    masks.append(repo_masks[repo.repo_id])
                    masks.extend(profile_masks)
                    mask_atoms = set()
                    for neg, pos in masks:
                        mask_atoms.difference_update(neg)
                        mask_atoms.update(pos)
                    mask_atoms.update(pkg_maskers)
                    unmask_atoms = set(chain(pkg_unmaskers, *profile_unmasks))
                    filtered = self.generate_filter(
                        generate_masking_restrict(mask_atoms),
                        generate_unmasking_restrict(unmask_atoms), *vfilters)
                if filtered:
                    wrapped_repo = visibility.filterTree(
                        wrapped_repo, filtered, True)
                self.repos_configured_filtered[key] = wrapped_repo
                l.append(wrapped_repo)

        if profile.virtuals:
            l = [
                x for x in (getattr(v, 'old_style_virtuals', None)
                            for v in self.vdb) if x is not None
            ]
            profile_repo = profile.make_virtuals_repo(
                multiplex.tree(*repositories), *l)
            self.repos_raw["profile virtuals"] = profile_repo
            self.repos_configured_filtered["profile virtuals"] = profile_repo
            self.repos_configured["profile virtuals"] = profile_repo
            self.repos = [profile_repo] + self.repos

        self.use_expand_re = re.compile(
            "^(?:[+-])?(%s)_(.*)$" %
            "|".join(x.lower() for x in sorted(self.use_expand, reverse=True)))
Code example #34
    def add_data(self, domain):
        # error checking?
        dirpath = self.tmp_write_path
        ensure_dirs(dirpath, mode=0o755, minimal=True)
        update_mtime(self.repo.location)
        rewrite = self.repo._metadata_rewrites
        for k in self.new_pkg.tracked_attributes:
            if k == "contents":
                v = ContentsFile(pjoin(dirpath, "CONTENTS"),
                                 mutable=True,
                                 create=True)
                v.update(self.new_pkg.contents)
                v.flush()
            elif k == "environment":
                data = compression.compress_data(
                    'bzip2',
                    self.new_pkg.environment.bytes_fileobj().read())
                with open(pjoin(dirpath, "environment.bz2"), "wb") as f:
                    f.write(data)
                del data
            else:
                v = getattr(self.new_pkg, k)
                if k in ('bdepend', 'depend', 'rdepend', 'idepend'):
                    s = v.slotdep_str(domain)
                elif k == 'user_patches':
                    s = '\n'.join(chain.from_iterable(files for _, files in v))
                elif not isinstance(v, str):
                    try:
                        s = ' '.join(v)
                    except TypeError:
                        s = str(v)
                else:
                    s = v
                with open(pjoin(dirpath, rewrite.get(k, k.upper())), "w",
                          32768) as f:
                    if s:
                        s += '\n'
                    f.write(s)

        # ebuild_data is the actual ebuild- no point in holding onto
        # it for built ebuilds, but if it's there, we store it.
        o = getattr(self.new_pkg, "ebuild", None)
        if o is None:
            logger.warning(
                "doing install/replace op, "
                "but source package doesn't provide the actual ebuild data.  "
                "Creating an empty file")
            o = ''
        else:
            o = o.bytes_fileobj().read()
        # XXX lil hackish accessing PF
        with open(pjoin(dirpath, self.new_pkg.PF + ".ebuild"), "wb") as f:
            f.write(o)

        # install NEEDED and NEEDED.ELF.2 files from tmpdir if they exist
        pkg_tmpdir = normpath(
            pjoin(domain.pm_tmpdir, self.new_pkg.category, self.new_pkg.PF,
                  'temp'))
        for f in ['NEEDED', 'NEEDED.ELF.2']:
            fp = pjoin(pkg_tmpdir, f)
            if os.path.exists(fp):
                local_source(fp).transfer_to_path(pjoin(dirpath, f))

        # XXX finally, hack to keep portage from doing stupid shit.
        # relies on counter to discern what to punt during
        # merging/removal, we don't need that crutch however. problem?
        # No counter file, portage wipes all of our merges (friendly
        # bugger).
        # need to get zmedico to localize the counter
        # creation/counting to per CP for this trick to behave
        # perfectly.
        with open(pjoin(dirpath, "COUNTER"), "w") as f:
            f.write(str(int(time.time())))

        # finally, we mark who made this.
        with open(pjoin(dirpath, "PKGMANAGER"), "w") as f:
            f.write(get_version(__title__, __file__))
        return True