Example #1
def _internal_offset_iter_scan(path, chksum_handlers, offset, stat_func=os.lstat,
                               hidden=True, backup=True):
    offset = normpath(offset)
    path = normpath(path)
    dirs = collections.deque([path[len(offset):]])
    if dirs[0]:
        yield gen_obj(dirs[0], chksum_handlers=chksum_handlers,
            stat_func=stat_func)

    sep = os.path.sep
    while dirs:
        base = dirs.popleft()
        real_base = pjoin(offset, base.lstrip(sep))
        base = base.rstrip(sep) + sep
        for x in listdir(real_base):
            if not hidden and x.startswith('.'):
                continue
            if not backup and x.endswith('~'):
                continue
            path = pjoin(base, x)
            obj = gen_obj(path, chksum_handlers=chksum_handlers,
                        real_location=pjoin(real_base, x),
                        stat_func=os.lstat)
            yield obj
            if obj.is_dir:
                dirs.append(path)
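
A stdlib-only sketch of the offset split used above: both arguments are
normalized first so that the len(offset) slice boundary lines up (here with
os.path.normpath standing in for snakeoil's normpath; split_offset is an
illustrative helper, not pkgcore API):

import os

def split_offset(path, offset):
    # normalize both sides first so len(offset) is a valid slice point
    offset = os.path.normpath(offset)
    path = os.path.normpath(path)
    relative = path[len(offset):]
    real = os.path.join(offset, relative.lstrip(os.sep))
    return relative, real

print(split_offset("/mnt/root/etc/env.d", "/mnt/root"))
# -> ('/etc/env.d', '/mnt/root/etc/env.d')
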
Example #2
def gen_config_protect_filter(offset, extra_protects=(), extra_disables=()):
    collapsed_d, inc, colon = collapse_envd(pjoin(offset, "etc/env.d"))
    collapsed_d.setdefault("CONFIG_PROTECT", []).extend(extra_protects)
    collapsed_d.setdefault("CONFIG_PROTECT_MASK", []).extend(extra_disables)

    r = [
        values.StrGlobMatch(normpath(x).rstrip("/") + "/")
        for x in set(stable_unique(collapsed_d["CONFIG_PROTECT"] + ["/etc"]))
    ]
    if len(r) > 1:
        r = values.OrRestriction(*r)
    else:
        r = r[0]
    neg = stable_unique(collapsed_d["CONFIG_PROTECT_MASK"])
    if neg:
        if len(neg) == 1:
            r2 = values.StrGlobMatch(normpath(neg[0]).rstrip("/") + "/",
                                     negate=True)
        else:
            r2 = values.OrRestriction(
                negate=True,
                *[
                    values.StrGlobMatch(normpath(x).rstrip("/") + "/")
                    for x in set(neg)
                ])
        r = values.AndRestriction(r, r2)
    return r
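
The restrictions built here reduce to prefix tests against directories
normalized to exactly one trailing "/", so "/etc" and "/etc/" collapse to
the same entry. A rough stdlib approximation (hedged: values.StrGlobMatch
and the Or/And restrictions are modeled with plain str.startswith):

import os

def make_protect_filter(protects, masks=()):
    norm = lambda d: os.path.normpath(d).rstrip("/") + "/"
    protected = tuple(norm(d) for d in protects)
    masked = tuple(norm(d) for d in masks)
    def is_protected(path):
        # protected unless a CONFIG_PROTECT_MASK entry also covers it
        return path.startswith(protected) and not path.startswith(masked)
    return is_protected

f = make_protect_filter(["/etc"], masks=["/etc/env.d"])
print(f("/etc/fstab"), f("/etc/env.d/00basic"))   # True False
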
Example #3
 def test_from_abspath(self):
     self.mk_profiles({"name": "profiles"}, {"name": "profiles/1"})
     base = pjoin(self.dir, "profiles")
     p = self.kls.from_abspath(pjoin(base, "1"))
     self.assertNotEqual(p, None)
     self.assertEqual(normpath(p.basepath), normpath(base))
     self.assertEqual(normpath(p.profile), normpath(pjoin(base, "1")))
Example #4
def _internal_offset_iter_scan(path,
                               chksum_handlers,
                               offset,
                               stat_func=os.lstat,
                               hidden=True,
                               backup=True):
    offset = normpath(offset)
    path = normpath(path)
    dirs = collections.deque([path[len(offset):]])
    if dirs[0]:
        yield gen_obj(dirs[0],
                      chksum_handlers=chksum_handlers,
                      stat_func=stat_func)

    sep = os.path.sep
    while dirs:
        base = dirs.popleft()
        real_base = pjoin(offset, base.lstrip(sep))
        base = base.rstrip(sep) + sep
        for x in listdir(real_base):
            if not hidden and x.startswith('.'):
                continue
            if not backup and x.endswith('~'):
                continue
            path = pjoin(base, x)
            obj = gen_obj(path,
                          chksum_handlers=chksum_handlers,
                          real_location=pjoin(real_base, x),
                          stat_func=os.lstat)
            yield obj
            if obj.is_dir:
                dirs.append(path)
Example #5
 def _cmd_implementation_sanity_check(self, domain):
     pkg = self.pkg
     eapi = pkg.eapi_obj
     if eapi.options.has_required_use:
         use = pkg.use
         for node in pkg.required_use:
             if not node.match(use):
                 print(
                     textwrap.dedent("""
                     REQUIRED_USE requirement wasn't met
                     Failed to match: {}
                     from: {}
                     for USE: {}
                     pkg: {}
                     """.format(node, pkg.required_use, " ".join(use),
                                pkg.cpvstr)))
                 return False
     if 'pretend' not in pkg.mandatory_phases:
         return True
     commands = None
     if not pkg.built:
         commands = {
             "request_inherit": partial(inherit_handler, self._eclass_cache)
         }
     env = expected_ebuild_env(pkg)
     tmpdir = normpath(domain._get_tempspace())
     builddir = pjoin(tmpdir, env["CATEGORY"], env["PF"])
     pkg_tmpdir = normpath(pjoin(builddir, "temp"))
     ensure_dirs(pkg_tmpdir, mode=0o770, gid=portage_gid, minimal=True)
     env["ROOT"] = domain.root
     env["T"] = pkg_tmpdir
     try:
         logger.debug("running ebuild pkg_pretend sanity check for %s",
                      pkg.cpvstr)
         start = time.time()
         ret = run_generic_phase(pkg,
                                 "pretend",
                                 env,
                                 userpriv=True,
                                 sandbox=True,
                                 fakeroot=False,
                                 extra_handlers=commands)
         logger.debug("pkg_pretend sanity check for %s took %2.2f seconds",
                      pkg.cpvstr,
                      time.time() - start)
         return ret
     except format.GenericBuildError as e:
         logger.error(
             "pkg_pretend sanity check for %s failed with exception %r" %
             (pkg.cpvstr, e))
         return False
     finally:
         shutil.rmtree(builddir)
         # try to wipe the cat dir; if not empty, ignore it
         try:
             os.rmdir(os.path.dirname(builddir))
         except EnvironmentError as e:
             if e.errno != errno.ENOTEMPTY:
                 raise
Example #6
def find_domains_from_path(sections, path):
    path = normpath(abspath(path))
    for name, domain in sections.items():
        root = getattr(domain, 'root', None)
        if root is None:
            continue
        root = normpath(abspath(root))
        if root == path:
            yield name, domain
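
The comparison works because abspath plus normpath canonicalizes both sides
lexically, without touching the filesystem; "/var/../etc/" and "/etc"
compare equal. A minimal illustration with plain os.path:

from os.path import abspath, normpath

canon = lambda p: normpath(abspath(p))
print(canon("/var/../etc/") == canon("/etc"))   # True
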
Example #7
 def _cmd_implementation_sanity_check(self, domain):
     pkg = self.pkg
     eapi = pkg.eapi_obj
     if eapi.options.has_required_use:
         use = pkg.use
         for node in pkg.required_use:
             if not node.match(use):
                 print(textwrap.dedent(
                     """
                     REQUIRED_USE requirement wasn't met
                     Failed to match: {}
                     from: {}
                     for USE: {}
                     pkg: {}
                     """.format(node, pkg.required_use, " ".join(use), pkg.cpvstr)
                 ))
                 return False
     if 'pretend' not in pkg.mandatory_phases:
         return True
     commands = None
     if not pkg.built:
         commands = {"request_inherit": partial(inherit_handler, self._eclass_cache)}
     env = expected_ebuild_env(pkg)
     tmpdir = normpath(domain._get_tempspace())
     builddir = pjoin(tmpdir, env["CATEGORY"], env["PF"])
     pkg_tmpdir = normpath(pjoin(builddir, "temp"))
     ensure_dirs(pkg_tmpdir, mode=0o770, gid=portage_gid, minimal=True)
     env["ROOT"] = domain.root
     env["T"] = pkg_tmpdir
     try:
         logger.debug("running ebuild pkg_pretend sanity check for %s", pkg.cpvstr)
         start = time.time()
         ret = run_generic_phase(pkg, "pretend", env, userpriv=True, sandbox=True,
                                 fakeroot=False, extra_handlers=commands)
         logger.debug("pkg_pretend sanity check for %s took %2.2f seconds",
             pkg.cpvstr, time.time() - start)
         return ret
     except format.GenericBuildError as e:
         logger.error("pkg_pretend sanity check for %s failed with exception %r"
             % (pkg.cpvstr, e))
         return False
     finally:
         shutil.rmtree(builddir)
         # try to wipe the cat dir; if not empty, ignore it
         try:
             os.rmdir(os.path.dirname(builddir))
         except EnvironmentError as e:
             if e.errno != errno.ENOTEMPTY:
                 raise
Example #8
def _internal_iter_scan(path,
                        chksum_handlers,
                        stat_func=os.lstat,
                        hidden=True,
                        backup=True):
    dirs = collections.deque([normpath(path)])
    obj = gen_obj(dirs[0],
                  chksum_handlers=chksum_handlers,
                  stat_func=stat_func)
    yield obj
    if not obj.is_dir:
        return
    while dirs:
        base = dirs.popleft()
        for x in listdir(base):
            if not hidden and x.startswith('.'):
                continue
            if not backup and x.endswith('~'):
                continue
            path = pjoin(base, x)
            obj = gen_obj(path,
                          chksum_handlers=chksum_handlers,
                          real_location=path,
                          stat_func=stat_func)
            yield obj
            if obj.is_dir:
                dirs.append(path)
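
The scan above is an iterative breadth-first walk: a deque is seeded with
the normalized root and directories are appended as they are yielded. The
same shape in plain stdlib form (gen_obj and the fs object types are
pkgcore internals, so this sketch just yields paths):

import collections
import os

def iter_paths(root, hidden=True, backup=True):
    dirs = collections.deque([os.path.normpath(root)])
    while dirs:
        base = dirs.popleft()
        for x in os.listdir(base):
            if not hidden and x.startswith('.'):
                continue
            if not backup and x.endswith('~'):
                continue
            path = os.path.join(base, x)
            yield path
            # descend into real directories only, mirroring the os.lstat default
            if os.path.isdir(path) and not os.path.islink(path):
                dirs.append(path)
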
Example #9
    def _add_profile(self, profile_override=None):
        if profile_override is None:
            profile = self._find_profile_link()
        else:
            profile = normpath(abspath(profile_override))
            if not os.path.exists(profile):
                raise errors.ComplexInstantiationError(f"{profile} doesn't exist")

        paths = profiles.OnDiskProfile.split_abspath(profile)
        if paths is None:
            raise errors.ComplexInstantiationError(
                '%s expands to %s, but no profile detected' %
                (pjoin(self.dir, 'make.profile'), profile))

        user_profile_path = pjoin(self.dir, 'profile')
        if os.path.isdir(user_profile_path):
            self["profile"] = basics.AutoConfigSection({
                "class": "pkgcore.ebuild.profiles.UserProfile",
                "parent_path": paths[0],
                "parent_profile": paths[1],
                "user_path": user_profile_path,
            })
        else:
            self["profile"] = basics.AutoConfigSection({
                "class": "pkgcore.ebuild.profiles.OnDiskProfile",
                "basepath": paths[0],
                "profile": paths[1],
            })
Example #10
def add_profile(config, config_dir, profile_override=None):
    if profile_override is None:
        profile = _find_profile_link(config_dir)
    else:
        profile = normpath(abspath(profile_override))
        if not os.path.exists(profile):
            raise_from(
                errors.ComplexInstantiationError("%s doesn't exist" %
                                                 (profile, )))

    paths = profiles.OnDiskProfile.split_abspath(profile)
    if paths is None:
        raise errors.ComplexInstantiationError(
            '%s expands to %s, but no profile detected' %
            (pjoin(config_dir, 'make.profile'), profile))

    user_profile_path = pjoin(config_dir, 'profile')
    if os.path.isdir(user_profile_path):
        config["profile"] = basics.AutoConfigSection({
            "class":
            "pkgcore.ebuild.profiles.UserProfile",
            "parent_path":
            paths[0],
            "parent_profile":
            paths[1],
            "user_path":
            user_profile_path,
        })
    else:
        config["profile"] = basics.AutoConfigSection({
            "class": "pkgcore.ebuild.profiles.OnDiskProfile",
            "basepath": paths[0],
            "profile": paths[1],
        })
Example #11
    def _add_profile(self, profile_override=None):
        if profile_override is None:
            profile = self._find_profile_link()
        else:
            profile = normpath(abspath(profile_override))
            if not os.path.exists(profile):
                raise config_errors.UserConfigError(
                    f"{profile!r} doesn't exist")

        paths = profiles.OnDiskProfile.split_abspath(profile)
        if paths is None:
            raise config_errors.UserConfigError(
                '%r expands to %r, but no profile detected' %
                (pjoin(self.dir, 'make.profile'), profile))

        user_profile_path = pjoin(self.dir, 'profile')
        if os.path.isdir(user_profile_path):
            self["profile"] = basics.AutoConfigSection({
                "class":
                "pkgcore.ebuild.profiles.UserProfile",
                "parent_path":
                paths[0],
                "parent_profile":
                paths[1],
                "user_path":
                user_profile_path,
            })
        else:
            self["profile"] = basics.AutoConfigSection({
                "class": "pkgcore.ebuild.profiles.OnDiskProfile",
                "basepath": paths[0],
                "profile": paths[1],
            })
Example #12
def add_profile(config, base_path, user_profile_path=None, profile_override=None):
    if profile_override is None:
        profile = _find_profile_link(base_path)
    else:
        profile = normpath(abspath(profile_override))
        if not os.path.exists(profile):
            raise_from(errors.ComplexInstantiationError(
                "%s doesn't exist" % (profile,)))

    paths = profiles.OnDiskProfile.split_abspath(profile)
    if paths is None:
        raise errors.ComplexInstantiationError(
            '%s expands to %s, but no profile detected' %
            (pjoin(base_path, 'make.profile'), profile))

    if os.path.isdir(user_profile_path):
        config["profile"] = basics.AutoConfigSection({
            "class": "pkgcore.ebuild.profiles.UserProfile",
            "parent_path": paths[0],
            "parent_profile": paths[1],
            "user_path": user_profile_path,
        })
    else:
        config["profile"] = basics.AutoConfigSection({
            "class": "pkgcore.ebuild.profiles.OnDiskProfile",
            "basepath": paths[0],
            "profile": paths[1],
        })
Example #13
    def set_op_vars(self, tmp_offset):
        # don't fool with this, without fooling with setup.
        self.tmpdir = normpath(self.domain._get_tempspace())
        if tmp_offset:
            self.tmpdir = pjoin(self.tmpdir, tmp_offset.strip(os.path.sep))

        self.builddir = pjoin(self.tmpdir, self.env["CATEGORY"], self.env["PF"])
        for x, y in (("T", "temp"), ("WORKDIR", "work"), ("D", "image"), ("HOME", "homedir")):
            self.env[x] = normpath(pjoin(self.builddir, y))
        self.env["D"] += "/"
        self.env["IMAGE"] = self.env["D"]
        self.env["PORTAGE_LOGFILE"] = normpath(pjoin(self.env["T"], "build.log"))

        # XXX: Note that this is just EAPI 3 support, not yet prefix
        # full awareness.
        if self.prefix_mode:
            self.env["ED"] = normpath(pjoin(self.env["D"], self.prefix.lstrip("/"))) + "/"
Example #14
    def __init__(self,
                 mode,
                 tempdir,
                 hooks,
                 csets,
                 preserves,
                 observer,
                 offset=None,
                 disable_plugins=False,
                 parallelism=None):
        if observer is None:
            observer = observer_mod.repo_observer(observer_mod.null_output)
        self.observer = observer
        self.mode = mode
        if tempdir is not None:
            tempdir = normpath(tempdir) + '/'
        self.tempdir = tempdir

        self.parallelism = parallelism if parallelism is not None else cpu_count()
        self.hooks = ImmutableDict((x, []) for x in hooks)

        self.preserve_csets = []
        self.cset_sources = {}
        # instantiate these separately so their values are preserved
        self.preserved_csets = LazyValDict(self.preserve_csets,
                                           self._get_cset_source)
        for k, v in csets.items():
            if isinstance(v, str):
                v = getattr(self, v, v)
            if not callable(v):
                raise TypeError(
                    "cset values must be either the string name of "
                    f"existing methods, or callables (got {v})")

            if k in preserves:
                self.add_preserved_cset(k, v)
            else:
                self.add_cset(k, v)

        if offset is None:
            offset = "/"
        self.offset = offset

        if not disable_plugins:
            # merge in default triggers first.
            for trigger in get_plugins('triggers'):
                t = trigger()
                t.register(self)

        # merge in overrides
        for hook, triggers in hooks.items():
            for trigger in triggers:
                self.add_trigger(hook, trigger)

        self.regenerate_csets()
        for x in hooks:
            setattr(self, x, partial(self.execute_hook, x))
Example #15
 def _find_profile_link(self):
     make_profile = pjoin(self.dir, 'make.profile')
     try:
         return normpath(abspath(
             pjoin(self.dir, os.readlink(make_profile))))
     except EnvironmentError as e:
         if e.errno in (errno.ENOENT, errno.EINVAL):
             raise errors.ComplexInstantiationError(
                 f"{make_profile} must be a symlink pointing to a real target") from e
         raise errors.ComplexInstantiationError(
             f"{make_profile}: unexpected error- {e.strerror}") from e
Example #16
File: fs.py Project: chutz/pkgcore
    def __init__(self, location, strict=True, **d):

        d["location"] = normpath(location)

        s = object.__setattr__
        if strict:
            for k in self.__attrs__:
                s(self, k, d[k])
        else:
            for k, v in d.iteritems():
                s(self, k, v)
Example #17
    def __init__(self, location, strict=True, **d):

        d["location"] = normpath(location)

        s = object.__setattr__
        if strict:
            for k in self.__attrs__:
                s(self, k, d[k])
        else:
            for k, v in d.items():
                s(self, k, v)
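
object.__setattr__ is called directly here because instances of these fs
classes are otherwise immutable. A compact sketch of the pattern (Frozen
and its __attrs__ are hypothetical, assuming the class blocks ordinary
assignment):

from os.path import normpath

class Frozen:
    __attrs__ = ("location", "mode")

    def __init__(self, location, strict=True, **d):
        d["location"] = normpath(location)
        s = object.__setattr__
        if strict:
            # raises KeyError if a required attribute is missing
            for k in self.__attrs__:
                s(self, k, d[k])
        else:
            for k, v in d.items():
                s(self, k, v)

    def __setattr__(self, name, value):
        raise AttributeError(f"{self.__class__.__name__} is immutable")

obj = Frozen("/etc//passwd", mode=0o644)
print(obj.location)   # /etc/passwd
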
Example #18
 def init_distfiles_env(self):
     # cvs/svn ebuilds need to die.
     distdir_write = self.domain.fetcher.get_storage_path()
     if distdir_write is None:
         raise format.GenericBuildError("no usable distdir was found "
             "for PORTAGE_ACTUAL_DISTDIR from fetcher %s" % self.domain.fetcher)
     self.env["PORTAGE_ACTUAL_DISTDIR"] = distdir_write
     self.env["DISTDIR"] = normpath(
         pjoin(self.builddir, "distdir"))
     for x in ("PORTAGE_ACTUAL_DISTDIR", "DISTDIR"):
         self.env[x] = os.path.realpath(self.env[x]).rstrip("/") + "/"
Example #19
    def set_op_vars(self, tmp_offset):
        # don't fool with this, without fooling with setup.
        self.tmpdir = normpath(self.domain._get_tempspace())
        if tmp_offset:
            self.tmpdir = pjoin(self.tmpdir, tmp_offset.strip(os.path.sep))

        self.builddir = pjoin(self.tmpdir, self.env["CATEGORY"],
                              self.env["PF"])
        for x, y in (("T", "temp"), ("WORKDIR", "work"), ("D", "image"),
                     ("HOME", "homedir")):
            self.env[x] = normpath(pjoin(self.builddir, y))
        self.env["D"] += "/"
        self.env["IMAGE"] = self.env["D"]
        self.env["PORTAGE_LOGFILE"] = normpath(
            pjoin(self.env["T"], "build.log"))

        # XXX: note that this is just eapi3 support, not yet prefix
        # full awareness.
        if self.prefix_mode:
            self.env["ED"] = normpath(pjoin(self.env["D"], self.prefix)) + "/"
Example #20
    def __init__(self, mode, tempdir, hooks, csets, preserves, observer,
                 offset=None, disable_plugins=False, parallelism=None):
        if observer is None:
            observer = observer_mod.repo_observer(observer_mod.null_output)
        self.observer = observer
        self.mode = mode
        if tempdir is not None:
            tempdir = normpath(tempdir) + '/'
        self.tempdir = tempdir

        if parallelism is None:
            parallelism = get_proc_count()

        self.parallelism = parallelism

        self.hooks = ImmutableDict((x, []) for x in hooks)

        self.preserve_csets = []
        self.cset_sources = {}
        # instantiate these separately so their values are preserved
        self.preserved_csets = LazyValDict(
            self.preserve_csets, self._get_cset_source)
        for k, v in csets.iteritems():
            if isinstance(v, basestring):
                v = getattr(self, v, v)
            if not callable(v):
                raise TypeError(
                    "cset values must be either the string name of "
                    "existing methods, or callables (got %s)" % v)

            if k in preserves:
                self.add_preserved_cset(k, v)
            else:
                self.add_cset(k, v)

        if offset is None:
            offset = "/"
        self.offset = offset

        if not disable_plugins:
            # merge in default triggers first.
            for trigger in get_plugins('triggers'):
                t = trigger()
                t.register(self)

        # merge in overrides
        for hook, triggers in hooks.iteritems():
            for trigger in triggers:
                self.add_trigger(hook, trigger)

        self.regenerate_csets()
        for x in hooks:
            setattr(self, x, partial(self.execute_hook, x))
Example #21
def add_fetcher(config, make_conf):
    fetchcommand = make_conf.pop("FETCHCOMMAND")
    resumecommand = make_conf.pop("RESUMECOMMAND", fetchcommand)

    fetcher_dict = {
        "class": "pkgcore.fetch.custom.fetcher",
        "distdir": normpath(os.environ.get("DISTDIR", make_conf.pop("DISTDIR"))),
        "command": fetchcommand,
        "resume_command": resumecommand,
        "attempts": make_conf.pop("FETCH_ATTEMPTS", '10'),
    }
    config["fetcher"] = basics.AutoConfigSection(fetcher_dict)
Example #22
 def _init_distfiles_env(self):
     # TODO: PORTAGE_ACTUAL_DISTDIR usage by vcs eclasses needs to be killed off
     distdir_write = self.domain.fetcher.get_storage_path()
     if distdir_write is None:
         raise format.GenericBuildError(
             "no usable distdir was found "
             f"for PORTAGE_ACTUAL_DISTDIR from fetcher {self.domain.fetcher}")
     self.env["PORTAGE_ACTUAL_DISTDIR"] = distdir_write
     self.env["DISTDIR"] = normpath(
         pjoin(self.builddir, "distdir"))
     for x in ("PORTAGE_ACTUAL_DISTDIR", "DISTDIR"):
         self.env[x] = os.path.realpath(self.env[x]).rstrip(os.sep) + os.sep
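
Both DISTDIR variables end up fully resolved (symlinks included, via
os.path.realpath) with exactly one trailing separator. That normalization
in isolation:

import os

def canon_dir(path):
    # resolve symlinks, then force exactly one trailing os.sep
    return os.path.realpath(path).rstrip(os.sep) + os.sep

print(canon_dir("/usr//share/"))   # /usr/share/ (absent any symlinks)
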
Example #23
 def _find_profile_link(self):
     make_profile = pjoin(self.dir, 'make.profile')
     try:
         return normpath(abspath(pjoin(self.dir,
                                       os.readlink(make_profile))))
     except EnvironmentError as e:
         if e.errno in (errno.ENOENT, errno.EINVAL):
             raise config_errors.UserConfigError(
                 f"{make_profile!r} must be a symlink pointing to a real target"
             ) from e
         raise config_errors.ComplexInstantiationError(
             f"{make_profile!r}: unexpected error- {e.strerror}") from e
Example #25
def _find_profile_link(config_dir):
    make_profile = pjoin(config_dir, 'make.profile')
    try:
        return normpath(abspath(
            pjoin(config_dir, os.readlink(make_profile))))
    except EnvironmentError as oe:
        if oe.errno in (errno.ENOENT, errno.EINVAL):
            raise_from(errors.ComplexInstantiationError(
                "%s must be a symlink pointing to a real target" % (
                    make_profile,)))
        raise_from(errors.ComplexInstantiationError(
            "%s: unexpected error- %s" % (make_profile, oe.strerror)))
Example #28
def gen_config_protect_filter(offset, extra_protects=(), extra_disables=()):
    collapsed_d, inc, colon = collapse_envd(pjoin(offset, "etc/env.d"))
    collapsed_d.setdefault("CONFIG_PROTECT", []).extend(extra_protects)
    collapsed_d.setdefault("CONFIG_PROTECT_MASK", []).extend(extra_disables)

    r = [values.StrGlobMatch(normpath(x).rstrip("/") + "/")
         for x in set(stable_unique(collapsed_d["CONFIG_PROTECT"] + ["/etc"]))]
    if len(r) > 1:
        r = values.OrRestriction(*r)
    else:
        r = r[0]
    neg = stable_unique(collapsed_d["CONFIG_PROTECT_MASK"])
    if neg:
        if len(neg) == 1:
            r2 = values.StrGlobMatch(normpath(neg[0]).rstrip("/") + "/",
                                     negate=True)
        else:
            r2 = values.OrRestriction(
                negate=True,
                *[values.StrGlobMatch(normpath(x).rstrip("/") + "/")
                  for x in set(neg)])
        r = values.AndRestriction(r, r2)
    return r
Example #29
    def _set_op_vars(self, tmp_offset):
        # don't fool with this, without fooling with setup.
        self.tmpdir = self.domain.pm_tmpdir
        if tmp_offset:
            self.tmpdir = pjoin(self.tmpdir, tmp_offset.strip(os.sep))

        self.builddir = pjoin(self.tmpdir, self.env["CATEGORY"], self.env["PF"])
        for x, y in (("T", "temp"),
                     ("WORKDIR", "work"),
                     ("D", "image"),
                     ("HOME", "homedir")):
            self.env[x] = normpath(pjoin(self.builddir, y))
        self.env["D"] += self.eapi.options.trailing_slash
        self.env["PORTAGE_LOGFILE"] = normpath(pjoin(self.env["T"], "build.log"))

        # XXX: Note that this is just EAPI 3 support, not yet prefix
        # full awareness.
        if self.prefix_mode:
            self.env["ED"] = normpath(
                pjoin(self.env["D"].rstrip(os.sep), self.prefix.rstrip(os.sep))) \
                    + self.eapi.options.trailing_slash

        # temporary install dir correct for all EAPIs
        self.ED = self.env.get('ED', self.env['D'])
Example #30
    def _set_op_vars(self, tmp_offset):
        # don't fool with this, without fooling with setup.
        self.tmpdir = self.domain.pm_tmpdir
        if tmp_offset:
            self.tmpdir = pjoin(self.tmpdir, tmp_offset.strip(os.sep))

        self.builddir = pjoin(self.tmpdir, self.env["CATEGORY"], self.env["PF"])
        for x, y in (("T", "temp"),
                     ("WORKDIR", "work"),
                     ("D", "image"),
                     ("HOME", "homedir")):
            self.env[x] = normpath(pjoin(self.builddir, y))
        self.env["D"] += self.eapi.options.trailing_slash
        self.env["PORTAGE_LOGFILE"] = normpath(pjoin(self.env["T"], "build.log"))

        # XXX: Note that this is just EAPI 3 support, not yet prefix
        # full awareness.
        if self.pkg.eapi.options.prefix_capable:
            self.env["ED"] = normpath(
                pjoin(self.env["D"].rstrip(os.sep), self.env["EPREFIX"])) \
                    + self.eapi.options.trailing_slash

        # temporary install dir correct for all EAPIs
        self.ED = self.env.get('ED', self.env['D'])
Example #31
def _internal_iter_scan(path, chksum_handlers, stat_func=os.lstat):
    dirs = collections.deque([normpath(path)])
    obj = gen_obj(dirs[0], chksum_handlers=chksum_handlers,
        stat_func=stat_func)
    yield obj
    if not obj.is_dir:
        return
    while dirs:
        base = dirs.popleft()
        for x in listdir(base):
            path = pjoin(base, x)
            obj = gen_obj(path, chksum_handlers=chksum_handlers,
                        real_location=path, stat_func=stat_func)
            yield obj
            if obj.is_dir:
                dirs.append(path)
Example #33
    def iter_child_nodes(self, start_point):
        """Yield a stream of nodes that are fs entries contained within the
        passed in start point.

        :param start_point: fs filepath all yielded nodes must be within.
        """

        if isinstance(start_point, fs.fsBase):
            if start_point.is_sym:
                start_point = start_point.target
            else:
                start_point = start_point.location
        for x in self:
            cn_path = normpath(start_point).rstrip(path.sep) + path.sep
            # what about sym targets?
            if x.location.startswith(cn_path):
                yield x
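
Appending the separator before the startswith test is what keeps sibling
names from matching: "/etc2/foo" must not count as being under "/etc". The
containment predicate on its own (note the method above recomputes cn_path
on every iteration; it is loop-invariant and could be hoisted):

import os.path as path

def is_under(location, start_point):
    cn_path = path.normpath(start_point).rstrip(path.sep) + path.sep
    return location.startswith(cn_path)

print(is_under("/etc/env.d/00basic", "/etc"))   # True
print(is_under("/etc2/foo", "/etc"))            # False
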
Example #34
    def __delitem__(self, obj):
        """
        remove a fs obj from the set

        :type obj: a derivative of :obj:`pkgcore.fs.fs.fsBase`
            or a string location of an obj in the set.
        :raise KeyError: if the obj isn't found
        """

        if not self.mutable:
            # weird, but keeping with set.
            raise AttributeError(
                f'{self.__class__} is frozen; no remove functionality')
        if fs.isfs_obj(obj):
            del self._dict[obj.location]
        else:
            del self._dict[normpath(obj)]
Example #36
    def __delitem__(self, obj):

        """
        remove a fs obj from the set

        :type obj: a derivative of :obj:`pkgcore.fs.fs.fsBase`
            or a string location of an obj in the set.
        :raise KeyError: if the obj isn't found
        """

        if not self.mutable:
            # weird, but keeping with set.
            raise AttributeError(
                "%s is frozen; no remove functionality" % self.__class__)
        if fs.isfs_obj(obj):
            del self._dict[obj.location]
        else:
            del self._dict[normpath(obj)]
Example #37
def _find_profile_link(base_path, portage_compat=False):
    make_profile = pjoin(base_path, 'make.profile')
    try:
        return normpath(abspath(
            pjoin(base_path, os.readlink(make_profile))))
    except EnvironmentError as oe:
        if oe.errno in (errno.ENOENT, errno.EINVAL):
            if oe.errno == errno.ENOENT:
                if portage_compat:
                    return None
                profile = _find_profile_link(pjoin(base_path, 'portage'), True)
                if profile is not None:
                    return profile
            raise_from(errors.ComplexInstantiationError(
                "%s must be a symlink pointing to a real target" % (
                    make_profile,)))
        raise_from(errors.ComplexInstantiationError(
            "%s: unexpected error- %s" % (make_profile, oe.strerror)))
Example #39
def _internal_iter_scan(path, chksum_handlers, stat_func=os.lstat,
                        hidden=True, backup=True):
    dirs = collections.deque([normpath(path)])
    obj = gen_obj(dirs[0], chksum_handlers=chksum_handlers,
        stat_func=stat_func)
    yield obj
    if not obj.is_dir:
        return
    while dirs:
        base = dirs.popleft()
        for x in listdir(base):
            if not hidden and x.startswith('.'):
                continue
            if not backup and x.endswith('~'):
                continue
            path = pjoin(base, x)
            obj = gen_obj(path, chksum_handlers=chksum_handlers,
                        real_location=path, stat_func=stat_func)
            yield obj
            if obj.is_dir:
                dirs.append(path)
Example #40
    def _cmd_implementation_sanity_check(self, domain, observer):
        """Various ebuild sanity checks (REQUIRED_USE, pkg_pretend)."""
        pkg = self.pkg
        eapi = pkg.eapi

        # perform REQUIRED_USE checks
        if eapi.options.has_required_use:
            use = pkg.use
            for node in pkg.required_use:
                if not node.match(use):
                    observer.info(textwrap.dedent(
                        """
                        REQUIRED_USE requirement wasn't met
                        Failed to match: {}
                        from: {}
                        for USE: {}
                        pkg: {}
                        """.format(node, pkg.required_use, " ".join(use), pkg.cpvstr)
                    ))
                    return False

        # return if running pkg_pretend is not required
        if 'pretend' not in pkg.mandatory_phases:
            return True

        # run pkg_pretend phase
        commands = None
        if not pkg.built:
            commands = {"request_inherit": partial(inherit_handler, self._eclass_cache)}
        env = expected_ebuild_env(pkg)
        builddir = pjoin(domain.pm_tmpdir, env["CATEGORY"], env["PF"])
        pkg_tmpdir = normpath(pjoin(builddir, "temp"))
        ensure_dirs(pkg_tmpdir, mode=0o770, gid=portage_gid, minimal=True)
        env["ROOT"] = domain.root
        env["T"] = pkg_tmpdir

        # TODO: make colored output easier to achieve from observers
        msg = ['>>> Running pkg_pretend for ', observer._output._out.fg('green'),
               pkg.cpvstr, observer._output._out.reset]
        observer._output._out.write(*msg)

        try:
            start = time.time()
            ret = run_generic_phase(
                pkg, "pretend", env, userpriv=True, sandbox=True, extra_handlers=commands)
            logger.debug(
                "pkg_pretend sanity check for %s took %2.2f seconds",
                pkg.cpvstr, time.time() - start)
            return ret
        except format.GenericBuildError as e:
            return False
        finally:
            shutil.rmtree(builddir)
            # try to wipe the cat dir; if not empty, ignore it
            try:
                os.rmdir(os.path.dirname(builddir))
            except EnvironmentError as e:
                # POSIX specifies either ENOTEMPTY or EEXIST for non-empty dir
                # in particular, Solaris uses EEXIST in that case.
                # https://github.com/pkgcore/pkgcore/pull/181
                if e.errno not in (errno.ENOTEMPTY, errno.EEXIST):
                    raise
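
The cleanup tolerates a non-empty category directory by inspecting errno;
as the source comment notes, POSIX permits either ENOTEMPTY or EEXIST for
that case. The guard as a standalone helper (a sketch, not pkgcore API):

import errno
import os

def rmdir_if_empty(path):
    try:
        os.rmdir(path)
        return True
    except OSError as e:
        if e.errno in (errno.ENOTEMPTY, errno.EEXIST):
            return False   # still has entries; leave it alone
        raise
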
Example #41
    def add_data(self, domain):
        # error checking?
        dirpath = self.tmp_write_path
        ensure_dirs(dirpath, mode=0o755, minimal=True)
        update_mtime(self.repo.location)
        rewrite = self.repo._metadata_rewrites
        for k in self.new_pkg.tracked_attributes:
            if k == "contents":
                v = ContentsFile(pjoin(dirpath, "CONTENTS"),
                                 mutable=True, create=True)
                v.update(self.new_pkg.contents)
                v.flush()
            elif k == "environment":
                data = compression.compress_data('bzip2',
                    self.new_pkg.environment.bytes_fileobj().read())
                with open(pjoin(dirpath, "environment.bz2"), "wb") as f:
                    f.write(data)
                del data
            else:
                v = getattr(self.new_pkg, k)
                if k == 'provides':
                    versionless_providers = lambda b: b.key
                    s = conditionals.stringify_boolean(v,
                        func=versionless_providers)
                elif k == 'eapi_obj':
                    # hackity hack.
                    s = v.magic
                    k = 'eapi'
                elif k == 'depends' or k == 'rdepends':
                    s = v.slotdep_str(domain)
                elif not isinstance(v, basestring):
                    try:
                        s = ' '.join(sorted(v))
                    except TypeError:
                        s = str(v)
                else:
                    s = v
                with open(pjoin(dirpath, rewrite.get(k, k.upper())), "w", 32768) as f:
                    f.write(s + "\n")

        # ebuild_data is the actual ebuild- no point in holding onto
        # it for built ebuilds, but if it's there, we store it.
        o = getattr(self.new_pkg, "ebuild", None)
        if o is None:
            logger.warning(
                "doing install/replace op, "
                "but source package doesn't provide the actual ebuild data.  "
                "Creating an empty file")
            o = ''
        else:
            o = o.bytes_fileobj().read()
        # XXX lil hackish accessing PF
        with open(pjoin(dirpath, self.new_pkg.PF + ".ebuild"), "wb") as f:
            f.write(o)

        # install NEEDED and NEEDED.ELF.2 files from tmpdir if they exist
        pkg_tmpdir = normpath(pjoin(domain._get_tempspace(), self.new_pkg.category,
                                    self.new_pkg.PF, 'temp'))
        for f in ['NEEDED', 'NEEDED.ELF.2']:
            fp = pjoin(pkg_tmpdir, f)
            if os.path.exists(fp):
                local_source(fp).transfer_to_path(pjoin(dirpath, f))

        # XXX finally, hack to keep portage from doing stupid shit.
        # relies on counter to discern what to punt during
        # merging/removal, we don't need that crutch however. problem?
        # No counter file, portage wipes all of our merges (friendly
        # bugger).
        # need to get zmedico to localize the counter
        # creation/counting to per CP for this trick to behave
        # perfectly.
        with open(pjoin(dirpath, "COUNTER"), "w") as f:
            f.write(str(int(time.time())))

        #finally, we mark who made this.
        with open(pjoin(dirpath, "PKGMANAGER"), "w") as f:
            f.write("pkgcore-%s\n" % VERSION)
        return True
Example #42
File: fs.py Project: chutz/pkgcore
 def resolved_target(self):
     if self.target.startswith("/"):
         return self.target
     return normpath(pjoin(self.location, '../', self.target))
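
For a relative symlink target, joining "../" plus the target onto the
link's own location and normalizing gives the lexically resolved path
("../" steps from the link file up to its directory). An illustrative
stdlib equivalent:

from os.path import join, normpath

def resolved_target(location, target):
    if target.startswith("/"):
        return target
    return normpath(join(location, '../', target))

print(resolved_target("/usr/lib/libfoo.so", "libfoo.so.1"))
# -> /usr/lib/libfoo.so.1
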
Example #43
 def __init__(self, path, location=None):
     """
     :param location: ondisk location of the tree we're working with
     """
     base.__init__(self, location=location, eclassdir=normpath(path))
Example #44
def config_from_make_conf(location="/etc/", profile_override=None, **kwargs):
    """
    generate a config from a file location

    :param location: location the portage configuration is based in,
        defaults to /etc
    :param profile_override: profile to use instead of the current system
        profile, i.e. the target of the /etc/portage/make.profile
        (or deprecated /etc/make.profile) symlink
    """

    # this actually differs from portage parsing- we allow
    # make.globals to provide vars used in make.conf, portage keeps
    # them separate (kind of annoying)

    config_root = os.environ.get("PORTAGE_CONFIGROOT", "/")
    base_path = pjoin(config_root, location.strip("/"))
    portage_base = pjoin(base_path, "portage")

    # this isn't preserving incremental behaviour for features/use
    # unfortunately

    conf_dict = {}
    try:
        load_make_config(conf_dict, pjoin(base_path, 'make.globals'))
    except errors.ParsingError as e:
        if not getattr(getattr(e, 'exc', None), 'errno', None) == errno.ENOENT:
            raise
        try:
            load_make_config(conf_dict, const.MAKE_GLOBALS)
        except IGNORED_EXCEPTIONS:
            raise
        except:
            raise_from(
                errors.ParsingError("failed to find a usable make.globals"))
    load_make_config(conf_dict,
                     pjoin(base_path, 'make.conf'),
                     required=False,
                     allow_sourcing=True,
                     incrementals=True)
    load_make_config(conf_dict,
                     pjoin(portage_base, 'make.conf'),
                     required=False,
                     allow_sourcing=True,
                     incrementals=True)

    root = os.environ.get("ROOT", conf_dict.get("ROOT", "/"))
    gentoo_mirrors = [
        x.rstrip("/") + "/distfiles"
        for x in conf_dict.pop("GENTOO_MIRRORS", "").split()
    ]

    # this is flawed... it'll pick up -some-feature
    features = conf_dict.get("FEATURES", "").split()

    new_config = {}
    triggers = []

    def add_trigger(name, kls_path, **extra_args):
        d = extra_args.copy()
        d['class'] = kls_path
        new_config[name] = basics.ConfigSectionFromStringDict(d)
        triggers.append(name)

    # sets...
    add_sets(new_config, root, portage_base)

    user_profile_path = pjoin(base_path, "portage", "profile")
    add_profile(new_config, base_path, user_profile_path, profile_override)

    kwds = {
        "class": "pkgcore.vdb.ondisk.tree",
        "location": pjoin(root, 'var', 'db', 'pkg'),
        "cache_location": pjoin(
            config_root, 'var', 'cache', 'edb', 'dep', 'var', 'db', 'pkg'),
    }
    new_config["vdb"] = basics.AutoConfigSection(kwds)

    # options used by rsync-based syncers
    rsync_opts = isolate_rsync_opts(conf_dict)

    repo_opts = {}
    overlay_syncers = {}
    try:
        default_repo_opts, repo_opts = load_repos_conf(
            pjoin(portage_base, 'repos.conf'))
    except errors.ParsingError as e:
        if not getattr(getattr(e, 'exc', None), 'errno', None) == errno.ENOENT:
            raise

    if repo_opts:
        main_repo_id = default_repo_opts['main-repo']
        main_repo = repo_opts[main_repo_id]['location']
        overlay_repos = [
            opts['location'] for repo, opts in repo_opts.iteritems()
            if opts['location'] != main_repo
        ]
        main_syncer = repo_opts[main_repo_id].get('sync-uri', None)
    else:
        # fallback to PORTDIR and PORTDIR_OVERLAY settings
        main_repo = normpath(
            os.environ.get("PORTDIR", conf_dict.pop("PORTDIR",
                                                    "/usr/portage")).strip())
        overlay_repos = os.environ.get("PORTDIR_OVERLAY",
                                       conf_dict.pop("PORTDIR_OVERLAY",
                                                     "")).split()
        overlay_repos = [normpath(x) for x in overlay_repos]
        main_syncer = conf_dict.pop("SYNC", None)

        if overlay_repos and '-layman-sync' not in features:
            overlay_syncers = add_layman_syncers(new_config,
                                                 rsync_opts,
                                                 overlay_repos,
                                                 config_root=config_root)

    if main_syncer is not None:
        make_syncer(new_config, main_repo, main_syncer, rsync_opts)

    if overlay_repos and '-autodetect-sync' not in features:
        for path in overlay_repos:
            if path not in overlay_syncers:
                overlay_syncers[path] = make_autodetect_syncer(
                    new_config, path)

    repos = [main_repo] + overlay_repos
    default_repos = list(reversed(repos))

    new_config['ebuild-repo-common'] = basics.AutoConfigSection({
        'class': 'pkgcore.ebuild.repository.slavedtree',
        'default_mirrors': gentoo_mirrors,
        'inherit-only': True,
        'ignore_paludis_versioning': ('ignore-paludis-versioning' in features),
    })

    rsync_portdir_cache = 'metadata-transfer' not in features
    # if a metadata cache exists, use it.
    if rsync_portdir_cache:
        for cache_type, frag in (('flat_hash.md5_cache', 'md5-cache'),
                                 ('metadata.database', 'cache')):
            if not os.path.exists(pjoin(main_repo, 'metadata', frag)):
                continue
            new_config["cache:%s/metadata/cache" %
                       (main_repo, )] = basics.AutoConfigSection({
                           'class':
                           'pkgcore.cache.' + cache_type,
                           'readonly':
                           True,
                           'location':
                           main_repo,
                       })
            break
        else:
            rsync_portdir_cache = False

    repo_map = {}

    for tree_loc in repos:
        # XXX: Hack for portage-2 profile format support.
        repo_config = RepoConfig(tree_loc)
        repo_map[repo_config.repo_id] = repo_config

        # repo configs
        conf = {
            'class': 'pkgcore.ebuild.repo_objs.RepoConfig',
            'location': tree_loc,
        }
        if 'sync:%s' % (tree_loc, ) in new_config:
            conf['syncer'] = 'sync:%s' % (tree_loc, )
        if tree_loc == main_repo:
            conf['default'] = True
        new_config['raw:' + tree_loc] = basics.AutoConfigSection(conf)

        # repo trees
        kwds = {
            'inherit': ('ebuild-repo-common', ),
            'raw_repo': ('raw:' + tree_loc),
        }
        cache_name = 'cache:%s' % (tree_loc, )
        new_config[cache_name] = mk_simple_cache(config_root, tree_loc)
        kwds['cache'] = cache_name
        if tree_loc == main_repo:
            kwds['class'] = 'pkgcore.ebuild.repository.tree'
            if rsync_portdir_cache:
                kwds['cache'] = 'cache:%s/metadata/cache %s' % (main_repo,
                                                                cache_name)
        else:
            kwds['parent_repo'] = main_repo
        new_config[tree_loc] = basics.AutoConfigSection(kwds)

    new_config['portdir'] = basics.section_alias(main_repo, 'repo')

    # XXX: Hack for portage-2 profile format support. We need to figure out how
    # to dynamically create this from the config at runtime on attr access.
    profiles.ProfileNode._repo_map = ImmutableDict(repo_map)

    if overlay_repos:
        new_config['repo-stack'] = basics.FakeIncrementalDictConfigSection(
            my_convert_hybrid, {
                'class': 'pkgcore.repository.multiplex.config_tree',
                'repositories': tuple(default_repos)
            })
    else:
        new_config['repo-stack'] = basics.section_alias(main_repo, 'repo')

    new_config['vuln'] = basics.AutoConfigSection({
        'class': SecurityUpgradesViaProfile,
        'ebuild_repo': 'repo-stack',
        'vdb': 'vdb',
        'profile': 'profile',
    })
    new_config['glsa'] = basics.section_alias(
        'vuln', SecurityUpgradesViaProfile.pkgcore_config_type.typename)

    # binpkg.
    buildpkg = 'buildpkg' in features or kwargs.get('buildpkg', False)
    pkgdir = os.environ.get("PKGDIR", conf_dict.pop('PKGDIR', None))
    if pkgdir is not None:
        try:
            pkgdir = abspath(pkgdir)
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            if buildpkg or set(features).intersection(
                ('pristine-buildpkg', 'buildsyspkg', 'unmerge-backup')):
                logger.warning(
                    "disabling buildpkg related features since PKGDIR doesn't exist"
                )
            pkgdir = None
        else:
            if not ensure_dirs(pkgdir, mode=0o755, minimal=True):
                logger.warning(
                    "disabling buildpkg related features since PKGDIR either doesn't "
                    "exist, or lacks 0755 minimal permissions")
                pkgdir = None
Example #45
            add_trigger(
                'buildpkg_system_trigger', 'pkgcore.merge.triggers.SavePkgIfInPkgset',
                pristine='yes', target_repo='binpkg', pkgset='system')
        elif 'unmerge-backup' in features:
            add_trigger(
                'unmerge_backup_trigger', 'pkgcore.merge.triggers.SavePkgUnmerging',
                target_repo='binpkg')

    if 'save-deb' in features:
        path = conf_dict.pop("DEB_REPO_ROOT", None)
        if path is None:
            logger.warning("disabling save-deb; DEB_REPO_ROOT is unset")
        else:
            add_trigger(
                'save_deb_trigger', 'pkgcore.ospkg.triggers.SaveDeb',
                basepath=normpath(path), maintainer=conf_dict.pop("DEB_MAINAINER", ''),
                platform=conf_dict.pop("DEB_ARCHITECTURE", ""))

    if 'splitdebug' in features:
        kwds = {}

        if 'compressdebug' in features:
            kwds['compress'] = 'true'

        add_trigger(
            'binary_debug_trigger', 'pkgcore.merge.triggers.BinaryDebug',
            mode='split', **kwds)
    elif 'strip' in features or 'nostrip' not in features:
        add_trigger(
            'binary_debug_trigger', 'pkgcore.merge.triggers.BinaryDebug',
            mode='strip')
Example #46
    def add_data(self, domain):
        # error checking?
        dirpath = self.tmp_write_path
        ensure_dirs(dirpath, mode=0o755, minimal=True)
        update_mtime(self.repo.location)
        rewrite = self.repo._metadata_rewrites
        for k in self.new_pkg.tracked_attributes:
            if k == "contents":
                v = ContentsFile(pjoin(dirpath, "CONTENTS"),
                                 mutable=True,
                                 create=True)
                v.update(self.new_pkg.contents)
                v.flush()
            elif k == "environment":
                data = compression.compress_data(
                    'bzip2',
                    self.new_pkg.environment.bytes_fileobj().read())
                with open(pjoin(dirpath, "environment.bz2"), "wb") as f:
                    f.write(data)
                del data
            else:
                v = getattr(self.new_pkg, k)
                if k == 'provides':
                    versionless_providers = lambda b: b.key
                    s = conditionals.stringify_boolean(
                        v, func=versionless_providers)
                elif k == 'eapi_obj':
                    # hackity hack.
                    s = v.magic
                    k = 'eapi'
                elif k == 'depends' or k == 'rdepends':
                    s = v.slotdep_str(domain)
                elif not isinstance(v, basestring):
                    try:
                        s = ' '.join(v)
                    except TypeError:
                        s = str(v)
                else:
                    s = v
                with open(pjoin(dirpath, rewrite.get(k, k.upper())), "w",
                          32768) as f:
                    f.write(s + "\n")

        # ebuild_data is the actual ebuild- no point in holding onto
        # it for built ebuilds, but if it's there, we store it.
        o = getattr(self.new_pkg, "ebuild", None)
        if o is None:
            logger.warning(
                "doing install/replace op, "
                "but source package doesn't provide the actual ebuild data.  "
                "Creating an empty file")
            o = ''
        else:
            o = o.bytes_fileobj().read()
        # XXX lil hackish accessing PF
        with open(pjoin(dirpath, self.new_pkg.PF + ".ebuild"), "wb") as f:
            f.write(o)

        # install NEEDED and NEEDED.ELF.2 files from tmpdir if they exist
        pkg_tmpdir = normpath(
            pjoin(domain._get_tempspace(), self.new_pkg.category,
                  self.new_pkg.PF, 'temp'))
        for f in ['NEEDED', 'NEEDED.ELF.2']:
            fp = pjoin(pkg_tmpdir, f)
            if os.path.exists(fp):
                local_source(fp).transfer_to_path(pjoin(dirpath, f))

        # XXX finally, hack to keep portage from doing stupid shit.
        # relies on counter to discern what to punt during
        # merging/removal, we don't need that crutch however. problem?
        # No counter file, portage wipes all of our merges (friendly
        # bugger).
        # need to get zmedico to localize the counter
        # creation/counting to per CP for this trick to behave
        # perfectly.
        with open(pjoin(dirpath, "COUNTER"), "w") as f:
            f.write(str(int(time.time())))

        #finally, we mark who made this.
        with open(pjoin(dirpath, "PKGMANAGER"), "w") as f:
            f.write("pkgcore-%s\n" % VERSION)
        return True
Example #47
    def __init__(self,
                 pkg,
                 initial_env=None,
                 env_data_source=None,
                 features=None,
                 observer=None,
                 clean=True,
                 tmp_offset=None,
                 use_override=None,
                 allow_fetching=False):
        """
        :param pkg:
            :class:`pkgcore.ebuild.ebuild_src.package`
            instance this env is being setup for
        :param initial_env: initial environment to use for this ebuild
        :param env_data_source: a :obj:`snakeoil.data_source.base` instance
            to restore the environment from- used for restoring the
            state of an ebuild processing, whether for unmerging, or
            walking phases during building
        :param features: ebuild features, hold over from portage,
            will be broken down at some point
        """

        if use_override is not None:
            use = use_override
        else:
            use = pkg.use

        self.allow_fetching = allow_fetching

        if not hasattr(self, "observer"):
            self.observer = observer
        if not pkg.eapi_obj.is_supported:
            raise TypeError("package %s uses an unsupported eapi: %s" %
                            (pkg, pkg.eapi))

        if initial_env is not None:
            # copy.
            self.env = dict(initial_env)
            for x in ("USE", "ACCEPT_LICENSE"):
                if x in self.env:
                    del self.env[x]
        else:
            self.env = {}

        # temp hack.
        for x in ('chost', 'cbuild', 'ctarget'):
            val = getattr(pkg, x)
            if val is not None:
                self.env[x.upper()] = val
        # special note... if CTARGET is the same as CHOST, suppress it.
        # certain ebuilds (nano for example) will misbehave w/ it.
        if pkg.ctarget is not None and pkg.ctarget == pkg.chost:
            self.env.pop("CTARGET")

        if "PYTHONPATH" in os.environ:
            self.env["PYTHONPATH"] = os.environ["PYTHONPATH"]
        if "PKGCORE_DEBUG" in os.environ:
            self.env["PKGCORE_DEBUG"] = str(int(os.environ["PKGCORE_DEBUG"]))

        if features is None:
            features = self.env.get("FEATURES", ())

        # XXX: note this is just eapi3 compatibility; not full prefix, soon..
        self.env["ROOT"] = self.domain.root
        self.prefix_mode = pkg.eapi_obj.options.prefix_capable or 'force-prefix' in features
        self.env["PKGCORE_PREFIX_SUPPORT"] = 'false'
        self.prefix = '/'
        if self.prefix_mode:
            self.env['EROOT'] = normpath(self.domain.root)
            self.prefix = self.domain.prefix.lstrip("/")
            eprefix = normpath(pjoin(self.env["EROOT"], self.prefix))
            if eprefix == '/':
                # Set eprefix to '' if it's basically empty; this keeps certain crappy builds
                # (cmake for example) from puking over //usr/blah pathways
                eprefix = ''
            self.env["EPREFIX"] = eprefix
            self.env["PKGCORE_PREFIX_SUPPORT"] = 'true'

        self.env.update(pkg.eapi_obj.get_ebd_env())

        # generate a list of internally implemented EAPI specific functions that shouldn't be exported
        ret, eapi_funcs = spawn_get_output([
            pjoin(const.EAPI_BIN_PATH, 'generate_eapi_func_list.bash'),
            str(pkg.eapi)
        ])
        if ret != 0:
            raise Exception(
                "failed to generate list of EAPI %s specific functions" %
                str(pkg.eapi))
        self.env["PKGCORE_EAPI_FUNCS"] = ' '.join(x.strip()
                                                  for x in eapi_funcs)

        self.env_data_source = env_data_source
        if env_data_source is not None and \
            not isinstance(env_data_source, data_source.base):
            raise TypeError(
                "env_data_source must be None, or a pkgcore.data_source.base "
                "derivative: %s: %s" %
                (env_data_source.__class__, env_data_source))

        self.features = set(x.lower() for x in features)

        self.env["FEATURES"] = ' '.join(sorted(self.features))

        iuse_effective_regex = (re.escape(x) for x in pkg.iuse_effective)
        iuse_effective_regex = "^(%s)$" % "|".join(iuse_effective_regex)
        iuse_effective_regex = iuse_effective_regex.replace("\\.\\*", ".*")
        self.env["PKGCORE_IUSE_EFFECTIVE"] = iuse_effective_regex

        expected_ebuild_env(pkg,
                            self.env,
                            env_source_override=self.env_data_source)

        self.env["PKGCORE_FINALIZED_RESTRICT"] = ' '.join(
            str(x) for x in pkg.restrict)

        self.restrict = pkg.restrict

        for x in ("sandbox", "userpriv", "fakeroot"):
            setattr(self, x, self.feat_or_bool(x) and not (x in self.restrict))
        if self.fakeroot:
            logger.warning(
                "disabling fakeroot; unusable till coreutils/fakeroot" +
                " interaction is fixed")
            self.fakeroot = False
        if self.userpriv and os.getuid() != 0:
            self.userpriv = False

        if "PORT_LOGDIR" in self.env:
            self.logging = pjoin(
                self.env["PORT_LOGDIR"], "%s:%s:%s.log" %
                (pkg.cpvstr, self.__class__.__name__,
                 time.strftime("%Y%m%d-%H%M%S", time.localtime())))
            del self.env["PORT_LOGDIR"]
        else:
            self.logging = False

        self.env["XARGS"] = xargs

        self.bashrc = self.env.pop("bashrc", ())

        self.pkg = pkg
        self.eapi = pkg.eapi
        self.eapi_obj = pkg.eapi_obj
        wipes = [
            k for k, v in self.env.iteritems()
            if not isinstance(v, basestring)
        ]
        for k in wipes:
            del self.env[k]

        self.set_op_vars(tmp_offset)
        self.clean_at_start = clean
        self.clean_needed = False
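One subtlety above is the PKGCORE_IUSE_EFFECTIVE regex: re.escape would neuter any ".*" wildcards carried in iuse_effective, so they are restored after joining. A standalone sketch with made-up flag names:

import re

iuse_effective = ['doc', 'ssl', 'python_targets_.*']   # toy data
pattern = "^(%s)$" % "|".join(re.escape(x) for x in iuse_effective)
pattern = pattern.replace("\\.\\*", ".*")   # undo escaping of intended wildcards
assert re.match(pattern, "doc")
assert re.match(pattern, "python_targets_python3_11")
assert not re.match(pattern, "debug")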
Example #48
0
    def __init__(self, userpriv, sandbox):
        """
        :param sandbox: enables a sandboxed processor
        :param userpriv: enables a userpriv'd processor
        """

        self.lock()
        self.ebd = e_const.EBUILD_DAEMON_PATH
        spawn_opts = {'umask': 0002}

        self._preloaded_eclasses = {}
        self._eclass_caching = False
        self._outstanding_expects = []
        self._metadata_paths = None

        if userpriv:
            self.__userpriv = True
            spawn_opts.update({
                "uid": os_data.portage_uid,
                "gid": os_data.portage_gid,
                "groups": [os_data.portage_gid]})
        else:
            if pkgcore.spawn.is_userpriv_capable():
                spawn_opts.update({"gid": os_data.portage_gid,
                                   "groups": [0, os_data.portage_gid]})
            self.__userpriv = False

        # open the pipes to be used for chatting with the new daemon
        cread, cwrite = os.pipe()
        dread, dwrite = os.pipe()
        self.__sandbox = False

        # since it's questionable which spawn method we'll use (if
        # sandbox fex), we ensure the bashrc is invalid.
        env = {x: "/etc/portage/spork/not/valid/ha/ha"
               for x in ("BASHRC", "BASH_ENV")}

        if int(os.environ.get('PKGCORE_PERF_DEBUG', 0)):
            env["PKGCORE_PERF_DEBUG"] = os.environ['PKGCORE_PERF_DEBUG']
        if int(os.environ.get('PKGCORE_DEBUG', 0)):
            env["PKGCORE_DEBUG"] = os.environ['PKGCORE_DEBUG']
        if int(os.environ.get('PKGCORE_NOCOLOR', 0)):
            env["PKGCORE_NOCOLOR"] = os.environ['PKGCORE_NOCOLOR']
            if sandbox:
                env["NOCOLOR"] = os.environ['PKGCORE_NOCOLOR']

        # prepend script dir to PATH for git repo or unpacked tarball; for
        # installed versions it's empty
        env["PATH"] = os.pathsep.join(
            list(const.PATH_FORCED_PREPEND) + [os.environ["PATH"]])

        args = []
        if sandbox:
            if not pkgcore.spawn.is_sandbox_capable():
                raise ValueError("spawn lacks sandbox capabilities")
            self.__sandbox = True
            spawn_func = pkgcore.spawn.spawn_sandbox
#            env.update({"SANDBOX_DEBUG":"1", "SANDBOX_DEBUG_LOG":"/var/tmp/test"})
        else:
            spawn_func = pkgcore.spawn.spawn

        # force to a neutral dir so that sandbox won't explode if
        # run from a nonexistent dir
        spawn_opts["cwd"] = e_const.EBD_PATH
        # Force the pipes to be high up fd wise so nobody stupidly hits 'em, we
        # start from max-3 to avoid a bug in older bash where it doesn't check
        # if an fd is in use before claiming it.
        max_fd = min(pkgcore.spawn.max_fd_limit, 1024)
        env.update({
            "PKGCORE_EBD_READ_FD": str(max_fd-4),
            "PKGCORE_EBD_WRITE_FD": str(max_fd-3)})
        # pgid=0: Each ebuild processor is the process group leader for all its
        # spawned children so everything can be terminated easily if necessary.
        self.pid = spawn_func(
            [const.BASH_BINARY, self.ebd, "daemonize"],
            fd_pipes={0: 0, 1: 1, 2: 2, max_fd-4: cread, max_fd-3: dwrite},
            returnpid=True, env=env, pgid=0, *args, **spawn_opts)[0]

        os.close(cread)
        os.close(dwrite)
        self.ebd_write = os.fdopen(cwrite, "w")
        self.ebd_read = os.fdopen(dread, "r")

        # basically a quick "yo" to the daemon
        self.write("dude?")
        if not self.expect("dude!"):
            logger.error("error in server coms, bailing.")
            raise InitializationError(
                "expected 'dude!' response from ebd, which wasn't received. "
                "likely a bug")
        self.write(e_const.EBD_PATH)

        # send PKGCORE_PYTHON_BINARY...
        self.write(pkgcore.spawn.find_invoking_python())
        self.write(
            os.pathsep.join([
                normpath(abspath(pjoin(pkgcore.__file__, os.pardir, os.pardir))),
                os.environ.get('PYTHONPATH', '')])
            )
        if self.__sandbox:
            self.write("sandbox_log?")
            self.__sandbox_log = self.read().split()[0]
        self.dont_export_vars = self.read().split()
        # locking isn't used much, but w/ threading this will matter
        self.unlock()
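The "dude?"/"dude!" exchange is just a liveness check across the two pipes. A minimal standalone sketch of the same pattern (toy protocol, plain subprocess instead of the pkgcore.spawn wrappers):

import os
import subprocess
import sys

cread, cwrite = os.pipe()   # parent -> child commands
dread, dwrite = os.pipe()   # child -> parent responses

child_src = (
    "import os; cmd = os.read(%d, 16); "
    "os.write(%d, b'dude!\\n' if cmd.strip() == b'dude?' else b'huh?\\n')"
    % (cread, dwrite))
proc = subprocess.Popen([sys.executable, "-c", child_src],
                        pass_fds=(cread, dwrite))
os.close(cread)    # parent's copies of the child-side ends
os.close(dwrite)

os.write(cwrite, b"dude?\n")
assert os.fdopen(dread).readline().strip() == "dude!"
proc.wait()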
Example #49
0
    def test_location_normalization(self):
        for loc in ('/tmp/a', '/tmp//a', '/tmp//', '/tmp/a/..'):
            self.assertEqual(self.make_obj(location=loc).location,
                             normpath(loc), reflective=False)
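For these inputs snakeoil's normpath agrees with the stdlib's (they differ mainly on leading "//"), so the expected values are easy to verify by hand:

import os.path
for loc in ('/tmp/a', '/tmp//a', '/tmp//', '/tmp/a/..'):
    print(loc, '->', os.path.normpath(loc))
# -> /tmp/a, /tmp/a, /tmp, /tmp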
Example #50
0
    def resolved_target(self):
        if self.target.startswith("/"):
            return self.target
        return normpath(pjoin(self.location, '../', self.target))
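Worked through with hypothetical paths: for a relative target, the symlink's own location serves as the base, and the '../' strips the link's filename before the target is applied:

from os.path import join as pjoin, normpath

location = '/usr/lib/libfoo.so'    # where the symlink lives
target = '../lib64/libfoo.so.1'    # its relative target
print(normpath(pjoin(location, '../', target)))
# -> /usr/lib64/libfoo.so.1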
Example #51
0
def config_from_make_conf(location="/etc/", profile_override=None, **kwargs):
    """
    generate a config from a file location

    :param location: location the portage configuration is based in,
        defaults to /etc
    :param profile_override: profile to use instead of the current system
        profile, i.e. the target of the /etc/portage/make.profile
        (or deprecated /etc/make.profile) symlink
    """

    # this actually differs from portage parsing- we allow
    # make.globals to provide vars used in make.conf, portage keeps
    # them separate (kind of annoying)

    config_root = os.environ.get("PORTAGE_CONFIGROOT", "/")
    base_path = pjoin(config_root, location.strip("/"))
    portage_base = pjoin(base_path, "portage")

    # this isn't preserving incremental behaviour for features/use
    # unfortunately

    conf_dict = {}
    try:
        load_make_config(conf_dict, pjoin(base_path, 'make.globals'))
    except errors.ParsingError as e:
        if not getattr(getattr(e, 'exc', None), 'errno', None) == errno.ENOENT:
            raise
        try:
            if 'PKGCORE_REPO_PATH' in os.environ:
                config_path = pjoin(os.environ['PKGCORE_REPO_PATH'], 'config')
            else:
                config_path = pjoin(
                    config_root, sys.prefix.lstrip('/'), 'share/pkgcore/config')
            load_make_config(conf_dict, pjoin(config_path, 'make.globals'))
        except IGNORED_EXCEPTIONS:
            raise
        except:
            raise_from(errors.ParsingError(
                "failed to find a usable make.globals"))
    load_make_config(
        conf_dict, pjoin(base_path, 'make.conf'), required=False,
        allow_sourcing=True, incrementals=True)
    load_make_config(
        conf_dict, pjoin(portage_base, 'make.conf'), required=False,
        allow_sourcing=True, incrementals=True)

    root = os.environ.get("ROOT", conf_dict.get("ROOT", "/"))
    gentoo_mirrors = [
        x.rstrip("/") + "/distfiles" for x in conf_dict.pop("GENTOO_MIRRORS", "").split()]

    # this is flawed... it'll pick up -some-feature
    features = conf_dict.get("FEATURES", "").split()

    new_config = {}
    triggers = []

    def add_trigger(name, kls_path, **extra_args):
        d = extra_args.copy()
        d['class'] = kls_path
        new_config[name] = basics.ConfigSectionFromStringDict(d)
        triggers.append(name)

    # sets...
    add_sets(new_config, root, portage_base)

    user_profile_path = pjoin(base_path, "portage", "profile")
    add_profile(new_config, base_path, user_profile_path, profile_override)

    kwds = {
        "class": "pkgcore.vdb.ondisk.tree",
        "location": pjoin(root, 'var', 'db', 'pkg'),
        "cache_location": pjoin(
            config_root, 'var', 'cache', 'edb', 'dep', 'var', 'db', 'pkg'),
    }
    new_config["vdb"] = basics.AutoConfigSection(kwds)

    # options used by rsync-based syncers
    rsync_opts = isolate_rsync_opts(conf_dict)

    repo_opts = {}
    overlay_syncers = {}
    try:
        default_repo_opts, repo_opts = load_repos_conf(
            pjoin(portage_base, 'repos.conf'))
    except errors.ParsingError as e:
        if not getattr(getattr(e, 'exc', None), 'errno', None) == errno.ENOENT:
            raise

    if repo_opts:
        main_repo_id = default_repo_opts['main-repo']
        main_repo = repo_opts[main_repo_id]['location']
        overlay_repos = [opts['location'] for repo, opts in repo_opts.iteritems()
                         if opts['location'] != main_repo]
        main_syncer = repo_opts[main_repo_id].get('sync-uri', None)
    else:
        # fallback to PORTDIR and PORTDIR_OVERLAY settings
        main_repo = normpath(os.environ.get(
            "PORTDIR", conf_dict.pop("PORTDIR", "/usr/portage")).strip())
        overlay_repos = os.environ.get(
            "PORTDIR_OVERLAY", conf_dict.pop("PORTDIR_OVERLAY", "")).split()
        overlay_repos = [normpath(x) for x in overlay_repos]
        main_syncer = conf_dict.pop("SYNC", None)

        if overlay_repos and '-layman-sync' not in features:
            overlay_syncers = add_layman_syncers(
                new_config, rsync_opts, overlay_repos, config_root=config_root)

    if main_syncer is not None:
        make_syncer(new_config, main_repo, main_syncer, rsync_opts)

    if overlay_repos and '-autodetect-sync' not in features:
        for path in overlay_repos:
            if path not in overlay_syncers:
                overlay_syncers[path] = make_autodetect_syncer(new_config, path)

    repos = [main_repo] + overlay_repos
    default_repos = list(reversed(repos))

    new_config['ebuild-repo-common'] = basics.AutoConfigSection({
        'class': 'pkgcore.ebuild.repository.slavedtree',
        'default_mirrors': gentoo_mirrors,
        'inherit-only': True,
        'ignore_paludis_versioning': ('ignore-paludis-versioning' in features),
    })

    rsync_portdir_cache = 'metadata-transfer' not in features
    # if a metadata cache exists, use it.
    if rsync_portdir_cache:
        for cache_type, frag in (('flat_hash.md5_cache', 'md5-cache'),
                                 ('metadata.database', 'cache')):
            if not os.path.exists(pjoin(main_repo, 'metadata', frag)):
                continue
            new_config["cache:%s/metadata/cache" % (main_repo,)] = basics.AutoConfigSection({
                'class': 'pkgcore.cache.' + cache_type,
                'readonly': True,
                'location': main_repo,
            })
            break
        else:
            rsync_portdir_cache = False

    repo_map = {}

    for tree_loc in repos:
        # XXX: Hack for portage-2 profile format support.
        repo_config = RepoConfig(tree_loc)
        repo_map[repo_config.repo_id] = repo_config

        # repo configs
        conf = {
            'class': 'pkgcore.ebuild.repo_objs.RepoConfig',
            'location': tree_loc,
        }
        if 'sync:%s' % (tree_loc,) in new_config:
            conf['syncer'] = 'sync:%s' % (tree_loc,)
        if tree_loc == main_repo:
            conf['default'] = True
        new_config['raw:' + tree_loc] = basics.AutoConfigSection(conf)

        # repo trees
        kwds = {
            'inherit': ('ebuild-repo-common',),
            'raw_repo': ('raw:' + tree_loc),
        }
        cache_name = 'cache:%s' % (tree_loc,)
        new_config[cache_name] = mk_simple_cache(config_root, tree_loc)
        kwds['cache'] = cache_name
        if tree_loc == main_repo:
            kwds['class'] = 'pkgcore.ebuild.repository.tree'
            if rsync_portdir_cache:
                kwds['cache'] = 'cache:%s/metadata/cache %s' % (main_repo, cache_name)
        else:
            kwds['parent_repo'] = main_repo
        new_config[tree_loc] = basics.AutoConfigSection(kwds)

    new_config['portdir'] = basics.section_alias(main_repo, 'repo')

    # XXX: Hack for portage-2 profile format support. We need to figure out how
    # to dynamically create this from the config at runtime on attr access.
    profiles.ProfileNode._repo_map = ImmutableDict(repo_map)

    if overlay_repos:
        new_config['repo-stack'] = basics.FakeIncrementalDictConfigSection(
            my_convert_hybrid, {
                'class': 'pkgcore.repository.multiplex.config_tree',
                'repositories': tuple(default_repos)})
    else:
        new_config['repo-stack'] = basics.section_alias(main_repo, 'repo')

    new_config['vuln'] = basics.AutoConfigSection({
        'class': SecurityUpgradesViaProfile,
        'ebuild_repo': 'repo-stack',
        'vdb': 'vdb',
        'profile': 'profile',
    })
    new_config['glsa'] = basics.section_alias(
        'vuln', SecurityUpgradesViaProfile.pkgcore_config_type.typename)

    # binpkg.
    buildpkg = 'buildpkg' in features or kwargs.get('buildpkg', False)
    pkgdir = os.environ.get("PKGDIR", conf_dict.pop('PKGDIR', None))
    if pkgdir is not None:
        try:
            pkgdir = abspath(pkgdir)
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            if buildpkg or set(features).intersection(
                    ('pristine-buildpkg', 'buildsyspkg', 'unmerge-backup')):
                logger.warning("disabling buildpkg related features since PKGDIR doesn't exist")
            pkgdir = None
        else:
            if not ensure_dirs(pkgdir, mode=0755, minimal=True):
                logger.warning("disabling buildpkg related features since PKGDIR either doesn't "
                               "exist, or lacks 0755 minimal permissions")
                pkgdir = None
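In the PORTDIR/PORTDIR_OVERLAY fallback branch above, note that the final repo list is reversed, which puts overlays ahead of the main repo in lookup priority. A toy run (made-up paths):

from os.path import normpath

conf_dict = {'PORTDIR': '/usr/portage',
             'PORTDIR_OVERLAY': '/var/overlays/a /var/overlays/b'}
main_repo = normpath(conf_dict.pop('PORTDIR').strip())
overlay_repos = [normpath(x)
                 for x in conf_dict.pop('PORTDIR_OVERLAY', '').split()]
default_repos = list(reversed([main_repo] + overlay_repos))
print(default_repos)
# -> ['/var/overlays/b', '/var/overlays/a', '/usr/portage']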
Example #52
0
            add_trigger(
                'buildpkg_system_trigger', 'pkgcore.merge.triggers.SavePkgIfInPkgset',
                pristine='yes', target_repo='binpkg', pkgset='system')
        elif 'unmerge-backup' in features:
            add_trigger(
                'unmerge_backup_trigger', 'pkgcore.merge.triggers.SavePkgUnmerging',
                target_repo='binpkg')

    if 'save-deb' in features:
        path = make_conf.pop("DEB_REPO_ROOT", None)
        if path is None:
            logger.warning("disabling save-deb; DEB_REPO_ROOT is unset")
        else:
            add_trigger(
                'save_deb_trigger', 'pkgcore.ospkg.triggers.SaveDeb',
                basepath=normpath(path), maintainer=make_conf.pop("DEB_MAINAINER", ''),
                platform=make_conf.pop("DEB_ARCHITECTURE", ""))

    if 'splitdebug' in features:
        kwds = {}

        if 'compressdebug' in features:
            kwds['compress'] = 'true'

        add_trigger(
            'binary_debug_trigger', 'pkgcore.merge.triggers.BinaryDebug',
            mode='split', **kwds)
    elif 'strip' in features or 'nostrip' not in features:
        add_trigger(
            'binary_debug_trigger', 'pkgcore.merge.triggers.BinaryDebug',
            mode='strip')
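The strip/splitdebug branching is easy to misread: with neither feature set, stripping is still the default unless 'nostrip' is present. A hand-rolled decision helper (not pkgcore API) makes the table explicit:

def pick_debug_mode(features):
    if 'splitdebug' in features:
        return 'split', 'compressdebug' in features
    if 'strip' in features or 'nostrip' not in features:
        return 'strip', False
    return None, False

print(pick_debug_mode({'splitdebug', 'compressdebug'}))  # ('split', True)
print(pick_debug_mode(set()))                            # ('strip', False)
print(pick_debug_mode({'nostrip'}))                      # (None, False)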
Example #53
0
    def __init__(self, userpriv, sandbox, fakeroot, save_file):
        """
        :param sandbox: enables a sandboxed processor
        :param userpriv: enables a userpriv'd processor
        :param fakeroot: enables a fakeroot'd processor-
            this option is mutually exclusive with sandbox, and
            requires userpriv to be enabled. Violating this will
            result in nastiness.
        """

        self.lock()
        self.ebd = e_const.EBUILD_DAEMON_PATH
        spawn_opts = {'umask': 0002}

        self._preloaded_eclasses = {}
        self._eclass_caching = False
        self._outstanding_expects = []
        self._metadata_paths = None

        if fakeroot and (sandbox or not userpriv):
            traceback.print_stack()
            logger.error("Both sandbox and fakeroot cannot be enabled at the same time")
            raise InitializationError("cannot initialize with sandbox and fakeroot")

        if userpriv:
            self.__userpriv = True
            spawn_opts.update({
                "uid": os_data.portage_uid,
                "gid": os_data.portage_gid,
                "groups": [os_data.portage_gid]})
        else:
            if pkgcore.spawn.is_userpriv_capable():
                spawn_opts.update({"gid": os_data.portage_gid,
                                   "groups": [0, os_data.portage_gid]})
            self.__userpriv = False

        # open the pipes to be used for chatting with the new daemon
        cread, cwrite = os.pipe()
        dread, dwrite = os.pipe()
        self.__sandbox = False
        self.__fakeroot = False

        # since it's questionable which spawn method we'll use (if
        # sandbox or fakeroot fex), we ensure the bashrc is invalid.
        env = {x: "/etc/portage/spork/not/valid/ha/ha"
               for x in ("BASHRC", "BASH_ENV")}
        if int(os.environ.get('PKGCORE_PERF_DEBUG', 1)) > 1:
            env["PKGCORE_PERF_DEBUG"] = os.environ['PKGCORE_PERF_DEBUG']

        # append script dir to PATH for git repo or unpacked tarball
        if "PKGCORE_REPO_PATH" in os.environ:
            env["PATH"] = os.pathsep.join(
                [os.environ["PATH"], pjoin(os.environ["PKGCORE_REPO_PATH"], 'bin')])

        args = []
        if sandbox:
            if not pkgcore.spawn.is_sandbox_capable():
                raise ValueError("spawn lacks sandbox capabilities")
            if fakeroot:
                raise InitializationError('fakeroot was on, but sandbox was also on')
            self.__sandbox = True
            spawn_func = pkgcore.spawn.spawn_sandbox
#            env.update({"SANDBOX_DEBUG":"1", "SANDBOX_DEBUG_LOG":"/var/tmp/test"})

        elif fakeroot:
            if not pkgcore.spawn.is_fakeroot_capable():
                raise ValueError("spawn lacks fakeroot capabilities")
            self.__fakeroot = True
            spawn_func = pkgcore.spawn.spawn_fakeroot
            args.append(save_file)
        else:
            spawn_func = pkgcore.spawn.spawn

        # force to a neutral dir so that sandbox/fakeroot won't explode if
        # run from a nonexistent dir
        spawn_opts["cwd"] = e_const.EAPI_BIN_PATH
        # little trick. we force the pipes to be high up fd wise so
        # nobody stupidly hits 'em.
        max_fd = min(pkgcore.spawn.max_fd_limit, 1024)
        env.update({
            "PKGCORE_EBD_READ_FD": str(max_fd-2),
            "PKGCORE_EBD_WRITE_FD": str(max_fd-1)})
        self.pid = spawn_func(
            ["/bin/bash", self.ebd, "daemonize"],
            fd_pipes={0: 0, 1: 1, 2: 2, max_fd-2: cread, max_fd-1: dwrite},
            returnpid=True, env=env, *args, **spawn_opts)[0]

        os.close(cread)
        os.close(dwrite)
        self.ebd_write = os.fdopen(cwrite, "w")
        self.ebd_read = os.fdopen(dread, "r")

        # basically a quick "yo" to the daemon
        self.write("dude?")
        if not self.expect("dude!"):
            logger.error("error in server coms, bailing.")
            raise InitializationError(
                "expected 'dude!' response from ebd, which wasn't received. "
                "likely a bug")
        self.write(e_const.EAPI_BIN_PATH)
        # send PKGCORE_PYTHON_BINARY...
        self.write(pkgcore.spawn.find_invoking_python())
        self.write(
            os.pathsep.join([
                normpath(abspath(pjoin(pkgcore.__file__, os.pardir, os.pardir))),
                os.environ.get('PYTHONPATH', '')])
            )
        if self.__sandbox:
            self.write("sandbox_log?")
            self.__sandbox_log = self.read().split()[0]
        self.dont_export_vars = self.read().split()
        # locking isn't used much, but w/ threading this will matter
        self.unlock()
Example #54
0
    def __contains__(self, key):
        if fs.isfs_obj(key):
            return key.location in self._dict
        return normpath(key) in self._dict
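A minimal sketch of the container this implies (toy class handling only raw path strings; the real method also accepts fs objects via their .location, and uses snakeoil's normpath rather than the stdlib's):

import os.path

class ContentsSet(object):
    def __init__(self, paths):
        self._dict = dict((os.path.normpath(p), None) for p in paths)

    def __contains__(self, key):
        return os.path.normpath(key) in self._dict

cs = ContentsSet(['/usr/bin/foo'])
print('/usr//bin/foo' in cs)   # True - lookups are normalized too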
Example #55
0
                        pristine='yes',
                        target_repo='binpkg',
                        pkgset='system')
        elif 'unmerge-backup' in features:
            add_trigger('unmerge_backup_trigger',
                        'pkgcore.merge.triggers.SavePkgUnmerging',
                        target_repo='binpkg')

    if 'save-deb' in features:
        path = conf_dict.pop("DEB_REPO_ROOT", None)
        if path is None:
            logger.warning("disabling save-deb; DEB_REPO_ROOT is unset")
        else:
            add_trigger('save_deb_trigger',
                        'pkgcore.ospkg.triggers.SaveDeb',
                        basepath=normpath(path),
                        maintainer=conf_dict.pop("DEB_MAINAINER", ''),
                        platform=conf_dict.pop("DEB_ARCHITECTURE", ""))

    if 'splitdebug' in features:
        kwds = {}

        if 'compressdebug' in features:
            kwds['compress'] = 'true'

        add_trigger('binary_debug_trigger',
                    'pkgcore.merge.triggers.BinaryDebug',
                    mode='split',
                    **kwds)
    elif 'strip' in features or 'nostrip' not in features:
        add_trigger('binary_debug_trigger',
Example #56
0
    def _cmd_implementation_sanity_check(self, domain, observer):
        """Various ebuild sanity checks (REQUIRED_USE, pkg_pretend)."""
        pkg = self.pkg
        eapi = pkg.eapi

        # perform REQUIRED_USE checks
        if eapi.options.has_required_use:
            use = pkg.use
            for node in pkg.required_use:
                if not node.match(use):
                    observer.info(
                        textwrap.dedent(
                            """
                        REQUIRED_USE requirement wasn't met
                        Failed to match: {}
                        from: {}
                        for USE: {}
                        pkg: {}
                        """.format(
                                node, pkg.required_use, " ".join(use), pkg.cpvstr
                            )
                        )
                    )
                    return False

        # return if running pkg_pretend is not required
        if "pretend" not in pkg.mandatory_phases:
            return True

        # run pkg_pretend phase
        commands = None
        if not pkg.built:
            commands = {"request_inherit": partial(inherit_handler, self._eclass_cache)}
        env = expected_ebuild_env(pkg)
        tmpdir = normpath(domain._get_tempspace())
        builddir = pjoin(tmpdir, env["CATEGORY"], env["PF"])
        pkg_tmpdir = normpath(pjoin(builddir, "temp"))
        ensure_dirs(pkg_tmpdir, mode=0o770, gid=portage_gid, minimal=True)
        env["ROOT"] = domain.root
        env["T"] = pkg_tmpdir

        # TODO: make colored output easier to achieve from observers
        msg = [
            ">>> Running pkg_pretend for ",
            observer._output._out.fg("green"),
            pkg.cpvstr,
            observer._output._out.reset,
        ]
        observer._output._out.write(*msg)

        try:
            start = time.time()
            ret = run_generic_phase(pkg, "pretend", env, userpriv=True, sandbox=True, extra_handlers=commands)
            logger.debug("pkg_pretend sanity check for %s took %2.2f seconds", pkg.cpvstr, time.time() - start)
            return ret
        except format.GenericBuildError as e:
            return False
        finally:
            shutil.rmtree(builddir)
            # try to wipe the cat dir; if not empty, ignore it
            try:
                os.rmdir(os.path.dirname(builddir))
            except EnvironmentError as e:
                # POSIX specifies either ENOTEMPTY or EEXIST for non-empty dir
                # in particular, Solaris uses EEXIST in that case.
                # https://github.com/pkgcore/pkgcore/pull/181
                if e.errno not in (errno.ENOTEMPTY, errno.EEXIST):
                    raise
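The finally block's second step uses a small pattern worth isolating: remove the parent category dir only if it is already empty, tolerating both errno spellings for a non-empty directory. Standalone:

import errno
import os

def rmdir_if_empty(path):
    try:
        os.rmdir(path)
    except OSError as e:
        # POSIX allows ENOTEMPTY or EEXIST for a non-empty directory
        # (Solaris uses EEXIST), so both are tolerated.
        if e.errno not in (errno.ENOTEMPTY, errno.EEXIST):
            raise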
Example #57
0
    def __iter__(self):
        yield env_update()

        yield ConfigProtectInstall(
            self.opts["CONFIG_PROTECT"], self.opts["CONFIG_PROTECT_MASK"])
        yield ConfigProtectUninstall()

        if "collision-protect" in self.domain.features:
            yield CollisionProtect(
                self.opts["CONFIG_PROTECT"], self.opts["CONFIG_PROTECT_MASK"],
                self.opts["COLLISION_IGNORE"])

        if "protect-owned" in self.domain.features and "collision-protect" not in self.domain.features:
            yield ProtectOwned(
                self.domain.installed_repos, self.opts["CONFIG_PROTECT"],
                self.opts["CONFIG_PROTECT_MASK"], self.opts["COLLISION_IGNORE"])

        if "multilib-strict" in self.domain.features:
            yield register_multilib_strict_trigger(self.opts)

        if "sfperms" in self.domain.features:
            yield SFPerms()

        yield install_into_symdir_protect(
            self.opts["CONFIG_PROTECT"], self.opts["CONFIG_PROTECT_MASK"])

        # TODO: support multiple binpkg repo targets?
        pkgdir = self.opts.get("PKGDIR", None)
        if pkgdir:
            target_repo = self.domain.binary_repos_raw.get(pkgdir, None)
        else:
            # get the highest priority binpkg repo
            try:
                target_repo = self.domain.binary_repos_raw[0]
            except IndexError:
                target_repo = None
        if target_repo is not None:
            if 'buildpkg' in self.domain.features:
                yield triggers.SavePkg(pristine='no', target_repo=target_repo)
            elif 'pristine-buildpkg' in self.domain.features:
                yield triggers.SavePkg(pristine='yes', target_repo=target_repo)
            elif 'buildsyspkg' in self.domain.features:
                yield triggers.SavePkgIfInPkgset(
                    pristine='yes', target_repo=target_repo, pkgset=self.domain.profile.system)
            elif 'unmerge-backup' in self.domain.features:
                yield triggers.SavePkgUnmerging(target_repo=target_repo)

        if 'save-deb' in self.domain.features:
            path = self.opts.get("DEB_REPO_ROOT", None)
            if path is None:
                logger.warning("disabling save-deb; DEB_REPO_ROOT is unset")
            else:
                yield ospkg.triggers.SaveDeb(
                    basepath=normpath(path), maintainer=self.opts.get("DEB_MAINAINER", ''),
                    platform=self.opts.get("DEB_ARCHITECTURE", ""))

        if 'splitdebug' in self.domain.features:
            yield triggers.BinaryDebug(mode='split', compress=('compressdebug' in self.domain.features))
        elif 'strip' in self.domain.features or 'nostrip' not in self.domain.features:
            yield triggers.BinaryDebug(mode='strip')

        if '-fixlafiles' not in self.domain.features:
            yield libtool.FixLibtoolArchivesTrigger()

        for x in ("man", "info", "doc"):
            if f"no{x}" in self.domain.features:
                self.opts["INSTALL_MASK"].append(f"/usr/share/{x}")
        l = []
        for x in self.opts["INSTALL_MASK"]:
            x = x.rstrip("/")
            l.append(values.StrRegex(fnmatch.translate(x)))
            l.append(values.StrRegex(fnmatch.translate(f"{x}/*")))
        install_mask = l

        if install_mask:
            if len(install_mask) == 1:
                install_mask = install_mask[0]
            else:
                install_mask = values.OrRestriction(*install_mask)
            yield triggers.PruneFiles(install_mask.match)
            # note that if this wipes all /usr/share/ entries, should
            # wipe the empty dir.

        yield UninstallIgnore(self.opts["UNINSTALL_IGNORE"])
        yield InfoRegen()
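Each INSTALL_MASK entry above turns into two regexes: one matching the path itself and one matching anything beneath it. Note fnmatch's '*' crosses '/' boundaries, unlike shell globbing, so a single "{x}/*" pattern covers the whole subtree. A quick check with a toy mask:

import fnmatch
import re

mask = '/usr/share/doc'
patterns = [re.compile(fnmatch.translate(mask)),
            re.compile(fnmatch.translate(mask + '/*'))]
for path in ('/usr/share/doc',
             '/usr/share/doc/pkg/README',
             '/usr/share/man/man1'):
    print(path, any(p.match(path) for p in patterns))
# -> True, True, False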
Example #58
0
    def __init__(self, pkg, initial_env=None, env_data_source=None,
                 features=None, observer=None, clean=True, tmp_offset=None,
                 use_override=None, allow_fetching=False):
        """
        :param pkg:
            :class:`pkgcore.ebuild.ebuild_src.package`
            instance this env is being setup for
        :param initial_env: initial environment to use for this ebuild
        :param env_data_source: a :obj:`snakeoil.data_source.base` instance
            to restore the environment from- used for restoring the
            state of an ebuild processing, whether for unmerging, or
            walking phases during building
        :param features: ebuild features, hold over from portage,
            will be broken down at some point
        """

        if use_override is not None:
            use = use_override
        else:
            use = pkg.use

        self.allow_fetching = allow_fetching

        if not hasattr(self, "observer"):
            self.observer = observer
        if not pkg.eapi_obj.is_supported:
            raise TypeError(
                "package %s uses an unsupported eapi: %s" % (pkg, pkg.eapi))

        if initial_env is not None:
            # copy.
            self.env = dict(initial_env)
            for x in ("USE", "ACCEPT_LICENSE"):
                if x in self.env:
                    del self.env[x]
        else:
            self.env = {}

        # temp hack.
        for x in ('chost', 'cbuild', 'ctarget'):
            val = getattr(pkg, x)
            if val is not None:
                self.env[x.upper()] = val
        # special note... if CTARGET is the same as CHOST, suppress it.
        # certain ebuilds (nano for example) will misbehave w/ it.
        if pkg.ctarget is not None and pkg.ctarget == pkg.chost:
            self.env.pop("CTARGET")

        if "PYTHONPATH" in os.environ:
            self.env["PYTHONPATH"] = os.environ["PYTHONPATH"]
        if "PKGCORE_DEBUG" in os.environ:
            self.env["PKGCORE_DEBUG"] = str(int(os.environ["PKGCORE_DEBUG"]))

        if features is None:
            features = self.env.get("FEATURES", ())

        # XXX: note this is just eapi3 compatibility; not full prefix, soon..
        self.env["ROOT"] = self.domain.root
        self.prefix_mode = pkg.eapi_obj.options.prefix_capable or 'force-prefix' in features
        self.env["PKGCORE_PREFIX_SUPPORT"] = 'false'
        self.prefix = '/'
        if self.prefix_mode:
            self.env['EROOT'] = normpath(self.domain.root)
            self.prefix = self.domain.prefix.lstrip("/")
            eprefix = normpath(pjoin(self.env["EROOT"], self.prefix))
            if eprefix == '/':
                # Set eprefix to '' if it's basically empty; this keeps certain crappy builds
                # (cmake for example) from puking over //usr/blah pathways
                eprefix = ''
            self.env["EPREFIX"] = eprefix
            self.env["PKGCORE_PREFIX_SUPPORT"] = 'true'

        self.env.update(pkg.eapi_obj.get_ebd_env())

        self.env_data_source = env_data_source
        if env_data_source is not None and \
            not isinstance(env_data_source, data_source.base):
            raise TypeError(
                "env_data_source must be None, or a pkgcore.data_source.base "
                "derivative: %s: %s" % (
                    env_data_source.__class__, env_data_source))

        self.features = set(x.lower() for x in features)

        self.env["FEATURES"] = ' '.join(sorted(self.features))

        iuse_effective_regex = (re.escape(x) for x in pkg.iuse_effective)
        iuse_effective_regex = "^(%s)$" % "|".join(iuse_effective_regex)
        iuse_effective_regex = iuse_effective_regex.replace("\\.\\*", ".*")
        self.env["PKGCORE_IUSE_EFFECTIVE"] = iuse_effective_regex

        expected_ebuild_env(pkg, self.env, env_source_override=self.env_data_source)

        self.env["PKGCORE_FINALIZED_RESTRICT"] = ' '.join(str(x) for x in pkg.restrict)

        self.restrict = pkg.restrict

        for x in ("sandbox", "userpriv", "fakeroot"):
            setattr(self, x, self.feat_or_bool(x) and not (x in self.restrict))
        if self.fakeroot:
            logger.warning("disabling fakeroot; unusable till coreutils/fakeroot" +
                " interaction is fixed")
            self.fakeroot = False
        if self.userpriv and os.getuid() != 0:
            self.userpriv = False

        if "PORT_LOGDIR" in self.env:
            self.logging = pjoin(self.env["PORT_LOGDIR"],
                "%s:%s:%s.log" % (pkg.cpvstr, self.__class__.__name__,
                    time.strftime("%Y%m%d-%H%M%S", time.localtime())))
            del self.env["PORT_LOGDIR"]
        else:
            self.logging = False

        self.env["XARGS"] = xargs

        self.bashrc = self.env.pop("bashrc", ())

        self.pkg = pkg
        self.eapi = pkg.eapi
        self.eapi_obj = pkg.eapi_obj
        wipes = [k for k, v in self.env.iteritems()
                 if not isinstance(v, basestring)]
        for k in wipes:
            del self.env[k]

        self.set_op_vars(tmp_offset)
        self.clean_at_start = clean
        self.clean_needed = False
Example #59
0
    def __getitem__(self, obj):
        if fs.isfs_obj(obj):
            return self._dict[obj.location]
        return self._dict[normpath(obj)]