Example #1
def parent_paths(self, data):
    repo_config = self.repoconfig
    if repo_config is not None and 'portage-2' in repo_config.profile_formats:
        l = []
        for line, lineno, relpath in data:
            repo_id, separator, profile_path = line.partition(':')
            if separator:
                if repo_id:
                    try:
                        location = self._repo_map[repo_id]
                    except KeyError:
                        # check if the requested repo ID matches the current
                        # repo, which could be the case when running against
                        # unconfigured, external repos
                        if repo_id == repo_config.repo_id:
                            location = repo_config.location
                        else:
                            logger.error(
                                f'repo {repo_config.repo_id!r}: '
                                f'{relpath!r} (line {lineno}), '
                                f'bad profile parent {line!r}: '
                                f'unknown repo {repo_id!r}'
                            )
                            continue
                else:
                    # an empty repo ID (a ':path' line) refers to the current repo
                    location = repo_config.location
                l.append((abspath(pjoin(location, 'profiles', profile_path)), line, lineno))
            else:
                l.append((abspath(pjoin(self.path, line)), line, lineno))
        return tuple(l)
    return tuple((abspath(pjoin(self.path, line)), line, lineno)
                 for line, lineno, relpath in data)
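The branch logic above keys off str.partition; a minimal standalone sketch (independent of pkgcore, and assuming an empty repo ID refers to the current repo, as the else branch above does) of how the three parent-line shapes are classified:

# plain-Python sketch of the parent-line classification
for line in ('gentoo:targets/desktop', ':targets/desktop', 'base'):
    repo_id, separator, profile_path = line.partition(':')
    if separator:
        kind = 'explicit repo' if repo_id else 'current repo'
    else:
        kind = 'path relative to this profile'
    print(f'{line!r:26} -> {kind}')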
Example #2
def find_domains_from_path(sections, path):
    path = normpath(abspath(path))
    for name, domain in sections.items():
        root = getattr(domain, 'root', None)
        if root is None:
            continue
        root = normpath(abspath(root))
        if root == path:
            yield name, domain
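A minimal usage sketch with stand-in domain objects; SimpleNamespace here is purely illustrative, real callers pass pkgcore config sections:

from types import SimpleNamespace

sections = {
    'livefs': SimpleNamespace(root='/'),
    'chroot': SimpleNamespace(root='/var/chroot/'),
    'no-root': SimpleNamespace(),  # skipped: has no root attribute
}
print([name for name, _ in find_domains_from_path(sections, '/var/chroot')])
# ['chroot']  (trailing slash is normalized away before comparison)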
Example #3
def add_profile(config, base_path, user_profile_path=None, profile_override=None):
    if profile_override is None:
        profile = _find_profile_link(base_path)
    else:
        profile = normpath(abspath(profile_override))
        if not os.path.exists(profile):
            raise_from(errors.ComplexInstantiationError(
                "%s doesn't exist" % (profile,)))

    paths = profiles.OnDiskProfile.split_abspath(profile)
    if paths is None:
        raise errors.ComplexInstantiationError(
            '%s expands to %s, but no profile detected' %
            (pjoin(base_path, 'make.profile'), profile))

    if os.path.isdir(user_profile_path):
        config["profile"] = basics.AutoConfigSection({
            "class": "pkgcore.ebuild.profiles.UserProfile",
            "parent_path": paths[0],
            "parent_profile": paths[1],
            "user_path": user_profile_path,
        })
    else:
        config["profile"] = basics.AutoConfigSection({
            "class": "pkgcore.ebuild.profiles.OnDiskProfile",
            "basepath": paths[0],
            "profile": paths[1],
        })
Example #4
def _setup_scan(parser, namespace, args):
    # determine target repo early in order to load relevant config settings if they exist
    namespace, _ = parser._parse_known_args(args, namespace)

    # load default args from system/user configs if config-loading is allowed
    if namespace.config_file is None:
        namespace = parser.parse_config_options(namespace)

    # Get the current working directory for repo detection and restriction
    # creation, falling back to the root dir if it's been removed out from
    # under us.
    try:
        namespace.cwd = abspath(os.getcwd())
    except FileNotFoundError:
        namespace.cwd = '/'

    # if we have no target repo figure out what to use
    if namespace.target_repo is None:
        target_repo = _determine_target_repo(namespace, parser)
        # fallback to the default repo
        if target_repo is None:
            target_repo = namespace.config.get_default('repo')
        namespace.target_repo = target_repo

    # use filtered repo if requested
    if namespace.filter == 'repo':
        namespace.target_repo = namespace.domain.ebuild_repos[
            namespace.target_repo.repo_id]

    # determine if we're running in the gentoo repo or a clone
    namespace.gentoo_repo = 'gentoo' in namespace.target_repo.aliases

    # multiplex of target repo and its masters used for package existence queries
    namespace.search_repo = multiplex.tree(*namespace.target_repo.trees)

    # support loading repo-specific config settings from metadata/pkgcheck.conf
    repo_config_file = os.path.join(namespace.target_repo.location, 'metadata',
                                    'pkgcheck.conf')

    configs = ()
    if os.path.isfile(repo_config_file):
        # repo settings take precedence over system/user settings
        configs += (repo_config_file, )
    if namespace.config_file is not None:
        # and custom user settings take precedence over everything
        if not namespace.config_file:
            configs = ()
        else:
            configs += (namespace.config_file, )

    if configs:
        parser.parse_config(parser.configs + configs)
        namespace = parser.parse_config_options(namespace)

    # load repo-specific args from config if they exist, command line args override these
    for section in namespace.target_repo.aliases:
        if section in parser.config:
            namespace = parser.parse_config_options(namespace, section)
            break

    return namespace, args
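The precedence above comes purely from read order: configs read later override values from earlier ones. A standalone configparser sketch of the same layering idea (the file names are illustrative):

import configparser

parser = configparser.ConfigParser()
# missing files are silently skipped; later files win on conflicting keys
parser.read(['system.conf', 'repo/metadata/pkgcheck.conf', 'user.conf'])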
Example #5
    def check_args(cls, parser, namespace):
        namespace.glsa_enabled = True
        glsa_loc = namespace.glsa_location
        if glsa_loc is None:
            glsa_dirs = []
            for repo in namespace.target_repo.trees:
                path = pjoin(repo.location, 'metadata', 'glsa')
                if os.path.isdir(path):
                    glsa_dirs.append(path)
            if len(glsa_dirs) > 1:
                dirs = ', '.join(map(repr, glsa_dirs))
                parser.error(
                    '--glsa-dir needs to be specified to select one of '
                    f'multiple glsa sources: {dirs}')

            try:
                glsa_loc = glsa_dirs[0]
            except IndexError:
                # force the error if explicitly selected using -c/--checks
                selected_checks = namespace.selected_checks
                if selected_checks is not None and cls.__name__ in selected_checks[1]:
                    parser.error(
                        'no available glsa source, --glsa-dir must be specified'
                    )
                namespace.glsa_enabled = False
                if namespace.verbosity > 1:
                    logger.warning(
                        "disabling GLSA checks due to no glsa source "
                        "being found, and the check not being explicitly enabled"
                    )
                return

        namespace.glsa_location = abspath(glsa_loc)
Example #6
    def _add_profile(self, profile_override=None):
        if profile_override is None:
            profile = self._find_profile_link()
        else:
            profile = normpath(abspath(profile_override))
            if not os.path.exists(profile):
                raise errors.ComplexInstantiationError(f"{profile} doesn't exist")

        paths = profiles.OnDiskProfile.split_abspath(profile)
        if paths is None:
            raise errors.ComplexInstantiationError(
                '%s expands to %s, but no profile detected' %
                (pjoin(self.dir, 'make.profile'), profile))

        user_profile_path = pjoin(self.dir, 'profile')
        if os.path.isdir(user_profile_path):
            self["profile"] = basics.AutoConfigSection({
                "class": "pkgcore.ebuild.profiles.UserProfile",
                "parent_path": paths[0],
                "parent_profile": paths[1],
                "user_path": user_profile_path,
            })
        else:
            self["profile"] = basics.AutoConfigSection({
                "class": "pkgcore.ebuild.profiles.OnDiskProfile",
                "basepath": paths[0],
                "profile": paths[1],
            })
Example #7
    def _add_profile(self, profile_override=None):
        if profile_override is None:
            profile = self._find_profile_link()
        else:
            profile = normpath(abspath(profile_override))
            if not os.path.exists(profile):
                raise config_errors.UserConfigError(
                    f"{profile!r} doesn't exist")

        paths = profiles.OnDiskProfile.split_abspath(profile)
        if paths is None:
            raise config_errors.UserConfigError(
                '%r expands to %r, but no profile detected' %
                (pjoin(self.dir, 'make.profile'), profile))

        user_profile_path = pjoin(self.dir, 'profile')
        if os.path.isdir(user_profile_path):
            self["profile"] = basics.AutoConfigSection({
                "class":
                "pkgcore.ebuild.profiles.UserProfile",
                "parent_path":
                paths[0],
                "parent_profile":
                paths[1],
                "user_path":
                user_profile_path,
            })
        else:
            self["profile"] = basics.AutoConfigSection({
                "class": "pkgcore.ebuild.profiles.OnDiskProfile",
                "basepath": paths[0],
                "profile": paths[1],
            })
Example #8
def add_profile(config, config_dir, profile_override=None):
    if profile_override is None:
        profile = _find_profile_link(config_dir)
    else:
        profile = normpath(abspath(profile_override))
        if not os.path.exists(profile):
            raise_from(errors.ComplexInstantiationError(
                "%s doesn't exist" % (profile,)))

    paths = profiles.OnDiskProfile.split_abspath(profile)
    if paths is None:
        raise errors.ComplexInstantiationError(
            '%s expands to %s, but no profile detected' %
            (pjoin(config_dir, 'make.profile'), profile))

    user_profile_path = pjoin(config_dir, 'profile')
    if os.path.isdir(user_profile_path):
        config["profile"] = basics.AutoConfigSection({
            "class":
            "pkgcore.ebuild.profiles.UserProfile",
            "parent_path":
            paths[0],
            "parent_profile":
            paths[1],
            "user_path":
            user_profile_path,
        })
    else:
        config["profile"] = basics.AutoConfigSection({
            "class": "pkgcore.ebuild.profiles.OnDiskProfile",
            "basepath": paths[0],
            "profile": paths[1],
        })
Example #9
def parent_paths(self, data):
    repo_config = self.repoconfig
    if repo_config is not None and 'portage-2' in repo_config.profile_formats:
        l = []
        for repo_id, separator, path in (x.partition(':') for x in data):
            if separator:
                if repo_id:
                    try:
                        location = self._repo_map[repo_id]
                    except KeyError:
                        raise ValueError(f"unknown repository name: {repo_id!r}")
                    except TypeError:
                        raise ValueError("repo mapping is unset")
                else:
                    # an empty repo ID (a ':path' line) refers to the current repo
                    location = repo_config.location
                l.append(abspath(pjoin(location, 'profiles', path)))
            else:
                l.append(abspath(pjoin(self.path, repo_id)))
        return tuple(l)
    return tuple(abspath(pjoin(self.path, x)) for x in data)
Example #10
def _load_repoconfig_from_path(path):
    path = abspath(path)
    # strip '/' so we don't get '/usr/portage' == ('', 'usr', 'portage')
    chunks = path.lstrip('/').split('/')
    try:
        pindex = max(idx for idx, x in enumerate(chunks) if x == 'profiles')
    except ValueError:
        # not in a repo...
        return None
    repo_path = pjoin('/', *chunks[:pindex])
    return repo_objs.RepoConfig(repo_path)
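A worked example of the chunking above (standalone sketch): the rightmost 'profiles' component marks the repo root, so nested 'profiles' directories resolve to the deepest match.

path = '/usr/portage/profiles/default/linux'
chunks = path.lstrip('/').split('/')   # ['usr', 'portage', 'profiles', 'default', 'linux']
pindex = max(idx for idx, x in enumerate(chunks) if x == 'profiles')   # 2
print('/' + '/'.join(chunks[:pindex]))   # /usr/portage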
Example #11
def _find_profile_link(self):
    make_profile = pjoin(self.dir, 'make.profile')
    try:
        return normpath(abspath(
            pjoin(self.dir, os.readlink(make_profile))))
    except EnvironmentError as e:
        if e.errno in (errno.ENOENT, errno.EINVAL):
            raise errors.ComplexInstantiationError(
                f"{make_profile} must be a symlink pointing to a real target") from e
        raise errors.ComplexInstantiationError(
            f"{make_profile}: unexpected error- {e.strerror}") from e
Example #12
def split_abspath(path):
    path = abspath(path)
    # filter's heavy, but it handles '/' while also
    # suppressing the leading '/'
    chunks = [x for x in path.split("/") if x]
    try:
        # poor man's rindex
        pbase = max(x for x in enumerate(chunks) if x[1] == 'profiles')[0]
    except ValueError:
        # no base found
        return None
    return pjoin("/", *chunks[:pbase+1]), '/'.join(chunks[pbase+1:])
Example #13
def _find_profile_link(config_dir):
    make_profile = pjoin(config_dir, 'make.profile')
    try:
        return normpath(abspath(
            pjoin(config_dir, os.readlink(make_profile))))
    except EnvironmentError as oe:
        if oe.errno in (errno.ENOENT, errno.EINVAL):
            raise_from(errors.ComplexInstantiationError(
                "%s must be a symlink pointing to a real target" % (
                    make_profile,)))
        raise_from(errors.ComplexInstantiationError(
            "%s: unexpected error- %s" % (make_profile, oe.strerror)))
Example #14
def _find_profile_link(self):
    make_profile = pjoin(self.dir, 'make.profile')
    try:
        return normpath(abspath(
            pjoin(self.dir, os.readlink(make_profile))))
    except EnvironmentError as e:
        if e.errno in (errno.ENOENT, errno.EINVAL):
            raise config_errors.UserConfigError(
                f"{make_profile!r} must be a symlink pointing to a real target") from e
        raise config_errors.ComplexInstantiationError(
            f"{make_profile!r}: unexpected error- {e.strerror}") from e
Example #15
def split_abspath(path):
    path = abspath(path)
    # filter's heavy, but it handles '/' while also
    # suppressing the leading '/'
    chunks = [x for x in path.split("/") if x]
    try:
        # poor man's rindex
        pbase = max(idx for idx, x in enumerate(chunks) if x == 'profiles')
    except ValueError:
        # no base found
        return None
    return pjoin("/", *chunks[:pbase+1]), '/'.join(chunks[pbase+1:])
Example #16
def _find_profile_link(base_path, portage_compat=False):
    make_profile = pjoin(base_path, 'make.profile')
    try:
        return normpath(abspath(
            pjoin(base_path, os.readlink(make_profile))))
    except EnvironmentError as oe:
        if oe.errno in (errno.ENOENT, errno.EINVAL):
            if oe.errno == errno.ENOENT:
                if portage_compat:
                    return None
                profile = _find_profile_link(pjoin(base_path, 'portage'), True)
                if profile is not None:
                    return profile
            raise_from(errors.ComplexInstantiationError(
                "%s must be a symlink pointing to a real target" % (
                    make_profile,)))
        raise_from(errors.ComplexInstantiationError(
            "%s: unexpected error- %s" % (make_profile, oe.strerror)))
Example #17
def _validate_scan_args(parser, namespace):
    namespace.enabled_checks = list(const.CHECKS.values())
    namespace.enabled_keywords = list(const.KEYWORDS.values())

    # Get the current working directory for repo detection and restriction
    # creation, falling back to the root dir if it's been removed out from
    # under us.
    try:
        cwd = abspath(os.getcwd())
    except FileNotFoundError:
        cwd = '/'

    # if we have no target repo figure out what to use
    if namespace.target_repo is None:
        target_repo = _determine_target_repo(namespace, parser, cwd)
        # fallback to the default repo
        if target_repo is None:
            target_repo = namespace.config.get_default('repo')
        namespace.target_repo = target_repo

    # use filtered repo if filtering is enabled
    if namespace.filtered:
        namespace.target_repo = namespace.domain.ebuild_repos[
            namespace.target_repo.repo_id]

    # determine if we're running in the gentoo repo or a clone
    namespace.gentoo_repo = 'gentoo' in namespace.target_repo.aliases

    # multiplex of target repo and its masters used for package existence queries
    namespace.search_repo = multiplex.tree(*namespace.target_repo.trees)

    if namespace.targets:
        repo = namespace.target_repo

        # read targets from stdin in a non-blocking manner
        if len(namespace.targets) == 1 and namespace.targets[0] == '-':

            def stdin():
                while True:
                    line = sys.stdin.readline()
                    if not line:
                        break
                    yield line.rstrip()

            namespace.targets = stdin()

        def restrictions():
            for target in namespace.targets:
                try:
                    r = parserestrict.parse_match(target)
                except parserestrict.ParseError as e:
                    if os.path.exists(target):
                        try:
                            r = _path_restrict(target, namespace)
                        except ValueError as e:
                            parser.error(e)
                    else:
                        parser.error(e)
                yield _restrict_to_scope(r), r

        # Collapse restrictions for passed in targets while keeping the
        # generator intact for piped in targets.
        namespace.restrictions = restrictions()
        if isinstance(namespace.targets, list):
            namespace.restrictions = list(namespace.restrictions)

            # collapse restrictions in order to run them in parallel
            if len(namespace.restrictions) > 1:
                # multiple targets are restricted to a single scanning scope
                scopes = {scope for scope, restrict in namespace.restrictions}
                if len(scopes) > 1:
                    scan_scopes = ', '.join(sorted(map(str, scopes)))
                    parser.error(
                        f'targets specify multiple scan scope levels: {scan_scopes}'
                    )

                combined_restrict = boolean.OrRestriction(
                    *(r for s, r in namespace.restrictions))
                namespace.restrictions = [(scopes.pop(), combined_restrict)]
    else:
        if cwd in namespace.target_repo:
            restrict = _path_restrict(cwd, namespace)
        else:
            restrict = packages.AlwaysTrue
        namespace.restrictions = [(_restrict_to_scope(restrict), restrict)]

    if namespace.checkset is None:
        namespace.checkset = namespace.config.get_default('pkgcheck_checkset')
    if namespace.checkset is not None:
        namespace.enabled_checks = list(
            namespace.checkset.filter(namespace.enabled_checks))

    if namespace.selected_scopes is not None:
        disabled_scopes, enabled_scopes = namespace.selected_scopes

        # validate selected scopes
        selected_scopes = set(disabled_scopes + enabled_scopes)
        unknown_scopes = selected_scopes - set(base.scopes)
        if unknown_scopes:
            unknown = ', '.join(map(repr, unknown_scopes))
            available = ', '.join(base.scopes)
            parser.error(f'unknown scope{_pl(unknown_scopes)}: '
                         f'{unknown} (available scopes: {available})')

        disabled_scopes = {base.scopes[x] for x in disabled_scopes}
        enabled_scopes = {base.scopes[x] for x in enabled_scopes}

        # convert scopes to keyword lists
        disabled_keywords = [
            k.__name__ for k in const.KEYWORDS.values()
            if k.scope in disabled_scopes
        ]
        enabled_keywords = [
            k.__name__ for k in const.KEYWORDS.values()
            if k.scope in enabled_scopes
        ]

        # filter outputted keywords
        namespace.enabled_keywords = base.filter_update(
            namespace.enabled_keywords, enabled_keywords, disabled_keywords)

    if namespace.selected_keywords is not None:
        disabled_keywords, enabled_keywords = namespace.selected_keywords

        error = (k for k, v in const.KEYWORDS.items()
                 if issubclass(v, results.Error))
        warning = (k for k, v in const.KEYWORDS.items()
                   if issubclass(v, results.Warning))
        info = (k for k, v in const.KEYWORDS.items()
                if issubclass(v, results.Info))

        alias_map = {'error': error, 'warning': warning, 'info': info}
        replace_aliases = lambda x: alias_map.get(x, [x])

        # expand keyword aliases to keyword lists
        disabled_keywords = list(
            chain.from_iterable(map(replace_aliases, disabled_keywords)))
        enabled_keywords = list(
            chain.from_iterable(map(replace_aliases, enabled_keywords)))

        # validate selected keywords
        selected_keywords = set(disabled_keywords + enabled_keywords)
        available_keywords = set(const.KEYWORDS.keys())
        unknown_keywords = selected_keywords - available_keywords
        if unknown_keywords:
            unknown = ', '.join(map(repr, unknown_keywords))
            parser.error(f'unknown keyword{_pl(unknown_keywords)}: {unknown}')

        # filter outputted keywords
        namespace.enabled_keywords = base.filter_update(
            namespace.enabled_keywords, enabled_keywords, disabled_keywords)

    namespace.filtered_keywords = set(namespace.enabled_keywords)
    if namespace.filtered_keywords == set(const.KEYWORDS.values()):
        namespace.filtered_keywords = None

    disabled_checks, enabled_checks = ((), ())
    if namespace.selected_checks is not None:
        disabled_checks, enabled_checks = namespace.selected_checks
        available_checks = list(const.CHECKS.keys())

        alias_map = {'all': available_checks}
        replace_aliases = lambda x: alias_map.get(x, [x])

        # expand check aliases to check lists
        disabled_checks = list(
            chain.from_iterable(map(replace_aliases, disabled_checks)))
        enabled_checks = list(
            chain.from_iterable(map(replace_aliases, enabled_checks)))

        # overwrite selected checks with expanded aliases
        namespace.selected_checks = (disabled_checks, enabled_checks)

        # validate selected checks
        selected_checks = set(disabled_checks + enabled_checks)
        unknown_checks = selected_checks.difference(available_checks)
        if unknown_checks:
            unknown = ', '.join(map(repr, unknown_checks))
            parser.error(f'unknown check{_pl(unknown_checks)}: {unknown} ')
    elif namespace.filtered_keywords is not None:
        # enable checks based on enabled keyword -> check mapping
        enabled_checks = []
        for check, cls in const.CHECKS.items():
            if namespace.filtered_keywords.intersection(cls.known_results):
                enabled_checks.append(check)

    # filter checks to run
    if enabled_checks:
        whitelist = base.Whitelist(enabled_checks)
        namespace.enabled_checks = list(
            whitelist.filter(namespace.enabled_checks))
    if disabled_checks:
        blacklist = base.Blacklist(disabled_checks)
        namespace.enabled_checks = list(
            blacklist.filter(namespace.enabled_checks))

    # skip checks that may be disabled
    namespace.enabled_checks = [
        c for c in namespace.enabled_checks if not c.skip(namespace)
    ]

    if not namespace.enabled_checks:
        parser.error('no active checks')

    namespace.addons = set()

    for check in namespace.enabled_checks:
        add_addon(check, namespace.addons)
    try:
        for addon in namespace.addons:
            addon.check_args(parser, namespace)
    except argparse.ArgumentError as e:
        if namespace.debug:
            raise
        parser.error(str(e))
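The stdin handling above swaps the target list for a lazy generator so piped input is consumed line by line instead of being read up front; a self-contained sketch of the same pattern:

import io

def lazy_lines(stream):
    # yield stripped lines one at a time; nothing is buffered up front
    while True:
        line = stream.readline()
        if not line:
            break
        yield line.rstrip()

print(list(lazy_lines(io.StringIO('cat/pkg-a\ncat/pkg-b\n'))))
# ['cat/pkg-a', 'cat/pkg-b']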
Example #18
def config_from_make_conf(location="/etc/", profile_override=None, **kwargs):
    """
    generate a config from a file location

    :param location: location the portage configuration is based in,
        defaults to /etc
    :param profile_override: profile to use instead of the current system
        profile, i.e. the target of the /etc/portage/make.profile
        (or deprecated /etc/make.profile) symlink
    """

    # this actually differs from portage parsing- we allow
    # make.globals to provide vars used in make.conf, portage keeps
    # them separate (kind of annoying)

    config_root = os.environ.get("PORTAGE_CONFIGROOT", "/")
    base_path = pjoin(config_root, location.strip("/"))
    portage_base = pjoin(base_path, "portage")

    # this isn't preserving incremental behaviour for features/use
    # unfortunately

    conf_dict = {}
    try:
        load_make_config(conf_dict, pjoin(base_path, 'make.globals'))
    except errors.ParsingError as e:
        if getattr(getattr(e, 'exc', None), 'errno', None) != errno.ENOENT:
            raise
        try:
            if 'PKGCORE_REPO_PATH' in os.environ:
                config_path = pjoin(os.environ['PKGCORE_REPO_PATH'], 'config')
            else:
                config_path = pjoin(
                    config_root, sys.prefix.lstrip('/'), 'share/pkgcore/config')
            load_make_config(conf_dict, pjoin(config_path, 'make.globals'))
        except IGNORED_EXCEPTIONS:
            raise
        except Exception:
            raise_from(errors.ParsingError(
                "failed to find a usable make.globals"))
    load_make_config(
        conf_dict, pjoin(base_path, 'make.conf'), required=False,
        allow_sourcing=True, incrementals=True)
    load_make_config(
        conf_dict, pjoin(portage_base, 'make.conf'), required=False,
        allow_sourcing=True, incrementals=True)

    root = os.environ.get("ROOT", conf_dict.get("ROOT", "/"))
    gentoo_mirrors = [
        x.rstrip("/") + "/distfiles" for x in conf_dict.pop("GENTOO_MIRRORS", "").split()]

    # this is flawed... it'll pick up -some-feature
    features = conf_dict.get("FEATURES", "").split()

    new_config = {}
    triggers = []

    def add_trigger(name, kls_path, **extra_args):
        d = extra_args.copy()
        d['class'] = kls_path
        new_config[name] = basics.ConfigSectionFromStringDict(d)
        triggers.append(name)

    # sets...
    add_sets(new_config, root, portage_base)

    user_profile_path = pjoin(base_path, "portage", "profile")
    add_profile(new_config, base_path, user_profile_path, profile_override)

    kwds = {
        "class": "pkgcore.vdb.ondisk.tree",
        "location": pjoin(root, 'var', 'db', 'pkg'),
        "cache_location": pjoin(
            config_root, 'var', 'cache', 'edb', 'dep', 'var', 'db', 'pkg'),
    }
    new_config["vdb"] = basics.AutoConfigSection(kwds)

    # options used by rsync-based syncers
    rsync_opts = isolate_rsync_opts(conf_dict)

    repo_opts = {}
    overlay_syncers = {}
    try:
        default_repo_opts, repo_opts = load_repos_conf(
            pjoin(portage_base, 'repos.conf'))
    except errors.ParsingError as e:
        if getattr(getattr(e, 'exc', None), 'errno', None) != errno.ENOENT:
            raise

    if repo_opts:
        main_repo_id = default_repo_opts['main-repo']
        main_repo = repo_opts[main_repo_id]['location']
        overlay_repos = [opts['location'] for repo, opts in repo_opts.iteritems()
                         if opts['location'] != main_repo]
        main_syncer = repo_opts[main_repo_id].get('sync-uri', None)
    else:
        # fallback to PORTDIR and PORTDIR_OVERLAY settings
        main_repo = normpath(os.environ.get(
            "PORTDIR", conf_dict.pop("PORTDIR", "/usr/portage")).strip())
        overlay_repos = os.environ.get(
            "PORTDIR_OVERLAY", conf_dict.pop("PORTDIR_OVERLAY", "")).split()
        overlay_repos = [normpath(x) for x in overlay_repos]
        main_syncer = conf_dict.pop("SYNC", None)

        if overlay_repos and '-layman-sync' not in features:
            overlay_syncers = add_layman_syncers(
                new_config, rsync_opts, overlay_repos, config_root=config_root)

    if main_syncer is not None:
        make_syncer(new_config, main_repo, main_syncer, rsync_opts)

    if overlay_repos and '-autodetect-sync' not in features:
        for path in overlay_repos:
            if path not in overlay_syncers:
                overlay_syncers[path] = make_autodetect_syncer(new_config, path)

    repos = [main_repo] + overlay_repos
    default_repos = list(reversed(repos))

    new_config['ebuild-repo-common'] = basics.AutoConfigSection({
        'class': 'pkgcore.ebuild.repository.slavedtree',
        'default_mirrors': gentoo_mirrors,
        'inherit-only': True,
        'ignore_paludis_versioning': ('ignore-paludis-versioning' in features),
    })

    rsync_portdir_cache = 'metadata-transfer' not in features
    # if a metadata cache exists, use it.
    if rsync_portdir_cache:
        for cache_type, frag in (('flat_hash.md5_cache', 'md5-cache'),
                                 ('metadata.database', 'cache')):
            if not os.path.exists(pjoin(main_repo, 'metadata', frag)):
                continue
            new_config["cache:%s/metadata/cache" % (main_repo,)] = basics.AutoConfigSection({
                'class': 'pkgcore.cache.' + cache_type,
                'readonly': True,
                'location': main_repo,
            })
            break
        else:
            rsync_portdir_cache = False

    repo_map = {}

    for tree_loc in repos:
        # XXX: Hack for portage-2 profile format support.
        repo_config = RepoConfig(tree_loc)
        repo_map[repo_config.repo_id] = repo_config

        # repo configs
        conf = {
            'class': 'pkgcore.ebuild.repo_objs.RepoConfig',
            'location': tree_loc,
        }
        if 'sync:%s' % (tree_loc,) in new_config:
            conf['syncer'] = 'sync:%s' % (tree_loc,)
        if tree_loc == main_repo:
            conf['default'] = True
        new_config['raw:' + tree_loc] = basics.AutoConfigSection(conf)

        # repo trees
        kwds = {
            'inherit': ('ebuild-repo-common',),
            'raw_repo': ('raw:' + tree_loc),
        }
        cache_name = 'cache:%s' % (tree_loc,)
        new_config[cache_name] = mk_simple_cache(config_root, tree_loc)
        kwds['cache'] = cache_name
        if tree_loc == main_repo:
            kwds['class'] = 'pkgcore.ebuild.repository.tree'
            if rsync_portdir_cache:
                kwds['cache'] = 'cache:%s/metadata/cache %s' % (main_repo, cache_name)
        else:
            kwds['parent_repo'] = main_repo
        new_config[tree_loc] = basics.AutoConfigSection(kwds)

    new_config['portdir'] = basics.section_alias(main_repo, 'repo')

    # XXX: Hack for portage-2 profile format support. We need to figure out how
    # to dynamically create this from the config at runtime on attr access.
    profiles.ProfileNode._repo_map = ImmutableDict(repo_map)

    if overlay_repos:
        new_config['repo-stack'] = basics.FakeIncrementalDictConfigSection(
            my_convert_hybrid, {
                'class': 'pkgcore.repository.multiplex.config_tree',
                'repositories': tuple(default_repos)})
    else:
        new_config['repo-stack'] = basics.section_alias(main_repo, 'repo')

    new_config['vuln'] = basics.AutoConfigSection({
        'class': SecurityUpgradesViaProfile,
        'ebuild_repo': 'repo-stack',
        'vdb': 'vdb',
        'profile': 'profile',
    })
    new_config['glsa'] = basics.section_alias(
        'vuln', SecurityUpgradesViaProfile.pkgcore_config_type.typename)

    # binpkg.
    buildpkg = 'buildpkg' in features or kwargs.get('buildpkg', False)
    pkgdir = os.environ.get("PKGDIR", conf_dict.pop('PKGDIR', None))
    if pkgdir is not None:
        try:
            pkgdir = abspath(pkgdir)
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            if buildpkg or set(features).intersection(
                    ('pristine-buildpkg', 'buildsyspkg', 'unmerge-backup')):
                logger.warning("disabling buildpkg related features since PKGDIR doesn't exist")
            pkgdir = None
        else:
            if not ensure_dirs(pkgdir, mode=0755, minimal=True):
                logger.warning("disabling buildpkg related features since PKGDIR either doesn't "
                               "exist, or lacks 0755 minimal permissions")
                pkgdir = None
Example #19
    def check_args(parser, namespace):
        profiles_dir = getattr(namespace, "profiles_dir", None)
        if profiles_dir is not None:
            profiles_dir = abspath(profiles_dir)
            if not os.path.isdir(profiles_dir):
                parser.error(
                    "profile-base doesn't exist or isn't a dir: %r" % (profiles_dir,))

        selected_profiles = namespace.profiles
        if selected_profiles is None:
            selected_profiles = ((), ())

        if profiles_dir:
            profiles_obj = repo_objs.BundledProfiles(profiles_dir)
        else:
            profiles_obj = namespace.target_repo.config.profiles

        def norm_name(s):
            """Expand status keywords and format paths."""
            if s in ('dev', 'exp', 'stable'):
                for x in profiles_obj.status_profiles(s):
                    yield x
            else:
                yield '/'.join(filter(None, s.split('/')))

        disabled, enabled = selected_profiles
        disabled = set(disabled)
        enabled = set(enabled)
        # remove profiles that are both enabled and disabled
        toggled = enabled.intersection(disabled)
        enabled = enabled.difference(toggled)
        disabled = disabled.difference(toggled)
        # expand status keywords, e.g. 'stable' -> set of stable profiles
        disabled = set(chain.from_iterable(imap(norm_name, disabled)))
        enabled = set(chain.from_iterable(imap(norm_name, enabled)))

        # If no profiles are enabled, then all are scanned except ones that are
        # explicitly disabled.
        if not enabled:
            enabled = {
                profile for profile, status in
                chain.from_iterable(profiles_obj.arch_profiles.itervalues())}

        profile_paths = enabled.difference(disabled)

        # We hold onto the profiles as we're going, due to the fact that
        # profile nodes are weakly cached; hold onto all for this loop, avoids
        # a lot of reparsing at the expense of slightly more memory usage
        # temporarily.
        cached_profiles = []

        arch_profiles = defaultdict(list)
        for profile_path in profile_paths:
            try:
                p = profiles_obj.create_profile(profile_path)
            except profiles.ProfileError as e:
                # Only throw errors if the profile was selected by the user, bad
                # repo profiles will be caught during repo metadata scans.
                if namespace.profiles is not None:
                    parser.error('invalid profile: %r: %s' % (e.path, e.error))
                continue
            if namespace.profiles_ignore_deprecated and p.deprecated:
                continue
            cached_profiles.append(p)
            if p.arch is None:
                parser.error(
                    "profile %r lacks arch settings, unable to use it" % (p.path,))
            arch_profiles[p.arch].append((profile_path, p))

        namespace.arch_profiles = arch_profiles
Example #20
    def __init__(self, userpriv, sandbox, fakeroot, save_file):
        """
        :param sandbox: enables a sandboxed processor
        :param userpriv: enables a userpriv'd processor
        :param fakeroot: enables a fakeroot'd processor-
            this option is mutually exclusive with sandbox and
            requires userpriv to be enabled; violating this will
            result in nastiness.
        """

        self.lock()
        self.ebd = e_const.EBUILD_DAEMON_PATH
        spawn_opts = {'umask': 0002}

        self._preloaded_eclasses = {}
        self._eclass_caching = False
        self._outstanding_expects = []
        self._metadata_paths = None

        if fakeroot and (sandbox or not userpriv):
            traceback.print_stack()
            logger.error(
                "Both sandbox and fakeroot cannot be enabled at the same time")
            raise InitializationError(
                "cannot initialize with sandbox and fakeroot")

        if userpriv:
            self.__userpriv = True
            spawn_opts.update({
                "uid": os_data.portage_uid,
                "gid": os_data.portage_gid,
                "groups": [os_data.portage_gid]
            })
        else:
            if pkgcore.spawn.is_userpriv_capable():
                spawn_opts.update({
                    "gid": os_data.portage_gid,
                    "groups": [0, os_data.portage_gid]
                })
            self.__userpriv = False

        # open the pipes to be used for chatting with the new daemon
        cread, cwrite = os.pipe()
        dread, dwrite = os.pipe()
        self.__sandbox = False
        self.__fakeroot = False

        # since it's questionable which spawn method we'll use (if
        # sandbox or fakeroot fex), we ensure the bashrc is invalid.
        env = {
            x: "/etc/portage/spork/not/valid/ha/ha"
            for x in ("BASHRC", "BASH_ENV")
        }
        if int(os.environ.get('PKGCORE_PERF_DEBUG', 1)) > 1:
            env["PKGCORE_PERF_DEBUG"] = os.environ['PKGCORE_PERF_DEBUG']

        # append script dir to PATH for git repo or unpacked tarball
        if "PKGCORE_SCRIPT_PATH" in os.environ:
            env["PATH"] = os.pathsep.join(
                [os.environ["PATH"], os.environ["PKGCORE_SCRIPT_PATH"]])

        args = []
        if sandbox:
            if not pkgcore.spawn.is_sandbox_capable():
                raise ValueError("spawn lacks sandbox capabilities")
            if fakeroot:
                raise InitializationError(
                    'fakeroot was on, but sandbox was also on')
            self.__sandbox = True
            spawn_func = pkgcore.spawn.spawn_sandbox
#            env.update({"SANDBOX_DEBUG":"1", "SANDBOX_DEBUG_LOG":"/var/tmp/test"})

        elif fakeroot:
            if not pkgcore.spawn.is_fakeroot_capable():
                raise ValueError("spawn lacks fakeroot capabilities")
            self.__fakeroot = True
            spawn_func = pkgcore.spawn.spawn_fakeroot
            args.append(save_file)
        else:
            spawn_func = pkgcore.spawn.spawn

        # force to a neutral dir so that sandbox/fakeroot won't explode if
        # ran from a nonexistent dir
        spawn_opts["cwd"] = e_const.EAPI_BIN_PATH
        # little trick. we force the pipes to be high up fd wise so
        # nobody stupidly hits 'em.
        max_fd = min(pkgcore.spawn.max_fd_limit, 1024)
        env.update({
            "PKGCORE_EBD_READ_FD": str(max_fd - 2),
            "PKGCORE_EBD_WRITE_FD": str(max_fd - 1)
        })
        self.pid = spawn_func(["/bin/bash", self.ebd, "daemonize"],
                              fd_pipes={
                                  0: 0,
                                  1: 1,
                                  2: 2,
                                  max_fd - 2: cread,
                                  max_fd - 1: dwrite
                              },
                              returnpid=True,
                              env=env,
                              *args,
                              **spawn_opts)[0]

        os.close(cread)
        os.close(dwrite)
        self.ebd_write = os.fdopen(cwrite, "w")
        self.ebd_read = os.fdopen(dread, "r")

        # basically a quick "yo" to the daemon
        self.write("dude?")
        if not self.expect("dude!"):
            logger.error("error in server coms, bailing.")
            raise InitializationError(
                "expected 'dude!' response from ebd, which wasn't received. "
                "likely a bug")
        self.write(e_const.EAPI_BIN_PATH)
        # send PKGCORE_PYTHON_BINARY...
        self.write(pkgcore.spawn.find_invoking_python())
        self.write(
            os.pathsep.join([
                normpath(abspath(pjoin(pkgcore.__file__, os.pardir,
                                       os.pardir))),
                os.environ.get('PYTHONPATH', '')
            ]))
        if self.__sandbox:
            self.write("sandbox_log?")
            self.__sandbox_log = self.read().split()[0]
        self.dont_export_vars = self.read().split()
        # locking isn't used much, but w/ threading this will matter
        self.unlock()
Example #21
    def check_args(parser, namespace):
        profiles_dir = getattr(namespace, "profiles_dir", None)
        if profiles_dir is not None:
            profiles_dir = abspath(profiles_dir)
            if not os.path.isdir(profiles_dir):
                parser.error(f"invalid profiles base: {profiles_dir!r}")

        selected_profiles = namespace.profiles
        if selected_profiles is None:
            selected_profiles = ((), ())

        if profiles_dir:
            profiles_obj = repo_objs.BundledProfiles(profiles_dir)
        else:
            profiles_obj = namespace.target_repo.config.profiles

        def norm_name(s):
            """Expand status keywords and format paths."""
            if s in ('dev', 'exp', 'stable', 'deprecated'):
                for x in profiles_obj.paths(s):
                    yield x
            else:
                yield '/'.join([_f for _f in s.split('/') if _f])

        disabled, enabled = selected_profiles
        disabled = set(disabled)
        enabled = set(enabled)

        # remove profiles that are both enabled and disabled
        toggled = enabled.intersection(disabled)
        enabled = enabled.difference(toggled)
        disabled = disabled.difference(toggled)
        ignore_deprecated = 'deprecated' not in enabled

        # expand status keywords, e.g. 'stable' -> set of stable profiles
        disabled = set(chain.from_iterable(map(norm_name, disabled)))
        enabled = set(chain.from_iterable(map(norm_name, enabled)))

        # If no profiles are enabled, then all that are defined in
        # profiles.desc are scanned except ones that are explicitly disabled.
        if not enabled:
            enabled = {
                profile
                for profile, status in chain.from_iterable(
                    profiles_obj.arch_profiles.values())
            }

        profile_paths = enabled.difference(disabled)

        # only default to using cache when run without target args within a repo
        if namespace.cache is None and namespace.default_target is None:
            namespace.cache = False

        # initialize cache dir
        cache_dir = pjoin(const.USER_CACHE_PATH, 'pkgcheck')
        namespace.cache_file = pjoin(cache_dir, 'profiles.pickle')
        if ((namespace.cache is None or namespace.cache)
                and not os.path.exists(cache_dir)):
            try:
                os.makedirs(cache_dir)
            except IOError as e:
                raise UserException(
                    f'failed creating profiles cache: {cache_dir!r}: {e.strerror}'
                )
        namespace.forced_cache = bool(namespace.cache)

        # We hold onto the profiles as we're going, due to the fact that
        # profile nodes are weakly cached; hold onto all for this loop, avoids
        # a lot of reparsing at the expense of slightly more memory usage
        # temporarily.
        cached_profiles = []

        arch_profiles = defaultdict(list)
        for profile_path in profile_paths:
            try:
                p = profiles_obj.create_profile(profile_path)
            except profiles.ProfileError as e:
                # Only throw errors if the profile was selected by the user, bad
                # repo profiles will be caught during repo metadata scans.
                if namespace.profiles is not None:
                    parser.error(f'invalid profile: {e.path!r}: {e.error}')
                continue
            if ignore_deprecated and p.deprecated:
                continue
            cached_profiles.append(p)
            if p.arch is None:
                parser.error(
                    f"profile {p.path!r} lacks arch settings, unable to use it"
                )
            arch_profiles[p.arch].append((profile_path, p))

        namespace.arch_profiles = arch_profiles
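For reference, the path branch of norm_name above just collapses empty components; a quick sketch:

s = '//default/linux/amd64/'
print('/'.join(_f for _f in s.split('/') if _f))   # default/linux/amd64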
Example #22
    def __init__(self, pkg, initial_env=None, env_data_source=None,
                 features=None, observer=None, clean=True, tmp_offset=None,
                 use_override=None, allow_fetching=False):
        """
        :param pkg:
            :class:`pkgcore.ebuild.ebuild_src.package`
            instance this env is being setup for
        :param initial_env: initial environment to use for this ebuild
        :param env_data_source: a :obj:`snakeoil.data_source.base` instance
            to restore the environment from- used for restoring the
            state of an ebuild processing, whether for unmerging, or
            walking phases during building
        :param features: ebuild features, hold over from portage,
            will be broken down at some point
        """

        if use_override is not None:
            use = use_override
        else:
            use = pkg.use

        self.allow_fetching = allow_fetching

        if not hasattr(self, "observer"):
            self.observer = observer
        if not pkg.eapi.is_supported:
            raise TypeError(
                "package %s uses an unsupported eapi: %s" % (pkg, pkg.eapi))

        if initial_env is not None:
            # copy.
            self.env = dict(initial_env)
            for x in ("USE", "ACCEPT_LICENSE"):
                if x in self.env:
                    del self.env[x]
        else:
            self.env = {}

        if "PYTHONPATH" in os.environ:
            self.env["PYTHONPATH"] = os.environ["PYTHONPATH"]

        if features is None:
            features = self.env.get("FEATURES", ())

        # XXX: note this is just EAPI 3 compatibility; not full prefix, soon..
        self.env["ROOT"] = self.domain.root
        self.prefix_mode = pkg.eapi.options.prefix_capable or 'force-prefix' in features
        self.env["PKGCORE_PREFIX_SUPPORT"] = 'false'
        self.prefix = '/'
        if self.prefix_mode:
            self.prefix = self.domain.prefix
            self.env['EPREFIX'] = self.prefix.rstrip('/')
            self.env['EROOT'] = abspath(
                pjoin(self.domain.root, self.prefix.lstrip('/'))).rstrip('/') + '/'
            self.env["PKGCORE_PREFIX_SUPPORT"] = 'true'

        # set the list of internally implemented EAPI specific functions that
        # shouldn't be exported
        if os.path.exists(pjoin(const.EBD_PATH, 'funcnames', str(pkg.eapi))):
            with open(pjoin(const.EBD_PATH, 'funcnames', str(pkg.eapi)), 'r') as f:
                eapi_funcs = f.readlines()
        else:
            ret, eapi_funcs = spawn_get_output(
                [pjoin(const.EBD_PATH, 'generate_eapi_func_list.bash'), str(pkg.eapi)])
            if ret != 0:
                raise Exception("failed to generate list of EAPI %s specific functions" % str(pkg.eapi))
        self.env["PKGCORE_EAPI_FUNCS"] = ' '.join(x.strip() for x in eapi_funcs)

        self.env_data_source = env_data_source
        if (env_data_source is not None and
                not isinstance(env_data_source, data_source.base)):
            raise TypeError(
                "env_data_source must be None, or a pkgcore.data_source.base "
                "derivative: %s: %s" % (
                    env_data_source.__class__, env_data_source))

        self.features = set(x.lower() for x in features)

        self.env["FEATURES"] = ' '.join(sorted(self.features))

        iuse_effective_regex = (re.escape(x) for x in pkg.iuse_effective)
        iuse_effective_regex = "^(%s)$" % "|".join(iuse_effective_regex)
        iuse_effective_regex = iuse_effective_regex.replace("\\.\\*", ".*")
        self.env["PKGCORE_IUSE_EFFECTIVE"] = iuse_effective_regex

        expected_ebuild_env(pkg, self.env, env_source_override=self.env_data_source)

        self.env["PKGCORE_FINALIZED_RESTRICT"] = ' '.join(str(x) for x in pkg.restrict)

        self.restrict = pkg.restrict

        for x in ("sandbox", "userpriv"):
            setattr(self, x, self.feat_or_bool(x) and not (x in self.restrict))
        if self.userpriv and os.getuid() != 0:
            self.userpriv = False

        if "PORT_LOGDIR" in self.env:
            self.logging = pjoin(
                self.env["PORT_LOGDIR"],
                "%s:%s:%s.log" % (
                    pkg.cpvstr, self.__class__.__name__,
                    time.strftime("%Y%m%d-%H%M%S", time.localtime())))
            del self.env["PORT_LOGDIR"]
        else:
            self.logging = False

        self.env["XARGS"] = xargs

        self.bashrc = self.env.pop("bashrc", ())

        self.pkg = pkg
        self.eapi = pkg.eapi
        wipes = [k for k, v in self.env.iteritems()
                 if not isinstance(v, basestring)]
        for k in wipes:
            del self.env[k]

        self.set_op_vars(tmp_offset)
        self.clean_at_start = clean
        self.clean_needed = False
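A standalone sketch of the PKGCORE_IUSE_EFFECTIVE regex construction used above (the flag names are illustrative): escaped '.*' sequences are deliberately un-escaped again so wildcard flags keep working.

import re

iuse_effective = ['alsa', 'doc', 'userland_.*']
pattern = '^(%s)$' % '|'.join(re.escape(x) for x in iuse_effective)
pattern = pattern.replace('\\.\\*', '.*')  # restore intended wildcards
print(bool(re.match(pattern, 'userland_GNU')))  # True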
Example #23
def parent_paths(self, data):
    return tuple(abspath(pjoin(self.path, x)) for x in data)
Example #24
    def __init__(self, userpriv, sandbox):
        """
        :param sandbox: enables a sandboxed processor
        :param userpriv: enables a userpriv'd processor
        """

        self.lock()
        self.ebd = e_const.EBUILD_DAEMON_PATH
        spawn_opts = {'umask': 0002}

        self._preloaded_eclasses = {}
        self._eclass_caching = False
        self._outstanding_expects = []
        self._metadata_paths = None

        if userpriv:
            self.__userpriv = True
            spawn_opts.update({
                "uid": os_data.portage_uid,
                "gid": os_data.portage_gid,
                "groups": [os_data.portage_gid]})
        else:
            if pkgcore.spawn.is_userpriv_capable():
                spawn_opts.update({"gid": os_data.portage_gid,
                                   "groups": [0, os_data.portage_gid]})
            self.__userpriv = False

        # open the pipes to be used for chatting with the new daemon
        cread, cwrite = os.pipe()
        dread, dwrite = os.pipe()
        self.__sandbox = False

        # since it's questionable which spawn method we'll use (if
        # sandbox fex), we ensure the bashrc is invalid.
        env = {x: "/etc/portage/spork/not/valid/ha/ha"
               for x in ("BASHRC", "BASH_ENV")}

        if int(os.environ.get('PKGCORE_PERF_DEBUG', 0)):
            env["PKGCORE_PERF_DEBUG"] = os.environ['PKGCORE_PERF_DEBUG']
        if int(os.environ.get('PKGCORE_DEBUG', 0)):
            env["PKGCORE_DEBUG"] = os.environ['PKGCORE_DEBUG']
        if int(os.environ.get('PKGCORE_NOCOLOR', 0)):
            env["PKGCORE_NOCOLOR"] = os.environ['PKGCORE_NOCOLOR']
            if sandbox:
                env["NOCOLOR"] = os.environ['PKGCORE_NOCOLOR']

        # prepend script dir to PATH for git repo or unpacked tarball, for
        # installed versions it's empty
        env["PATH"] = os.pathsep.join(
            list(const.PATH_FORCED_PREPEND) + [os.environ["PATH"]])

        args = []
        if sandbox:
            if not pkgcore.spawn.is_sandbox_capable():
                raise ValueError("spawn lacks sandbox capabilities")
            self.__sandbox = True
            spawn_func = pkgcore.spawn.spawn_sandbox
#            env.update({"SANDBOX_DEBUG":"1", "SANDBOX_DEBUG_LOG":"/var/tmp/test"})
        else:
            spawn_func = pkgcore.spawn.spawn

        # force to a neutral dir so that sandbox won't explode if
        # ran from a nonexistent dir
        spawn_opts["cwd"] = e_const.EBD_PATH
        # Force the pipes to be high up fd wise so nobody stupidly hits 'em, we
        # start from max-3 to avoid a bug in older bash where it doesn't check
        # if an fd is in use before claiming it.
        max_fd = min(pkgcore.spawn.max_fd_limit, 1024)
        env.update({
            "PKGCORE_EBD_READ_FD": str(max_fd-4),
            "PKGCORE_EBD_WRITE_FD": str(max_fd-3)})
        # pgid=0: Each ebuild processor is the process group leader for all its
        # spawned children so everything can be terminated easily if necessary.
        self.pid = spawn_func(
            [const.BASH_BINARY, self.ebd, "daemonize"],
            fd_pipes={0: 0, 1: 1, 2: 2, max_fd-4: cread, max_fd-3: dwrite},
            returnpid=True, env=env, pgid=0, *args, **spawn_opts)[0]

        os.close(cread)
        os.close(dwrite)
        self.ebd_write = os.fdopen(cwrite, "w")
        self.ebd_read = os.fdopen(dread, "r")

        # basically a quick "yo" to the daemon
        self.write("dude?")
        if not self.expect("dude!"):
            logger.error("error in server coms, bailing.")
            raise InitializationError(
                "expected 'dude!' response from ebd, which wasn't received. "
                "likely a bug")
        self.write(e_const.EBD_PATH)

        # send PKGCORE_PYTHON_BINARY...
        self.write(pkgcore.spawn.find_invoking_python())
        self.write(
            os.pathsep.join([
                normpath(abspath(pjoin(pkgcore.__file__, os.pardir, os.pardir))),
                os.environ.get('PYTHONPATH', '')])
            )
        if self.__sandbox:
            self.write("sandbox_log?")
            self.__sandbox_log = self.read().split()[0]
        self.dont_export_vars = self.read().split()
        # locking isn't used much, but w/ threading this will matter
        self.unlock()
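The snippet above boils down to a two-pipe handshake: the parent keeps one end of each pipe, hands the other ends to the daemon, and greets it before doing any real work. Below is a minimal sketch of that pattern, using os.fork in place of pkgcore's spawn machinery; the "dude?"/"dude!" strings are kept from the snippet, everything else (newline framing included) is illustrative.

import os

cread, cwrite = os.pipe()   # parent writes commands, child reads them
dread, dwrite = os.pipe()   # child writes responses, parent reads them

pid = os.fork()
if pid == 0:
    # child: close the parent's pipe ends, answer the greeting, exit
    os.close(cwrite)
    os.close(dread)
    with os.fdopen(cread) as commands, os.fdopen(dwrite, "w") as responses:
        if commands.readline().strip() == "dude?":
            responses.write("dude!\n")
    os._exit(0)

# parent: close the child's pipe ends, then run the same greeting
# exchange the snippet performs via write()/expect()
os.close(cread)
os.close(dwrite)
ebd_write = os.fdopen(cwrite, "w")
ebd_read = os.fdopen(dread)
ebd_write.write("dude?\n")
ebd_write.flush()
assert ebd_read.readline().strip() == "dude!"
os.waitpid(pid, 0)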
Example #29
0
    def __init__(
        self,
        pkg,
        initial_env=None,
        env_data_source=None,
        features=None,
        observer=None,
        clean=True,
        tmp_offset=None,
        use_override=None,
        allow_fetching=False,
    ):
        """
        :param pkg:
            :class:`pkgcore.ebuild.ebuild_src.package`
            instance this env is being setup for
        :param initial_env: initial environment to use for this ebuild
        :param env_data_source: a :obj:`snakeoil.data_source.base` instance
            to restore the environment from- used for restoring the
            state of an ebuild processing, whether for unmerging, or
            walking phases during building
        :param features: ebuild features, hold over from portage,
            will be broken down at some point
        """

        if use_override is not None:
            use = use_override
        else:
            use = pkg.use

        self.allow_fetching = allow_fetching

        if not hasattr(self, "observer"):
            self.observer = observer
        if not pkg.eapi.is_supported:
            raise TypeError("package %s uses an unsupported eapi: %s" % (pkg, pkg.eapi))

        if initial_env is not None:
            # copy.
            self.env = dict(initial_env)
            for x in ("USE", "ACCEPT_LICENSE"):
                if x in self.env:
                    del self.env[x]
        else:
            self.env = {}

        if "PYTHONPATH" in os.environ:
            self.env["PYTHONPATH"] = os.environ["PYTHONPATH"]

        if features is None:
            features = self.env.get("FEATURES", ())

        # XXX: note this is just EAPI 3 compatibility; not full prefix, soon...
        self.env["ROOT"] = self.domain.root
        self.prefix_mode = pkg.eapi.options.prefix_capable or "force-prefix" in features
        self.env["PKGCORE_PREFIX_SUPPORT"] = "false"
        self.prefix = "/"
        if self.prefix_mode:
            self.prefix = self.domain.prefix
            self.env["EPREFIX"] = self.prefix.rstrip("/")
            self.env["EROOT"] = abspath(pjoin(self.domain.root, self.prefix.lstrip("/"))).rstrip("/") + "/"
            self.env["PKGCORE_PREFIX_SUPPORT"] = "true"

        # set the list of internally implemented EAPI specific functions that
        # shouldn't be exported
        if os.path.exists(pjoin(const.EBD_PATH, "funcnames", str(pkg.eapi))):
            with open(pjoin(const.EBD_PATH, "funcnames", str(pkg.eapi)), "r") as f:
                eapi_funcs = f.readlines()
        else:
            ret, eapi_funcs = spawn_get_output([pjoin(const.EBD_PATH, "generate_eapi_func_list.bash"), str(pkg.eapi)])
            if ret != 0:
                raise Exception("failed to generate list of EAPI %s specific functions" % str(pkg.eapi))
        self.env["PKGCORE_EAPI_FUNCS"] = " ".join(x.strip() for x in eapi_funcs)

        self.env_data_source = env_data_source
        if env_data_source is not None and not isinstance(env_data_source, data_source.base):
            raise TypeError(
                "env_data_source must be None, or a snakeoil.data_source.base "
                "derivative: %s: %s" % (env_data_source.__class__, env_data_source)
            )

        self.features = set(x.lower() for x in features)

        self.env["FEATURES"] = " ".join(sorted(self.features))

        iuse_effective_regex = (re.escape(x) for x in pkg.iuse_effective)
        iuse_effective_regex = "^(%s)$" % "|".join(iuse_effective_regex)
        iuse_effective_regex = iuse_effective_regex.replace("\\.\\*", ".*")
        self.env["PKGCORE_IUSE_EFFECTIVE"] = iuse_effective_regex

        expected_ebuild_env(pkg, self.env, env_source_override=self.env_data_source)

        self.env["PKGCORE_FINALIZED_RESTRICT"] = " ".join(str(x) for x in pkg.restrict)

        self.restrict = pkg.restrict

        for x in ("sandbox", "userpriv"):
            setattr(self, x, self.feat_or_bool(x) and not (x in self.restrict))
        if self.userpriv and os.getuid() != 0:
            self.userpriv = False

        if "PORT_LOGDIR" in self.env:
            self.logging = pjoin(
                self.env["PORT_LOGDIR"],
                "%s:%s:%s.log"
                % (pkg.cpvstr, self.__class__.__name__, time.strftime("%Y%m%d-%H%M%S", time.localtime())),
            )
            del self.env["PORT_LOGDIR"]
        else:
            self.logging = False

        self.env["XARGS"] = xargs

        self.bashrc = self.env.pop("bashrc", ())

        self.pkg = pkg
        self.eapi = pkg.eapi
        wipes = [k for k, v in self.env.items() if not isinstance(v, str)]
        for k in wipes:
            del self.env[k]

        self.set_op_vars(tmp_offset)
        self.clean_at_start = clean
        self.clean_needed = False
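The PKGCORE_IUSE_EFFECTIVE construction above is worth isolating: every flag is escaped, the whole set is anchored into one alternation, and then the escaping of ".*" is undone so wildcard-expanded flags still match. A standalone sketch with hypothetical flag names:

import re

# hypothetical flags standing in for pkg.iuse_effective
iuse_effective = ["doc", "test", "python_targets_.*"]

regex = "^(%s)$" % "|".join(re.escape(x) for x in iuse_effective)
# re.escape() also escaped the intentional wildcards; restore them
regex = regex.replace("\\.\\*", ".*")

matcher = re.compile(regex)
assert matcher.match("doc")
assert matcher.match("python_targets_python3_11")
assert not matcher.match("docs")   # anchored, so no prefix matches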
Example #30
0
def config_from_make_conf(location=None, profile_override=None, **kwargs):
    """generate a config using portage's config files

    Args:
        location (optional[str]): path to the portage config directory,
            (defaults to /etc/portage)
        profile_override (optional[str]): profile to use instead of the current system
            profile, i.e. the target of the /etc/portage/make.profile symlink
        configroot (optional[str]): location for various portage config files (defaults to /)
        root (optional[str]): target root filesystem (defaults to /)
        buildpkg (optional[bool]): forcibly disable/enable building binpkgs, otherwise
            FEATURES=buildpkg from make.conf is used

    Returns:
        dict: config settings
    """

    # this actually differs from portage parsing: we allow
    # make.globals to provide vars used in make.conf, while portage keeps
    # them separate (kind of annoying)

    config_dir = location if location is not None else '/etc/portage'
    config_dir = pjoin(
        os.environ.get('PORTAGE_CONFIGROOT', kwargs.pop('configroot', '/')),
        config_dir.lstrip('/'))

    # this isn't preserving incremental behaviour for features/use unfortunately

    make_conf = {}
    try:
        load_make_conf(make_conf, pjoin(const.CONFIG_PATH, 'make.globals'))
    except IGNORED_EXCEPTIONS:
        raise
    except Exception:
        raise_from(errors.ParsingError("failed to load make.globals"))
    load_make_conf(
        make_conf, pjoin(config_dir, 'make.conf'), required=False,
        allow_sourcing=True, incrementals=True)

    root = os.environ.get("ROOT", kwargs.pop('root', make_conf.get("ROOT", "/")))
    gentoo_mirrors = [
        x.rstrip("/") + "/distfiles" for x in make_conf.pop("GENTOO_MIRRORS", "").split()]

    # this is flawed... it'll pick up -some-feature
    features = make_conf.get("FEATURES", "").split()

    config = {}
    triggers = []

    def add_trigger(name, kls_path, **extra_args):
        d = extra_args.copy()
        d['class'] = kls_path
        config[name] = basics.ConfigSectionFromStringDict(d)
        triggers.append(name)

    # sets...
    add_sets(config, root, config_dir)

    add_profile(config, config_dir, profile_override)

    kwds = {
        "class": "pkgcore.vdb.ondisk.tree",
        "location": pjoin(root, 'var', 'db', 'pkg'),
        "cache_location": '/var/cache/edb/dep/var/db/pkg',
    }
    config["vdb"] = basics.AutoConfigSection(kwds)

    try:
        repos_conf_defaults, repos_conf = load_repos_conf(pjoin(config_dir, 'repos.conf'))
    except errors.ParsingError as e:
        if getattr(getattr(e, 'exc', None), 'errno', None) != errno.ENOENT:
            raise
        try:
            # fallback to defaults provided by pkgcore
            repos_conf_defaults, repos_conf = load_repos_conf(
                pjoin(const.CONFIG_PATH, 'repos.conf'))
        except IGNORED_EXCEPTIONS:
            raise
        except Exception:
            raise_from(errors.ParsingError(
                "failed to find a usable repos.conf"))

    make_repo_syncers(config, repos_conf, make_conf)

    config['ebuild-repo-common'] = basics.AutoConfigSection({
        'class': 'pkgcore.ebuild.repository.tree',
        'default_mirrors': gentoo_mirrors,
        'inherit-only': True,
        'ignore_paludis_versioning': ('ignore-paludis-versioning' in features),
    })

    default_repo_path = repos_conf[repos_conf_defaults['main-repo']]['location']
    repo_map = {}

    for repo_name, repo_opts in repos_conf.items():
        repo_path = repo_opts['location']

        # XXX: Hack for portage-2 profile format support.
        repo_config = RepoConfig(repo_path, repo_name)
        repo_map[repo_config.repo_id] = repo_config

        # repo configs
        repo_conf = {
            'class': 'pkgcore.ebuild.repo_objs.RepoConfig',
            'config_name': repo_name,
            'location': repo_path,
            'syncer': 'sync:' + repo_name,
        }

        # repo trees
        repo = {
            'inherit': ('ebuild-repo-common',),
            'repo_config': 'conf:' + repo_name,
        }

        # metadata cache
        if repo_config.cache_format is not None:
            cache_name = 'cache:' + repo_name
            config[cache_name] = make_cache(repo_config.cache_format, repo_path)
            repo['cache'] = cache_name

        if repo_path == default_repo_path:
            repo_conf['default'] = True

        config['conf:' + repo_name] = basics.AutoConfigSection(repo_conf)
        config[repo_name] = basics.AutoConfigSection(repo)

    # XXX: Hack for portage-2 profile format support. We need to figure out how
    # to dynamically create this from the config at runtime on attr access.
    profiles.ProfileNode._repo_map = ImmutableDict(repo_map)

    repos = list(repos_conf)
    if len(repos) > 1:
        config['repo-stack'] = basics.FakeIncrementalDictConfigSection(
            my_convert_hybrid, {
                'class': 'pkgcore.repository.multiplex.config_tree',
                'repositories': tuple(repos)})
    else:
        config['repo-stack'] = basics.section_alias(repos[0], 'repo')

    config['vuln'] = basics.AutoConfigSection({
        'class': SecurityUpgradesViaProfile,
        'ebuild_repo': 'repo-stack',
        'vdb': 'vdb',
        'profile': 'profile',
    })
    config['glsa'] = basics.section_alias(
        'vuln', SecurityUpgradesViaProfile.pkgcore_config_type.typename)

    # binpkg.
    buildpkg = 'buildpkg' in features or kwargs.pop('buildpkg', False)
    pkgdir = os.environ.get("PKGDIR", make_conf.pop('PKGDIR', None))
    if pkgdir is not None:
        try:
            pkgdir = abspath(pkgdir)
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            if buildpkg or set(features).intersection(
                    ('pristine-buildpkg', 'buildsyspkg', 'unmerge-backup')):
                logger.warning("disabling buildpkg related features since PKGDIR doesn't exist")
            pkgdir = None
        else:
            if not ensure_dirs(pkgdir, mode=0o755, minimal=True):
                logger.warning("disabling buildpkg related features since PKGDIR either doesn't "
                               "exist, or lacks 0755 minimal permissions")
                pkgdir = None
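One subtle detail in config_from_make_conf is the lstrip('/') before the pjoin: os.path.join discards everything before an absolute component, so the default '/etc/portage' would otherwise ignore PORTAGE_CONFIGROOT entirely. A minimal sketch, with the environment lookup replaced by an explicit parameter:

from os.path import join as pjoin

def resolve_config_dir(configroot='/', location=None):
    """configroot plays the role of PORTAGE_CONFIGROOT above."""
    config_dir = location if location is not None else '/etc/portage'
    # without lstrip('/'), pjoin('/mnt/gentoo', '/etc/portage')
    # would return '/etc/portage' and drop the configroot prefix
    return pjoin(configroot, config_dir.lstrip('/'))

assert resolve_config_dir() == '/etc/portage'
assert resolve_config_dir('/mnt/gentoo') == '/mnt/gentoo/etc/portage'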
Example #31
0
    def __init__(self, userpriv, sandbox, fd_pipes=None):
        """
        :param sandbox: enables a sandboxed processor
        :param userpriv: enables a userpriv'd processor
        :param fd_pipes: mapping from existing fd to fd inside the ebd process
        """
        self.lock()
        self.ebd = e_const.EBUILD_DAEMON_PATH
        spawn_opts = {'umask': 0o002}

        self._preloaded_eclasses = {}
        self._eclass_caching = False
        self._outstanding_expects = []
        self._metadata_paths = None

        if userpriv:
            self.__userpriv = True
            spawn_opts.update({
                "uid": os_data.portage_uid,
                "gid": os_data.portage_gid,
                "groups": [os_data.portage_gid],
            })
        else:
            if spawn.is_userpriv_capable():
                spawn_opts.update({
                    "gid": os_data.portage_gid,
                    "groups": [0, os_data.portage_gid],
                })
            self.__userpriv = False

        # open the pipes to be used for chatting with the new daemon
        cread, cwrite = os.pipe()
        dread, dwrite = os.pipe()
        self.__sandbox = False

        self._fd_pipes = fd_pipes if fd_pipes is not None else {}

        # since it's questionable which spawn method we'll use (if
        # sandbox, for example), we ensure the bashrc is invalid.
        env = {
            x: "/etc/portage/spork/not/valid/ha/ha"
            for x in ("BASHRC", "BASH_ENV")
        }

        if int(os.environ.get('PKGCORE_PERF_DEBUG', 0)):
            env["PKGCORE_PERF_DEBUG"] = os.environ['PKGCORE_PERF_DEBUG']
        if int(os.environ.get('PKGCORE_DEBUG', 0)):
            env["PKGCORE_DEBUG"] = os.environ['PKGCORE_DEBUG']
        if int(os.environ.get('PKGCORE_NOCOLOR', 0)):
            env["PKGCORE_NOCOLOR"] = os.environ['PKGCORE_NOCOLOR']
            if sandbox:
                env["NOCOLOR"] = os.environ['PKGCORE_NOCOLOR']

        # prepend script dir to PATH for git repo or unpacked tarball, for
        # installed versions it's empty
        env["PATH"] = os.pathsep.join(
            list(const.PATH_FORCED_PREPEND) + [os.environ["PATH"]])

        if sandbox:
            if not spawn.is_sandbox_capable():
                raise ValueError("spawn lacks sandbox capabilities")
            self.__sandbox = True
            spawn_func = spawn.spawn_sandbox
#            env.update({"SANDBOX_DEBUG":"1", "SANDBOX_DEBUG_LOG":"/var/tmp/test"})
        else:
            spawn_func = spawn.spawn

        # force to a neutral dir so that sandbox won't explode if
        # run from a nonexistent dir
        spawn_opts["cwd"] = e_const.EBD_PATH

        # Force the pipes to be high up fd-wise so nobody stupidly hits 'em;
        # we start from max-3 to avoid a bug in older bash where it doesn't
        # check if an fd is in use before claiming it.
        max_fd = min(spawn.max_fd_limit, 1024)
        env.update({
            "PKGCORE_EBD_READ_FD": str(max_fd - 4),
            "PKGCORE_EBD_WRITE_FD": str(max_fd - 3),
        })

        # allow any pipe overrides except the ones we use to communicate
        ebd_pipes = {0: 0, 1: 1, 2: 2}
        ebd_pipes.update(self._fd_pipes)
        ebd_pipes.update({max_fd - 4: cread, max_fd - 3: dwrite})

        # pgid=0: Each ebuild processor is the process group leader for all its
        # spawned children so everything can be terminated easily if necessary.
        self.pid = spawn_func([spawn.BASH_BINARY, self.ebd, "daemonize"],
                              fd_pipes=ebd_pipes,
                              returnpid=True,
                              env=env,
                              pgid=0,
                              **spawn_opts)[0]

        os.close(cread)
        os.close(dwrite)
        self.ebd_write = os.fdopen(cwrite, "w")
        self.ebd_read = os.fdopen(dread, "r")

        # basically a quick "yo" to the daemon
        self.write("dude?")
        if not self.expect("dude!"):
            logger.error("error in server coms, bailing.")
            raise InternalError(
                "expected 'dude!' response from ebd, which wasn't received. "
                "likely a bug")

        # send PKGCORE_PYTHON_BINARY...
        self.write(spawn.find_invoking_python())
        self.write(
            os.pathsep.join([
                normpath(abspath(pjoin(__file__, os.pardir, os.pardir))),
                os.environ.get('PYTHONPATH', '')
            ]))
        if self.__sandbox:
            self.write("sandbox_log?")
            self.__sandbox_log = self.read().split()[0]
        else:
            self.write("no_sandbox")
        self._readonly_vars = frozenset(self.read().split())
        # locking isn't used much, but w/ threading this will matter
        self.unlock()
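Relative to the first processor in this listing, this one accepts caller-supplied fd_pipes but layers the mappings so the two daemon-communication descriptors can never be clobbered; the ordering of the update() calls is what enforces that. A standalone sketch with made-up fd numbers:

# made-up numbers for illustration
max_fd = 1024
fd_pipes = {1: 5, 2: 5}        # hypothetical caller override: stdout/stderr to fd 5
cread, dwrite = 7, 8           # stand-ins for the freshly created pipe ends

ebd_pipes = {0: 0, 1: 1, 2: 2}                 # defaults: inherit std streams
ebd_pipes.update(fd_pipes)                     # caller overrides apply next
ebd_pipes.update({max_fd - 4: cread,           # the reserved channel fds are
                  max_fd - 3: dwrite})         # applied last, so they always win

assert ebd_pipes == {0: 0, 1: 5, 2: 5, 1020: 7, 1021: 8}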
Example #32
0
def config_from_make_conf(location="/etc/", profile_override=None, **kwargs):
    """
    generate a config from a file location

    :param location: location the portage configuration is based in,
        defaults to /etc
    :param profile_override: profile to use instead of the current system
        profile, i.e. the target of the /etc/portage/make.profile
        (or deprecated /etc/make.profile) symlink
    """

    # this actually differs from portage parsing: we allow
    # make.globals to provide vars used in make.conf, while portage keeps
    # them separate (kind of annoying)

    config_root = os.environ.get("PORTAGE_CONFIGROOT", "/")
    base_path = pjoin(config_root, location.strip("/"))
    portage_base = pjoin(base_path, "portage")

    # this isn't preserving incremental behaviour for features/use
    # unfortunately

    conf_dict = {}
    try:
        load_make_config(conf_dict, pjoin(base_path, 'make.globals'))
    except errors.ParsingError as e:
        if getattr(getattr(e, 'exc', None), 'errno', None) != errno.ENOENT:
            raise
        try:
            load_make_config(conf_dict, const.MAKE_GLOBALS)
        except IGNORED_EXCEPTIONS:
            raise
        except Exception:
            raise_from(
                errors.ParsingError("failed to find a usable make.globals"))
    load_make_config(conf_dict,
                     pjoin(base_path, 'make.conf'),
                     required=False,
                     allow_sourcing=True,
                     incrementals=True)
    load_make_config(conf_dict,
                     pjoin(portage_base, 'make.conf'),
                     required=False,
                     allow_sourcing=True,
                     incrementals=True)

    root = os.environ.get("ROOT", conf_dict.get("ROOT", "/"))
    gentoo_mirrors = [
        x.rstrip("/") + "/distfiles"
        for x in conf_dict.pop("GENTOO_MIRRORS", "").split()
    ]

    # this is flawed... it'll pick up -some-feature
    features = conf_dict.get("FEATURES", "").split()

    new_config = {}
    triggers = []

    def add_trigger(name, kls_path, **extra_args):
        d = extra_args.copy()
        d['class'] = kls_path
        new_config[name] = basics.ConfigSectionFromStringDict(d)
        triggers.append(name)

    # sets...
    add_sets(new_config, root, portage_base)

    user_profile_path = pjoin(base_path, "portage", "profile")
    add_profile(new_config, base_path, user_profile_path, profile_override)

    kwds = {
        "class": "pkgcore.vdb.ondisk.tree",
        "location": pjoin(root, 'var', 'db', 'pkg'),
        "cache_location": pjoin(config_root, 'var', 'cache', 'edb', 'dep', 'var', 'db', 'pkg'),
    }
    new_config["vdb"] = basics.AutoConfigSection(kwds)

    # options used by rsync-based syncers
    rsync_opts = isolate_rsync_opts(conf_dict)

    repo_opts = {}
    overlay_syncers = {}
    try:
        default_repo_opts, repo_opts = load_repos_conf(
            pjoin(portage_base, 'repos.conf'))
    except errors.ParsingError as e:
        if getattr(getattr(e, 'exc', None), 'errno', None) != errno.ENOENT:
            raise

    if repo_opts:
        main_repo_id = default_repo_opts['main-repo']
        main_repo = repo_opts[main_repo_id]['location']
        overlay_repos = [
            opts['location'] for repo, opts in repo_opts.items()
            if opts['location'] != main_repo
        ]
        main_syncer = repo_opts[main_repo_id].get('sync-uri', None)
    else:
        # fallback to PORTDIR and PORTDIR_OVERLAY settings
        main_repo = normpath(
            os.environ.get("PORTDIR", conf_dict.pop("PORTDIR",
                                                    "/usr/portage")).strip())
        overlay_repos = os.environ.get("PORTDIR_OVERLAY",
                                       conf_dict.pop("PORTDIR_OVERLAY",
                                                     "")).split()
        overlay_repos = [normpath(x) for x in overlay_repos]
        main_syncer = conf_dict.pop("SYNC", None)

        if overlay_repos and '-layman-sync' not in features:
            overlay_syncers = add_layman_syncers(new_config,
                                                 rsync_opts,
                                                 overlay_repos,
                                                 config_root=config_root)

    if main_syncer is not None:
        make_syncer(new_config, main_repo, main_syncer, rsync_opts)

    if overlay_repos and '-autodetect-sync' not in features:
        for path in overlay_repos:
            if path not in overlay_syncers:
                overlay_syncers[path] = make_autodetect_syncer(
                    new_config, path)

    repos = [main_repo] + overlay_repos
    default_repos = list(reversed(repos))

    new_config['ebuild-repo-common'] = basics.AutoConfigSection({
        'class': 'pkgcore.ebuild.repository.slavedtree',
        'default_mirrors': gentoo_mirrors,
        'inherit-only': True,
        'ignore_paludis_versioning': ('ignore-paludis-versioning' in features),
    })

    rsync_portdir_cache = 'metadata-transfer' not in features
    # if a metadata cache exists, use it.
    if rsync_portdir_cache:
        for cache_type, frag in (('flat_hash.md5_cache', 'md5-cache'),
                                 ('metadata.database', 'cache')):
            if not os.path.exists(pjoin(main_repo, 'metadata', frag)):
                continue
            new_config["cache:%s/metadata/cache" %
                       (main_repo, )] = basics.AutoConfigSection({
                           'class':
                           'pkgcore.cache.' + cache_type,
                           'readonly':
                           True,
                           'location':
                           main_repo,
                       })
            break
        else:
            rsync_portdir_cache = False

    repo_map = {}

    for tree_loc in repos:
        # XXX: Hack for portage-2 profile format support.
        repo_config = RepoConfig(tree_loc)
        repo_map[repo_config.repo_id] = repo_config

        # repo configs
        conf = {
            'class': 'pkgcore.ebuild.repo_objs.RepoConfig',
            'location': tree_loc,
        }
        if 'sync:%s' % (tree_loc, ) in new_config:
            conf['syncer'] = 'sync:%s' % (tree_loc, )
        if tree_loc == main_repo:
            conf['default'] = True
        new_config['raw:' + tree_loc] = basics.AutoConfigSection(conf)

        # repo trees
        kwds = {
            'inherit': ('ebuild-repo-common', ),
            'raw_repo': ('raw:' + tree_loc),
        }
        cache_name = 'cache:%s' % (tree_loc, )
        new_config[cache_name] = mk_simple_cache(config_root, tree_loc)
        kwds['cache'] = cache_name
        if tree_loc == main_repo:
            kwds['class'] = 'pkgcore.ebuild.repository.tree'
            if rsync_portdir_cache:
                kwds['cache'] = 'cache:%s/metadata/cache %s' % (main_repo,
                                                                cache_name)
        else:
            kwds['parent_repo'] = main_repo
        new_config[tree_loc] = basics.AutoConfigSection(kwds)

    new_config['portdir'] = basics.section_alias(main_repo, 'repo')

    # XXX: Hack for portage-2 profile format support. We need to figure out how
    # to dynamically create this from the config at runtime on attr access.
    profiles.ProfileNode._repo_map = ImmutableDict(repo_map)

    if overlay_repos:
        new_config['repo-stack'] = basics.FakeIncrementalDictConfigSection(
            my_convert_hybrid, {
                'class': 'pkgcore.repository.multiplex.config_tree',
                'repositories': tuple(default_repos)
            })
    else:
        new_config['repo-stack'] = basics.section_alias(main_repo, 'repo')

    new_config['vuln'] = basics.AutoConfigSection({
        'class': SecurityUpgradesViaProfile,
        'ebuild_repo': 'repo-stack',
        'vdb': 'vdb',
        'profile': 'profile',
    })
    new_config['glsa'] = basics.section_alias(
        'vuln', SecurityUpgradesViaProfile.pkgcore_config_type.typename)

    # binpkg.
    buildpkg = 'buildpkg' in features or kwargs.get('buildpkg', False)
    pkgdir = os.environ.get("PKGDIR", conf_dict.pop('PKGDIR', None))
    if pkgdir is not None:
        try:
            pkgdir = abspath(pkgdir)
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            if buildpkg or set(features).intersection(
                ('pristine-buildpkg', 'buildsyspkg', 'unmerge-backup')):
                logger.warning(
                    "disabling buildpkg related features since PKGDIR doesn't exist"
                )
            pkgdir = None
        else:
            if not ensure_dirs(pkgdir, mode=0o755, minimal=True):
                logger.warning(
                    "disabling buildpkg related features since PKGDIR either doesn't "
                    "exist, or lacks 0755 minimal permissions")
                pkgdir = None
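The metadata cache probe in the middle of this example relies on Python's for/else: the else clause runs only when the loop completes without hitting break, which is exactly the "no usable cache format found" case. Isolated below, with a hypothetical repo path:

import os
from os.path import join as pjoin

main_repo = '/var/db/repos/gentoo'   # hypothetical repo location
rsync_portdir_cache = True
for cache_type, frag in (('flat_hash.md5_cache', 'md5-cache'),
                         ('metadata.database', 'cache')):
    if os.path.exists(pjoin(main_repo, 'metadata', frag)):
        # the first existing format wins, in preference order
        break
else:
    # reached only if no break fired, i.e. no cache dir exists on disk
    rsync_portdir_cache = False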
Example #33
0
def config_from_make_conf(location=None, profile_override=None, **kwargs):
    """generate a config using portage's config files

    Args:
        location (optional[str]): path to the portage config directory,
            (defaults to /etc/portage)
        profile_override (optional[str]): profile to use instead of the current system
            profile, i.e. the target of the /etc/portage/make.profile symlink
        configroot (optional[str]): location for various portage config files (defaults to /)
        root (optional[str]): target root filesystem (defaults to /)
        buildpkg (optional[bool]): forcibly disable/enable building binpkgs, otherwise
            FEATURES=buildpkg from make.conf is used

    Returns:
        dict: config settings
    """

    # this actually differs from portage parsing: we allow
    # make.globals to provide vars used in make.conf, while portage keeps
    # them separate (kind of annoying)

    config_dir = location if location is not None else '/etc/portage'
    config_dir = pjoin(
        os.environ.get('PORTAGE_CONFIGROOT', kwargs.pop('configroot', '/')),
        config_dir.lstrip('/'))

    # this isn't preserving incremental behaviour for features/use unfortunately

    make_conf = {}
    try:
        load_make_conf(make_conf, pjoin(const.CONFIG_PATH, 'make.globals'))
    except IGNORED_EXCEPTIONS:
        raise
    except Exception:
        raise_from(errors.ParsingError("failed to load make.globals"))
    load_make_conf(make_conf,
                   pjoin(config_dir, 'make.conf'),
                   required=False,
                   allow_sourcing=True,
                   incrementals=True)

    root = os.environ.get("ROOT", kwargs.pop('root',
                                             make_conf.get("ROOT", "/")))
    gentoo_mirrors = [
        x.rstrip("/") + "/distfiles"
        for x in make_conf.pop("GENTOO_MIRRORS", "").split()
    ]

    # this is flawed... it'll pick up -some-feature
    features = make_conf.get("FEATURES", "").split()

    config = {}
    triggers = []

    def add_trigger(name, kls_path, **extra_args):
        d = extra_args.copy()
        d['class'] = kls_path
        config[name] = basics.ConfigSectionFromStringDict(d)
        triggers.append(name)

    # sets...
    add_sets(config, root, config_dir)

    add_profile(config, config_dir, profile_override)

    kwds = {
        "class": "pkgcore.vdb.ondisk.tree",
        "location": pjoin(root, 'var', 'db', 'pkg'),
        "cache_location": '/var/cache/edb/dep/var/db/pkg',
    }
    config["vdb"] = basics.AutoConfigSection(kwds)

    try:
        repos_conf_defaults, repos_conf = load_repos_conf(
            pjoin(config_dir, 'repos.conf'))
    except errors.ParsingError as e:
        if getattr(getattr(e, 'exc', None), 'errno', None) != errno.ENOENT:
            raise
        try:
            # fallback to defaults provided by pkgcore
            repos_conf_defaults, repos_conf = load_repos_conf(
                pjoin(const.CONFIG_PATH, 'repos.conf'))
        except IGNORED_EXCEPTIONS:
            raise
        except Exception:
            raise_from(
                errors.ParsingError("failed to find a usable repos.conf"))

    make_repo_syncers(config, repos_conf, make_conf)

    config['ebuild-repo-common'] = basics.AutoConfigSection({
        'class': 'pkgcore.ebuild.repository.tree',
        'default_mirrors': gentoo_mirrors,
        'inherit-only': True,
        'ignore_paludis_versioning': ('ignore-paludis-versioning' in features),
    })

    default_repo_path = repos_conf[
        repos_conf_defaults['main-repo']]['location']
    repo_map = {}

    for repo_name, repo_opts in repos_conf.items():
        repo_path = repo_opts['location']

        # XXX: Hack for portage-2 profile format support.
        repo_config = RepoConfig(repo_path, repo_name)
        repo_map[repo_config.repo_id] = repo_config

        # repo configs
        repo_conf = {
            'class': 'pkgcore.ebuild.repo_objs.RepoConfig',
            'config_name': repo_name,
            'location': repo_path,
            'syncer': 'sync:' + repo_name,
        }

        # repo trees
        repo = {
            'inherit': ('ebuild-repo-common', ),
            'repo_config': 'conf:' + repo_name,
        }

        # metadata cache
        if repo_config.cache_format is not None:
            cache_name = 'cache:' + repo_name
            config[cache_name] = make_cache(repo_config.cache_format,
                                            repo_path)
            repo['cache'] = cache_name

        if repo_path == default_repo_path:
            repo_conf['default'] = True

        config['conf:' + repo_name] = basics.AutoConfigSection(repo_conf)
        config[repo_name] = basics.AutoConfigSection(repo)

    # XXX: Hack for portage-2 profile format support. We need to figure out how
    # to dynamically create this from the config at runtime on attr access.
    profiles.ProfileNode._repo_map = ImmutableDict(repo_map)

    repos = list(repos_conf)
    if len(repos) > 1:
        config['repo-stack'] = basics.FakeIncrementalDictConfigSection(
            my_convert_hybrid, {
                'class': 'pkgcore.repository.multiplex.config_tree',
                'repositories': tuple(repos)
            })
    else:
        config['repo-stack'] = basics.section_alias(repos[0], 'repo')

    config['vuln'] = basics.AutoConfigSection({
        'class': SecurityUpgradesViaProfile,
        'ebuild_repo': 'repo-stack',
        'vdb': 'vdb',
        'profile': 'profile',
    })
    config['glsa'] = basics.section_alias(
        'vuln', SecurityUpgradesViaProfile.pkgcore_config_type.typename)

    # binpkg.
    buildpkg = 'buildpkg' in features or kwargs.pop('buildpkg', False)
    pkgdir = os.environ.get("PKGDIR", make_conf.pop('PKGDIR', None))
    if pkgdir is not None:
        try:
            pkgdir = abspath(pkgdir)
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            if buildpkg or set(features).intersection(
                ('pristine-buildpkg', 'buildsyspkg', 'unmerge-backup')):
                logger.warning(
                    "disabling buildpkg related features since PKGDIR doesn't exist"
                )
            pkgdir = None
        else:
            if not ensure_dirs(pkgdir, mode=0o755, minimal=True):
                logger.warning(
                    "disabling buildpkg related features since PKGDIR either doesn't "
                    "exist, or lacks 0755 minimal permissions")
                pkgdir = None
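The PKGDIR handling that closes several of these examples follows one pattern: normalize the path, tolerate it being absent, and demand minimal 0755 permissions before enabling binpkg-related features. A sketch of that flow using os.makedirs in place of snakeoil's ensure_dirs (which additionally verifies permissions on already-existing dirs):

import errno
import os
from os.path import abspath

def validate_pkgdir(pkgdir):
    """Return a usable PKGDIR path, or None to disable binpkg features."""
    try:
        pkgdir = abspath(pkgdir)
    except OSError as oe:
        # abspath() only raises if the cwd needed to resolve a
        # relative path has disappeared out from under us
        if oe.errno != errno.ENOENT:
            raise
        return None
    try:
        os.makedirs(pkgdir, mode=0o755, exist_ok=True)
    except OSError:
        return None
    return pkgdir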
Example #34
0
    def __init__(self, userpriv, sandbox, fakeroot, save_file):
        """
        :param sandbox: enables a sandboxed processor
        :param userpriv: enables a userpriv'd processor
        :param fakeroot: enables a fakeroot'd processor-
            this is a mutually exclusive option to sandbox, and
            requires userpriv to be enabled. Violating this will
            result in nastiness.
        """

        self.lock()
        self.ebd = e_const.EBUILD_DAEMON_PATH
        spawn_opts = {'umask': 0o002}

        self._preloaded_eclasses = {}
        self._eclass_caching = False
        self._outstanding_expects = []
        self._metadata_paths = None

        if fakeroot and (sandbox or not userpriv):
            traceback.print_stack()
            logger.error("Both sandbox and fakeroot cannot be enabled at the same time")
            raise InitializationError("cannot initialize with sandbox and fakeroot")

        if userpriv:
            self.__userpriv = True
            spawn_opts.update({
                "uid": os_data.portage_uid,
                "gid": os_data.portage_gid,
                "groups": [os_data.portage_gid]})
        else:
            if pkgcore.spawn.is_userpriv_capable():
                spawn_opts.update({"gid": os_data.portage_gid,
                                   "groups": [0, os_data.portage_gid]})
            self.__userpriv = False

        # open the pipes to be used for chatting with the new daemon
        cread, cwrite = os.pipe()
        dread, dwrite = os.pipe()
        self.__sandbox = False
        self.__fakeroot = False

        # since it's questionable which spawn method we'll use (if
        # sandbox or fakeroot, for example), we ensure the bashrc is invalid.
        env = {x: "/etc/portage/spork/not/valid/ha/ha"
               for x in ("BASHRC", "BASH_ENV")}
        if int(os.environ.get('PKGCORE_PERF_DEBUG', 1)) > 1:
            env["PKGCORE_PERF_DEBUG"] = os.environ['PKGCORE_PERF_DEBUG']

        # append script dir to PATH for git repo or unpacked tarball
        if "PKGCORE_REPO_PATH" in os.environ:
            env["PATH"] = os.pathsep.join(
                [os.environ["PATH"], pjoin(os.environ["PKGCORE_REPO_PATH"], 'bin')])

        args = []
        if sandbox:
            if not pkgcore.spawn.is_sandbox_capable():
                raise ValueError("spawn lacks sandbox capabilities")
            if fakeroot:
                raise InitializationError('fakeroot was on, but sandbox was also on')
            self.__sandbox = True
            spawn_func = pkgcore.spawn.spawn_sandbox
#            env.update({"SANDBOX_DEBUG":"1", "SANDBOX_DEBUG_LOG":"/var/tmp/test"})

        elif fakeroot:
            if not pkgcore.spawn.is_fakeroot_capable():
                raise ValueError("spawn lacks fakeroot capabilities")
            self.__fakeroot = True
            spawn_func = pkgcore.spawn.spawn_fakeroot
            args.append(save_file)
        else:
            spawn_func = pkgcore.spawn.spawn

        # force to a neutral dir so that sandbox/fakeroot won't explode if
        # run from a nonexistent dir
        spawn_opts["cwd"] = e_const.EAPI_BIN_PATH
        # little trick: we force the pipes to be high up fd-wise so
        # nobody stupidly hits 'em.
        max_fd = min(pkgcore.spawn.max_fd_limit, 1024)
        env.update({
            "PKGCORE_EBD_READ_FD": str(max_fd-2),
            "PKGCORE_EBD_WRITE_FD": str(max_fd-1)})
        self.pid = spawn_func(
            ["/bin/bash", self.ebd, "daemonize"],
            fd_pipes={0: 0, 1: 1, 2: 2, max_fd-2: cread, max_fd-1: dwrite},
            returnpid=True, env=env, *args, **spawn_opts)[0]

        os.close(cread)
        os.close(dwrite)
        self.ebd_write = os.fdopen(cwrite, "w")
        self.ebd_read = os.fdopen(dread, "r")

        # basically a quick "yo" to the daemon
        self.write("dude?")
        if not self.expect("dude!"):
            logger.error("error in server coms, bailing.")
            raise InitializationError(
                "expected 'dude!' response from ebd, which wasn't received. "
                "likely a bug")
        self.write(e_const.EAPI_BIN_PATH)
        # send PKGCORE_PYTHON_BINARY...
        self.write(pkgcore.spawn.find_invoking_python())
        self.write(
            os.pathsep.join([
                normpath(abspath(pjoin(pkgcore.__file__, os.pardir, os.pardir))),
                os.environ.get('PYTHONPATH', '')])
            )
        if self.__sandbox:
            self.write("sandbox_log?")
            self.__sandbox_log = self.read().split()[0]
        self.dont_export_vars = self.read().split()
        # locking isn't used much, but w/ threading this will matter
        self.unlock()
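The max_fd computation these processors share caps the descriptor numbers at 1024 regardless of the process's actual limit, then carves the channel fds off the top. A standalone equivalent using the resource module; pkgcore's spawn.max_fd_limit is assumed to be derived the same way:

import resource

# the soft RLIMIT_NOFILE bound is how many fds this process may have open
soft_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
max_fd = min(soft_limit, 1024)

# this example claims the top two slots; the newer processors start at
# max_fd - 4 instead, to dodge the older-bash fd-claiming bug noted earlier
read_fd, write_fd = max_fd - 2, max_fd - 1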
Example #35
0
        new_config['raw:' + tree_loc] = basics.AutoConfigSection(conf)

    new_config['vuln'] = basics.AutoConfigSection({
        'class': SecurityUpgradesViaProfile,
        'ebuild_repo': 'repo-stack',
        'vdb': 'vdb',
        'profile': 'profile'})
    new_config['glsa'] = basics.section_alias(
        'vuln', SecurityUpgradesViaProfile.pkgcore_config_type.typename)
    # binpkg.
    pkgdir = conf_dict.pop('PKGDIR', None)
    default_repos = list(reversed(portdir_overlays)) + [portdir]
    if pkgdir is not None:
        try:
            pkgdir = abspath(pkgdir)
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            if set(features).intersection(
                    ('buildpkg', 'pristine-buildpkg', 'buildsyspkg', 'unmerge-backup')):
                logger.warning("disabling buildpkg related features since PKGDIR doesn't exist")
            pkgdir = None
        else:
            if not ensure_dirs(pkgdir, mode=0o755, minimal=True):
                logger.warning("disabling buildpkg related features since PKGDIR either doesn't "
                               "exist, or lacks 0755 minimal permissions")
                pkgdir = None
    else:
        if set(features).intersection(
                ('buildpkg', 'pristine-buildpkg', 'buildsyspkg', 'unmerge-backup')):