Example #1
 def test_parents(self):
     path = pjoin(self.dir, self.profile)
     os.mkdir(pjoin(path, "child"))
     self.write_file("parent", "..", profile="%s/child" % self.profile)
     p = self.klass(pjoin(path, "child"))
     self.assertEqual(1, len(p.parents))
     self.assertEqual(p.parents[0].path, path)
Example #2
    def test_packages(self):
        p = self.klass(pjoin(self.dir, self.profile))
        self.assertEqual(p.system, empty)
        self.assertEqual(p.visibility, empty)
        self.parsing_checks("packages", "system")
        self.write_file("packages", "#foo\n")
        p = self.klass(pjoin(self.dir, self.profile))
        self.assertEqual(p.visibility, empty)
        self.assertEqual(p.system, empty)
        self.write_file("packages", "#foo\ndev-util/diffball\n")
        p = self.klass(pjoin(self.dir, self.profile))
        self.assertEqual(p.system, empty)
        self.assertEqual(list(p.visibility), [(), (atom("dev-util/diffball", negate_vers=True),)])

        self.write_file("packages", "-dev-util/diffball\ndev-foo/bar\n*dev-sys/atom\n" "-*dev-sys/atom2\nlock-foo/dar")
        p = self.klass(pjoin(self.dir, self.profile))
        self.assertEqual(p.system, ((atom("dev-sys/atom2"),), (atom("dev-sys/atom"),)))
        self.assertEqual(
            [set(x) for x in p.visibility],
            [
                set([atom("dev-util/diffball", negate_vers=True)]),
                set([atom("dev-foo/bar", negate_vers=True), atom("lock-foo/dar", negate_vers=True)]),
            ],
        )
        self.simple_eapi_awareness_check("packages", "system")
Example #3
 def parsing_checks(self, filename, attr, data="", line_negation=True):
     path = pjoin(self.dir, self.profile)
     self.write_file(filename, data)
     getattr(self.klass(path), attr)
     self.write_file(filename, "-")
     self.assertRaises(profiles.ProfileError, getattr, self.klass(path), attr)
     self.wipe_path(pjoin(path, filename))
Example #4
 def fetch_one(self, fetchable, observer, retry=False):
     if fetchable.filename in self._basenames:
         return True
     # fetching files without uri won't fly
     # XXX hack atm, could use better logic but works for now
     try:
         fp = self.fetcher(fetchable)
     except fetch_errors.ChksumFailure as e:
         # checksum failed, rename file and try refetching
         path = pjoin(self.fetcher.distdir, fetchable.filename)
         failed_filename = f'{fetchable.filename}._failed_chksum_'
         failed_path = pjoin(self.fetcher.distdir, failed_filename)
         os.rename(path, failed_path)
         if retry:
             raise
         observer.error(str(e))
         observer.error(f'renaming to {failed_filename!r} and refetching from upstream')
         observer.flush()
         # refetch directly from upstream
         return self.fetch_one(fetchable.upstream, observer, retry=True)
     except fetch_errors.FetchFailed as e:
         fp = None
     if fp is None:
         return False
     self.verified_files[fp] = fetchable
     self._basenames.add(fetchable.filename)
     return True
Example #5
 def test_from_abspath(self):
     self.mk_profiles({"name": "profiles"}, {"name": "profiles/1"})
     base = pjoin(self.dir, "profiles")
     p = self.kls.from_abspath(pjoin(base, "1"))
     self.assertNotEqual(p, None)
     self.assertEqual(normpath(p.basepath), normpath(base))
     self.assertEqual(normpath(p.profile), normpath(pjoin(base, "1")))
Example #6
 def run_check(*args):
     # create a fresh tree for the profile work every time.
     # do this so that it's always a unique pathway; this sidesteps
     # any potential issues of ProfileNode instance caching.
     path = pjoin(self.dir, 'foo', str(next(counter)))
     shutil.copytree(pjoin(self.dir, 'foo'), path, symlinks=True)
     return self.process_check(path, list(args))
Example #7
def config_from_make_conf(location="/etc/"):
    """
    generate a config from a file location

    :param location: location the portage configuration is based in,
        defaults to /etc
    """

    # this actually differs from portage parsing: we allow
    # make.globals to provide vars used in make.conf; portage keeps
    # them separate (kind of annoying)

    config_root = os.environ.get("PORTAGE_CONFIGROOT", "/")
    base_path = pjoin(config_root, location.strip("/"))
    portage_base = pjoin(base_path, "portage")

    # this isn't preserving incremental behaviour for features/use
    # unfortunately

    conf_dict = {}
    try:
        load_make_config(conf_dict, pjoin(base_path, 'make.globals'))
    except errors.ParsingError as e:
        if getattr(getattr(e, 'exc', None), 'errno', None) != errno.ENOENT:
            raise
        try:
            load_make_config(conf_dict,
                pjoin(config_root, 'usr/share/portage/config/make.globals'))
        except compatibility.IGNORED_EXCEPTIONS:
            raise
        except:
            compatibility.raise_from(errors.ParsingError(
                "failed to find a usable make.globals"))
Example #8
def add_sets(config, root, portage_base_dir):
    config["world"] = basics.AutoConfigSection({
        "class": "pkgcore.pkgsets.filelist.WorldFile",
        "location": pjoin(root, const.WORLD_FILE)})
    config["system"] = basics.AutoConfigSection({
        "class": "pkgcore.pkgsets.system.SystemSet",
        "profile": "profile"})
    config["installed"] = basics.AutoConfigSection({
        "class": "pkgcore.pkgsets.installed.Installed",
        "vdb": "vdb"})
    config["versioned-installed"] = basics.AutoConfigSection({
        "class": "pkgcore.pkgsets.installed.VersionedInstalled",
        "vdb": "vdb"})

    set_fp = pjoin(portage_base_dir, "sets")
    try:
        for setname in listdir_files(set_fp):
            # Potential for name clashes here, those will just make
            # the set not show up in config.
            if setname in ("system", "world"):
                logger.warning(
                    "user defined set %s is disallowed; ignoring" %
                    pjoin(set_fp, setname))
                continue
            config[setname] = basics.AutoConfigSection({
                "class": "pkgcore.pkgsets.filelist.FileList",
                "location": pjoin(set_fp, setname)})
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
Example #9
    def regen(self, binary, basepath):
        ignores = ("dir", "dir.old")
        try:
            files = listdir_files(basepath)
        except OSError as oe:
            if oe.errno == errno.ENOENT:
                return
            raise

        if self.should_skip_directory(basepath, files):
            return

        # wipe old indexes.
        for x in set(ignores).intersection(files):
            os.remove(pjoin(basepath, x))

        index = pjoin(basepath, 'dir')
        for x in files:
            if x in ignores or x.startswith("."):
                continue

            ret, data = spawn.spawn_get_output(
                [binary, '--quiet', pjoin(basepath, x), '--dir-file', index],
                collect_fds=(1,2), split_lines=False)

            if not data or "already exists" in data or \
                    "warning: no info dir entry" in data:
                continue
            yield pjoin(basepath, x)
Example #10
    def _install_targets(self, targets):
        dirs = set()
        for x in targets:
            basename = os.path.basename(x)
            suffix = os.path.splitext(basename)[1]

            if self.eapi.archive_suffixes_re.match(suffix):
                # TODO: uncompress/warn?
                suffix = os.path.splitext(basename.rsplit('.', 1)[0])[1]

            name = basename
            mandir = f'man{suffix[1:]}'

            if self.language_override and self.opts.i18n:
                mandir = pjoin(self.opts.i18n, mandir)
            elif self.language_detect:
                match = self.detect_lang_re.match(basename)
                if match:
                    name = f'{match.group(0)}.{match.group(3)}'
                    mandir = pjoin(match.group(1), mandir)

            if self.valid_mandir_re.match(mandir):
                if mandir not in dirs:
                    self.install_dirs([mandir])
                    dirs.add(mandir)
                self.install([(x, pjoin(mandir, name))])
            else:
                raise IpcCommandError(f'invalid man page: {x}')
Example #11
    def setUp(self):
        TempDirMixin.setUp(self)
        self.dir_orig = self.dir

        self.dir_master = pjoin(self.dir, 'master')
        self.dir_slave = pjoin(self.dir, 'slave')
        ensure_dirs(self.dir_master)
        ensure_dirs(self.dir_slave)

        self.dir = self.dir_slave

        self.master_pdir = pjoin(self.dir_master, 'profiles')
        self.pdir = self.slave_pdir = pjoin(self.dir_slave, 'profiles')
        ensure_dirs(self.master_pdir)
        ensure_dirs(self.slave_pdir)

        with open(pjoin(self.master_pdir, 'repo_name'), 'w') as f:
            f.write('master\n')
        with open(pjoin(self.slave_pdir, 'repo_name'), 'w') as f:
            f.write('slave\n')

        ensure_dirs(pjoin(self.dir_master, 'metadata'))
        ensure_dirs(pjoin(self.dir_slave, 'metadata'))
        with open(pjoin(self.dir_master, 'metadata', 'layout.conf'), 'w') as f:
            f.write('masters =\n')
        with open(pjoin(self.dir_slave, 'metadata', 'layout.conf'), 'w') as f:
            f.write('masters = master\n')
Example #12
    def _add_sets(self):
        self["world"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.filelist.WorldFile",
            "location": pjoin(self.root, econst.WORLD_FILE.lstrip('/'))})
        self["system"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.system.SystemSet",
            "profile": "profile"})
        self["installed"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.installed.Installed",
            "vdb": "vdb"})
        self["versioned-installed"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.installed.VersionedInstalled",
            "vdb": "vdb"})

        set_fp = pjoin(self.dir, "sets")
        try:
            for setname in listdir_files(set_fp):
                # Potential for name clashes here, those will just make
                # the set not show up in config.
                if setname in ("system", "world"):
                    logger.warning(
                        "user defined set %r is disallowed; ignoring",
                        pjoin(set_fp, setname))
                    continue
                self[setname] = basics.AutoConfigSection({
                    "class": "pkgcore.pkgsets.filelist.FileList",
                    "location": pjoin(set_fp, setname)})
        except FileNotFoundError:
            pass
Example #13
    def test_load_repos_conf_dir(self):
        # repo priority sorting and dir/symlink scanning

        repos_conf_dir = pjoin(self.dir, 'repos.conf')
        os.mkdir(repos_conf_dir)
        repos_conf_sym = pjoin(self.dir, 'repos.conf.sym')
        os.symlink(repos_conf_dir, repos_conf_sym)

        # add global repos.conf
        shutil.copyfile(
            pjoin(const.CONFIG_PATH, 'repos.conf'),
            pjoin(repos_conf_dir, 'repos.conf'))

        with open(pjoin(repos_conf_dir, 'z'), 'w') as f:
            f.write(textwrap.dedent('''\
                [bar]
                location = /var/gentoo/repos/bar

                [foo]
                location = /var/gentoo/repos/foo
                priority = 10'''))
            f.flush()

        defaults, repos = load_repos_conf(repos_conf_dir)
        sym_defaults, sym_repos = load_repos_conf(repos_conf_sym)

        self.assertEqual(defaults, sym_defaults)
        self.assertEqual(repos, sym_repos)
        self.assertEqual('gentoo', defaults['main-repo'])
        self.assertEqual(['foo', 'bar', 'gentoo'], list(repos.keys()))
Example #14
 def _visibility_limiters(self):
     path = pjoin(self.base, 'profiles', 'package.mask')
     pos, neg = [], []
     try:
         if (self.config.eapi.options['has_profile_data_dirs'] or
                 self.config.profile_formats.intersection(['portage-1', 'portage-2'])):
             paths = sorted_scan(path)
         else:
             paths = [path]
         for path in paths:
             for line in iter_read_bash(path):
                 line = line.strip()
                 if line in ('-', ''):
                     raise profiles.ProfileError(
                         pjoin(self.base, 'profiles'),
                         'package.mask', "encountered empty negation: -")
                 if line.startswith('-'):
                     neg.append(atom.atom(line[1:]))
                 else:
                     pos.append(atom.atom(line))
     except FileNotFoundError:
         pass
     except ebuild_errors.MalformedAtom as e:
         raise profiles.ProfileError(
             pjoin(self.base, 'profiles'), 'package.mask', e) from e
     return tuple(neg), tuple(pos)
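
The negation handling in Example #14 follows the profile-stacking rule: a leading "-" removes an atom that a parent profile masked. A standalone sketch of that split with atoms left as plain strings (split_mask_lines is illustrative, not a pkgcore function; the real code parses each entry with atom.atom):

    def split_mask_lines(lines):
        pos, neg = [], []
        for line in (l.strip() for l in lines):
            if not line or line.startswith('#'):
                continue
            if line == '-':
                raise ValueError("encountered empty negation: -")
            if line.startswith('-'):
                neg.append(line[1:])  # unmask an atom masked by a parent
            else:
                pos.append(line)      # mask this atom
        return tuple(neg), tuple(pos)

    # split_mask_lines(["dev-util/diffball", "-dev-foo/bar"])
    # -> (('dev-foo/bar',), ('dev-util/diffball',))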
Example #15
 def test_licenses(self):
     licenses = ('GPL-2', 'GPL-3+', 'BSD')
     ensure_dirs(pjoin(self.dir, 'licenses'))
     for license in licenses:
         touch(pjoin(self.dir, 'licenses', license))
     repo = self.mk_tree(self.dir)
     self.assertEqual(sorted(repo.licenses), sorted(licenses))
Example #16
def _internal_offset_iter_scan(path, chksum_handlers, offset, stat_func=os.lstat,
                               hidden=True, backup=True):
    offset = normpath(offset)
    path = normpath(path)
    dirs = collections.deque([path[len(offset):]])
    if dirs[0]:
        yield gen_obj(dirs[0], chksum_handlers=chksum_handlers,
            stat_func=stat_func)

    sep = os.path.sep
    while dirs:
        base = dirs.popleft()
        real_base = pjoin(offset, base.lstrip(sep))
        base = base.rstrip(sep) + sep
        for x in listdir(real_base):
            if not hidden and x.startswith('.'):
                continue
            if not backup and x.endswith('~'):
                continue
            path = pjoin(base, x)
            obj = gen_obj(path, chksum_handlers=chksum_handlers,
                        real_location=pjoin(real_base, x),
                        stat_func=os.lstat)
            yield obj
            if obj.is_dir:
                dirs.append(path)
Example #17
def _caching_grab_virtuals(repo, cache_basedir):
    virtuals = {}
    update = False
    cache = _read_mtime_cache(pjoin(cache_basedir, 'virtuals.cache'))

    existing = _get_mtimes(repo.location)
    for cat, mtime in existing.items():
        d = cache.pop(cat, None)
        if d is not None and int(d[0]) == int(mtime):
            d = _convert_cached_virtuals(d)
            if d is not None:
                _merge_virtuals(virtuals, d)
                continue

        update = True
        _collect_virtuals(virtuals, repo.itermatch(
            packages.PackageRestriction("category",
                values.StrExactMatch(cat))))

    if update or cache:
        _write_mtime_cache(existing, virtuals,
            pjoin(cache_basedir, 'virtuals.cache'))

    defaults = _collect_default_providers(virtuals)
#    _finalize_virtuals(virtuals)
    return defaults, virtuals
Example #18
def _clean_old_caches(path):
    for name in ('plugincache2',):
        try:
            osutils.unlink_if_exists(pjoin(path, name))
        except EnvironmentError as e:
            logger.error("attempting to clean old plugin cache %r failed with %s",
                pjoin(path, name), e)
Example #19
    def process_subcommands(self, parser, name, action_group):
        l = []
        h = self._get_formatter(parser, name)
        h.add_arguments(action_group._group_actions)
        data = h.format_help().strip()
        if data:
            assert len(action_group._group_actions) == 1
            l.extend(_rst_header("=", action_group.title))
            if action_group.description:
                l.extend(action_group.description.split("\n"))

            for subcommand, parser in action_group._group_actions[0].choices.items():
                subdir_path = self.name.split()[1:]
                base = pjoin(self.base_path, *subdir_path)
                self.__class__(base, "%s %s" % (
                    self.name, subcommand), parser, mtime=self.mtime, out_name=subcommand).run()

                toc_path = self.name.split()
                if subdir_path:
                    toc_path = subdir_path

            l.append('')
            l.append(".. toctree::")
            l.append("    :maxdepth: 2")
            l.append('')
            l.extend("    %s %s <%s>" %
                     (name, subcommand, pjoin(*list(toc_path + [subcommand])))
                     for subcommand in action_group._group_actions[0].choices)
            l.append('')
        return l
Example #20
    def _split(self, iterable, observer, engine, cset):
        debug_store = pjoin(engine.offset, self._debug_storage.lstrip('/'))

        objcopy_args = [self.objcopy_binary, '--only-keep-debug']
        if self._compress:
            objcopy_args.append('--compress-debug-sections')

        for fs_objs, ftype in iterable:
            if 'ar archive' in ftype:
                continue
            if 'relocatable' in ftype:
                if not any(x.basename.endswith(".ko") for x in fs_objs):
                    continue
            fs_obj = fs_objs[0]
            debug_loc = pjoin(debug_store, fs_obj.location.lstrip('/') + ".debug")
            if debug_loc in cset:
                continue
            fpath = fs_obj.data.path
            debug_ondisk = pjoin(os.path.dirname(fpath),
                os.path.basename(fpath) + ".debug")

            # note that we tell the UI the final pathway- not the intermediate one.
            observer.info("splitdebug'ing %s into %s" %
                (fs_obj.location, debug_loc))

            ret = spawn.spawn(objcopy_args + [fpath, debug_ondisk])
            if ret != 0:
                observer.warn("splitdebug'ing %s failed w/ exitcode %s" %
                    (fs_obj.location, ret))
                continue

            # note that the given pathway to the debug file /must/ be relative to ${D};
            # it must exist at the time of invocation.
            ret = spawn.spawn([self.objcopy_binary,
                '--add-gnu-debuglink', debug_ondisk, fpath])
            if ret != 0:
                observer.warn("splitdebug created debug file %r, but "
                    "failed adding links to %r (%r)" % (debug_ondisk, fpath, ret))
                observer.debug("failed splitdebug command was %r",
                    (self.objcopy_binary, '--add-gnu-debuglink', debug_ondisk, fpath))
                continue


            debug_obj = gen_obj(debug_loc, real_location=debug_ondisk,
                uid=os_data.root_uid, gid=os_data.root_gid)

            stripped_fsobj = self._strip_fsobj(fs_obj, ftype, observer, quiet=True)

            self._modified.add(stripped_fsobj)
            self._modified.add(debug_obj)

            for fs_obj in fs_objs[1:]:
                debug_loc = pjoin(debug_store,
                    fs_obj.location.lstrip('/') + ".debug")
                linked_debug_obj = debug_obj.change_attributes(location=debug_loc)
                observer.info("splitdebug hardlinking %s to %s" %
                    (debug_obj.location, debug_loc))
                self._modified.add(linked_debug_obj)
                self._modified.add(stripped_fsobj.change_attributes(
                    location=fs_obj.location))
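
The two objcopy passes in Example #20 are the standard debug-splitting recipe: copy the debug sections out, then link the stripped binary back to them. A self-contained sketch of the same sequence with placeholder paths (this splitdebug helper is illustrative, not the trigger's API):

    import subprocess

    def splitdebug(binary_path, debug_path, objcopy="objcopy"):
        # 1. copy only the debug sections into a separate file
        subprocess.check_call(
            [objcopy, "--only-keep-debug", binary_path, debug_path])
        # 2. record a .gnu_debuglink section pointing at that file; the link
        #    stores only a basename, hence the relative-to-${D} note above
        subprocess.check_call(
            [objcopy, "--add-gnu-debuglink", debug_path, binary_path])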
Example #21
    def trigger(self, engine):
        bin_path = self.get_binary_path()
        if bin_path is None:
            return

        offset = engine.offset

        locs = [pjoin(offset, x.lstrip(os.path.sep)) for x in self.locations]

        if engine.phase.startswith('pre_'):
            self.saved_mtimes.set_state(locs)
            return
        elif engine.phase == 'post_merge' and \
            engine.mode == const.REPLACE_MODE:
            # skip post_merge for replace.
            # we catch it on unmerge...
            return

        regens = set(x.location for x in self.saved_mtimes.get_changes(locs))
        # force regeneration of any directory lacking the info index.
        regens.update(x for x in locs if not os.path.isfile(pjoin(x, 'dir')))

        bad = []
        for x in regens:
            bad.extend(self.regen(bin_path, x))

        if bad and engine.observer is not None:
            engine.observer.warn("bad info files: %r" % sorted(bad))
Example #22
 def _get_versions(self, catpkg):
     cppath = pjoin(self.base, catpkg[0], catpkg[1])
     pkg = catpkg[-1] + "-"
     lp = len(pkg)
     extension = self.extension
     ext_len = -len(extension)
     try:
         ret = tuple(x[lp:ext_len] for x in listdir_files(cppath)
                     if x[ext_len:] == extension and x[:lp] == pkg)
         if any(('scm' in x or '-try' in x) for x in ret):
             if not self.ignore_paludis_versioning:
                 for x in ret:
                     if 'scm' in x:
                         raise ebuild_errors.InvalidCPV(
                             "%s/%s-%s has nonstandard -scm "
                             "version component" % (catpkg + (x,)))
                     elif 'try' in x:
                         raise ebuild_errors.InvalidCPV(
                             "%s/%s-%s has nonstandard -try "
                             "version component" % (catpkg + (x,)))
                 raise AssertionError('unreachable codepoint was reached')
             return tuple(x for x in ret
                          if ('scm' not in x and 'try' not in x))
         return ret
     except EnvironmentError as e:
         raise_from(KeyError(
             "failed fetching versions for package %s: %s" %
             (pjoin(self.base, '/'.join(catpkg)), str(e))))
Example #23
def add_profile(config, config_dir, profile_override=None):
    if profile_override is None:
        profile = _find_profile_link(config_dir)
    else:
        profile = normpath(abspath(profile_override))
        if not os.path.exists(profile):
            raise_from(errors.ComplexInstantiationError(
                "%s doesn't exist" % (profile,)))

    paths = profiles.OnDiskProfile.split_abspath(profile)
    if paths is None:
        raise errors.ComplexInstantiationError(
            '%s expands to %s, but no profile detected' %
            (pjoin(config_dir, 'make.profile'), profile))

    user_profile_path = pjoin(config_dir, 'profile')
    if os.path.isdir(user_profile_path):
        config["profile"] = basics.AutoConfigSection({
            "class": "pkgcore.ebuild.profiles.UserProfile",
            "parent_path": paths[0],
            "parent_profile": paths[1],
            "user_path": user_profile_path,
        })
    else:
        config["profile"] = basics.AutoConfigSection({
            "class": "pkgcore.ebuild.profiles.OnDiskProfile",
            "basepath": paths[0],
            "profile": paths[1],
        })
Example #24
    def test_load_make_conf(self):
        self.assertIn('PORTAGE_TMPDIR', self.make_globals)

        # nonexistent file
        d = {}
        # by default files are required
        self.assertRaises(
            errors.ParsingError, load_make_conf,
            d, pjoin(self.dir, 'make.globals'))
        # should return empty dict when not required
        load_make_conf(d, pjoin(self.dir, 'make.conf'), required=False)
        self.assertEqual({}, d)

        # unreadable file
        d = {}
        with NamedTemporaryFile() as f:
            os.chmod(f.name, stat.S_IWUSR)
            self.assertRaises(
                errors.PermissionDeniedError, load_make_conf, d, f.name)

        # overrides and incrementals
        with NamedTemporaryFile() as f:
            f.write(b'DISTDIR=foo\nACCEPT_LICENSE=foo\n')
            f.flush()
            d = {}
            load_make_conf(d, pjoin(const.CONFIG_PATH, 'make.globals'))
            load_make_conf(d, f.name, allow_sourcing=True, incrementals=True)
            self.assertEqual('foo', d['DISTDIR'])
            self.assertEqual(
                ' '.join([self.make_globals['ACCEPT_LICENSE'], 'foo']),
                d['ACCEPT_LICENSE'])
Example #25
    def trigger(self, engine, existing_cset, install_cset):
        # hackish, but it works.
        protected_filter = gen_config_protect_filter(
            engine.offset, self.extra_protects, self.extra_disables).match
        ignore_filter = gen_collision_ignore_filter(engine.offset).match
        protected = {}

        for x in existing_cset.iterfiles():
            if not ignore_filter(x.location) and protected_filter(x.location):
                replacement = install_cset[x]
                if not simple_chksum_compare(replacement, x):
                    protected.setdefault(
                        pjoin(engine.offset,
                              os.path.dirname(x.location).lstrip(os.path.sep)),
                        []).append((os.path.basename(replacement.location),
                                    replacement))

        for dir_loc, entries in protected.items():
            updates = {x[0]: [] for x in entries}
            try:
                existing = sorted(x for x in listdir_files(dir_loc)
                                  if x.startswith("._cfg"))
            except OSError as oe:
                if oe.errno != errno.ENOENT:
                    raise
                # this shouldn't occur.
                continue

            for x in existing:
                try:
                    # ._cfg0000_filename
                    count = int(x[5:9])
                    if x[9] != "_":
                        raise ValueError
                    fn = x[10:]
                except (ValueError, IndexError):
                    continue
                if fn in updates:
                    updates[fn].append((count, fn))

            # now we rename.
            for fname, entry in entries:
                # check for any updates with the same chksums.
                count = 0
                for cfg_count, cfg_fname in updates[fname]:
                    if simple_chksum_compare(livefs.gen_obj(
                            pjoin(dir_loc, cfg_fname)), entry):
                        count = cfg_count
                        break
                    count = max(count, cfg_count + 1)
                try:
                    install_cset.remove(entry)
                except KeyError:
                    # this shouldn't occur...
                    continue
                new_fn = pjoin(dir_loc, "._cfg%04i_%s" % (count, fname))
                new_entry = entry.change_attributes(location=new_fn)
                install_cset.add(new_entry)
                self.renames[new_entry] = entry
            del updates
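
The slicing in Example #25 decodes CONFIG_PROTECT update names of the form ._cfgNNNN_<name>. A tiny standalone parser for that scheme (parse_cfg_name is illustrative, not part of pkgcore):

    def parse_cfg_name(filename):
        """Return (count, original_name) for '._cfgNNNN_name', else None."""
        if not filename.startswith("._cfg"):
            return None
        try:
            count = int(filename[5:9])  # the four-digit counter
            if filename[9] != "_":
                return None
        except (ValueError, IndexError):
            return None
        return count, filename[10:]

    # parse_cfg_name("._cfg0002_fstab") -> (2, 'fstab')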
Example #26
 def _visibility_limiters(self):
     path = pjoin(self.base, 'profiles', 'package.mask')
     pos, neg = [], []
     try:
         if self.config.profile_formats.intersection(['portage-1', 'portage-2']):
             paths = sorted_scan(path)
         else:
             paths = [path]
         for path in paths:
             for line in iter_read_bash(path):
                 line = line.strip()
                 if line in ('-', ''):
                     raise profiles.ProfileError(
                         pjoin(self.base, 'profiles'),
                         'package.mask', "encountered empty negation: -")
                 if line.startswith('-'):
                     neg.append(atom.atom(line[1:]))
                 else:
                     pos.append(atom.atom(line))
     except IOError as i:
         if i.errno != errno.ENOENT:
             raise
     except ebuild_errors.MalformedAtom as ma:
         raise_from(profiles.ProfileError(
             pjoin(self.base, 'profiles'),
             'package.mask', ma))
     return [neg, pos]
Example #27
 def mk_tree(self, path, *args, **kwds):
     eclasses = kwds.pop('eclass_cache', None)
     if eclasses is None:
         epath = pjoin(path, 'eclass')
         ensure_dirs(epath)
         eclasses = eclass_cache.cache(epath)
     ensure_dirs(pjoin(path, 'profiles'))
     return repository.UnconfiguredTree(path, eclass_cache=eclasses, *args, **kwds)
Example #28
 def test_dangling_symlink(self):
     src = self.gen_dir("src")
     self.generate_tree(src, {"dir":["dir"]})
     cset = livefs.scan(src, offset=src)
     dest = self.gen_dir("dest")
     os.symlink(pjoin(dest, "dest"), pjoin(dest, "dir"))
     self.assertTrue(ops.merge_contents(cset, offset=dest))
     self.assertEqual(cset, livefs.scan(src, offset=dest))
Example #29
 def _install_targets(self, targets):
     dirs = set()
     for x in targets:
         d = pjoin(os.path.splitext(os.path.basename(x))[0], 'LC_MESSAGES')
         if d not in dirs:
             self.install_dirs([d])
             dirs.add(d)
         self.install([(x, pjoin(d, f'{self.pkg.PN}.mo'))])
Example #30
 def _get_ebuild_path(self, pkg):
     if pkg.revision is None:
         if pkg.fullver not in self.versions[(pkg.category, pkg.package)]:
             # daft explicit -r0 on disk.
             return pjoin(self.base, pkg.category, pkg.package,
                 "%s-%s-r0%s" % (pkg.package, pkg.fullver, self.extension))
     return pjoin(self.base, pkg.category, pkg.package,
         "%s-%s%s" % (pkg.package, pkg.fullver, self.extension))
Example #31
 def test_extend_path(self):
     import mod_testplug
     expected = lists.stable_unique(
         pjoin(p, 'mod_testplug') for p in sys.path if os.path.isdir(p))
     self.assertEqual(expected, mod_testplug.__path__,
                      set(expected) ^ set(mod_testplug.__path__))
Example #32
    def __init__(self, userpriv, sandbox, fakeroot, save_file):
        """
        :param sandbox: enables a sandboxed processor
        :param userpriv: enables a userpriv'd processor
        :param fakeroot: enables a fakeroot'd processor-
            this is a mutually exclusive option to sandbox, and
            requires userpriv to be enabled. Violating this will
            result in nastiness.
        """

        self.lock()
        self.ebd = e_const.EBUILD_DAEMON_PATH
        spawn_opts = {'umask': 0o002}

        self._preloaded_eclasses = {}
        self._eclass_caching = False
        self._outstanding_expects = []
        self._metadata_paths = None

        if fakeroot and (sandbox or not userpriv):
            traceback.print_stack()
            logger.error(
                "Both sandbox and fakeroot cannot be enabled at the same time")
            raise InitializationError(
                "cannot initialize with sandbox and fakeroot")

        if userpriv:
            self.__userpriv = True
            spawn_opts.update({
                "uid": os_data.portage_uid,
                "gid": os_data.portage_gid,
                "groups": [os_data.portage_gid]
            })
        else:
            if pkgcore.spawn.is_userpriv_capable():
                spawn_opts.update({
                    "gid": os_data.portage_gid,
                    "groups": [0, os_data.portage_gid]
                })
            self.__userpriv = False

        # open the pipes to be used for chatting with the new daemon
        cread, cwrite = os.pipe()
        dread, dwrite = os.pipe()
        self.__sandbox = False
        self.__fakeroot = False

        # since it's questionable which spawn method we'll use (if
        # sandbox or fakeroot, for example), we ensure the bashrc is invalid.
        env = {
            x: "/etc/portage/spork/not/valid/ha/ha"
            for x in ("BASHRC", "BASH_ENV")
        }
        if int(os.environ.get('PKGCORE_PERF_DEBUG', 1)) > 1:
            env["PKGCORE_PERF_DEBUG"] = os.environ['PKGCORE_PERF_DEBUG']

        # append script dir to PATH for git repo or unpacked tarball
        if "PKGCORE_SCRIPT_PATH" in os.environ:
            env["PATH"] = os.pathsep.join(
                [os.environ["PATH"], os.environ["PKGCORE_SCRIPT_PATH"]])

        args = []
        if sandbox:
            if not pkgcore.spawn.is_sandbox_capable():
                raise ValueError("spawn lacks sandbox capabilities")
            if fakeroot:
                raise InitializationError(
                    'fakeroot was on, but sandbox was also on')
            self.__sandbox = True
            spawn_func = pkgcore.spawn.spawn_sandbox


#            env.update({"SANDBOX_DEBUG":"1", "SANDBOX_DEBUG_LOG":"/var/tmp/test"})

        elif fakeroot:
            if not pkgcore.spawn.is_fakeroot_capable():
                raise ValueError("spawn lacks fakeroot capabilities")
            self.__fakeroot = True
            spawn_func = pkgcore.spawn.spawn_fakeroot
            args.append(save_file)
        else:
            spawn_func = pkgcore.spawn.spawn

        # force to a neutral dir so that sandbox/fakeroot won't explode if
        # run from a nonexistent dir
        spawn_opts["cwd"] = e_const.EAPI_BIN_PATH
        # little trick. we force the pipes to be high up fd wise so
        # nobody stupidly hits 'em.
        max_fd = min(pkgcore.spawn.max_fd_limit, 1024)
        env.update({
            "PKGCORE_EBD_READ_FD": str(max_fd - 2),
            "PKGCORE_EBD_WRITE_FD": str(max_fd - 1)
        })
        self.pid = spawn_func(["/bin/bash", self.ebd, "daemonize"],
                              fd_pipes={
                                  0: 0,
                                  1: 1,
                                  2: 2,
                                  max_fd - 2: cread,
                                  max_fd - 1: dwrite
                              },
                              returnpid=True,
                              env=env,
                              *args,
                              **spawn_opts)[0]

        os.close(cread)
        os.close(dwrite)
        self.ebd_write = os.fdopen(cwrite, "w")
        self.ebd_read = os.fdopen(dread, "r")

        # basically a quick "yo" to the daemon
        self.write("dude?")
        if not self.expect("dude!"):
            logger.error("error in server coms, bailing.")
            raise InitializationError(
                "expected 'dude!' response from ebd, which wasn't received. "
                "likely a bug")
        self.write(e_const.EAPI_BIN_PATH)
        # send PKGCORE_PYTHON_BINARY...
        self.write(pkgcore.spawn.find_invoking_python())
        self.write(
            os.pathsep.join([
                normpath(abspath(pjoin(pkgcore.__file__, os.pardir,
                                       os.pardir))),
                os.environ.get('PYTHONPATH', '')
            ]))
        if self.__sandbox:
            self.write("sandbox_log?")
            self.__sandbox_log = self.read().split()[0]
        self.dont_export_vars = self.read().split()
        # locking isn't used much, but w/ threading this will matter
        self.unlock()
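
The "high fd" trick in Example #32 keeps the daemon's control pipes out of the descriptor range that spawned commands normally touch. A sketch of the same relocation as a plain os.dup2 remap (remap_high is a hypothetical helper, not pkgcore's implementation):

    import os
    import resource

    def remap_high(read_fd, write_fd, ceiling=1024):
        # cap at the soft RLIMIT_NOFILE, as the example does with max_fd_limit
        max_fd = min(resource.getrlimit(resource.RLIMIT_NOFILE)[0], ceiling)
        os.dup2(read_fd, max_fd - 2)   # daemon reads commands here
        os.dup2(write_fd, max_fd - 1)  # daemon writes responses here
        return max_fd - 2, max_fd - 1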
Example #33
    def fetch(self, target):
        """Fetch a file.

        :type target: :obj:`pkgcore.fetch.fetchable` instance
        :return: None if fetching failed,
            else on disk location of the copied file
        """
        if not isinstance(target, fetchable):
            raise TypeError(
                f"target must be fetchable instance/derivative: {target}")

        kw = {"mode": 0o775}
        if self.readonly:
            kw["mode"] = 0o555
        if self.userpriv:
            kw["gid"] = portage_gid
        kw["minimal"] = True
        if not ensure_dirs(self.distdir, **kw):
            raise errors.DistdirPerms(
                self.distdir, "if userpriv, uid must be %i, gid must be %i. "
                "if not readonly, directory must be 0775, else 0555" %
                (portage_uid, portage_gid))

        path = pjoin(self.distdir, target.filename)
        uris = iter(target.uri)
        last_exc = RuntimeError("fetching failed for an unknown reason")
        spawn_opts = {'umask': 0o002, 'env': self.extra_env}
        if self.userpriv and is_userpriv_capable():
            spawn_opts.update({"uid": portage_uid, "gid": portage_gid})

        for _attempt in range(self.attempts):
            try:
                self._verify(path, target)
                return path
            except errors.MissingDistfile as e:
                command = self.command
                last_exc = e
            except errors.ChksumFailure:
                raise
            except errors.FetchFailed as e:
                last_exc = e
                if not e.resumable:
                    try:
                        os.unlink(path)
                        command = self.command
                    except OSError as e:
                        raise errors.UnmodifiableFile(path, e) from e
                else:
                    command = self.resume_command
            # Note we're not even checking the results, the verify portion of
            # the loop handles this. In other words, don't trust the external
            # fetcher's exit code, trust our chksums instead.
            try:
                spawn_bash(
                    command % {
                        "URI": next(uris),
                        "FILE": target.filename
                    }, **spawn_opts)
            except StopIteration:
                raise errors.FetchFailed(target.filename,
                                         "ran out of urls to fetch from")
        else:
            raise last_exc
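
As the comment in Example #33 notes, the loop verifies before and after each attempt and deliberately ignores the external fetcher's exit status; the checksums are the source of truth. A generic sketch of that pattern with caller-supplied verify and download callables (both hypothetical):

    def fetch_with_verification(path, urls, verify, download, attempts=3):
        urls = iter(urls)
        for _ in range(attempts):
            if verify(path):      # a pre-existing valid file short-circuits
                return path
            try:
                download(next(urls), path)  # exit status deliberately ignored
            except StopIteration:
                raise RuntimeError("ran out of urls to fetch from")
        return None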
Example #34
 def pm_tmpdir(self):
     """Temporary directory for the package manager."""
     return pjoin(self.tmpdir, 'portage')
Example #35
 def __init__(self, path, parent_path):
     self.override_path = pjoin(path, parent_path)
     ProfileNode.__init__(self, path, pms_strict=False)
Example #36
 def _get_ebuild_path(self, pkg):
     return pjoin(self.base, pkg.category, pkg.package,
                  f"{pkg.package}-{pkg.fullver}{self.extension}")
Example #37
def discern_loc(base, pkg, extension='.tbz2'):
    return pjoin(base, pkg.category,
                 "%s-%s%s" % (pkg.package, pkg.fullver, extension))
Example #38
 def setUp(self):
     TempDirMixin.setUp(self)
     self.pdir = pjoin(self.dir, 'profiles')
     ensure_dirs(self.pdir)
Example #39
    def test_path_restrict(self):
        repo_dir = pjoin(self.dir, 'repo')
        sym_repo_dir = pjoin(self.dir, 'sym_repo')
        os.symlink(repo_dir, sym_repo_dir)

        ensure_dirs(pjoin(repo_dir, 'profiles'))
        with open(pjoin(repo_dir, 'profiles', 'repo_name'), 'w') as f:
            f.write('testrepo\n')
        ensure_dirs(pjoin(repo_dir, 'cat', 'foo'))
        ensure_dirs(pjoin(repo_dir, 'cat', 'bar'))
        ensure_dirs(pjoin(repo_dir, 'tac', 'oof'))
        touch(pjoin(repo_dir, 'skel.ebuild'))
        touch(pjoin(repo_dir, 'cat', 'foo', 'foo-1.ebuild'))
        touch(pjoin(repo_dir, 'cat', 'foo', 'foo-2.ebuild'))
        touch(pjoin(repo_dir, 'cat', 'foo', 'Manifest'))
        touch(pjoin(repo_dir, 'cat', 'bar', 'bar-1.ebuild'))
        touch(pjoin(repo_dir, 'tac', 'oof', 'oof-1.ebuild'))

        # specify repo category dirs
        with open(pjoin(repo_dir, 'profiles', 'categories'), 'w') as f:
            f.write('cat\n')
            f.write('tac\n')

        for d in (repo_dir, sym_repo_dir):
            repo = self.mk_tree(d)
            for path in (
                    self.dir,  # path not in repo
                    pjoin(repo.location, 'a'),  # nonexistent category dir
                    pjoin(repo.location, 'profiles'),  # non-category dir
                    pjoin(
                        repo.location,
                        'skel.ebuild'),  # not in the correct cat/PN dir layout
                    pjoin(repo.location, 'cat',
                          'a'),  # nonexistent package dir
                    pjoin(repo.location, 'cat', 'foo',
                          'foo-0.ebuild'),  # nonexistent ebuild file
                    pjoin(repo.location, 'cat', 'foo',
                          'Manifest')):  # non-ebuild file
                self.assertRaises(ValueError, repo.path_restrict, path)

            # repo dir
            restriction = repo.path_restrict(repo.location)
            self.assertEqual(len(restriction), 1)
            self.assertInstance(restriction[0], restricts.RepositoryDep)
            # matches all 4 ebuilds in the repo
            self.assertEqual(len(repo.match(restriction)), 4)

            # category dir
            restriction = repo.path_restrict(pjoin(repo.location, 'cat'))
            self.assertEqual(len(restriction), 2)
            self.assertInstance(restriction[1], restricts.CategoryDep)
            # matches all 3 ebuilds in the category
            self.assertEqual(len(repo.match(restriction)), 3)

            # package dir
            restriction = repo.path_restrict(pjoin(repo.location, 'cat',
                                                   'foo'))
            self.assertEqual(len(restriction), 3)
            self.assertInstance(restriction[2], restricts.PackageDep)
            # matches both ebuilds in the package dir
            self.assertEqual(len(repo.match(restriction)), 2)

            # ebuild file
            restriction = repo.path_restrict(
                pjoin(repo.location, 'cat', 'foo', 'foo-1.ebuild'))
            self.assertEqual(len(restriction), 4)
            self.assertInstance(restriction[3], restricts.VersionMatch)
            # specific ebuild version match
            self.assertEqual(len(repo.match(restriction)), 1)

            # relative ebuild file path
            with mock.patch('os.getcwd',
                            return_value=os.path.realpath(
                                pjoin(repo.location, 'cat', 'foo'))):
                restriction = repo.path_restrict('./foo-1.ebuild')
                self.assertEqual(len(restriction), 4)
                self.assertInstance(restriction[3], restricts.VersionMatch)
                # specific ebuild version match
                self.assertEqual(len(repo.match(restriction)), 1)
Example #40
 def bashrc(self):
     path = pjoin(self.path, "profile.bashrc")
     if os.path.exists(path):
         return local_source(path)
     return None
Example #41
 def __init__(self, basepath, profile, load_profile_base=True):
     ProfileStack.__init__(self, pjoin(basepath, profile))
     self.basepath = basepath
     self.load_profile_base = load_profile_base
Example #42
 def _get_manifest(self, category, package):
     return digest.Manifest(pjoin(self.base, category, package, "Manifest"),
                            thin=self.config.manifests.thin,
                            enforce_gpg=self.enable_gpg)
Example #43
    def finish(self):
        unknown_pkgs = defaultdict(lambda: defaultdict(list))
        unknown_pkg_use = defaultdict(lambda: defaultdict(list))
        unknown_use = defaultdict(lambda: defaultdict(list))
        unknown_keywords = defaultdict(lambda: defaultdict(list))

        def _pkg_atoms(filename, profile, vals):
            for a in iflatten_instance(vals, atom.atom):
                if not self.repo.match(a):
                    unknown_pkgs[profile.path][filename].append(a)

        def _pkg_keywords(filename, profile, vals):
            for atom, keywords in vals:
                invalid = set(keywords) - self.valid_keywords
                if invalid:
                    unknown_keywords[profile.path][filename].append(
                        (atom, invalid))

        def _pkg_use(filename, profile, vals):
            # TODO: give ChunkedDataDict some dict view methods
            d = vals
            if isinstance(d, misc.ChunkedDataDict):
                d = vals.render_to_dict()

            for _pkg, entries in d.items():
                for a, disabled, enabled in entries:
                    pkgs = self.repo.match(a)
                    if not pkgs:
                        unknown_pkgs[profile.path][filename].append(a)
                    else:
                        available = {
                            u
                            for pkg in pkgs for u in pkg.iuse_stripped
                        }
                        unknown_disabled = set(disabled) - available
                        unknown_enabled = set(enabled) - available
                        if unknown_disabled:
                            unknown_pkg_use[profile.path][filename].append(
                                (a, ('-' + u for u in unknown_disabled)))
                        if unknown_enabled:
                            unknown_pkg_use[profile.path][filename].append(
                                (a, unknown_enabled))

        def _use(filename, profile, vals):
            # TODO: give ChunkedDataDict some dict view methods
            d = vals.render_to_dict()
            for _, entries in d.items():
                for _, disabled, enabled in entries:
                    unknown_disabled = set(disabled) - self.available_iuse
                    unknown_enabled = set(enabled) - self.available_iuse
                    if unknown_disabled:
                        unknown_use[profile.path][filename].extend(
                            ('-' + u for u in unknown_disabled))
                    if unknown_enabled:
                        unknown_use[profile.path][filename].extend(
                            unknown_enabled)

        def _deprecated(filename, profile, vals):
            # make sure replacement profile exists
            if vals is not None:
                replacement, msg = vals
                try:
                    _ProfileNode(pjoin(self.profiles_dir, replacement))
                except profiles_mod.ProfileError:
                    yield ProfileError(
                        f'nonexistent replacement {replacement!r} '
                        f'for deprecated profile: {profile.name!r}')

        file_parse_map = {
            'packages': ('packages', _pkg_atoms),
            'package.mask': ('masks', _pkg_atoms),
            'package.unmask': ('unmasks', _pkg_atoms),
            'package.use': ('pkg_use', _pkg_use),
            'package.use.force': ('pkg_use_force', _pkg_use),
            'package.use.stable.force': ('pkg_use_stable_force', _pkg_use),
            'package.use.mask': ('pkg_use_mask', _pkg_use),
            'package.use.stable.mask': ('pkg_use_stable_mask', _pkg_use),
            'use.force': ('use_force', _use),
            'use.stable.force': ('use_stable_force', _use),
            'use.mask': ('use_mask', _use),
            'use.stable.mask': ('use_stable_mask', _use),
            'parent': ('parents', lambda *args: None),
            'deprecated': ('deprecated', _deprecated),

            # non-PMS files
            'package.keywords': ('keywords', _pkg_keywords),
            'package.accept_keywords': ('accept_keywords', _pkg_keywords),
        }

        profile_reports = []
        report_profile_warnings = lambda x: profile_reports.append(
            ProfileWarning(x))
        report_profile_errors = lambda x: profile_reports.append(
            ProfileError(x))

        for root, _dirs, files in os.walk(self.profiles_dir):
            if root not in self.non_profile_dirs:
                profile = _ProfileNode(root)
                for f in set(files).intersection(file_parse_map.keys()):
                    attr, func = file_parse_map[f]
                    file_path = pjoin(root[len(self.profiles_dir) + 1:], f)
                    # convert log warnings/errors into reports
                    with patch('pkgcore.log.logger.error', report_profile_errors), \
                            patch('pkgcore.log.logger.warning', report_profile_warnings):
                        vals = getattr(profile, attr)
                    results = func(f, profile, vals)
                    if results is not None:
                        yield from results

        yield from profile_reports

        for path, filenames in sorted(unknown_pkgs.items()):
            for filename, vals in filenames.items():
                pkgs = map(str, vals)
                yield UnknownProfilePackages(
                    pjoin(path[len(self.profiles_dir):].lstrip('/'), filename),
                    pkgs)

        for path, filenames in sorted(unknown_pkg_use.items()):
            for filename, vals in filenames.items():
                for pkg, flags in vals:
                    yield UnknownProfilePackageUse(
                        pjoin(path[len(self.profiles_dir):].lstrip('/'),
                              filename), str(pkg), flags)

        for path, filenames in sorted(unknown_use.items()):
            for filename, vals in filenames.items():
                yield UnknownProfileUse(
                    pjoin(path[len(self.profiles_dir):].lstrip('/'), filename),
                    vals)

        for path, filenames in sorted(unknown_keywords.items()):
            for filename, vals in filenames.items():
                for pkg, keywords in vals:
                    yield UnknownProfilePackageKeywords(
                        pjoin(path[len(self.profiles_dir):].lstrip('/'),
                              filename), str(pkg), keywords)
Example #44
                pjoin(self.env["D"], self.prefix.lstrip('/'))) + "/"

    def get_env_source(self):
        with open(pjoin(self.env["T"], "environment"), "rb") as f:
            return data_source.bytes_data_source(f.read())

    def setup_env_data_source(self):
        if not ensure_dirs(
                self.env["T"], mode=0770, gid=portage_gid, minimal=True):
            raise format.FailedDirectory(
                self.env['T'],
                "%s doesn't fulfill minimum mode %o and gid %i" %
                (self.env['T'], 0o770, portage_gid))

        if self.env_data_source is not None:
            fp = pjoin(self.env["T"], "environment")
            # load data first (might be a local_source), *then* write
            # if it's a src_ebuild being installed, trying to do two steps
            # stomps the local_sources data.
            data = self.env_data_source.bytes_fileobj().read()
            with open(fp, "wb") as f:
                f.write(data)
            del data

    def _set_per_phase_env(self, phase, env):
        self._setup_merge_type(phase, env)

    def _setup_merge_type(self, phase, env):
        # only allowed in pkg_ phases.

        if (not self.eapi.phases.get(phase, "").startswith("pkg_")
Example #45
    def finish(self):
        # don't check for unknown category dirs on overlays
        if self.options.gentoo_repo:
            category_dirs = set(
                filterfalse(self.repo.false_categories.__contains__,
                            (x for x in listdir_dirs(self.repo.location)
                             if x[0] != '.')))
            unknown_categories = category_dirs.difference(self.repo.categories)
            if unknown_categories:
                yield UnknownCategories(sorted(unknown_categories))

        arches_without_profiles = set(self.arches) - set(
            self.repo.profiles.arches())
        if arches_without_profiles:
            yield ArchesWithoutProfiles(sorted(arches_without_profiles))

        root_profile_dirs = {'embedded'}
        available_profile_dirs = set()
        for root, _dirs, _files in os.walk(self.profiles_dir):
            d = root[len(self.profiles_dir):].lstrip('/')
            if d:
                available_profile_dirs.add(d)
        available_profile_dirs -= self.non_profile_dirs | root_profile_dirs

        profile_reports = []
        report_profile_warnings = lambda x: profile_reports.append(
            ProfileWarning(x))
        report_profile_errors = lambda x: profile_reports.append(
            ProfileError(x))

        # don't check for acceptable profile statuses on overlays
        if self.options.gentoo_repo:
            known_profile_statuses = self.known_profile_statuses
        else:
            known_profile_statuses = None

        # forcibly parse profiles.desc and convert log warnings/errors into reports
        with patch('pkgcore.log.logger.error', report_profile_errors), \
                patch('pkgcore.log.logger.warning', report_profile_warnings):
            profiles = Profiles.parse(self.profiles_dir,
                                      self.repo.repo_id,
                                      known_status=known_profile_statuses,
                                      known_arch=self.arches)

        yield from profile_reports

        seen_profile_dirs = set()
        lagging_profile_eapi = defaultdict(list)
        for p in profiles:
            try:
                profile = profiles_mod.ProfileStack(
                    pjoin(self.profiles_dir, p.path))
            except profiles_mod.ProfileError:
                yield NonexistentProfilePath(p.path)
                continue
            for parent in profile.stack:
                seen_profile_dirs.update(
                    dir_parents(parent.path[len(self.profiles_dir):]))
                # flag lagging profile EAPIs -- assumes EAPIs are sequentially
                # numbered which should be the case for the gentoo repo
                if (self.options.gentoo_repo
                        and str(profile.eapi) < str(parent.eapi)):
                    lagging_profile_eapi[profile].append(parent)

        for profile, parents in lagging_profile_eapi.items():
            parent = parents[-1]
            yield LaggingProfileEapi(profile.name, str(profile.eapi),
                                     parent.name, str(parent.eapi))

        unused_profile_dirs = available_profile_dirs - seen_profile_dirs
        if unused_profile_dirs:
            yield UnusedProfileDirs(sorted(unused_profile_dirs))
Example #46
    def _cmd_implementation_digests(self,
                                    domain,
                                    matches,
                                    observer,
                                    mirrors=False,
                                    force=False):
        manifest_config = self.repo.config.manifests
        if manifest_config.disabled:
            observer.info(f"repo {self.repo.repo_id} has manifests disabled")
            return
        required_chksums = set(manifest_config.required_hashes)
        write_chksums = manifest_config.hashes
        distdir = domain.fetcher.distdir
        ret = set()

        for key_query in sorted(
                set(match.unversioned_atom for match in matches)):
            pkgs = self.repo.match(key_query)

            # check for pkgs masked by bad metadata
            bad_metadata = self.repo._bad_masked.match(key_query)
            if bad_metadata:
                for pkg in bad_metadata:
                    e = pkg.data
                    error_str = f"{pkg.cpvstr}: {e.msg(verbosity=observer.verbosity)}"
                    observer.error(error_str)
                    ret.add(key_query)
                continue

            # Check for bad ebuilds -- mismatched or invalid PNs won't be
            # matched by regular restrictions so they will otherwise be
            # ignored.
            ebuilds = {
                x
                for x in listdir_files(
                    pjoin(self.repo.location, str(key_query)))
                if x.endswith('.ebuild')
            }
            unknown_ebuilds = ebuilds.difference(
                os.path.basename(x.path) for x in pkgs)
            if unknown_ebuilds:
                error_str = (
                    f"{key_query}: invalid ebuild{_pl(unknown_ebuilds)}: "
                    f"{', '.join(unknown_ebuilds)}")
                observer.error(error_str)
                ret.add(key_query)
                continue

            # empty package dir
            if not pkgs:
                continue

            manifest = pkgs[0].manifest

            # all pkgdir fetchables
            pkgdir_fetchables = {}
            for pkg in pkgs:
                pkgdir_fetchables.update({
                    fetchable.filename: fetchable
                    for fetchable in iflatten_instance(
                        pkg._get_attr['fetchables']
                        (pkg,
                         allow_missing_checksums=True,
                         skip_default_mirrors=(not mirrors)), fetch.fetchable)
                })

            # fetchables targeted for (re-)manifest generation
            fetchables = {}
            chksum_set = set(write_chksums)
            for filename, fetchable in pkgdir_fetchables.items():
                if force or not required_chksums.issubset(fetchable.chksums):
                    fetchable.chksums = {
                        k: v
                        for k, v in fetchable.chksums.items()
                        if k in chksum_set
                    }
                    fetchables[filename] = fetchable

            # Manifest files aren't necessary with thin manifests and no distfiles
            if manifest_config.thin and not pkgdir_fetchables:
                if os.path.exists(manifest.path):
                    try:
                        os.remove(manifest.path)
                    except EnvironmentError as e:
                        observer.error(
                            'failed removing old manifest: '
                            f'{key_query}::{self.repo.repo_id}: {e}')
                        ret.add(key_query)
                continue

            # Manifest file is current and not forcing a refresh
            if not force and manifest.distfiles.keys() == pkgdir_fetchables.keys():
                continue

            pkg_ops = domain.pkg_operations(pkgs[0], observer=observer)
            if not pkg_ops.supports("fetch"):
                observer.error(
                    f"pkg {pkgs[0]} doesn't support fetching, can't generate manifest"
                )
                ret.add(key_query)
                continue

            # fetch distfiles
            if not pkg_ops.fetch(list(fetchables.values()), observer):
                ret.add(key_query)
                continue

            # calculate checksums for fetched distfiles
            try:
                for fetchable in fetchables.values():
                    chksums = chksum.get_chksums(
                        pjoin(distdir, fetchable.filename), *write_chksums)
                    fetchable.chksums = dict(zip(write_chksums, chksums))
            except chksum.MissingChksumHandler as e:
                observer.error(f'failed generating chksum: {e}')
                ret.add(key_query)
                break

            if key_query not in ret:
                fetchables.update(pkgdir_fetchables)
                observer.info(
                    f"generating manifest: {key_query}::{self.repo.repo_id}")
                manifest.update(sorted(fetchables.values()),
                                chfs=write_chksums)

        return ret
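
The heart of the digest logic above is deciding which fetchables need (re)manifesting: any distfile missing one of the repo's required hashes is refetched and rehashed with the configured write hashes. A minimal sketch of that filter in isolation (attribute names follow the snippet above; this is a sketch, not the pkgcore API itself):

# minimal sketch, assuming each fetchable carries a {hash_name: value} dict
def needs_remanifest(fetchable, required_chksums, force=False):
    # regenerate when forced, or when any required hash is missing
    return force or not set(required_chksums).issubset(fetchable.chksums)

def trim_to_write_chksums(fetchable, write_chksums):
    # keep only the hashes the repo is configured to write out
    keep = set(write_chksums)
    fetchable.chksums = {k: v for k, v in fetchable.chksums.items() if k in keep}
    return fetchable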
Example #47
0
    def test_it(self):
        with open(pjoin(self.dir, "arch.list"), "w") as f:
            f.write("\n".join(('amd64', 'ppc', 'x86')))

        self.mk_profiles({
            "1": ["x86"],
            "2": ["x86"],
            "3": ["ppc"],
        })

        with open(pjoin(self.dir, 'profiles', '1', 'package.use.stable.mask'), 'w') as f:
            f.write('dev-util/diffball foo')
        with open(pjoin(self.dir, 'profiles', '2', 'package.use.stable.force'), 'w') as f:
            f.write('=dev-util/diffball-0.1 bar foo')
        with open(pjoin(self.dir, 'profiles', '3', 'package.use.stable.force'), 'w') as f:
            f.write('dev-util/diffball bar foo')

        check = self.get_check('1', '2', '3')

        def get_rets(ver, attr, KEYWORDS="x86", **data):
            data["KEYWORDS"] = KEYWORDS
            pkg = FakePkg(f"dev-util/diffball-{ver}", data=data)
            return check.collapse_evaluate_depset(pkg, attr, getattr(pkg, attr))

        # A few notes: to verify the proper profiles came through, use
        # sorted(x.name for x in ...); that catches any duplicates coming
        # through *and* ensures proper profile collapsing

        # shouldn't return anything due to no profiles matching the keywords.
        assert get_rets("0.0.1", "depend", KEYWORDS="foon") == []
        l = get_rets("0.0.2", "depend")
        assert len(l) == 1, f"must collapse all profiles down to one run: got {l!r}"
        assert len(l[0][1]) == 4, "must have four runs (arch and ~arch for each profile)"
        assert sorted(set(x.name for x in l[0][1])) == ['1', '2'], f"must have two profiles: got {l!r}"
        assert l[0][1][0].key == 'x86'
        assert l[0][1][1].key == 'x86'

        l = get_rets(
            "0.1", "rdepend",
            RDEPEND="x? ( dev-util/confcache ) foo? ( dev-util/foo ) "
                    "bar? ( dev-util/bar ) !bar? ( dev-util/nobar ) x11-libs/xserver"
        )

        assert len(l) == 3, f"must collapse all profiles down to 3 runs: got {l!r}"

        # ordering is potentially random; thus pull out which depset result is
        # which based upon profile
        l1 = [x for x in l if x[1][0].name == '1'][0]
        l2 = [x for x in l if x[1][0].name == '2'][0]

        assert (
            set(str(l1[0]).split()) ==
            {'dev-util/confcache', 'dev-util/bar', 'dev-util/nobar', 'x11-libs/xserver'})

        assert (
            set(str(l2[0]).split()) ==
            {'dev-util/confcache', 'dev-util/foo', 'dev-util/bar', 'x11-libs/xserver'})

        # test feed wiping, using an empty depset; if it didn't clear, then
        # results from a pkg/attr tuple from above would come through rather
        # than an empty one.
        pkg = FakePkg('dev-util/diffball-0.5')
        check.feed(pkg)
        l = get_rets("0.1", "rdepend")
        assert len(l) == 1, f"feed didn't clear the cache- should be len 1: {l!r}"

        check.feed(pkg)

        # ensure it handles arch right.
        l = get_rets("0", "depend", KEYWORDS="ppc x86")
        assert len(l) == 1, f"should be len 1, got {l!r}"
        assert sorted(set(x.name for x in l[0][1])) == ["1", "2", "3"], (
            f"should have three profiles of 1-3, got {l[0][1]!r}")

        # ensure it's caching profile collapsing, iow, keywords for same ver
        # that's partially cached (single attr at least) should *not* change
        # things.

        l = get_rets("0", "depend", KEYWORDS="ppc")
        assert sorted(set(x.name for x in l[0][1])) == ['1', '2', '3'], (
            f"should have 3 profiles, got {l[0][1]!r}\nthis indicates it's "
            "re-identifying profiles on every invocation, which is unwarranted")

        l = get_rets("1", "depend", KEYWORDS="ppc x86",
            DEPEND="ppc? ( dev-util/ppc ) !ppc? ( dev-util/x86 )")
        assert len(l) == 2, f"should be len 2, got {l!r}"

        # same issue, figure out what is what
        l1 = [x[1] for x in l if str(x[0]).strip() == "dev-util/ppc"][0]
        l2 = [x[1] for x in l if str(x[0]).strip() == "dev-util/x86"][0]

        assert sorted(set(x.name for x in l1)) == ["3"]
        assert sorted(set(x.name for x in l2)) == ["1", "2"]
Example #48
0
 def _get_path(self, pkg):
     return pjoin(self.base, pkg.category,
                  f"{pkg.package}-{pkg.fullver}.tbz2")
Example #49
0
 def get_env_source(self):
     with open(pjoin(self.env["T"], "environment"), "rb") as f:
         return data_source.bytes_data_source(f.read())
Example #50
0
    def __init__(self,
                 location,
                 eclass_cache,
                 cache=(),
                 default_mirrors=None,
                 override_repo_id=None,
                 ignore_paludis_versioning=False,
                 allow_missing_manifests=False,
                 repo_config=None):
        """
        :param location: on disk location of the tree
        :param cache: sequence of :obj:`pkgcore.cache.template.database` instances
            to use for storing metadata
        :param eclass_cache: If not None, :obj:`pkgcore.ebuild.eclass_cache`
            instance representing the eclasses available;
            if None, generates the eclass_cache itself
        :param default_mirrors: Either None, or a sequence of mirrors to try
            fetching from first before falling back to other URIs
        :param override_repo_id: Either None, or a string to force as the
            repository's unique id
        :param ignore_paludis_versioning: If False, fail when -scm is
            encountered; if True, silently ignore -scm ebuilds.
        """

        prototype.tree.__init__(self)
        if repo_config is None:
            repo_config = repo_objs.RepoConfig(location)
        self.config = repo_config
        self._repo_id = override_repo_id
        self.base = self.location = location
        try:
            if not stat.S_ISDIR(os.stat(self.base).st_mode):
                raise errors.InitializationError(
                    "base not a dir: %s" % self.base)
        except OSError:
            raise_from(errors.InitializationError(
                "stat failed on base %s" % (self.base,)))
        self.eclass_cache = eclass_cache

        self.licenses = repo_objs.Licenses(location)

        fp = pjoin(self.base, metadata_offset, "thirdpartymirrors")
        mirrors = {}
        try:
            for k, v in read_dict(fp, splitter=None).iteritems():
                v = v.split()
                shuffle(v)
                mirrors[k] = v
        except EnvironmentError as ee:
            if ee.errno != errno.ENOENT:
                raise

        if isinstance(cache, (tuple, list)):
            cache = tuple(cache)
        else:
            cache = (cache, )

        self.mirrors = mirrors
        self.default_mirrors = default_mirrors
        self.cache = cache
        self.ignore_paludis_versioning = ignore_paludis_versioning
        self._allow_missing_chksums = allow_missing_manifests
        self.package_class = self.package_factory(self, cache,
                                                  self.eclass_cache,
                                                  self.mirrors,
                                                  self.default_mirrors)
        self._shared_pkg_cache = WeakValCache()
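
A note on the thirdpartymirrors parsing above: the file maps a mirror group name to whitespace-separated URIs, one group per line, and each group is shuffled so fetch load is spread across mirrors. A rough standalone equivalent (read_dict's exact semantics are approximated here):

import random

def parse_thirdpartymirrors(path):
    # sketch: each line is "name uri1 uri2 ..."; blanks and comments skipped
    mirrors = {}
    try:
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                parts = line.split(None, 1)
                if len(parts) < 2:
                    continue
                name, uris = parts[0], parts[1].split()
                random.shuffle(uris)  # randomize mirror ordering per run
                mirrors[name] = uris
    except FileNotFoundError:
        pass  # no thirdpartymirrors file simply means no custom mirrors
    return mirrors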
Example #51
0
    def __init__(self,
                 location,
                 eclass_cache=None,
                 masters=(),
                 cache=(),
                 default_mirrors=None,
                 allow_missing_manifests=False,
                 repo_config=None):
        """
        :param location: on disk location of the tree
        :param cache: sequence of :obj:`pkgcore.cache.template.database` instances
            to use for storing metadata
        :param masters: repo masters this repo inherits from
        :param eclass_cache: If not None, :obj:`pkgcore.ebuild.eclass_cache`
            instance representing the eclasses available;
            if None, generates the eclass_cache itself
        :param default_mirrors: Either None, or a sequence of mirrors to try
            fetching from first before falling back to other URIs
        """
        super().__init__()
        self.base = self.location = location
        try:
            if not stat.S_ISDIR(os.stat(self.base).st_mode):
                raise errors.InitializationError(
                    f"base not a dir: {self.base}")
        except OSError as e:
            raise errors.InitializationError(
                f"stat failed: {self.base}") from e

        if repo_config is None:
            repo_config = repo_objs.RepoConfig(location)
        self.config = repo_config

        # profiles dir is required by PMS
        if not os.path.isdir(self.config.profiles_base):
            raise errors.InvalidRepo(
                f'missing required profiles dir: {self.location!r}')

        # verify we support the repo's EAPI
        if not self.is_supported:
            raise errors.UnsupportedRepo(self)

        if eclass_cache is None:
            eclass_cache = eclass_cache_mod.cache(
                pjoin(self.location, 'eclass'), location=self.location)
        self.eclass_cache = eclass_cache

        self.masters = masters
        self.trees = tuple(masters) + (self, )
        self.licenses = repo_objs.Licenses(self.location)
        self.profiles = self.config.profiles
        if masters:
            self.licenses = repo_objs.OverlayedLicenses(*self.trees)
            self.profiles = repo_objs.OverlayedProfiles(*self.trees)

        # use mirrors from masters if not defined in the repo
        mirrors = dict(self.thirdpartymirrors)
        for master in masters:
            for k, v in master.mirrors.items():
                if k not in mirrors:
                    mirrors[k] = v

        if isinstance(cache, (tuple, list)):
            cache = tuple(cache)
        else:
            cache = (cache, )

        self.mirrors = mirrors
        self.default_mirrors = default_mirrors
        self.cache = cache
        self._allow_missing_chksums = allow_missing_manifests
        self.package_class = self.package_factory(self, cache,
                                                  self.eclass_cache,
                                                  self.mirrors,
                                                  self.default_mirrors)
        self._shared_pkg_cache = WeakValCache()
        self._bad_masked = RestrictionRepo(repo_id='bad_masked')
        self.projects_xml = repo_objs.LocalProjectsXml(
            pjoin(self.location, 'metadata', 'projects.xml'))
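
Mirror inheritance in the constructor above is repo-first: a master's mirror group is only adopted when this repo doesn't define the same name. In sketch form (assuming master objects expose a .mirrors mapping, as used above):

def merged_mirrors(own_mirrors, masters):
    # the repo's own definitions win; masters fill gaps in listed order
    mirrors = dict(own_mirrors)
    for master in masters:
        for name, uris in master.mirrors.items():
            mirrors.setdefault(name, uris)
    return mirrors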
Example #52
0
 def __stage_step_callback__(self, stage):
     try:
         open(pjoin(self.builddir, '.%s' % (stage, )), 'w').close()
     except EnvironmentError:
         # we really don't care...
         pass
Example #53
0
    def __init__(self,
                 pkg,
                 initial_env=None,
                 env_data_source=None,
                 features=None,
                 observer=None,
                 clean=True,
                 tmp_offset=None,
                 use_override=None,
                 allow_fetching=False):
        """
        :param pkg:
            :class:`pkgcore.ebuild.ebuild_src.package`
            instance this env is being setup for
        :param initial_env: initial environment to use for this ebuild
        :param env_data_source: a :obj:`snakeoil.data_source.base` instance
            to restore the environment from; used for restoring the
            state of ebuild processing, whether for unmerging or for
            walking phases during building
        :param features: ebuild features; a holdover from portage that
            will be broken down at some point
        """

        if use_override is not None:
            use = use_override
        else:
            use = pkg.use

        self.allow_fetching = allow_fetching

        if not hasattr(self, "observer"):
            self.observer = observer
        if not pkg.eapi.is_supported:
            raise TypeError("package %s uses an unsupported eapi: %s" %
                            (pkg, pkg.eapi))

        if initial_env is not None:
            # copy.
            self.env = dict(initial_env)
            for x in ("USE", "ACCEPT_LICENSE"):
                if x in self.env:
                    del self.env[x]
        else:
            self.env = {}

        if "PYTHONPATH" in os.environ:
            self.env["PYTHONPATH"] = os.environ["PYTHONPATH"]

        if features is None:
            features = self.env.get("FEATURES", ())

        # XXX: note this is just EAPI 3 compatibility; not full prefix, soon..
        self.env["ROOT"] = self.domain.root
        self.prefix_mode = pkg.eapi.options.prefix_capable or 'force-prefix' in features
        self.env["PKGCORE_PREFIX_SUPPORT"] = 'false'
        self.prefix = '/'
        if self.prefix_mode:
            self.prefix = self.domain.prefix
            self.env['EPREFIX'] = self.prefix.rstrip('/')
            self.env['EROOT'] = abspath(
                pjoin(self.domain.root,
                      self.prefix.lstrip('/'))).rstrip('/') + '/'
            self.env["PKGCORE_PREFIX_SUPPORT"] = 'true'

        # set the list of internally implemented EAPI specific functions that
        # shouldn't be exported
        if os.path.exists(pjoin(const.EBD_PATH, 'funcnames', str(pkg.eapi))):
            with open(pjoin(const.EBD_PATH, 'funcnames', str(pkg.eapi)),
                      'r') as f:
                eapi_funcs = f.readlines()
        else:
            ret, eapi_funcs = spawn_get_output([
                pjoin(const.EBD_PATH, 'generate_eapi_func_list.bash'),
                str(pkg.eapi)
            ])
            if ret != 0:
                raise Exception(
                    "failed to generate list of EAPI %s specific functions" %
                    str(pkg.eapi))
        self.env["PKGCORE_EAPI_FUNCS"] = ' '.join(x.strip()
                                                  for x in eapi_funcs)

        self.env_data_source = env_data_source
        if (env_data_source is not None
                and not isinstance(env_data_source, data_source.base)):
            raise TypeError(
                "env_data_source must be None, or a pkgcore.data_source.base "
                "derivative: %s: %s" %
                (env_data_source.__class__, env_data_source))

        self.features = set(x.lower() for x in features)

        self.env["FEATURES"] = ' '.join(sorted(self.features))

        iuse_effective_regex = (re.escape(x) for x in pkg.iuse_effective)
        iuse_effective_regex = "^(%s)$" % "|".join(iuse_effective_regex)
        iuse_effective_regex = iuse_effective_regex.replace("\\.\\*", ".*")
        self.env["PKGCORE_IUSE_EFFECTIVE"] = iuse_effective_regex

        expected_ebuild_env(pkg,
                            self.env,
                            env_source_override=self.env_data_source)

        self.env["PKGCORE_FINALIZED_RESTRICT"] = ' '.join(
            str(x) for x in pkg.restrict)

        self.restrict = pkg.restrict

        for x in ("sandbox", "userpriv"):
            setattr(self, x, self.feat_or_bool(x) and not (x in self.restrict))
        if self.userpriv and os.getuid() != 0:
            self.userpriv = False

        if "PORT_LOGDIR" in self.env:
            self.logging = pjoin(
                self.env["PORT_LOGDIR"], "%s:%s:%s.log" %
                (pkg.cpvstr, self.__class__.__name__,
                 time.strftime("%Y%m%d-%H%M%S", time.localtime())))
            del self.env["PORT_LOGDIR"]
        else:
            self.logging = False

        self.env["XARGS"] = xargs

        self.bashrc = self.env.pop("bashrc", ())

        self.pkg = pkg
        self.eapi = pkg.eapi
        wipes = [
            k for k, v in self.env.iteritems()
            if not isinstance(v, basestring)
        ]
        for k in wipes:
            del self.env[k]

        self.set_op_vars(tmp_offset)
        self.clean_at_start = clean
        self.clean_needed = False
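
One detail worth isolating from the constructor above: PKGCORE_IUSE_EFFECTIVE is handed to bash as a single anchored alternation regex, and any `.*` wildcards in the flag list are deliberately un-escaped again after re.escape. A standalone sketch of that construction (flag names below are illustrative):

import re

def iuse_effective_regex(flags):
    # escape each flag, then join into one anchored alternation
    pattern = "^(%s)$" % "|".join(re.escape(x) for x in flags)
    # wildcard entries such as "userland_.*" must stay wildcards
    return pattern.replace("\\.\\*", ".*")

# iuse_effective_regex(["doc", "userland_.*"]) -> '^(doc|userland_.*)$'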
Example #54
0
    def __init__(self,
                 domain,
                 pkg,
                 verified_files,
                 eclass_cache,
                 observer=None,
                 **kwargs):
        """
        :param pkg: :obj:`pkgcore.ebuild.ebuild_src.package` instance we'll be
            building
        :param domain_settings: dict bled down from the domain configuration;
            basically initial env
        :param eclass_cache: the :class:`pkgcore.ebuild.eclass_cache`
            we'll be using
        :param files: mapping of fetchables mapped to their disk location
        """

        use = kwargs.get("use_override", pkg.use)
        domain_settings = domain.settings

        format.build.__init__(self, domain, pkg, verified_files, observer)
        ebd.__init__(self,
                     pkg,
                     initial_env=domain_settings,
                     features=domain_settings["FEATURES"],
                     **kwargs)

        self.env["FILESDIR"] = pjoin(os.path.dirname(pkg.ebuild.path), "files")
        self.eclass_cache = eclass_cache
        self.env["ECLASSDIR"] = eclass_cache.eclassdir

        # this needs to be deprecated and dropped from future EAPIs
        self.env["PORTDIR"] = eclass_cache.location

        self.run_test = self.feat_or_bool("test", domain_settings)
        self.allow_failed_test = self.feat_or_bool("test-fail-continue",
                                                   domain_settings)
        if "test" in self.restrict:
            self.run_test = False
        elif "test" not in use:
            if self.run_test:
                logger.warning(
                    "disabling test for %s due to test use flag being disabled"
                    % pkg)
            self.run_test = False

        # XXX minor hack
        path = self.env["PATH"].split(":")

        for s, default in (("DISTCC", ".distcc"), ("CCACHE", "ccache")):
            b = (self.feat_or_bool(s, domain_settings)
                 and s not in self.restrict)
            setattr(self, s.lower(), b)
            if b:
                # looks weird I realize, but
                # pjoin("/foo/bar", "/barr/foo") == "/barr/foo"
                # and pjoin("/foo/bar", ".asdf") == "/foo/bar/.asdf"
                # (see the short demonstration after this example)
                self.env.setdefault(s + "_DIR", pjoin(self.tmpdir, default))
                # gentoo bug 355283
                libdir = self.env.get("ABI")
                if libdir is not None:
                    libdir = self.env.get("LIBDIR_%s" % (libdir, ))
                    if libdir is not None:
                        libdir = self.env.get(libdir)
                if libdir is None:
                    libdir = "lib"
                path.insert(0, "/usr/%s/%s/bin" % (libdir, s.lower()))
            else:
                for y in ("_PATH", "_DIR"):
                    if s + y in self.env:
                        del self.env[s + y]
        path = [piece for piece in path if piece]
        self.env["PATH"] = os.pathsep.join(path)
        self.env["A"] = ' '.join(set(x.filename for x in pkg.fetchables))

        if self.eapi.options.has_AA:
            pkg = getattr(self.pkg, '_raw_pkg', self.pkg)
            self.env["AA"] = ' '.join(
                set(x.filename for x in iflatten_instance(
                    pkg.fetchables, fetch.fetchable)))

        if self.eapi.options.has_KV:
            ret = spawn_get_output(['uname', '-r'])
            if ret[0] == 0:
                self.env["KV"] = ret[1][0].strip()

        if self.eapi.options.has_merge_type:
            self.env["MERGE_TYPE"] = "source"

        if self.setup_is_for_src:
            self.init_distfiles_env()
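
As promised in the DISTCC/CCACHE comment above, os.path.join discards everything before an absolute second component, which is exactly what lets a user-supplied absolute *_DIR override the tmpdir default. A quick demonstration:

from os.path import join as pjoin

# an absolute user-supplied path wins outright...
assert pjoin("/foo/bar", "/barr/foo") == "/barr/foo"
# ...while a relative default lands inside the build tmpdir
assert pjoin("/foo/bar", ".asdf") == "/foo/bar/.asdf"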
Example #55
0
 def bashrcs(self):
     files = sorted_scan(pjoin(self.config_dir, 'bashrc'), follow_symlinks=True)
     return tuple(local_source(x) for x in files)
Example #56
0
    def _cmd_implementation_sanity_check(self, domain, observer):
        """Various ebuild sanity checks (REQUIRED_USE, pkg_pretend)."""
        pkg = self.pkg
        eapi = pkg.eapi

        # perform REQUIRED_USE checks
        if eapi.options.has_required_use:
            use = pkg.use
            for node in pkg.required_use:
                if not node.match(use):
                    observer.info(
                        textwrap.dedent("""
                        REQUIRED_USE requirement wasn't met
                        Failed to match: {}
                        from: {}
                        for USE: {}
                        pkg: {}
                        """.format(node, pkg.required_use, " ".join(use),
                                   pkg.cpvstr)))
                    return False

        # return if running pkg_pretend is not required
        if 'pretend' not in pkg.mandatory_phases:
            return True

        # run pkg_pretend phase
        commands = None
        if not pkg.built:
            commands = {
                "request_inherit": partial(inherit_handler, self._eclass_cache)
            }
        env = expected_ebuild_env(pkg)
        builddir = pjoin(domain.tmpdir, env["CATEGORY"], env["PF"])
        pkg_tmpdir = normpath(pjoin(builddir, "temp"))
        ensure_dirs(pkg_tmpdir, mode=0o770, gid=portage_gid, minimal=True)
        env["ROOT"] = domain.root
        env["T"] = pkg_tmpdir

        # TODO: make colored output easier to achieve from observers
        msg = [
            '>>> Running pkg_pretend for ',
            observer._output._out.fg('green'), pkg.cpvstr,
            observer._output._out.reset
        ]
        observer._output._out.write(*msg)

        try:
            start = time.time()
            ret = run_generic_phase(pkg,
                                    "pretend",
                                    env,
                                    userpriv=True,
                                    sandbox=True,
                                    extra_handlers=commands)
            logger.debug("pkg_pretend sanity check for %s took %2.2f seconds",
                         pkg.cpvstr,
                         time.time() - start)
            return ret
        except format.GenericBuildError as e:
            return False
        finally:
            shutil.rmtree(builddir)
            # try to wipe the cat dir; if not empty, ignore it
            try:
                os.rmdir(os.path.dirname(builddir))
            except EnvironmentError as e:
                # POSIX specifies either ENOTEMPTY or EEXIST for non-empty dir
                # in particular, Solaris uses EEXIST in that case.
                # https://github.com/pkgcore/pkgcore/pull/181
                if e.errno not in (errno.ENOTEMPTY, errno.EEXIST):
                    raise
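
The REQUIRED_USE loop above treats each top-level node as a restriction whose match() is evaluated against the pkg's enabled USE flags, failing the sanity check on the first miss. A toy sketch of the same shape (the node class below is illustrative, not pkgcore's actual restriction type):

class AtLeastOneOf:
    """toy stand-in for a REQUIRED_USE '|| ( ... )' node"""

    def __init__(self, *flags):
        self.flags = flags

    def match(self, use):
        return any(flag in use for flag in self.flags)

def check_required_use(nodes, use):
    # mirrors the loop above: every top-level node must match
    return all(node.match(use) for node in nodes)

# check_required_use([AtLeastOneOf('gtk', 'qt5')], {'qt5'}) -> True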
Example #57
0
    def test_trigger(self):
        if self.raw_kls().get_binary_path() is None:
            raise SkipTest(
                "can't verify regen behaviour due to install-info not being available")

        cur = os.environ.get("PATH", self)
        try:
            os.environ.pop("PATH", None)
            # shouldn't run if the binary is missing
            # although it should warn, and this code will explode when it does.
            self.engine.phase = 'post_merge'
            self.assertEqual(None, self.trigger(self.engine, {}))
        finally:
            if cur is not self:
                os.environ["PATH"] = cur

        # verify it runs when dir is missing.
        # doesn't create the file since no info files.
        self.reset_objects()
        self.assertFalse(self.run_trigger('pre_merge', []))
        self.assertFalse(self.run_trigger('post_merge', [self.dir]))

        # add an info, and verify it generated.
        with open(pjoin(self.dir, 'foo.info'), 'w') as f:
            f.write(self.info_data)
        self.reset_objects()
        self.trigger.enable_regen = True
        self.assertFalse(self.run_trigger('pre_merge', []))
        self.assertFalse(self.run_trigger('post_merge', [self.dir]))

        # verify it doesn't; mtime is fine
        self.reset_objects()
        self.trigger.enable_regen = True
        self.assertFalse(self.run_trigger('pre_merge', []))
        self.assertFalse(self.run_trigger('post_merge', []))

        # verify it handles quoting properly, and that it ignores
        # complaints about duplicates.
        self.reset_objects()
        self.trigger.enable_regen = True
        self.assertFalse(self.run_trigger('pre_merge', []))
        with open(pjoin(self.dir, "blaidd drwg.info"), "w") as f:
            f.write(self.info_data)
        self.assertFalse(self.run_trigger('post_merge', [self.dir]))

        # verify it passes back failures.
        self.reset_objects()
        self.trigger.enable_regen = True
        self.assertFalse(self.run_trigger('pre_merge', []))
        with open(pjoin(self.dir, "tiza grande.info"), "w") as f:
            f.write('\n'.join(self.info_data.splitlines()[:-1]))
        l = self.run_trigger('post_merge', [self.dir])
        self.assertEqual(len(l), 1)
        self.assertIn('tiza grande.info', l[0])

        # verify it holds off on info regen till after unmerge for replaces.
        self.reset_objects(mode=const.REPLACE_MODE)
        self.assertFalse(self.run_trigger('pre_merge', []))
        self.assertFalse(self.run_trigger('post_merge', []))
        self.assertFalse(self.run_trigger('pre_unmerge', []))
        os.unlink(pjoin(self.dir, "tiza grande.info"))
        self.assertFalse(self.run_trigger('post_unmerge', [self.dir]))
Example #58
0
                data.append("%s='%s'" % (key, val))
            else:
                data.append("%s=$'%s'" % (key, val.replace("'", "\\'")))
        return 'export %s' % (' '.join(data), )

    def send_env(self, env_dict, async=False, tmpdir=None):
        # note: legacy py2 signature; 'async' became a reserved word in py3.7
        """
        transfer the ebuild's desired env (env_dict) to the running daemon

        :type env_dict: mapping with string keys and values.
        :param env_dict: the bash env.
        """
        data = self._generate_env_str(env_dict)
        old_umask = os.umask(0002)
        if tmpdir:
            path = pjoin(tmpdir, 'ebd-env-transfer')
            fileutils.write_file(path, 'wb', data)
            self.write("start_receiving_env file %s\n" % (path, ),
                       append_newline=False)
        else:
            self.write("start_receiving_env bytes %i\n%s" % (len(data), data),
                       append_newline=False)
        os.umask(old_umask)
        return self.expect("env_received", async=async, flush=True)

    def set_logfile(self, logfile=''):
        """
        Set the logfile (location to log to).

        Relevant only when the daemon is sandbox'd,
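
This example is truncated at both ends, but the serialization fragment at its top emits a single bash export line; a rough standalone reading of it (the branch condition is inferred from the visible else-arm and is an assumption):

def generate_env_str(env_dict):
    # sketch: plain single-quoting when safe, bash $'...' quoting otherwise
    data = []
    for key, val in env_dict.items():
        if "'" not in val:
            data.append("%s='%s'" % (key, val))
        else:
            data.append("%s=$'%s'" % (key, val.replace("'", "\\'")))
    return 'export %s' % (' '.join(data),)

# generate_env_str({'CFLAGS': '-O2'}) -> "export CFLAGS='-O2'"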
Example #59
0
 def _get_metadata_xml(self, category, package):
     return repo_objs.LocalMetadataXml(
         pjoin(self.base, category, package, "metadata.xml"))
Example #60
0
 def make_obj(self, location="/tmp/foo", **kwds):
     target = kwds.pop("target", pjoin(location, "target"))
     kwds.setdefault("strict", False)
     return self.kls(location, target, **kwds)