Code Example #1
File: test_repository.py Project: radhermit/pkgcore
 def test_licenses(self):
     licenses = ('GPL-2', 'GPL-3+', 'BSD')
     ensure_dirs(pjoin(self.dir, 'licenses'))
     for license in licenses:
         touch(pjoin(self.dir, 'licenses', license))
     repo = self.mk_tree(self.dir)
     self.assertEqual(sorted(repo.licenses), sorted(licenses))
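Every snippet in this collection exercises snakeoil's ensure_dirs helper. As a quick orientation before the remaining examples, here is a minimal standalone sketch of the shared call pattern; the target path is hypothetical, and the keyword arguments (mode, gid, minimal) plus the boolean return value are inferred from the usage shown on this page rather than quoted from any one project.

 # Minimal sketch (assumptions noted above): create a nested directory tree
 # and fail loudly if it could not be created or adjusted.
 import os
 from snakeoil.osutils import ensure_dirs, pjoin

 target = pjoin('/tmp', 'example', 'nested', 'dir')  # hypothetical path
 if not ensure_dirs(target, mode=0o755, minimal=True):
     raise OSError(f'failed creating {target!r}')
 # with minimal=True the resulting mode includes at least the requested bits
 print(oct(os.stat(target).st_mode & 0o777))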
Code Example #2
File: test_repository.py Project: neko259/pkgcore
 def mk_tree(self, path, *args, **kwds):
     eclasses = kwds.pop('eclass_cache', None)
     if eclasses is None:
         epath = pjoin(path, 'eclass')
         ensure_dirs(epath)
         eclasses = eclass_cache.cache(epath)
     return repository._UnconfiguredTree(path, eclasses, *args, **kwds)
Code Example #3
File: test_osutils.py Project: radhermit/snakeoil
 def test_mode(self):
     path = pjoin(self.dir, 'mode', 'mode')
     assert osutils.ensure_dirs(path, mode=0o700)
     self.check_dir(path, os.geteuid(), os.getegid(), 0o700)
     # unrestrict it
     osutils.ensure_dirs(path)
     self.check_dir(path, os.geteuid(), os.getegid(), 0o777)
Code Example #4
File: ebd.py Project: radhermit/pkgcore
    def nofetch(self):
        """Execute the nofetch phase.

        We need the same prerequisites as setup, so reuse that.
        """
        ensure_dirs(self.env["T"], mode=0o770, gid=portage_gid, minimal=True)
        return setup_mixin.setup(self, "nofetch")
Code Example #5
File: triggers.py Project: den4ix/pkgcore
 def trigger(self, engine, cset):
     pkg = engine.new
     filename = "%s_%s%s.deb" % (pkg.package, pkg.version, self.postfix)
     tmp_path = pjoin(engine.tempdir, filename)
     final_path = pjoin(self.basepath, filename)
     ensure_dirs(tmp_path)
     deb.write(tmp_path, final_path, pkg,
         cset=cset,
         platform=self.platform, maintainer=self.maintainer)
Code Example #6
File: test_repository.py Project: radhermit/pkgcore
    def setUp(self):
        TempDirMixin.setUp(self)
        self.pdir = pjoin(self.dir, 'profiles')
        ensure_dirs(self.pdir)

        # silence missing masters warnings
        ensure_dirs(pjoin(self.dir, 'metadata'))
        with open(pjoin(self.dir, 'metadata', 'layout.conf'), 'w') as f:
            f.write('masters =\n')
Code Example #7
File: test_osutils.py Project: radhermit/snakeoil
 def test_create_unwritable_subdir(self):
     path = pjoin(self.dir, 'restricted', 'restricted')
     # create the subdirs without 020 first
     assert osutils.ensure_dirs(os.path.dirname(path))
     assert osutils.ensure_dirs(path, mode=0o020)
     self.check_dir(path, os.geteuid(), os.getegid(), 0o020)
     # unrestrict it
     osutils.ensure_dirs(path)
     self.check_dir(path, os.geteuid(), os.getegid(), 0o777)
Code Example #8
File: test_repository.py Project: veelai/pkgcore
 def test_repo_id(self):
     dir1 = pjoin(self.dir, '1')
     os.mkdir(dir1, 0755)
     repo = self.mk_tree(dir1)
     self.assertEqual(repo.repo_id, '<unlabeled repository %s>' % (dir1,))
     dir2 = pjoin(self.dir, '2')
     osutils.ensure_dirs(pjoin(dir2, 'profiles'))
     open(pjoin(dir2, 'profiles', 'repo_name'), 'w').write('testrepo\n')
     repo = self.mk_tree(dir2)
     self.assertEqual('testrepo', repo.repo_id)
Code Example #9
File: test_osutils.py Project: radhermit/snakeoil
    def test_mkdir_failing(self):
        # fail if os.mkdir fails
        with mock.patch('snakeoil.osutils.os.mkdir') as mkdir:
            mkdir.side_effect = OSError(30, 'Read-only file system')
            path = pjoin(self.dir, 'dir')
            assert not osutils.ensure_dirs(path, mode=0o700)

            # force temp perms
            assert not osutils.ensure_dirs(path, mode=0o400)
            mkdir.side_effect = OSError(17, 'File exists')
            assert not osutils.ensure_dirs(path, mode=0o700)
Code Example #10
File: test_repository.py Project: radhermit/pkgcore
 def test_repo_id(self):
     dir1 = pjoin(self.dir, '1')
     os.mkdir(dir1, 0o755)
     repo = self.mk_tree(dir1)
     self.assertEqual(repo.repo_id, f'<unlabeled repo: {dir1!r}>')
     dir2 = pjoin(self.dir, '2')
     ensure_dirs(pjoin(dir2, 'profiles'))
     with open(pjoin(dir2, 'profiles', 'repo_name'), 'w') as f:
         f.write('testrepo\n')
     repo = self.mk_tree(dir2)
     self.assertEqual('testrepo', repo.repo_id)
Code Example #11
File: test_repository.py Project: radhermit/pkgcore
 def test_licenses(self):
     master_licenses = ('GPL-2', 'GPL-3+', 'BSD')
     slave_licenses = ('BSD-2', 'MIT')
     ensure_dirs(pjoin(self.dir_slave, 'licenses'))
     ensure_dirs(pjoin(self.dir_master, 'licenses'))
     for license in master_licenses:
         touch(pjoin(self.dir_master, 'licenses', license))
     for license in slave_licenses:
         touch(pjoin(self.dir_slave, 'licenses', license))
     repo = self.mk_tree(self.dir)
     self.assertEqual(sorted(repo.licenses), sorted(master_licenses + slave_licenses))
Code Example #12
File: test_repository.py Project: neko259/pkgcore
    def test_categories_packages(self):
        ensure_dirs(pjoin(self.dir, 'cat', 'pkg'))
        ensure_dirs(pjoin(self.dir, 'empty', 'empty'))
        ensure_dirs(pjoin(self.dir, 'scripts', 'pkg'))
        ensure_dirs(pjoin(self.dir, 'notcat', 'CVS'))
        touch(pjoin(self.dir, 'cat', 'pkg', 'pkg-3.ebuild'))
        repo = self.mk_tree(self.dir)
        self.assertEqual(
            {'cat': (), 'notcat': (), 'empty': ()}, dict(repo.categories))
        self.assertEqual(
            {'cat': ('pkg',), 'empty': ('empty',), 'notcat': ()},
            dict(repo.packages))
        self.assertEqual(
            {('cat', 'pkg'): ('3',), ('empty', 'empty'): ()},
            dict(repo.versions))

        for x in ("1-scm", "scm", "1-try", "1_beta-scm", "1_beta-try"):
            for rev in ("", "-r1"):
                fp = pjoin(self.dir, 'cat', 'pkg', 'pkg-%s%s.ebuild' %
                    (x, rev))
                open(fp, 'w').close()
                repo = self.mk_tree(self.dir)
                self.assertRaises(ebuild_errors.InvalidCPV,
                    repo.match, atom('cat/pkg'))
                repo = self.mk_tree(self.dir, ignore_paludis_versioning=True)
                self.assertEqual(sorted(x.cpvstr for x in
                    repo.itermatch(atom('cat/pkg'))), ['cat/pkg-3'])
                os.unlink(fp)
Code Example #13
File: ebd.py Project: floppym/pkgcore
 def _cmd_implementation_sanity_check(self, domain):
     pkg = self.pkg
     eapi = pkg.eapi_obj
     if eapi.options.has_required_use:
         use = pkg.use
         for node in pkg.required_use:
             if not node.match(use):
                 print(textwrap.dedent(
                     """
                     REQUIRED_USE requirement wasn't met
                     Failed to match: {}
                     from: {}
                     for USE: {}
                     pkg: {}
                     """.format(node, pkg.required_use, " ".join(use), pkg.cpvstr)
                 ))
                 return False
     if 'pretend' not in pkg.mandatory_phases:
         return True
     commands = None
     if not pkg.built:
         commands = {"request_inherit": partial(inherit_handler, self._eclass_cache)}
     env = expected_ebuild_env(pkg)
     tmpdir = normpath(domain._get_tempspace())
     builddir = pjoin(tmpdir, env["CATEGORY"], env["PF"])
     pkg_tmpdir = normpath(pjoin(builddir, "temp"))
     ensure_dirs(pkg_tmpdir, mode=0770, gid=portage_gid, minimal=True)
     env["ROOT"] = domain.root
     env["T"] = pkg_tmpdir
     try:
         logger.debug("running ebuild pkg_pretend sanity check for %s", pkg.cpvstr)
         start = time.time()
         ret = run_generic_phase(pkg, "pretend", env, userpriv=True, sandbox=True,
                                 fakeroot=False, extra_handlers=commands)
         logger.debug("pkg_pretend sanity check for %s took %2.2f seconds",
             pkg.cpvstr, time.time() - start)
         return ret
     except format.GenericBuildError as e:
         logger.error("pkg_pretend sanity check for %s failed with exception %r"
             % (pkg.cpvstr, e))
         return False
     finally:
         shutil.rmtree(builddir)
         # try to wipe the cat dir; if not empty, ignore it
         try:
             os.rmdir(os.path.dirname(builddir))
         except EnvironmentError as e:
             if e.errno != errno.ENOTEMPTY:
                 raise
Code Example #14
File: test_osutils.py Project: radhermit/snakeoil
 def test_gid(self):
     # abuse the portage group as secondary group
     try:
         portage_gid = grp.getgrnam('portage').gr_gid
     except KeyError:
         pytest.skip('the portage group does not exist')
     if portage_gid not in os.getgroups():
         pytest.skip('you are not in the portage group')
     path = pjoin(self.dir, 'group', 'group')
     assert osutils.ensure_dirs(path, gid=portage_gid)
     self.check_dir(path, os.geteuid(), portage_gid, 0o777)
     assert osutils.ensure_dirs(path)
     self.check_dir(path, os.geteuid(), portage_gid, 0o777)
     assert osutils.ensure_dirs(path, gid=os.getegid())
     self.check_dir(path, os.geteuid(), os.getegid(), 0o777)
Code Example #15
File: tar.py Project: radhermit/pkgcore
    def _post_download(self, path):
        super()._post_download(path)

        # create tempdir for staging decompression
        if not ensure_dirs(self.tempdir, mode=0o755, uid=self.uid, gid=self.gid):
            raise base.SyncError(
                f'failed creating repo update dir: {self.tempdir!r}')

        exts = {'gz': 'gzip', 'bz2': 'bzip2', 'xz': 'xz'}
        compression = exts[self.uri.rsplit('.', 1)[1]]
        # use tar instead of tarfile so we can easily strip leading path components
        # TODO: programmatically determine how many components to strip?
        cmd = [
            'tar', '--extract', f'--{compression}', '-f', path,
            '--strip-components=1', '--no-same-owner', '-C', self.tempdir
        ]
        with open(os.devnull) as f:
            ret = self._spawn(cmd, pipes={1: f.fileno(), 2: f.fileno()})
        if ret:
            raise base.SyncError('failed to unpack tarball')

        # TODO: verify gpg data if it exists

        # move old repo out of the way and then move new, unpacked repo into place
        try:
            os.rename(self.basedir, self.tempdir_old)
            os.rename(self.tempdir, self.basedir)
        except OSError as e:
            raise base.SyncError(f'failed to update repo: {e.strerror}') from e

        # register old repo removal after it has been successfully replaced
        atexit.register(partial(shutil.rmtree, self.tempdir_old, ignore_errors=True))
Code Example #16
File: virtuals.py Project: vapier/pkgcore
def _write_mtime_cache(mtimes, data, location):
    old_umask = os.umask(0113)
    try:
        f = None
        logger.debug("attempting to update mtime cache at %r", (location,))
        try:
            if not ensure_dirs(os.path.dirname(location),
                gid=portage_gid, mode=0775):
                # bugger, can't update..
                return
            f = AtomicWriteFile(location, gid=portage_gid, perms=0664)
            # invert the data...
            rev_data = {}
            for pkg, ver_dict in data.iteritems():
                for fullver, virtuals in ver_dict.iteritems():
                    for virtual in virtuals:
                        rev_data.setdefault(virtual.category, []).extend(
                            (pkg, fullver, str(virtual)))
            for cat, mtime in mtimes.iteritems():
                if cat in rev_data:
                    f.write("%s\t%i\t%s\n" % (cat, mtime,
                         '\t'.join(rev_data[cat])))
                else:
                    f.write("%s\t%i\n" % (cat, mtime))
            f.close()
            os.chown(location, -1, portage_gid)
        except IOError as e:
            if f is not None:
                f.discard()
            if e.errno != errno.EACCES:
                raise
            logger.warning("unable to update vdb virtuals cache due to "
                "lacking permissions")
    finally:
        os.umask(old_umask)
Code Example #17
File: ebd.py Project: radhermit/pkgcore
    def _setup_distfiles(self):
        if not self.verified_files and self.allow_fetching:
            ops = self.domain.pkg_operations(self.pkg, observer=self.observer)
            if not ops.fetch():
                raise format.GenericBuildError("failed fetching required distfiles")
            self.verified_files = ops._fetch_op.verified_files

        if self.verified_files:
            try:
                if os.path.exists(self.env["DISTDIR"]):
                    if (os.path.isdir(self.env["DISTDIR"]) and
                            not os.path.islink(self.env["DISTDIR"])):
                        shutil.rmtree(self.env["DISTDIR"])
                    else:
                        os.unlink(self.env["DISTDIR"])

            except EnvironmentError as e:
                raise format.FailedDirectory(
                    self.env["DISTDIR"],
                    f"failed removing existing file/dir/link: {e}") from e

            if not ensure_dirs(self.env["DISTDIR"], mode=0o770, gid=portage_gid):
                raise format.FailedDirectory(
                    self.env["DISTDIR"],
                    "failed creating distdir symlink directory")

            try:
                for src, dest in [
                        (k, pjoin(self.env["DISTDIR"], v.filename))
                        for (k, v) in self.verified_files.items()]:
                    os.symlink(src, dest)

            except EnvironmentError as e:
                raise format.GenericBuildError(
                    f"Failed symlinking in distfiles for src {src} -> {dest}: {e}") from e
Code Example #18
File: fs_template.py Project: chutz/pkgcore
 def _ensure_dirs(self, path=None):
     """Make sure a path relative to C{self.location} exists."""
     if path is not None:
         path = pjoin(self.location, os.path.dirname(path))
     else:
         path = self.location
     return ensure_dirs(path, mode=0775, minimal=False)
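The docstring above says the helper ensures a path relative to self.location; as a hedged illustration of just that path resolution (the cache root and entry name below are hypothetical), the logic reduces to:

 import os
 from snakeoil.osutils import pjoin

 def resolve_cache_dir(location, path=None):
     # mirrors _ensure_dirs above: a relative entry maps to its parent
     # directory under `location`, while None means the cache root itself
     if path is not None:
         return pjoin(location, os.path.dirname(path))
     return location

 # hypothetical cache root and entry name
 assert resolve_cache_dir('/var/cache/edb/dep', 'sys-apps/portage-3.0') == '/var/cache/edb/dep/sys-apps'
 assert resolve_cache_dir('/var/cache/edb/dep') == '/var/cache/edb/dep'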
Code Example #19
File: ebd.py Project: chutz/pkgcore
    def setup_distfiles(self):
        if not self.verified_files and self.allow_fetching:
            ops = self.domain.pkg_operations(self.pkg,
                observer=self.observer)
            if not ops.fetch():
                raise format.BuildError("failed fetching required distfiles")
            self.verified_files = ops._fetch_op.verified_files

        if self.verified_files:
            try:
                if os.path.exists(self.env["DISTDIR"]):
                    if (os.path.isdir(self.env["DISTDIR"])
                        and not os.path.islink(self.env["DISTDIR"])):
                        shutil.rmtree(self.env["DISTDIR"])
                    else:
                        os.unlink(self.env["DISTDIR"])

            except EnvironmentError as oe:
                raise_from(format.FailedDirectory(
                    self.env["DISTDIR"],
                    "failed removing existing file/dir/link at: exception %s"
                    % oe))

            if not ensure_dirs(self.env["DISTDIR"], mode=0770,
                               gid=portage_gid):
                raise format.FailedDirectory(
Code Example #20
File: ops.py Project: vapier/pkgcore
def default_copyfile(obj, mkdirs=False):
    """
    copy a :class:`pkgcore.fs.fs.fsBase` to its stated location.

    :param obj: :class:`pkgcore.fs.fs.fsBase` instance, exempting :class:`fsDir`
    :return: true if success, else an exception is thrown
    :raise EnvironmentError: permission errors

    """

    existent = False
    ensure_perms = get_plugin("fs_ops.ensure_perms")
    if not fs.isfs_obj(obj):
        raise TypeError("obj must be fsBase derivative: %r" % obj)
    elif fs.isdir(obj):
        raise TypeError("obj must not be a fsDir instance: %r" % obj)

    try:
        existing = gen_obj(obj.location)
        if fs.isdir(existing):
            raise CannotOverwrite(obj, existing)
        existent = True
    except OSError as oe:
        # verify the parent dir is there at least
        basefp = os.path.dirname(obj.location)
        if basefp.strip(os.path.sep) and not os.path.exists(basefp):
            if mkdirs:
                if not ensure_dirs(basefp, mode=0750, minimal=True):
                    raise FailedCopy(obj, str(oe))
Code Example #21
File: repo_ops.py Project: chutz/pkgcore
    def add_data(self):
        if self.observer is None:
            end = start = lambda x:None
        else:
            start = self.observer.phase_start
            end = self.observer.phase_end
        pkg = self.new_pkg
        final_path = discern_loc(self.repo.base, pkg, self.repo.extension)
        tmp_path = pjoin(os.path.dirname(final_path),
            ".tmp.%i.%s" % (os.getpid(), os.path.basename(final_path)))

        self.tmp_path, self.final_path = tmp_path, final_path

        if not ensure_dirs(os.path.dirname(tmp_path), mode=0755):
            raise repo_interfaces.Failure("failed creating directory %r" %
                os.path.dirname(tmp_path))
        try:
            start("generating tarball: %s" % tmp_path)
            tar.write_set(pkg.contents, tmp_path, compressor='bzip2',
                parallelize=True)
            end("tarball created", True)
            start("writing Xpak")
            # ok... got a tarball.  now add xpak.
            xpak.Xpak.write_xpak(tmp_path, generate_attr_dict(pkg))
            end("wrote Xpak", True)
            # ok... we tagged the xpak on.
            os.chmod(tmp_path, 0644)
        except Exception as e:
            try:
                unlink_if_exists(tmp_path)
            except EnvironmentError as e:
                logger.warning("failed removing %r: %r" % (tmp_path, e))
            raise
        return True
Code Example #22
File: test_addons.py Project: den4ix/pkgcheck
    def mk_profiles(self, profiles, base='profiles', arches=None):
        os.mkdir(pjoin(self.dir, 'metadata'))
        # write masters= to suppress logging complaints.
        write_file(pjoin(self.dir, 'metadata', 'layout.conf'), 'w', 'masters=')

        loc = pjoin(self.dir, base)
        os.mkdir(loc)
        for profile in profiles:
            self.assertTrue(ensure_dirs(pjoin(loc, profile)),
                            msg="failed creating profile %r" % profile)
        if arches is None:
            arches = set(val[0] for val in profiles.itervalues())
        write_file(pjoin(loc, 'arch.list'), 'w', "\n".join(arches))
        write_file(pjoin(loc, 'repo_name'), 'w', 'testing')
        write_file(pjoin(loc, 'eapi'), 'w', '5')
        with open(pjoin(loc, 'profiles.desc'), 'w') as fd:
            for profile, vals in profiles.iteritems():
                l = len(vals)
                if l == 1 or not vals[1]:
                    fd.write("%s\t%s\tstable\n" % (vals[0], profile))
                else:
                    fd.write("%s\t%s\t%s\n" % (vals[0], profile, vals[1]))
                if l == 3 and vals[2]:
                    with open(pjoin(loc, profile, 'deprecated'), 'w') as f:
                        f.write("foon\n#dar\n")
                with open(pjoin(loc, profile, 'make.defaults'), 'w') as f:
                    f.write("ARCH=%s\n" % vals[0])
                with open(pjoin(loc, profile, 'eapi'), 'w') as f:
                    f.write('5')
Code Example #23
File: test_repository.py Project: neko259/pkgcore
    def test_path_restrict(self):
        repo_dir = pjoin(self.dir, 'repo')
        ensure_dirs(pjoin(repo_dir, 'profiles'))
        with open(pjoin(repo_dir, 'profiles', 'repo_name'), 'w') as f:
            f.write('testrepo\n')
        repo = self.mk_tree(repo_dir)
        ensure_dirs(pjoin(repo_dir, 'cat', 'foo'))
        ensure_dirs(pjoin(repo_dir, 'cat', 'bar'))
        ensure_dirs(pjoin(repo_dir, 'tac', 'oof'))
        touch(pjoin(repo_dir, 'skel.ebuild'))
        touch(pjoin(repo_dir, 'cat', 'foo', 'foo-1.ebuild'))
        touch(pjoin(repo_dir, 'cat', 'foo', 'foo-2.ebuild'))
        touch(pjoin(repo_dir, 'cat', 'foo', 'Manifest'))
        touch(pjoin(repo_dir, 'cat', 'bar', 'bar-1.ebuild'))
        touch(pjoin(repo_dir, 'tac', 'oof', 'oof-1.ebuild'))

        # specify repo category dirs
        with open(pjoin(repo_dir, 'profiles', 'categories'), 'w') as f:
            f.write('cat\n')
            f.write('tac\n')

        for path in (self.dir,  # path not in repo
                     pjoin(repo_dir, 'a'),  # nonexistent category dir
                     pjoin(repo_dir, 'profiles'),  # non-category dir
                     pjoin(repo_dir, 'skel.ebuild'),  # not in the correct cat/PN dir layout
                     pjoin(repo_dir, 'cat', 'a'),  # nonexistent package dir
                     pjoin(repo_dir, 'cat', 'foo', 'foo-0.ebuild'),  # nonexistent ebuild file
                     pjoin(repo_dir, 'cat', 'foo', 'Manifest')):  # non-ebuild file
            self.assertRaises(ValueError, repo.path_restrict, path)

        # repo dir
        restriction = repo.path_restrict(repo_dir)
        self.assertEqual(len(restriction), 1)
        self.assertInstance(restriction[0], restricts.RepositoryDep)
        # matches all 4 ebuilds in the repo
        self.assertEqual(len(repo.match(restriction)), 4)

        # category dir
        restriction = repo.path_restrict(pjoin(repo_dir, 'cat'))
        self.assertEqual(len(restriction), 2)
        self.assertInstance(restriction[1], restricts.CategoryDep)
        # matches all 3 ebuilds in the category
        self.assertEqual(len(repo.match(restriction)), 3)

        # package dir
        restriction = repo.path_restrict(pjoin(repo_dir, 'cat', 'foo'))
        self.assertEqual(len(restriction), 3)
        self.assertInstance(restriction[2], restricts.PackageDep)
        # matches both ebuilds in the package dir
        self.assertEqual(len(repo.match(restriction)), 2)

        # ebuild file
        restriction = repo.path_restrict(pjoin(repo_dir, 'cat', 'foo', 'foo-1.ebuild'))
        self.assertEqual(len(restriction), 4)
        self.assertInstance(restriction[3], restricts.VersionMatch)
        # specific ebuild version match
        self.assertEqual(len(repo.match(restriction)), 1)
Code Example #24
File: test_repository.py Project: radhermit/pkgcore
    def mk_tree(self, path, *args, **kwds):
        if path != self.dir:
            self.dir_slave = path
            self.dir_master = pjoin(os.path.dirname(path), os.path.basename(path) + 'master')
            ensure_dirs(self.dir_slave)
            ensure_dirs(self.dir_master)
            ensure_dirs(pjoin(self.dir_slave, 'profiles'))
            ensure_dirs(pjoin(self.dir_master, 'profiles'))

        eclasses = kwds.pop('eclass_cache', None)
        if eclasses is None:
            epath = pjoin(self.dir_master, 'eclass')
            ensure_dirs(epath)
            eclasses = eclass_cache.cache(epath)

        self.master_repo = repository.UnconfiguredTree(self.dir_master, eclass_cache=eclasses, *args, **kwds)
        masters = (self.master_repo,)
        return repository.UnconfiguredTree(self.dir_slave, eclass_cache=eclasses, masters=masters, *args, **kwds)
Code Example #25
File: ops.py Project: radhermit/pkgcore
def default_copyfile(obj, mkdirs=False):
    """
    copy a :class:`pkgcore.fs.fs.fsBase` to its stated location.

    :param obj: :class:`pkgcore.fs.fs.fsBase` instance, exempting :class:`fsDir`
    :return: true if success, else an exception is thrown
    :raise EnvironmentError: permission errors

    """

    existent = False
    ensure_perms = get_plugin("fs_ops.ensure_perms")
    if not fs.isfs_obj(obj):
        raise TypeError(f'obj must be fsBase derivative: {obj!r}')
    elif fs.isdir(obj):
        raise TypeError(f'obj must not be a fsDir instance: {obj!r}')

    try:
        existing = gen_obj(obj.location)
        if fs.isdir(existing):
            raise CannotOverwrite(obj, existing)
        existent = True
    except OSError as oe:
        # verify the parent dir is there at least
        basefp = os.path.dirname(obj.location)
        if basefp.strip(os.path.sep) and not os.path.exists(basefp):
            if mkdirs:
                if not ensure_dirs(basefp, mode=0o750, minimal=True):
                    raise FailedCopy(obj, str(oe))
            else:
                raise
        existent = False

    if not existent:
        fp = obj.location
    else:
        fp = existent_fp = obj.location + "#new"

    if fs.isreg(obj):
        obj.data.transfer_to_path(fp)
    elif fs.issym(obj):
        os.symlink(obj.target, fp)
    elif fs.isfifo(obj):
        os.mkfifo(fp)
    elif fs.isdev(obj):
        dev = os.makedev(obj.major, obj.minor)
        os.mknod(fp, obj.mode, dev)
    else:
        ret = spawn([CP_BINARY, "-Rp", obj.location, fp])
        if ret != 0:
            raise FailedCopy(obj, f'got {ret} from {CP_BINARY} -Rp')

    ensure_perms(obj.change_attributes(location=fp))

    if existent:
        os.rename(existent_fp, obj.location)
    return True
Code Example #26
File: ebd.py Project: radhermit/pkgcore
 def setup_workdir(self):
     # ensure dirs.
     for k in ("HOME", "T", "WORKDIR", "D"):
         if not ensure_dirs(self.env[k], mode=0o4770, gid=portage_gid, minimal=True):
             raise format.FailedDirectory(
                 self.env[k],
                 "%s doesn't fulfill minimum mode %o and gid %i" % (k, 0o770, portage_gid))
         # XXX hack, just 'til pkgcore controls these directories
         if (os.stat(self.env[k]).st_mode & 0o2000):
             logger.warning(f"{self.env[k]} ( {k} ) is setgid")
Code Example #27
File: test_profiles.py Project: veelai/pkgcore
    def mk_profiles(self, *profiles, **kwds):
        for x in os.listdir(self.dir):
            shutil.rmtree(pjoin(self.dir, x))
        for idx, vals in enumerate(profiles):
            name = str(vals.pop("name", idx))
            path = pjoin(self.dir, name)
            ensure_dirs(path)
            parent = vals.pop("parent", None)
            for fname, data in vals.iteritems():
                open(pjoin(path, fname), "w").write(data)

            if idx and not parent:
                parent = idx - 1

            if parent is not None:
                open(pjoin(path, "parent"), "w").write("../%s" % (parent,))
        if kwds:
            for key, val in kwds.iteritems():
                open(pjoin(self.dir, key), "w").write(val)
Code Example #28
File: test_triggers.py Project: veelai/pkgcore
    def assertTrigger(self, touches, ran, dirs=['test-lib', 'test-lib2'],
        hook='merge', mode=const.INSTALL_MODE, mkdirs=True, same_mtime=False):

        # wipe what's there.
        for x in scan(self.dir).iterdirs():
            if x.location == self.dir:
                continue
            shutil.rmtree(x.location)
        for x in scan(self.dir).iterdirs(True):
            os.unlink(x.location)

        ensure_dirs(pjoin(self.dir, "etc"))
        open(pjoin(self.dir, "etc/ld.so.conf"), "w").write(
            "\n".join('/' + x for x in dirs))
        # force directory mtime to 1s less.
        past = time.time() - 10.0
        if mkdirs:
            for x in dirs:
                ensure_dirs(pjoin(self.dir, x))
                os.utime(pjoin(self.dir, x), (past, past))

        self.reset_objects()
        self.engine.phase = 'pre_%s' % hook
        self.engine.mode = mode
        self.trigger(self.engine, {})
        self.assertFalse(self.trigger._passed_in_args)
        resets = set()
        for x in touches:
            fp = pjoin(self.dir, x.lstrip('/'))
            open(pjoin(fp), "w")
            if same_mtime:
                os.utime(fp, (past, past))
                resets.add(os.path.dirname(fp))

        for x in resets:
            os.utime(x, (past, past))

        self.engine.phase = 'post_%s' % hook
        self.trigger(self.engine, {})

        self.assertEqual([[getattr(x, 'offset', None) for x in y]
            for y in self.trigger._passed_in_args],
            [[self.dir]])
Code Example #29
File: test_osutils.py Project: radhermit/snakeoil
 def test_reset_sticky_parent_perms(self):
     # make sure perms are reset after traversing over sticky parents
     sticky_parent = pjoin(self.dir, 'dir')
     path = pjoin(sticky_parent, 'dir')
     os.mkdir(sticky_parent)
     os.chmod(sticky_parent, 0o2755)
     pre_sticky_parent = os.stat(sticky_parent)
     assert osutils.ensure_dirs(path, mode=0o700)
     post_sticky_parent = os.stat(sticky_parent)
     assert pre_sticky_parent.st_mode == post_sticky_parent.st_mode
Code Example #30
    def setup(self):
        """
        execute the setup phase, mapping out to pkg_setup in the ebuild

        necessary dirs are created as required, and build env is
        initialized at this point
        """
        if self.distcc:
            for p in ("", "/lock", "/state"):
                if not ensure_dirs(pjoin(self.env["DISTCC_DIR"], p), mode=02775, gid=portage_gid):
                    raise format.FailedDirectory(
                        pjoin(self.env["DISTCC_DIR"], p), "failed creating needed distcc directory"
                    )
Code Example #31
 def test_non_dir_in_path(self):
     # fail if one of the parts of the path isn't a dir
     path = pjoin(self.dir, 'file', 'dir')
     touch(pjoin(self.dir, 'file'))
     assert not osutils.ensure_dirs(path, mode=0o700)
Code Example #32
    def test_path_restrict(self):
        repo_dir = pjoin(self.dir, 'repo')
        sym_repo_dir = pjoin(self.dir, 'sym_repo')
        os.symlink(repo_dir, sym_repo_dir)

        ensure_dirs(pjoin(repo_dir, 'profiles'))
        with open(pjoin(repo_dir, 'profiles', 'repo_name'), 'w') as f:
            f.write('testrepo\n')
        ensure_dirs(pjoin(repo_dir, 'cat', 'foo'))
        ensure_dirs(pjoin(repo_dir, 'cat', 'bar'))
        ensure_dirs(pjoin(repo_dir, 'tac', 'oof'))
        touch(pjoin(repo_dir, 'skel.ebuild'))
        touch(pjoin(repo_dir, 'cat', 'foo', 'foo-1.ebuild'))
        touch(pjoin(repo_dir, 'cat', 'foo', 'foo-2.ebuild'))
        touch(pjoin(repo_dir, 'cat', 'foo', 'Manifest'))
        touch(pjoin(repo_dir, 'cat', 'bar', 'bar-1.ebuild'))
        touch(pjoin(repo_dir, 'tac', 'oof', 'oof-1.ebuild'))

        # specify repo category dirs
        with open(pjoin(repo_dir, 'profiles', 'categories'), 'w') as f:
            f.write('cat\n')
            f.write('tac\n')

        for d in (repo_dir, sym_repo_dir):
            repo = self.mk_tree(d)
            for path in (
                    self.dir,  # path not in repo
                    pjoin(repo.location, 'a'),  # nonexistent category dir
                    pjoin(repo.location, 'profiles'),  # non-category dir
                    pjoin(
                        repo.location,
                        'skel.ebuild'),  # not in the correct cat/PN dir layout
                    pjoin(repo.location, 'cat',
                          'a'),  # nonexistent package dir
                    pjoin(repo.location, 'cat', 'foo',
                          'foo-0.ebuild'),  # nonexistent ebuild file
                    pjoin(repo.location, 'cat', 'foo',
                          'Manifest'),  # non-ebuild file
            ):
                self.assertRaises(ValueError, repo.path_restrict, path)

            # repo dir
            restriction = repo.path_restrict(repo.location)
            self.assertEqual(len(restriction), 1)
            self.assertInstance(restriction[0], restricts.RepositoryDep)
            # matches all 4 ebuilds in the repo
            self.assertEqual(len(repo.match(restriction)), 4)

            # category dir
            restriction = repo.path_restrict(pjoin(repo.location, 'cat'))
            self.assertEqual(len(restriction), 2)
            self.assertInstance(restriction[1], restricts.CategoryDep)
            # matches all 3 ebuilds in the category
            self.assertEqual(len(repo.match(restriction)), 3)

            # package dir
            restriction = repo.path_restrict(pjoin(repo.location, 'cat',
                                                   'foo'))
            self.assertEqual(len(restriction), 3)
            self.assertInstance(restriction[2], restricts.PackageDep)
            # matches both ebuilds in the package dir
            self.assertEqual(len(repo.match(restriction)), 2)

            # ebuild file
            restriction = repo.path_restrict(
                pjoin(repo.location, 'cat', 'foo', 'foo-1.ebuild'))
            self.assertEqual(len(restriction), 4)
            self.assertInstance(restriction[3], restricts.VersionMatch)
            # specific ebuild version match
            self.assertEqual(len(repo.match(restriction)), 1)

            # relative ebuild file path
            with mock.patch('os.getcwd',
                            return_value=os.path.realpath(
                                pjoin(repo.location, 'cat', 'foo'))):
                restriction = repo.path_restrict('./foo-1.ebuild')
                self.assertEqual(len(restriction), 4)
                self.assertInstance(restriction[3], restricts.VersionMatch)
                # specific ebuild version match
                self.assertEqual(len(repo.match(restriction)), 1)
Code Example #33
File: test_osutils.py Project: chutz/snakeoil
 def test_minimal_nonmodifying(self):
     path = pjoin(self.dir, 'foo', 'bar')
     self.assertTrue(osutils.ensure_dirs(path, mode=0755))
     os.chmod(path, 0777)
     self.assertTrue(osutils.ensure_dirs(path, mode=0755, minimal=True))
Code Example #34
File: ebd.py Project: ferringb/pkgcore
 def setup_logging(self):
     if self.logging and not ensure_dirs(
             os.path.dirname(self.logging), mode=0o2770, gid=portage_gid):
         raise format.FailedDirectory(
             os.path.dirname(self.logging),
             "PORT_LOGDIR, desired mode 02770 and gid %i" % portage_gid)
Code Example #35
def initialize_cache(package, force=False, cache_dir=None):
    """Determine available plugins in a package.

    Writes cache files if they are stale and writing is possible.
    """
    modpath = os.path.dirname(package.__file__)
    pkgpath = os.path.dirname(os.path.dirname(modpath))
    uid = gid = -1
    mode = 0o755

    if cache_dir is None:
        if not force:
            # use user-generated caches if they exist, fallback to module cache
            if os.path.exists(pjoin(const.USER_CACHE_PATH, CACHE_FILENAME)):
                cache_dir = const.USER_CACHE_PATH
            elif os.path.exists(pjoin(const.SYSTEM_CACHE_PATH,
                                      CACHE_FILENAME)):
                cache_dir = const.SYSTEM_CACHE_PATH
                uid = os_data.portage_uid
                gid = os_data.portage_gid
                mode = 0o775
            else:
                cache_dir = modpath
        else:
            # generate module cache when running from git repo, otherwise create system/user cache
            if pkgpath == sys.path[0]:
                cache_dir = modpath
            elif os_data.uid in (os_data.root_uid, os_data.portage_uid):
                cache_dir = const.SYSTEM_CACHE_PATH
                uid = os_data.portage_uid
                gid = os_data.portage_gid
                mode = 0o775
            else:
                cache_dir = const.USER_CACHE_PATH

    # put pkgcore consumer plugins (e.g. pkgcheck) inside pkgcore cache dir
    if cache_dir in (const.SYSTEM_CACHE_PATH, const.USER_CACHE_PATH):
        chunks = package.__name__.split('.', 1)
        if chunks[0] != os.path.basename(cache_dir):
            cache_dir = pjoin(cache_dir, chunks[0])

    # package plugin cache, see above.
    package_cache = defaultdict(set)
    stored_cache_name = pjoin(cache_dir, CACHE_FILENAME)
    stored_cache = _read_cache_file(package, stored_cache_name)

    if force:
        _clean_old_caches(cache_dir)

    # Directory cache, mapping modulename to
    # (mtime, set([keys]))
    modlist = listdir_files(modpath)
    modlist = set(x for x in modlist
                  if os.path.splitext(x)[1] == '.py' and x != '__init__.py')

    cache_stale = False
    # Hunt for modules.
    actual_cache = defaultdict(set)
    mtime_cache = mappings.defaultdictkey(lambda x: int(os.path.getmtime(x)))
    for modfullname in sorted(modlist):
        modname = os.path.splitext(modfullname)[0]
        # It is an actual module. Check if its cache entry is valid.
        mtime = mtime_cache[pjoin(modpath, modfullname)]
        vals = stored_cache.get((modname, mtime))
        if vals is None or force:
            # Cache entry is stale.
            logger.debug('stale because of %s: actual %s != stored %s',
                         modname, mtime,
                         stored_cache.get(modname, (0, ()))[0])
            cache_stale = True
            entries = []
            qualname = '.'.join((package.__name__, modname))
            module = import_module(qualname)
            registry = getattr(module, PLUGIN_ATTR, {})
            vals = set()
            for key, plugs in registry.items():
                for idx, plug_name in enumerate(plugs):
                    if isinstance(plug_name, str):
                        plug = _process_plugin(
                            package, _plugin_data(key, 0, qualname, plug_name))
                    else:
                        plug = plug_name
                    if plug is None:
                        # import failure, ignore it, error already logged
                        continue
                    priority = getattr(plug, 'priority', 0)
                    if not isinstance(priority, int):
                        logger.error(
                            "ignoring plugin %s: has a non integer priority: %s",
                            plug, priority)
                        continue
                    if plug_name is plug:
                        # this means it's an object, rather than a string; store
                        # the offset.
                        plug_name = idx
                    data = _plugin_data(key, priority, qualname, plug_name)
                    vals.add(data)
        actual_cache[(modname, mtime)] = vals
        for data in vals:
            package_cache[data.key].add(data)
    if force or set(stored_cache) != set(actual_cache):
        logger.debug('updating cache %r for new plugins', stored_cache_name)
        ensure_dirs(cache_dir, uid=uid, gid=gid, mode=mode)
        _write_cache_file(stored_cache_name, actual_cache, uid=uid, gid=gid)

    return mappings.ImmutableDict(
        (k, sort_plugs(v)) for k, v in package_cache.items())
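A hedged sketch of invoking the loader above; the import paths (pkgcore.plugin for the function, pkgcore.plugins as the plugin package) follow pkgcore's conventional layout but are assumptions, not something this page confirms.

 # Assumed import paths; adjust if the real layout differs.
 from pkgcore import plugins
 from pkgcore.plugin import initialize_cache

 # returns an immutable mapping of plugin key -> sorted plugin data,
 # regenerating any stale cache files along the way
 plugin_map = initialize_cache(plugins)
 for key, plugs in plugin_map.items():
     print(key, len(plugs))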
Code Example #36
File: ebd.py Project: 4eetah/pkgcore
 def setup_workdir(self):
     # ensure dirs.
     for k in ("HOME", "T", "WORKDIR", "D"):
         if not ensure_dirs(
                 self.env[k], mode=04770, gid=portage_gid, minimal=True):
Code Example #37
File: ebd.py Project: 4eetah/pkgcore
 def setup_logging(self):
     if self.logging and not ensure_dirs(
             os.path.dirname(self.logging), mode=02770, gid=portage_gid):
Code Example #38
File: ebd.py Project: 4eetah/pkgcore
 def setup_env_data_source(self):
     if not ensure_dirs(
             self.env["T"], mode=0770, gid=portage_gid, minimal=True):
Code Example #39
File: ebd.py Project: 4eetah/pkgcore
        if self.distcc:
            for p in ("", "/lock", "/state"):
                if not ensure_dirs(pjoin(self.env["DISTCC_DIR"], p),
                                   mode=02775,
                                   gid=portage_gid):
                    raise format.FailedDirectory(
                        pjoin(self.env["DISTCC_DIR"], p),
                        "failed creating needed distcc directory")
        if self.ccache:
            # yuck.
            st = None
            try:
                st = os.stat(self.env["CCACHE_DIR"])
            except OSError:
                st = None
                if not ensure_dirs(
                        self.env["CCACHE_DIR"], mode=02775, gid=portage_gid):
                    raise_from(
                        format.FailedDirectory(
                            self.env["CCACHE_DIR"],
                            "failed creation of ccache dir"))

                # XXX this is more than mildly stupid.
                st = os.stat(self.env["CCACHE_DIR"])
            try:
                if st.st_gid != portage_gid or (st.st_mode & 02775) != 02775:
                    try:
                        cwd = os.getcwd()
                    except OSError:
                        cwd = "/"
                    try:
                        # crap.
Code Example #40
def config_from_make_conf(location=None, profile_override=None, **kwargs):
    """generate a config using portage's config files

    Args:
        location (optional[str]): path to the portage config directory,
            (defaults to /etc/portage)
        profile_override (optional[str]): profile to use instead of the current system
            profile, i.e. the target of the /etc/portage/make.profile symlink
        configroot (optional[str]): location for various portage config files (defaults to /)
        root (optional[str]): target root filesystem (defaults to /)
        buildpkg (optional[bool]): forcibly disable/enable building binpkgs, otherwise
            FEATURES=buildpkg from make.conf is used

    Returns:
        dict: config settings
    """

    # this actually differs from portage parsing- we allow
    # make.globals to provide vars used in make.conf, portage keeps
    # them separate (kind of annoying)

    config_dir = location if location is not None else '/etc/portage'
    config_dir = pjoin(
        os.environ.get('PORTAGE_CONFIGROOT', kwargs.pop('configroot', '/')),
        config_dir.lstrip('/'))

    # this isn't preserving incremental behaviour for features/use unfortunately

    make_conf = {}
    try:
        load_make_conf(make_conf, pjoin(const.CONFIG_PATH, 'make.globals'))
    except IGNORED_EXCEPTIONS:
        raise
    except:
        raise_from(errors.ParsingError("failed to load make.globals"))
    load_make_conf(
        make_conf, pjoin(config_dir, 'make.conf'), required=False,
        allow_sourcing=True, incrementals=True)

    root = os.environ.get("ROOT", kwargs.pop('root', make_conf.get("ROOT", "/")))
    gentoo_mirrors = [
        x.rstrip("/") + "/distfiles" for x in make_conf.pop("GENTOO_MIRRORS", "").split()]

    # this is flawed... it'll pick up -some-feature
    features = make_conf.get("FEATURES", "").split()

    config = {}
    triggers = []

    def add_trigger(name, kls_path, **extra_args):
        d = extra_args.copy()
        d['class'] = kls_path
        config[name] = basics.ConfigSectionFromStringDict(d)
        triggers.append(name)

    # sets...
    add_sets(config, root, config_dir)

    add_profile(config, config_dir, profile_override)

    kwds = {
        "class": "pkgcore.vdb.ondisk.tree",
        "location": pjoin(root, 'var', 'db', 'pkg'),
        "cache_location": '/var/cache/edb/dep/var/db/pkg',
    }
    config["vdb"] = basics.AutoConfigSection(kwds)

    try:
        repos_conf_defaults, repos_conf = load_repos_conf(pjoin(config_dir, 'repos.conf'))
    except errors.ParsingError as e:
        if not getattr(getattr(e, 'exc', None), 'errno', None) == errno.ENOENT:
            raise
        try:
            # fallback to defaults provided by pkgcore
            repos_conf_defaults, repos_conf = load_repos_conf(
                pjoin(const.CONFIG_PATH, 'repos.conf'))
        except IGNORED_EXCEPTIONS:
            raise
        except:
            raise_from(errors.ParsingError(
                "failed to find a usable repos.conf"))

    make_repo_syncers(config, repos_conf, make_conf)

    config['ebuild-repo-common'] = basics.AutoConfigSection({
        'class': 'pkgcore.ebuild.repository.tree',
        'default_mirrors': gentoo_mirrors,
        'inherit-only': True,
        'ignore_paludis_versioning': ('ignore-paludis-versioning' in features),
    })

    repo_map = {}

    for repo_name, repo_opts in repos_conf.iteritems():
        repo_path = repo_opts['location']

        # XXX: Hack for portage-2 profile format support.
        repo_config = RepoConfig(repo_path, repo_name)
        repo_map[repo_config.repo_id] = repo_path

        # repo configs
        repo_conf = {
            'class': 'pkgcore.ebuild.repo_objs.RepoConfig',
            'config_name': repo_name,
            'location': repo_path,
            'syncer': 'sync:' + repo_name,
        }

        # repo trees
        repo = {
            'inherit': ('ebuild-repo-common',),
            'repo_config': 'conf:' + repo_name,
        }

        # metadata cache
        if repo_config.cache_format is not None:
            cache_name = 'cache:' + repo_name
            config[cache_name] = make_cache(repo_config.cache_format, repo_path)
            repo['cache'] = cache_name

        if repo_name == repos_conf_defaults['main-repo']:
            repo_conf['default'] = True
            repo['default'] = True

        config['conf:' + repo_name] = basics.AutoConfigSection(repo_conf)
        config[repo_name] = basics.AutoConfigSection(repo)

    # XXX: Hack for portage-2 profile format support. We need to figure out how
    # to dynamically create this from the config at runtime on attr access.
    profiles.ProfileNode._repo_map = ImmutableDict(repo_map)

    repos = [name for name in repos_conf.iterkeys()]
    if len(repos) > 1:
        config['repo-stack'] = basics.FakeIncrementalDictConfigSection(
            my_convert_hybrid, {
                'class': 'pkgcore.repository.multiplex.config_tree',
                'repositories': tuple(repos)})
    else:
        config['repo-stack'] = basics.section_alias(repos[0], 'repo')

    config['vuln'] = basics.AutoConfigSection({
        'class': SecurityUpgradesViaProfile,
        'ebuild_repo': 'repo-stack',
        'vdb': 'vdb',
        'profile': 'profile',
    })
    config['glsa'] = basics.section_alias(
        'vuln', SecurityUpgradesViaProfile.pkgcore_config_type.typename)

    # binpkg.
    buildpkg = 'buildpkg' in features or kwargs.pop('buildpkg', False)
    pkgdir = os.environ.get("PKGDIR", make_conf.pop('PKGDIR', None))
    if pkgdir is not None:
        try:
            pkgdir = abspath(pkgdir)
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            if buildpkg or set(features).intersection(
                    ('pristine-buildpkg', 'buildsyspkg', 'unmerge-backup')):
                logger.warning("disabling buildpkg related features since PKGDIR doesn't exist")
            pkgdir = None
        else:
            if not ensure_dirs(pkgdir, mode=0755, minimal=True):
                logger.warning("disabling buildpkg related features since PKGDIR either doesn't "
                               "exist, or lacks 0755 minimal permissions")
                pkgdir = None
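A minimal usage sketch for the loader documented above; the module path pkgcore.ebuild.portage_conf is an assumption, and the profile path passed here is purely illustrative.

 # Assumed import path for config_from_make_conf; the profile path below is
 # a hypothetical example, not a required value.
 from pkgcore.ebuild.portage_conf import config_from_make_conf

 config = config_from_make_conf(
     location='/etc/portage',
     profile_override='/var/db/repos/gentoo/profiles/default/linux/amd64/17.1')
 print(sorted(config))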
Code Example #41
File: triggers.py Project: pombreda/pkgcore
 def _mk_ld_so_conf(self, fp):
     if not ensure_dirs(os.path.dirname(fp), mode=0755, minimal=True):
Code Example #42
 def test_path_is_a_file(self):
     # fail if passed a path to an existing file
     path = pjoin(self.dir, 'file')
     touch(path)
     assert os.path.isfile(path)
     assert not osutils.ensure_dirs(path, mode=0o700)
Code Example #43
 def test_minimal_modifying(self):
     path = pjoin(self.dir, 'foo', 'bar')
     assert osutils.ensure_dirs(path, mode=0o750)
     assert osutils.ensure_dirs(path, mode=0o005, minimal=True)
     self.check_dir(path, os.geteuid(), os.getegid(), 0o755)
Code Example #44
File: ebd.py Project: 4eetah/pkgcore
    def _cmd_implementation_sanity_check(self, domain, observer):
        """Various ebuild sanity checks (REQUIRED_USE, pkg_pretend)."""
        pkg = self.pkg
        eapi = pkg.eapi

        # perform REQUIRED_USE checks
        if eapi.options.has_required_use:
            use = pkg.use
            for node in pkg.required_use:
                if not node.match(use):
                    observer.info(
                        textwrap.dedent("""
                        REQUIRED_USE requirement wasn't met
                        Failed to match: {}
                        from: {}
                        for USE: {}
                        pkg: {}
                        """.format(node, pkg.required_use, " ".join(use),
                                   pkg.cpvstr)))
                    return False

        # return if running pkg_pretend is not required
        if 'pretend' not in pkg.mandatory_phases:
            return True

        # run pkg_pretend phase
        commands = None
        if not pkg.built:
            commands = {
                "request_inherit": partial(inherit_handler, self._eclass_cache)
            }
        env = expected_ebuild_env(pkg)
        tmpdir = normpath(domain._get_tempspace())
        builddir = pjoin(tmpdir, env["CATEGORY"], env["PF"])
        pkg_tmpdir = normpath(pjoin(builddir, "temp"))
        ensure_dirs(pkg_tmpdir, mode=0770, gid=portage_gid, minimal=True)
        env["ROOT"] = domain.root
        env["T"] = pkg_tmpdir

        # TODO: make colored output easier to achieve from observers
        msg = [
            '>>> Running pkg_pretend for ',
            observer._output._out.fg('green'), pkg.cpvstr,
            observer._output._out.reset
        ]
        observer._output._out.write(*msg)

        try:
            start = time.time()
            ret = run_generic_phase(pkg,
                                    "pretend",
                                    env,
                                    userpriv=True,
                                    sandbox=True,
                                    extra_handlers=commands)
            logger.debug("pkg_pretend sanity check for %s took %2.2f seconds",
                         pkg.cpvstr,
                         time.time() - start)
            return ret
        except format.GenericBuildError as e:
            return False
        finally:
            shutil.rmtree(builddir)
            # try to wipe the cat dir; if not empty, ignore it
            try:
                os.rmdir(os.path.dirname(builddir))
            except EnvironmentError as e:
                # POSIX specifies either ENOTEMPTY or EEXIST for non-empty dir
                # in particular, Solaris uses EEXIST in that case.
                # https://github.com/pkgcore/pkgcore/pull/181
                if e.errno not in (errno.ENOTEMPTY, errno.EEXIST):
                    raise
Code Example #45
 def test_ensure_dirs(self):
     # default settings
     path = pjoin(self.dir, 'foo', 'bar')
     assert osutils.ensure_dirs(path)
     self.check_dir(path, os.geteuid(), os.getegid(), 0o777)
Code Example #46
File: repo_ops.py Project: 4eetah/pkgcore
    def add_data(self, domain):
        # error checking?
        dirpath = self.tmp_write_path
        ensure_dirs(dirpath, mode=0755, minimal=True)
        update_mtime(self.repo.location)
        rewrite = self.repo._metadata_rewrites
        for k in self.new_pkg.tracked_attributes:
            if k == "contents":
                v = ContentsFile(pjoin(dirpath, "CONTENTS"),
                                 mutable=True,
                                 create=True)
                v.update(self.new_pkg.contents)
                v.flush()
            elif k == "environment":
                data = compression.compress_data(
                    'bzip2',
                    self.new_pkg.environment.bytes_fileobj().read())
                with open(pjoin(dirpath, "environment.bz2"), "wb") as f:
                    f.write(data)
                del data
            else:
                v = getattr(self.new_pkg, k)
                if k == 'depends' or k == 'rdepends':
                    s = v.slotdep_str(domain)
                elif not isinstance(v, basestring):
                    try:
                        s = ' '.join(v)
                    except TypeError:
                        s = str(v)
                else:
                    s = v
                with open(pjoin(dirpath, rewrite.get(k, k.upper())), "w",
                          32768) as f:
                    if s:
                        s += '\n'
                    f.write(s)

        # ebuild_data is the actual ebuild- no point in holding onto
        # it for built ebuilds, but if it's there, we store it.
        o = getattr(self.new_pkg, "ebuild", None)
        if o is None:
            logger.warning(
                "doing install/replace op, "
                "but source package doesn't provide the actual ebuild data.  "
                "Creating an empty file")
            o = ''
        else:
            o = o.bytes_fileobj().read()
        # XXX lil hackish accessing PF
        with open(pjoin(dirpath, self.new_pkg.PF + ".ebuild"), "wb") as f:
            f.write(o)

        # install NEEDED and NEEDED.ELF.2 files from tmpdir if they exist
        pkg_tmpdir = normpath(
            pjoin(domain._get_tempspace(), self.new_pkg.category,
                  self.new_pkg.PF, 'temp'))
        for f in ['NEEDED', 'NEEDED.ELF.2']:
            fp = pjoin(pkg_tmpdir, f)
            if os.path.exists(fp):
                local_source(fp).transfer_to_path(pjoin(dirpath, f))

        # XXX finally, hack to keep portage from doing stupid shit.
        # relies on counter to discern what to punt during
        # merging/removal, we don't need that crutch however. problem?
        # No counter file, portage wipes all of our merges (friendly
        # bugger).
        # need to get zmedico to localize the counter
        # creation/counting to per CP for this trick to behave
        # perfectly.
        with open(pjoin(dirpath, "COUNTER"), "w") as f:
            f.write(str(int(time.time())))

        #finally, we mark who made this.
        with open(pjoin(dirpath, "PKGMANAGER"), "w") as f:
            f.write("pkgcore-%s\n" % __version__)
        return True
Code Example #47
File: ebd.py Project: ferringb/pkgcore
    def setup(self):
        """Execute the setup phase, mapping out to pkg_setup in the ebuild.

        Necessary dirs are created as required, and build env is
        initialized at this point.
        """
        if self.distcc:
            for p in ("", "/lock", "/state"):
                if not ensure_dirs(pjoin(self.env["DISTCC_DIR"], p),
                                   mode=0o2775,
                                   gid=portage_gid):
                    raise format.FailedDirectory(
                        pjoin(self.env["DISTCC_DIR"], p),
                        "failed creating needed distcc directory")
        if self.ccache:
            # yuck.
            st = None
            try:
                st = os.stat(self.env["CCACHE_DIR"])
            except OSError as e:
                st = None
                if not ensure_dirs(
                        self.env["CCACHE_DIR"], mode=0o2775, gid=portage_gid):
                    raise format.FailedDirectory(
                        self.env["CCACHE_DIR"],
                        "failed creation of ccache dir") from e

                # XXX this is more than mildly stupid.
                st = os.stat(self.env["CCACHE_DIR"])
            try:
                if st.st_gid != portage_gid or (st.st_mode & 0o2775) != 0o2775:
                    try:
                        cwd = os.getcwd()
                    except OSError:
                        cwd = "/"
                    with chdir(cwd):
                        # crap.
                        os.chmod(self.env["CCACHE_DIR"], 0o2775)
                        os.chown(self.env["CCACHE_DIR"], -1, portage_gid)
                        if 0 != spawn([
                                "chgrp", "-R",
                                str(portage_gid), self.env["CCACHE_DIR"]
                        ]):
                            raise format.FailedDirectory(
                                self.env["CCACHE_DIR"],
                                "failed changing ownership for CCACHE_DIR")
                        if 0 != spawn_bash(
                                "find '%s' -type d -print0 | %s --null chmod 02775"
                                % (self.env["CCACHE_DIR"], xargs)):
                            raise format.FailedDirectory(
                                self.env["CCACHE_DIR"],
                                "failed correcting perms for CCACHE_DIR")

                        if 0 != spawn_bash(
                                "find '%s' -type f -print0 | %s --null chmod 0775"
                                % (self.env["CCACHE_DIR"], xargs)):
                            raise format.FailedDirectory(
                                self.env["CCACHE_DIR"],
                                "failed correcting perms for CCACHE_DIR")
            except OSError as e:
                raise format.FailedDirectory(
                    self.env["CCACHE_DIR"],
                    "failed ensuring perms/group owner for CCACHE_DIR") from e

        return setup_mixin.setup(self)
Code Example #48
    def fetch(self, target):
        """
        fetch a file

        :type target: :obj:`pkgcore.fetch.fetchable` instance
        :return: None if fetching failed,
            else on disk location of the copied file
        """

        if not isinstance(target, fetchable):
            raise TypeError(
                "target must be fetchable instance/derivative: %s" % target)

        kw = {"mode": 0775}
        if self.readonly:
            kw["mode"] = 0555
        if self.userpriv:
            kw["gid"] = portage_gid
        kw["minimal"] = True
        if not ensure_dirs(self.distdir, **kw):
            raise errors.distdirPerms(
                self.distdir, "if userpriv, uid must be %i, gid must be %i. "
                "if not readonly, directory must be 0775, else 0555" %
                (portage_uid, portage_gid))

        fp = pjoin(self.distdir, target.filename)
        filename = os.path.basename(fp)

        uri = iter(target.uri)
        if self.userpriv and is_userpriv_capable():
            extra = {"uid": portage_uid, "gid": portage_gid}
        else:
            extra = {}
        extra["umask"] = 0002
        extra["env"] = self.extra_env
        attempts = self.attempts
        last_exc = None
        try:
            while attempts >= 0:
                try:
                    c = self._verify(fp, target)
                    return fp
                except errors.MissingDistfile:
                    command = self.command
                    last_exc = sys.exc_info()
                except errors.FetchFailed as e:
                    last_exc = sys.exc_info()
                    if not e.resumable:
                        try:
                            os.unlink(fp)
                            command = self.command
                        except OSError as oe:
                            raise_from(errors.UnmodifiableFile(fp, oe))
                    else:
                        command = self.resume_command

                # yeah, it's funky, but it works.
                if attempts > 0:
                    u = uri.next()
                    # note we're not even checking the results. the
                    # verify portion of the loop handles this. iow,
                    # don't trust their exit code. trust our chksums
                    # instead.
                    spawn_bash(command % {"URI": u, "FILE": filename}, **extra)
                attempts -= 1
            assert last_exc is not None
            raise last_exc[0], last_exc[1], last_exc[2]

        except StopIteration:
            # ran out of uris
            return FetchFailed(fp, "Ran out of urls to fetch from")
Code Example #49
File: test_osutils.py Project: chutz/snakeoil
 def test_minimal_modifying(self):
     path = pjoin(self.dir, 'foo', 'bar')
     self.assertTrue(osutils.ensure_dirs(path, mode=0750))
     self.assertTrue(osutils.ensure_dirs(path, mode=0005, minimal=True))
Code Example #50
File: domain.py Project: shen390s/pkgcore
 def _create_tempspace(self):
     location = self.domain.pm_tmpdir
     osutils.ensure_dirs(location)
     self.tempspace = tempfile.mkdtemp(dir=location, prefix="merge-engine-tmp")
Code Example #51
 def setUp(self):
     TempDirMixin.setUp(self)
     self.pdir = pjoin(self.dir, 'profiles')
     ensure_dirs(self.pdir)
Code Example #52
    def setUp(self):
        TempDirMixin.setUp(self)
        self.dir_orig = self.dir

        self.dir_master = pjoin(self.dir, 'master')
        self.dir_slave = pjoin(self.dir, 'slave')
        ensure_dirs(self.dir_master)
        ensure_dirs(self.dir_slave)

        ensure_dirs(pjoin(self.dir_master, 'metadata'))
        ensure_dirs(pjoin(self.dir_slave, 'metadata'))
        # silence missing masters warnings
        with open(pjoin(self.dir_master, 'metadata', 'layout.conf'), 'w') as f:
            f.write('masters =\n')
        with open(pjoin(self.dir_slave, 'metadata', 'layout.conf'), 'w') as f:
            f.write('masters = master\n')

        self.master_pdir = pjoin(self.dir_master, 'profiles')
        self.pdir = self.slave_pdir = pjoin(self.dir_slave, 'profiles')
        ensure_dirs(self.master_pdir)
        ensure_dirs(self.slave_pdir)
        # silence missing repo name warnings
        with open(pjoin(self.master_pdir, 'repo_name'), 'w') as f:
            f.write('master\n')
        with open(pjoin(self.slave_pdir, 'repo_name'), 'w') as f:
            f.write('slave\n')

        self.dir = self.dir_slave