Example #1
def request_ebuild_processor(userpriv=False, sandbox=None, fd_pipes=None):
    """Request a processor instance, creating a new one if needed.

    :return: :obj:`EbuildProcessor`
    :param userpriv: should the processor drop privileges to
        :obj:`pkgcore.os_data.portage_gid` and :obj:`pkgcore.os_data.portage_uid`?
    :param sandbox: should the processor be sandboxed?
    """

    if sandbox is None:
        sandbox = spawn.is_sandbox_capable()

    # iterate over a copy since dead processors are removed mid-loop
    for ebp in list(inactive_ebp_list):
        if ebp.userprived() == userpriv and (ebp.sandboxed() or not sandbox):
            if not ebp.is_alive:
                inactive_ebp_list.remove(ebp)
                continue
            inactive_ebp_list.remove(ebp)
            active_ebp_list.append(ebp)
            break
    else:
        ebp = EbuildProcessor(userpriv, sandbox, fd_pipes=fd_pipes)
        active_ebp_list.append(ebp)

    return ebp
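
A minimal usage sketch, assuming the release_ebuild_processor() counterpart and the run_phase() call shape shown in the later examples; the phase name and environment mapping below are placeholders:

# Hypothetical caller: request a sandboxed, non-userpriv processor, run one
# phase, and always hand the processor back to the pool when done.
env = {"T": "/var/tmp/portage/example/temp"}  # placeholder environment
ebp = request_ebuild_processor(userpriv=False, sandbox=True)
try:
    ebp.run_phase("setup", env, env.get("T"), sandbox=True,
                  logging=None, additional_commands=None)
finally:
    release_ebuild_processor(ebp)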
Example #2
def run_generic_phase(pkg, phase, env, userpriv, sandbox,
                      extra_handlers=None, failure_allowed=False, logging=None):
    """
    :param phase: phase to execute
    :param env: environment mapping for the phase
    :param userpriv: will we drop to
        :obj:`pkgcore.os_data.portage_uid` and
        :obj:`pkgcore.os_data.portage_gid` access for this phase?
    :param sandbox: should this phase be sandboxed?
    :param extra_handlers: extra command handlers
    :type extra_handlers: mapping from string to callable
    :param failure_allowed: allow failure without raising error
    :type failure_allowed: boolean
    :param logging: None or a filepath to log output to
    :return: True when the phase has finished execution
    """

    userpriv = userpriv and is_userpriv_capable()
    sandbox = sandbox and is_sandbox_capable()

    if env is None:
        env = expected_ebuild_env(pkg)

    ebd = request_ebuild_processor(userpriv=userpriv, sandbox=sandbox)
    # this is a bit of a hack; used until ebd accepts observers that handle
    # the output redirection on its own.  Primary relevance is when
    # stdout/stderr are pointed at a file; we leave buffering on, just
    # force the flush for synchronization.
    sys.stdout.flush()
    sys.stderr.flush()
    try:
        if not ebd.run_phase(phase, env, env.get('T'), sandbox=sandbox,
                             logging=logging, additional_commands=extra_handlers):
            if not failure_allowed:
                raise format.GenericBuildError(
                    phase + ": Failed building (False/0 return from handler)")
            logger.warning("executing phase %s: execution failed, ignoring", phase)

    except Exception as e:
        ebd.shutdown_processor()
        release_ebuild_processor(ebd)
        if isinstance(e, IGNORED_EXCEPTIONS + (format.GenericBuildError,)):
            raise
        raise_from(format.GenericBuildError(
            "Executing phase %s: Caught exception: %s" % (phase, e)))

    release_ebuild_processor(ebd)
    return True
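
For context, a hedged sketch of a call site for this helper; `pkg` stands in for a pkgcore package object and is not defined here, and the phase name and log path are placeholders:

# Hypothetical call site: run the "unpack" phase for some package object
# `pkg`, dropping privileges and sandboxing when the host supports it.
# env=None lets the helper build the environment via expected_ebuild_env(pkg).
run_generic_phase(
    pkg, "unpack", env=None, userpriv=True, sandbox=True,
    failure_allowed=False, logging="/var/tmp/unpack.log")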
Example #3
def run_generic_phase(pkg, phase, env, userpriv, sandbox, fd_pipes=None,
                      extra_handlers=None, failure_allowed=False, logging=None, **kwargs):
    """
    :param phase: phase to execute
    :param env: environment mapping for the phase
    :param userpriv: will we drop to
        :obj:`pkgcore.os_data.portage_uid` and
        :obj:`pkgcore.os_data.portage_gid` access for this phase?
    :param sandbox: should this phase be sandboxed?
    :param fd_pipes: use custom file descriptors for ebd instance
    :type fd_pipes: mapping between file descriptors
    :param extra_handlers: extra command handlers
    :type extra_handlers: mapping from string to callable
    :param failure_allowed: allow failure without raising error
    :type failure_allowed: boolean
    :param logging: None or a filepath to log output to
    :return: True when the phase has finished execution
    """

    userpriv = userpriv and is_userpriv_capable()
    sandbox = sandbox and is_sandbox_capable()
    if env is None:
        env = expected_ebuild_env(pkg)

    tmpdir = kwargs.get('tmpdir', env.get('T', None))

    ebd = request_ebuild_processor(userpriv=userpriv, sandbox=sandbox, fd_pipes=fd_pipes)
    # this is a bit of a hack; used until ebd accepts observers that handle
    # the output redirection on its own.  Primary relevance is when
    # stdout/stderr are pointed at a file; we leave buffering on, just
    # force the flush for synchronization.
    sys.stdout.flush()
    sys.stderr.flush()
    try:
        if not ebd.run_phase(phase, env, tmpdir=tmpdir, sandbox=sandbox,
                             logging=logging, additional_commands=extra_handlers):
            if not failure_allowed:
                raise format.GenericBuildError(
                    phase + ": Failed building (False/0 return from handler)")
            logger.warning(f"executing phase {phase}: execution failed, ignoring")
    except Exception as e:
        if isinstance(e, ebd_ipc.IpcError):
            # notify bash side of IPC error
            ebd.write(e.ret)
            if isinstance(e, ebd_ipc.IpcInternalError):
                # show main exception cause for internal IPC errors
                ebd.shutdown_processor(force=True)
                raise e.__cause__
        try:
            ebd.shutdown_processor()
        except ProcessorError as pe:
            # catch die errors during shutdown
            e = pe
        release_ebuild_processor(ebd)
        if isinstance(e, ProcessorError):
            # force verbose die output
            e._verbosity = 1
            raise e
        elif isinstance(e, IGNORED_EXCEPTIONS + (format.GenericBuildError,)):
            raise
        raise format.GenericBuildError(
            f"Executing phase {phase}: Caught exception: {e}") from e

    release_ebuild_processor(ebd)
    return True
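
A hedged sketch of the fd_pipes parameter added in this variant, based on how ebd_pipes is assembled in the EbuildProcessor examples below (keys look like fds inside the ebd process, values like existing fds); the log path and `pkg` object are placeholders:

import os

# Hypothetical: send the ebd's stdout and stderr to a log file by overriding
# fds 1 and 2 inside the daemon process.
log_fd = os.open("/var/tmp/ebd-output.log",
                 os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0o644)
run_generic_phase(
    pkg, "compile", env=None, userpriv=True, sandbox=True,
    fd_pipes={1: log_fd, 2: log_fd})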
Example #4
class TestSpawn(TempDir):

    def setup(self):
        self.orig_env = os.environ["PATH"]
        self.null_file = open("/dev/null", "w")
        self.null = self.null_file.fileno()
        os.environ["PATH"] = ":".join([self.dir] + self.orig_env.split(":"))

    def teardown(self):
        self.null_file.close()
        os.environ["PATH"] = self.orig_env

    def generate_script(self, filename, text):
        if not os.path.isabs(filename):
            fp = os.path.join(self.dir, filename)
        else:
            fp = filename
        with open(fp, "w") as f:
            f.write("#!/usr/bin/env bash\n")
            f.write(text)
        os.chmod(fp, 0o750)
        assert os.stat(fp).st_mode & 0o750 == 0o750
        return fp

    def test_get_output(self):
        filename = "spawn-getoutput.sh"
        for r, s, text, args in (
                [0, ["dar\n"], "echo dar\n", {}],
                [0, ["dar"], "echo -n dar", {}],
                [1, ["blah\n", "dar\n"], "echo blah\necho dar\nexit 1", {}],
                [0, [], "echo dar 1>&2", {"fd_pipes": {1: 1, 2: self.null}}]):
            fp = self.generate_script(filename, text)
            assert (r, s) == spawn.spawn_get_output(fp, spawn_type=spawn.spawn_bash, **args)
        os.unlink(fp)

    @pytest.mark.skipif(not spawn.is_sandbox_capable(), reason="missing sandbox binary")
    def test_sandbox(self):
        fp = self.generate_script(
            "spawn-sandbox.sh", "echo $LD_PRELOAD")
        ret = spawn.spawn_get_output(fp, spawn_type=spawn.spawn_sandbox)
        assert ret[1], "no output; exit code was %s; script location %s" % (ret[0], fp)
        assert "libsandbox.so" in [os.path.basename(x.strip()) for x in ret[1][0].split()]
        os.unlink(fp)

    @pytest.mark.skipif(not spawn.is_sandbox_capable(), reason="missing sandbox binary")
    def test_sandbox_empty_dir(self):
        """sandbox gets pissy if it's ran from a nonexistent dir

        this verifies our fix works.
        """
        fp = self.generate_script(
            "spawn-sandbox.sh", "echo $LD_PRELOAD")
        dpath = os.path.join(self.dir, "dar")
        os.mkdir(dpath)
        try:
            cwd = os.getcwd()
        except OSError:
            cwd = None
        try:
            os.chdir(dpath)
            os.rmdir(dpath)
            assert "libsandbox.so" in \
                [os.path.basename(x.strip()) for x in spawn.spawn_get_output(
                    fp, spawn_type=spawn.spawn_sandbox, cwd='/')[1][0].split()]
            os.unlink(fp)
        finally:
            if cwd is not None:
                os.chdir(cwd)

    def test_process_exit_code(self):
        assert spawn.process_exit_code(0) == 0
        assert spawn.process_exit_code(16 << 8) == 16

    def generate_background_pid(self):
        try:
            return spawn.spawn(["sleep", "5s"], returnpid=True)[0]
        except process.CommandNotFound:
            pytest.skip("can't complete the test, sleep binary doesn't exist")

    def test_spawn_returnpid(self):
        pid = self.generate_background_pid()
        try:
            assert os.kill(pid, 0) is None, "returned pid was invalid, or sleep died"
            assert pid in spawn.spawned_pids, "pid wasn't recorded in global pids"
        finally:
            os.kill(pid, signal.SIGKILL)

    def test_cleanup_pids(self):
        pid = self.generate_background_pid()
        spawn.cleanup_pids([pid])
        with pytest.raises(OSError):
            os.kill(pid, 0)
        assert pid not in spawn.spawned_pids, "pid wasn't removed from global pids"

    def test_spawn_bash(self, capfd):
        # run a simple command through bash and check its captured stdout
        assert 0 == spawn.spawn_bash('echo bash')
        out, err = capfd.readouterr()
        assert out.strip() == 'bash'

    def test_umask(self):
        fp = self.generate_script(
            "spawn-umask.sh", "#!%s\numask" % BASH_BINARY)
        try:
            old_umask = os.umask(0)
            if old_umask == 0:
                # crap.
                desired = 0o22
                os.umask(desired)
            else:
                desired = 0
            # umask prints octal, so compare octal representations
            assert ("%o" % desired).lstrip("0") == \
                spawn.spawn_get_output(fp)[1][0].strip().lstrip("0")
        finally:
            os.umask(old_umask)
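
A standalone sketch mirroring the fd_pipes case in test_get_output above; the spawn module's import path is an assumption (the tests only reference it as `spawn`) and the script path is a placeholder:

import os
from pkgcore import spawn  # assumed import path

# Route the script's stderr to /dev/null; spawn_get_output returns an
# (exit_code, output_lines) tuple, as the assertions in test_get_output show.
with open(os.devnull, "w") as null:
    ret, lines = spawn.spawn_get_output(
        "/path/to/script.sh", spawn_type=spawn.spawn_bash,
        fd_pipes={1: 1, 2: null.fileno()})
print(ret, lines)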
Example #5
    def __init__(self, userpriv, sandbox, fd_pipes=None):
        """
        :param sandbox: enables a sandboxed processor
        :param userpriv: enables a userpriv'd processor
        :param fd_pipes: mapping of fds inside the ebd process to existing fds
        """
        self.lock()
        self.ebd = e_const.EBUILD_DAEMON_PATH
        spawn_opts = {'umask': 0o002}

        self._preloaded_eclasses = {}
        self._eclass_caching = False
        self._outstanding_expects = []
        self._metadata_paths = None

        if userpriv:
            self.__userpriv = True
            spawn_opts.update({
                "uid": os_data.portage_uid,
                "gid": os_data.portage_gid,
                "groups": [os_data.portage_gid],
            })
        else:
            if spawn.is_userpriv_capable():
                spawn_opts.update({
                    "gid": os_data.portage_gid,
                    "groups": [0, os_data.portage_gid],
                })
            self.__userpriv = False

        # open the pipes to be used for chatting with the new daemon
        cread, cwrite = os.pipe()
        dread, dwrite = os.pipe()
        self.__sandbox = False

        self._fd_pipes = fd_pipes if fd_pipes is not None else {}

        # since it's questionable which spawn method we'll use (e.g. if
        # sandbox is enabled), we ensure the bashrc is invalid.
        env = {
            x: "/etc/portage/spork/not/valid/ha/ha"
            for x in ("BASHRC", "BASH_ENV")
        }

        if int(os.environ.get('PKGCORE_PERF_DEBUG', 0)):
            env["PKGCORE_PERF_DEBUG"] = os.environ['PKGCORE_PERF_DEBUG']
        if int(os.environ.get('PKGCORE_DEBUG', 0)):
            env["PKGCORE_DEBUG"] = os.environ['PKGCORE_DEBUG']
        if int(os.environ.get('PKGCORE_NOCOLOR', 0)):
            env["PKGCORE_NOCOLOR"] = os.environ['PKGCORE_NOCOLOR']
            if sandbox:
                env["NOCOLOR"] = os.environ['PKGCORE_NOCOLOR']

        # prepend script dir to PATH for git repo or unpacked tarball, for
        # installed versions it's empty
        env["PATH"] = os.pathsep.join(
            list(const.PATH_FORCED_PREPEND) + [os.environ["PATH"]])

        if sandbox:
            if not spawn.is_sandbox_capable():
                raise ValueError("spawn lacks sandbox capabilities")
            self.__sandbox = True
            spawn_func = spawn.spawn_sandbox


#            env.update({"SANDBOX_DEBUG":"1", "SANDBOX_DEBUG_LOG":"/var/tmp/test"})
        else:
            spawn_func = spawn.spawn

        # force to a neutral dir so that sandbox won't explode if
        # run from a nonexistent dir
        spawn_opts["cwd"] = e_const.EBD_PATH

        # Force the pipes to be high up fd wise so nobody stupidly hits 'em, we
        # start from max-3 to avoid a bug in older bash where it doesn't check
        # if an fd is in use before claiming it.
        max_fd = min(spawn.max_fd_limit, 1024)
        env.update({
            "PKGCORE_EBD_READ_FD": str(max_fd - 4),
            "PKGCORE_EBD_WRITE_FD": str(max_fd - 3),
        })

        # allow any pipe overrides except the ones we use to communicate
        ebd_pipes = {0: 0, 1: 1, 2: 2}
        ebd_pipes.update(self._fd_pipes)
        ebd_pipes.update({max_fd - 4: cread, max_fd - 3: dwrite})

        # pgid=0: Each processor is the process group leader for all its
        # spawned children so everything can be terminated easily if necessary.
        self.pid = spawn_func([spawn.BASH_BINARY, self.ebd, "daemonize"],
                              fd_pipes=ebd_pipes,
                              returnpid=True,
                              env=env,
                              pgid=0,
                              **spawn_opts)[0]

        os.close(cread)
        os.close(dwrite)
        self.ebd_write = os.fdopen(cwrite, "w")
        self.ebd_read = os.fdopen(dread, "r")

        # basically a quick "yo" to the daemon
        self.write("dude?")
        if not self.expect("dude!"):
            logger.error("error in server coms, bailing.")
            raise InternalError(
                "expected 'dude!' response from ebd, which wasn't received. "
                "likely a bug")

        if self.__sandbox:
            self.write("sandbox_log?")
            self.__sandbox_log = self.read().split()[0]
        else:
            self.write("no_sandbox")
        self._readonly_vars = frozenset(self.read().split())
        # locking isn't used much, but w/ threading this will matter
        self.unlock()
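
The write/expect exchange above boils down to a two-pipe handshake. A self-contained sketch of that technique using only the standard library (deliberately not pkgcore's spawn wrappers, with a throwaway bash one-liner standing in for the ebd script):

import os
import subprocess

cread, cwrite = os.pipe()   # parent -> child commands
dread, dwrite = os.pipe()   # child -> parent replies
# pass_fds keeps these fds open (and usable) in the child at the same numbers
child = subprocess.Popen(
    ["bash", "-c", 'read -r -u "$READ_FD" cmd; echo "dude!" >&$WRITE_FD'],
    env={"READ_FD": str(cread), "WRITE_FD": str(dwrite),
         "PATH": os.environ["PATH"]},
    pass_fds=(cread, dwrite))
os.close(cread)
os.close(dwrite)
with os.fdopen(cwrite, "w") as to_child, os.fdopen(dread, "r") as from_child:
    to_child.write("dude?\n")
    to_child.flush()
    assert from_child.readline().strip() == "dude!"
child.wait()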
Example #6
File: ebd.py Project: radhermit/pkgcore
def run_generic_phase(pkg, phase, env, userpriv, sandbox, fd_pipes=None,
                      extra_handlers=None, failure_allowed=False, logging=None, **kwargs):
    """
    :param phase: phase to execute
    :param env: environment mapping for the phase
    :param userpriv: will we drop to
        :obj:`pkgcore.os_data.portage_uid` and
        :obj:`pkgcore.os_data.portage_gid` access for this phase?
    :param sandbox: should this phase be sandboxed?
    :param fd_pipes: use custom file descriptors for ebd instance
    :type fd_pipes: mapping between file descriptors
    :param extra_handlers: extra command handlers
    :type extra_handlers: mapping from string to callable
    :param failure_allowed: allow failure without raising error
    :type failure_allowed: boolean
    :param logging: None or a filepath to log output to
    :return: True when the phase has finished execution
    """

    userpriv = userpriv and is_userpriv_capable()
    sandbox = sandbox and is_sandbox_capable()
    if env is None:
        env = expected_ebuild_env(pkg)

    tmpdir = kwargs.get('tmpdir', env.get('T', None))

    ebd = request_ebuild_processor(userpriv=userpriv, sandbox=sandbox, fd_pipes=fd_pipes)
    # this is a bit of a hack; used until ebd accepts observers that handle
    # the output redirection on its own.  Primary relevance is when
    # stdout/stderr are pointed at a file; we leave buffering on, just
    # force the flush for synchronization.
    sys.stdout.flush()
    sys.stderr.flush()
    try:
        if not ebd.run_phase(phase, env, tmpdir=tmpdir, sandbox=sandbox,
                             logging=logging, additional_commands=extra_handlers):
            if not failure_allowed:
                raise format.GenericBuildError(
                    phase + ": Failed building (False/0 return from handler)")
            logger.warning(f"executing phase {phase}: execution failed, ignoring")
    except Exception as e:
        if isinstance(e, ebd_ipc.IpcError):
            # notify bash side of IPC error
            ebd.write(e.ret)
            if isinstance(e, ebd_ipc.IpcInternalError):
                # show main exception cause for internal IPC errors
                ebd.shutdown_processor(force=True)
                raise e.__cause__
        try:
            ebd.shutdown_processor()
        except ProcessorError as pe:
            # catch die errors during shutdown
            e = pe
        release_ebuild_processor(ebd)
        if isinstance(e, ProcessorError):
            # force verbose die output
            e._verbosity = 1
            raise e
        elif isinstance(e, IGNORED_EXCEPTIONS + (format.GenericBuildError,)):
            raise
        raise format.GenericBuildError(
            f"Executing phase {phase}: Caught exception: {e}") from e

    release_ebuild_processor(ebd)
    return True
Example #7
    def __init__(self, userpriv, sandbox, fd_pipes=None):
        """
        :param sandbox: enables a sandboxed processor
        :param userpriv: enables a userpriv'd processor
        :param fd_pipes: mapping of fds inside the ebd process to existing fds
        """
        self.lock()
        self.ebd = e_const.EBUILD_DAEMON_PATH
        self.sandbox = sandbox
        self.userpriv = userpriv
        self.custom_fds = fd_pipes

        self._preloaded_eclasses = {}
        self._eclass_caching = False
        self._outstanding_expects = []
        self._metadata_paths = None

        spawn_opts = {'umask': 0o002}
        if self.userpriv:
            spawn_opts.update({
                "uid": os_data.portage_uid,
                "gid": os_data.portage_gid,
                "groups": [os_data.portage_gid],
            })
        elif spawn.is_userpriv_capable():
            spawn_opts.update({
                "gid": os_data.portage_gid,
                "groups": [0, os_data.portage_gid],
            })

        # force invalid bashrc
        env = {x: "/not/valid" for x in ("BASHRC", "BASH_ENV")}

        if int(os.environ.get('PKGCORE_PERF_DEBUG', 0)):
            env["PKGCORE_PERF_DEBUG"] = os.environ['PKGCORE_PERF_DEBUG']
        if int(os.environ.get('PKGCORE_DEBUG', 0)):
            env["PKGCORE_DEBUG"] = os.environ['PKGCORE_DEBUG']

        # prepend script dir to PATH for git repo or unpacked tarball, for
        # installed versions it's empty
        env["PATH"] = os.pathsep.join(
            list(const.PATH_FORCED_PREPEND) + [os.environ["PATH"]])

        if self.sandbox:
            if not spawn.is_sandbox_capable():
                raise ValueError("spawn lacks sandbox capabilities")
            spawn_func = spawn.spawn_sandbox
        else:
            spawn_func = spawn.spawn

        # force to a neutral dir so that sandbox won't explode if
        # run from a nonexistent dir
        spawn_opts["cwd"] = e_const.EBD_PATH

        # Use high numbered fds for pipes to avoid external usage collisions
        # starting with max-3 to avoid a bug in older bash versions where it
        # doesn't check if an fd is in use before claiming it.
        max_fd = min(spawn.max_fd_limit, 1024)
        env.update({
            "PKGCORE_EBD_READ_FD": str(max_fd - 4),
            "PKGCORE_EBD_WRITE_FD": str(max_fd - 3),
        })

        # open pipes used for communication
        cread, cwrite = os.pipe()
        dread, dwrite = os.pipe()

        # allow pipe overrides except ebd-related
        ebd_pipes = {0: 0, 1: 1, 2: 2}
        if fd_pipes:
            ebd_pipes.update(fd_pipes)
        ebd_pipes[max_fd - 4] = cread
        ebd_pipes[max_fd - 3] = dwrite

        # force each ebd instance to be a process group leader so everything
        # can be easily terminated
        self.pid = spawn_func(
            [spawn.BASH_BINARY, self.ebd, "daemonize"],
            fd_pipes=ebd_pipes, returnpid=True, env=env, pgid=0, **spawn_opts)[0]

        os.close(cread)
        os.close(dwrite)
        self.ebd_write = os.fdopen(cwrite, "w")
        self.ebd_read = os.fdopen(dread, "r")

        # verify ebd is running
        self.write("ebd?")
        if not self.expect("ebd!"):
            raise InternalError("expected 'ebd!' response from ebd, which wasn't received")

        if self.sandbox:
            self.write("sandbox_log?")
            self.__sandbox_log = self.read().split()[0]
        else:
            self.write("no_sandbox")
        self._readonly_vars = frozenset(self.read().split())
        # locking isn't used much, but w/ threading this will matter
        self.unlock()
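
The pgid=0 comment above notes that each processor is made a process group leader so its whole tree can be torn down at once. A self-contained sketch of that idea using only the standard library (not pkgcore's spawn):

import os
import signal
import subprocess

# Start the child in its own process group; preexec_fn runs os.setpgrp()
# in the child before exec, so the child's pgid equals its pid.
child = subprocess.Popen(["sleep", "60"], preexec_fn=os.setpgrp)
# ... later, terminate the child and anything it may have forked:
os.killpg(os.getpgid(child.pid), signal.SIGTERM)
child.wait()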