def fetch(self, target): """Fetch a file. :type target: :obj:`pkgcore.fetch.fetchable` instance :return: None if fetching failed, else on disk location of the copied file """ if not isinstance(target, fetchable): raise TypeError( f"target must be fetchable instance/derivative: {target}") path = pjoin(self.distdir, target.filename) uris = iter(target.uri) last_exc = RuntimeError("fetching failed for an unknown reason") spawn_opts = {'umask': 0o002, 'env': self.extra_env} if self.userpriv and is_userpriv_capable(): spawn_opts.update({"uid": portage_uid, "gid": portage_gid}) for _attempt in range(self.attempts): try: self._verify(path, target) return path except errors.MissingDistfile as e: command = self.command last_exc = e except errors.ChksumFailure: raise except errors.FetchFailed as e: last_exc = e if not e.resumable: try: os.unlink(path) command = self.command except OSError as e: raise errors.UnmodifiableFile(path, e) from e else: command = self.resume_command # Note we're not even checking the results, the verify portion of # the loop handles this. In other words, don't trust the external # fetcher's exit code, trust our chksums instead. try: spawn_bash( command % {"URI": next(uris), "FILE": target.filename}, **spawn_opts) except StopIteration: raise errors.FetchFailed( target.filename, "ran out of urls to fetch from") else: raise last_exc
def run_generic_phase(pkg, phase, env, userpriv, sandbox,
                      extra_handlers=None, failure_allowed=False, logging=None):
    """
    :param phase: phase to execute
    :param env: environment mapping for the phase
    :param userpriv: will we drop to :obj:`pkgcore.os_data.portage_uid` and
        :obj:`pkgcore.os_data.portage_gid` access for this phase?
    :param sandbox: should this phase be sandboxed?
    :param extra_handlers: extra command handlers
    :type extra_handlers: mapping from string to callable
    :param failure_allowed: allow failure without raising error
    :type failure_allowed: boolean
    :param logging: None or a filepath to log output to
    :return: True when the phase has finished execution
    """
    userpriv = userpriv and is_userpriv_capable()
    sandbox = sandbox and is_sandbox_capable()

    if env is None:
        env = expected_ebuild_env(pkg)

    ebd = request_ebuild_processor(userpriv=userpriv, sandbox=sandbox)
    # this is a bit of a hack; used until ebd accepts observers that handle
    # the output redirection on its own. Primary relevance is when
    # stdout/stderr are pointed at a file; we leave buffering on, just
    # force the flush for synchronization.
    sys.stdout.flush()
    sys.stderr.flush()
    try:
        if not ebd.run_phase(phase, env, env.get('T'), sandbox=sandbox,
                             logging=logging, additional_commands=extra_handlers):
            if not failure_allowed:
                raise format.GenericBuildError(
                    phase + ": Failed building (False/0 return from handler)")
            logger.warning("executing phase %s: execution failed, ignoring", phase)
    except Exception as e:
        ebd.shutdown_processor()
        release_ebuild_processor(ebd)
        if isinstance(e, IGNORED_EXCEPTIONS + (format.GenericBuildError,)):
            raise
        raise_from(format.GenericBuildError(
            "Executing phase %s: Caught exception: %s" % (phase, e)))

    release_ebuild_processor(ebd)
    return True
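
# --- Illustrative sketch (not pkgcore API) ----------------------------------
# The except block above lets exceptions in IGNORED_EXCEPTIONS (typically
# control-flow exceptions such as KeyboardInterrupt/SystemExit) and
# already-wrapped build errors propagate untouched, and wraps anything else
# in a GenericBuildError. A toy version of that filter-and-wrap pattern,
# with stand-in names:

IGNORED_EXCEPTIONS_SKETCH = (KeyboardInterrupt, SystemExit)  # assumed contents


class BuildErrorSketch(Exception):
    """Stand-in for format.GenericBuildError."""


def run_wrapped(phase, handler):
    try:
        return handler()
    except Exception as e:
        if isinstance(e, IGNORED_EXCEPTIONS_SKETCH + (BuildErrorSketch,)):
            raise  # pass through untouched
        # chain the original exception so the traceback stays informative
        raise BuildErrorSketch(
            f"Executing phase {phase}: Caught exception: {e}") from e
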
def fetch(self, target): """ fetch a file :type target: :obj:`pkgcore.fetch.fetchable` instance :return: None if fetching failed, else on disk location of the copied file """ if not isinstance(target, fetchable): raise TypeError( "target must be fetchable instance/derivative: %s" % target) kw = {"mode": 0775} if self.readonly: kw["mode"] = 0555 if self.userpriv: kw["gid"] = portage_gid kw["minimal"] = True if not ensure_dirs(self.distdir, **kw): raise errors.distdirPerms( self.distdir, "if userpriv, uid must be %i, gid must be %i. " "if not readonly, directory must be 0775, else 0555" % (portage_uid, portage_gid)) fp = pjoin(self.distdir, target.filename) filename = os.path.basename(fp) uri = iter(target.uri) if self.userpriv and is_userpriv_capable(): extra = {"uid": portage_uid, "gid": portage_gid} else: extra = {} extra["umask"] = 0002 extra["env"] = self.extra_env attempts = self.attempts last_exc = None try: while attempts >= 0: try: c = self._verify(fp, target) return fp except errors.MissingDistfile: command = self.command last_exc = sys.exc_info() except errors.FetchFailed as e: last_exc = sys.exc_info() if not e.resumable: try: os.unlink(fp) command = self.command except OSError as oe: raise_from(errors.UnmodifiableFile(fp, oe)) else: command = self.resume_command # yeah, it's funky, but it works. if attempts > 0: u = uri.next() # note we're not even checking the results. the # verify portion of the loop handles this. iow, # don't trust their exit code. trust our chksums # instead. spawn_bash(command % {"URI": u, "FILE": filename}, **extra) attempts -= 1 assert last_exc is not None raise last_exc[0], last_exc[1], last_exc[2] except StopIteration: # ran out of uris raise errors.FetchFailed(fp, "Ran out of urls to fetch from")
def run_generic_phase(pkg, phase, env, userpriv, sandbox, fd_pipes=None,
                      extra_handlers=None, failure_allowed=False, logging=None,
                      **kwargs):
    """
    :param phase: phase to execute
    :param env: environment mapping for the phase
    :param userpriv: will we drop to :obj:`pkgcore.os_data.portage_uid` and
        :obj:`pkgcore.os_data.portage_gid` access for this phase?
    :param sandbox: should this phase be sandboxed?
    :param fd_pipes: use custom file descriptors for ebd instance
    :type fd_pipes: mapping between file descriptors
    :param extra_handlers: extra command handlers
    :type extra_handlers: mapping from string to callable
    :param failure_allowed: allow failure without raising error
    :type failure_allowed: boolean
    :param logging: None or a filepath to log output to
    :return: True when the phase has finished execution
    """
    userpriv = userpriv and is_userpriv_capable()
    sandbox = sandbox and is_sandbox_capable()

    # default env before the tmpdir lookup, otherwise env.get() blows up
    # when env is None
    if env is None:
        env = expected_ebuild_env(pkg)
    tmpdir = kwargs.get('tmpdir', env.get('T', None))

    ebd = request_ebuild_processor(userpriv=userpriv, sandbox=sandbox,
                                   fd_pipes=fd_pipes)
    # this is a bit of a hack; used until ebd accepts observers that handle
    # the output redirection on its own. Primary relevance is when
    # stdout/stderr are pointed at a file; we leave buffering on, just
    # force the flush for synchronization.
    sys.stdout.flush()
    sys.stderr.flush()
    try:
        if not ebd.run_phase(phase, env, tmpdir=tmpdir, sandbox=sandbox,
                             logging=logging, additional_commands=extra_handlers):
            if not failure_allowed:
                raise format.GenericBuildError(
                    phase + ": Failed building (False/0 return from handler)")
            logger.warning(f"executing phase {phase}: execution failed, ignoring")
    except Exception as e:
        if isinstance(e, ebd_ipc.IpcError):
            # notify bash side of IPC error
            ebd.write(e.ret)
            if isinstance(e, ebd_ipc.IpcInternalError):
                # show main exception cause for internal IPC errors
                ebd.shutdown_processor(force=True)
                raise e.__cause__
        try:
            ebd.shutdown_processor()
        except ProcessorError as pe:
            # catch die errors during shutdown
            e = pe
        release_ebuild_processor(ebd)
        if isinstance(e, ProcessorError):
            # force verbose die output
            e._verbosity = 1
            raise e
        elif isinstance(e, IGNORED_EXCEPTIONS + (format.GenericBuildError,)):
            raise
        raise format.GenericBuildError(
            f"Executing phase {phase}: Caught exception: {e}") from e

    release_ebuild_processor(ebd)
    return True
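
# --- Illustrative sketch (not pkgcore API) ----------------------------------
# fd_pipes follows a {child_fd: parent_fd} convention: e.g. {1: log_fd}
# points the child's stdout at an already-open log file. A rough
# standard-library equivalent of that remapping; the helper name is
# hypothetical and the real spawn implementation differs.

import os
import subprocess


def spawn_with_fd_pipes(args, fd_pipes):
    """Hypothetical sketch: remap {child_fd: parent_fd} before exec."""
    def remap():
        # runs in the child between fork() and exec()
        for child_fd, parent_fd in fd_pipes.items():
            os.dup2(parent_fd, child_fd)
    # pass_fds keeps the parent fds open in the child so dup2 has a target;
    # this toy only handles the standard fds (0-2) as remap targets
    return subprocess.Popen(
        args, preexec_fn=remap, pass_fds=tuple(fd_pipes.values()))


# usage: send the child's stdout (fd 1) into a log file
with open("phase.log", "wb") as log:
    spawn_with_fd_pipes(["echo", "hello"], {1: log.fileno()}).wait()
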
def __init__(self, userpriv, sandbox, fd_pipes=None):
    """
    :param sandbox: enables a sandboxed processor
    :param userpriv: enables a userpriv'd processor
    :param fd_pipes: mapping from existing fd to fd inside the ebd process
    """
    self.lock()
    self.ebd = e_const.EBUILD_DAEMON_PATH
    spawn_opts = {'umask': 0o002}

    self._preloaded_eclasses = {}
    self._eclass_caching = False
    self._outstanding_expects = []
    self._metadata_paths = None

    if userpriv:
        self.__userpriv = True
        spawn_opts.update({
            "uid": os_data.portage_uid,
            "gid": os_data.portage_gid,
            "groups": [os_data.portage_gid],
        })
    else:
        if spawn.is_userpriv_capable():
            spawn_opts.update({
                "gid": os_data.portage_gid,
                "groups": [0, os_data.portage_gid],
            })
        self.__userpriv = False

    # open the pipes to be used for chatting with the new daemon
    cread, cwrite = os.pipe()
    dread, dwrite = os.pipe()
    self.__sandbox = False

    self._fd_pipes = fd_pipes if fd_pipes is not None else {}

    # since it's questionable which spawn method we'll use (if
    # sandbox fex), we ensure the bashrc is invalid.
    env = {
        x: "/etc/portage/spork/not/valid/ha/ha"
        for x in ("BASHRC", "BASH_ENV")
    }
    if int(os.environ.get('PKGCORE_PERF_DEBUG', 0)):
        env["PKGCORE_PERF_DEBUG"] = os.environ['PKGCORE_PERF_DEBUG']
    if int(os.environ.get('PKGCORE_DEBUG', 0)):
        env["PKGCORE_DEBUG"] = os.environ['PKGCORE_DEBUG']
    if int(os.environ.get('PKGCORE_NOCOLOR', 0)):
        env["PKGCORE_NOCOLOR"] = os.environ['PKGCORE_NOCOLOR']
        if sandbox:
            env["NOCOLOR"] = os.environ['PKGCORE_NOCOLOR']

    # prepend script dir to PATH for git repo or unpacked tarball, for
    # installed versions it's empty
    env["PATH"] = os.pathsep.join(
        list(const.PATH_FORCED_PREPEND) + [os.environ["PATH"]])

    if sandbox:
        if not spawn.is_sandbox_capable():
            raise ValueError("spawn lacks sandbox capabilities")
        self.__sandbox = True
        spawn_func = spawn.spawn_sandbox
        # env.update({"SANDBOX_DEBUG": "1", "SANDBOX_DEBUG_LOG": "/var/tmp/test"})
    else:
        spawn_func = spawn.spawn

    # force to a neutral dir so that sandbox won't explode if
    # ran from a nonexistent dir
    spawn_opts["cwd"] = e_const.EBD_PATH

    # Force the pipes to be high up fd wise so nobody stupidly hits 'em, we
    # start from max-3 to avoid a bug in older bash where it doesn't check
    # if an fd is in use before claiming it.
    max_fd = min(spawn.max_fd_limit, 1024)
    env.update({
        "PKGCORE_EBD_READ_FD": str(max_fd - 4),
        "PKGCORE_EBD_WRITE_FD": str(max_fd - 3),
    })

    # allow any pipe overrides except the ones we use to communicate
    ebd_pipes = {0: 0, 1: 1, 2: 2}
    ebd_pipes.update(self._fd_pipes)
    ebd_pipes.update({max_fd - 4: cread, max_fd - 3: dwrite})

    # pgid=0: Each processor is the process group leader for all its
    # spawned children so everything can be terminated easily if necessary.
    self.pid = spawn_func(
        [spawn.BASH_BINARY, self.ebd, "daemonize"],
        fd_pipes=ebd_pipes, returnpid=True, env=env, pgid=0,
        **spawn_opts)[0]

    os.close(cread)
    os.close(dwrite)
    self.ebd_write = os.fdopen(cwrite, "w")
    self.ebd_read = os.fdopen(dread, "r")

    # basically a quick "yo" to the daemon
    self.write("dude?")
    if not self.expect("dude!"):
        logger.error("error in server coms, bailing.")
        raise InternalError(
            "expected 'dude!' response from ebd, which wasn't received. "
            "likely a bug")

    if self.__sandbox:
        self.write("sandbox_log?")
        self.__sandbox_log = self.read().split()[0]
    else:
        self.write("no_sandbox")

    self._readonly_vars = frozenset(self.read().split())
    # locking isn't used much, but w/ threading this will matter
    self.unlock()
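
# --- Illustrative sketch (not pkgcore API) ----------------------------------
# The max-4/max-3 fd placement can be reproduced with the standard library.
# This assumes spawn.max_fd_limit mirrors the soft RLIMIT_NOFILE limit of the
# process (an assumption, not confirmed by the code above):

import os
import resource

soft_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
max_fd = min(soft_limit, 1024)  # same 1024 cap as the processor above

cread, cwrite = os.pipe()   # parent writes commands, daemon reads them
dread, dwrite = os.pipe()   # daemon writes replies, parent reads them

# the bash side locates its ends of the pipes via these variables
env = {
    "PKGCORE_EBD_READ_FD": str(max_fd - 4),
    "PKGCORE_EBD_WRITE_FD": str(max_fd - 3),
}
fd_map = {max_fd - 4: cread, max_fd - 3: dwrite}
print(env, fd_map)
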
def fetch(self, target): """Fetch a file. :type target: :obj:`pkgcore.fetch.fetchable` instance :return: None if fetching failed, else on disk location of the copied file """ if not isinstance(target, fetchable): raise TypeError( f"target must be fetchable instance/derivative: {target}") kw = {"mode": 0o775} if self.readonly: kw["mode"] = 0o555 if self.userpriv: kw["gid"] = portage_gid kw["minimal"] = True if not ensure_dirs(self.distdir, **kw): raise errors.DistdirPerms( self.distdir, "if userpriv, uid must be %i, gid must be %i. " "if not readonly, directory must be 0775, else 0555" % ( portage_uid, portage_gid)) path = pjoin(self.distdir, target.filename) uris = iter(target.uri) last_exc = RuntimeError("fetching failed for an unknown reason") spawn_opts = {'umask': 0o002, 'env': self.extra_env} if self.userpriv and is_userpriv_capable(): spawn_opts.update({"uid": portage_uid, "gid": portage_gid}) for _attempt in range(self.attempts): try: self._verify(path, target) return path except errors.MissingDistfile as e: command = self.command last_exc = e except errors.ChksumFailure: raise except errors.FetchFailed as e: last_exc = e if not e.resumable: try: os.unlink(path) command = self.command except OSError as e: raise errors.UnmodifiableFile(path, e) from e else: command = self.resume_command # Note we're not even checking the results, the verify portion of # the loop handles this. In other words, don't trust the external # fetcher's exit code, trust our chksums instead. try: spawn_bash( command % {"URI": next(uris), "FILE": target.filename}, **spawn_opts) except StopIteration: raise errors.FetchFailed( target.filename, "ran out of urls to fetch from") else: raise last_exc
def __init__(self, userpriv, sandbox, fd_pipes=None):
    """
    :param sandbox: enables a sandboxed processor
    :param userpriv: enables a userpriv'd processor
    :param fd_pipes: mapping from existing fd to fd inside the ebd process
    """
    self.lock()
    self.ebd = e_const.EBUILD_DAEMON_PATH
    self.sandbox = sandbox
    self.userpriv = userpriv
    self.custom_fds = fd_pipes

    self._preloaded_eclasses = {}
    self._eclass_caching = False
    self._outstanding_expects = []
    self._metadata_paths = None

    spawn_opts = {'umask': 0o002}
    if self.userpriv:
        spawn_opts.update({
            "uid": os_data.portage_uid,
            "gid": os_data.portage_gid,
            "groups": [os_data.portage_gid],
        })
    elif spawn.is_userpriv_capable():
        spawn_opts.update({
            "gid": os_data.portage_gid,
            "groups": [0, os_data.portage_gid],
        })

    # force invalid bashrc
    env = {x: "/not/valid" for x in ("BASHRC", "BASH_ENV")}
    if int(os.environ.get('PKGCORE_PERF_DEBUG', 0)):
        env["PKGCORE_PERF_DEBUG"] = os.environ['PKGCORE_PERF_DEBUG']
    if int(os.environ.get('PKGCORE_DEBUG', 0)):
        env["PKGCORE_DEBUG"] = os.environ['PKGCORE_DEBUG']

    # prepend script dir to PATH for git repo or unpacked tarball, for
    # installed versions it's empty
    env["PATH"] = os.pathsep.join(
        list(const.PATH_FORCED_PREPEND) + [os.environ["PATH"]])

    if self.sandbox:
        if not spawn.is_sandbox_capable():
            raise ValueError("spawn lacks sandbox capabilities")
        spawn_func = spawn.spawn_sandbox
    else:
        spawn_func = spawn.spawn

    # force to a neutral dir so that sandbox won't explode if
    # ran from a nonexistent dir
    spawn_opts["cwd"] = e_const.EBD_PATH

    # Use high numbered fds for pipes to avoid external usage collisions
    # starting with max-3 to avoid a bug in older bash versions where it
    # doesn't check if an fd is in use before claiming it.
    max_fd = min(spawn.max_fd_limit, 1024)
    env.update({
        "PKGCORE_EBD_READ_FD": str(max_fd - 4),
        "PKGCORE_EBD_WRITE_FD": str(max_fd - 3),
    })

    # open pipes used for communication
    cread, cwrite = os.pipe()
    dread, dwrite = os.pipe()

    # allow pipe overrides except ebd-related
    ebd_pipes = {0: 0, 1: 1, 2: 2}
    if fd_pipes:
        ebd_pipes.update(fd_pipes)
    ebd_pipes[max_fd - 4] = cread
    ebd_pipes[max_fd - 3] = dwrite

    # force each ebd instance to be a process group leader so everything
    # can be easily terminated
    self.pid = spawn_func(
        [spawn.BASH_BINARY, self.ebd, "daemonize"],
        fd_pipes=ebd_pipes, returnpid=True, env=env, pgid=0,
        **spawn_opts)[0]

    os.close(cread)
    os.close(dwrite)
    self.ebd_write = os.fdopen(cwrite, "w")
    self.ebd_read = os.fdopen(dread, "r")

    # verify ebd is running
    self.write("ebd?")
    if not self.expect("ebd!"):
        raise InternalError(
            "expected 'ebd!' response from ebd, which wasn't received")

    if self.sandbox:
        self.write("sandbox_log?")
        self.__sandbox_log = self.read().split()[0]
    else:
        self.write("no_sandbox")

    self._readonly_vars = frozenset(self.read().split())
    # locking isn't used much, but w/ threading this will matter
    self.unlock()
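
# --- Illustrative sketch (not pkgcore API) ----------------------------------
# The "ebd?"/"ebd!" exchange is a plain line-based handshake over the two
# pipes. A self-contained toy of the same pattern, using a bash child over
# stdin/stdout instead of the high-numbered fds; the script body and the
# "start_phase" command are illustrative, not the real ebuild daemon protocol.

import subprocess

script = 'read q; [ "$q" = "ebd?" ] && echo "ebd!"; while read c; do echo "ack $c"; done'
proc = subprocess.Popen(
    ["bash", "-c", script],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True, bufsize=1)

proc.stdin.write("ebd?\n")
proc.stdin.flush()
if proc.stdout.readline().strip() != "ebd!":
    raise RuntimeError("expected 'ebd!' response from toy daemon")

proc.stdin.write("start_phase setup\n")  # hypothetical command
proc.stdin.flush()
print(proc.stdout.readline().strip())    # -> "ack start_phase setup"

proc.stdin.close()
proc.wait()
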