def _unregister(self):
    """
    Unregister from the scheduler and close open files.
    """
    if not self.unmerge:
        # Populate the vardbapi cache for the new package
        # while its inodes are still hot.
        try:
            self.vartree.dbapi.aux_get(self.settings.mycpv, ["EAPI"])
        except KeyError:
            pass

    self._unlock_vdb()
    if self._elog_reader_fd is not None:
        self.scheduler.remove_reader(self._elog_reader_fd)
        os.close(self._elog_reader_fd)
        self._elog_reader_fd = None
    if self._elog_keys is not None:
        for key in self._elog_keys:
            portage.elog.elog_process(key, self.settings,
                phasefilter=("prerm", "postrm"))
        self._elog_keys = None

    super(MergeProcess, self)._unregister()
def perform_checksum(filename, hashname="MD5", calc_prelink=0):
    """
    Run a specific checksum against a file. The filename can
    be either unicode or an encoded byte string. If filename
    is unicode then a UnicodeDecodeError will be raised if
    necessary.

    @param filename: File to run the checksum against
    @type filename: String
    @param hashname: The type of hash function to run
    @type hashname: String
    @param calc_prelink: Whether or not to reverse prelink before running the checksum
    @type calc_prelink: Integer
    @rtype: Tuple
    @return: The hash and size of the data
    """
    global prelink_capable
    # Make sure filename is encoded with the correct encoding before
    # it is passed to spawn (for prelink) and/or the hash function.
    filename = _unicode_encode(filename,
        encoding=_encodings['fs'], errors='strict')
    myfilename = filename
    prelink_tmpfile = None
    try:
        if (calc_prelink and prelink_capable and
            is_prelinkable_elf(filename)):
            # Create non-prelinked temporary file to checksum.
            # Files rejected by prelink are summed in place.
            try:
                tmpfile_fd, prelink_tmpfile = tempfile.mkstemp()
                try:
                    retval = portage.process.spawn(
                        [PRELINK_BINARY, "--verify", filename],
                        fd_pipes={1: tmpfile_fd})
                finally:
                    os.close(tmpfile_fd)
                if retval == os.EX_OK:
                    myfilename = prelink_tmpfile
            except portage.exception.CommandNotFound:
                # This happens during uninstallation of prelink.
                prelink_capable = False
        try:
            if hashname not in hashfunc_keys:
                raise portage.exception.DigestException(hashname +
                    " hash function not available (needs dev-python/pycrypto)")
            myhash, mysize = hashfunc_map[hashname].checksum_file(myfilename)
        except (OSError, IOError) as e:
            if e.errno in (errno.ENOENT, errno.ESTALE):
                raise portage.exception.FileNotFound(myfilename)
            elif e.errno == portage.exception.PermissionDenied.errno:
                raise portage.exception.PermissionDenied(myfilename)
            raise
        return myhash, mysize
    finally:
        if prelink_tmpfile:
            try:
                os.unlink(prelink_tmpfile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
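# A minimal standalone sketch of what the checksum_file() call above
# amounts to, using only the standard library. The helper name
# _sketch_checksum_file is hypothetical, not part of portage.
import hashlib

def _sketch_checksum_file(path, hashname="MD5"):
    """Return (hexdigest, size) for the file at `path`."""
    hashobj = hashlib.new(hashname.lower())
    size = 0
    with open(path, "rb") as f:
        # Read in fixed-size blocks so large files do not load into memory.
        for block in iter(lambda: f.read(65536), b""):
            hashobj.update(block)
            size += len(block)
    return hashobj.hexdigest(), size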
def _elog_output_handler(self, fd, event):
    output = None
    if event & self.scheduler.IO_IN:
        try:
            output = os.read(fd, self._bufsize)
        except OSError as e:
            if e.errno not in (errno.EAGAIN, errno.EINTR):
                raise
    if output:
        lines = _unicode_decode(output).split('\n')
        if len(lines) == 1:
            self._buf += lines[0]
        else:
            lines[0] = self._buf + lines[0]
            self._buf = lines.pop()
            out = io.StringIO()
            for line in lines:
                funcname, phase, key, msg = line.split(' ', 3)
                self._elog_keys.add(key)
                reporter = getattr(portage.elog.messages, funcname)
                reporter(msg, phase=phase, key=key, out=out)

    if event & self.scheduler.IO_HUP:
        self.scheduler.source_remove(self._elog_reg_id)
        self._elog_reg_id = None
        os.close(self._elog_reader_fd)
        self._elog_reader_fd = None
        return False

    return True
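# The split/pop pattern above implements incremental line buffering: the
# last element of split('\n') is either an incomplete line or '', and is
# carried over to the next read. A minimal self-contained illustration
# (the helper name is hypothetical):
def _sketch_feed_chunks(chunks):
    buf = ""
    complete = []
    for chunk in chunks:
        lines = chunk.split("\n")
        if len(lines) == 1:
            # No newline yet: accumulate and wait for more data.
            buf += lines[0]
        else:
            lines[0] = buf + lines[0]
            buf = lines.pop()
            complete.extend(lines)
    return complete, buf

# _sketch_feed_chunks(["ab", "c\nde", "f\n"]) -> (["abc", "def"], "")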
def _testPipeLogger(self, test_string):

    producer = PopenProcess(proc=subprocess.Popen(
        ["bash", "-c", self._echo_cmd % test_string],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
        scheduler=global_event_loop())

    fd, log_file_path = tempfile.mkstemp()
    try:

        consumer = PipeLogger(background=True,
            input_fd=os.dup(producer.proc.stdout.fileno()),
            log_file_path=log_file_path)

        # Close the stdout pipe, since we duplicated it, and it
        # must be closed in order to avoid a ResourceWarning.
        producer.proc.stdout.close()

        producer.pipe_reader = consumer

        producer.start()
        producer.wait()

        self.assertEqual(producer.returncode, os.EX_OK)
        self.assertEqual(consumer.returncode, os.EX_OK)

        with open(log_file_path, 'rb') as f:
            content = f.read()

    finally:
        os.close(fd)
        os.unlink(log_file_path)

    return content.decode('ascii', 'replace')
def _start_post_builddir_lock(self, lock_future=None, start_ipc_daemon=False):
    if lock_future is not None:
        if lock_future is not self._start_future:
            raise AssertionError('lock_future is not self._start_future')
        self._start_future = None
        if lock_future.cancelled():
            self._build_dir = None
            self.cancelled = True
            self._was_cancelled()
            self._async_wait()
            return
        lock_future.result()

    if start_ipc_daemon:
        self.settings['PORTAGE_IPC_DAEMON'] = "1"
        self._start_ipc_daemon()

    if self.fd_pipes is None:
        self.fd_pipes = {}
    null_fd = None
    if 0 not in self.fd_pipes and \
        self.phase not in self._phases_interactive_whitelist and \
        "interactive" not in self.settings.get("PROPERTIES", "").split():
        null_fd = os.open('/dev/null', os.O_RDONLY)
        self.fd_pipes[0] = null_fd

    self.log_filter_file = self.settings.get('PORTAGE_LOG_FILTER_FILE_CMD')

    try:
        SpawnProcess._start(self)
    finally:
        if null_fd is not None:
            os.close(null_fd)
def _testPipeLogger(self, test_string):

    producer = PopenProcess(proc=subprocess.Popen(
        ["bash", "-c", self._echo_cmd % test_string],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
        scheduler=global_event_loop())

    fd, log_file_path = tempfile.mkstemp()
    try:

        consumer = PipeLogger(background=True,
            input_fd=producer.proc.stdout,
            log_file_path=log_file_path)

        producer.pipe_reader = consumer

        producer.start()
        producer.wait()

        self.assertEqual(producer.returncode, os.EX_OK)
        self.assertEqual(consumer.returncode, os.EX_OK)

        with open(log_file_path, 'rb') as f:
            content = f.read()

    finally:
        os.close(fd)
        os.unlink(log_file_path)

    return content.decode('ascii', 'replace')
def _spawn(self, args, fd_pipes, **kwargs):
    """
    Extend the superclass _spawn method to perform some pre-fork and
    post-fork actions.
    """

    elog_reader_fd, elog_writer_fd = os.pipe()

    fcntl.fcntl(
        elog_reader_fd,
        fcntl.F_SETFL,
        fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK,
    )

    blockers = None
    if self.blockers is not None:
        # Query blockers in the main process, since closing
        # of file descriptors in the subprocess can prevent
        # access to open database connections such as that
        # used by the sqlite metadata cache module.
        blockers = self.blockers()
    mylink = portage.dblink(
        self.mycat,
        self.mypkg,
        settings=self.settings,
        treetype=self.treetype,
        vartree=self.vartree,
        blockers=blockers,
        pipe=elog_writer_fd,
    )
    fd_pipes[elog_writer_fd] = elog_writer_fd
    self.scheduler.add_reader(elog_reader_fd, self._elog_output_handler)

    # If a concurrent emerge process tries to install a package
    # in the same SLOT as this one at the same time, there is an
    # extremely unlikely chance that the COUNTER values will not be
    # ordered correctly unless we lock the vdb here.
    # FEATURES=parallel-install skips this lock in order to
    # improve performance, and the risk is practically negligible.
    self._lock_vdb()
    if not self.unmerge:
        self._counter = self.vartree.dbapi.counter_tick()

    self._dblink = mylink
    self._elog_reader_fd = elog_reader_fd
    pids = super(MergeProcess, self)._spawn(args, fd_pipes, **kwargs)
    os.close(elog_writer_fd)
    self._buf = ""
    self._elog_keys = set()
    # Discard messages which will be collected by the subprocess,
    # in order to avoid duplicates (bug #446136).
    portage.elog.messages.collect_messages(key=mylink.mycpv)

    # invalidate relevant vardbapi caches
    if self.vartree.dbapi._categories is not None:
        self.vartree.dbapi._categories = None
    self.vartree.dbapi._pkgs_changed = True
    self.vartree.dbapi._clear_pkg_cache(mylink)

    return pids
def testLogfile(self):
    logfile = None
    try:
        fd, logfile = tempfile.mkstemp()
        os.close(fd)
        null_fd = os.open('/dev/null', os.O_RDWR)
        test_string = 2 * "blah blah blah\n"
        scheduler = PollScheduler().sched_iface
        proc = SpawnProcess(
            args=[BASH_BINARY, "-c",
                "echo -n '%s'" % test_string],
            env={},
            fd_pipes={0: sys.stdin.fileno(), 1: null_fd, 2: null_fd},
            scheduler=scheduler,
            logfile=logfile)
        proc.start()
        os.close(null_fd)
        self.assertEqual(proc.wait(), os.EX_OK)
        f = io.open(_unicode_encode(logfile,
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['content'], errors='strict')
        log_content = f.read()
        f.close()
        # When logging passes through a pty, this comparison will fail
        # unless the oflag terminal attributes have the termios.OPOST
        # bit disabled. Otherwise, transformations such as \n -> \r\n
        # may occur.
        self.assertEqual(test_string, log_content)
    finally:
        if logfile:
            try:
                os.unlink(logfile)
            except EnvironmentError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
def _start_post_builddir_lock(self, lock_future=None, start_ipc_daemon=False):
    if lock_future is not None:
        if lock_future is not self._start_future:
            raise AssertionError('lock_future is not self._start_future')
        self._start_future = None
        lock_future.result()

    if start_ipc_daemon:
        self.settings['PORTAGE_IPC_DAEMON'] = "1"
        self._start_ipc_daemon()

    if self.fd_pipes is None:
        self.fd_pipes = {}
    null_fd = None
    if 0 not in self.fd_pipes and \
        self.phase not in self._phases_interactive_whitelist and \
        "interactive" not in self.settings.get("PROPERTIES", "").split():
        null_fd = os.open('/dev/null', os.O_RDONLY)
        self.fd_pipes[0] = null_fd
    try:
        SpawnProcess._start(self)
    finally:
        if null_fd is not None:
            os.close(null_fd)
def _unregister(self):
    """
    Unregister from the scheduler and close open files.
    """

    if not self.unmerge:
        # Populate the vardbapi cache for the new package
        # while its inodes are still hot.
        try:
            self.vartree.dbapi.aux_get(self.settings.mycpv, ["EAPI"])
        except KeyError:
            pass

    self._unlock_vdb()
    if self._elog_reg_id is not None:
        self.scheduler.source_remove(self._elog_reg_id)
        self._elog_reg_id = None
    if self._elog_reader_fd is not None:
        os.close(self._elog_reader_fd)
        self._elog_reader_fd = None
    if self._elog_keys is not None:
        for key in self._elog_keys:
            portage.elog.elog_process(key, self.settings,
                phasefilter=("prerm", "postrm"))
        self._elog_keys = None

    super(MergeProcess, self)._unregister()
def _setup_pipes(fd_pipes, close_fds=True):
    """Setup pipes for a forked process.

    WARNING: When not followed by exec, the close_fds behavior
    can trigger interference from destructors that close file
    descriptors. This interference happens when the garbage
    collector intermittently executes such destructors after their
    corresponding file descriptors have been re-used, leading
    to intermittent "[Errno 9] Bad file descriptor" exceptions in
    forked processes. This problem has been observed with PyPy 1.8,
    and also with CPython under some circumstances (as triggered
    by xmpppy in bug #374335). In order to close a safe subset of
    file descriptors, see portage.locks._close_fds().
    """
    my_fds = {}
    # To protect from cases where direct assignment could
    # clobber needed fds ({1:2, 2:1}) we first dupe the fds
    # into unused fds.
    for fd in fd_pipes:
        my_fds[fd] = os.dup(fd_pipes[fd])
    # Then assign them to what they should be.
    for fd in my_fds:
        os.dup2(my_fds[fd], fd)

    if close_fds:
        # Then close _all_ fds that haven't been explicitly
        # requested to be kept open.
        for fd in get_open_fds():
            if fd not in my_fds:
                try:
                    os.close(fd)
                except OSError:
                    pass
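# Why the dup() pass matters: with fd_pipes = {1: 2, 2: 1}, calling
# dup2(2, 1) directly would destroy the original fd 1 before it could be
# copied onto fd 2. Duplicating every source to a fresh descriptor first
# makes the swap safe. A minimal runnable sketch under that assumption
# (the helper name is hypothetical and omits the close_fds handling):
import os

def _sketch_swap_fds(fd_pipes):
    # Pass 1: copy every source fd to a fresh descriptor.
    dups = {fd: os.dup(src) for fd, src in fd_pipes.items()}
    # Pass 2: move the copies onto their target numbers.
    for fd, tmp in dups.items():
        os.dup2(tmp, fd)
        os.close(tmp)

# Example: swap two pipe write ends without clobbering either.
# r1, w1 = os.pipe(); r2, w2 = os.pipe()
# _sketch_swap_fds({w1: w2, w2: w1})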
def _start_post_builddir_lock(self, lock_future=None, start_ipc_daemon=False):
    if lock_future is not None:
        if lock_future is not self._start_future:
            raise AssertionError('lock_future is not self._start_future')
        self._start_future = None
        if lock_future.cancelled():
            self._build_dir = None
            self.cancelled = True
            self._was_cancelled()
            self._async_wait()
            return
        lock_future.result()

    if start_ipc_daemon:
        self.settings['PORTAGE_IPC_DAEMON'] = "1"
        self._start_ipc_daemon()

    if self.fd_pipes is None:
        self.fd_pipes = {}
    null_fd = None
    if 0 not in self.fd_pipes and \
        self.phase not in self._phases_interactive_whitelist and \
        "interactive" not in self.settings.get("PROPERTIES", "").split():
        null_fd = os.open('/dev/null', os.O_RDONLY)
        self.fd_pipes[0] = null_fd
    try:
        SpawnProcess._start(self)
    finally:
        if null_fd is not None:
            os.close(null_fd)
def testLogfile(self):
    logfile = None
    try:
        fd, logfile = tempfile.mkstemp()
        os.close(fd)
        null_fd = os.open('/dev/null', os.O_RDWR)
        test_string = 2 * "blah blah blah\n"
        task_scheduler = TaskScheduler()
        proc = SpawnProcess(
            args=[BASH_BINARY, "-c",
                "echo -n '%s'" % test_string],
            env={},
            fd_pipes={0: sys.stdin.fileno(), 1: null_fd, 2: null_fd},
            scheduler=task_scheduler.sched_iface,
            logfile=logfile)
        task_scheduler.add(proc)
        task_scheduler.run()
        os.close(null_fd)
        f = codecs.open(_unicode_encode(logfile,
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['content'], errors='strict')
        log_content = f.read()
        f.close()
        # When logging passes through a pty, this comparison will fail
        # unless the oflag terminal attributes have the termios.OPOST
        # bit disabled. Otherwise, transformations such as \n -> \r\n
        # may occur.
        self.assertEqual(test_string, log_content)
    finally:
        if logfile:
            try:
                os.unlink(logfile)
            except EnvironmentError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
def _start(self):
    in_pr, in_pw = os.pipe()
    out_pr, out_pw = os.pipe()
    self._files = {}
    self._files['pipe_in'] = in_pr
    self._files['pipe_out'] = out_pw

    fcntl.fcntl(in_pr, fcntl.F_SETFL,
        fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)

    # FD_CLOEXEC is enabled by default in Python >=3.4.
    if sys.hexversion < 0x3040000:
        try:
            fcntl.FD_CLOEXEC
        except AttributeError:
            pass
        else:
            fcntl.fcntl(in_pr, fcntl.F_SETFD,
                fcntl.fcntl(in_pr, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

    self._reg_id = self.scheduler.io_add_watch(in_pr,
        self.scheduler.IO_IN, self._output_handler)
    self._registered = True
    self._proc = SpawnProcess(
        args=[portage._python_interpreter,
            os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
        env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
        fd_pipes={0: out_pr, 1: in_pw, 2: sys.__stderr__.fileno()},
        scheduler=self.scheduler)
    self._proc.addExitListener(self._proc_exit)
    self._proc.start()
    os.close(out_pr)
    os.close(in_pw)
def perform_checksum(filename, hashname="MD5", calc_prelink=0):
    """
    Run a specific checksum against a file. The filename can
    be either unicode or an encoded byte string. If filename
    is unicode then a UnicodeDecodeError will be raised if
    necessary.

    @param filename: File to run the checksum against
    @type filename: String
    @param hashname: The type of hash function to run
    @type hashname: String
    @param calc_prelink: Whether or not to reverse prelink before running the checksum
    @type calc_prelink: Integer
    @rtype: Tuple
    @return: The hash and size of the data
    """
    global prelink_capable
    # Make sure filename is encoded with the correct encoding before
    # it is passed to spawn (for prelink) and/or the hash function.
    filename = _unicode_encode(filename,
        encoding=_encodings['fs'], errors='strict')
    myfilename = filename
    prelink_tmpfile = None
    try:
        if (calc_prelink and prelink_capable and
            is_prelinkable_elf(filename)):
            # Create non-prelinked temporary file to checksum.
            # Files rejected by prelink are summed in place.
            try:
                tmpfile_fd, prelink_tmpfile = tempfile.mkstemp()
                try:
                    retval = portage.process.spawn(
                        [PRELINK_BINARY, "--verify", filename],
                        fd_pipes={1: tmpfile_fd})
                finally:
                    os.close(tmpfile_fd)
                if retval == os.EX_OK:
                    myfilename = prelink_tmpfile
            except portage.exception.CommandNotFound:
                # This happens during uninstallation of prelink.
                prelink_capable = False
        try:
            if hashname not in hashfunc_map:
                raise portage.exception.DigestException(hashname +
                    " hash function not available (needs dev-python/pycrypto)")
            myhash, mysize = hashfunc_map[hashname](myfilename)
        except (OSError, IOError) as e:
            if e.errno in (errno.ENOENT, errno.ESTALE):
                raise portage.exception.FileNotFound(myfilename)
            elif e.errno == portage.exception.PermissionDenied.errno:
                raise portage.exception.PermissionDenied(myfilename)
            raise
        return myhash, mysize
    finally:
        if prelink_tmpfile:
            try:
                os.unlink(prelink_tmpfile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
def _testPipeReader(self, master_fd, slave_fd, test_string):
    """
    Use a poll loop to read data from a pipe and assert that
    the data written to the pipe is identical to the data
    read from the pipe.
    """

    # WARNING: It is very important to use unbuffered mode here,
    # in order to avoid issue 5380 with python3.
    master_file = os.fdopen(master_fd, 'rb', 0)
    scheduler = global_event_loop()

    consumer = PipeReader(
        input_files={"producer": master_file},
        _use_array=self._use_array,
        scheduler=scheduler)

    producer = PopenProcess(
        pipe_reader=consumer,
        proc=subprocess.Popen(["bash", "-c", self._echo_cmd % test_string],
            stdout=slave_fd),
        scheduler=scheduler)

    producer.start()
    os.close(slave_fd)
    producer.wait()
    consumer.wait()

    self.assertEqual(producer.returncode, os.EX_OK)
    self.assertEqual(consumer.returncode, os.EX_OK)

    return consumer.getvalue().decode('ascii', 'replace')
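# The docstring's "poll loop" amounts to: register the read end, drain
# until EOF, compare. A minimal standalone version using the standard
# library selectors module, with no portage scheduler involved (the
# helper name is hypothetical):
import os
import selectors
import subprocess

def _sketch_pipe_roundtrip(test_string):
    master_fd, slave_fd = os.pipe()
    proc = subprocess.Popen(
        ["bash", "-c", "echo -n '%s'" % test_string], stdout=slave_fd)
    os.close(slave_fd)  # keep only the producer's copy of the write end

    sel = selectors.DefaultSelector()
    sel.register(master_fd, selectors.EVENT_READ)
    chunks = []
    while True:
        sel.select()  # block until the read end is ready
        data = os.read(master_fd, 4096)
        if not data:  # EOF: the producer closed its write end
            break
        chunks.append(data)
    sel.unregister(master_fd)
    os.close(master_fd)
    proc.wait()
    return b"".join(chunks).decode("ascii", "replace")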
async def run(self):
    self.expected = getattr(self, "expected", None) or {"returncode": 0}
    if self.debug:
        fd_pipes = {}
        pr = None
        pw = None
    else:
        pr, pw = os.pipe()
        fd_pipes = {1: pw, 2: pw}
        pr = open(pr, "rb", 0)

    proc = AsyncFunction(
        scheduler=asyncio.get_event_loop(),
        target=self._subprocess,
        args=(self.args, self.cwd, self.env, self.expected, self.debug),
        fd_pipes=fd_pipes,
    )

    proc.start()
    if pw is not None:
        os.close(pw)

    await proc.async_wait()

    if pr is None:
        stdio = None
    else:
        stdio = await _reader(pr)

    self.result = {
        "stdio": stdio,
        "result": proc.result,
    }
def _unregister(self):
    if self._reg_id is not None:
        self.scheduler.source_remove(self._reg_id)
        self._reg_id = None

    if self.input_fd is not None:
        if isinstance(self.input_fd, int):
            os.close(self.input_fd)
        else:
            self.input_fd.close()
        self.input_fd = None

    if self.stdout_fd is not None:
        os.close(self.stdout_fd)
        self.stdout_fd = None

    if self._log_file is not None:
        self._log_file.close()
        self._log_file = None

    if self._log_file_real is not None:
        # Avoid "ResourceWarning: unclosed file" since python 3.2.
        self._log_file_real.close()
        self._log_file_real = None

    self._registered = False
def _reopen_input(self):
    """
    Re-open the input stream, in order to suppress
    POLLHUP events (bug #339976).
    """
    self.scheduler.source_remove(self._reg_id)
    os.close(self._files.pipe_in)
    self._files.pipe_in = \
        os.open(self.input_fifo, os.O_RDONLY | os.O_NONBLOCK)

    # FD_CLOEXEC is enabled by default in Python >=3.4.
    if sys.hexversion < 0x3040000 and fcntl is not None:
        try:
            fcntl.FD_CLOEXEC
        except AttributeError:
            pass
        else:
            fcntl.fcntl(self._files.pipe_in, fcntl.F_SETFD,
                fcntl.fcntl(self._files.pipe_in,
                    fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

    self._reg_id = self.scheduler.io_add_watch(
        self._files.pipe_in,
        self._registered_events, self._input_handler)
def _start(self):
    in_pr, in_pw = os.pipe()
    out_pr, out_pw = os.pipe()
    self._files = {}
    self._files["pipe_in"] = in_pr
    self._files["pipe_out"] = out_pw

    fcntl.fcntl(in_pr, fcntl.F_SETFL,
        fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)

    self.scheduler.add_reader(in_pr, self._output_handler)
    self._registered = True
    self._proc = SpawnProcess(
        args=[
            portage._python_interpreter,
            os.path.join(portage._bin_path, "lock-helper.py"),
            self.path,
        ],
        env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
        fd_pipes={0: out_pr, 1: in_pw, 2: sys.__stderr__.fileno()},
        scheduler=self.scheduler,
    )
    self._proc.addExitListener(self._proc_exit)
    self._proc.start()
    os.close(out_pr)
    os.close(in_pw)
def _testPipeReader(self, master_fd, slave_fd, test_string):
    """
    Use a poll loop to read data from a pipe and assert that
    the data written to the pipe is identical to the data
    read from the pipe.
    """

    # WARNING: It is very important to use unbuffered mode here,
    # in order to avoid issue 5380 with python3.
    master_file = os.fdopen(master_fd, 'rb', 0)
    scheduler = global_event_loop()

    consumer = PipeReader(
        input_files={"producer": master_file},
        _use_array=self._use_array,
        scheduler=scheduler)
    consumer.start()

    producer = scheduler.run_until_complete(
        asyncio.create_subprocess_exec(
            "bash", "-c", self._echo_cmd % test_string,
            stdout=slave_fd, loop=scheduler))

    os.close(slave_fd)
    scheduler.run_until_complete(producer.wait())
    scheduler.run_until_complete(consumer.async_wait())

    self.assertEqual(producer.returncode, os.EX_OK)
    self.assertEqual(consumer.returncode, os.EX_OK)

    return consumer.getvalue().decode('ascii', 'replace')
def _unregister(self):
    if self.input_fd is not None:
        if isinstance(self.input_fd, int):
            self.scheduler.remove_reader(self.input_fd)
            os.close(self.input_fd)
        else:
            self.scheduler.remove_reader(self.input_fd.fileno())
            self.input_fd.close()
        self.input_fd = None

    if self._io_loop_task is not None:
        self._io_loop_task.done() or self._io_loop_task.cancel()
        self._io_loop_task = None

    if self.stdout_fd is not None:
        os.close(self.stdout_fd)
        self.stdout_fd = None

    if self._log_file is not None:
        self.scheduler.remove_writer(self._log_file.fileno())
        self._log_file.close()
        self._log_file = None

    if self._log_file_real is not None:
        # Avoid "ResourceWarning: unclosed file" since python 3.2.
        self._log_file_real.close()
        self._log_file_real = None

    self._registered = False
def _spawn(self, args, fd_pipes=None, **kwargs):
    """
    Override SpawnProcess._spawn to fork a subprocess that calls
    self._run(). This uses multiprocessing.Process in order to leverage
    any pre-fork and post-fork interpreter housekeeping that it provides,
    promoting a healthy state for the forked interpreter.
    """
    # Since multiprocessing.Process closes sys.__stdin__, create a
    # temporary duplicate of fd_pipes[0] so that sys.__stdin__ can
    # be restored in the subprocess, in case this is needed for
    # things like PROPERTIES=interactive support.
    stdin_dup = None
    try:
        stdin_fd = fd_pipes.get(0)
        if stdin_fd is not None and stdin_fd == portage._get_stdin().fileno():
            stdin_dup = os.dup(stdin_fd)
            fcntl.fcntl(stdin_dup, fcntl.F_SETFD,
                fcntl.fcntl(stdin_fd, fcntl.F_GETFD))
            fd_pipes[0] = stdin_dup
        self._proc = multiprocessing.Process(
            target=self._bootstrap, args=(fd_pipes,))
        self._proc.start()
    finally:
        if stdin_dup is not None:
            os.close(stdin_dup)

    self._proc_join_task = asyncio.ensure_future(
        self._proc_join(self._proc, loop=self.scheduler),
        loop=self.scheduler)
    self._proc_join_task.add_done_callback(
        functools.partial(self._proc_join_done, self._proc))

    return [self._proc.pid]
def _close_fds():
    """
    This is intended to be called after a fork, in order to close file
    descriptors for locks held by the parent process. This can be called
    safely after a fork without exec, unlike the _setup_pipes close_fds
    behavior.
    """
    while _open_fds:
        os.close(_open_fds.pop())
def _fetch_uri(self, uri):

    if self.config.options.dry_run:
        # Simply report success.
        logging.info("dry-run: fetch '%s' from '%s'" % (self.distfile, uri))
        self._success()
        self.returncode = os.EX_OK
        self._async_wait()
        return

    if self.config.options.temp_dir:
        self._fetch_tmp_dir_info = "temp-dir"
        distdir = self.config.options.temp_dir
    else:
        self._fetch_tmp_dir_info = "distfiles"
        distdir = self.config.options.distfiles

    tmp_basename = self.distfile + "._emirrordist_fetch_.%s" % portage.getpid()

    variables = {"DISTDIR": distdir, "URI": uri, "FILE": tmp_basename}

    self._fetch_tmp_file = os.path.join(distdir, tmp_basename)

    try:
        os.unlink(self._fetch_tmp_file)
    except OSError:
        pass

    args = portage.util.shlex_split(default_fetchcommand)
    args = [portage.util.varexpand(x, mydict=variables) for x in args]

    args = [
        _unicode_encode(x, encoding=_encodings["fs"], errors="strict")
        for x in args
    ]

    null_fd = os.open(os.devnull, os.O_RDONLY)
    fetcher = PopenProcess(
        background=self.background,
        proc=subprocess.Popen(
            args, stdin=null_fd,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
        scheduler=self.scheduler,
    )
    os.close(null_fd)

    fetcher.pipe_reader = PipeLogger(
        background=self.background,
        input_fd=fetcher.proc.stdout,
        log_file_path=self._log_path,
        scheduler=self.scheduler,
    )

    self._start_task(fetcher, self._fetcher_exit)
def _spawn(self, args, fd_pipes=None, **kwargs):
    """
    Fork a subprocess, apply local settings, and call fetch().
    """

    parent_pid = os.getpid()
    pid = None
    try:
        pid = os.fork()

        if pid != 0:
            if not isinstance(pid, int):
                raise AssertionError(
                    "fork returned non-integer: %s" % (repr(pid),))
            return [pid]

        rval = 1
        try:
            # Use default signal handlers in order to avoid problems
            # killing subprocesses as reported in bug #353239.
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)

            # Unregister SIGCHLD handler and wakeup_fd for the parent
            # process's event loop (bug 655656).
            signal.signal(signal.SIGCHLD, signal.SIG_DFL)
            try:
                wakeup_fd = signal.set_wakeup_fd(-1)
                if wakeup_fd > 0:
                    os.close(wakeup_fd)
            except (ValueError, OSError):
                pass

            portage.locks._close_fds()
            # We don't exec, so use close_fds=False
            # (see _setup_pipes docstring).
            portage.process._setup_pipes(fd_pipes, close_fds=False)

            rval = self._run()
        except SystemExit:
            raise
        except:
            traceback.print_exc()
            # os._exit() skips stderr flush!
            sys.stderr.flush()
        finally:
            os._exit(rval)

    finally:
        if pid == 0 or (pid is None and os.getpid() != parent_pid):
            # Call os._exit() from a finally block in order
            # to suppress any finally blocks from earlier
            # in the call stack (see bug #345289). This
            # finally block has to be setup before the fork
            # in order to avoid a race condition.
            os._exit(1)
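# The fork-and-_exit structure above guarantees the child can never escape
# into the parent's call stack: even if an exception propagates, the outer
# finally block forces os._exit(). A minimal runnable sketch of the same
# pattern (the function name and the `work` callable are hypothetical):
import os
import sys
import traceback

def _sketch_fork_worker(work):
    parent_pid = os.getpid()
    pid = None
    try:
        pid = os.fork()
        if pid != 0:
            return pid  # parent: hand back the child pid

        rval = 1
        try:
            rval = work()  # child: run the job, capture its exit status
        except Exception:
            traceback.print_exc()
            sys.stderr.flush()  # os._exit() skips the normal flush
        finally:
            os._exit(rval)
    finally:
        # If we are somehow still executing in the child here, force an
        # exit rather than unwinding into the parent's call stack.
        if pid == 0 or (pid is None and os.getpid() != parent_pid):
            os._exit(1)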
def _check_sig_key(self):
    null_fd = os.open('/dev/null', os.O_RDONLY)
    popen_proc = PopenProcess(
        proc=subprocess.Popen(
            ["gpg", "--verify", self._manifest_path],
            stdin=null_fd, stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT),
        pipe_reader=PipeReader())
    os.close(null_fd)
    popen_proc.pipe_reader.input_files = {
        "producer": popen_proc.proc.stdout}
    self._start_task(popen_proc, self._check_sig_key_exit)
def _reopen_input(self):
    """
    Re-open the input stream, in order to suppress
    POLLHUP events (bug #339976).
    """
    self.scheduler.remove_reader(self._files.pipe_in)
    os.close(self._files.pipe_in)
    self._files.pipe_in = os.open(self.input_fifo, os.O_RDONLY | os.O_NONBLOCK)
    self.scheduler.add_reader(self._files.pipe_in, self._input_handler)
def _unregister(self):
    self._registered = False

    if self._files is not None:
        try:
            pipe_in = self._files.pop('pipe_in')
        except KeyError:
            pass
        else:
            self.scheduler.remove_reader(pipe_in)
            os.close(pipe_in)
def get_commit_message_with_editor(editor, message=None):
    """
    Execute editor with a temporary file as its argument
    and return the file content afterwards.

    @param editor: An EDITOR value from the environment
    @type: string
    @param message: An iterable of lines to show in the editor.
    @type: iterable
    @rtype: string or None
    @return: A string on success or None if an error occurs.
    """
    from tempfile import mkstemp
    fd, filename = mkstemp()
    try:
        os.write(fd, _unicode_encode(
            "\n# Please enter the commit message " +
            "for your changes.\n# (Comment lines starting " +
            "with '#' will not be included)\n",
            encoding=_encodings['content'], errors='backslashreplace'))
        if message:
            os.write(fd, _unicode_encode("#\n",
                encoding=_encodings['content'],
                errors='backslashreplace'))
            for line in message:
                os.write(fd, _unicode_encode("#" + line,
                    encoding=_encodings['content'],
                    errors='backslashreplace'))
        os.close(fd)
        retval = os.system(editor + " '%s'" % filename)
        if not (os.WIFEXITED(retval) and os.WEXITSTATUS(retval) == os.EX_OK):
            return None
        try:
            mylines = codecs.open(_unicode_encode(filename,
                encoding=_encodings['fs'], errors='strict'),
                mode='r', encoding=_encodings['content'],
                errors='replace').readlines()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            del e
            return None
        return "".join(line for line in mylines if not line.startswith("#"))
    finally:
        try:
            os.unlink(filename)
        except OSError:
            pass
def _start(self):
    pr, pw = os.pipe()
    self.fd_pipes = {}
    self.fd_pipes[pw] = pw
    self._async_func_reader_pw = pw
    self._async_func_reader = PipeReader(
        input_files={"input": pr},
        scheduler=self.scheduler)
    self._async_func_reader.addExitListener(self._async_func_reader_exit)
    self._async_func_reader.start()
    ForkProcess._start(self)
    os.close(pw)
def _unregister(self):
    """
    Unregister from the scheduler and close open files.
    """
    self._registered = False

    if self._files is not None:
        for f in self._files.values():
            self.scheduler.remove_reader(f)
            os.close(f)
        self._files = None
def _reopen_input(self):
    """
    Re-open the input stream, in order to suppress
    POLLHUP events (bug #339976).
    """
    self.scheduler.unregister(self._reg_id)
    os.close(self._files.pipe_in)
    self._files.pipe_in = \
        os.open(self.input_fifo, os.O_RDONLY | os.O_NONBLOCK)
    self._reg_id = self.scheduler.register(
        self._files.pipe_in,
        self._registered_events, self._input_handler)
def _test_lock(fd, lock_path):
    os.close(fd)
    try:
        with open(lock_path, 'a') as f:
            fcntl.lockf(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except EnvironmentError as e:
        if e.errno == errno.EAGAIN:
            # Parent process holds lock, as expected.
            sys.exit(0)
    # Something went wrong.
    sys.exit(1)
def _reopen_input(self):
    """
    Re-open the input stream, in order to suppress
    POLLHUP events (bug #339976).
    """
    self.scheduler.source_remove(self._reg_id)
    os.close(self._files.pipe_in)
    self._files.pipe_in = \
        os.open(self.input_fifo, os.O_RDONLY | os.O_NONBLOCK)
    self._reg_id = self.scheduler.io_add_watch(
        self._files.pipe_in,
        self._registered_events, self._input_handler)
def _test_lock(fd, lock_path):
    os.close(fd)
    try:
        with open(lock_path, "a") as f:
            lock_fn(lock_path, f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except (TryAgain, EnvironmentError) as e:
        if isinstance(e, TryAgain) or e.errno == errno.EAGAIN:
            # Parent process holds lock, as expected.
            sys.exit(0)
    # Something went wrong.
    sys.exit(1)
def _start(self):
    pr, pw = os.pipe()
    self.fd_pipes = {}
    self.fd_pipes[pw] = pw
    self._digest_pw = pw
    self._digest_pipe_reader = PipeReader(
        input_files={"input": pr},
        scheduler=self.scheduler)
    self._digest_pipe_reader.addExitListener(self._digest_pipe_reader_exit)
    self._digest_pipe_reader.start()
    ForkProcess._start(self)
    os.close(pw)
def get_commit_message_with_editor(editor, message=None):
    """
    Execute editor with a temporary file as its argument
    and return the file content afterwards.

    @param editor: An EDITOR value from the environment
    @type: string
    @param message: An iterable of lines to show in the editor.
    @type: iterable
    @rtype: string or None
    @return: A string on success or None if an error occurs.
    """
    fd, filename = mkstemp()
    try:
        os.write(
            fd,
            _unicode_encode(
                _(
                    "\n# Please enter the commit message "
                    + "for your changes.\n# (Comment lines starting "
                    + "with '#' will not be included)\n"
                ),
                encoding=_encodings["content"],
                errors="backslashreplace",
            ),
        )
        if message:
            os.write(fd, b"#\n")
            for line in message:
                os.write(
                    fd,
                    _unicode_encode(
                        "#" + line,
                        encoding=_encodings["content"],
                        errors="backslashreplace",
                    ),
                )
        os.close(fd)
        retval = os.system(editor + " '%s'" % filename)
        if not (os.WIFEXITED(retval) and os.WEXITSTATUS(retval) == os.EX_OK):
            return None
        try:
            with io.open(
                _unicode_encode(filename,
                    encoding=_encodings["fs"], errors="strict"),
                mode="r",
                encoding=_encodings["content"],
                errors="replace",
            ) as f:
                mylines = f.readlines()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            del e
            return None
        return "".join(line for line in mylines if not line.startswith("#"))
    finally:
        try:
            os.unlink(filename)
        except OSError:
            pass
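# Hedged usage sketch: open the user's $EDITOR on a prefilled comment
# block and keep only the non-comment lines they wrote. The fallback
# editor name and the sample status line are assumptions for illustration.
# commit_msg = get_commit_message_with_editor(
#     os.environ.get("EDITOR", "nano"),
#     message=["modified:   pym/portage/locks.py"])
# if commit_msg is None:
#     print("editor aborted or exited with an error")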
def _unregister(self):
    self._registered = False

    if self._reg_id is not None:
        self.scheduler.unregister(self._reg_id)
        self._reg_id = None

    if self._files is not None:
        try:
            pipe_in = self._files.pop('pipe_in')
        except KeyError:
            pass
        else:
            os.close(pipe_in)
def unlock(self):
    if self._proc is None:
        raise AssertionError('not locked')
    if self.returncode is None:
        raise AssertionError('lock not acquired yet')
    if self.returncode != os.EX_OK:
        raise AssertionError("lock process failed with returncode %s"
            % (self.returncode,))
    self._unlocked = True
    os.write(self._files['pipe_out'], b'\0')
    os.close(self._files['pipe_out'])
    self._files = None
    self._proc.wait()
    self._proc = None
def _unlock(self):
    if self._proc is None:
        raise AssertionError('not locked')
    if not self._acquired:
        raise AssertionError('lock not acquired yet')
    if self.returncode != os.EX_OK:
        raise AssertionError("lock process failed with returncode %s"
            % (self.returncode,))
    if self._unlock_future is not None:
        raise AssertionError("already unlocked")
    self._unlock_future = self.scheduler.create_future()
    os.write(self._files['pipe_out'], b'\0')
    os.close(self._files['pipe_out'])
    self._files = None
def _unregister(self):
    self._registered = False

    if self._thread is not None:
        self._thread.join()
        self._thread = None
    if self._reg_id is not None:
        self.scheduler.unregister(self._reg_id)
        self._reg_id = None
    if self._files is not None:
        for f in self._files.values():
            os.close(f)
        self._files = None
def _unregister(self):
    """
    Unregister from the scheduler and close open files.
    """
    self._registered = False

    if self._reg_id is not None:
        self.scheduler.source_remove(self._reg_id)
        self._reg_id = None

    if self._files is not None:
        for f in self._files.values():
            os.close(f)
        self._files = None
def hardlink_lockfile(lockfilename, max_wait=14400):
    """Does the NFS, hardlink shuffle to ensure locking on the disk.
    We create a PRIVATE lockfile, that is just a placeholder on the disk.
    Then we HARDLINK the real lockfile to that private file.
    If our file has 2 references, then we have the lock. :)
    Otherwise we lather, rinse, and repeat.
    We default to a 4 hour timeout.
    """

    start_time = time.time()
    myhardlock = hardlock_name(lockfilename)
    reported_waiting = False

    while time.time() < (start_time + max_wait):
        # We only need it to exist.
        myfd = os.open(myhardlock, os.O_CREAT | os.O_RDWR, 0o660)
        os.close(myfd)

        if not os.path.exists(myhardlock):
            raise FileNotFound(
                _("Created lockfile is missing: %(filename)s") %
                {"filename": myhardlock})

        try:
            os.link(myhardlock, lockfilename)
        except OSError:
            pass

        if hardlink_is_mine(myhardlock, lockfilename):
            # We have the lock.
            if reported_waiting:
                writemsg("\n", noiselevel=-1)
            return True

        if reported_waiting:
            writemsg(".", noiselevel=-1)
        else:
            reported_waiting = True
            from portage.const import PORTAGE_BIN_PATH
            msg = _("\nWaiting on (hardlink) lockfile:"
                " (one '.' per 3 seconds)\n"
                "%(bin_path)s/clean_locks can fix stuck locks.\n"
                "Lockfile: %(lockfilename)s\n") % \
                {"bin_path": PORTAGE_BIN_PATH,
                "lockfilename": lockfilename}
            writemsg(msg, noiselevel=-1)
        time.sleep(3)

    os.unlink(myhardlock)
    return False
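# The hardlink trick works because link() is atomic even on NFS: if the
# private placeholder file ends up with a link count of 2, the caller
# owns the lock. A minimal self-contained sketch of that ownership test
# (the helper name is hypothetical; real portage's hardlink_is_mine also
# compares device numbers):
import os

def _sketch_hardlink_is_mine(myhardlock, lockfilename):
    try:
        st = os.stat(lockfilename)
        # Two links to the same inode means our link() call won the race.
        return (st.st_nlink == 2
            and st.st_ino == os.stat(myhardlock).st_ino)
    except OSError:
        return False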
def _unregister(self):
    """
    Unregister from the scheduler and close open files.
    """
    self._registered = False

    if self.input_files is not None:
        for f in self.input_files.values():
            if isinstance(f, int):
                self.scheduler.remove_reader(f)
                os.close(f)
            else:
                self.scheduler.remove_reader(f.fileno())
                f.close()
        self.input_files = None
def _testPipeReader(self, test_string):
    """
    Use a poll loop to read data from a pipe and assert that
    the data written to the pipe is identical to the data
    read from the pipe.
    """

    if self._use_pty:
        got_pty, master_fd, slave_fd = _create_pty_or_pipe()
        if not got_pty:
            os.close(slave_fd)
            os.close(master_fd)
            skip_reason = "pty not acquired"
            self.portage_skip = skip_reason
            self.fail(skip_reason)
            return
    else:
        master_fd, slave_fd = os.pipe()

    # WARNING: It is very important to use unbuffered mode here,
    # in order to avoid issue 5380 with python3.
    master_file = os.fdopen(master_fd, 'rb', 0)
    slave_file = os.fdopen(slave_fd, 'wb', 0)
    task_scheduler = TaskScheduler(max_jobs=2)
    producer = SpawnProcess(
        args=["bash", "-c", self._echo_cmd % test_string],
        env=os.environ, fd_pipes={1: slave_fd},
        scheduler=task_scheduler.sched_iface)
    task_scheduler.add(producer)
    slave_file.close()

    consumer = PipeReader(
        input_files={"producer": master_file},
        scheduler=task_scheduler.sched_iface,
        _use_array=self._use_array)
    task_scheduler.add(consumer)

    # This will ensure that both tasks have exited, which
    # is necessary to avoid "ResourceWarning: unclosed file"
    # warnings since Python 3.2 (and also ensures that we
    # don't leave any zombie child processes).
    task_scheduler.run()

    self.assertEqual(producer.returncode, os.EX_OK)
    self.assertEqual(consumer.returncode, os.EX_OK)

    return consumer.getvalue().decode('ascii', 'replace')
def _unregister(self):
    """
    Unregister from the scheduler and close open files.
    """
    self._registered = False

    if self._reg_id is not None:
        self.scheduler.unregister(self._reg_id)
        self._reg_id = None

    if self._files is not None:
        for f in self._files.values():
            if isinstance(f, int):
                os.close(f)
            else:
                f.close()
        self._files = None
def _get_lock_fn():
    """
    Returns fcntl.lockf if proven to work, and otherwise returns
    fcntl.flock. On some platforms fcntl.lockf is known to be broken.
    """
    global _lock_fn
    if _lock_fn is not None:
        return _lock_fn

    def _test_lock(fd, lock_path):
        os.close(fd)
        try:
            with open(lock_path, 'a') as f:
                fcntl.lockf(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        except EnvironmentError as e:
            if e.errno == errno.EAGAIN:
                # Parent process holds lock, as expected.
                sys.exit(0)
        # Something went wrong.
        sys.exit(1)

    fd, lock_path = tempfile.mkstemp()
    try:
        try:
            fcntl.lockf(fd, fcntl.LOCK_EX)
        except EnvironmentError:
            pass
        else:
            proc = multiprocessing.Process(
                target=_test_lock, args=(fd, lock_path))
            proc.start()
            proc.join()
            if proc.exitcode == os.EX_OK:
                # Use fcntl.lockf because the test passed.
                _lock_fn = fcntl.lockf
                return _lock_fn
    finally:
        os.close(fd)
        os.unlink(lock_path)

    # Fall back to fcntl.flock.
    _lock_fn = fcntl.flock
    return _lock_fn
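# Hedged usage sketch for the probe above: fcntl.lockf and fcntl.flock
# share the (fd, operation) call signature, so the selected function can
# be used interchangeably by callers. The lock file path here is an
# assumption for illustration.
# lock_fn = _get_lock_fn()
# with open("/tmp/example.lock", "a") as f:
#     lock_fn(f.fileno(), fcntl.LOCK_EX)   # block until acquired
#     ...  # critical section
#     lock_fn(f.fileno(), fcntl.LOCK_UN)   # release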