def _testPipeLoggerToPipe(self, test_string, loop=None):
    """
    Test PipeLogger writing to a pipe connected to a PipeReader.
    This verifies that PipeLogger does not deadlock when writing
    to a pipe that's drained by a PipeReader running in the same
    process (requires non-blocking write).
    """
    echo_proc = PopenProcess(
        proc=subprocess.Popen(
            ["bash", "-c", self._echo_cmd % test_string],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT),
        scheduler=loop)

    read_end, write_end = os.pipe()

    # The logger drains the producer's stdout and writes into the
    # pipe that _reader drains in the same process.
    logger = echo_proc.pipe_reader = PipeLogger(
        background=True,
        input_fd=echo_proc.proc.stdout,
        log_file_path=os.fdopen(write_end, 'wb', 0))

    pending_read = _reader(read_end, loop=loop)

    yield echo_proc.async_start()
    data = yield pending_read
    yield echo_proc.async_wait()
    yield logger.async_wait()

    self.assertEqual(echo_proc.returncode, os.EX_OK)
    self.assertEqual(logger.returncode, os.EX_OK)

    coroutine_return(data.decode('ascii', 'replace'))
def _fetch_uri(self, uri):
    """
    Kick off an asynchronous fetch of ``uri`` into a uniquely named
    temporary file, or short-circuit with success in dry-run mode.
    """
    if self.config.options.dry_run:
        # Simply report success.
        logging.info("dry-run: fetch '%s' from '%s'" % (self.distfile, uri))
        self._success()
        self.returncode = os.EX_OK
        self._async_wait()
        return

    if self.config.options.temp_dir:
        self._fetch_tmp_dir_info = "temp-dir"
        download_dir = self.config.options.temp_dir
    else:
        self._fetch_tmp_dir_info = "distfiles"
        download_dir = self.config.options.distfiles

    temp_name = self.distfile + "._emirrordist_fetch_.%s" % portage.getpid()
    self._fetch_tmp_file = os.path.join(download_dir, temp_name)

    # Remove any stale temp file left over from a previous attempt.
    try:
        os.unlink(self._fetch_tmp_file)
    except OSError:
        pass

    expand_map = {"DISTDIR": download_dir, "URI": uri, "FILE": temp_name}
    cmd = [
        _unicode_encode(
            portage.util.varexpand(token, mydict=expand_map),
            encoding=_encodings["fs"],
            errors="strict",
        )
        for token in portage.util.shlex_split(default_fetchcommand)
    ]

    devnull_fd = os.open(os.devnull, os.O_RDONLY)
    fetcher = PopenProcess(
        background=self.background,
        proc=subprocess.Popen(
            cmd,
            stdin=devnull_fd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ),
        scheduler=self.scheduler,
    )
    # Safe to close our copy once the child has inherited it.
    os.close(devnull_fd)

    fetcher.pipe_reader = PipeLogger(
        background=self.background,
        input_fd=fetcher.proc.stdout,
        log_file_path=self._log_path,
        scheduler=self.scheduler,
    )

    self._start_task(fetcher, self._fetcher_exit)
def _start(self):
    """
    Set up the logging pipeline for this task.

    If a log path is configured and a usable log_filter_file command
    is set, spawn that command as a subprocess with its stdin/stdout
    connected via two pipes, so written log data flows through the
    filter before reaching the PipeLogger. Otherwise (or if spawning
    the filter fails), connect a plain pipe directly to the
    PipeLogger. Finally, schedule the _main coroutine that manages
    these tasks.

    NOTE(review): assumes self._stdin starts out as None (presumably
    initialized elsewhere in the class) — confirm.
    """
    filter_proc = None
    log_input = None
    if self.log_path is not None:
        log_filter_file = self.log_filter_file
        if log_filter_file is not None:
            # Normalize an empty/whitespace-only command to None.
            split_value = shlex_split(log_filter_file)
            log_filter_file = split_value if split_value else None
        if log_filter_file:
            # filter_input: filter process reads our writes from here.
            # stdin: our write end, exposed as self._stdin.
            filter_input, stdin = os.pipe()
            # log_input: PipeLogger reads the filter's output from here.
            # filter_output: filter process writes stdout/stderr here.
            log_input, filter_output = os.pipe()
            try:
                filter_proc = PopenProcess(
                    proc=subprocess.Popen(
                        log_filter_file,
                        env=self.env,
                        stdin=filter_input,
                        stdout=filter_output,
                        stderr=filter_output,
                    ),
                    scheduler=self.scheduler,
                )
                filter_proc.start()
            except EnvironmentError:
                # Maybe the command is missing or broken somehow...
                # Close all four pipe ends; the fallback below creates
                # a fresh direct pipe instead.
                os.close(filter_input)
                os.close(stdin)
                os.close(log_input)
                os.close(filter_output)
            else:
                # Unbuffered binary mode is required so writes reach
                # the filter immediately.
                self._stdin = os.fdopen(stdin, "wb", 0)
                # The child inherited these ends; close our copies.
                os.close(filter_input)
                os.close(filter_output)

    if self._stdin is None:
        # Since log_filter_file is unspecified or refers to a file
        # that is missing or broken somehow, create a pipe that
        # logs directly to pipe_logger.
        log_input, stdin = os.pipe()
        self._stdin = os.fdopen(stdin, "wb", 0)

    # Set background=True so that pipe_logger does not log to stdout.
    pipe_logger = PipeLogger(
        background=True,
        scheduler=self.scheduler,
        input_fd=log_input,
        log_file_path=self.log_path,
    )

    pipe_logger.start()

    # Keep a cancel callback bound to the running subtasks, then
    # schedule the main coroutine and its exit handler.
    self._main_task_cancel = functools.partial(
        self._main_cancel, filter_proc, pipe_logger
    )
    self._main_task = asyncio.ensure_future(
        self._main(filter_proc, pipe_logger), loop=self.scheduler
    )
    self._main_task.add_done_callback(self._main_exit)
def _start_gpg_proc(self):
    """
    Expand self.gpg_cmd with gpg_vars (plus FILE pointing at the
    manifest) and start it, with its output routed through a
    PipeLogger.
    """
    expand_vars = {} if self.gpg_vars is None else self.gpg_vars.copy()
    expand_vars["FILE"] = self._manifest_path

    cmd_line = shlex_split(varexpand(self.gpg_cmd, mydict=expand_vars))

    gpg_proc = PopenProcess(
        proc=subprocess.Popen(
            cmd_line,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT))
    # PipeLogger echoes the output and efficiently monitors for
    # process exit by listening for the stdout EOF event.
    gpg_proc.pipe_reader = PipeLogger(
        background=self.background,
        input_fd=gpg_proc.proc.stdout,
        scheduler=self.scheduler)

    self._start_task(gpg_proc, self._gpg_proc_exit)
def _testPipeLogger(self, test_string):
    """
    Run a bash echo producer through PipeLogger into a temporary log
    file, assert both processes exit successfully, and return the
    logged content decoded as ascii.
    """
    producer = PopenProcess(proc=subprocess.Popen(
        ["bash", "-c", self._echo_cmd % test_string],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
        scheduler=global_event_loop())

    fd, log_file_path = tempfile.mkstemp()
    try:
        # Duplicate the stdout fd for PipeLogger, since PipeLogger
        # takes ownership of the fd it is given; otherwise the fd
        # would have two owners (the Popen file object and the
        # logger).
        consumer = PipeLogger(background=True,
            input_fd=os.dup(producer.proc.stdout.fileno()),
            log_file_path=log_file_path)

        # Close the stdout pipe, since we duplicated it, and it
        # must be closed in order to avoid a ResourceWarning.
        producer.proc.stdout.close()
        producer.pipe_reader = consumer

        producer.start()
        producer.wait()

        self.assertEqual(producer.returncode, os.EX_OK)
        self.assertEqual(consumer.returncode, os.EX_OK)

        with open(log_file_path, 'rb') as f:
            content = f.read()

    finally:
        os.close(fd)
        os.unlink(log_file_path)

    return content.decode('ascii', 'replace')
def _testPipeLogger(self, test_string):
    """
    Echo a test string through PipeLogger into a temp file and
    return the decoded file contents after both tasks succeed.
    """
    shell_cmd = ["bash", "-c", self._echo_cmd % test_string]
    producer = PopenProcess(
        proc=subprocess.Popen(
            shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
        scheduler=global_event_loop())

    tmp_fd, tmp_path = tempfile.mkstemp()
    try:
        dup_fd = os.dup(producer.proc.stdout.fileno())
        consumer = PipeLogger(
            background=True,
            input_fd=dup_fd,
            log_file_path=tmp_path)
        # Close the stdout pipe, since we duplicated it, and it
        # must be closed in order to avoid a ResourceWarning.
        producer.proc.stdout.close()
        producer.pipe_reader = consumer

        producer.start()
        producer.wait()

        self.assertEqual(producer.returncode, os.EX_OK)
        self.assertEqual(consumer.returncode, os.EX_OK)

        with open(tmp_path, 'rb') as log_file:
            logged = log_file.read()
    finally:
        os.close(tmp_fd)
        os.unlink(tmp_path)

    return logged.decode('ascii', 'replace')
def _check_sig_key(self):
    """
    Start a "gpg --verify" of the manifest, capturing its combined
    stdout/stderr with a PipeReader, and continue in
    _check_sig_key_exit when it finishes.
    """
    # Use os.devnull rather than a hard-coded '/dev/null' for
    # portability and consistency with the fetch tasks.
    null_fd = os.open(os.devnull, os.O_RDONLY)
    popen_proc = PopenProcess(proc=subprocess.Popen(
        ["gpg", "--verify", self._manifest_path],
        stdin=null_fd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
        pipe_reader=PipeReader())
    # Safe to close our copy once the child has inherited it.
    os.close(null_fd)
    popen_proc.pipe_reader.input_files = {
        "producer": popen_proc.proc.stdout}
    self._start_task(popen_proc, self._check_sig_key_exit)
def _start_gpg_proc(self):
    """
    Expand self.gpg_cmd with gpg_vars (plus FILE set to the manifest
    path), split it into argv, and start it as a PopenProcess.
    """
    expand_map = dict(self.gpg_vars) if self.gpg_vars is not None else {}
    expand_map["FILE"] = self._manifest_path

    command = shlex_split(varexpand(self.gpg_cmd, mydict=expand_map))

    self._start_task(
        PopenProcess(proc=subprocess.Popen(command)),
        self._gpg_proc_exit)
def _testPipeReader(self, test_string):
    """
    Use a poll loop to read data from a pipe and assert that
    the data written to the pipe is identical to the data
    read from the pipe.
    """
    reader = PipeReaderBlockingIO()
    proc = PopenProcess(
        proc=subprocess.Popen(
            ["bash", "-c", self._echo_cmd % test_string],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ),
        pipe_reader=reader,
        scheduler=global_event_loop(),
    )
    reader.input_files = {"producer": proc.proc.stdout}

    proc.start()
    proc.wait()

    self.assertEqual(proc.returncode, os.EX_OK)
    self.assertEqual(reader.returncode, os.EX_OK)

    return reader.getvalue().decode("ascii", "replace")
def _testPipeReader(self, master_fd, slave_fd, test_string):
    """
    Use a poll loop to read data from a pipe and assert that
    the data written to the pipe is identical to the data
    read from the pipe.
    """
    # WARNING: It is very important to use unbuffered mode here,
    # in order to avoid issue 5380 with python3.
    reader_file = os.fdopen(master_fd, 'rb', 0)
    loop = global_event_loop()

    consumer = PipeReader(
        input_files={"producer": reader_file},
        _use_array=self._use_array,
        scheduler=loop)
    producer = PopenProcess(
        pipe_reader=consumer,
        proc=subprocess.Popen(
            ["bash", "-c", self._echo_cmd % test_string],
            stdout=slave_fd),
        scheduler=loop)

    producer.start()
    # The child inherited the slave end; close our copy so the
    # reader sees EOF when the child exits.
    os.close(slave_fd)
    producer.wait()
    consumer.wait()

    self.assertEqual(producer.returncode, os.EX_OK)
    self.assertEqual(consumer.returncode, os.EX_OK)

    return consumer.getvalue().decode('ascii', 'replace')
def _testPipeLogger(self, test_string):
    """
    Pipe an echoed test string through PipeLogger to a temporary
    log file, check both return codes, and hand back the decoded
    log contents.
    """
    event_loop = global_event_loop()
    producer = PopenProcess(
        proc=subprocess.Popen(
            ["bash", "-c", self._echo_cmd % test_string],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT),
        scheduler=event_loop)

    log_fd, log_path = tempfile.mkstemp()
    try:
        consumer = PipeLogger(
            background=True,
            input_fd=os.dup(producer.proc.stdout.fileno()),
            log_file_path=log_path)
        # We duplicated the stdout fd above, so the original file
        # object must be closed to avoid a ResourceWarning.
        producer.proc.stdout.close()
        producer.pipe_reader = consumer

        producer.start()
        producer.wait()

        self.assertEqual(producer.returncode, os.EX_OK)
        self.assertEqual(consumer.returncode, os.EX_OK)

        with open(log_path, 'rb') as handle:
            payload = handle.read()
    finally:
        os.close(log_fd)
        os.unlink(log_path)

    return payload.decode('ascii', 'replace')
def _testPipeReader(self, test_string):
    """
    Use a poll loop to read data from a pipe and assert that
    the data written to the pipe is identical to the data
    read from the pipe.
    """
    consumer = PipeReader()
    producer = PopenProcess(
        proc=subprocess.Popen(
            ["bash", "-c", self._echo_cmd % test_string],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT),
        pipe_reader=consumer,
        scheduler=global_event_loop())
    consumer.input_files = {"producer": producer.proc.stdout}

    producer.start()
    producer.wait()

    self.assertEqual(producer.returncode, os.EX_OK)
    self.assertEqual(consumer.returncode, os.EX_OK)

    return consumer.getvalue().decode('ascii', 'replace')
def _fetch_uri(self, uri):
    """
    Begin fetching ``uri`` into a uniquely named temporary file,
    or just log and report success when --dry-run is in effect.
    """
    if self.config.options.dry_run:
        # Simply report success.
        logging.info("dry-run: fetch '%s' from '%s'" % (self.distfile, uri))
        self._success()
        self.returncode = os.EX_OK
        self.wait()
        return

    if self.config.options.temp_dir:
        self._fetch_tmp_dir_info = 'temp-dir'
        dest_dir = self.config.options.temp_dir
    else:
        self._fetch_tmp_dir_info = 'distfiles'
        dest_dir = self.config.options.distfiles

    tmp_name = self.distfile + '._emirrordist_fetch_.%s' % os.getpid()
    self._fetch_tmp_file = os.path.join(dest_dir, tmp_name)

    # Clear out any stale temporary file from an earlier attempt.
    try:
        os.unlink(self._fetch_tmp_file)
    except OSError:
        pass

    substitutions = {"DISTDIR": dest_dir, "URI": uri, "FILE": tmp_name}
    argv = [portage.util.varexpand(token, mydict=substitutions)
        for token in portage.util.shlex_split(default_fetchcommand)]

    if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
        not os.path.isabs(argv[0]):
        # Python 3.1 _execvp throws TypeError for non-absolute executable
        # path passed as bytes (see https://bugs.python.org/issue8513).
        resolved = portage.process.find_binary(argv[0])
        if resolved is None:
            raise portage.exception.CommandNotFound(argv[0])
        argv[0] = resolved

    argv = [_unicode_encode(token, encoding=_encodings['fs'], errors='strict')
        for token in argv]

    devnull = os.open(os.devnull, os.O_RDONLY)
    fetcher = PopenProcess(background=self.background,
        proc=subprocess.Popen(argv, stdin=devnull,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
        scheduler=self.scheduler)
    # Safe to close our copy once the child has inherited it.
    os.close(devnull)

    fetcher.pipe_reader = PipeLogger(background=self.background,
        input_fd=fetcher.proc.stdout, log_file_path=self._log_path,
        scheduler=self.scheduler)

    self._start_task(fetcher, self._fetcher_exit)
def _fetch_uri(self, uri):
    """
    Start downloading ``uri`` to a temp file; in dry-run mode,
    simply log the would-be fetch and finish successfully.
    """
    options = self.config.options

    if options.dry_run:
        # Simply report success.
        logging.info("dry-run: fetch '%s' from '%s'" % (self.distfile, uri))
        self._success()
        self.returncode = os.EX_OK
        self.wait()
        return

    if options.temp_dir:
        self._fetch_tmp_dir_info = 'temp-dir'
        fetch_dir = options.temp_dir
    else:
        self._fetch_tmp_dir_info = 'distfiles'
        fetch_dir = options.distfiles

    basename = self.distfile + '._emirrordist_fetch_.%s' % os.getpid()
    self._fetch_tmp_file = os.path.join(fetch_dir, basename)

    # Discard a leftover temp file from any prior attempt.
    try:
        os.unlink(self._fetch_tmp_file)
    except OSError:
        pass

    env_map = {
        "DISTDIR": fetch_dir,
        "URI": uri,
        "FILE": basename
    }
    command = portage.util.shlex_split(default_fetchcommand)
    command = [portage.util.varexpand(part, mydict=env_map)
        for part in command]

    if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
        not os.path.isabs(command[0]):
        # Python 3.1 _execvp throws TypeError for non-absolute executable
        # path passed as bytes (see https://bugs.python.org/issue8513).
        full_path = portage.process.find_binary(command[0])
        if full_path is None:
            raise portage.exception.CommandNotFound(command[0])
        command[0] = full_path

    command = [_unicode_encode(part, encoding=_encodings['fs'],
        errors='strict') for part in command]

    stdin_fd = os.open(os.devnull, os.O_RDONLY)
    fetcher = PopenProcess(background=self.background,
        proc=subprocess.Popen(command, stdin=stdin_fd,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
        scheduler=self.scheduler)
    # The child now owns its inherited copy of the fd.
    os.close(stdin_fd)

    fetcher.pipe_reader = PipeLogger(background=self.background,
        input_fd=fetcher.proc.stdout, log_file_path=self._log_path,
        scheduler=self.scheduler)

    self._start_task(fetcher, self._fetcher_exit)