Example #1
    def _testPipeReader(self, master_fd, slave_fd, test_string):
        """
		Use a poll loop to read data from a pipe and assert that
		the data written to the pipe is identical to the data
		read from the pipe.
		"""

        # WARNING: It is very important to use unbuffered mode here,
        # in order to avoid issue 5380 with python3.
        master_file = os.fdopen(master_fd, 'rb', 0)
        scheduler = global_event_loop()

        consumer = PipeReader(input_files={"producer": master_file},
                              _use_array=self._use_array,
                              scheduler=scheduler)
        consumer.start()

        producer = scheduler.run_until_complete(
            asyncio.create_subprocess_exec("bash",
                                           "-c",
                                           self._echo_cmd % test_string,
                                           stdout=slave_fd,
                                           loop=scheduler))

        os.close(slave_fd)
        scheduler.run_until_complete(producer.wait())
        scheduler.run_until_complete(consumer.async_wait())

        self.assertEqual(producer.returncode, os.EX_OK)
        self.assertEqual(consumer.returncode, os.EX_OK)

        return consumer.getvalue().decode('ascii', 'replace')
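A minimal sketch of how this helper might be invoked, assuming a test class that defines _use_array and _echo_cmd (both referenced above); the os.pipe() setup here is illustrative rather than taken from the original:

    def testPipe(self):
        test_string = 2 * "blah blah blah\n"
        # Hypothetical driver: feed a plain pipe pair into the helper and
        # verify the round trip.
        master_fd, slave_fd = os.pipe()
        output = self._testPipeReader(master_fd, slave_fd, test_string)
        self.assertEqual(test_string, output)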
Example #2
	def _testPipeReader(self, master_fd, slave_fd, test_string):
		"""
		Use a poll loop to read data from a pipe and assert that
		the data written to the pipe is identical to the data
		read from the pipe.
		"""

		# WARNING: It is very important to use unbuffered mode here,
		# in order to avoid issue 5380 with python3.
		master_file = os.fdopen(master_fd, 'rb', 0)
		scheduler = global_event_loop()

		consumer = PipeReader(
			input_files={"producer" : master_file},
			_use_array=self._use_array,
			scheduler=scheduler)
		consumer.start()

		producer = scheduler.run_until_complete(asyncio.create_subprocess_exec(
			"bash", "-c", self._echo_cmd % test_string,
			stdout=slave_fd,
			loop=scheduler))

		os.close(slave_fd)
		scheduler.run_until_complete(producer.wait())
		scheduler.run_until_complete(consumer.async_wait())

		self.assertEqual(producer.returncode, os.EX_OK)
		self.assertEqual(consumer.returncode, os.EX_OK)

		return consumer.getvalue().decode('ascii', 'replace')
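Standalone illustration (not from the original) of why the os.close(slave_fd) call above matters: a pipe reader only observes EOF once every write-end descriptor has been closed.

import os

r, w = os.pipe()
os.write(w, b"data")
os.close(w)                        # without this, the final read would block
assert os.read(r, 4096) == b"data"
assert os.read(r, 4096) == b""     # empty read signals EOF
os.close(r)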
Example #3
class AsyncFunction(ForkProcess):
    """
	Execute a function call in a fork, and retrieve the function
	return value via pickling/unpickling, accessible as the
	"result" attribute after the forked process has exited.
	"""

    # NOTE: This class overrides the meaning of the SpawnProcess 'args'
    # attribute, and uses it to hold the positional arguments for the
    # 'target' function.
    __slots__ = ('kwargs', 'result', 'target', '_async_func_reader',
                 '_async_func_reader_pw')

    def _start(self):
        pr, pw = os.pipe()
        self.fd_pipes = {}
        self.fd_pipes[pw] = pw
        self._async_func_reader_pw = pw
        self._async_func_reader = PipeReader(input_files={"input": pr},
                                             scheduler=self.scheduler)
        self._async_func_reader.addExitListener(self._async_func_reader_exit)
        self._async_func_reader.start()
        ForkProcess._start(self)
        os.close(pw)

    def _run(self):
        try:
            result = self.target(*(self.args or []), **(self.kwargs or {}))
            os.write(self._async_func_reader_pw, pickle.dumps(result))
        except Exception:
            traceback.print_exc()
            return 1

        return os.EX_OK

    def _pipe_logger_exit(self, pipe_logger):
        # Ignore this event, since we want to ensure that we exit
        # only after _async_func_reader_exit has reached EOF.
        self._pipe_logger = None

    def _async_func_reader_exit(self, pipe_reader):
        try:
            self.result = pickle.loads(pipe_reader.getvalue())
        except Exception:
            # The child process will have printed a traceback in this case,
            # and returned an unsuccessful returncode.
            pass
        self._async_func_reader = None
        self._unregister()
        self.wait()

    def _unregister(self):
        ForkProcess._unregister(self)

        pipe_reader = self._async_func_reader
        if pipe_reader is not None:
            self._async_func_reader = None
            pipe_reader.removeExitListener(self._async_func_reader_exit)
            pipe_reader.cancel()
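A sketch of typical usage inferred from the attributes above; the keyword names mirror the __slots__, while global_event_loop() as the scheduler is an assumption:

proc = AsyncFunction(
    target=sum,                    # any callable with a picklable result
    args=([1, 2, 3],),             # positional args (reuses the 'args' slot)
    kwargs={},
    scheduler=global_event_loop())
proc.start()
if proc.wait() == os.EX_OK:
    print(proc.result)             # 6, unpickled from the child's pipe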
Example #4
    def _receive_reply(self, input_fd):

        start_time = time.time()

        pipe_reader = PipeReader(input_files={"input_fd": input_fd},
                                 scheduler=global_event_loop())
        pipe_reader.start()

        eof = pipe_reader.poll() is not None

        while not eof:
            pipe_reader._wait_loop(timeout=self._COMMUNICATE_RETRY_TIMEOUT)
            eof = pipe_reader.poll() is not None
            if not eof:
                if self._daemon_is_alive():
                    self._timeout_retry_msg(
                        start_time, portage.localization._("during read"))
                else:
                    pipe_reader.cancel()
                    self._no_daemon_msg()
                    return 2

        buf = pipe_reader.getvalue()

        retval = 2

        if not buf:

            portage.util.writemsg_level(
                "ebuild-ipc: %s\n" % (portage.localization._("read failed"), ),
                level=logging.ERROR,
                noiselevel=-1,
            )

        else:

            try:
                reply = pickle.loads(buf)
            except SystemExit:
                raise
            except Exception as e:
                # The pickle module can raise practically
                # any exception when given corrupt data.
                portage.util.writemsg_level("ebuild-ipc: %s\n" % (e, ),
                                            level=logging.ERROR,
                                            noiselevel=-1)

            else:

                (out, err, retval) = reply

                if out:
                    portage.util.writemsg_stdout(out, noiselevel=-1)

                if err:
                    portage.util.writemsg(err, noiselevel=-1)

        return retval
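The unpacking of reply above implies that the peer writes a pickled (out, err, retval) 3-tuple; a hypothetical writer side (not shown in the original) could look like this:

import os
import pickle

def _send_reply(output_fd, out="", err="", retval=os.EX_OK):
    # Counterpart assumed from the unpacking in _receive_reply().
    os.write(output_fd, pickle.dumps((out, err, retval)))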
Example #5
	def _receive_reply(self, input_fd):

		start_time = time.time()

		pipe_reader = PipeReader(input_files={"input_fd":input_fd},
			scheduler=global_event_loop())
		pipe_reader.start()

		eof = pipe_reader.poll() is not None

		while not eof:
			pipe_reader._wait_loop(timeout=self._COMMUNICATE_RETRY_TIMEOUT)
			eof = pipe_reader.poll() is not None
			if not eof:
				if self._daemon_is_alive():
					self._timeout_retry_msg(start_time,
						portage.localization._('during read'))
				else:
					pipe_reader.cancel()
					self._no_daemon_msg()
					return 2

		buf = pipe_reader.getvalue()

		retval = 2

		if not buf:

			portage.util.writemsg_level(
				"ebuild-ipc: %s\n" % \
				(portage.localization._('read failed'),),
				level=logging.ERROR, noiselevel=-1)

		else:

			try:
				reply = pickle.loads(buf)
			except SystemExit:
				raise
			except Exception as e:
				# The pickle module can raise practically
				# any exception when given corrupt data.
				portage.util.writemsg_level(
					"ebuild-ipc: %s\n" % (e,),
					level=logging.ERROR, noiselevel=-1)

			else:

				(out, err, retval) = reply

				if out:
					portage.util.writemsg_stdout(out, noiselevel=-1)

				if err:
					portage.util.writemsg(err, noiselevel=-1)

		return retval
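The retry loop above depends on _daemon_is_alive(), which is defined elsewhere; a hypothetical stand-in (an assumption, not the original implementation) could probe the daemon's pid with signal 0:

import errno
import os

def _daemon_is_alive(self):
    # self.daemon_pid is a hypothetical attribute for this sketch.
    try:
        os.kill(self.daemon_pid, 0)    # signal 0: existence check only
    except OSError as e:
        return e.errno == errno.EPERM  # alive, but owned by another user
    return True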
Example #6
    def testLazyImportPortageBaseline(self):
        """
        Check what modules are imported by a baseline module import.
        """

        env = os.environ.copy()
        pythonpath = env.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is None:
            pythonpath = ""
        else:
            pythonpath = ":" + pythonpath
        pythonpath = PORTAGE_PYM_PATH + pythonpath
        env["PYTHONPATH"] = pythonpath

        # If python is patched to insert the path of the
        # currently installed portage module into sys.path,
        # then the above PYTHONPATH override doesn't help.
        env["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH

        scheduler = global_event_loop()
        master_fd, slave_fd = os.pipe()
        master_file = os.fdopen(master_fd, "rb", 0)
        slave_file = os.fdopen(slave_fd, "wb")
        producer = SpawnProcess(
            args=self._baseline_import_cmd,
            env=env,
            fd_pipes={1: slave_fd},
            scheduler=scheduler,
        )
        producer.start()
        slave_file.close()

        consumer = PipeReader(input_files={"producer": master_file},
                              scheduler=scheduler)

        consumer.start()
        consumer.wait()
        self.assertEqual(producer.wait(), os.EX_OK)
        self.assertEqual(consumer.wait(), os.EX_OK)

        output = consumer.getvalue().decode("ascii", "replace").split()

        unexpected_modules = " ".join(
            sorted(x for x in output if self._module_re.match(x) is not None
                   and x not in self._baseline_imports))

        self.assertEqual("", unexpected_modules)
Example #7
	def testLazyImportPortageBaseline(self):
		"""
		Check what modules are imported by a baseline module import.
		"""

		env = os.environ.copy()
		pythonpath = env.get('PYTHONPATH')
		if pythonpath is not None and not pythonpath.strip():
			pythonpath = None
		if pythonpath is None:
			pythonpath = ''
		else:
			pythonpath = ':' + pythonpath
		pythonpath = PORTAGE_PYM_PATH + pythonpath
		env['PYTHONPATH'] = pythonpath

		# If python is patched to insert the path of the
		# currently installed portage module into sys.path,
		# then the above PYTHONPATH override doesn't help.
		env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH

		scheduler = PollScheduler().sched_iface
		master_fd, slave_fd = os.pipe()
		master_file = os.fdopen(master_fd, 'rb', 0)
		slave_file = os.fdopen(slave_fd, 'wb')
		producer = SpawnProcess(
			args=self._baseline_import_cmd,
			env=env, fd_pipes={1:slave_fd},
			scheduler=scheduler)
		producer.start()
		slave_file.close()

		consumer = PipeReader(
			input_files={"producer" : master_file},
			scheduler=scheduler)

		consumer.start()
		consumer.wait()
		self.assertEqual(producer.wait(), os.EX_OK)
		self.assertEqual(consumer.wait(), os.EX_OK)

		output = consumer.getvalue().decode('ascii', 'replace').split()

		unexpected_modules = " ".join(sorted(x for x in output \
			if self._module_re.match(x) is not None and \
			x not in self._baseline_imports))

		self.assertEqual("", unexpected_modules)
Example #8
class _Reader(object):
    def __init__(self, future, input_file, loop):
        self._future = future
        self._pipe_reader = PipeReader(input_files={'input_file': input_file},
                                       scheduler=loop)

        self._future.add_done_callback(self._cancel_callback)
        self._pipe_reader.addExitListener(self._eof)
        self._pipe_reader.start()

    def _cancel_callback(self, future):
        if future.cancelled():
            self._cancel()

    def _eof(self, pipe_reader):
        self._pipe_reader = None
        self._future.set_result(pipe_reader.getvalue())

    def _cancel(self):
        if self._pipe_reader is not None and self._pipe_reader.poll() is None:
            self._pipe_reader.removeExitListener(self._eof)
            self._pipe_reader.cancel()
            self._pipe_reader = None
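A sketch of how _Reader might be wired into a coroutine-friendly helper; the _reader name and the create_future() call are assumptions that follow the future/exit-listener contract shown above:

def _reader(input_file, loop):
    future = loop.create_future()    # assumes an asyncio-style loop API
    _Reader(future, input_file, loop)
    return future                    # resolves to the accumulated bytes at EOF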
Example #9
	def testPipeReader(self):
		"""
		Use a poll loop to read data from a pipe and assert that
		the data written to the pipe is identical to the data
		read from the pipe.
		"""

		test_string = 2 * "blah blah blah\n"

		scheduler = PollScheduler().sched_iface
		master_fd, slave_fd = os.pipe()
		master_file = os.fdopen(master_fd, 'rb', 0)
		slave_file = os.fdopen(slave_fd, 'wb')
		producer = SpawnProcess(
			args=["bash", "-c", "echo -n '%s'" % test_string],
			env=os.environ, fd_pipes={1:slave_fd},
			scheduler=scheduler)
		producer.start()
		slave_file.close()

		consumer = PipeReader(
			input_files={"producer" : master_file},
			scheduler=scheduler)

		consumer.start()

		# This will ensure that both tasks have exited, which
		# is necessary to avoid "ResourceWarning: unclosed file"
		# warnings since Python 3.2 (and also ensures that we
		# don't leave any zombie child processes).
		scheduler.schedule()
		self.assertEqual(producer.returncode, os.EX_OK)
		self.assertEqual(consumer.returncode, os.EX_OK)

		output = consumer.getvalue().decode('ascii', 'replace')
		self.assertEqual(test_string, output)
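scheduler.schedule() belongs to the older PollScheduler API; on the global_event_loop() API used in Example #1, the equivalent drain-and-reap step would read roughly as follows (a sketch, not a drop-in patch):

scheduler.run_until_complete(consumer.async_wait())
self.assertEqual(producer.wait(), os.EX_OK)
self.assertEqual(consumer.returncode, os.EX_OK)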
Example #10
class FileDigester(ForkProcess):
    """
	Asynchronously generate file digests. Pass in file_path and
	hash_names, and after successful execution, the digests
	attribute will be a dict containing all of the requested
	digests.
	"""

    __slots__ = ('file_path', 'digests', 'hash_names', '_digest_pipe_reader',
                 '_digest_pw')

    def _start(self):
        pr, pw = os.pipe()
        self.fd_pipes = {}
        self.fd_pipes[pw] = pw
        self._digest_pw = pw
        self._digest_pipe_reader = PipeReader(input_files={"input": pr},
                                              scheduler=self.scheduler)
        self._digest_pipe_reader.addExitListener(self._digest_pipe_reader_exit)
        self._digest_pipe_reader.start()
        ForkProcess._start(self)
        os.close(pw)

    def _run(self):
        digests = perform_multiple_checksums(self.file_path,
                                             hashes=self.hash_names)

        buf = "".join("%s=%s\n" % item
                      for item in digests.items()).encode('utf_8')

        while buf:
            buf = buf[os.write(self._digest_pw, buf):]

        return os.EX_OK

    def _parse_digests(self, data):

        digests = {}
        for line in data.decode('utf_8').splitlines():
            parts = line.split('=', 1)
            if len(parts) == 2:
                digests[parts[0]] = parts[1]

        self.digests = digests

    def _pipe_logger_exit(self, pipe_logger):
        # Ignore this event, since we want to ensure that we
        # exit only after _digest_pipe_reader has reached EOF.
        self._pipe_logger = None

    def _digest_pipe_reader_exit(self, pipe_reader):
        self._parse_digests(pipe_reader.getvalue())
        self._digest_pipe_reader = None
        self._unregister()
        self.wait()

    def _unregister(self):
        ForkProcess._unregister(self)

        pipe_reader = self._digest_pipe_reader
        if pipe_reader is not None:
            self._digest_pipe_reader = None
            pipe_reader.removeExitListener(self._digest_pipe_reader_exit)
            pipe_reader.cancel()
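A sketch of the usage promised by the docstring; the file path, hash names, and scheduler are illustrative values, not from the original:

digester = FileDigester(
    file_path="/path/to/file",
    hash_names=("MD5", "SHA256"),
    scheduler=global_event_loop())
digester.start()
if digester.wait() == os.EX_OK:
    print(digester.digests)    # e.g. {'MD5': '...', 'SHA256': '...'}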
Example #11
class FileDigester(ForkProcess):
	"""
	Asynchronously generate file digests. Pass in file_path and
	hash_names, and after successful execution, the digests
	attribute will be a dict containing all of the requested
	digests.
	"""

	__slots__ = ('file_path', 'digests', 'hash_names',
		'_digest_pipe_reader', '_digest_pw')

	def _start(self):
		pr, pw = os.pipe()
		self.fd_pipes = {}
		self.fd_pipes[pw] = pw
		self._digest_pw = pw
		self._digest_pipe_reader = PipeReader(
			input_files={"input":pr},
			scheduler=self.scheduler)
		self._digest_pipe_reader.addExitListener(self._digest_pipe_reader_exit)
		self._digest_pipe_reader.start()
		ForkProcess._start(self)
		os.close(pw)

	def _run(self):
		digests = perform_multiple_checksums(self.file_path,
			hashes=self.hash_names)

		buf = "".join("%s=%s\n" % item
			for item in digests.items()).encode('utf_8')

		while buf:
			buf = buf[os.write(self._digest_pw, buf):]

		return os.EX_OK

	def _parse_digests(self, data):

		digests = {}
		for line in data.decode('utf_8').splitlines():
			parts = line.split('=', 1)
			if len(parts) == 2:
				digests[parts[0]] = parts[1]

		self.digests = digests

	def _pipe_logger_exit(self, pipe_logger):
		# Ignore this event, since we want to ensure that we
		# exit only after _digest_pipe_reader has reached EOF.
		self._pipe_logger = None

	def _digest_pipe_reader_exit(self, pipe_reader):
		self._parse_digests(pipe_reader.getvalue())
		self._digest_pipe_reader = None
		self._unregister()
		self.wait()

	def _unregister(self):
		ForkProcess._unregister(self)

		pipe_reader = self._digest_pipe_reader
		if pipe_reader is not None:
			self._digest_pipe_reader = None
			pipe_reader.removeExitListener(self._digest_pipe_reader_exit)
			pipe_reader.cancel()
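Standalone round trip of the "name=value\n" wire format that _run() writes and _parse_digests() reads above (the digest value is made up):

payload = "".join("%s=%s\n" % item
                  for item in {"MD5": "d41d8cd9"}.items()).encode("utf_8")
parsed = {}
for line in payload.decode("utf_8").splitlines():
    parts = line.split("=", 1)
    if len(parts) == 2:
        parsed[parts[0]] = parts[1]
assert parsed == {"MD5": "d41d8cd9"}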
Example #12
class AsyncFunction(ForkProcess):
	"""
	Execute a function call in a fork, and retrieve the function
	return value via pickling/unpickling, accessible as the
	"result" attribute after the forked process has exited.
	"""

	# NOTE: This class overrides the meaning of the SpawnProcess 'args'
	# attribute, and uses it to hold the positional arguments for the
	# 'target' function.
	__slots__ = ('kwargs', 'result', 'target',
		'_async_func_reader', '_async_func_reader_pw')

	def _start(self):
		pr, pw = os.pipe()
		self.fd_pipes = {}
		self.fd_pipes[pw] = pw
		self._async_func_reader_pw = pw
		self._async_func_reader = PipeReader(
			input_files={"input":pr},
			scheduler=self.scheduler)
		self._async_func_reader.addExitListener(self._async_func_reader_exit)
		self._async_func_reader.start()
		ForkProcess._start(self)
		os.close(pw)

	def _run(self):
		try:
			result = self.target(*(self.args or []), **(self.kwargs or {}))
			os.write(self._async_func_reader_pw, pickle.dumps(result))
		except Exception:
			traceback.print_exc()
			return 1

		return os.EX_OK

	def _pipe_logger_exit(self, pipe_logger):
		# Ignore this event, since we want to ensure that we exit
		# only after _async_func_reader_exit has reached EOF.
		self._pipe_logger = None

	def _async_func_reader_exit(self, pipe_reader):
		try:
			self.result = pickle.loads(pipe_reader.getvalue())
		except Exception:
			# The child process will have printed a traceback in this case,
			# and returned an unsuccessful returncode.
			pass
		self._async_func_reader = None
		if self.returncode is None:
			self._async_waitpid()
		else:
			self._unregister()
			self._async_wait()

	def _unregister(self):
		ForkProcess._unregister(self)

		pipe_reader = self._async_func_reader
		if pipe_reader is not None:
			self._async_func_reader = None
			pipe_reader.removeExitListener(self._async_func_reader_exit)
			pipe_reader.cancel()
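Because the result travels through pickle, the target's return value must be picklable; a standalone illustration (not from the original):

import pickle

pickle.dumps([1, 2, 3])          # plain containers round-trip fine
try:
    pickle.dumps(lambda: None)   # lambdas cannot be pickled
except (pickle.PicklingError, AttributeError, TypeError):
    pass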