def testLazyImportPortageBaseline(self):
		"""
		Check what modules are imported by a baseline module import.
		"""

		env = os.environ.copy()
		pythonpath = env.get('PYTHONPATH')
		if pythonpath is not None and not pythonpath.strip():
			pythonpath = None
		if pythonpath is None:
			pythonpath = ''
		else:
			pythonpath = ':' + pythonpath
		pythonpath = PORTAGE_PYM_PATH + pythonpath
		env['PYTHONPATH'] = pythonpath

		# If python is patched to insert the path of the
		# currently installed portage module into sys.path,
		# then the above PYTHONPATH override doesn't help.
		env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH

		scheduler = global_event_loop()
		master_fd, slave_fd = os.pipe()
		master_file = os.fdopen(master_fd, 'rb', 0)
		slave_file = os.fdopen(slave_fd, 'wb')
		producer = SpawnProcess(
			args=self._baseline_import_cmd,
			env=env, fd_pipes={1:slave_fd},
			scheduler=scheduler)
		scheduler.run_until_complete(producer.async_start())
		slave_file.close()

		consumer = PipeReader(
			input_files={"producer" : master_file},
			scheduler=scheduler)

		scheduler.run_until_complete(consumer.async_start())
		consumer.wait()
		self.assertEqual(producer.wait(), os.EX_OK)
		self.assertEqual(consumer.wait(), os.EX_OK)

		output = consumer.getvalue().decode('ascii', 'replace').split()

		unexpected_modules = " ".join(sorted(x for x in output \
			if self._module_re.match(x) is not None and \
			x not in self._baseline_imports))

		self.assertEqual("", unexpected_modules)
 def testLogfile(self):
     logfile = None
     try:
         fd, logfile = tempfile.mkstemp()
         os.close(fd)
         null_fd = os.open('/dev/null', os.O_RDWR)
         test_string = 2 * "blah blah blah\n"
         proc = SpawnProcess(
             args=[BASH_BINARY, "-c",
                   "echo -n '%s'" % test_string],
             env={},
             fd_pipes={
                 0: portage._get_stdin().fileno(),
                 1: null_fd,
                 2: null_fd
             },
             scheduler=global_event_loop(),
             logfile=logfile)
         global_event_loop().run_until_complete(proc.async_start())
         os.close(null_fd)
         self.assertEqual(proc.wait(), os.EX_OK)
         f = io.open(_unicode_encode(logfile,
                                     encoding=_encodings['fs'],
                                     errors='strict'),
                     mode='r',
                     encoding=_encodings['content'],
                     errors='strict')
         log_content = f.read()
         f.close()
         # When logging passes through a pty, this comparison will fail
         # unless the oflag terminal attributes have the termios.OPOST
         # bit disabled. Otherwise, transformations such as \n -> \r\n
         # may occur.
         self.assertEqual(test_string, log_content)
     finally:
         if logfile:
             try:
                 os.unlink(logfile)
             except EnvironmentError as e:
                 if e.errno != errno.ENOENT:
                     raise
                 del e
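
# The comment in testLogfile above notes that logging through a pty only
# matches byte-for-byte when the termios.OPOST oflag bit is cleared. A
# minimal sketch of how that bit could be cleared on a pty fd follows; it
# is illustrative only, not part of the original tests, and the helper
# name and fd argument are hypothetical.
import termios

def _disable_opost(pty_fd):
    # tcgetattr() returns [iflag, oflag, cflag, lflag, ispeed, ospeed, cc];
    # index 1 holds the output flags, where OPOST enables post-processing
    # such as rewriting "\n" to "\r\n".
    attrs = termios.tcgetattr(pty_fd)
    attrs[1] &= ~termios.OPOST
    termios.tcsetattr(pty_fd, termios.TCSANOW, attrs)
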
class _LockProcess(AbstractPollTask):
    """
	This uses the portage.locks module to acquire a lock asynchronously,
	using a subprocess. After the lock is acquired, the process
	writes to a pipe in order to notify a poll loop running in the main
	process. The unlock() method notifies the subprocess to release the
	lock and exit.
	"""

    __slots__ = ('path',) + \
     ('_acquired', '_kill_test', '_proc', '_files', '_unlock_future')

    def _start(self):
        self.scheduler.run_until_complete(self._async_start())

    @coroutine
    def _async_start(self):
        in_pr, in_pw = os.pipe()
        out_pr, out_pw = os.pipe()
        self._files = {}
        self._files['pipe_in'] = in_pr
        self._files['pipe_out'] = out_pw

        fcntl.fcntl(in_pr, fcntl.F_SETFL,
                    fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)

        # FD_CLOEXEC is enabled by default in Python >=3.4.
        if sys.hexversion < 0x3040000:
            try:
                fcntl.FD_CLOEXEC
            except AttributeError:
                pass
            else:
                fcntl.fcntl(
                    in_pr, fcntl.F_SETFD,
                    fcntl.fcntl(in_pr, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

        self.scheduler.add_reader(in_pr, self._output_handler)
        self._registered = True
        self._proc = SpawnProcess(
            args=[portage._python_interpreter,
                os.path.join(portage._bin_path, 'lock-helper.py'),
                self.path],
            env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
            fd_pipes={
                0: out_pr,
                1: in_pw,
                2: sys.__stderr__.fileno(),
            },
            scheduler=self.scheduler)
        self._proc.addExitListener(self._proc_exit)
        yield self._proc.async_start()
        os.close(out_pr)
        os.close(in_pw)

    def _proc_exit(self, proc):

        if self._files is not None:
            # Close pipe_out if it's still open, since it's useless
            # after the process has exited. This helps to avoid
            # "ResourceWarning: unclosed file" since Python 3.2.
            try:
                pipe_out = self._files.pop('pipe_out')
            except KeyError:
                pass
            else:
                os.close(pipe_out)

        if proc.returncode != os.EX_OK:
            # Typically, this will happen due to the
            # process being killed by a signal.

            if not self._acquired:
                # If the lock hasn't been acquired yet, the
                # caller can check the returncode and handle
                # this failure appropriately.
                if not (self.cancelled or self._kill_test):
                    writemsg_level("_LockProcess: %s\n" %
                        _("failed to acquire lock on '%s'") % (self.path,),
                        level=logging.ERROR, noiselevel=-1)
                self._unregister()
                self.returncode = proc.returncode
                self._async_wait()
                return

            if not self.cancelled and self._unlock_future is None:
                # We don't want lost locks going unnoticed, so it's
                # only safe to ignore if either the cancel() or
                # unlock() methods have been previously called.
                raise AssertionError("lock process failed with returncode %s"
                    % (proc.returncode,))

        if self._unlock_future is not None:
            self._unlock_future.set_result(None)

    def _cancel(self):
        if self._proc is not None:
            self._proc.cancel()

    def _poll(self):
        if self._proc is not None:
            self._proc.poll()
        return self.returncode

    def _output_handler(self):
        buf = self._read_buf(self._files['pipe_in'])
        if buf:
            self._acquired = True
            self._unregister()
            self.returncode = os.EX_OK
            self._async_wait()

        return True

    def _unregister(self):
        self._registered = False

        if self._files is not None:
            try:
                pipe_in = self._files.pop('pipe_in')
            except KeyError:
                pass
            else:
                self.scheduler.remove_reader(pipe_in)
                os.close(pipe_in)

    def _unlock(self):
        if self._proc is None:
            raise AssertionError('not locked')
        if not self._acquired:
            raise AssertionError('lock not acquired yet')
        if self.returncode != os.EX_OK:
            raise AssertionError("lock process failed with returncode %s"
                % (self.returncode,))
        if self._unlock_future is not None:
            raise AssertionError("already unlocked")
        self._unlock_future = self.scheduler.create_future()
        os.write(self._files['pipe_out'], b'\0')
        os.close(self._files['pipe_out'])
        self._files = None

    def async_unlock(self):
        """
		Release the lock asynchronously. Release notification is available
		via the add_done_callback method of the returned Future instance.

		@returns: Future, result is None
		"""
        self._unlock()
        return self._unlock_future
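
# A minimal usage sketch for _LockProcess, added for illustration. It is an
# assumption, not code from the source: it presumes that AbstractPollTask
# provides async_start() and async_wait(), as SpawnProcess does in the tests
# above, and the function name below is hypothetical.
def _example_lock_and_unlock(path):
    loop = global_event_loop()
    lock = _LockProcess(path=path, scheduler=loop)
    # Start the lock-helper subprocess and wait for it to report, via the
    # pipe handled by _output_handler(), that the lock has been acquired.
    loop.run_until_complete(lock.async_start())
    loop.run_until_complete(lock.async_wait())
    assert lock.returncode == os.EX_OK
    # async_unlock() tells the subprocess to release the lock and returns
    # a Future whose result is set to None once the process has exited.
    loop.run_until_complete(lock.async_unlock())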