Example #1
    def _nailgunned_stdio(self, sock):
        """Redirects stdio to the connected socket speaking the nailgun protocol."""
        # Determine output tty capabilities from the environment.
        stdin_isatty, stdout_isatty, stderr_isatty = NailgunProtocol.isatty_from_env(
            self._env)

        # If all stdio is a tty, there's only one logical I/O device (the tty device). This happens to
        # be addressable as a file in OSX and Linux, so we take advantage of that and directly open the
        # character device for output redirection - eliminating the need to directly marshall any
        # interactive stdio back/forth across the socket and permitting full, correct tty control with
        # no middle-man.
        if all((stdin_isatty, stdout_isatty, stderr_isatty)):
            stdin_ttyname, stdout_ttyname, stderr_ttyname = NailgunProtocol.ttynames_from_env(
                self._env)
            assert stdin_ttyname == stdout_ttyname == stderr_ttyname, (
                'expected all stdio ttys to be the same, but instead got: {}\n'
                'please file a bug at http://github.com/pantsbuild/pants'.
                format([stdin_ttyname, stdout_ttyname, stderr_ttyname]))
            with open(stdin_ttyname, 'rb+', 0) as tty:
                tty_fileno = tty.fileno()
                with stdio_as(stdin_fd=tty_fileno,
                              stdout_fd=tty_fileno,
                              stderr_fd=tty_fileno):

                    def finalizer():
                        termios.tcdrain(tty_fileno)

                    yield finalizer
        else:
            stdio_writers = ((ChunkType.STDOUT, stdout_isatty),
                             (ChunkType.STDERR, stderr_isatty))
            types, ttys = zip(*(stdio_writers))
            with NailgunStreamStdinReader.open(sock, stdin_isatty) as stdin_fd,\
                 NailgunStreamWriter.open_multi(sock, types, ttys) as ((stdout_fd, stderr_fd), writer),\
                 stdio_as(stdout_fd=stdout_fd, stderr_fd=stderr_fd, stdin_fd=stdin_fd):
                # N.B. This will be passed to and called by the `DaemonExiter` prior to sending an
                # exit chunk, to avoid any socket shutdown vs write races.
                stdout, stderr = sys.stdout, sys.stderr

                def finalizer():
                    try:
                        stdout.flush()
                        stderr.flush()
                    finally:
                        time.sleep(
                            .001
                        )  # HACK: Sleep 1ms in the main thread to free the GIL.
                        writer.stop()
                        writer.join()
                        stdout.close()
                        stderr.close()

                yield finalizer
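Every example on this page uses stdio_as the same way: it temporarily points file descriptors 0, 1, 2 (and the sys.std* objects) at whatever descriptors it is given, with -1 standing in for /dev/null, and restores everything when the block exits. The helper itself is never shown here, so the following is only a minimal, self-contained sketch of what a dup2-based context manager with that contract could look like; the name stdio_as_sketch, the exact flush ordering, and the restore details are assumptions rather than the Pants implementation.

import os
import sys
from contextlib import contextmanager


@contextmanager
def stdio_as_sketch(stdin_fd, stdout_fd, stderr_fd):
    """Swap file descriptors 0, 1, 2 (and the sys.std* objects) for the duration of a block.

    As in the tests above, a value of -1 is taken to mean "redirect to /dev/null". This is an
    illustrative approximation only, not the Pants implementation.
    """
    opened = []  # fds we open ourselves (for /dev/null) and must close again

    def resolve(fd, flags):
        if fd == -1:
            fd = os.open(os.devnull, flags)
            opened.append(fd)
        return fd

    stdin_fd = resolve(stdin_fd, os.O_RDONLY)
    stdout_fd = resolve(stdout_fd, os.O_WRONLY)
    stderr_fd = resolve(stderr_fd, os.O_WRONLY)

    saved_fds = [os.dup(fd) for fd in (0, 1, 2)]         # copies of the real stdio fds
    saved_streams = (sys.stdin, sys.stdout, sys.stderr)  # and the Python-level stream objects
    sys.stdout.flush()
    sys.stderr.flush()
    for src, dst in zip((stdin_fd, stdout_fd, stderr_fd), (0, 1, 2)):
        os.dup2(src, dst)                                # point 0/1/2 at the new targets
    for fd in opened:
        os.close(fd)                                     # dup2 holds its own reference now
    sys.stdin = os.fdopen(0, "r", closefd=False)
    sys.stdout = os.fdopen(1, "w", closefd=False)
    sys.stderr = os.fdopen(2, "w", closefd=False)
    try:
        yield
    finally:
        sys.stdout.flush()
        sys.stderr.flush()
        for saved, dst in zip(saved_fds, (0, 1, 2)):
            os.dup2(saved, dst)                          # restore the original descriptors
            os.close(saved)
        sys.stdin, sys.stdout, sys.stderr = saved_streams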
Example #2
def test_close_stdio(mock_close):
    mock_options = unittest.mock.Mock()
    mock_options_values = unittest.mock.Mock()
    mock_options.for_global_scope.return_value = mock_options_values
    mock_options_values.pants_subprocessdir = "non_existent_dir"
    mock_server = unittest.mock.Mock()

    def create_services(bootstrap_options, legacy_graph_scheduler):
        return PantsServices()

    pantsd = PantsDaemon(
        native=Native(),
        work_dir="test_work_dir",
        log_level=logging.INFO,
        server=mock_server,
        core=PantsDaemonCore(create_services),
        metadata_base_dir="/tmp/pants_test_metadata_dir",
        bootstrap_options=mock_options,
    )

    with stdio_as(-1, -1, -1):
        handles = (sys.stdin, sys.stdout, sys.stderr)
        fds = [h.fileno() for h in handles]
        pantsd._close_stdio()
        mock_close.assert_has_calls(unittest.mock.call(x) for x in fds)
        for handle in handles:
            assert handle.closed is True
Example #3
    def assert_checker(self, relpath, contents, expected_code=0, expected_message=""):
        with temporary_dir() as td:
            with safe_open(os.path.join(td, relpath), "w") as fp:
                fp.write(contents)

            args = ["--root-dir={}".format(td)]
            for plugin_type in checker.plugins():
                opts = {"skip": False, "max_length": self._MAX_LENGTH, "ignore": ["E111"]}
                args.append("--{}-options={}".format(plugin_type.name(), json.dumps(opts)))
            args.append(relpath)

            with open(os.path.join(td, "stdout"), "w+") as stdout:
                with open(os.path.join(td, "stderr"), "w+") as stderr:
                    with stdio_as(
                        stdout_fd=stdout.fileno(), stderr_fd=stderr.fileno(), stdin_fd=-1
                    ):
                        with self.assertRaises(SystemExit) as error:
                            checker.main(args=args)

                    def read_stdio(fp):
                        fp.flush()
                        fp.seek(0)
                        return fp.read()

                    self.assertEqual(
                        expected_code,
                        error.exception.code,
                        "STDOUT:\n{}\nSTDERR:\n{}".format(read_stdio(stdout), read_stdio(stderr)),
                    )

                    self.assertEqual(expected_message, read_stdio(stdout).strip())
                    self.assertEqual("", read_stdio(stderr))
Example #4
    def _pantsd_logging(self) -> Iterator[None]:
        """A context manager that runs with pantsd logging.

        Asserts that stdio (represented by file handles 0, 1, 2) is closed to ensure that we can
        safely reuse those fd numbers.
        """

        # Ensure that stdio is closed so that we can safely reuse those file descriptors.
        for fd in (0, 1, 2):
            try:
                os.fdopen(fd)
                raise AssertionError(
                    f"pantsd logging cannot initialize while stdio is open: {fd}"
                )
            except OSError:
                pass

        # Redirect stdio to /dev/null for the rest of the run to reserve those file descriptors.
        with stdio_as(stdin_fd=-1, stdout_fd=-1, stderr_fd=-1):
            # Reinitialize logging for the daemon context.
            global_options = self._bootstrap_options.for_global_scope()
            setup_logging(global_options, stderr_logging=False)

            log_dir = os.path.join(self._work_dir, self.name)
            setup_logging_to_file(global_options.level,
                                  log_dir=log_dir,
                                  log_filename=self.LOG_NAME)

            self._logger.debug("Logging reinitialized in pantsd context")
            yield
Example #5
    def _nailgunned_stdio(self, sock):
        """Redirects stdio to the connected socket speaking the nailgun protocol."""
        # Determine output tty capabilities from the environment.
        stdin_isatty, stdout_isatty, stderr_isatty = NailgunProtocol.isatty_from_env(
            self._env)

        # Launch a thread to read stdin data from the socket (the only messages expected from the client
        # for the remainder of the protocol), and threads to copy from stdout/stderr pipes onto the
        # socket.
        with NailgunStreamWriter.open_multi(
               sock,
               (ChunkType.STDOUT, ChunkType.STDERR),
               None,
               (stdout_isatty, stderr_isatty)
             ) as ((stdout_fd, stderr_fd), writer),\
             NailgunStreamStdinReader.open(sock, stdin_isatty) as stdin_fd,\
             stdio_as(stdout_fd=stdout_fd, stderr_fd=stderr_fd, stdin_fd=stdin_fd):
            # N.B. This will be passed to and called by the `DaemonExiter` prior to sending an
            # exit chunk, to avoid any socket shutdown vs write races.
            stdout, stderr = sys.stdout, sys.stderr

            def finalizer():
                try:
                    stdout.flush()
                    stderr.flush()
                finally:
                    time.sleep(
                        .001
                    )  # HACK: Sleep 1ms in the main thread to free the GIL.
                    writer.stop()
                    writer.join()
                    stdout.close()
                    stderr.close()

            yield finalizer
Example #6
    def _pantsd_logging(self):
        """A context manager that runs with pantsd logging.

    Asserts that stdio (represented by file handles 0, 1, 2) is closed to ensure that
    we can safely reuse those fd numbers.
    """

        # Ensure that stdio is closed so that we can safely reuse those file descriptors.
        for fd in (0, 1, 2):
            try:
                os.fdopen(fd)
                raise AssertionError(
                    'pantsd logging cannot initialize while stdio is open: {}'.
                    format(fd))
            except OSError:
                pass

        # Redirect stdio to /dev/null for the rest of the run, to reserve those file descriptors
        # for further forks.
        with stdio_as(stdin_fd=-1, stdout_fd=-1, stderr_fd=-1):
            # Reinitialize logging for the daemon context.
            result = setup_logging(self._log_level,
                                   log_dir=self._log_dir,
                                   log_name=self.LOG_NAME)

            # Do a python-level redirect of stdout/stderr, which will not disturb `0,1,2`.
            # TODO: Consider giving these pipes/actual fds, in order to make them "deep" replacements
            # for `1,2`, and allow them to be used via `stdio_as`.
            sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO,
                                       result.log_handler)
            sys.stderr = _LoggerStream(logging.getLogger(), logging.WARN,
                                       result.log_handler)

            self._logger.debug('logging initialized')
            yield result.log_handler.stream
Example #7
  def _pantsd_logging(self):
    """A context manager that runs with pantsd logging.

    Asserts that stdio (represented by file handles 0, 1, 2) is closed to ensure that
    we can safely reuse those fd numbers.
    """

    # Ensure that stdio is closed so that we can safely reuse those file descriptors.
    for fd in (0, 1, 2):
      try:
        os.fdopen(fd)
        raise AssertionError(
            'pantsd logging cannot initialize while stdio is open: {}'.format(fd))
      except OSError:
        pass

    # Redirect stdio to /dev/null for the rest of the run, to reserve those file descriptors
    # for further forks.
    with stdio_as(stdin_fd=-1, stdout_fd=-1, stderr_fd=-1):
      # Reinitialize logging for the daemon context.
      init_rust_logger(self._log_level, self._log_show_rust_3rdparty)
      result = setup_logging(self._log_level, log_dir=self._log_dir, log_name=self.LOG_NAME, native=self._native)
      self._native.override_thread_logging_destination_to_just_pantsd()

      # Do a python-level redirect of stdout/stderr, which will not disturb `0,1,2`.
      # TODO: Consider giving these pipes/actual fds, in order to make them "deep" replacements
      # for `1,2`, and allow them to be used via `stdio_as`.
      sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO, result.log_handler)
      sys.stderr = _LoggerStream(logging.getLogger(), logging.WARN, result.log_handler)

      self._logger.debug('logging initialized')
      yield (result.log_handler.stream, result.log_handler.native_filename)
Example #8
 def __call__(
     self,
     command: str,
     args: Tuple[str, ...],
     env: Dict[str, str],
     working_directory: bytes,
     cancellation_latch: PySessionCancellationLatch,
     stdin_fd: int,
     stdout_fd: int,
     stderr_fd: int,
 ) -> ExitCode:
     request_timeout = float(env.get("PANTSD_REQUEST_TIMEOUT_LIMIT", -1))
     # NB: Order matters: we acquire a lock before mutating either `sys.std*`, `os.environ`, etc.
     with self._one_run_at_a_time(
             stderr_fd,
             cancellation_latch=cancellation_latch,
             timeout=request_timeout,
     ), stdio_as(stdin_fd=stdin_fd,
                 stdout_fd=stdout_fd,
                 stderr_fd=stderr_fd), hermetic_environment_as(
                     **env), argv_as((command, ) + args):
         # NB: Run implements exception handling, so only the most primitive errors will escape
         # this function, where they will be logged to the pantsd.log by the server.
         logger.info(f"handling request: `{' '.join(args)}`")
         try:
             return self.single_daemonized_run(working_directory.decode(),
                                               cancellation_latch)
         finally:
             logger.info(f"request completed: `{' '.join(args)}`")
Example #9
    def _pipe_stdio(self, sock, stdin_isatty, stdout_isatty, stderr_isatty):
        """Handles stdio redirection in the case of pipes and/or mixed pipes and ttys."""
        stdio_writers = ((ChunkType.STDOUT, stdout_isatty), (ChunkType.STDERR,
                                                             stderr_isatty))
        types, ttys = zip(*(stdio_writers))
        with NailgunStreamStdinReader.open(sock, stdin_isatty) as stdin_fd,\
             NailgunStreamWriter.open_multi(sock, types, ttys) as ((stdout_fd, stderr_fd), writer),\
             stdio_as(stdout_fd=stdout_fd, stderr_fd=stderr_fd, stdin_fd=stdin_fd):
            # N.B. This will be passed to and called by the `DaemonExiter` prior to sending an
            # exit chunk, to avoid any socket shutdown vs write races.
            stdout, stderr = sys.stdout, sys.stderr

            def finalizer():
                try:
                    stdout.flush()
                    stderr.flush()
                finally:
                    time.sleep(
                        .001
                    )  # HACK: Sleep 1ms in the main thread to free the GIL.
                    writer.stop()
                    writer.join()
                    stdout.close()
                    stderr.close()

            yield finalizer
Example #10
  def assert_checker(self, relpath, contents, expected_code=0, expected_message=''):
    with temporary_dir() as td:
      with safe_open(os.path.join(td, relpath), 'w') as fp:
        fp.write(contents)

      args=['--root-dir={}'.format(td)]
      for plugin_type in checker.plugins():
        opts = {'skip': False, 'max_length': self._MAX_LENGTH, 'ignore': ['E111']}
        args.append('--{}-options={}'.format(plugin_type.name(), json.dumps(opts)))
      args.append(relpath)

      with open(os.path.join(td, 'stdout'), 'w+') as stdout:
        with open(os.path.join(td, 'stderr'), 'w+') as stderr:
          with stdio_as(stdout_fd=stdout.fileno(), stderr_fd=stderr.fileno(), stdin_fd=-1):
            with self.assertRaises(SystemExit) as error:
              checker.main(args=args)

          def read_stdio(fp):
            fp.flush()
            fp.seek(0)
            return fp.read()

          self.assertEqual(expected_code, error.exception.code,
                           'STDOUT:\n{}\nSTDERR:\n{}'.format(read_stdio(stdout),
                                                             read_stdio(stderr)))

          self.assertEqual(expected_message, read_stdio(stdout).strip())
          self.assertEqual('', read_stdio(stderr))
Example #11
 def test_close_stdio(self, mock_close):
   with stdio_as(-1, -1, -1):
     handles = (sys.stdin, sys.stdout, sys.stderr)
     fds = [h.fileno() for h in handles]
     self.pantsd._close_stdio()
     mock_close.assert_has_calls(mock.call(x) for x in fds)
     for handle in handles:
       self.assertTrue(handle.closed, '{} was not closed'.format(handle))
Example #12
 def test_close_stdio(self, mock_close):
     with stdio_as(-1, -1, -1):
         handles = (sys.stdin, sys.stdout, sys.stderr)
         fds = [h.fileno() for h in handles]
         self.pantsd._close_stdio()
         mock_close.assert_has_calls(unittest.mock.call(x) for x in fds)
         for handle in handles:
             self.assertTrue(handle.closed, f"{handle} was not closed")
Example #13
    def test_close_fds(self, mock_close):
        mock_fd = mock.Mock()
        mock_fd.fileno.side_effect = [0, 1, 2]

        with stdio_as(mock_fd, mock_fd, mock_fd):
            self.pantsd._close_fds()

        self.assertEqual(mock_fd.close.call_count, 3)
        mock_close.assert_has_calls(mock.call(x) for x in [0, 1, 2])
Example #14
 def test_stdio_as_dev_null(self):
   # Capture output to tempfiles.
   with self._stdio_as_tempfiles():
     # Read/write from/to `/dev/null`, which will be validated by the harness as not
     # affecting the tempfiles.
     with stdio_as(stdout_fd=-1, stderr_fd=-1, stdin_fd=-1):
       self.assertEqual(b'', sys.stdin.read())
       print('garbage', file=sys.stdout)
       print('garbage', file=sys.stderr)
Example #15
  def test_close_fds(self, mock_close):
    mock_fd = mock.Mock()
    mock_fd.fileno.side_effect = [0, 1, 2]

    with stdio_as(mock_fd, mock_fd, mock_fd):
      self.pantsd._close_fds()

    self.assertEqual(mock_fd.close.call_count, 3)
    mock_close.assert_has_calls(mock.call(x) for x in [0, 1, 2])
Example #16
 def test_stdio_as_dev_null(self) -> None:
     # Capture output to tempfiles.
     with self._stdio_as_tempfiles():
         # Read/write from/to `/dev/null`, which will be validated by the harness as not
         # affecting the tempfiles.
         with stdio_as(stdout_fd=-1, stderr_fd=-1, stdin_fd=-1):
             self.assertEqual("", sys.stdin.read())
             print("garbage", file=sys.stdout)
             print("garbage", file=sys.stderr)
Example #17
    def test_close_fds(self):
        mock_stdout, mock_stderr, mock_stdin = mock.Mock(), mock.Mock(), mock.Mock()

        with stdio_as(mock_stdout, mock_stderr, mock_stdin):
            self.pantsd._close_fds()

        mock_stdout.close.assert_called_once_with()
        mock_stderr.close.assert_called_once_with()
        mock_stdin.close.assert_called_once_with()
Example #18
 def new_io(self, stdin_data):
   with temporary_dir() as iodir:
     stdin = os.path.join(iodir, 'stdin')
     stdout = os.path.join(iodir, 'stdout')
     stderr = os.path.join(iodir, 'stderr')
     with open(stdin, 'w') as fp:
       fp.write(stdin_data)
     with open(stdin, 'r') as inp, open(stdout, 'w') as out, open(stderr, 'w') as err:
       with stdio_as(stdin_fd=inp.fileno(), stdout_fd=out.fileno(), stderr_fd=err.fileno()):
         yield (stdin, stdout, stderr)
Example #19
    def test_stdio_as_stdin_default(self):
        old_stdout, old_stderr, old_stdin = sys.stdout, sys.stderr, sys.stdin

        with stdio_as(stdout=1, stderr=2):
            self.assertEqual(sys.stdout, 1)
            self.assertEqual(sys.stderr, 2)
            self.assertEqual(sys.stdin, old_stdin)

        self.assertEqual(sys.stdout, old_stdout)
        self.assertEqual(sys.stderr, old_stderr)
Example #20
    def test_close_fds(self):
        mock_stdout, mock_stderr, mock_stdin = mock.Mock(), mock.Mock(
        ), mock.Mock()

        with stdio_as(mock_stdout, mock_stderr, mock_stdin):
            self.pantsd._close_fds()

        mock_stdout.close.assert_called_once_with()
        mock_stderr.close.assert_called_once_with()
        mock_stdin.close.assert_called_once_with()
Example #21
 def new_io(self, stdin_data):
   with temporary_dir() as iodir:
     stdin = os.path.join(iodir, 'stdin')
     stdout = os.path.join(iodir, 'stdout')
     stderr = os.path.join(iodir, 'stderr')
     with open(stdin, 'w') as fp:
       fp.write(stdin_data)
     with open(stdin, 'r') as inp, open(stdout, 'w') as out, open(stderr, 'w') as err:
       with stdio_as(stdin_fd=inp.fileno(), stdout_fd=out.fileno(), stderr_fd=err.fileno()):
         yield (stdin, stdout, stderr)
Example #22
  def test_stdio_as_stdin_default(self):
    old_stdout, old_stderr, old_stdin = sys.stdout, sys.stderr, sys.stdin

    with stdio_as(stdout=1, stderr=2):
      self.assertEqual(sys.stdout, 1)
      self.assertEqual(sys.stderr, 2)
      self.assertEqual(sys.stdin, old_stdin)

    self.assertEqual(sys.stdout, old_stdout)
    self.assertEqual(sys.stderr, old_stderr)
Example #23
    def _pantsd_logging(self) -> Iterator[IO[str]]:
        """A context manager that runs with pantsd logging.

        Asserts that stdio (represented by file handles 0, 1, 2) is closed to ensure that we can
        safely reuse those fd numbers.
        """

        # Ensure that stdio is closed so that we can safely reuse those file descriptors.
        for fd in (0, 1, 2):
            try:
                os.fdopen(fd)
                raise AssertionError(
                    f"pantsd logging cannot initialize while stdio is open: {fd}"
                )
            except OSError:
                pass

        # Redirect stdio to /dev/null for the rest of the run, to reserve those file descriptors
        # for further forks.
        with stdio_as(stdin_fd=-1, stdout_fd=-1, stderr_fd=-1):
            # Reinitialize logging for the daemon context.
            init_rust_logger(self._log_level, self._log_show_rust_3rdparty)
            # We can't statically prove it, but we won't execute `launch()` (which
            # calls `run_sync` which calls `_pantsd_logging`) unless PantsDaemon
            # is launched with full_init=True. If PantsdDaemon is launched with
            # full_init=True, we can guarantee self._native and self._bootstrap_options
            # are non-None.
            native = cast(Native, self._native)
            bootstrap_options = cast(OptionValueContainer,
                                     self._bootstrap_options)

            level = self._log_level
            ignores = bootstrap_options.for_global_scope(
            ).ignore_pants_warnings
            clear_previous_loggers()
            setup_logging_to_stderr(level, warnings_filter_regexes=ignores)
            log_dir = os.path.join(self._work_dir, self.name)
            log_handler = setup_logging_to_file(
                level,
                log_dir=log_dir,
                log_filename=self.LOG_NAME,
                warnings_filter_regexes=ignores)

            native.override_thread_logging_destination_to_just_pantsd()

            # Do a python-level redirect of stdout/stderr, which will not disturb `0,1,2`.
            # TODO: Consider giving these pipes/actual fds, in order to make them "deep" replacements
            # for `1,2`, and allow them to be used via `stdio_as`.
            sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO,
                                       log_handler)  # type: ignore[assignment]
            sys.stderr = _LoggerStream(logging.getLogger(), logging.WARN,
                                       log_handler)  # type: ignore[assignment]

            self._logger.debug("logging initialized")
            yield log_handler.stream
Example #24
    def _nailgunned_stdio(self, sock):
        """Redirects stdio to the connected socket speaking the nailgun protocol."""
        # Determine output tty capabilities from the environment.
        stdin_isatty, stdout_isatty, stderr_isatty = NailgunProtocol.isatty_from_env(
            self._env)

        # Launch a thread to read stdin data from the socket (the only messages expected from the client
        # for the remainder of the protocol), and threads to copy from stdout/stderr pipes onto the
        # socket.
        with NailgunStreamStdinReader.open(sock, isatty=stdin_isatty) as stdin,\
             NailgunStreamWriter.open(sock, ChunkType.STDOUT, None, isatty=stdout_isatty) as stdout,\
             NailgunStreamWriter.open(sock, ChunkType.STDERR, None, isatty=stderr_isatty) as stderr:
            with stdio_as(stdout=stdout, stderr=stderr, stdin=stdin):
                yield
Example #25
    def _pantsd_logging(self) -> Iterator[IO[str]]:
        """A context manager that runs with pantsd logging.

        Asserts that stdio (represented by file handles 0, 1, 2) is closed to ensure that we can
        safely reuse those fd numbers.
        """

        # Ensure that stdio is closed so that we can safely reuse those file descriptors.
        for fd in (0, 1, 2):
            try:
                os.fdopen(fd)
                raise AssertionError(
                    f"pantsd logging cannot initialize while stdio is open: {fd}"
                )
            except OSError:
                pass

        # Redirect stdio to /dev/null for the rest of the run, to reserve those file descriptors
        # for further forks.
        with stdio_as(stdin_fd=-1, stdout_fd=-1, stderr_fd=-1):
            # Reinitialize logging for the daemon context.
            use_color = self._bootstrap_options.for_global_scope().colors
            init_rust_logger(self._log_level,
                             self._log_show_rust_3rdparty,
                             use_color=use_color)

            level = self._log_level
            ignores = self._bootstrap_options.for_global_scope(
            ).ignore_pants_warnings
            clear_logging_handlers()
            log_dir = os.path.join(self._work_dir, self.name)
            log_handler = setup_logging_to_file(
                level,
                log_dir=log_dir,
                log_filename=self.LOG_NAME,
                warnings_filter_regexes=ignores)

            self._native.override_thread_logging_destination_to_just_pantsd()

            # Do a python-level redirect of stdout/stderr, which will not disturb `0,1,2`.
            # TODO: Consider giving these pipes/actual fds, in order to make them "deep" replacements
            # for `1,2`, and allow them to be used via `stdio_as`.
            sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO,
                                       log_handler)  # type: ignore[assignment]
            sys.stderr = _LoggerStream(logging.getLogger(), logging.WARN,
                                       log_handler)  # type: ignore[assignment]

            self._logger.debug("logging initialized")
            yield log_handler.stream
Example #26
 def test_isort_check_only(self):
   isort_task = self._create_task(target_roots=[self.a_library], passthru_args=['--check-only'])
   with temporary_dir() as output_dir:
     with open(os.path.join(output_dir, 'stdout'), 'w+') as stdout:
       with stdio_as(stdout_fd=stdout.fileno(), stderr_fd=stdout.fileno(), stdin_fd=-1):
         try:
           isort_task.execute()
         except TaskError:
           stdout.flush()
           stdout.seek(0)
           output = stdout.read()
           self.assertIn("a_1.py Imports are incorrectly sorted.", output)
           self.assertIn("a_2.py Imports are incorrectly sorted.", output)
         else:
           fail("--check-only test for {} is supposed to fail, but passed.".format(self.a_library))
Example #27
 def test_isort_check_only(self):
   isort_task = self._create_task(target_roots=[self.a_library], passthru_args=['--check-only'])
   with temporary_dir() as output_dir:
     with open(os.path.join(output_dir, 'stdout'), 'w+') as stdout:
       with stdio_as(stdout_fd=stdout.fileno(), stderr_fd=stdout.fileno(), stdin_fd=-1):
         try:
           isort_task.execute()
         except TaskError:
           stdout.flush()
           stdout.seek(0)
           output = stdout.read()
           self.assertIn("a_1.py Imports are incorrectly sorted.", output)
           self.assertIn("a_2.py Imports are incorrectly sorted.", output)
         else:
           fail("--check-only test for {} is supposed to fail, but passed.".format(self.a_library))
Example #28
  def _nailgunned_stdio(self, sock):
    """Redirects stdio to the connected socket speaking the nailgun protocol."""
    # Determine output tty capabilities from the environment.
    _, stdout_isatty, stderr_isatty = NailgunProtocol.isatty_from_env(self._env)

    # TODO(kwlzn): Implement remote input reading and fix the non-fork()-safe sys.stdin reference
    # in NailgunClient to enable support for interactive goals like `repl` etc.

    # Construct StreamWriters for stdout, stderr.
    streams = (
      NailgunStreamWriter(sock, ChunkType.STDOUT, isatty=stdout_isatty),
      NailgunStreamWriter(sock, ChunkType.STDERR, isatty=stderr_isatty)
    )

    # Launch the stdin StreamReader and redirect stdio.
    with stdio_as(*streams):
      yield
Example #29
  def _nailgunned_stdio(self, sock):
    """Redirects stdio to the connected socket speaking the nailgun protocol."""
    # Determine output tty capabilities from the environment.
    _, stdout_isatty, stderr_isatty = NailgunProtocol.isatty_from_env(self._env)

    # TODO(kwlzn): Implement remote input reading and fix the non-fork()-safe sys.stdin reference
    # in NailgunClient to enable support for interactive goals like `repl` etc.

    # Construct StreamWriters for stdout, stderr.
    streams = (
      NailgunStreamWriter(sock, ChunkType.STDOUT, isatty=stdout_isatty),
      NailgunStreamWriter(sock, ChunkType.STDERR, isatty=stderr_isatty)
    )

    # Launch the stdin StreamReader and redirect stdio.
    with stdio_as(*streams):
      yield
Example #30
    def _pipe_stdio(cls, maybe_shutdown_socket, stdin_isatty, stdout_isatty,
                    stderr_isatty, handle_stdin):
        """Handles stdio redirection in the case of pipes and/or mixed pipes and ttys."""
        stdio_writers = ((ChunkType.STDOUT, stdout_isatty), (ChunkType.STDERR,
                                                             stderr_isatty))
        types, ttys = zip(*(stdio_writers))

        @contextmanager
        def maybe_handle_stdin(want):
            if want:
                with NailgunStreamStdinReader.open(maybe_shutdown_socket,
                                                   stdin_isatty) as fd:
                    yield fd
            else:
                with open("/dev/null", "rb") as fh:
                    yield fh.fileno()

        # TODO https://github.com/pantsbuild/pants/issues/7653
        with maybe_handle_stdin(
                handle_stdin) as stdin_fd, PipedNailgunStreamWriter.open_multi(
                    maybe_shutdown_socket.socket, types,
                    ttys) as ((stdout_pipe, stderr_pipe),
                              writer), stdio_as(stdout_fd=stdout_pipe.write_fd,
                                                stderr_fd=stderr_pipe.write_fd,
                                                stdin_fd=stdin_fd):
            # N.B. This will be passed to and called by the `DaemonExiter` prior to sending an
            # exit chunk, to avoid any socket shutdown vs write races.
            stdout, stderr = sys.stdout, sys.stderr

            def finalizer():
                try:
                    stdout.flush()
                    stderr.flush()
                finally:
                    time.sleep(
                        0.001
                    )  # HACK: Sleep 1ms in the main thread to free the GIL.
                    stdout_pipe.stop_writing()
                    stderr_pipe.stop_writing()
                    writer.join(timeout=60)
                    if writer.is_alive():
                        raise NailgunStreamWriterError(
                            "pantsd timed out while waiting for the stdout/err to finish writing to the socket."
                        )

            yield finalizer
Example #31
  def _nailgunned_stdio(self, sock):
    """Redirects stdio to the connected socket speaking the nailgun protocol."""
    # Determine output tty capabilities from the environment.
    _, stdout_isatty, stderr_isatty = NailgunProtocol.isatty_from_env(self._env)

    # Construct a StreamReader for stdin.
    stdin_reader = NailgunStreamReader(sys.stdin, sock)

    # Construct StreamWriters for stdout, stderr.
    streams = (
      NailgunStreamWriter(sock, ChunkType.STDOUT, isatty=stdout_isatty),
      NailgunStreamWriter(sock, ChunkType.STDERR, isatty=stderr_isatty),
      stdin_reader
    )

    # Launch the stdin StreamReader and redirect stdio.
    with stdin_reader.running(), stdio_as(*streams):
      yield
Example #32
 def _tty_stdio(self):
   """Handles stdio redirection in the case of all stdio descriptors being the same tty."""
   # If all stdio is a tty, there's only one logical I/O device (the tty device). This happens to
   # be addressable as a file in OSX and Linux, so we take advantage of that and directly open the
   # character device for output redirection - eliminating the need to directly marshall any
   # interactive stdio back/forth across the socket and permitting full, correct tty control with
   # no middle-man.
   stdin_ttyname, stdout_ttyname, stderr_ttyname = NailgunProtocol.ttynames_from_env(self._env)
   assert stdin_ttyname == stdout_ttyname == stderr_ttyname, (
     'expected all stdio ttys to be the same, but instead got: {}\n'
     'please file a bug at http://github.com/pantsbuild/pants'
     .format([stdin_ttyname, stdout_ttyname, stderr_ttyname])
   )
   with open(stdin_ttyname, 'rb+', 0) as tty:
     tty_fileno = tty.fileno()
     with stdio_as(stdin_fd=tty_fileno, stdout_fd=tty_fileno, stderr_fd=tty_fileno):
       def finalizer():
         termios.tcdrain(tty_fileno)
       yield finalizer
Example #33
 def _tty_stdio(cls, env):
   """Handles stdio redirection in the case of all stdio descriptors being the same tty."""
   # If all stdio is a tty, there's only one logical I/O device (the tty device). This happens to
   # be addressable as a file in OSX and Linux, so we take advantage of that and directly open the
   # character device for output redirection - eliminating the need to directly marshall any
   # interactive stdio back/forth across the socket and permitting full, correct tty control with
   # no middle-man.
   stdin_ttyname, stdout_ttyname, stderr_ttyname = NailgunProtocol.ttynames_from_env(env)
   assert stdin_ttyname == stdout_ttyname == stderr_ttyname, (
     'expected all stdio ttys to be the same, but instead got: {}\n'
     'please file a bug at http://github.com/pantsbuild/pants'
     .format([stdin_ttyname, stdout_ttyname, stderr_ttyname])
   )
   with open(stdin_ttyname, 'rb+', 0) as tty:
     tty_fileno = tty.fileno()
     with stdio_as(stdin_fd=tty_fileno, stdout_fd=tty_fileno, stderr_fd=tty_fileno):
       def finalizer():
         termios.tcdrain(tty_fileno)
       yield finalizer
Example #34
    def _stdio_as_tempfiles(self) -> Iterator[None]:
        """Harness to replace `sys.std*` with tempfiles.

        Validates that all files are read/written/flushed correctly, and acts as a contextmanager to
        allow for recursive tests.
        """

        # Prefix contents written within this instance with a unique string to differentiate
        # them from other instances.
        uuid_str = str(uuid.uuid4())

        def u(string: str) -> str:
            return f"{uuid_str}#{string}"

        stdin_data = u("stdio")
        stdout_data = u("stdout")
        stderr_data = u("stderr")

        with temporary_file(binary_mode=False) as tmp_stdin, temporary_file(
                binary_mode=False) as tmp_stdout, temporary_file(
                    binary_mode=False) as tmp_stderr:
            print(stdin_data, file=tmp_stdin)
            tmp_stdin.seek(0)
            # Read prepared content from stdin, and write content to stdout/stderr.
            with stdio_as(
                    stdout_fd=tmp_stdout.fileno(),
                    stderr_fd=tmp_stderr.fileno(),
                    stdin_fd=tmp_stdin.fileno(),
            ):
                self.assertEqual(sys.stdin.fileno(), 0)
                self.assertEqual(sys.stdout.fileno(), 1)
                self.assertEqual(sys.stderr.fileno(), 2)

                self.assertEqual(stdin_data, sys.stdin.read().strip())
                print(stdout_data, file=sys.stdout)
                yield
                print(stderr_data, file=sys.stderr)

            tmp_stdout.seek(0)
            tmp_stderr.seek(0)
            self.assertEqual(stdout_data, tmp_stdout.read().strip())
            self.assertEqual(stderr_data, tmp_stderr.read().strip())
Example #35
    def _nailgunned_stdio(self, sock):
        """Redirects stdio to the connected socket speaking the nailgun protocol."""
        # Determine output tty capabilities from the environment.
        _, stdout_isatty, stderr_isatty = NailgunProtocol.isatty_from_env(
            self._env)

        # Construct a StreamReader for stdin.
        stdin_reader = NailgunStreamReader(sys.stdin, sock)

        # Construct StreamWriters for stdout, stderr.
        streams = (NailgunStreamWriter(sock,
                                       ChunkType.STDOUT,
                                       isatty=stdout_isatty),
                   NailgunStreamWriter(sock,
                                       ChunkType.STDERR,
                                       isatty=stderr_isatty), stdin_reader)

        # Launch the stdin StreamReader and redirect stdio.
        with stdin_reader.running(), stdio_as(*streams):
            yield
Example #36
    def _pipe_stdio(cls, sock, stdin_isatty, stdout_isatty, stderr_isatty,
                    handle_stdin):
        """Handles stdio redirection in the case of pipes and/or mixed pipes and ttys."""
        stdio_writers = ((ChunkType.STDOUT, stdout_isatty), (ChunkType.STDERR,
                                                             stderr_isatty))
        types, ttys = zip(*(stdio_writers))

        @contextmanager
        def maybe_handle_stdin(want):
            if want:
                # TODO: Launching this thread pre-fork to handle @rule input currently results
                # in an unhandled SIGILL in `src/python/pants/engine/scheduler.py, line 313 in pre_fork`.
                # More work to be done here in https://github.com/pantsbuild/pants/issues/6005
                with NailgunStreamStdinReader.open(sock, stdin_isatty) as fd:
                    yield fd
            else:
                with open('/dev/null', 'rb') as fh:
                    yield fh.fileno()

        with maybe_handle_stdin(handle_stdin) as stdin_fd,\
             NailgunStreamWriter.open_multi(sock, types, ttys) as ((stdout_fd, stderr_fd), writer),\
             stdio_as(stdout_fd=stdout_fd, stderr_fd=stderr_fd, stdin_fd=stdin_fd):
            # N.B. This will be passed to and called by the `DaemonExiter` prior to sending an
            # exit chunk, to avoid any socket shutdown vs write races.
            stdout, stderr = sys.stdout, sys.stderr

            def finalizer():
                try:
                    stdout.flush()
                    stderr.flush()
                finally:
                    time.sleep(
                        .001
                    )  # HACK: Sleep 1ms in the main thread to free the GIL.
                    writer.stop()
                    writer.join()
                    stdout.close()
                    stderr.close()

            yield finalizer
Example #37
    def _stdio_as_tempfiles(self):
        """Harness to replace `sys.std*` with tempfiles.

    Validates that all files are read/written/flushed correctly, and acts as a
    contextmanager to allow for recursive tests.
    """

        # Prefix contents written within this instance with a unique string to differentiate
        # them from other instances.
        uuid_str = str(uuid.uuid4())

        def u(string):
            return '{}#{}'.format(uuid_str, string)

        stdin_data = u('stdio')
        stdout_data = u('stdout')
        stderr_data = u('stderr')

        with temporary_file() as tmp_stdin,\
             temporary_file() as tmp_stdout,\
             temporary_file() as tmp_stderr:
            print(stdin_data, file=tmp_stdin)
            tmp_stdin.seek(0)
            # Read prepared content from stdin, and write content to stdout/stderr.
            with stdio_as(stdout_fd=tmp_stdout.fileno(),
                          stderr_fd=tmp_stderr.fileno(),
                          stdin_fd=tmp_stdin.fileno()):
                self.assertEqual(sys.stdin.fileno(), 0)
                self.assertEqual(sys.stdout.fileno(), 1)
                self.assertEqual(sys.stderr.fileno(), 2)

                self.assertEqual(stdin_data, sys.stdin.read().strip())
                print(stdout_data, file=sys.stdout)
                yield
                print(stderr_data, file=sys.stderr)

            tmp_stdout.seek(0)
            tmp_stderr.seek(0)
            self.assertEqual(stdout_data, tmp_stdout.read().strip())
            self.assertEqual(stderr_data, tmp_stderr.read().strip())
Example #38
  def _stdio_as_tempfiles(self):
    """Harness to replace `sys.std*` with tempfiles.

    Validates that all files are read/written/flushed correctly, and acts as a
    contextmanager to allow for recursive tests.
    """

    # Prefix contents written within this instance with a unique string to differentiate
    # them from other instances.
    uuid_str = str(uuid.uuid4())
    def u(string):
      return '{}#{}'.format(uuid_str, string)
    stdin_data = u('stdio')
    stdout_data = u('stdout')
    stderr_data = u('stderr')

    with temporary_file() as tmp_stdin,\
         temporary_file() as tmp_stdout,\
         temporary_file() as tmp_stderr:
      print(stdin_data, file=tmp_stdin)
      tmp_stdin.seek(0)
      # Read prepared content from stdin, and write content to stdout/stderr.
      with stdio_as(stdout_fd=tmp_stdout.fileno(),
                    stderr_fd=tmp_stderr.fileno(),
                    stdin_fd=tmp_stdin.fileno()):
        self.assertEqual(sys.stdin.fileno(), 0)
        self.assertEqual(sys.stdout.fileno(), 1)
        self.assertEqual(sys.stderr.fileno(), 2)

        self.assertEqual(stdin_data, sys.stdin.read().strip())
        print(stdout_data, file=sys.stdout)
        yield
        print(stderr_data, file=sys.stderr)

      tmp_stdout.seek(0)
      tmp_stderr.seek(0)
      self.assertEqual(stdout_data, tmp_stdout.read().strip())
      self.assertEqual(stderr_data, tmp_stderr.read().strip())
Example #39
  def _pipe_stdio(cls, sock, stdin_isatty, stdout_isatty, stderr_isatty, handle_stdin):
    """Handles stdio redirection in the case of pipes and/or mixed pipes and ttys."""
    stdio_writers = (
      (ChunkType.STDOUT, stdout_isatty),
      (ChunkType.STDERR, stderr_isatty)
    )
    types, ttys = zip(*(stdio_writers))

    @contextmanager
    def maybe_handle_stdin(want):
      if want:
        # TODO: Launching this thread pre-fork to handle @rule input currently results
        # in an unhandled SIGILL in `src/python/pants/engine/scheduler.py, line 313 in pre_fork`.
        # More work to be done here in https://github.com/pantsbuild/pants/issues/6005
        with NailgunStreamStdinReader.open(sock, stdin_isatty) as fd:
          yield fd
      else:
        with open('/dev/null', 'rb') as fh:
          yield fh.fileno()

    with maybe_handle_stdin(handle_stdin) as stdin_fd,\
         NailgunStreamWriter.open_multi(sock, types, ttys) as ((stdout_fd, stderr_fd), writer),\
         stdio_as(stdout_fd=stdout_fd, stderr_fd=stderr_fd, stdin_fd=stdin_fd):
      # N.B. This will be passed to and called by the `DaemonExiter` prior to sending an
      # exit chunk, to avoid any socket shutdown vs write races.
      stdout, stderr = sys.stdout, sys.stderr
      def finalizer():
        try:
          stdout.flush()
          stderr.flush()
        finally:
          time.sleep(.001)  # HACK: Sleep 1ms in the main thread to free the GIL.
          writer.stop()
          writer.join()
          stdout.close()
          stderr.close()
      yield finalizer
Example #40
 def _pipe_stdio(self, sock, stdin_isatty, stdout_isatty, stderr_isatty):
   """Handles stdio redirection in the case of pipes and/or mixed pipes and ttys."""
   stdio_writers = (
     (ChunkType.STDOUT, stdout_isatty),
     (ChunkType.STDERR, stderr_isatty)
   )
   types, ttys = zip(*(stdio_writers))
   with NailgunStreamStdinReader.open(sock, stdin_isatty) as stdin_fd,\
        NailgunStreamWriter.open_multi(sock, types, ttys) as ((stdout_fd, stderr_fd), writer),\
        stdio_as(stdout_fd=stdout_fd, stderr_fd=stderr_fd, stdin_fd=stdin_fd):
     # N.B. This will be passed to and called by the `DaemonExiter` prior to sending an
     # exit chunk, to avoid any socket shutdown vs write races.
     stdout, stderr = sys.stdout, sys.stderr
     def finalizer():
       try:
         stdout.flush()
         stderr.flush()
       finally:
         time.sleep(.001)  # HACK: Sleep 1ms in the main thread to free the GIL.
         writer.stop()
         writer.join()
         stdout.close()
         stderr.close()
     yield finalizer
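Taken together, the examples reduce to one pattern: open real file descriptors, hand them to stdio_as for the duration of a block, and perform any teardown in a finalizer afterwards. Below is a minimal usage sketch along those lines, assuming the keyword-argument form of stdio_as used throughout this page and that it is importable from pants.util.contextutil (the import path is an assumption and may vary between Pants versions).

import sys
import tempfile

# Assumed import path; adjust to the Pants version at hand.
from pants.util.contextutil import stdio_as

with tempfile.TemporaryFile(mode="w+") as out, tempfile.TemporaryFile(mode="w+") as err:
    with stdio_as(stdout_fd=out.fileno(), stderr_fd=err.fileno(), stdin_fd=-1):
        print("captured on stdout")
        print("captured on stderr", file=sys.stderr)
        # Flush explicitly so the data reaches the descriptors before the block exits.
        sys.stdout.flush()
        sys.stderr.flush()
    out.seek(0)
    err.seek(0)
    assert out.read().strip() == "captured on stdout"
    assert err.read().strip() == "captured on stderr"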