    def execute_helper(self,
                       code,
                       timeout=TIMEOUT,
                       silent=False,
                       store_history=True,
                       stop_on_error=True):
        msg_id = self.kc.execute(code=code,
                                 silent=silent,
                                 store_history=store_history,
                                 stop_on_error=stop_on_error)

        reply = self.get_non_kernel_info_reply(timeout=timeout)
        validate_message(reply, "execute_reply", msg_id)

        busy_msg = run_sync(self.kc.iopub_channel.get_msg)(timeout=1)
        validate_message(busy_msg, "status", msg_id)
        self.assertEqual(busy_msg["content"]["execution_state"], "busy")

        output_msgs = []
        while True:
            msg = run_sync(self.kc.iopub_channel.get_msg)(timeout=0.1)
            validate_message(msg, msg["msg_type"], msg_id)
            if msg["msg_type"] == "status":
                self.assertEqual(msg["content"]["execution_state"], "idle")
                break
            elif msg["msg_type"] == "execute_input":
                self.assertEqual(msg["content"]["code"], code)
                continue
            output_msgs.append(msg)

        return reply, output_msgs
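For context, a test method in the same class might drive this helper roughly as follows; this is a hypothetical sketch (the code string, the flush_channels call and the assertions are illustrative, not taken from the original suite):

    def test_print_output(self):
        # Hypothetical test built on execute_helper; assumes the usual
        # flush_channels helper shown elsewhere in these examples.
        self.flush_channels()
        reply, output_msgs = self.execute_helper(code="print('hi')")
        self.assertEqual(reply["content"]["status"], "ok")
        stream_text = "".join(msg["content"]["text"]
                              for msg in output_msgs
                              if msg["msg_type"] == "stream")
        self.assertIn("hi", stream_text)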
Example #2
    def execute_helper(self,
                       code,
                       timeout=TIMEOUT,
                       silent=False,
                       store_history=True,
                       stop_on_error=True):
        msg_id = self.kc.execute(code=code,
                                 silent=silent,
                                 store_history=store_history,
                                 stop_on_error=stop_on_error)

        reply = self.get_non_kernel_info_reply(timeout=timeout)
        validate_message(reply, 'execute_reply', msg_id)

        busy_msg = run_sync(self.kc.iopub_channel.get_msg)(timeout=1)
        validate_message(busy_msg, 'status', msg_id)
        self.assertEqual(busy_msg['content']['execution_state'], 'busy')

        output_msgs = []
        while True:
            msg = run_sync(self.kc.iopub_channel.get_msg)(timeout=0.1)
            validate_message(msg, msg['msg_type'], msg_id)
            if msg['msg_type'] == 'status':
                self.assertEqual(msg['content']['execution_state'], 'idle')
                break
            elif msg['msg_type'] == 'execute_input':
                self.assertEqual(msg['content']['code'], code)
                continue
            output_msgs.append(msg)

        return reply, output_msgs
Example #3
    def handle_sigint(signum, frame):
        # First proxy the signal to the subkernel, then run the original handler
        try:
            run_sync(km.signal_kernel)(signum)
        except RuntimeError as exc:
            LOG.error(
                f"Failed to interrupt subkernel {binding_name}: {exc}")
        binding.update_progress("Idle")
        orig_handler(signum, frame)
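This handler is meant to be installed only while waiting on the subkernel and removed afterwards; a hedged sketch of that wiring (km, binding, binding_name and the blocking wait itself are taken as given from the surrounding code):

    # Hypothetical wiring: capture the previous handler first, because
    # handle_sigint above delegates to orig_handler after proxying the signal.
    import signal
    orig_handler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, handle_sigint)
    try:
        ...  # the blocking wait on the subkernel goes here
    finally:
        signal.signal(signal.SIGINT, orig_handler)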
Example #4
def test_run_sync_clean_up_task(loop):
    async def coro_never_called():
        pytest.fail("The call to this coroutine is not expected")

    # Ensure that run_sync cancels the pending task
    with mock.patch.object(loop, "run_until_complete") as patched_loop:
        patched_loop.side_effect = KeyboardInterrupt
        with mock.patch("asyncio.ensure_future") as patched_ensure_future:
            mock_future = mock.Mock()
            patched_ensure_future.return_value = mock_future
            with pytest.raises(KeyboardInterrupt):
                run_sync(coro_never_called)()
            mock_future.cancel.assert_called_once()
            # Suppress 'coroutine ... was never awaited' warning
            patched_ensure_future.call_args[0][0].close()
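The test above pins down the contract these examples rely on: run_sync turns a coroutine function into a plain callable and cancels the pending task if the blocking wait is interrupted. A minimal standalone sketch with the same semantics (illustrative only, not the actual jupyter_client implementation):

import asyncio

def run_sync_sketch(coro_func):
    """Wrap a coroutine function so it can be called from blocking code."""
    def wrapped(*args, **kwargs):
        loop = asyncio.new_event_loop()
        task = asyncio.ensure_future(coro_func(*args, **kwargs), loop=loop)
        try:
            return loop.run_until_complete(task)
        except KeyboardInterrupt:
            # Mirror the behaviour the test checks: cancel the pending task
            # when the synchronous wait is interrupted.
            task.cancel()
            raise
        finally:
            loop.close()
    return wrapped

async def add(a, b):
    await asyncio.sleep(0)
    return a + b

print(run_sync_sketch(add)(1, 2))  # -> 3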
Example #5
    def handle_execute_reply(self, msg_id, timeout=None):
        kwargs = {"timeout": timeout}
        msg = run_sync(self.client.shell_channel.get_msg)(**kwargs)
        if msg["parent_header"].get("msg_id", None) == msg_id:

            self.handle_iopub(msg_id)

            content = msg["content"]
            status = content['status']

            if status == "aborted":
                sys.stdout.write("Aborted\n")
                return
            elif status == 'ok':
                # handle payloads
                for item in content.get("payload", []):
                    source = item['source']
                    if source == 'page':
                        page.page(item['data']['text/plain'])
                    elif source == 'set_next_input':
                        self.next_input = item['text']
                    elif source == 'ask_exit':
                        self.keepkernel = item.get('keepkernel', False)
                        self.ask_exit()

            elif status == 'error':
                pass

            self.execution_count = int(content["execution_count"] + 1)
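For reference, the reply content inspected above has roughly the following shape when payloads are present (abridged, illustrative values following the Jupyter messaging schema):

# Abridged execute_reply content with two of the payload kinds handled above.
example_content = {
    "status": "ok",            # or "error" / "aborted"
    "execution_count": 3,
    "payload": [
        {"source": "page", "data": {"text/plain": "help text shown in a pager"}},
        {"source": "set_next_input", "text": "next cell contents"},
    ],
}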
Example #6
    def run_cell(self, cell, store_history=True):
        """Run a complete IPython cell.

        Parameters
        ----------
        cell : str
          The code (including IPython code such as %magic functions) to run.
        store_history : bool
          If True, the raw and translated cell will be stored in IPython's
          history. For user code calling back into IPython's machinery, this
          should be set to False.
        """
        if (not cell) or cell.isspace():
            # pressing enter flushes any pending display
            self.handle_iopub()
            return

        # flush stale replies, which could have been ignored, due to missed heartbeats
        while run_sync(self.client.shell_channel.msg_ready)():
            run_sync(self.client.shell_channel.get_msg)()
        # execute takes 'hidden', which is the inverse of store_hist
        msg_id = self.client.execute(cell, not store_history)

        # first thing is wait for any side effects (output, stdin, etc.)
        self._executing = True
        self._execution_state = "busy"
        while self._execution_state != 'idle' and self.client.is_alive():
            try:
                self.handle_input_request(msg_id, timeout=0.05)
            except Empty:
                # display intermediate print statements, etc.
                self.handle_iopub(msg_id)
            except ZMQError as e:
                # Carry on if polling was interrupted by a signal
                if e.errno != errno.EINTR:
                    raise

        # after all of that is done, wait for the execute reply
        while self.client.is_alive():
            try:
                self.handle_execute_reply(msg_id, timeout=0.05)
            except Empty:
                pass
            else:
                break
        self._executing = False
Example #7
    def flush_channels(self):
        for channel in (self.kc.shell_channel, self.kc.iopub_channel):
            while True:
                try:
                    msg = run_sync(channel.get_msg)(timeout=0.1)
                except Empty:
                    break
                else:
                    validate_message(msg)
Example #8
    def start(self):
        """Start the polling of the kernel."""
        if self._pcallback is None:
            if asyncio.iscoroutinefunction(self.poll):
                cb = run_sync(self.poll)
            else:
                cb = self.poll
            # Use the (possibly wrapped) callback computed above.
            self._pcallback = ioloop.PeriodicCallback(
                cb, 1000 * self.time_to_dead,
            )
            self._pcallback.start()
Example #9
    def handle_input_request(self, msg_id, timeout=0.1):
        """ Method to capture raw_input
        """
        req = run_sync(self.client.stdin_channel.get_msg)(timeout=timeout)
        # in case any iopub came while we were waiting:
        self.handle_iopub(msg_id)
        if msg_id == req["parent_header"].get("msg_id"):
            # wrap SIGINT handler
            real_handler = signal.getsignal(signal.SIGINT)

            def double_int(sig, frame):
                # call real handler (forwards sigint to kernel),
                # then raise local interrupt, stopping local raw_input
                real_handler(sig, frame)
                raise KeyboardInterrupt

            signal.signal(signal.SIGINT, double_int)
            content = req['content']
            read = getpass if content.get('password', False) else input
            try:
                raw_data = read(content["prompt"])
            except EOFError:
                # turn EOFError into EOF character
                raw_data = '\x04'
            except KeyboardInterrupt:
                sys.stdout.write('\n')
                return
            finally:
                # restore SIGINT handler
                signal.signal(signal.SIGINT, real_handler)

            # only send stdin reply if there *was not* another request
            # or execution finished while we were reading.
            if not (run_sync(self.client.stdin_channel.msg_ready)()
                    or run_sync(self.client.shell_channel.msg_ready)()):
                self.client.input(raw_data)
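For reference, the stdin request consumed above carries roughly this content; password=True is what switches the reader from input to getpass (illustrative values):

# Abridged input_request message as read from the stdin channel above.
example_request = {
    "parent_header": {"msg_id": "<id of the originating execute request>"},
    "content": {"prompt": "Password: ", "password": True},
}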
Example #10
    def complete_request(self, code, cursor_pos):
        # Send a completion request to the kernel and give it up to
        # self.timeout seconds to respond.
        msg_id = self.client.complete(
            code=code,
            cursor_pos=cursor_pos,
        )

        msg = run_sync(self.client.shell_channel.get_msg)(timeout=self.timeout)
        if msg['parent_header']['msg_id'] == msg_id:
            return msg['content']

        return {
            'matches': [],
            'cursor_start': 0,
            'cursor_end': 0,
            'metadata': {},
            'status': 'ok'
        }
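A front end would typically splice one of the returned matches into its buffer using cursor_start and cursor_end; a runnable sketch on a made-up reply (field names follow the complete_reply content used above):

# Illustrative complete_reply content and how a frontend might apply the
# first match to the original buffer.
code = "pri"
content = {
    "status": "ok",
    "matches": ["print", "property"],
    "cursor_start": 0,
    "cursor_end": 3,
    "metadata": {},
}
if content["status"] == "ok" and content["matches"]:
    completed = (code[:content["cursor_start"]]
                 + content["matches"][0]
                 + code[content["cursor_end"]:])
    print(completed)  # -> print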
Example #11
    def handle_is_complete_reply(self, msg_id, timeout=None):
        """
        Wait for a response from the kernel, and return two values:
            more? - (boolean) should the frontend ask for more input
            indent - an indent string to prefix the input
        Overloaded methods may want to examine the complete source. It is
        in the self._source_lines_buffered list.
        """
        ## Get the is_complete response:
        msg = None
        try:
            kwargs = {"timeout": timeout}
            msg = run_sync(self.client.shell_channel.get_msg)(**kwargs)
        except Empty:
            warn('The kernel did not respond to an is_complete_request. '
                 'Setting `use_kernel_is_complete` to False.')
            self.use_kernel_is_complete = False
            return False, ""
        ## Handle response:
        if msg["parent_header"].get("msg_id", None) != msg_id:
            warn(
                'The kernel did not respond properly to an is_complete_request: %s.'
                % str(msg))
            return False, ""
        else:
            status = msg["content"].get("status", None)
            indent = msg["content"].get("indent", "")
        ## Return more? and indent string
        if status == "complete":
            return False, indent
        elif status == "incomplete":
            return True, indent
        elif status == "invalid":
            raise SyntaxError()
        elif status == "unknown":
            return False, indent
        else:
            warn('The kernel sent an invalid is_complete_reply status: "%s".' %
                 status)
            return False, indent
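For reference, the four statuses branched on above map to results as sketched below (illustrative reply contents; indent is only meaningful for "incomplete"):

# Illustrative is_complete_reply contents and the (more?, indent) value the
# method above derives from each.
examples = [
    ({"status": "complete", "indent": ""}, (False, "")),           # run the code
    ({"status": "incomplete", "indent": "    "}, (True, "    ")),  # ask for more
    ({"status": "unknown", "indent": ""}, (False, "")),            # kernel cannot tell
]
# A reply with {"status": "invalid"} makes the method raise SyntaxError instead.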
Example #12
class SpyderKernelManager(QtKernelManager):
    """
    Spyder kernels that live in a conda environment are now properly activated
    with custom activation scripts located at plugins/ipythonconsole/scripts.

    However, on Windows the batch script is terminated but not the kernel it
    started, so this subclass overrides the `_kill_kernel` method to properly
    kill the started kernels using psutil.
    """

    def __init__(self, *args, **kwargs):
        self.shutting_down = False
        return QtKernelManager.__init__(self, *args, **kwargs)

    @staticmethod
    def kill_proc_tree(pid, sig=signal.SIGTERM, include_parent=True,
                       timeout=None, on_terminate=None):
        """
        Kill a process tree (including grandchildren) with sig and return a
        (gone, still_alive) tuple.

        "on_terminate", if specified, is a callabck function which is called
        as soon as a child terminates.

        This is an new method not present in QtKernelManager.
        """
        assert pid != os.getpid()  # Won't kill myself!

        # This is necessary to avoid showing an error when restarting the
        # kernel after it failed to start in the first place.
        # Fixes spyder-ide/spyder#11872
        try:
            parent = psutil.Process(pid)
        except psutil.NoSuchProcess:
            return ([], [])

        children = parent.children(recursive=True)

        if include_parent:
            children.append(parent)

        for child_process in children:
            # This is necessary to avoid an error when restarting the
            # kernel that started a PyQt5 application in the background.
            # Fixes spyder-ide/spyder#13999
            try:
                child_process.send_signal(sig)
            except psutil.AccessDenied:
                return ([], [])

        gone, alive = psutil.wait_procs(
            children,
            timeout=timeout,
            callback=on_terminate,
        )

        return (gone, alive)

    def _kill_kernel_5(self):
        """
        Kill the running kernel.

        Override private method of jupyter_client 5 to be able to correctly
        close kernel that was started via a batch/bash script for correct conda
        env activation.
        """
        if self.has_kernel:

            # Signal the kernel to terminate (sends SIGKILL on Unix and calls
            # TerminateProcess() on Win32).
            try:
                if hasattr(signal, 'SIGKILL'):
                    self.signal_kernel(signal.SIGKILL)
                else:
                    # This is the additional line that was added to properly
                    # kill the kernel started by Spyder.
                    self.kill_proc_tree(self.kernel.pid)

                    self.kernel.kill()
            except OSError as e:
                # In Windows, we will get an Access Denied error if the process
                # has already terminated. Ignore it.
                if sys.platform == 'win32':
                    if e.winerror != 5:
                        raise
                # On Unix, we may get an ESRCH error if the process has already
                # terminated. Ignore it.
                else:
                    from errno import ESRCH
                    if e.errno != ESRCH:
                        raise

            # Block until the kernel terminates.
            self.kernel.wait()
            self.kernel = None
        else:
            raise RuntimeError("Cannot kill kernel. No kernel is running!")

    async def _async_kill_kernel(self, restart: bool = False) -> None:
        """Kill the running kernel.

        Override private method of jupyter_client 7 to be able to correctly
        close kernel that was started via a batch/bash script for correct conda
        env activation.
        """
        if self.has_kernel:
            assert self.provisioner is not None

            # This is the additional line that was added to properly
            # kill the kernel started by Spyder.
            self.kill_proc_tree(self.provisioner.process.pid)

            await self.provisioner.kill(restart=restart)

            # Wait until the kernel terminates.
            import asyncio
            try:
                await asyncio.wait_for(self._async_wait(), timeout=5.0)
            except asyncio.TimeoutError:
                # Wait timed out, just log warning but continue
                #  - not much more we can do.
                self.log.warning("Wait for final termination of kernel timed"
                                 " out - continuing...")
                pass
            else:
                # Process is no longer alive, wait and clear
                if self.has_kernel:
                    await self.provisioner.wait()

    # override alias for jupyter_client < 7
    if JUPYTER_CLIENT_GE_7:
        _kill_kernel = run_sync(_async_kill_kernel)
    else:
        _kill_kernel = _kill_kernel_5
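The last lines above expose a blocking `_kill_kernel` by wrapping the coroutine with run_sync on jupyter_client >= 7. The same aliasing pattern, shown standalone with a made-up class (Worker and _async_ping are assumptions; run_sync here is jupyter_client's, importable from jupyter_client.utils in recent releases):

from jupyter_client.utils import run_sync

class Worker:
    async def _async_ping(self):
        return "pong"

    # Blocking alias generated from the coroutine method, mirroring
    # _kill_kernel = run_sync(_async_kill_kernel) above.
    ping = run_sync(_async_ping)

print(Worker().ping())  # -> pong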
Example #13
    def handle_iopub(self, msg_id=''):
        """Process messages on the IOPub channel

           This method consumes and processes messages on the IOPub channel,
           such as stdout, stderr, execute_result and status.

           It only displays output that is caused by this session.
        """
        while run_sync(self.client.iopub_channel.msg_ready)():
            sub_msg = run_sync(self.client.iopub_channel.get_msg)()
            msg_type = sub_msg['header']['msg_type']

            # Update execution_count in case it changed in another session
            if msg_type == "execute_input":
                self.execution_count = int(
                    sub_msg["content"]["execution_count"]) + 1

            if self.include_output(sub_msg):
                if msg_type == 'status':
                    self._execution_state = sub_msg["content"][
                        "execution_state"]

                elif msg_type == 'stream':
                    if sub_msg["content"]["name"] == "stdout":
                        if self._pending_clearoutput:
                            print("\r", end="")
                            self._pending_clearoutput = False
                        print(sub_msg["content"]["text"], end="")
                        sys.stdout.flush()
                    elif sub_msg["content"]["name"] == "stderr":
                        if self._pending_clearoutput:
                            print("\r", file=sys.stderr, end="")
                            self._pending_clearoutput = False
                        print(sub_msg["content"]["text"],
                              file=sys.stderr,
                              end="")
                        sys.stderr.flush()

                elif msg_type == 'execute_result':
                    if self._pending_clearoutput:
                        print("\r", end="")
                        self._pending_clearoutput = False
                    self.execution_count = int(
                        sub_msg["content"]["execution_count"])
                    if not self.from_here(sub_msg):
                        sys.stdout.write(self.other_output_prefix)
                    format_dict = sub_msg["content"]["data"]
                    self.handle_rich_data(format_dict)

                    if 'text/plain' not in format_dict:
                        continue

                    # prompt_toolkit writes the prompt at a slightly lower level,
                    # so flush streams first to ensure correct ordering.
                    sys.stdout.flush()
                    sys.stderr.flush()
                    self.print_out_prompt()
                    text_repr = format_dict['text/plain']
                    if '\n' in text_repr:
                        # For multi-line results, start a new line after prompt
                        print()
                    print(text_repr)

                    # Remote: add new prompt
                    if not self.from_here(sub_msg):
                        sys.stdout.write('\n')
                        sys.stdout.flush()
                        self.print_remote_prompt()

                elif msg_type == 'display_data':
                    data = sub_msg["content"]["data"]
                    handled = self.handle_rich_data(data)
                    if not handled:
                        if not self.from_here(sub_msg):
                            sys.stdout.write(self.other_output_prefix)
                        # if it was an image, we handled it by now
                        if 'text/plain' in data:
                            print(data['text/plain'])

                # If execute input: print it
                elif msg_type == 'execute_input':
                    content = sub_msg['content']
                    ec = content.get('execution_count',
                                     self.execution_count - 1)

                    # New line
                    sys.stdout.write('\n')
                    sys.stdout.flush()

                    # With `Remote In [3]: `
                    self.print_remote_prompt(ec=ec)

                    # And the code
                    sys.stdout.write(content['code'] + '\n')

                elif msg_type == 'clear_output':
                    if sub_msg["content"]["wait"]:
                        self._pending_clearoutput = True
                    else:
                        print("\r", end="")

                elif msg_type == 'error':
                    for frame in sub_msg["content"]["traceback"]:
                        print(frame, file=sys.stderr)
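For reference, the IOPub messages branched on above look roughly like this (abridged, illustrative values):

# Abridged IOPub messages for three of the types handled above.
stream_msg = {
    "header": {"msg_type": "stream"},
    "content": {"name": "stdout", "text": "hello\n"},
}
status_msg = {
    "header": {"msg_type": "status"},
    "content": {"execution_state": "idle"},    # or "busy"
}
execute_result_msg = {
    "header": {"msg_type": "execute_result"},
    "content": {"execution_count": 4, "data": {"text/plain": "42"}},
}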
Example #14
class SpyderKernelManager(QtKernelManager):
    """
    Spyder kernels that live in a conda environment are now properly activated
    with custom activation scripts located at plugins/ipythonconsole/scripts.

    However, on Windows the batch script is terminated but not the kernel it
    started, so this subclass overrides the `_kill_kernel` method to properly
    kill the started kernels using psutil.
    """

    client_class = DottedObjectName(
        'spyder.plugins.ipythonconsole.utils.client.SpyderKernelClient')

    def __init__(self, *args, **kwargs):
        self.shutting_down = False
        return QtKernelManager.__init__(self, *args, **kwargs)

    @staticmethod
    async def kill_proc_tree(pid,
                             sig=signal.SIGTERM,
                             include_parent=True,
                             timeout=None,
                             on_terminate=None):
        """
        Kill a process tree (including grandchildren) with sig and return a
        (gone, still_alive) tuple.

        "on_terminate", if specified, is a callabck function which is called
        as soon as a child terminates.

        This is an new method not present in QtKernelManager.
        """
        assert pid != os.getpid()  # Won't kill myself!

        # This is necessary to avoid showing an error when restarting the
        # kernel after it failed to start in the first place.
        # Fixes spyder-ide/spyder#11872
        try:
            parent = psutil.Process(pid)
        except psutil.NoSuchProcess:
            return ([], [])

        children = parent.children(recursive=True)

        if include_parent:
            children.append(parent)

        for child_process in children:
            # This is necessary to avoid an error when restarting the
            # kernel that started a PyQt5 application in the background.
            # Fixes spyder-ide/spyder#13999
            try:
                child_process.send_signal(sig)
            except psutil.AccessDenied:
                return ([], [])

        gone, alive = psutil.wait_procs(
            children,
            timeout=timeout,
            callback=on_terminate,
        )

        return (gone, alive)

    async def _async_kill_kernel(self, restart: bool = False) -> None:
        """Kill the running kernel.
        Override private method of jupyter_client 7 to be able to correctly
        close kernel that was started via a batch/bash script for correct conda
        env activation.
        """
        if self.has_kernel:
            assert self.provisioner is not None

            # This is the additional line that was added to properly
            # kill the kernel started by Spyder.
            await self.kill_proc_tree(self.provisioner.process.pid)

            await self.provisioner.kill(restart=restart)

            # Wait until the kernel terminates.
            import asyncio
            try:
                await asyncio.wait_for(self._async_wait(), timeout=5.0)
            except asyncio.TimeoutError:
                # Wait timed out, just log warning but continue
                #  - not much more we can do.
                self.log.warning("Wait for final termination of kernel timed"
                                 " out - continuing...")
                pass
            else:
                # Process is no longer alive, wait and clear
                if self.has_kernel:
                    await self.provisioner.wait()

    _kill_kernel = run_sync(_async_kill_kernel)

    async def _async_send_kernel_sigterm(self, restart: bool = False) -> None:
        """similar to _kill_kernel, but with sigterm (not sigkill), but do not block"""
        if self.has_kernel:
            assert self.provisioner is not None

            # This is the line that was added to properly kill kernels started
            # by Spyder.
            await self.kill_proc_tree(self.provisioner.process.pid)

    _send_kernel_sigterm = run_sync(_async_send_kernel_sigterm)
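Outside a kernel manager, the same psutil pattern can be used on its own; a hedged standalone sketch (terminate_tree is a made-up helper name, not part of Spyder or jupyter_client):

import signal

import psutil

def terminate_tree(pid, sig=signal.SIGTERM, timeout=3):
    """Send sig to pid and all of its descendants, then wait for them."""
    try:
        parent = psutil.Process(pid)
    except psutil.NoSuchProcess:
        return [], []
    procs = parent.children(recursive=True) + [parent]
    for proc in procs:
        try:
            proc.send_signal(sig)
        except psutil.NoSuchProcess:
            pass  # already gone
    # Returns (gone, still_alive), just like psutil.wait_procs above.
    return psutil.wait_procs(procs, timeout=timeout)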
Example #15
    def subkernel_download(self, line):
        """Download a file or directory from a subkernel's file system."""
        args = parse_argstring(self.subkernel_download, line)
        run_sync(self.download_handler)(self.binding_manager.get(args.name),
                                        args.remote_path, args.local_path)