Example #1
class MeasureControl(Control):
    _view_name = Unicode('LeafletMeasureControlView').tag(sync=True)
    _model_name = Unicode('LeafletMeasureControlModel').tag(sync=True)

    _length_units = ['feet', 'meters', 'miles', 'kilometers']
    _area_units = ['acres', 'hectares', 'sqfeet', 'sqmeters', 'sqmiles']
    _custom_units_dict = {}
    _custom_units = Dict().tag(sync=True)

    position = Enum(
        ['topright', 'topleft', 'bottomright', 'bottomleft'],
        default_value='topright',
        help="""Possible values are topleft, topright, bottomleft
                or bottomright"""
    ).tag(sync=True, o=True)

    primary_length_unit = Enum(
        values=_length_units,
        default_value='feet',
        help="""Possible values are feet, meters, miles, kilometers or any user
                defined unit"""
    ).tag(sync=True, o=True)

    secondary_length_unit = Enum(
        values=_length_units,
        default_value=None,
        allow_none=True,
        help="""Possible values are feet, meters, miles, kilometers or any user
                defined unit"""
    ).tag(sync=True, o=True)

    primary_area_unit = Enum(
        values=_area_units,
        default_value='acres',
        help="""Possible values are acres, hectares, sqfeet, sqmeters, sqmiles
                or any user defined unit"""
    ).tag(sync=True, o=True)

    secondary_area_unit = Enum(
        values=_area_units,
        default_value=None,
        allow_none=True,
        help="""Possible values are acres, hectares, sqfeet, sqmeters, sqmiles
                or any user defined unit"""
    ).tag(sync=True, o=True)

    active_color = Color('#ABE67E').tag(sync=True, o=True)
    completed_color = Color('#C8F2BE').tag(sync=True, o=True)

    popup_options = Dict({
        'className': 'leaflet-measure-resultpopup',
        'autoPanPadding': [10, 10]
    }).tag(sync=True, o=True)

    capture_z_index = Int(10000).tag(sync=True, o=True)

    def add_length_unit(self, name, factor, decimals=0):
        self._length_units.append(name)
        self._add_unit(name, factor, decimals)

    def add_area_unit(self, name, factor, decimals=0):
        self._area_units.append(name)
        self._add_unit(name, factor, decimals)

    def _add_unit(self, name, factor, decimals):
        self._custom_units_dict[name] = {
            'factor': factor,
            'display': name,
            'decimals': decimals
        }
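        # Reassign a fresh dict (rather than mutating in place) so traitlets
        # detects the change and syncs the custom units to the frontend.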
        self._custom_units = dict(**self._custom_units_dict)
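A short usage sketch for the control above (assumes ipyleaflet's public Map/add_control API; the 'leagues' unit and its conversion factor are purely illustrative):

from ipyleaflet import Map, MeasureControl

m = Map(center=(47.0, 2.0), zoom=5)
measure = MeasureControl(position='bottomleft',
                         primary_length_unit='kilometers',
                         active_color='orange')

# Register a hypothetical custom unit; the updated _custom_units dict is
# synced to the frontend because the trait is tagged sync=True.
measure.add_length_unit('leagues', 0.000207, decimals=4)
measure.secondary_length_unit = 'leagues'

m.add_control(measure)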
Example #2
class ExecutePreprocessor(Preprocessor):
    """
    Executes all the cells in a notebook
    """

    timeout = Integer(30,
                      allow_none=True,
                      help=dedent("""
            The time to wait (in seconds) for output from executions.
            If a cell execution takes longer, an exception (TimeoutError
            on python 3+, RuntimeError on python 2) is raised.

            `None` or `-1` will disable the timeout. If `timeout_func` is set,
            it overrides `timeout`.
            """)).tag(config=True)

    timeout_func = Any(default_value=None,
                       allow_none=True,
                       help=dedent("""
            A callable which, when given the cell source as input,
            returns the time to wait (in seconds) for output from cell
            executions. If a cell execution takes longer, an exception
            (TimeoutError on python 3+, RuntimeError on python 2) is
            raised.

            Returning `None` or `-1` will disable the timeout for the cell.
            Not setting `timeout_func` will cause the preprocessor to
            default to using the `timeout` trait for all cells. The
            `timeout_func` trait overrides `timeout` if it is not `None`.
            """)).tag(config=True)

    interrupt_on_timeout = Bool(False,
                                help=dedent("""
            If execution of a cell times out, interrupt the kernel and
            continue executing other cells rather than throwing an error and
            stopping.
            """)).tag(config=True)

    startup_timeout = Integer(60,
                              help=dedent("""
            The time to wait (in seconds) for the kernel to start.
            If kernel startup takes longer, a RuntimeError is
            raised.
            """)).tag(config=True)

    allow_errors = Bool(False,
                        help=dedent("""
            If `False` (default), when a cell raises an error the
            execution is stopped and a `CellExecutionError`
            is raised.
            If `True`, execution errors are ignored and the execution
            is continued until the end of the notebook. Output from
            exceptions is included in the cell output in both cases.
            """)).tag(config=True)

    force_raise_errors = Bool(False,
                              help=dedent("""
            If False (default), errors from executing the notebook can be
            allowed with a `raises-exception` tag on a single cell, or the
            `allow_errors` configurable option for all cells. An allowed error
            will be recorded in notebook output, and execution will continue.
            If an error occurs when it is not explicitly allowed, a
            `CellExecutionError` will be raised.
            If True, `CellExecutionError` will be raised for any error that occurs
            while executing the notebook. This overrides both the
            `allow_errors` option and the `raises-exception` cell tag.
            """)).tag(config=True)

    extra_arguments = List(Unicode())

    kernel_name = Unicode('',
                          help=dedent("""
            Name of kernel to use to execute the cells.
            If not set, use the kernel_spec embedded in the notebook.
            """)).tag(config=True)

    raise_on_iopub_timeout = Bool(False,
                                  help=dedent("""
            If `False` (default), then the kernel will continue waiting for
            iopub messages until it receives a kernel idle message, or until a
            timeout occurs, at which point the currently executing cell will be
            skipped. If `True`, then an error will be raised after the first
            timeout. This option generally does not need to be used, but may be
            useful in contexts where there is the possibility of executing
            notebooks with memory-consuming infinite loops.
            """)).tag(config=True)

    store_widget_state = Bool(True,
                              help=dedent("""
            If `True` (default), then the state of the Jupyter widgets created
            at the kernel will be stored in the metadata of the notebook.
            """)).tag(config=True)

    iopub_timeout = Integer(4,
                            allow_none=False,
                            help=dedent("""
            The time to wait (in seconds) for IOPub output. This generally
            doesn't need to be set, but on some slow networks (such as CI
            systems) the default timeout might not be long enough to get all
            messages.
            """)).tag(config=True)

    shutdown_kernel = Enum(['graceful', 'immediate'],
                           default_value='graceful',
                           help=dedent("""
            If `graceful` (default), then the kernel is given time to clean
            up after executing all cells, e.g., to execute its `atexit` hooks.
            If `immediate`, then the kernel is signaled to immediately
            terminate.
            """)).tag(config=True)

    ipython_hist_file = Unicode(
        default_value=':memory:',
        help="""Path to file to use for SQLite history database for an IPython kernel.

        The specific value `:memory:` (including the colons
        at both ends but not the back ticks) avoids creating a history file. Otherwise, IPython
        will create a history file for each kernel.

        When running kernels simultaneously (e.g. via multiprocessing), saving history to a single
        SQLite file can result in database errors, so using `:memory:` is recommended in non-interactive
        contexts.
        """).tag(config=True)

    kernel_manager_class = Type(config=True,
                                help='The kernel manager class to use.')

    @default('kernel_manager_class')
    def _kernel_manager_class_default(self):
        """Use a dynamic default to avoid importing jupyter_client at startup"""
        try:
            from jupyter_client import KernelManager
        except ImportError:
            raise ImportError(
                "`nbconvert --execute` requires the jupyter_client package: `pip install jupyter_client`"
            )
        return KernelManager

    _display_id_map = Dict(help=dedent("""
              mapping of locations of outputs with a given display_id
              tracks cell index and output index within cell.outputs for
              each appearance of the display_id
              {
                  'display_id': {
                      cell_idx: [output_idx,]
                  }
              }
              """))

    def start_new_kernel(self, **kwargs):
        """Creates a new kernel manager and kernel client.

        Parameters
        ----------
        kwargs :
            Any options for `self.kernel_manager_class.start_kernel()`. Because
            that defaults to KernelManager, this will likely include options
            accepted by `KernelManager.start_kernel()`, which includes `cwd`.

        Returns
        -------
        km : KernelManager
            A kernel manager as created by self.kernel_manager_class.
        kc : KernelClient
            Kernel client as created by the kernel manager `km`.
        """
        if not self.kernel_name:
            self.kernel_name = self.nb.metadata.get('kernelspec',
                                                    {}).get('name', 'python')
        km = self.kernel_manager_class(kernel_name=self.kernel_name,
                                       config=self.config)
        if km.ipykernel and self.ipython_hist_file:
            self.extra_arguments += [
                '--HistoryManager.hist_file={}'.format(self.ipython_hist_file)
            ]
        km.start_kernel(extra_arguments=self.extra_arguments, **kwargs)

        kc = km.client()
        kc.start_channels()
        try:
            kc.wait_for_ready(timeout=self.startup_timeout)
        except RuntimeError:
            kc.stop_channels()
            km.shutdown_kernel()
            raise
        kc.allow_stdin = False
        return km, kc

    @contextmanager
    def setup_preprocessor(self, nb, resources, km=None, **kwargs):
        """
        Context manager for setting up the class to execute a notebook.

        This assigns `nb` to `self.nb`, where it will be modified in-place. It also creates
        and assigns the Kernel Manager (`self.km`) and Kernel Client (`self.kc`).

        It is intended to yield to a block that will execute code.

        When control returns from the yield it stops the client's zmq channels, shuts
        down the kernel, and removes the now unused attributes.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being executed.
        resources : dictionary
            Additional resources used in the conversion process. For example,
            passing ``{'metadata': {'path': run_path}}`` sets the
            execution path to ``run_path``.
        km : KernelManager (optional)
            Optional kernel manager. If none is provided, a kernel manager will
            be created.

        Returns
        -------
        nb : NotebookNode
            The executed notebook.
        resources : dictionary
            Additional resources used in the conversion process.
        """
        path = resources.get('metadata', {}).get('path', '') or None
        self.nb = nb
        # clear display_id map
        self._display_id_map = {}
        self.widget_state = {}
        self.widget_buffers = {}

        if km is None:
            kwargs["cwd"] = path
            self.km, self.kc = self.start_new_kernel(**kwargs)
            try:
                # Yield unbound args for easier understanding and downstream consumption
                yield nb, self.km, self.kc
            finally:
                self.kc.stop_channels()
                self.km.shutdown_kernel(
                    now=self.shutdown_kernel == 'immediate')

                for attr in ['nb', 'km', 'kc']:
                    delattr(self, attr)
        else:
            self.km = km
            if not km.has_kernel:
                km.start_kernel(extra_arguments=self.extra_arguments, **kwargs)
            self.kc = km.client()

            self.kc.start_channels()
            try:
                self.kc.wait_for_ready(timeout=self.startup_timeout)
            except RuntimeError:
                self.kc.stop_channels()
                raise
            self.kc.allow_stdin = False
            try:
                yield nb, self.km, self.kc
            finally:
                for attr in ['nb', 'km', 'kc']:
                    delattr(self, attr)

    def preprocess(self, nb, resources=None, km=None):
        """
        Preprocess notebook executing each code cell.

        The input argument `nb` is modified in-place.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being executed.
        resources : dictionary (optional)
            Additional resources used in the conversion process. For example,
            passing ``{'metadata': {'path': run_path}}`` sets the
            execution path to ``run_path``.
        km: KernelManager (optional)
            Optional kernel manager. If none is provided, a kernel manager will
            be created.

        Returns
        -------
        nb : NotebookNode
            The executed notebook.
        resources : dictionary
            Additional resources used in the conversion process.
        """

        if not resources:
            resources = {}

        with self.setup_preprocessor(nb, resources, km=km):
            self.log.info("Executing notebook with kernel: %s" %
                          self.kernel_name)
            nb, resources = super(ExecutePreprocessor,
                                  self).preprocess(nb, resources)
            info_msg = self._wait_for_reply(self.kc.kernel_info())
            nb.metadata['language_info'] = info_msg['content']['language_info']
            self.set_widgets_metadata()

        return nb, resources

    def set_widgets_metadata(self):
        if self.widget_state:
            self.nb.metadata.widgets = {
                'application/vnd.jupyter.widget-state+json': {
                    'state': {
                        model_id: _serialize_widget_state(state)
                        for model_id, state in self.widget_state.items()
                        if '_model_name' in state
                    },
                    'version_major': 2,
                    'version_minor': 0,
                }
            }
            for key, widget in self.nb.metadata.widgets[
                    'application/vnd.jupyter.widget-state+json'][
                        'state'].items():
                buffers = self.widget_buffers.get(key)
                if buffers:
                    widget['buffers'] = buffers

    def preprocess_cell(self, cell, resources, cell_index, store_history=True):
        """
        Executes a single code cell. See base.py for details.

        To execute all cells see :meth:`preprocess`.
        """
        if cell.cell_type != 'code' or not cell.source.strip():
            return cell, resources

        reply, outputs = self.run_cell(cell, cell_index, store_history)
        # Backwards compatibility for processes that wrap run_cell
        cell.outputs = outputs

        cell_allows_errors = (self.allow_errors
                              or "raises-exception" in cell.metadata.get(
                                  "tags", []))

        if self.force_raise_errors or not cell_allows_errors:
            if (reply is not None) and reply['content']['status'] == 'error':
                raise CellExecutionError.from_cell_and_msg(
                    cell, reply['content'])
        return cell, resources

    def _update_display_id(self, display_id, msg):
        """Update outputs with a given display_id"""
        if display_id not in self._display_id_map:
            self.log.debug("display id %r not in %s", display_id,
                           self._display_id_map)
            return

        if msg['header']['msg_type'] == 'update_display_data':
            msg['header']['msg_type'] = 'display_data'

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg['msg_type'])
            return

        for cell_idx, output_indices in self._display_id_map[display_id].items():
            cell = self.nb['cells'][cell_idx]
            outputs = cell['outputs']
            for output_idx in output_indices:
                outputs[output_idx]['data'] = out['data']
                outputs[output_idx]['metadata'] = out['metadata']

    def _poll_for_reply(self, msg_id, cell=None, timeout=None):
        try:
            # wait (with timeout) for a reply on the shell channel
            msg = self.kc.shell_channel.get_msg(timeout=timeout)
            if msg['parent_header'].get('msg_id') == msg_id:
                return msg
        except Empty:
            # received no message, check if kernel is still alive
            self._check_alive()
            # kernel still alive, wait for a message

    def _get_timeout(self, cell):
        if self.timeout_func is not None and cell is not None:
            timeout = self.timeout_func(cell)
        else:
            timeout = self.timeout

        if not timeout or timeout < 0:
            timeout = None

        return timeout

    def _handle_timeout(self, timeout, cell=None):
        self.log.error("Timeout waiting for execute reply (%is)." % timeout)
        if self.interrupt_on_timeout:
            self.log.error("Interrupting kernel")
            self.km.interrupt_kernel()
        else:
            raise CellTimeoutError.error_from_timeout_and_cell(
                "Cell execution timed out", timeout, cell)

    def _check_alive(self):
        if not self.kc.is_alive():
            self.log.error("Kernel died while waiting for execute reply.")
            raise DeadKernelError("Kernel died")

    def _wait_for_reply(self, msg_id, cell=None):
        # wait for finish, with timeout
        timeout = self._get_timeout(cell)
        cumulative_time = 0
        timeout_interval = 5
        while True:
            try:
                msg = self.kc.shell_channel.get_msg(timeout=timeout_interval)
            except Empty:
                self._check_alive()
                cumulative_time += timeout_interval
                if timeout and cumulative_time > timeout:
                    self._handle_timeout(timeout, cell)
                    break
            else:
                if msg['parent_header'].get('msg_id') == msg_id:
                    return msg

    def _timeout_with_deadline(self, timeout, deadline):
        if deadline is not None and deadline - monotonic() < timeout:
            timeout = deadline - monotonic()

        if timeout < 0:
            timeout = 0

        return timeout

    def _passed_deadline(self, deadline):
        if deadline is not None and deadline - monotonic() <= 0:
            return True
        return False

    def run_cell(self, cell, cell_index=0, store_history=False):
        parent_msg_id = self.kc.execute(cell.source,
                                        store_history=store_history,
                                        stop_on_error=not self.allow_errors)
        self.log.debug("Executing cell:\n%s", cell.source)
        exec_timeout = self._get_timeout(cell)
        deadline = None
        if exec_timeout is not None:
            deadline = monotonic() + exec_timeout

        cell.outputs = []
        self.clear_before_next_output = False

        # This loop resolves #659. By polling iopub_channel's and shell_channel's
        # output we avoid dropping output and important signals (like idle) from
        # iopub_channel. Prior to this change, iopub_channel wasn't polled until
        # after exec_reply was obtained from shell_channel, leading to the
        # aforementioned dropped data.

        # These two variables are used to track what still needs polling:
        # more_output=true => continue to poll the iopub_channel
        more_output = True
        # polling_exec_reply=true => continue to poll the shell_channel
        polling_exec_reply = True

        while more_output or polling_exec_reply:
            if polling_exec_reply:
                if self._passed_deadline(deadline):
                    self._handle_timeout(exec_timeout, cell)
                    polling_exec_reply = False
                    continue

                # Avoid exceeding the execution timeout (deadline), but stop
                # after at most 1s so we can poll output from iopub_channel.
                timeout = self._timeout_with_deadline(1, deadline)
                exec_reply = self._poll_for_reply(parent_msg_id, cell, timeout)
                if exec_reply is not None:
                    polling_exec_reply = False

            if more_output:
                try:
                    timeout = self.iopub_timeout
                    if polling_exec_reply:
                        # Avoid exceeding the execution timeout (deadline) while
                        # polling for output.
                        timeout = self._timeout_with_deadline(
                            timeout, deadline)
                    msg = self.kc.iopub_channel.get_msg(timeout=timeout)
                except Empty:
                    if polling_exec_reply:
                        # Still waiting for execution to finish so we expect that
                        # output may not always be produced yet.
                        continue

                    if self.raise_on_iopub_timeout:
                        raise CellTimeoutError.error_from_timeout_and_cell(
                            "Timeout waiting for IOPub output",
                            self.iopub_timeout, cell)
                    else:
                        self.log.warning("Timeout waiting for IOPub output")
                        more_output = False
                        continue
                if msg['parent_header'].get('msg_id') != parent_msg_id:
                    # not an output from our execution
                    continue

                try:
                    # Will raise CellExecutionComplete when completed
                    self.process_message(msg, cell, cell_index)
                except CellExecutionComplete:
                    more_output = False

        # Return cell.outputs still for backwards compatibility
        return exec_reply, cell.outputs

    def process_message(self, msg, cell, cell_index):
        """
        Processes a kernel message, updates cell state, and returns the
        resulting output object that was appended to cell.outputs.

        The input argument `cell` is modified in-place.

        Parameters
        ----------
        msg : dict
            The kernel message being processed.
        cell : nbformat.NotebookNode
            The cell which is currently being processed.
        cell_index : int
            The position of the cell within the notebook object.

        Returns
        -------
        output : dict
            The execution output payload (or None for no output).

        Raises
        ------
        CellExecutionComplete
          Once a message arrives which indicates computation completeness.

        """
        msg_type = msg['msg_type']
        self.log.debug("msg_type: %s", msg_type)
        content = msg['content']
        self.log.debug("content: %s", content)

        display_id = content.get('transient', {}).get('display_id', None)
        if display_id and msg_type in {
                'execute_result', 'display_data', 'update_display_data'
        }:
            self._update_display_id(display_id, msg)

        # set the prompt number for the input and the output
        if 'execution_count' in content:
            cell['execution_count'] = content['execution_count']

        if msg_type == 'status':
            if content['execution_state'] == 'idle':
                raise CellExecutionComplete()
        elif msg_type == 'clear_output':
            self.clear_output(cell.outputs, msg, cell_index)
        elif msg_type.startswith('comm'):
            self.handle_comm_msg(cell.outputs, msg, cell_index)
        # Check for remaining messages we don't process
        elif msg_type not in ['execute_input', 'update_display_data']:
            # Assign output as our processed "result"
            return self.output(cell.outputs, msg, display_id, cell_index)

    def output(self, outs, msg, display_id, cell_index):
        msg_type = msg['msg_type']

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg_type)
            return

        if self.clear_before_next_output:
            self.log.debug('Executing delayed clear_output')
            outs[:] = []
            self.clear_display_id_mapping(cell_index)
            self.clear_before_next_output = False

        if display_id:
            # record output index in:
            #   _display_id_map[display_id][cell_idx]
            cell_map = self._display_id_map.setdefault(display_id, {})
            output_idx_list = cell_map.setdefault(cell_index, [])
            output_idx_list.append(len(outs))

        outs.append(out)

        return out

    def clear_output(self, outs, msg, cell_index):
        content = msg['content']
        if content.get('wait'):
            self.log.debug('Wait to clear output')
            self.clear_before_next_output = True
        else:
            self.log.debug('Immediate clear output')
            outs[:] = []
            self.clear_display_id_mapping(cell_index)

    def clear_display_id_mapping(self, cell_index):
        for display_id, cell_map in self._display_id_map.items():
            if cell_index in cell_map:
                cell_map[cell_index] = []

    def handle_comm_msg(self, outs, msg, cell_index):
        content = msg['content']
        data = content['data']
        if self.store_widget_state and 'state' in data:  # ignore custom msg'es
            self.widget_state.setdefault(content['comm_id'],
                                         {}).update(data['state'])
            if 'buffer_paths' in data and data['buffer_paths']:
                self.widget_buffers[content['comm_id']] = _get_buffer_data(msg)
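A minimal driver sketch for the preprocessor above (standard nbconvert usage; the notebook paths are placeholders):

import nbformat
from nbconvert.preprocessors import ExecutePreprocessor

with open('notebook.ipynb') as f:
    nb = nbformat.read(f, as_version=4)

ep = ExecutePreprocessor(timeout=600, kernel_name='python3')
# resources['metadata']['path'] becomes the working directory of the kernel.
ep.preprocess(nb, {'metadata': {'path': '.'}})

with open('executed.ipynb', 'w') as f:
    nbformat.write(nb, f)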
Example #3
class JuliaMagics(Magics):
    """A set of magics useful for interactive work with Julia.
    """

    highlight = Bool(
        True,
        config=True,
        help="""
        Enable highlighting in `%%julia` magic by monkey-patching
        IPython internal (`TerminalInteractiveShell`).
        """,
    )
    completion = Bool(
        True,
        config=True,
        help="""
        Enable code completion in `%julia` and `%%julia` magics by
        monkey-patching IPython internal (`IPCompleter`).
        """,
    )
    redirect_output_streams = Enum(
        ["auto", True, False],
        "auto",
        config=True,
        help="""
        Connect Julia's stdout and stderr to Python's standard streams.
        "auto" (default) means to do so only in Jupyter.
        """,
    )
    revise = Bool(
        False,
        config=True,
        help="""
        Enable Revise.jl integration.  Revise.jl must be installed
        before using this option (run `using Pkg; Pkg.add("Revise")`).
        """,
    )

    def __init__(self, shell):
        """
        Parameters
        ----------
        shell : IPython shell

        """

        super(JuliaMagics, self).__init__(shell)
        print("Initializing Julia interpreter. This may take some time...",
              end="")
        # Flush, otherwise the Julia startup will keep stdout buffered
        sys.stdout.flush()
        self._julia = Julia(init_julia=True)
        print()

    @no_var_expand
    @line_cell_magic
    def julia(self, line, cell=None):
        """
        Execute code in Julia, and pull some of the results back into the
        Python namespace.
        """
        src = compat.unicode_type(line if cell is None else cell)

        # fmt: off

        # We assume the caller's frame is the first parent frame not in the
        # IPython module. This seems to work with IPython back to ~v5, and
        # is at least somewhat immune to future IPython internals changes,
        # although by no means guaranteed to be perfect.
        caller_frame = sys._getframe(3)
        while caller_frame.f_globals.get('__name__', '').startswith("IPython"):
            caller_frame = caller_frame.f_back

        return self._julia.eval("""
        _PyJuliaHelper.@prepare_for_pyjulia_call begin %s end
        """ % src)(self.shell.user_ns, caller_frame.f_locals)
Example #4
class TerminalInteractiveShell(InteractiveShell):
    space_for_menu = Integer(6, help='Number of lines at the bottom of the screen '
                                     'to reserve for the completion menu'
                             ).tag(config=True)

    def _space_for_menu_changed(self, old, new):
        self._update_layout()

    pt_cli = None
    debugger_history = None
    _pt_app = None

    simple_prompt = Bool(_use_simple_prompt,
        help="""Use `raw_input` for the REPL, without completion, multiline input, and prompt colors.

            Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR. Known use cases are:
            IPython's own testing machinery, and emacs inferior-shell integration through elpy.

            This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT`
            environment variable is set, or the current terminal is not a tty.

            """
            ).tag(config=True)

    @property
    def debugger_cls(self):
        return Pdb if self.simple_prompt else TerminalPdb

    confirm_exit = Bool(True,
        help="""
        Set to confirm when you try to exit IPython with an EOF (Control-D
        in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
        you can force a direct exit without any confirmation.""",
    ).tag(config=True)

    editing_mode = Unicode('emacs',
        help="Shortcut style to use at the prompt. 'vi' or 'emacs'.",
    ).tag(config=True)

    mouse_support = Bool(False,
        help="Enable mouse support in the prompt"
    ).tag(config=True)

    highlighting_style = Unicode('legacy',
            help="The name of a Pygments style to use for syntax highlighting: \n %s" % ', '.join(get_all_styles())
    ).tag(config=True)

    
    @observe('highlighting_style')
    @observe('colors')
    def _highlighting_style_changed(self, change):
        self.refresh_style()

    def refresh_style(self):
        self._style = self._make_style_from_name(self.highlighting_style)


    highlighting_style_overrides = Dict(
        help="Override highlighting format for specific tokens"
    ).tag(config=True)

    true_color = Bool(False,
        help=("Use 24bit colors instead of 256 colors in prompt highlighting. "
              "If your terminal supports true color, the following command "
              "should print 'TRUECOLOR' in orange: "
              "printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\"")
    ).tag(config=True)

    editor = Unicode(get_default_editor(),
        help="Set the editor used by IPython (default to $EDITOR/vi/notepad)."
    ).tag(config=True)

    prompts_class = Type(Prompts, help='Class used to generate Prompt token for prompt_toolkit').tag(config=True)

    prompts = Instance(Prompts)

    @default('prompts')
    def _prompts_default(self):
        return self.prompts_class(self)

    @observe('prompts')
    def _(self, change):
        self._update_layout()

    @default('displayhook_class')
    def _displayhook_class_default(self):
        return RichPromptDisplayHook

    term_title = Bool(True,
        help="Automatically set the terminal title"
    ).tag(config=True)

    display_completions = Enum(('column', 'multicolumn', 'readlinelike'),
        help=("Options for displaying tab completions, 'column', 'multicolumn', and "
              "'readlinelike'. These options are for `prompt_toolkit`, see "
              "`prompt_toolkit` documentation for more information."
              ),
        default_value='multicolumn').tag(config=True)

    highlight_matching_brackets = Bool(True,
        help="Highlight matching brackets .",
    ).tag(config=True)

    @observe('term_title')
    def init_term_title(self, change=None):
        # Enable or disable the terminal title.
        if self.term_title:
            toggle_set_term_title(True)
            set_term_title('IPython: ' + abbrev_cwd())
        else:
            toggle_set_term_title(False)

    def init_display_formatter(self):
        super(TerminalInteractiveShell, self).init_display_formatter()
        # terminal only supports plain text
        self.display_formatter.active_types = ['text/plain']

    def init_prompt_toolkit_cli(self):
        if self.simple_prompt:
            # Fall back to plain non-interactive output for tests.
            # This is very limited, and only accepts a single line.
            def prompt():
                return cast_unicode_py2(input('In [%d]: ' % self.execution_count))
            self.prompt_for_code = prompt
            return

        # Set up keyboard shortcuts
        kbmanager = KeyBindingManager.for_prompt()
        register_ipython_shortcuts(kbmanager.registry, self)

        # Pre-populate history from IPython's history database
        history = InMemoryHistory()
        last_cell = u""
        for __, ___, cell in self.history_manager.get_tail(self.history_load_length,
                                                        include_latest=True):
            # Ignore blank lines and consecutive duplicates
            cell = cell.rstrip()
            if cell and (cell != last_cell):
                history.append(cell)

        self._style = self._make_style_from_name(self.highlighting_style)
        style = DynamicStyle(lambda: self._style)

        editing_mode = getattr(EditingMode, self.editing_mode.upper())

        self._pt_app = create_prompt_application(
                            editing_mode=editing_mode,
                            key_bindings_registry=kbmanager.registry,
                            history=history,
                            completer=IPythonPTCompleter(shell=self),
                            enable_history_search=True,
                            style=style,
                            mouse_support=self.mouse_support,
                            **self._layout_options()
        )
        self._eventloop = create_eventloop(self.inputhook)
        self.pt_cli = CommandLineInterface(
            self._pt_app, eventloop=self._eventloop,
            output=create_output(true_color=self.true_color))

    def _make_style_from_name(self, name):
        """
        Small wrapper that make an IPython compatible style from a style name

        We need that to add style for prompt ... etc. 
        """
        style_overrides = {}
        if name == 'legacy':
            legacy = self.colors.lower()
            if legacy == 'linux':
                style_cls = get_style_by_name('monokai')
                style_overrides = _style_overrides_linux
            elif legacy == 'lightbg':
                style_overrides = _style_overrides_light_bg
                style_cls = get_style_by_name('pastie')
            elif legacy == 'neutral':
                # The default theme needs to be visible on both a dark background
                # and a light background, because we can't tell what the terminal
                # looks like. These tweaks to the default theme help with that.
                style_cls = get_style_by_name('default')
                style_overrides.update({
                    Token.Number: '#007700',
                    Token.Operator: 'noinherit',
                    Token.String: '#BB6622',
                    Token.Name.Function: '#2080D0',
                    Token.Name.Class: 'bold #2080D0',
                    Token.Name.Namespace: 'bold #2080D0',
                    Token.Prompt: '#009900',
                    Token.PromptNum: '#00ff00 bold',
                    Token.OutPrompt: '#990000',
                    Token.OutPromptNum: '#ff0000 bold',
                })
            elif legacy == 'nocolor':
                style_cls = _NoStyle
                style_overrides = {}
            else:
                raise ValueError('Got unknown colors: ', legacy)
        else:
            style_cls = get_style_by_name(name)
            style_overrides = {
                Token.Prompt: '#009900',
                Token.PromptNum: '#00ff00 bold',
                Token.OutPrompt: '#990000',
                Token.OutPromptNum: '#ff0000 bold',
            }
        style_overrides.update(self.highlighting_style_overrides)
        style = PygmentsStyle.from_defaults(pygments_style_cls=style_cls,
                                            style_dict=style_overrides)

        return style

    def _layout_options(self):
        """
        Return the current layout option for the current Terminal InteractiveShell
        """
        return {
            'lexer': IPythonPTLexer(),
            'reserve_space_for_menu': self.space_for_menu,
            'get_prompt_tokens': self.prompts.in_prompt_tokens,
            'get_continuation_tokens': self.prompts.continuation_prompt_tokens,
            'multiline': True,
            'display_completions_in_columns': (self.display_completions == 'multicolumn'),

            # Highlight matching brackets, but only when this setting is
            # enabled, and only when the DEFAULT_BUFFER has the focus.
            'extra_input_processors': [ConditionalProcessor(
                processor=HighlightMatchingBracketProcessor(chars='[](){}'),
                filter=HasFocus(DEFAULT_BUFFER) & ~IsDone() &
                    Condition(lambda cli: self.highlight_matching_brackets))],
        }

    def _update_layout(self):
        """
        Ask for a re computation of the application layout, if for example ,
        some configuration options have changed.
        """
        if self._pt_app:
            self._pt_app.layout = create_prompt_layout(**self._layout_options())

    def prompt_for_code(self):
        document = self.pt_cli.run(
            pre_run=self.pre_prompt, reset_current_buffer=True)
        return document.text

    def enable_win_unicode_console(self):
        import win_unicode_console

        if PY3:
            win_unicode_console.enable()
        else:
            # https://github.com/ipython/ipython/issues/9768
            from win_unicode_console.streams import (TextStreamWrapper,
                                 stdout_text_transcoded, stderr_text_transcoded)

            class LenientStrStreamWrapper(TextStreamWrapper):
                def write(self, s):
                    if isinstance(s, bytes):
                        s = s.decode(self.encoding, 'replace')

                    self.base.write(s)

            stdout_text_str = LenientStrStreamWrapper(stdout_text_transcoded)
            stderr_text_str = LenientStrStreamWrapper(stderr_text_transcoded)

            win_unicode_console.enable(stdout=stdout_text_str,
                                       stderr=stderr_text_str)

    def init_io(self):
        if sys.platform not in {'win32', 'cli'}:
            return

        self.enable_win_unicode_console()

        import colorama
        colorama.init()

        # For some reason we make these wrappers around stdout/stderr.
        # For now, we need to reset them so all output gets coloured.
        # https://github.com/ipython/ipython/issues/8669
        # io.std* are deprecated, but don't show our own deprecation warnings
        # during initialization of the deprecated API.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            io.stdout = io.IOStream(sys.stdout)
            io.stderr = io.IOStream(sys.stderr)

    def init_magics(self):
        super(TerminalInteractiveShell, self).init_magics()
        self.register_magics(TerminalMagics)

    def init_alias(self):
        # The parent class defines aliases that can be safely used with any
        # frontend.
        super(TerminalInteractiveShell, self).init_alias()

        # Now define aliases that only make sense on the terminal, because they
        # need direct access to the console in a way that we can't emulate in
        # GUI or web frontend
        if os.name == 'posix':
            for cmd in ['clear', 'more', 'less', 'man']:
                self.alias_manager.soft_define_alias(cmd, cmd)


    def __init__(self, *args, **kwargs):
        super(TerminalInteractiveShell, self).__init__(*args, **kwargs)
        self.init_prompt_toolkit_cli()
        self.init_term_title()
        self.keep_running = True

        self.debugger_history = InMemoryHistory()

    def ask_exit(self):
        self.keep_running = False

    rl_next_input = None

    def pre_prompt(self):
        if self.rl_next_input:
            self.pt_cli.application.buffer.text = cast_unicode_py2(self.rl_next_input)
            self.rl_next_input = None

    def interact(self, display_banner=DISPLAY_BANNER_DEPRECATED):

        if display_banner is not DISPLAY_BANNER_DEPRECATED:
            warn('interact `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.', DeprecationWarning, stacklevel=2)

        self.keep_running = True
        while self.keep_running:
            print(self.separate_in, end='')

            try:
                code = self.prompt_for_code()
            except EOFError:
                if (not self.confirm_exit) \
                        or self.ask_yes_no('Do you really want to exit ([y]/n)?','y','n'):
                    self.ask_exit()

            else:
                if code:
                    self.run_cell(code, store_history=True)

    def mainloop(self, display_banner=DISPLAY_BANNER_DEPRECATED):
        # An extra layer of protection in case someone mashing Ctrl-C breaks
        # out of our internal code.
        if display_banner is not DISPLAY_BANNER_DEPRECATED:
            warn('mainloop `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.', DeprecationWarning, stacklevel=2)
        while True:
            try:
                self.interact()
                break
            except KeyboardInterrupt:
                print("\nKeyboardInterrupt escaped interact()\n")

    _inputhook = None
    def inputhook(self, context):
        if self._inputhook is not None:
            self._inputhook(context)

    def enable_gui(self, gui=None):
        if gui:
            self._inputhook = get_inputhook_func(gui)
        else:
            self._inputhook = None

    # Run !system commands directly, not through pipes, so terminal programs
    # work correctly.
    system = InteractiveShell.system_raw

    def auto_rewrite_input(self, cmd):
        """Overridden from the parent class to use fancy rewriting prompt"""
        if not self.show_rewritten_input:
            return

        tokens = self.prompts.rewrite_prompt_tokens()
        if self.pt_cli:
            self.pt_cli.print_tokens(tokens)
            print(cmd)
        else:
            prompt = ''.join(s for t, s in tokens)
            print(prompt, cmd, sep='')

    _prompts_before = None
    def switch_doctest_mode(self, mode):
        """Switch prompts to classic for %doctest_mode"""
        if mode:
            self._prompts_before = self.prompts
            self.prompts = ClassicPrompts(self)
        elif self._prompts_before:
            self.prompts = self._prompts_before
            self._prompts_before = None
        self._update_layout()
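Since the traits above are tagged config=True, they can be set from a profile's ipython_config.py; a minimal sketch (the values are illustrative):

# ipython_config.py
c = get_config()

c.TerminalInteractiveShell.editing_mode = 'vi'
c.TerminalInteractiveShell.highlighting_style = 'monokai'
c.TerminalInteractiveShell.true_color = True
c.TerminalInteractiveShell.confirm_exit = False
c.TerminalInteractiveShell.display_completions = 'readlinelike'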
Example #5
class IPCompleter(Completer):
    """Extension of the completer class with IPython-specific features"""
    @observe('greedy')
    def _greedy_changed(self, change):
        """update the splitter and readline delims when greedy is changed"""
        if change['new']:
            self.splitter.delims = GREEDY_DELIMS
        else:
            self.splitter.delims = DELIMS

    merge_completions = Bool(
        True,
        help="""Whether to merge completion results into a single list
        
        If False, only the completion results from the first non-empty
        completer will be returned.
        """).tag(config=True)
    omit__names = Enum(
        (0, 1, 2),
        default_value=2,
        help="""Instruct the completer to omit private method names
        
        Specifically, when completing on ``object.<tab>``.
        
        When 2 [default]: all names that start with '_' will be excluded.
        
        When 1: all 'magic' names (``__foo__``) will be excluded.
        
        When 0: nothing will be excluded.
        """).tag(config=True)
    limit_to__all__ = Bool(
        False,
        help="""
        DEPRECATED as of version 5.0.
        
        Instruct the completer to use __all__ for the completion
        
        Specifically, when completing on ``object.<tab>``.
        
        When True: only those names in obj.__all__ will be included.
        
        When False [default]: the __all__ attribute is ignored 
        """,
    ).tag(config=True)
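
    # Configuration sketch (hedged): like the shell traits earlier, these are
    # usually set via IPython's config system, e.g. in ipython_config.py:
    #   c.IPCompleter.merge_completions = False
    #   c.IPCompleter.omit__names = 1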

    def __init__(self,
                 shell=None,
                 namespace=None,
                 global_namespace=None,
                 use_readline=False,
                 config=None,
                 **kwargs):
        """IPCompleter() -> completer

        Return a completer object suitable for use by the readline library
        via readline.set_completer().

        Inputs:

        - shell: a pointer to the ipython shell itself.  This is needed
          because this completer knows about magic functions, and those can
          only be accessed via the ipython instance.

        - namespace: an optional dict where completions are performed.

        - global_namespace: secondary optional dict for completions, to
          handle cases (such as IPython embedded inside functions) where
          both Python scopes are visible.

        use_readline : bool, optional
          DEPRECATED, ignored.
        """

        self.magic_escape = ESC_MAGIC
        self.splitter = CompletionSplitter()

        if use_readline:
            warnings.warn(
                'The use_readline parameter is deprecated and ignored since IPython 6.0.',
                DeprecationWarning,
                stacklevel=2)

        # _greedy_changed() depends on splitter and readline being defined:
        Completer.__init__(self,
                           namespace=namespace,
                           global_namespace=global_namespace,
                           config=config,
                           **kwargs)

        # List where completion matches will be stored
        self.matches = []
        self.shell = shell
        # Regexp to split filenames with spaces in them
        self.space_name_re = re.compile(r'([^\\] )')
        # Hold a local ref. to glob.glob for speed
        self.glob = glob.glob

        # Determine if we are running on 'dumb' terminals, like (X)Emacs
        # buffers, to avoid completion problems.
        term = os.environ.get('TERM', 'xterm')
        self.dumb_terminal = term in ['dumb', 'emacs']

        # Special handling of backslashes needed in win32 platforms
        if sys.platform == "win32":
            self.clean_glob = self._clean_glob_win32
        else:
            self.clean_glob = self._clean_glob

        #regexp to parse docstring for function signature
        self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
        self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
        #use this if positional argument name is also needed
        #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')

        # All active matcher routines for completion
        self.matchers = [
            self.python_matches,
            self.file_matches,
            self.magic_matches,
            self.python_func_kw_matches,
            self.dict_key_matches,
        ]

        # This is set externally by InteractiveShell
        self.custom_completers = None

    def all_completions(self, text):
        """
        Wrapper around the complete method for the benefit of emacs.
        """
        return self.complete(text)[1]

    def _clean_glob(self, text):
        return self.glob("%s*" % text)

    def _clean_glob_win32(self, text):
        return [f.replace("\\", "/") for f in self.glob("%s*" % text)]

    def file_matches(self, text):
        """Match filenames, expanding ~USER type strings.

        Most of the seemingly convoluted logic in this completer is an
        attempt to handle filenames with spaces in them.  And yet it's not
        quite perfect, because Python's readline doesn't expose all of the
        GNU readline details needed for this to be done correctly.

        For a filename with a space in it, the printed completions will be
        only the parts after what's already been typed (instead of the
        full completions, as is normally done).  I don't think with the
        current (as of Python 2.3) Python readline it's possible to do
        better."""

        # chars that require escaping with backslash - i.e. chars
        # that readline treats incorrectly as delimiters, but we
        # don't want to treat as delimiters in filename matching
        # when escaped with backslash
        if text.startswith('!'):
            text = text[1:]
            text_prefix = u'!'
        else:
            text_prefix = u''

        text_until_cursor = self.text_until_cursor
        # track strings with open quotes
        open_quotes = has_open_quotes(text_until_cursor)

        if '(' in text_until_cursor or '[' in text_until_cursor:
            lsplit = text
        else:
            try:
                # arg_split ~ shlex.split, but with unicode bugs fixed by us
                lsplit = arg_split(text_until_cursor)[-1]
            except ValueError:
                # typically an unmatched ", or backslash without escaped char.
                if open_quotes:
                    lsplit = text_until_cursor.split(open_quotes)[-1]
                else:
                    return []
            except IndexError:
                # tab pressed on empty line
                lsplit = ""

        if not open_quotes and lsplit != protect_filename(lsplit):
            # if protectables are found, do matching on the whole escaped name
            has_protectables = True
            text0, text = text, lsplit
        else:
            has_protectables = False
            text = os.path.expanduser(text)

        if text == "":
            return [
                text_prefix + cast_unicode_py2(protect_filename(f))
                for f in self.glob("*")
            ]

        # Compute the matches from the filesystem
        if sys.platform == 'win32':
            m0 = self.clean_glob(text)
        else:
            m0 = self.clean_glob(text.replace('\\', ''))

        if has_protectables:
            # If we had protectables, we need to revert our changes to the
            # beginning of filename so that we don't double-write the part
            # of the filename we have so far
            len_lsplit = len(lsplit)
            matches = [
                text_prefix + text0 + protect_filename(f[len_lsplit:])
                for f in m0
            ]
        else:
            if open_quotes:
                # if we have a string with an open quote, we don't need to
                # protect the names at all (and we _shouldn't_, as it
                # would cause bugs when the filesystem call is made).
                matches = m0
            else:
                matches = [text_prefix + protect_filename(f) for f in m0]

        # Mark directories in input list by appending '/' to their names.
        return [
            cast_unicode_py2(x + '/') if os.path.isdir(x) else x
            for x in matches
        ]

    def magic_matches(self, text):
        """Match magics"""
        # Get all shell magics now rather than statically, so magics loaded at
        # runtime show up too.
        lsm = self.shell.magics_manager.lsmagic()
        line_magics = lsm['line']
        cell_magics = lsm['cell']
        pre = self.magic_escape
        pre2 = pre + pre

        # Completion logic:
        # - user gives %%: only do cell magics
        # - user gives %: do both line and cell magics
        # - no prefix: do both
        # In other words, line magics are skipped if the user gives %% explicitly
        bare_text = text.lstrip(pre)
        comp = [pre2 + m for m in cell_magics if m.startswith(bare_text)]
        if not text.startswith(pre2):
            comp += [pre + m for m in line_magics if m.startswith(bare_text)]
        return [cast_unicode_py2(c) for c in comp]

    def python_matches(self, text):
        """Match attributes or global python names"""
        if "." in text:
            try:
                matches = self.attr_matches(text)
                if text.endswith('.') and self.omit__names:
                    if self.omit__names == 1:
                        # true if txt is _not_ a __ name, false otherwise:
                        no__name = (
                            lambda txt: re.match(r'.*\.__.*?__', txt) is None)
                    else:
                        # true if txt is _not_ a _ name, false otherwise:
                        no__name = (lambda txt: re.match(
                            r'\._.*?', txt[txt.rindex('.'):]) is None)
                    matches = filter(no__name, matches)
            except NameError:
                # catches <undefined attributes>.<tab>
                matches = []
        else:
            matches = self.global_matches(text)
        return matches

    def _default_arguments_from_docstring(self, doc):
        """Parse the first line of docstring for call signature.

        Docstring should be of the form 'min(iterable[, key=func])\n'.
        It can also parse cython docstring of the form
        'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
        """
        if doc is None:
            return []

        # care only about the first line
        line = doc.lstrip().splitlines()[0]

        #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
        #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
        sig = self.docstring_sig_re.search(line)
        if sig is None:
            return []
        # 'iterable[, key=func]' -> ['iterable[', ' key=func]']
        sig = sig.groups()[0].split(',')
        ret = []
        for s in sig:
            #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
            ret += self.docstring_kwd_re.findall(s)
        return ret

    def _default_arguments(self, obj):
        """Return the list of default arguments of obj if it is callable,
        or empty list otherwise."""
        call_obj = obj
        ret = []
        if inspect.isbuiltin(obj):
            pass
        elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
            if inspect.isclass(obj):
                # for cython embedsignature=True the constructor docstring
                # belongs to the object itself, not __init__
                ret += self._default_arguments_from_docstring(
                    getattr(obj, '__doc__', ''))
                # for classes, check for __init__,__new__
                call_obj = (getattr(obj, '__init__', None)
                            or getattr(obj, '__new__', None))
            # for all others, check if they are __call__able
            elif hasattr(obj, '__call__'):
                call_obj = obj.__call__
        ret += self._default_arguments_from_docstring(
            getattr(call_obj, '__doc__', ''))

        _keeps = (inspect.Parameter.KEYWORD_ONLY,
                  inspect.Parameter.POSITIONAL_OR_KEYWORD)

        try:
            sig = inspect.signature(call_obj)
            ret.extend(k for k, v in sig.parameters.items()
                       if v.kind in _keeps)
        except ValueError:
            pass

        return list(set(ret))
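
    # A hedged illustration: for a plain function the result comes from
    # inspect.signature alone (there is no docstring to parse):
    #
    #   >>> def f(a, b=1, *, c=2): pass
    #   >>> sorted(completer._default_arguments(f))
    #   ['a', 'b', 'c']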

    def python_func_kw_matches(self, text):
        """Match named parameters (kwargs) of the last open function"""

        if "." in text:  # a parameter cannot be dotted
            return []
        try:
            regexp = self.__funcParamsRegex
        except AttributeError:
            regexp = self.__funcParamsRegex = re.compile(
                r'''
                '.*?(?<!\\)' |    # single quoted strings or
                ".*?(?<!\\)" |    # double quoted strings or
                \w+          |    # identifier
                \S                # other characters
                ''', re.VERBOSE | re.DOTALL)
        # 1. find the nearest identifier that comes before an unclosed
        # parenthesis before the cursor
        # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
        tokens = regexp.findall(self.text_until_cursor)
        iterTokens = reversed(tokens)
        openPar = 0

        for token in iterTokens:
            if token == ')':
                openPar -= 1
            elif token == '(':
                openPar += 1
                if openPar > 0:
                    # found the last unclosed parenthesis
                    break
        else:
            return []
        # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
        ids = []
        isId = re.compile(r'\w+$').match

        while True:
            try:
                ids.append(next(iterTokens))
                if not isId(ids[-1]):
                    ids.pop()
                    break
                if not next(iterTokens) == '.':
                    break
            except StopIteration:
                break

        # Find all named arguments already assigned to, so as to avoid
        # suggesting them again
        usedNamedArgs = set()
        par_level = -1
        for token, next_token in zip(tokens, tokens[1:]):
            if token == '(':
                par_level += 1
            elif token == ')':
                par_level -= 1

            if par_level != 0:
                continue

            if next_token != '=':
                continue

            usedNamedArgs.add(token)

        # lookup the candidate callable matches either using global_matches
        # or attr_matches for dotted names
        if len(ids) == 1:
            callableMatches = self.global_matches(ids[0])
        else:
            callableMatches = self.attr_matches('.'.join(ids[::-1]))
        argMatches = []
        for callableMatch in callableMatches:
            try:
                namedArgs = self._default_arguments(
                    eval(callableMatch, self.namespace))
            except:
                continue

            # Remove used named arguments from the list, no need to show twice
            for namedArg in set(namedArgs) - usedNamedArgs:
                if namedArg.startswith(text):
                    argMatches.append(u"%s=" % namedArg)
        return argMatches
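
    # A hedged illustration: with `def foo(passed=1, param=2): pass` in the
    # namespace and a buffer ending in "foo(1+bar(x), pa", this matcher
    # walks back to the unclosed "(", resolves "foo", and suggests (in no
    # guaranteed order):
    #
    #   ['passed=', 'param=']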

    def dict_key_matches(self, text):
        "Match string keys in a dictionary, after e.g. 'foo[' "

        def get_keys(obj):
            # Objects can define their own completions by defining an
            # _ipython_key_completions_() method.
            method = get_real_method(obj, '_ipython_key_completions_')
            if method is not None:
                return method()

            # Special case some common in-memory dict-like types
            if isinstance(obj, dict) or\
               _safe_isinstance(obj, 'pandas', 'DataFrame'):
                try:
                    return list(obj.keys())
                except Exception:
                    return []
            elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
                 _safe_isinstance(obj, 'numpy', 'void'):
                return obj.dtype.names or []
            return []

        try:
            regexps = self.__dict_key_regexps
        except AttributeError:
            dict_key_re_fmt = r'''(?x)
            (  # match dict-referring expression wrt greedy setting
                %s
            )
            \[   # open bracket
            \s*  # and optional whitespace
            ([uUbB]?  # string prefix (r not handled)
                (?:   # unclosed string
                    '(?:[^']|(?<!\\)\\')*
                |
                    "(?:[^"]|(?<!\\)\\")*
                )
            )?
            $
            '''
            regexps = self.__dict_key_regexps = {
                False:
                re.compile(dict_key_re_fmt % r'''
                                  # identifiers separated by .
                                  (?!\d)\w+
                                  (?:\.(?!\d)\w+)*
                                  '''),
                True:
                re.compile(dict_key_re_fmt % '''
                                 .+
                                 ''')
            }

        match = regexps[self.greedy].search(self.text_until_cursor)
        if match is None:
            return []

        expr, prefix = match.groups()
        try:
            obj = eval(expr, self.namespace)
        except Exception:
            try:
                obj = eval(expr, self.global_namespace)
            except Exception:
                return []

        keys = get_keys(obj)
        if not keys:
            return keys
        closing_quote, token_offset, matches = match_dict_keys(
            keys, prefix, self.splitter.delims)
        if not matches:
            return matches

        # get the cursor position of
        # - the text being completed
        # - the start of the key text
        # - the start of the completion
        text_start = len(self.text_until_cursor) - len(text)
        if prefix:
            key_start = match.start(2)
            completion_start = key_start + token_offset
        else:
            key_start = completion_start = match.end()

        # grab the leading prefix, to make sure all completions start with `text`
        if text_start > key_start:
            leading = ''
        else:
            leading = text[text_start:completion_start]

        # the index of the `[` character
        bracket_idx = match.end(1)

        # append closing quote and bracket as appropriate
        # this is *not* appropriate if the opening quote or bracket is outside
        # the text given to this method
        suf = ''
        continuation = self.line_buffer[len(self.text_until_cursor):]
        if key_start > text_start and closing_quote:
            # quotes were opened inside text, maybe close them
            if continuation.startswith(closing_quote):
                continuation = continuation[len(closing_quote):]
            else:
                suf += closing_quote
        if bracket_idx > text_start:
            # brackets were opened inside text, maybe close them
            if not continuation.startswith(']'):
                suf += ']'

        return [leading + k + suf for k in matches]
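
    # A hedged walk-through: with d = {'abc': 1, 'abd': 2} in the namespace
    # and a buffer ending in "d['ab", the regexp captures expr == "d" and
    # prefix == "'ab"; the matching keys complete the prefix, and because
    # the quote and bracket were opened inside the completed text, the
    # closing "']" is appended as computed above.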

    def unicode_name_matches(self, text):
        u"""Match Latex-like syntax for unicode characters base 
        on the name of the character.
        
        This does  \\GREEK SMALL LETTER ETA -> η

        Works only on valid python 3 identifier, or on combining characters that 
        will combine to form a valid identifier.
        
        Used on Python 3 only.
        """
        slashpos = text.rfind('\\')
        if slashpos > -1:
            s = text[slashpos + 1:]
            try:
                unic = unicodedata.lookup(s)
                # allow combining chars
                if ('a' + unic).isidentifier():
                    return '\\' + s, [unic]
            except KeyError:
                pass
        return u'', []
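
    # A hedged, doctest-style illustration (mirroring the docstring above):
    #
    #   >>> completer.unicode_name_matches('\\GREEK SMALL LETTER ETA')
    #   ('\\GREEK SMALL LETTER ETA', ['η'])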

    def latex_matches(self, text):
        u"""Match Latex syntax for unicode characters.
        
        This does both \\alp -> \\alpha and \\alpha -> α
        
        Used on Python 3 only.
        """
        slashpos = text.rfind('\\')
        if slashpos > -1:
            s = text[slashpos:]
            if s in latex_symbols:
                # Try to complete a full latex symbol to unicode
                # \\alpha -> α
                return s, [latex_symbols[s]]
            else:
                # If a user has partially typed a latex symbol, give them
                # a full list of options \al -> [\aleph, \alpha]
                matches = [k for k in latex_symbols if k.startswith(s)]
                return s, matches
        return u'', []
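
    # A hedged, doctest-style illustration (mirroring the docstring above):
    #
    #   >>> completer.latex_matches('\\alpha')
    #   ('\\alpha', ['α'])
    #   >>> text, matches = completer.latex_matches('\\al')
    #   >>> '\\aleph' in matches and '\\alpha' in matches
    #   True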

    def dispatch_custom_completer(self, text):
        if not self.custom_completers:
            return

        line = self.line_buffer
        if not line.strip():
            return None

        # Create a little structure to pass all the relevant information about
        # the current completion to any custom completer.
        event = Bunch()
        event.line = line
        event.symbol = text
        cmd = line.split(None, 1)[0]
        event.command = cmd
        event.text_until_cursor = self.text_until_cursor

        # for foo etc, try also to find completer for %foo
        if not cmd.startswith(self.magic_escape):
            try_magic = self.custom_completers.s_matches(self.magic_escape +
                                                         cmd)
        else:
            try_magic = []

        for c in itertools.chain(
                self.custom_completers.s_matches(cmd), try_magic,
                self.custom_completers.flat_matches(self.text_until_cursor)):
            try:
                res = c(event)
                if res:
                    # first, try case sensitive match
                    withcase = [
                        cast_unicode_py2(r) for r in res if r.startswith(text)
                    ]
                    if withcase:
                        return withcase
                    # if none, then case insensitive ones are ok too
                    text_low = text.lower()
                    return [
                        cast_unicode_py2(r) for r in res
                        if r.lower().startswith(text_low)
                    ]
            except TryNext:
                pass

        return None
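
    # A hedged sketch of how completers of this shape are usually
    # registered, via IPython's `complete_command` hook (`ip` here is an
    # InteractiveShell instance):
    #
    #   def apt_completer(self, event):
    #       # `event` carries .line, .symbol, .command, .text_until_cursor
    #       return ['update', 'upgrade', 'install']
    #
    #   ip.set_hook('complete_command', apt_completer, str_key='apt-get')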

    @_strip_single_trailing_space
    def complete(self, text=None, line_buffer=None, cursor_pos=None):
        """Find completions for the given text and line context.

        Note that both the text and the line_buffer are optional, but at least
        one of them must be given.

        Parameters
        ----------
          text : string, optional
            Text to perform the completion on.  If not given, the line buffer
            is split using the instance's CompletionSplitter object.

          line_buffer : string, optional
            If not given, the completer attempts to obtain the current line
            buffer via readline.  This keyword allows clients which are
            requesting for text completions in non-readline contexts to inform
            the completer of the entire text.

          cursor_pos : int, optional
            Index of the cursor in the full line buffer.  Should be provided by
            remote frontends where kernel has no access to frontend state.

        Returns
        -------
        text : str
          Text that was actually used in the completion.

        matches : list
          A list of completion matches.
        """
        # if the cursor position isn't given, the only sane assumption we can
        # make is that it's at the end of the line (the common case)
        if cursor_pos is None:
            cursor_pos = len(line_buffer) if text is None else len(text)

        if self.use_main_ns:
            self.namespace = __main__.__dict__

        if PY3:

            base_text = text if not line_buffer else line_buffer[:cursor_pos]
            latex_text, latex_matches = self.latex_matches(base_text)
            if latex_matches:
                return latex_text, latex_matches
            name_text = ''
            name_matches = []
            for meth in (self.unicode_name_matches, back_latex_name_matches,
                         back_unicode_name_matches):
                name_text, name_matches = meth(base_text)
                if name_text:
                    return name_text, name_matches

        # if text is either None or an empty string, rely on the line buffer
        if not text:
            text = self.splitter.split_line(line_buffer, cursor_pos)

        # If no line buffer is given, assume the input text is all there was
        if line_buffer is None:
            line_buffer = text

        self.line_buffer = line_buffer
        self.text_until_cursor = self.line_buffer[:cursor_pos]

        # Start with a clean slate of completions
        self.matches[:] = []
        custom_res = self.dispatch_custom_completer(text)
        if custom_res is not None:
            # did custom completers produce something?
            self.matches = custom_res
        else:
            # Extend the list of completions with the results of each
            # matcher, so we return results to the user from all
            # namespaces.
            if self.merge_completions:
                self.matches = []
                for matcher in self.matchers:
                    try:
                        self.matches.extend(matcher(text))
                    except:
                        # Show the ugly traceback if the matcher causes an
                        # exception, but do NOT crash the kernel!
                        sys.excepthook(*sys.exc_info())
            else:
                for matcher in self.matchers:
                    self.matches = matcher(text)
                    if self.matches:
                        break
        # FIXME: we should extend our api to return a dict with completions for
        # different types of objects.  The rlcomplete() method could then
        # simply collapse the dict into a list for readline, but we'd have
        # richer completion semantics in other environments.
        self.matches = sorted(set(self.matches), key=completions_sorting_key)

        return text, self.matches
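
The ``complete()`` entry point above follows the classic readline contract: given the text being completed (and optionally the full buffer and cursor position), return the text actually completed plus a list of matches. Below is a minimal, runnable sketch of the same contract using the standard library's rlcompleter, a much simpler analogue of the completer above, not IPython's implementation:

import rlcompleter

namespace = {'alpha': 1, 'alphabet': 'abc'}
completer = rlcompleter.Completer(namespace)

# rlcompleter uses readline's stateful API: call complete(text, state)
# with state = 0, 1, 2, ... until it returns None.
matches = []
state = 0
while True:
    match = completer.complete('alp', state)
    if match is None:
        break
    matches.append(match)
    state += 1

print(matches)  # -> ['alpha', 'alphabet']
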
Exemple #6
0
class Reply(Reference):
    status = Enum(('ok', 'error'), default_value='ok')
Exemple #7
0
class Status(Reference):
    execution_state = Enum(('busy', 'idle', 'starting'), default_value='busy')
Exemple #8
0
class InProcessKernel(IPythonKernel):

    #-------------------------------------------------------------------------
    # InProcessKernel interface
    #-------------------------------------------------------------------------

    # The frontends connected to this kernel.
    frontends = List(
        Instance('ipykernel.inprocess.client.InProcessKernelClient',
                 allow_none=True))

    # The GUI environment that the kernel is running under. This need not be
    # specified for normal operation of the kernel, but is required for
    # IPython's GUI support (including pylab). The default is 'inline' because
    # it is safe under all GUI toolkits.
    gui = Enum(('tk', 'gtk', 'wx', 'qt', 'qt4', 'inline'),
               default_value='inline')

    raw_input_str = Any()
    stdout = Any()
    stderr = Any()

    #-------------------------------------------------------------------------
    # Kernel interface
    #-------------------------------------------------------------------------

    shell_class = Type(allow_none=True)
    shell_streams = List()
    control_stream = Any()
    iopub_socket = Instance(DummySocket, ())
    stdin_socket = Instance(DummySocket, ())

    def __init__(self, **traits):
        super(InProcessKernel, self).__init__(**traits)

        self.iopub_socket.on_trait_change(self._io_dispatch, 'message_sent')
        self.shell.kernel = self

    def execute_request(self, stream, ident, parent):
        """ Override for temporary IO redirection. """
        with self._redirected_io():
            super(InProcessKernel, self).execute_request(stream, ident, parent)

    def start(self):
        """ Override registration of dispatchers for streams. """
        self.shell.exit_now = False

    def _abort_queue(self, stream):
        """ The in-process kernel doesn't abort requests. """
        pass

    def _input_request(self, prompt, ident, parent, password=False):
        # Flush output before making the request.
        self.raw_input_str = None
        sys.stderr.flush()
        sys.stdout.flush()

        # Send the input request.
        content = json_clean(dict(prompt=prompt, password=password))
        msg = self.session.msg(u'input_request', content, parent)
        for frontend in self.frontends:
            if frontend.session.session == parent['header']['session']:
                frontend.stdin_channel.call_handlers(msg)
                break
        else:
            logging.error('No frontend found for raw_input request')
            return str()

        # Await a response.
        while self.raw_input_str is None:
            frontend.stdin_channel.process_events()
        return self.raw_input_str

    #-------------------------------------------------------------------------
    # Protected interface
    #-------------------------------------------------------------------------

    @contextmanager
    def _redirected_io(self):
        """ Temporarily redirect IO to the kernel.
        """
        sys_stdout, sys_stderr = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = self.stdout, self.stderr
        try:
            yield
        finally:
            # restore the real streams even if the body raised
            sys.stdout, sys.stderr = sys_stdout, sys_stderr

    #------ Trait change handlers --------------------------------------------

    def _io_dispatch(self):
        """ Called when a message is sent to the IO socket.
        """
        ident, msg = self.session.recv(self.iopub_socket, copy=False)
        for frontend in self.frontends:
            frontend.iopub_channel.call_handlers(msg)

    #------ Trait initializers -----------------------------------------------

    def _log_default(self):
        return logging.getLogger(__name__)

    def _session_default(self):
        from jupyter_client.session import Session
        return Session(parent=self, key=b'')

    def _shell_class_default(self):
        return InProcessInteractiveShell

    def _stdout_default(self):
        from ipykernel.iostream import OutStream
        return OutStream(self.session,
                         self.iopub_socket,
                         u'stdout',
                         pipe=False)

    def _stderr_default(self):
        from ipykernel.iostream import OutStream
        return OutStream(self.session,
                         self.iopub_socket,
                         u'stderr',
                         pipe=False)
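
The _redirected_io context manager above is the heart of the in-process kernel's IO capture. Below is a standalone, runnable sketch of the same pattern (io.StringIO stands in for the kernel's OutStream objects):

import io
import sys
from contextlib import contextmanager

@contextmanager
def redirected_io(stream):
    """Temporarily point sys.stdout/sys.stderr at `stream`."""
    sys_stdout, sys_stderr = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = stream, stream
    try:
        yield stream
    finally:
        # restore the real streams even if the body raised
        sys.stdout, sys.stderr = sys_stdout, sys_stderr

buf = io.StringIO()
with redirected_io(buf):
    print('captured')
assert buf.getvalue() == 'captured\n'
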
Exemple #9
0
class WinHPCJob(Configurable):

    job_id = Unicode('')
    job_name = Unicode('MyJob', config=True)
    min_cores = Integer(1, config=True)
    max_cores = Integer(1, config=True)
    min_sockets = Integer(1, config=True)
    max_sockets = Integer(1, config=True)
    min_nodes = Integer(1, config=True)
    max_nodes = Integer(1, config=True)
    unit_type = Unicode("Core", config=True)
    auto_calculate_min = Bool(True, config=True)
    auto_calculate_max = Bool(True, config=True)
    run_until_canceled = Bool(False, config=True)
    is_exclusive = Bool(False, config=True)
    username = Unicode(find_username(), config=True)
    job_type = Unicode('Batch', config=True)
    priority = Enum(
        ('Lowest', 'BelowNormal', 'Normal', 'AboveNormal', 'Highest'),
        default_value='Highest',
        config=True)
    requested_nodes = Unicode('', config=True)
    project = Unicode('IPython', config=True)
    xmlns = Unicode('http://schemas.microsoft.com/HPCS2008/scheduler/')
    version = Unicode("2.000")
    tasks = List([])

    @property
    def owner(self):
        return self.username

    def _write_attr(self, root, attr, key):
        s = as_str(getattr(self, attr, ''))
        if s:
            root.set(key, s)

    def as_element(self):
        # We have to add _A_-style prefixes to get the attribute order that
        # the MSFT XML parser expects.
        root = ET.Element('Job')
        self._write_attr(root, 'version', '_A_Version')
        self._write_attr(root, 'job_name', '_B_Name')
        self._write_attr(root, 'unit_type', '_C_UnitType')
        self._write_attr(root, 'min_cores', '_D_MinCores')
        self._write_attr(root, 'max_cores', '_E_MaxCores')
        self._write_attr(root, 'min_sockets', '_F_MinSockets')
        self._write_attr(root, 'max_sockets', '_G_MaxSockets')
        self._write_attr(root, 'min_nodes', '_H_MinNodes')
        self._write_attr(root, 'max_nodes', '_I_MaxNodes')
        self._write_attr(root, 'run_until_canceled', '_J_RunUntilCanceled')
        self._write_attr(root, 'is_exclusive', '_K_IsExclusive')
        self._write_attr(root, 'username', '_L_UserName')
        self._write_attr(root, 'job_type', '_M_JobType')
        self._write_attr(root, 'priority', '_N_Priority')
        self._write_attr(root, 'requested_nodes', '_O_RequestedNodes')
        self._write_attr(root, 'auto_calculate_max', '_P_AutoCalculateMax')
        self._write_attr(root, 'auto_calculate_min', '_Q_AutoCalculateMin')
        self._write_attr(root, 'project', '_R_Project')
        self._write_attr(root, 'owner', '_S_Owner')
        self._write_attr(root, 'xmlns', '_T_xmlns')
        dependencies = ET.SubElement(root, "Dependencies")
        etasks = ET.SubElement(root, "Tasks")
        for t in self.tasks:
            etasks.append(t.as_element())
        return root

    def tostring(self):
        """Return the string representation of the job description XML."""
        root = self.as_element()
        indent(root)
        txt = ET.tostring(root, encoding="utf-8").decode('utf-8')
        # Now remove the tokens used to order the attributes.
        txt = re.sub(r'_[A-Z]_', '', txt)
        txt = '<?xml version="1.0" encoding="utf-8"?>\n' + txt
        return txt

    def write(self, filename):
        """Write the XML job description to a file."""
        txt = self.tostring()
        with open(filename, 'w') as f:
            f.write(txt)

    def add_task(self, task):
        """Add a task to the job.

        Parameters
        ----------
        task : :class:`WinHPCTask`
            The task object to add.
        """
        self.tasks.append(task)
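
The _A_/_B_ prefixes in as_element exist because older versions of ElementTree serialize attributes in sorted order, while the MSFT scheduler expects a specific attribute order; the prefixes force the sort and tostring strips them afterwards. A minimal, runnable reproduction of the trick:

import re
import xml.etree.ElementTree as ET

root = ET.Element('Job')
root.set('_A_Version', '2.000')
root.set('_B_Name', 'MyJob')
root.set('_C_UnitType', 'Core')

txt = ET.tostring(root, encoding='unicode')
txt = re.sub(r'_[A-Z]_', '', txt)  # strip the ordering tokens
print(txt)  # -> <Job Version="2.000" Name="MyJob" UnitType="Core" />
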
Exemple #10
0
class TaskScheduler(SessionFactory):
    """Python TaskScheduler object.

    This is the simplest object that supports msg_id based
    DAG dependencies. *Only* task msg_ids are checked, not
    msg_ids of jobs submitted via the MUX queue.

    """

    hwm = Integer(1, config=True,
        help="""specify the High Water Mark (HWM) for the downstream
        socket in the Task scheduler. This is the maximum number
        of allowed outstanding tasks on each engine.
        
        The default (1) means that only one task can be outstanding on each
        engine.  Setting TaskScheduler.hwm=0 means there is no limit, and the
        engines continue to be assigned tasks while they are working,
        effectively hiding network latency behind computation, but can result
        in an imbalance of work when submitting many heterogeneous tasks all at
        once.  Any positive value greater than one is a compromise between the
        two.

        """
    )
    scheme_name = Enum(
        ('leastload', 'pure', 'lru', 'plainrandom', 'weighted', 'twobin'),
        'leastload', config=True,
        help="""select the task scheduler scheme [default: Python LRU]
        Options are: 'pure', 'lru', 'plainrandom', 'weighted', 'twobin', 'leastload'"""
    )
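
    # A hedged configuration sketch: both traits above are config=True, so
    # they can be set from a config file, e.g.:
    #
    #   c.TaskScheduler.hwm = 0                    # unlimited pipelining
    #   c.TaskScheduler.scheme_name = 'leastload'
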
    def _scheme_name_changed(self, old, new):
        self.log.debug("Using scheme %r"%new)
        self.scheme = globals()[new]

    # input arguments:
    scheme = Instance(FunctionType) # function for determining the destination
    def _scheme_default(self):
        return leastload
    client_stream = Instance(zmqstream.ZMQStream, allow_none=True) # client-facing stream
    engine_stream = Instance(zmqstream.ZMQStream, allow_none=True) # engine-facing stream
    notifier_stream = Instance(zmqstream.ZMQStream, allow_none=True) # hub-facing sub stream
    mon_stream = Instance(zmqstream.ZMQStream, allow_none=True) # hub-facing pub stream
    query_stream = Instance(zmqstream.ZMQStream, allow_none=True) # hub-facing DEALER stream

    # internals:
    queue = Instance(deque) # sorted list of Jobs
    def _queue_default(self):
        return deque()
    queue_map = Dict() # dict by msg_id of Jobs (for O(1) access to the Queue)
    graph = Dict() # dict by msg_id of [ msg_ids that depend on key ]
    retries = Dict() # dict by msg_id of retries remaining (non-neg ints)
    # waiting = List() # list of msg_ids ready to run, but haven't due to HWM
    pending = Dict() # dict by engine_uuid of submitted tasks
    completed = Dict() # dict by engine_uuid of completed tasks
    failed = Dict() # dict by engine_uuid of failed tasks
    destinations = Dict() # dict by msg_id of engine_uuids where jobs ran (reverse of completed+failed)
    clients = Dict() # dict by msg_id for who submitted the task
    targets = List() # list of target IDENTs
    loads = List() # list of engine loads
    # full = Set() # set of IDENTs that have HWM outstanding tasks
    all_completed = Set() # set of all completed tasks
    all_failed = Set() # set of all failed tasks
    all_done = Set() # set of all finished tasks=union(completed,failed)
    all_ids = Set() # set of all submitted task IDs

    ident = CBytes() # ZMQ identity. This should just be self.session.session
                     # but ensure Bytes
    def _ident_default(self):
        return self.session.bsession

    def start(self):
        self.query_stream.on_recv(self.dispatch_query_reply)
        self.session.send(self.query_stream, "connection_request", {})
        
        self.engine_stream.on_recv(self.dispatch_result, copy=False)
        self.client_stream.on_recv(self.dispatch_submission, copy=False)

        self._notification_handlers = dict(
            registration_notification = self._register_engine,
            unregistration_notification = self._unregister_engine
        )
        self.notifier_stream.on_recv(self.dispatch_notification)
        self.log.info("Scheduler started [%s]" % self.scheme_name)

    def resume_receiving(self):
        """Resume accepting jobs."""
        self.client_stream.on_recv(self.dispatch_submission, copy=False)

    def stop_receiving(self):
        """Stop accepting jobs while there are no engines.
        Leave them in the ZMQ queue."""
        self.client_stream.on_recv(None)

    #-----------------------------------------------------------------------
    # [Un]Registration Handling
    #-----------------------------------------------------------------------
    
    
    def dispatch_query_reply(self, msg):
        """handle reply to our initial connection request"""
        try:
            idents,msg = self.session.feed_identities(msg)
        except ValueError:
            self.log.warn("task::Invalid Message: %r",msg)
            return
        try:
            msg = self.session.deserialize(msg)
        except ValueError:
            self.log.warn("task::Unauthorized message from: %r"%idents)
            return
        
        content = msg['content']
        for uuid in content.get('engines', {}).values():
            self._register_engine(cast_bytes(uuid))

    
    @util.log_errors
    def dispatch_notification(self, msg):
        """dispatch register/unregister events."""
        try:
            idents,msg = self.session.feed_identities(msg)
        except ValueError:
            self.log.warn("task::Invalid Message: %r",msg)
            return
        try:
            msg = self.session.deserialize(msg)
        except ValueError:
            self.log.warn("task::Unauthorized message from: %r"%idents)
            return

        msg_type = msg['header']['msg_type']

        handler = self._notification_handlers.get(msg_type, None)
        if handler is None:
            self.log.error("Unhandled message type: %r"%msg_type)
        else:
            try:
                handler(cast_bytes(msg['content']['uuid']))
            except Exception:
                self.log.error("task::Invalid notification msg: %r", msg, exc_info=True)

    def _register_engine(self, uid):
        """New engine with ident `uid` became available."""
        # head of the line:
        self.targets.insert(0,uid)
        self.loads.insert(0,0)

        # initialize sets
        self.completed[uid] = set()
        self.failed[uid] = set()
        self.pending[uid] = {}

        # rescan the graph:
        self.update_graph(None)

    def _unregister_engine(self, uid):
        """Existing engine with ident `uid` became unavailable."""
        if len(self.targets) == 1:
            # this was our only engine
            pass

        # handle any potentially finished tasks:
        self.engine_stream.flush()

        # don't pop destinations, because they might be used later
        # map(self.destinations.pop, self.completed.pop(uid))
        # map(self.destinations.pop, self.failed.pop(uid))

        # prevent this engine from receiving work
        idx = self.targets.index(uid)
        self.targets.pop(idx)
        self.loads.pop(idx)

        # wait 5 seconds before cleaning up pending jobs, since the results might
        # still be incoming
        if self.pending[uid]:
            self.loop.add_timeout(self.loop.time() + 5,
                lambda : self.handle_stranded_tasks(uid),
            )
        else:
            self.completed.pop(uid)
            self.failed.pop(uid)


    def handle_stranded_tasks(self, engine):
        """Deal with jobs resident in an engine that died."""
        lost = self.pending[engine]
        for msg_id in list(lost.keys()):  # copy: dispatch_result mutates pending
            if msg_id not in self.pending[engine]:
                # prevent double-handling of messages
                continue

            raw_msg = lost[msg_id].raw_msg
            idents,msg = self.session.feed_identities(raw_msg, copy=False)
            parent = self.session.unpack(msg[1].bytes)
            idents = [engine, idents[0]]

            # build fake error reply
            try:
                raise error.EngineError("Engine %r died while running task %r"%(engine, msg_id))
            except:
                content = error.wrap_exception()
            # build fake metadata
            md = dict(
                status=u'error',
                engine=engine.decode('ascii'),
                date=util.utcnow(),
            )
            msg = self.session.msg('apply_reply', content, parent=parent, metadata=md)
            raw_reply = list(map(zmq.Message, self.session.serialize(msg, ident=idents)))
            # and dispatch it
            self.dispatch_result(raw_reply)

        # finally scrub completed/failed lists
        self.completed.pop(engine)
        self.failed.pop(engine)


    #-----------------------------------------------------------------------
    # Job Submission
    #-----------------------------------------------------------------------
    

    @util.log_errors
    def dispatch_submission(self, raw_msg):
        """Dispatch job submission to appropriate handlers."""
        # ensure targets up to date:
        self.notifier_stream.flush()
        try:
            idents, msg = self.session.feed_identities(raw_msg, copy=False)
            msg = self.session.deserialize(msg, content=False, copy=False)
        except Exception:
            self.log.error("task::Invaid task msg: %r"%raw_msg, exc_info=True)
            return


        # send to monitor
        self.mon_stream.send_multipart([b'intask']+raw_msg, copy=False)

        header = msg['header']
        md = msg['metadata']
        msg_id = header['msg_id']
        self.all_ids.add(msg_id)

        # get targets as a set of bytes objects
        # from a list of unicode objects
        targets = md.get('targets', [])
        targets = set(map(cast_bytes, targets))

        retries = md.get('retries', 0)
        self.retries[msg_id] = retries

        # time dependencies
        after = md.get('after', None)
        if after:
            after = Dependency(after)
            if after.all:
                if after.success:
                    after = Dependency(after.difference(self.all_completed),
                                success=after.success,
                                failure=after.failure,
                                all=after.all,
                    )
                if after.failure:
                    after = Dependency(after.difference(self.all_failed),
                                success=after.success,
                                failure=after.failure,
                                all=after.all,
                    )
            if after.check(self.all_completed, self.all_failed):
                # recast as empty set, if `after` already met,
                # to prevent unnecessary set comparisons
                after = MET
        else:
            after = MET

        # location dependencies
        follow = Dependency(md.get('follow', []))

        timeout = md.get('timeout', None)
        if timeout:
            timeout = float(timeout)

        job = Job(msg_id=msg_id, raw_msg=raw_msg, idents=idents, msg=msg,
                 header=header, targets=targets, after=after, follow=follow,
                 timeout=timeout, metadata=md,
        )
        # validate and reduce dependencies:
        for dep in after,follow:
            if not dep: # empty dependency
                continue
            # check valid:
            if msg_id in dep or dep.difference(self.all_ids):
                self.queue_map[msg_id] = job
                return self.fail_unreachable(msg_id, error.InvalidDependency)
            # check if unreachable:
            if dep.unreachable(self.all_completed, self.all_failed):
                self.queue_map[msg_id] = job
                return self.fail_unreachable(msg_id)

        if after.check(self.all_completed, self.all_failed):
            # time deps already met, try to run
            if not self.maybe_run(job):
                # can't run yet
                if msg_id not in self.all_failed:
                    # could have failed as unreachable
                    self.save_unmet(job)
        else:
            self.save_unmet(job)

    def job_timeout(self, job, timeout_id):
        """callback for a job's timeout.
        
        The job may or may not have been run at this point.
        """
        if job.timeout_id != timeout_id:
            # not the most recent call
            return
        now = time.time()
        if job.timeout >= (now + 1):
            self.log.warn("task %s timeout fired prematurely: %s > %s",
                job.msg_id, job.timeout, now
            )
        if job.msg_id in self.queue_map:
            # still waiting, but ran out of time
            self.log.info("task %r timed out", job.msg_id)
            self.fail_unreachable(job.msg_id, error.TaskTimeout)

    def fail_unreachable(self, msg_id, why=error.ImpossibleDependency):
        """a task has become unreachable, send a reply with an ImpossibleDependency
        error."""
        if msg_id not in self.queue_map:
            self.log.error("task %r already failed!", msg_id)
            return
        job = self.queue_map.pop(msg_id)
        # lazy-delete from the queue
        job.removed = True
        for mid in job.dependents:
            if mid in self.graph:
                self.graph[mid].remove(msg_id)

        try:
            raise why()
        except:
            content = error.wrap_exception()
        self.log.debug("task %r failing as unreachable with: %s", msg_id, content['ename'])

        self.all_done.add(msg_id)
        self.all_failed.add(msg_id)

        msg = self.session.send(self.client_stream, 'apply_reply', content,
                                                parent=job.header, ident=job.idents)
        self.session.send(self.mon_stream, msg, ident=[b'outtask']+job.idents)

        self.update_graph(msg_id, success=False)

    def available_engines(self):
        """return a list of available engine indices based on HWM"""
        if not self.hwm:
            return list(range(len(self.targets)))
        available = []
        for idx in range(len(self.targets)):
            if self.loads[idx] < self.hwm:
                available.append(idx)
        return available

    def maybe_run(self, job):
        """check location dependencies, and run if they are met."""
        msg_id = job.msg_id
        self.log.debug("Attempting to assign task %s", msg_id)
        available = self.available_engines()
        if not available:
            # no engines, definitely can't run
            return False
        
        if job.follow or job.targets or job.blacklist or self.hwm:
            # we need a can_run filter
            def can_run(idx):
                # check hwm
                if self.hwm and self.loads[idx] == self.hwm:
                    return False
                target = self.targets[idx]
                # check blacklist
                if target in job.blacklist:
                    return False
                # check targets
                if job.targets and target not in job.targets:
                    return False
                # check follow
                return job.follow.check(self.completed[target], self.failed[target])

            indices = list(filter(can_run, available))

            if not indices:
                # couldn't run
                if job.follow.all:
                    # check follow for impossibility
                    dests = set()
                    relevant = set()
                    if job.follow.success:
                        relevant = self.all_completed
                    if job.follow.failure:
                        relevant = relevant.union(self.all_failed)
                    for m in job.follow.intersection(relevant):
                        dests.add(self.destinations[m])
                    if len(dests) > 1:
                        self.queue_map[msg_id] = job
                        self.fail_unreachable(msg_id)
                        return False
                if job.targets:
                    # check blacklist+targets for impossibility
                    job.targets.difference_update(job.blacklist)
                    if not job.targets or not job.targets.intersection(self.targets):
                        self.queue_map[msg_id] = job
                        self.fail_unreachable(msg_id)
                        return False
                return False
        else:
            indices = None

        self.submit_task(job, indices)
        return True

    def save_unmet(self, job):
        """Save a message for later submission when its dependencies are met."""
        msg_id = job.msg_id
        self.log.debug("Adding task %s to the queue", msg_id)
        self.queue_map[msg_id] = job
        self.queue.append(job)
        # track the ids in follow or after, but not those already finished
        for dep_id in job.after.union(job.follow).difference(self.all_done):
            if dep_id not in self.graph:
                self.graph[dep_id] = set()
            self.graph[dep_id].add(msg_id)
        
        # schedule timeout callback
        if job.timeout:
            timeout_id = job.timeout_id = job.timeout_id + 1
            self.loop.add_timeout(time.time() + job.timeout,
                lambda : self.job_timeout(job, timeout_id)
            )
        

    def submit_task(self, job, indices=None):
        """Submit a task to any of a subset of our targets."""
        if indices:
            loads = [self.loads[i] for i in indices]
        else:
            loads = self.loads
        idx = self.scheme(loads)
        if indices:
            idx = indices[idx]
        target = self.targets[idx]
        # print (target, map(str, msg[:3]))
        # send job to the engine
        self.engine_stream.send(target, flags=zmq.SNDMORE, copy=False)
        self.engine_stream.send_multipart(job.raw_msg, copy=False)
        # update load
        self.add_job(idx)
        self.pending[target][job.msg_id] = job
        # notify Hub
        content = dict(msg_id=job.msg_id, engine_id=target.decode('ascii'))
        self.session.send(self.mon_stream, 'task_destination', content=content,
                        ident=[b'tracktask',self.ident])


    #-----------------------------------------------------------------------
    # Result Handling
    #-----------------------------------------------------------------------
    
    
    @util.log_errors
    def dispatch_result(self, raw_msg):
        """dispatch method for result replies"""
        try:
            idents,msg = self.session.feed_identities(raw_msg, copy=False)
            msg = self.session.deserialize(msg, content=False, copy=False)
            engine = idents[0]
            try:
                idx = self.targets.index(engine)
            except ValueError:
                pass # skip load-update for dead engines
            else:
                self.finish_job(idx)
        except Exception:
            self.log.error("task::Invalid result: %r", raw_msg, exc_info=True)
            return

        md = msg['metadata']
        parent = msg['parent_header']
        if md.get('dependencies_met', True):
            success = (md['status'] == 'ok')
            msg_id = parent['msg_id']
            retries = self.retries[msg_id]
            if not success and retries > 0:
                # failed
                self.retries[msg_id] = retries - 1
                self.handle_unmet_dependency(idents, parent)
            else:
                del self.retries[msg_id]
                # relay to client and update graph
                self.handle_result(idents, parent, raw_msg, success)
                # send to Hub monitor
                self.mon_stream.send_multipart([b'outtask']+raw_msg, copy=False)
        else:
            self.handle_unmet_dependency(idents, parent)

    def handle_result(self, idents, parent, raw_msg, success=True):
        """handle a real task result, either success or failure"""
        # first, relay result to client
        engine = idents[0]
        client = idents[1]
        # swap_ids for ROUTER-ROUTER mirror
        raw_msg[:2] = [client,engine]
        # print (map(str, raw_msg[:4]))
        self.client_stream.send_multipart(raw_msg, copy=False)
        # now, update our data structures
        msg_id = parent['msg_id']
        self.pending[engine].pop(msg_id)
        if success:
            self.completed[engine].add(msg_id)
            self.all_completed.add(msg_id)
        else:
            self.failed[engine].add(msg_id)
            self.all_failed.add(msg_id)
        self.all_done.add(msg_id)
        self.destinations[msg_id] = engine

        self.update_graph(msg_id, success)

    def handle_unmet_dependency(self, idents, parent):
        """handle an unmet dependency"""
        engine = idents[0]
        msg_id = parent['msg_id']

        job = self.pending[engine].pop(msg_id)
        job.blacklist.add(engine)

        if job.blacklist == job.targets:
            self.queue_map[msg_id] = job
            self.fail_unreachable(msg_id)
        elif not self.maybe_run(job):
            # resubmit failed
            if msg_id not in self.all_failed:
                # put it back in our dependency tree
                self.save_unmet(job)

        if self.hwm:
            try:
                idx = self.targets.index(engine)
            except ValueError:
                pass # skip load-update for dead engines
            else:
                if self.loads[idx] == self.hwm-1:
                    self.update_graph(None)

    def update_graph(self, dep_id=None, success=True):
        """dep_id just finished. Update our dependency
        graph and submit any jobs that just became runnable.

        Called with dep_id=None to update entire graph for hwm, but without finishing a task.
        """
        # print ("\n\n***********")
        # pprint (dep_id)
        # pprint (self.graph)
        # pprint (self.queue_map)
        # pprint (self.all_completed)
        # pprint (self.all_failed)
        # print ("\n\n***********\n\n")
        # update any jobs that depended on the dependency
        msg_ids = self.graph.pop(dep_id, [])

        # recheck *all* jobs if
        # a) we have HWM and an engine just became no longer full
        # or b) dep_id was given as None
        
        if dep_id is None or self.hwm and any( [ load==self.hwm-1 for load in self.loads ]):
            jobs = self.queue
            using_queue = True
        else:
            using_queue = False
            jobs = deque(sorted( self.queue_map[msg_id] for msg_id in msg_ids ))
        
        to_restore = []
        while jobs:
            job = jobs.popleft()
            if job.removed:
                continue
            msg_id = job.msg_id
            
            put_it_back = True
            
            if job.after.unreachable(self.all_completed, self.all_failed)\
                    or job.follow.unreachable(self.all_completed, self.all_failed):
                self.fail_unreachable(msg_id)
                put_it_back = False

            elif job.after.check(self.all_completed, self.all_failed): # time deps met, maybe run
                if self.maybe_run(job):
                    put_it_back = False
                    self.queue_map.pop(msg_id)
                    for mid in job.dependents:
                        if mid in self.graph:
                            self.graph[mid].remove(msg_id)
                    
                    # abort the loop if we just filled up all of our engines.
                    # avoids an O(N) operation in the situation of a full queue,
                    # where graph update is triggered as soon as an engine becomes
                    # non-full, and all tasks after the first are checked,
                    # even though they can't run.
                    if not self.available_engines():
                        break
            
            if using_queue and put_it_back:
                # popped a job from the queue but it neither ran nor failed,
                # so we need to put it back when we are done
                # make sure to_restore preserves the same ordering
                to_restore.append(job)
        
        # put back any tasks we popped but didn't run
        if using_queue:
            self.queue.extendleft(to_restore)
    
    #----------------------------------------------------------------------
    # methods to be overridden by subclasses
    #----------------------------------------------------------------------

    def add_job(self, idx):
        """Called after self.targets[idx] just got the job with header.
        Override with subclasses.  The default ordering is simple LRU.
        The default loads are the number of outstanding jobs."""
        self.loads[idx] += 1
        for lis in (self.targets, self.loads):
            lis.append(lis.pop(idx))


    def finish_job(self, idx):
        """Called after self.targets[idx] just finished a job.
        Override with subclasses."""
        self.loads[idx] -= 1
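
A standalone, runnable sketch of the default LRU bookkeeping in add_job/finish_job above: the engine that just received work is rotated to the back of both parallel lists (finish_job only decrements the load), so schemes that pick from the front naturally prefer the least-recently-used engine.

targets = ['engine-a', 'engine-b', 'engine-c']
loads = [0, 0, 0]

def add_job(idx):
    """Record a new job on targets[idx] and rotate it to the back."""
    loads[idx] += 1
    for lis in (targets, loads):
        lis.append(lis.pop(idx))

add_job(0)
print(targets, loads)  # -> ['engine-b', 'engine-c', 'engine-a'] [0, 0, 1]
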
Exemple #11
0
class Canvas(_CanvasBase):
    """Create a Canvas widget.

    Args:
        width (int): The width (in pixels) of the canvas
        height (int): The height (in pixels) of the canvas
        caching (boolean): Whether commands should be cached or not
    """

    _model_name = Unicode('CanvasModel').tag(sync=True)
    _view_name = Unicode('CanvasView').tag(sync=True)

    #: (valid HTML color) The color for filling rectangles and paths. Defaults to ``'black'``.
    fill_style = Color('black')

    #: (valid HTML color) The stroke color for rectangles and paths. Defaults to ``'black'``.
    stroke_style = Color('black')

    #: (float) Transparency level. Defaults to ``1.0``.
    global_alpha = Float(1.0)

    #: (str) Font for text rendering. Defaults to ``'12px serif'``.
    font = Unicode('12px serif')

    #: (str) Text alignment, possible values are ``'start'``, ``'end'``, ``'left'``, ``'right'``, and ``'center'``.
    #: Defaults to ``'start'``.
    text_align = Enum(['start', 'end', 'left', 'right', 'center'],
                      default_value='start')

    #: (str) Text baseline, possible values are ``'top'``, ``'hanging'``, ``'middle'``, ``'alphabetic'``, ``'ideographic'``
    #: and ``'bottom'``.
    #: Defaults to ``'alphabetic'``.
    text_baseline = Enum(
        ['top', 'hanging', 'middle', 'alphabetic', 'ideographic', 'bottom'],
        default_value='alphabetic')

    #: (str) Text direction, possible values are ``'ltr'``, ``'rtl'``, and ``'inherit'``.
    #: Defaults to ``'inherit'``.
    direction = Enum(['ltr', 'rtl', 'inherit'], default_value='inherit')

    #: (str) Global composite operation, possible values are listed below:
    #: https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API/Tutorial/Compositing#globalCompositeOperation
    global_composite_operation = Enum([
        'source-over', 'source-in', 'source-out', 'source-atop',
        'destination-over', 'destination-in', 'destination-out',
        'destination-atop', 'lighter', 'copy', 'xor', 'multiply', 'screen',
        'overlay', 'darken', 'lighten', 'color-dodge', 'color-burn',
        'hard-light', 'soft-light', 'difference', 'exclusion', 'hue',
        'saturation', 'color', 'luminosity'
    ],
                                      default_value='source-over')

    #: (float) Indicates the horizontal distance the shadow should extend from the object.
    #: This value isn't affected by the transformation matrix. The default is 0.
    shadow_offset_x = Float(0.0)

    #: (float) Indicates the vertical distance the shadow should extend from the object.
    #: This value isn't affected by the transformation matrix. The default is 0.
    shadow_offset_y = Float(0.0)

    #: (float) Indicates the size of the blurring effect; this value doesn't correspond to a number of pixels
    #: and is not affected by the current transformation matrix. The default value is 0.
    shadow_blur = Float(0.0)

    #: (valid HTML color) A standard CSS color value indicating the color of the shadow effect; by default,
    #: it is fully-transparent black.
    shadow_color = Color('rgba(0, 0, 0, 0)')

    #: (float) Sets the width of lines drawn in the future; must be a positive number. Defaults to ``1.0``.
    line_width = Float(1.0)

    #: (str) Sets the appearance of the ends of lines, possible values are ``'butt'``, ``'round'`` and ``'square'``.
    #: Defaults to ``'butt'``.
    line_cap = Enum(['butt', 'round', 'square'], default_value='butt')

    #: (str) Sets the appearance of the "corners" where lines meet, possible values are ``'round'``, ``'bevel'`` and ``'miter'``.
    #: Defaults to ``'miter'``.
    line_join = Enum(['round', 'bevel', 'miter'], default_value='miter')

    #: (float) Establishes a limit on the miter when two lines join at a sharp angle, to let you control how thick
    #: the junction becomes. Defaults to ``10.``.
    miter_limit = Float(10.)

    _line_dash = List()

    #: (float) Specifies where to start a dash array on a line. Default is ``0.``.
    line_dash_offset = Float(0.)

    _client_ready_callbacks = Instance(CallbackDispatcher, ())

    _mouse_move_callbacks = Instance(CallbackDispatcher, ())
    _mouse_down_callbacks = Instance(CallbackDispatcher, ())
    _mouse_up_callbacks = Instance(CallbackDispatcher, ())
    _mouse_out_callbacks = Instance(CallbackDispatcher, ())

    _touch_start_callbacks = Instance(CallbackDispatcher, ())
    _touch_end_callbacks = Instance(CallbackDispatcher, ())
    _touch_move_callbacks = Instance(CallbackDispatcher, ())
    _touch_cancel_callbacks = Instance(CallbackDispatcher, ())

    ATTRS = {
        'fill_style': 0,
        'stroke_style': 1,
        'global_alpha': 2,
        'font': 3,
        'text_align': 4,
        'text_baseline': 5,
        'direction': 6,
        'global_composite_operation': 7,
        'line_width': 8,
        'line_cap': 9,
        'line_join': 10,
        'miter_limit': 11,
        'line_dash_offset': 12,
        'shadow_offset_x': 13,
        'shadow_offset_y': 14,
        'shadow_blur': 15,
        'shadow_color': 16,
    }

    def __init__(self, *args, **kwargs):
        """Create a Canvas widget."""
        #: Whether commands should be cached or not
        self.caching = kwargs.get('caching', False)
        self._commands_cache = []
        self._buffers_cache = []

        if 'size' in kwargs:
            size = kwargs['size']

            kwargs['width'] = size[0]
            kwargs['height'] = size[1]

            del kwargs['size']

            warnings.warn(
                'size is deprecated and will be removed in a future release, please use width and height instead.',
                DeprecationWarning)

        super(Canvas, self).__init__(*args, **kwargs)

        self.on_msg(self._handle_frontend_event)

    def sleep(self, time):
        """Make the Canvas sleep for `time` milliseconds."""
        self._send_canvas_command(COMMANDS['sleep'], [time])

    # Rectangles methods
    def fill_rect(self, x, y, width, height=None):
        """Draw a filled rectangle of size ``(width, height)`` at the ``(x, y)`` position."""
        if height is None:
            height = width

        self._send_canvas_command(COMMANDS['fillRect'], [x, y, width, height])
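
    # A hedged usage sketch (names as defined in this class):
    #
    #   canvas = Canvas(width=200, height=200)
    #   canvas.fill_style = 'red'
    #   canvas.fill_rect(25, 25, 100)   # height omitted -> a 100x100 square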

    def stroke_rect(self, x, y, width, height=None):
        """Draw a rectangular outline of size ``(width, height)`` at the ``(x, y)`` position."""
        if height is None:
            height = width

        self._send_canvas_command(COMMANDS['strokeRect'],
                                  [x, y, width, height])

    def fill_rects(self, x, y, width, height=None):
        """Draw filled rectangles of sizes ``(width, height)`` at the ``(x, y)`` positions.

        Where ``x``, ``y``, ``width`` and ``height`` arguments are NumPy arrays, lists or scalar values.
        If ``height`` is None, it is set to the same value as width.
        """
        args = []
        buffers = []

        populate_args(x, args, buffers)
        populate_args(y, args, buffers)
        populate_args(width, args, buffers)

        if height is None:
            args.append(args[-1])
        else:
            populate_args(height, args, buffers)

        self._send_canvas_command(COMMANDS['fillRects'], args, buffers)

    def stroke_rects(self, x, y, width, height=None):
        """Draw a rectangular outlines of sizes ``(width, height)`` at the ``(x, y)`` positions.

        Where ``x``, ``y``, ``width`` and ``height`` arguments are NumPy arrays, lists or scalar values.
        If ``height`` is None, it is set to the same value as width.
        """
        args = []
        buffers = []

        populate_args(x, args, buffers)
        populate_args(y, args, buffers)
        populate_args(width, args, buffers)

        if height is None:
            args.append(args[-1])
        else:
            populate_args(height, args, buffers)

        self._send_canvas_command(COMMANDS['strokeRects'], args, buffers)

    def clear_rect(self, x, y, width, height=None):
        """Clear the specified rectangular area of size ``(width, height)`` at the ``(x, y)`` position, making it fully transparent."""
        if height is None:
            height = width

        self._send_canvas_command(COMMANDS['clearRect'], [x, y, width, height])

    # Arc methods
    def fill_arc(self,
                 x,
                 y,
                 radius,
                 start_angle,
                 end_angle,
                 anticlockwise=False):
        """Draw a filled arc centered at ``(x, y)`` with a radius of ``radius`` from ``start_angle`` to ``end_angle``."""
        self._send_canvas_command(
            COMMANDS['fillArc'],
            [x, y, radius, start_angle, end_angle, anticlockwise])

    def fill_circle(self, x, y, radius):
        """Draw a filled circle centered at ``(x, y)`` with a radius of ``radius``."""
        self._send_canvas_command(COMMANDS['fillCircle'], [x, y, radius])

    def stroke_arc(self,
                   x,
                   y,
                   radius,
                   start_angle,
                   end_angle,
                   anticlockwise=False):
        """Draw an arc outline centered at ``(x, y)`` with a radius of ``radius``."""
        self._send_canvas_command(
            COMMANDS['strokeArc'],
            [x, y, radius, start_angle, end_angle, anticlockwise])

    def stroke_circle(self, x, y, radius):
        """Draw a circle centered at ``(x, y)`` with a radius of ``radius``."""
        self._send_canvas_command(COMMANDS['strokeCircle'], [x, y, radius])

    def fill_arcs(self,
                  x,
                  y,
                  radius,
                  start_angle,
                  end_angle,
                  anticlockwise=False):
        """Draw filled arcs centered at ``(x, y)`` with a radius of ``radius``.

        The ``x``, ``y``, ``radius`` and other arguments can be NumPy arrays, lists or scalar values.
        """
        args = []
        buffers = []

        populate_args(x, args, buffers)
        populate_args(y, args, buffers)
        populate_args(radius, args, buffers)
        populate_args(start_angle, args, buffers)
        populate_args(end_angle, args, buffers)
        args.append(anticlockwise)

        self._send_canvas_command(COMMANDS['fillArcs'], args, buffers)

    def stroke_arcs(self,
                    x,
                    y,
                    radius,
                    start_angle,
                    end_angle,
                    anticlockwise=False):
        """Draw an arc outlines centered at ``(x, y)`` with a radius of ``radius``.

        Where ``x``, ``y``, ``radius`` and other arguments are NumPy arrays, lists or scalar values.
        """
        args = []
        buffers = []

        populate_args(x, args, buffers)
        populate_args(y, args, buffers)
        populate_args(radius, args, buffers)
        populate_args(start_angle, args, buffers)
        populate_args(end_angle, args, buffers)
        args.append(anticlockwise)

        self._send_canvas_command(COMMANDS['strokeArcs'], args, buffers)

    def fill_circles(self, x, y, radius):
        """Draw filled circles centered at ``(x, y)`` with a radius of ``radius``.

        The ``x``, ``y`` and ``radius`` arguments can be NumPy arrays, lists or scalar values.
        """
        args = []
        buffers = []

        populate_args(x, args, buffers)
        populate_args(y, args, buffers)
        populate_args(radius, args, buffers)

        self._send_canvas_command(COMMANDS['fillCircles'], args, buffers)

    def stroke_circles(self, x, y, radius):
        """Draw a circle outlines centered at ``(x, y)`` with a radius of ``radius``.

        Where ``x``, ``y``, ``radius`` and other arguments are NumPy arrays, lists or scalar values.
        """
        args = []
        buffers = []

        populate_args(x, args, buffers)
        populate_args(y, args, buffers)
        populate_args(radius, args, buffers)

        self._send_canvas_command(COMMANDS['strokeCircles'], args, buffers)

    # Lines methods
    def stroke_line(self, x1, y1, x2, y2):
        """Draw a line from ``(x1, y1)`` to ``(x2, y2)``."""
        self._send_canvas_command(COMMANDS['strokeLine'], [x1, y1, x2, y2])

    # Paths methods
    def begin_path(self):
        """Call this method when you want to create a new path."""
        self._send_canvas_command(COMMANDS['beginPath'])

    def close_path(self):
        """Add a straight line from the current point to the start of the current path.

        If the shape has already been closed or has only one point, this function does nothing.
        This method doesn't draw anything to the canvas directly. You can render the path using the stroke() or fill() methods.
        """
        self._send_canvas_command(COMMANDS['closePath'])

    def stroke(self):
        """Stroke (outlines) the current path with the current ``stroke_style``."""
        self._send_canvas_command(COMMANDS['stroke'])

    def fill(self, rule_or_path='nonzero'):
        """Fill the current path with the current ``fill_style`` and given the rule, or fill the given Path2D.

        Possible rules are ``nonzero`` and ``evenodd``.
        """
        if isinstance(rule_or_path, Path2D):
            self._send_canvas_command(
                COMMANDS['fillPath'],
                [widget_serialization['to_json'](rule_or_path, None)])
        else:
            self._send_canvas_command(COMMANDS['fill'], [rule_or_path])

    def move_to(self, x, y):
        """Move the "pen" to the given ``(x, y)`` coordinates."""
        self._send_canvas_command(COMMANDS['moveTo'], [x, y])

    def line_to(self, x, y):
        """Add a straight line to the current path by connecting the path's last point to the specified ``(x, y)`` coordinates.

        Like other methods that modify the current path, this method does not directly render anything. To
        draw the path onto the canvas, you can use the fill() or stroke() methods.
        """
        self._send_canvas_command(COMMANDS['lineTo'], [x, y])

    def rect(self, x, y, width, height):
        """Add a rectangle of size ``(width, height)`` at the ``(x, y)`` position in the current path."""
        self._send_canvas_command(COMMANDS['rect'], [x, y, width, height])

    def arc(self, x, y, radius, start_angle, end_angle, anticlockwise=False):
        """Add a circular arc centered at ``(x, y)`` with a radius of ``radius`` to the current path.

        The path starts at ``start_angle`` and ends at ``end_angle``, and travels in the direction given by
        ``anticlockwise`` (defaulting to clockwise: ``False``).
        """
        self._send_canvas_command(
            COMMANDS['arc'],
            [x, y, radius, start_angle, end_angle, anticlockwise])

    def ellipse(self,
                x,
                y,
                radius_x,
                radius_y,
                rotation,
                start_angle,
                end_angle,
                anticlockwise=False):
        """Add an ellipse centered at ``(x, y)`` with the radii ``radius_x`` and ``radius_y`` to the current path.

        The path starts at ``start_angle`` and ends at ``end_angle``, and travels in the direction given by
        ``anticlockwise`` (defaulting to clockwise: ``False``).
        """
        self._send_canvas_command(COMMANDS['ellipse'], [
            x, y, radius_x, radius_y, rotation, start_angle, end_angle,
            anticlockwise
        ])

    def arc_to(self, x1, y1, x2, y2, radius):
        """Add a circular arc to the current path.

        The arc uses the given control points ``(x1, y1)`` and ``(x2, y2)`` and the given ``radius``.
        """
        self._send_canvas_command(COMMANDS['arcTo'], [x1, y1, x2, y2, radius])

    def quadratic_curve_to(self, cp1x, cp1y, x, y):
        """Add a quadratic Bezier curve to the current path.

        It requires two points: the first one is a control point and the second one is the end point.
        The starting point is the latest point in the current path, which can be changed using move_to()
        before creating the quadratic Bezier curve.
        """
        self._send_canvas_command(COMMANDS['quadraticCurveTo'],
                                  [cp1x, cp1y, x, y])

    def bezier_curve_to(self, cp1x, cp1y, cp2x, cp2y, x, y):
        """Add a cubic Bezier curve to the current path.

        It requires three points: the first two are control points and the third one is the end point.
        The starting point is the latest point in the current path, which can be changed using move_to()
        before creating the Bezier curve.
        """
        self._send_canvas_command(COMMANDS['bezierCurveTo'],
                                  [cp1x, cp1y, cp2x, cp2y, x, y])

    # Text methods
    def fill_text(self, text, x, y, max_width=None):
        """Fill a given text at the given ``(x, y)`` position. Optionally with a maximum width to draw."""
        self._send_canvas_command(COMMANDS['fillText'],
                                  [text, x, y, max_width])

    def stroke_text(self, text, x, y, max_width=None):
        """Stroke a given text at the given ``(x, y)`` position. Optionally with a maximum width to draw."""
        self._send_canvas_command(COMMANDS['strokeText'],
                                  [text, x, y, max_width])

    # Line methods
    def get_line_dash(self):
        """Return the current line dash pattern array containing an even number of non-negative numbers."""
        return self._line_dash

    def set_line_dash(self, segments):
        """Set the current line dash pattern."""
        # Match the HTML Canvas behavior: a pattern with an odd number of
        # segments is duplicated so that it always has an even length.
        if len(segments) % 2:
            self._line_dash = segments + segments
        else:
            self._line_dash = segments
        self._send_canvas_command(COMMANDS['setLineDash'], [self._line_dash])
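
    # Usage sketch (values are illustrative): 5px dashes with 15px gaps,
    # then back to solid lines with an empty pattern:
    #     canvas.set_line_dash([5, 15])
    #     canvas.stroke_rect(10, 10, 100)
    #     canvas.set_line_dash([])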

    # Image methods
    def draw_image(self, image, x=0, y=0, width=None, height=None):
        """Draw an ``image`` on the Canvas at the coordinates (``x``, ``y``) and scale it to (``width``, ``height``)."""
        if not isinstance(image, (Canvas, MultiCanvas, Image)):
            raise TypeError(
                'The image argument should be an Image, a Canvas or a MultiCanvas widget'
            )

        if width is not None and height is None:
            height = width

        serialized_image = widget_serialization['to_json'](image, None)

        self._send_canvas_command(COMMANDS['drawImage'],
                                  [serialized_image, x, y, width, height])

    def put_image_data(self, image_data, x=0, y=0):
        """Draw an image on the Canvas.

        ``image_data`` should be a NumPy array containing the image to draw, and ``x`` and ``y``
        the pixel position where to draw it. Unlike the CanvasRenderingContext2D.putImageData method,
        this method **is** affected by the canvas transformation matrix, and supports transparency.
        """
        image_metadata, image_buffer = binary_image(image_data)
        self._send_canvas_command(COMMANDS['putImageData'],
                                  [image_metadata, x, y], [image_buffer])

    def create_image_data(self, width, height):
        """Create a NumPy array of shape (width, height, 4) representing a table of pixel colors."""
        return np.zeros((width, height, 4), dtype=int)
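
    # Pixel-buffer sketch (assumes NumPy as `np`): fill an RGBA buffer and
    # blit it with put_image_data above:
    #     data = canvas.create_image_data(100, 100)
    #     data[:, :, 0] = 255  # red channel
    #     data[:, :, 3] = 255  # alpha: fully opaque
    #     canvas.put_image_data(data, 20, 20)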

    # Clipping
    def clip(self):
        """Turn the path currently being built into the current clipping path.

        You can use clip() instead of close_path() to close a path and turn it into a clipping
        path instead of stroking or filling the path.
        """
        self._send_canvas_command(COMMANDS['clip'])

    # Transformation methods
    def save(self):
        """Save the entire state of the canvas."""
        self._send_canvas_command(COMMANDS['save'])

    def restore(self):
        """Restore the most recently saved canvas state."""
        self._send_canvas_command(COMMANDS['restore'])

    def translate(self, x, y):
        """Move the canvas and its origin on the grid.

        ``x`` indicates the horizontal distance to move,
        and ``y`` indicates how far to move the grid vertically.
        """
        self._send_canvas_command(COMMANDS['translate'], [x, y])

    def rotate(self, angle):
        """Rotate the canvas clockwise around the current origin by the ``angle`` number of radians."""
        self._send_canvas_command(COMMANDS['rotate'], [angle])

    def scale(self, x, y=None):
        """Scale the canvas units by ``x`` horizontally and by ``y`` vertically. Both parameters are real numbers.

        If ``y`` is not provided, it defaults to the same value as ``x``.
        Values that are smaller than 1.0 reduce the unit size and values above 1.0 increase the unit size.
        Values of 1.0 leave the units the same size.
        """
        if y is None:
            y = x
        self._send_canvas_command(COMMANDS['scale'], [x, y])

    def transform(self, a, b, c, d, e, f):
        """Multiply the current transformation matrix with the matrix described by its arguments.

        The transformation matrix is described by:
        ``[[a, c, e], [b, d, f], [0, 0, 1]]``.
        """
        self._send_canvas_command(COMMANDS['transform'], [a, b, c, d, e, f])

    def set_transform(self, a, b, c, d, e, f):
        """Reset the current transform to the identity matrix, and then invokes the transform() method with the same arguments.

        This basically undoes the current transformation, then sets the specified transform, all in one step.
        """
        self._send_canvas_command(COMMANDS['setTransform'], [a, b, c, d, e, f])

    def reset_transform(self):
        """Reset the current transform to the identity matrix.

        This is the same as calling: set_transform(1, 0, 0, 1, 0, 0).
        """
        self._send_canvas_command(COMMANDS['resetTransform'])

    # Extras
    def clear(self):
        """Clear the entire canvas. This is the same as calling ``clear_rect(0, 0, canvas.width, canvas.height)``."""
        self._send_command([COMMANDS['clear']])

    def flush(self):
        """Flush all the cached commands and clear the cache."""
        if not self.caching or not len(self._commands_cache):
            return

        self._send_custom(self._commands_cache, self._buffers_cache)

        self._commands_cache = []
        self._buffers_cache = []
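
    # Batching sketch (assumes `caching` is a settable attribute, as read by
    # `_send_command` below): queue draw calls locally, then send them to the
    # frontend in a single message:
    #     canvas.caching = True
    #     for i in range(100):
    #         canvas.fill_rect(i * 5, 0, 4)
    #     canvas.flush()
    #     canvas.caching = False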

    # Events
    def on_client_ready(self, callback, remove=False):
        """Register a callback that will be called when a new client is ready to receive draw commands.

        When a new client connects to the kernel, it will get an empty Canvas (because the canvas is
        almost stateless, the new client does not know what draw commands were previously sent). So
        this function is useful for replaying your drawing whenever a new client connects and is
        ready to receive draw commands.
        """
        self._client_ready_callbacks.register_callback(callback, remove=remove)

    def on_mouse_move(self, callback, remove=False):
        """Register a callback that will be called on mouse move."""
        self._mouse_move_callbacks.register_callback(callback, remove=remove)

    def on_mouse_down(self, callback, remove=False):
        """Register a callback that will be called on mouse click down."""
        self._mouse_down_callbacks.register_callback(callback, remove=remove)

    def on_mouse_up(self, callback, remove=False):
        """Register a callback that will be called on mouse click up."""
        self._mouse_up_callbacks.register_callback(callback, remove=remove)

    def on_mouse_out(self, callback, remove=False):
        """Register a callback that will be called on mouse out of the canvas."""
        self._mouse_out_callbacks.register_callback(callback, remove=remove)

    def on_touch_start(self, callback, remove=False):
        """Register a callback that will be called on touch start (new finger on the screen)."""
        self._touch_start_callbacks.register_callback(callback, remove=remove)

    def on_touch_end(self, callback, remove=False):
        """Register a callback that will be called on touch end (a finger is not touching the screen anymore)."""
        self._touch_end_callbacks.register_callback(callback, remove=remove)

    def on_touch_move(self, callback, remove=False):
        """Register a callback that will be called on touch move (finger moving on the screen)."""
        self._touch_move_callbacks.register_callback(callback, remove=remove)

    def on_touch_cancel(self, callback, remove=False):
        """Register a callback that will be called on touch cancel."""
        self._touch_cancel_callbacks.register_callback(callback, remove=remove)
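
    # Callback signature sketch, based on `_handle_frontend_event` below:
    # mouse callbacks receive the (x, y) position, touch callbacks a list of
    # (x, y) tuples:
    #     def on_down(x, y):
    #         canvas.fill_circle(x, y, 5)
    #     canvas.on_mouse_down(on_down)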

    def __setattr__(self, name, value):
        super(Canvas, self).__setattr__(name, value)

        if name in self.ATTRS:
            self._send_command([COMMANDS['set'], [self.ATTRS[name], value]])

    def _send_canvas_command(self, name, args=[], buffers=[]):
        # Drop trailing None arguments so optional parameters are omitted
        # from the serialized command.
        while len(args) and args[-1] is None:
            args.pop()
        self._send_command([name, args, len(buffers)], buffers)

    def _send_command(self, command, buffers=[]):
        if self.caching:
            self._commands_cache.append(command)
            self._buffers_cache += buffers
        else:
            self._send_custom(command, buffers)

    def _send_custom(self, command, buffers=[]):
        metadata, command_buffer = commands_to_buffer(command)
        self.send(metadata, buffers=[command_buffer] + buffers)

    def _handle_frontend_event(self, _, content, buffers):
        if content.get('event', '') == 'client_ready':
            self._client_ready_callbacks()

        if content.get('event', '') == 'mouse_move':
            self._mouse_move_callbacks(content['x'], content['y'])
        if content.get('event', '') == 'mouse_down':
            self._mouse_down_callbacks(content['x'], content['y'])
        if content.get('event', '') == 'mouse_up':
            self._mouse_up_callbacks(content['x'], content['y'])
        if content.get('event', '') == 'mouse_out':
            self._mouse_out_callbacks(content['x'], content['y'])

        if content.get('event', '') == 'touch_start':
            self._touch_start_callbacks([(touch['x'], touch['y'])
                                         for touch in content['touches']])
        if content.get('event', '') == 'touch_end':
            self._touch_end_callbacks([(touch['x'], touch['y'])
                                       for touch in content['touches']])
        if content.get('event', '') == 'touch_move':
            self._touch_move_callbacks([(touch['x'], touch['y'])
                                        for touch in content['touches']])
        if content.get('event', '') == 'touch_cancel':
            self._touch_cancel_callbacks([(touch['x'], touch['y'])
                                          for touch in content['touches']])
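
A short end-to-end sketch of the drawing API above, assuming this is the
ipycanvas ``Canvas`` widget (sizes and colors are illustrative):

from ipycanvas import Canvas

canvas = Canvas(width=200, height=200)
canvas.fill_style = 'red'          # synced through __setattr__ / ATTRS
canvas.fill_rect(10, 10, 50)       # height defaults to width
canvas.begin_path()
canvas.move_to(100, 100)
canvas.line_to(180, 150)
canvas.arc(140, 140, 30, 0, 3.14)
canvas.stroke()
canvas                             # display the widget in a notebook
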
Exemple #12
0
class Axis(BaseAxis):
    """A line axis.

    A line axis is the visual representation of a numerical or date scale.

    Attributes
    ----------

    icon: string (class-level attribute)
        The font-awesome icon name for this object.
    axis_types: dict (class-level attribute)
        A registry of existing axis types.
    orientation: {'horizontal', 'vertical'}
        The orientation of the axis, either vertical or horizontal
    side: {'bottom', 'top', 'left', 'right'} or None (default: None)
        The side of the axis, either bottom, top, left or right.
    label: string (default: '')
        The axis label
    tick_format: string or None (default: None)
        The tick format for the axis.
    scale: Scale
        The scale represented by the axis
    num_ticks: int or None (default: None)
        If tick_values is None, number of ticks
    tick_values: numpy.ndarray or None (default: [])
        Tick values for the axis
    offset: dict (default: {})
        Contains a scale and a value {'scale': scale or None, 'value': value of the offset}
        If offset['scale'] is None, the corresponding figure scale is used
        instead.
    label_location: {'middle', 'start', 'end'}
        The location of the label along the axis, one of 'start', 'end' or
        'middle'
    label_color: Color or None (default: None)
        The color of the axis label
    grid_lines: {'none', 'solid', 'dashed'}
        The display of the grid lines
    grid_color: Color or None (default: None)
        The color of the grid lines
    color: Color or None (default: None)
        The color of the line
    label_offset: string or None (default: None)
        Label displacement from the axis line. Units allowed are 'em', 'px'
        and 'ex'. Positive values are away from the figure and negative
        values are towards the figure with respect to the axis line.
    visible: bool (default: True)
        A visibility toggle for the axis
    """
    icon = 'fa-arrows'
    orientation = Enum(['horizontal', 'vertical'],
                       default_value='horizontal',
                       sync=True)
    side = Enum(['bottom', 'top', 'left', 'right'],
                allow_none=True,
                default_value=None,
                sync=True)
    label = Unicode(sync=True)
    grid_lines = Enum(['none', 'solid', 'dashed'],
                      default_value='none',
                      sync=True)
    tick_format = Unicode(None, allow_none=True, sync=True)
    scale = Instance(Scale, sync=True, **widget_serialization)
    num_ticks = Int(default_value=None, sync=True, allow_none=True)
    tick_values = NdArray(sync=True)
    offset = Dict(sync=True, **widget_serialization)
    label_location = Enum(['middle', 'start', 'end'],
                          default_value='middle',
                          sync=True)
    label_color = Color(None, sync=True, allow_none=True)
    grid_color = Color(None, sync=True, allow_none=True)
    color = Color(None, sync=True, allow_none=True)
    label_offset = Unicode(default_value=None, sync=True, allow_none=True)

    visible = Bool(True, sync=True)

    _view_name = Unicode('Axis', sync=True)
    _view_module = Unicode('nbextensions/bqplot/Axis', sync=True)
    _model_name = Unicode('AxisModel', sync=True)
    _model_module = Unicode('nbextensions/bqplot/AxisModel', sync=True)
    _ipython_display_ = None  # We cannot display an axis outside of a figure.
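
A minimal figure sketch using this Axis, assuming bqplot's scales and marks
(values are illustrative):

from bqplot import LinearScale, Lines, Axis, Figure

x_scale, y_scale = LinearScale(), LinearScale()
line = Lines(x=[0, 1, 2], y=[3, 1, 2], scales={'x': x_scale, 'y': y_scale})
x_axis = Axis(scale=x_scale, label='x', grid_lines='dashed')
y_axis = Axis(scale=y_scale, orientation='vertical', side='left', label='y')
Figure(marks=[line], axes=[x_axis, y_axis])
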
Exemple #13
0
class ExecutePreprocessor(Preprocessor):
    """
    Executes all the cells in a notebook
    """

    timeout = Integer(30, allow_none=True,
        help=dedent(
            """
            The time to wait (in seconds) for output from executions.
            If a cell execution takes longer, an exception (TimeoutError
            on python 3+, RuntimeError on python 2) is raised.

            `None` or `-1` will disable the timeout. If `timeout_func` is set,
            it overrides `timeout`.
            """
        )
    ).tag(config=True)

    timeout_func = Any(
        default_value=None,
        allow_none=True,
        help=dedent(
            """
            A callable which, when given the cell source as input,
            returns the time to wait (in seconds) for output from cell
            executions. If a cell execution takes longer, an exception
            (TimeoutError on python 3+, RuntimeError on python 2) is
            raised.

            Returning `None` or `-1` will disable the timeout for the cell.
            Not setting `timeout_func` will cause the preprocessor to
            default to using the `timeout` trait for all cells. The
            `timeout_func` trait overrides `timeout` if it is not `None`.
            """
        )
    ).tag(config=True)

    interrupt_on_timeout = Bool(False,
        help=dedent(
            """
            If execution of a cell times out, interrupt the kernel and
            continue executing other cells rather than throwing an error and
            stopping.
            """
        )
    ).tag(config=True)

    startup_timeout = Integer(60,
        help=dedent(
            """
            The time to wait (in seconds) for the kernel to start.
            If kernel startup takes longer, a RuntimeError is
            raised.
            """
        )
    ).tag(config=True)

    allow_errors = Bool(False,
        help=dedent(
            """
            If `False` (default), when a cell raises an error the
            execution is stopped and a `CellExecutionError`
            is raised.
            If `True`, execution errors are ignored and the execution
            is continued until the end of the notebook. Output from
            exceptions is included in the cell output in both cases.
            """
        )
    ).tag(config=True)

    force_raise_errors = Bool(False,
        help=dedent(
            """
            If False (default), errors from executing the notebook can be
            allowed with a `raises-exception` tag on a single cell, or the
            `allow_errors` configurable option for all cells. An allowed error
            will be recorded in notebook output, and execution will continue.
            If an error occurs when it is not explicitly allowed, a
            `CellExecutionError` will be raised.
            If True, `CellExecutionError` will be raised for any error that occurs
            while executing the notebook. This overrides both the
            `allow_errors` option and the `raises-exception` cell tag.
            """
        )
    ).tag(config=True)

    extra_arguments = List(Unicode())

    kernel_name = Unicode('',
        help=dedent(
            """
            Name of kernel to use to execute the cells.
            If not set, use the kernel_spec embedded in the notebook.
            """
        )
    ).tag(config=True)

    raise_on_iopub_timeout = Bool(False,
        help=dedent(
            """
            If `False` (default), then the kernel will continue waiting for
            iopub messages until it receives a kernel idle message, or until a
            timeout occurs, at which point the currently executing cell will be
            skipped. If `True`, then an error will be raised after the first
            timeout. This option generally does not need to be used, but may be
            useful in contexts where there is the possibility of executing
            notebooks with memory-consuming infinite loops.
            """
            )
    ).tag(config=True)

    store_widget_state = Bool(True,
        help=dedent(
            """
            If `True` (default), then the state of the Jupyter widgets created
            at the kernel will be stored in the metadata of the notebook.
            """
            )
    ).tag(config=True)

    iopub_timeout = Integer(4, allow_none=False,
        help=dedent(
            """
            The time to wait (in seconds) for IOPub output. This generally
            doesn't need to be set, but on some slow networks (such as CI
            systems) the default timeout might not be long enough to get all
            messages.
            """
        )
    ).tag(config=True)

    shutdown_kernel = Enum(['graceful', 'immediate'],
        default_value='graceful',
        help=dedent(
            """
            If `graceful` (default), then the kernel is given time to clean
            up after executing all cells, e.g., to execute its `atexit` hooks.
            If `immediate`, then the kernel is signaled to immediately
            terminate.
            """
            )
    ).tag(config=True)

    kernel_manager_class = Type(
        config=True,
        help='The kernel manager class to use.'
    )
    @default('kernel_manager_class')
    def _kernel_manager_class_default(self):
        """Use a dynamic default to avoid importing jupyter_client at startup"""
        try:
            from jupyter_client import KernelManager
        except ImportError:
            raise ImportError("`nbconvert --execute` requires the jupyter_client package: `pip install jupyter_client`")
        return KernelManager

    _display_id_map = Dict(
        help=dedent(
              """
              mapping of locations of outputs with a given display_id
              tracks cell index and output index within cell.outputs for
              each appearance of the display_id
              {
                  'display_id': {
                      cell_idx: [output_idx,]
                  }
              }
              """))

    def start_new_kernel(self, **kwargs):
        """Creates a new kernel manager and kernel client.

        Parameters
        ----------
        kwargs :
            Any options for `self.kernel_manager_class.start_kernel()`. Because
            that defaults to KernelManager, this will likely include options
            accepted by `KernelManager.start_kernel()`, which includes `cwd`.

        Returns
        -------
        km : KernelManager
            A kernel manager as created by self.kernel_manager_class.
        kc : KernelClient
            Kernel client as created by the kernel manager `km`.
        """
        if not self.kernel_name:
            self.kernel_name = self.nb.metadata.get(
                'kernelspec', {}).get('name', 'python')
        km = self.kernel_manager_class(kernel_name=self.kernel_name,
                                       config=self.config)
        km.start_kernel(extra_arguments=self.extra_arguments, **kwargs)

        kc = km.client()
        kc.start_channels()
        try:
            kc.wait_for_ready(timeout=self.startup_timeout)
        except RuntimeError:
            kc.stop_channels()
            km.shutdown_kernel()
            raise
        kc.allow_stdin = False
        return km, kc

    @contextmanager
    def setup_preprocessor(self, nb, resources, km=None):
        """
        Context manager for setting up the class to execute a notebook.

        This assigns `nb` to `self.nb` where it will be modified in-place. It also creates
        and assigns the Kernel Manager (`self.km`) and Kernel Client (`self.kc`).

        It is intended to yield to a block that will execute code.

        When control returns from the yield it stops the client's zmq channels, shuts
        down the kernel, and removes the now unused attributes.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being executed.
        resources : dictionary
            Additional resources used in the conversion process. For example,
            passing ``{'metadata': {'path': run_path}}`` sets the
            execution path to ``run_path``.
        km : KernelManager (optional)
            Optional kernel manager. If none is provided, a kernel manager will
            be created.

        Returns
        -------
        nb : NotebookNode
            The executed notebook.
        resources : dictionary
            Additional resources used in the conversion process.
        """
        path = resources.get('metadata', {}).get('path', '') or None
        self.nb = nb
        # clear display_id map
        self._display_id_map = {}
        self.widget_state = {}
        self.widget_buffers = {}

        if km is None:
            self.km, self.kc = self.start_new_kernel(cwd=path)
            try:
                # Yield unbound args for easier understanding and downstream consumption
                yield nb, self.km, self.kc
            finally:
                self.kc.stop_channels()
                self.km.shutdown_kernel(now=self.shutdown_kernel == 'immediate')

                for attr in ['nb', 'km', 'kc']:
                    delattr(self, attr)
        else:
            self.km = km
            if not km.has_kernel:
                # `setup_preprocessor` receives no **kwargs, so none are forwarded here.
                km.start_kernel(extra_arguments=self.extra_arguments)
            self.kc = km.client()

            self.kc.start_channels()
            try:
                self.kc.wait_for_ready(timeout=self.startup_timeout)
            except RuntimeError:
                self.kc.stop_channels()
                raise
            self.kc.allow_stdin = False
            try:
                yield nb, self.km, self.kc
            finally:
                for attr in ['nb', 'km', 'kc']:
                    delattr(self, attr)

    def preprocess(self, nb, resources, km=None):
        """
        Preprocess notebook executing each code cell.

        The input argument `nb` is modified in-place.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being executed.
        resources : dictionary
            Additional resources used in the conversion process. For example,
            passing ``{'metadata': {'path': run_path}}`` sets the
            execution path to ``run_path``.
        km: KernelManager (optional)
            Optional kernel manager. If none is provided, a kernel manager will
            be created.

        Returns
        -------
        nb : NotebookNode
            The executed notebook.
        resources : dictionary
            Additional resources used in the conversion process.
        """

        with self.setup_preprocessor(nb, resources, km=km):
            self.log.info("Executing notebook with kernel: %s" % self.kernel_name)
            nb, resources = super(ExecutePreprocessor, self).preprocess(nb, resources)
            info_msg = self._wait_for_reply(self.kc.kernel_info())
            nb.metadata['language_info'] = info_msg['content']['language_info']
            self.set_widgets_metadata()

        return nb, resources

    def set_widgets_metadata(self):
        if self.widget_state:
            self.nb.metadata.widgets = {
                'application/vnd.jupyter.widget-state+json': {
                    'state': {
                        model_id: _serialize_widget_state(state)
                        for model_id, state in self.widget_state.items() if '_model_name' in state
                    },
                    'version_major': 2,
                    'version_minor': 0,
                }
            }
            for key, widget in self.nb.metadata.widgets['application/vnd.jupyter.widget-state+json']['state'].items():
                buffers = self.widget_buffers.get(key)
                if buffers:
                    widget['buffers'] = buffers

    def preprocess_cell(self, cell, resources, cell_index):
        """
        Executes a single code cell. See base.py for details.

        To execute all cells see :meth:`preprocess`.
        """
        if cell.cell_type != 'code' or not cell.source.strip():
            return cell, resources

        reply, outputs = self.run_cell(cell, cell_index)
        cell.outputs = outputs

        cell_allows_errors = (self.allow_errors or "raises-exception"
                              in cell.metadata.get("tags", []))

        if self.force_raise_errors or not cell_allows_errors:
            for out in outputs:
                if out.output_type == 'error':
                    raise CellExecutionError.from_cell_and_msg(cell, out)
            if (reply is not None) and reply['content']['status'] == 'error':
                raise CellExecutionError.from_cell_and_msg(cell, reply['content'])
        return cell, resources

    def _update_display_id(self, display_id, msg):
        """Update outputs with a given display_id"""
        if display_id not in self._display_id_map:
            self.log.debug("display id %r not in %s", display_id, self._display_id_map)
            return

        if msg['header']['msg_type'] == 'update_display_data':
            msg['header']['msg_type'] = 'display_data'

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg['msg_type'])
            return

        for cell_idx, output_indices in self._display_id_map[display_id].items():
            cell = self.nb['cells'][cell_idx]
            outputs = cell['outputs']
            for output_idx in output_indices:
                outputs[output_idx]['data'] = out['data']
                outputs[output_idx]['metadata'] = out['metadata']

    def _wait_for_reply(self, msg_id, cell=None):
        # wait for finish, with timeout
        while True:
            try:
                if self.timeout_func is not None and cell is not None:
                    timeout = self.timeout_func(cell)
                else:
                    timeout = self.timeout

                if not timeout or timeout < 0:
                    timeout = None

                if timeout is not None:
                    # timeout specified
                    msg = self.kc.shell_channel.get_msg(timeout=timeout)
                else:
                    # no timeout specified, if kernel dies still handle this correctly
                    while True:
                        try:
                            # check every few seconds if kernel is still alive
                            msg = self.kc.shell_channel.get_msg(timeout=5)
                        except Empty:
                            # received no message, check if kernel is still alive
                            if not self.kc.is_alive():
                                self.log.error(
                                    "Kernel died while waiting for execute reply.")
                                raise RuntimeError("Kernel died")

                            # kernel still alive, wait for a message
                            continue
                        # message received
                        break
            except Empty:
                self.log.error(
                    "Timeout waiting for execute reply (%is)." % self.timeout)
                if self.interrupt_on_timeout:
                    self.log.error("Interrupting kernel")
                    self.km.interrupt_kernel()
                    break
                else:
                    raise TimeoutError("Cell execution timed out")

            if msg['parent_header'].get('msg_id') == msg_id:
                return msg
            else:
                # not our reply
                continue

    def run_cell(self, cell, cell_index=0):
        msg_id = self.kc.execute(cell.source)
        self.log.debug("Executing cell:\n%s", cell.source)
        exec_reply = self._wait_for_reply(msg_id, cell)

        outs = cell.outputs = []
        self.clear_before_next_output = False

        while True:
            try:
                # We've already waited for execute_reply, so all output
                # should already be waiting. However, on slow networks, like
                # in certain CI systems, waiting < 1 second might miss messages.
                # So long as the kernel sends a status:idle message when it
                # finishes, we won't actually have to wait this long, anyway.
                msg = self.kc.iopub_channel.get_msg(timeout=self.iopub_timeout)
            except Empty:
                self.log.warning("Timeout waiting for IOPub output")
                if self.raise_on_iopub_timeout:
                    raise RuntimeError("Timeout waiting for IOPub output")
                else:
                    break
            if msg['parent_header'].get('msg_id') != msg_id:
                # not an output from our execution
                continue

            msg_type = msg['msg_type']
            self.log.debug("output: %s", msg_type)
            content = msg['content']

            # set the prompt number for the input and the output
            if 'execution_count' in content:
                cell['execution_count'] = content['execution_count']

            if msg_type == 'status':
                if content['execution_state'] == 'idle':
                    break
                else:
                    continue
            elif msg_type == 'execute_input':
                continue
            elif msg_type == 'clear_output':
                self.clear_output(outs, msg, cell_index)
                continue
            elif msg_type.startswith('comm'):
                self.handle_comm_msg(outs, msg, cell_index)
                continue

            display_id = None
            if msg_type in {'execute_result', 'display_data', 'update_display_data'}:
                display_id = msg['content'].get('transient', {}).get('display_id', None)
                if display_id:
                    self._update_display_id(display_id, msg)
                if msg_type == 'update_display_data':
                    # update_display_data doesn't get recorded
                    continue

            self.output(outs, msg, display_id, cell_index)

        return exec_reply, outs

    def output(self, outs, msg, display_id, cell_index):
        msg_type = msg['msg_type']
        if self.clear_before_next_output:
            self.log.debug('Executing delayed clear_output')
            outs[:] = []
            self.clear_display_id_mapping(cell_index)
            self.clear_before_next_output = False

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg_type)
            return
        if display_id:
            # record output index in:
            #   _display_id_map[display_id][cell_idx]
            cell_map = self._display_id_map.setdefault(display_id, {})
            output_idx_list = cell_map.setdefault(cell_index, [])
            output_idx_list.append(len(outs))
        outs.append(out)

    def clear_output(self, outs, msg, cell_index):
        content = msg['content']
        if content.get('wait'):
            self.log.debug('Wait to clear output')
            self.clear_before_next_output = True
        else:
            self.log.debug('Immediate clear output')
            outs[:] = []
            self.clear_display_id_mapping(cell_index)

    def clear_display_id_mapping(self, cell_index):
        for display_id, cell_map in self._display_id_map.items():
            if cell_index in cell_map:
                cell_map[cell_index] = []

    def handle_comm_msg(self, outs, msg, cell_index):
        content = msg['content']
        data = content['data']
        if self.store_widget_state and 'state' in data:  # ignore custom messages
            self.widget_state.setdefault(content['comm_id'], {}).update(data['state'])
            if 'buffer_paths' in data and data['buffer_paths']:
                self.widget_buffers[content['comm_id']] = _get_buffer_data(msg)
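
A typical driver for this preprocessor, mirroring standard nbconvert usage
(file names are illustrative):

import nbformat
from nbconvert.preprocessors import ExecutePreprocessor

nb = nbformat.read('input.ipynb', as_version=4)
ep = ExecutePreprocessor(timeout=600, kernel_name='python3')
ep.preprocess(nb, {'metadata': {'path': '.'}})
nbformat.write(nb, 'executed.ipynb')
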
Exemple #14
0
class VoilaConfiguration(traitlets.config.Configurable):
    """Common configuration options between the server extension and the application."""
    allow_template_override = Enum(['YES', 'NOTEBOOK', 'NO'],
                                   'YES',
                                   help='''
    Allow overriding the template (YES), or not (NO), or only from the notebook metadata.
    ''').tag(config=True)
    allow_theme_override = Enum(['YES', 'NOTEBOOK', 'NO'],
                                'YES',
                                help='''
    Allow overriding the theme (YES), or not (NO), or only from the notebook metadata.
    ''').tag(config=True)
    template = Unicode('lab',
                       config=True,
                       allow_none=True,
                       help=('Template name to be used by Voilà.'))
    resources = Dict(allow_none=True,
                     help="""
        extra resources used by templates;
        example use with --template=reveal
        --VoilaConfiguration.resources="{'reveal': {'transition': 'fade', 'scroll': True}}"
        """).tag(config=True)
    theme = Unicode('light').tag(config=True)
    strip_sources = Bool(
        True, help='Strip sources from rendered html').tag(config=True)
    enable_nbextensions = Bool(
        False,
        config=True,
        help=('Set to True for Voilà to load notebook extensions'))

    file_whitelist = List(
        Unicode(),
        [r'.*\.(png|jpg|gif|svg)'],
        help=r"""
    List of regular expressions for controlling which static files are served.
    All files that are served should match at least one whitelist rule, and no blacklist rule.
    Example: --VoilaConfiguration.file_whitelist="['.*\.(png|jpg|gif|svg)', 'public.*']"
    """,
    ).tag(config=True)

    file_blacklist = List(Unicode(), [r'.*\.(ipynb|py)'],
                          help=r"""
    List of regular expressions for controlling which static files are forbidden to be served.
    All files that are served should match at least one whitelist rule, and no blacklist rule.
    Example:
    --VoilaConfiguration.file_whitelist="['.*']" # all files
    --VoilaConfiguration.file_blacklist="['private.*', '.*\.(ipynb)']" # except files in the private dir and notebook files
    """).tag(config=True)

    language_kernel_mapping = Dict(
        {},
        help="""Mapping of language name to kernel name
        Example mapping python to use xeus-python, and C++11 to use xeus-cling:
        --VoilaConfiguration.language_kernel_mapping='{"python": "xpython", "C++11": "xcpp11"}'
        """,
    ).tag(config=True)

    extension_language_mapping = Dict(
        {},
        help='''Mapping of file extension to kernel language
        Example mapping .py files to a python language kernel, and .cpp to a C++11 language kernel:
        --VoilaConfiguration.extension_language_mapping='{".py": "python", ".cpp": "C++11"}'
        ''',
    ).tag(config=True)

    http_keep_alive_timeout = Int(10,
                                  help="""
    When a cell takes a long time to execute, the http connection can time out (possibly because of a proxy).
    Voila sends a 'heartbeat' message after the timeout has passed to keep the http connection alive.
    """).tag(config=True)

    show_tracebacks = Bool(
        False,
        config=True,
        help=('Whether to send tracebacks to clients on exceptions.'))
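
Because VoilaConfiguration is a traitlets Configurable, the options above can
be set from a Python config object as well as from the command-line flags shown
in the help strings. A sketch (values are illustrative):

from traitlets.config import Config

c = Config()
c.VoilaConfiguration.theme = 'dark'
c.VoilaConfiguration.strip_sources = False
c.VoilaConfiguration.language_kernel_mapping = {'python': 'xpython'}
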
Exemple #15
0
class NotebookClient(LoggingConfigurable):
    """
    Encompasses a Client for executing cells in a notebook
    """

    timeout: int = Integer(
        None,
        allow_none=True,
        help=dedent("""
            The time to wait (in seconds) for output from executions.
            If a cell execution takes longer, a TimeoutError is raised.

            ``None`` or ``-1`` will disable the timeout. If ``timeout_func`` is set,
            it overrides ``timeout``.
            """),
    ).tag(config=True)

    timeout_func: t.Any = Any(
        default_value=None,
        allow_none=True,
        help=dedent("""
            A callable which, when given the cell source as input,
            returns the time to wait (in seconds) for output from cell
            executions. If a cell execution takes longer, a TimeoutError
            is raised.

            Returning ``None`` or ``-1`` will disable the timeout for the cell.
            Not setting ``timeout_func`` will cause the client to
            default to using the ``timeout`` trait for all cells. The
            ``timeout_func`` trait overrides ``timeout`` if it is not ``None``.
            """),
    ).tag(config=True)

    interrupt_on_timeout: bool = Bool(
        False,
        help=dedent("""
            If execution of a cell times out, interrupt the kernel and
            continue executing other cells rather than throwing an error and
            stopping.
            """),
    ).tag(config=True)

    startup_timeout: int = Integer(
        60,
        help=dedent("""
            The time to wait (in seconds) for the kernel to start.
            If kernel startup takes longer, a RuntimeError is
            raised.
            """),
    ).tag(config=True)

    allow_errors: bool = Bool(
        False,
        help=dedent("""
            If ``False`` (default), when a cell raises an error the
            execution is stopped and a `CellExecutionError`
            is raised.
            If ``True``, execution errors are ignored and the execution
            is continued until the end of the notebook. Output from
            exceptions is included in the cell output in both cases.
            """),
    ).tag(config=True)

    force_raise_errors: bool = Bool(
        False,
        help=dedent("""
            If False (default), errors from executing the notebook can be
            allowed with a ``raises-exception`` tag on a single cell, or the
            ``allow_errors`` configurable option for all cells. An allowed error
            will be recorded in notebook output, and execution will continue.
            If an error occurs when it is not explicitly allowed, a
            `CellExecutionError` will be raised.
            If True, `CellExecutionError` will be raised for any error that occurs
            while executing the notebook. This overrides both the
            ``allow_errors`` option and the ``raises-exception`` cell tag.
            """),
    ).tag(config=True)

    extra_arguments: t.List = List(Unicode()).tag(config=True)

    kernel_name: str = Unicode(
        '',
        help=dedent("""
            Name of kernel to use to execute the cells.
            If not set, use the kernel_spec embedded in the notebook.
            """),
    ).tag(config=True)

    raise_on_iopub_timeout: bool = Bool(
        False,
        help=dedent("""
            If ``False`` (default), then the kernel will continue waiting for
            iopub messages until it receives a kernel idle message, or until a
            timeout occurs, at which point the currently executing cell will be
            skipped. If ``True``, then an error will be raised after the first
            timeout. This option generally does not need to be used, but may be
            useful in contexts where there is the possibility of executing
            notebooks with memory-consuming infinite loops.
            """),
    ).tag(config=True)

    store_widget_state: bool = Bool(
        True,
        help=dedent("""
            If ``True`` (default), then the state of the Jupyter widgets created
            at the kernel will be stored in the metadata of the notebook.
            """),
    ).tag(config=True)

    record_timing: bool = Bool(
        True,
        help=dedent("""
            If ``True`` (default), then the execution timings of each cell will
            be stored in the metadata of the notebook.
            """),
    ).tag(config=True)

    iopub_timeout: int = Integer(
        4,
        allow_none=False,
        help=dedent("""
            The time to wait (in seconds) for IOPub output. This generally
            doesn't need to be set, but on some slow networks (such as CI
            systems) the default timeout might not be long enough to get all
            messages.
            """),
    ).tag(config=True)

    shell_timeout_interval: int = Integer(
        5,
        allow_none=False,
        help=dedent("""
            The time to wait (in seconds) for Shell output before retrying.
            This generally doesn't need to be set, but if one needs to check
            for dead kernels at a faster rate this can help.
            """),
    ).tag(config=True)

    shutdown_kernel = Enum(
        ['graceful', 'immediate'],
        default_value='graceful',
        help=dedent("""
            If ``graceful`` (default), then the kernel is given time to clean
            up after executing all cells, e.g., to execute its ``atexit`` hooks.
            If ``immediate``, then the kernel is signaled to immediately
            terminate.
            """),
    ).tag(config=True)

    ipython_hist_file: str = Unicode(
        default_value=':memory:',
        help="""Path to file to use for SQLite history database for an IPython kernel.

        The specific value ``:memory:`` (including the colons at both ends but not
        the backticks) avoids creating a history file. Otherwise, IPython
        will create a history file for each kernel.

        When running kernels simultaneously (e.g. via multiprocessing), saving history to a single
        SQLite file can result in database errors, so using ``:memory:`` is recommended in
        non-interactive contexts.
        """,
    ).tag(config=True)

    kernel_manager_class: KernelManager = Type(
        config=True, help='The kernel manager class to use.')

    @default('kernel_manager_class')
    def _kernel_manager_class_default(self) -> KernelManager:
        """Use a dynamic default to avoid importing jupyter_client at startup"""
        from jupyter_client import AsyncKernelManager

        return AsyncKernelManager

    _display_id_map: t.Dict[str, t.Dict] = Dict(help=dedent("""
              mapping of locations of outputs with a given display_id
              tracks cell index and output index within cell.outputs for
              each appearance of the display_id
              {
                  'display_id': {
                      cell_idx: [output_idx,]
                  }
              }
              """))

    display_data_priority: t.List = List(
        [
            'text/html',
            'application/pdf',
            'text/latex',
            'image/svg+xml',
            'image/png',
            'image/jpeg',
            'text/markdown',
            'text/plain',
        ],
        help="""
            An ordered list of preferred output types; the first
            encountered will be used when converting, discarding
            the others.
            """,
    ).tag(config=True)

    resources: t.Dict = Dict(help=dedent("""
            Additional resources used in the conversion process. For example,
            passing ``{'metadata': {'path': run_path}}`` sets the
            execution path to ``run_path``.
            """))

    def __init__(self,
                 nb: NotebookNode,
                 km: t.Optional[KernelManager] = None,
                 **kw) -> None:
        """Initializes the execution manager.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being executed.
        km : KernelManager (optional)
            Optional kernel manager. If none is provided, a kernel manager will
            be created.
        """
        super().__init__(**kw)
        self.nb: NotebookNode = nb
        self.km: t.Optional[KernelManager] = km
        self.owns_km: bool = km is None  # whether the NotebookClient owns the kernel manager
        self.kc: t.Optional[KernelClient] = None
        self.reset_execution_trackers()
        self.widget_registry: t.Dict[str, t.Dict] = {
            '@jupyter-widgets/output': {
                'OutputModel': OutputWidget
            }
        }
        # comm_open_handlers should return an object with a .handle_msg(msg) method or None
        self.comm_open_handlers: t.Dict[str, t.Any] = {
            'jupyter.widget': self.on_comm_open_jupyter_widget
        }
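
        # A minimal driver sketch (file name is illustrative; `execute()` is
        # the nbclient entry point, which is not part of this excerpt):
        #     nb = nbformat.read('notebook.ipynb', as_version=4)
        #     client = NotebookClient(nb, timeout=600, kernel_name='python3')
        #     client.execute()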

    def reset_execution_trackers(self) -> None:
        """Resets any per-execution trackers.
        """
        self.task_poll_for_reply: t.Optional[asyncio.Future] = None
        self.code_cells_executed = 0
        self._display_id_map = {}
        self.widget_state: t.Dict[str, t.Dict] = {}
        self.widget_buffers: t.Dict[str, t.List[t.Dict[str, str]]] = {}
        # Maps to a list of hooks, where the last one is used; this supports
        # nested use of output widgets.
        self.output_hook_stack: t.Any = collections.defaultdict(list)
        # our front-end mimicking Output widgets
        self.comm_objects: t.Dict[str, t.Any] = {}

    def create_kernel_manager(self) -> KernelManager:
        """Creates a new kernel manager.

        Returns
        -------
        km : KernelManager
            Kernel manager whose client class is asynchronous.
        """
        if not self.kernel_name:
            kn = self.nb.metadata.get('kernelspec', {}).get('name')
            if kn is not None:
                self.kernel_name = kn

        if not self.kernel_name:
            self.km = self.kernel_manager_class(config=self.config)
        else:
            self.km = self.kernel_manager_class(kernel_name=self.kernel_name,
                                                config=self.config)
        self.km.client_class = 'jupyter_client.asynchronous.AsyncKernelClient'
        return self.km

    async def _async_cleanup_kernel(self) -> None:
        assert self.km is not None
        now = self.shutdown_kernel == "immediate"
        try:
            # Queue the manager to kill the process, and recover gracefully if it's already dead.
            if await ensure_async(self.km.is_alive()):
                await ensure_async(self.km.shutdown_kernel(now=now))
        except RuntimeError as e:
            # The error isn't specialized, so we have to check the message
            if 'No kernel is running!' not in str(e):
                raise
        finally:
            # Remove any state left over even if we failed to stop the kernel
            await ensure_async(self.km.cleanup_resources())
            if getattr(self, "kc") and self.kc is not None:
                await ensure_async(self.kc.stop_channels())
                self.kc = None
                self.km = None

    _cleanup_kernel = run_sync(_async_cleanup_kernel)

    async def async_start_new_kernel(self, **kwargs) -> None:
        """Creates a new kernel.

        Parameters
        ----------
        kwargs :
            Any options for ``self.kernel_manager_class.start_kernel()``. Because
            that defaults to AsyncKernelManager, this will likely include options
            accepted by ``AsyncKernelManager.start_kernel()``, which includes ``cwd``.
        """
        assert self.km is not None
        resource_path = self.resources.get('metadata', {}).get('path') or None
        if resource_path and 'cwd' not in kwargs:
            kwargs["cwd"] = resource_path

        has_history_manager_arg = any(
            arg.startswith('--HistoryManager.hist_file')
            for arg in self.extra_arguments)
        if (hasattr(self.km, 'ipykernel') and self.km.ipykernel
                and self.ipython_hist_file and not has_history_manager_arg):
            self.extra_arguments += [
                '--HistoryManager.hist_file={}'.format(self.ipython_hist_file)
            ]

        await ensure_async(
            self.km.start_kernel(extra_arguments=self.extra_arguments,
                                 **kwargs))

    start_new_kernel = run_sync(async_start_new_kernel)

    async def async_start_new_kernel_client(self) -> KernelClient:
        """Creates a new kernel client.

        Returns
        -------
        kc : KernelClient
            Kernel client as created by the kernel manager ``km``.
        """
        assert self.km is not None
        self.kc = self.km.client()
        await ensure_async(self.kc.start_channels())
        try:
            await ensure_async(
                self.kc.wait_for_ready(timeout=self.startup_timeout))
        except RuntimeError:
            await self._async_cleanup_kernel()
            raise
        self.kc.allow_stdin = False
        return self.kc

    start_new_kernel_client = run_sync(async_start_new_kernel_client)

    @contextmanager
    def setup_kernel(self, **kwargs) -> t.Generator:
        """
        Context manager for setting up the kernel to execute a notebook.

        This assigns the Kernel Manager (``self.km``) if missing and the Kernel Client (``self.kc``).

        When control returns from the yield it stops the client's zmq channels, and shuts
        down the kernel.
        """
        # by default, cleanup the kernel client if we own the kernel manager
        # and keep it alive if we don't
        cleanup_kc = kwargs.pop('cleanup_kc', self.owns_km)

        # Can't use run_until_complete on an asynccontextmanager function :(
        if self.km is None:
            self.km = self.create_kernel_manager()

        if not self.km.has_kernel:
            self.start_new_kernel(**kwargs)
            self.start_new_kernel_client()
        try:
            yield
        finally:
            if cleanup_kc:
                self._cleanup_kernel()

    @asynccontextmanager
    async def async_setup_kernel(self, **kwargs) -> t.AsyncGenerator:
        """
        Context manager for setting up the kernel to execute a notebook.

        This assigns the Kernel Manager (``self.km``) if missing and the Kernel Client (``self.kc``).

        When control returns from the yield it stops the client's zmq channels, and shuts
        down the kernel.

        Handlers for SIGINT and SIGTERM are also added to cleanup in case of unexpected shutdown.
        """
        # by default, cleanup the kernel client if we own the kernel manager
        # and keep it alive if we don't
        cleanup_kc = kwargs.pop('cleanup_kc', self.owns_km)
        if self.km is None:
            self.km = self.create_kernel_manager()

        # self._cleanup_kernel uses run_sync, which ensures the ioloop is running again.
        # This is necessary as the ioloop has stopped once atexit fires.
        atexit.register(self._cleanup_kernel)

        def on_signal():
            asyncio.ensure_future(self._async_cleanup_kernel())
            atexit.unregister(self._cleanup_kernel)

        loop = asyncio.get_event_loop()
        try:
            loop.add_signal_handler(signal.SIGINT, on_signal)
            loop.add_signal_handler(signal.SIGTERM, on_signal)
        except (NotImplementedError, RuntimeError):
            # NotImplementedError: Windows does not support signals.
            # RuntimeError: Raised when add_signal_handler is called outside the main thread
            pass

        if not self.km.has_kernel:
            await self.async_start_new_kernel(**kwargs)
            await self.async_start_new_kernel_client()
        try:
            yield
        finally:
            if cleanup_kc:
                await self._async_cleanup_kernel()

            atexit.unregister(self._cleanup_kernel)
            try:
                loop.remove_signal_handler(signal.SIGINT)
                loop.remove_signal_handler(signal.SIGTERM)
            except (NotImplementedError, RuntimeError):
                pass

    async def async_execute(self,
                            reset_kc: bool = False,
                            **kwargs) -> NotebookNode:
        """
        Executes each code cell.

        Parameters
        ----------
        reset_kc : bool
            If True, the kernel client will be reset and a new one will be
            created (default: False).
        kwargs :
            Any option for ``self.kernel_manager_class.start_kernel()``. Because
            that defaults to AsyncKernelManager, this will likely include options
            accepted by ``jupyter_client.AsyncKernelManager.start_kernel()``,
            which includes ``cwd``.

        Returns
        -------
        nb : NotebookNode
            The executed notebook.
        """
        if reset_kc and self.owns_km:
            await self._async_cleanup_kernel()
        self.reset_execution_trackers()

        async with self.async_setup_kernel(**kwargs):
            assert self.kc is not None
            self.log.info("Executing notebook with kernel: %s" %
                          self.kernel_name)
            msg_id = await ensure_async(self.kc.kernel_info())
            info_msg = await self.async_wait_for_reply(msg_id)
            if info_msg is not None:
                if 'language_info' in info_msg['content']:
                    self.nb.metadata['language_info'] = info_msg['content'][
                        'language_info']
                else:
                    raise RuntimeError(
                        'Kernel info received message content has no "language_info" key. '
                        'Content is:\n' + str(info_msg['content']))
            for index, cell in enumerate(self.nb.cells):
                # Ignore `'execution_count' in content` as it's always 1
                # when store_history is False
                await self.async_execute_cell(
                    cell, index, execution_count=self.code_cells_executed + 1)
            self.set_widgets_metadata()

        return self.nb

    execute = run_sync(async_execute)
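
    # Hedged usage sketch for the synchronous wrapper above (assumptions:
    # ``nbformat`` is installed, ``Client`` stands in for this class, whose
    # real name is defined earlier in the module, and the file names are
    # hypothetical):
    #
    #     import nbformat
    #     nb = nbformat.read('example.ipynb', as_version=4)
    #     client = Client(nb)
    #     client.execute()
    #     nbformat.write(nb, 'executed.ipynb')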

    def set_widgets_metadata(self) -> None:
        if self.widget_state:
            self.nb.metadata.widgets = {
                'application/vnd.jupyter.widget-state+json': {
                    'state': {
                        model_id: self._serialize_widget_state(state)
                        for model_id, state in self.widget_state.items()
                        if '_model_name' in state
                    },
                    'version_major': 2,
                    'version_minor': 0,
                }
            }
            for key, widget in self.nb.metadata.widgets[
                    'application/vnd.jupyter.widget-state+json'][
                        'state'].items():
                buffers = self.widget_buffers.get(key)
                if buffers:
                    widget['buffers'] = buffers

    def _update_display_id(self, display_id: str, msg: t.Dict) -> None:
        """Update outputs with a given display_id"""
        if display_id not in self._display_id_map:
            self.log.debug("display id %r not in %s", display_id,
                           self._display_id_map)
            return

        if msg['header']['msg_type'] == 'update_display_data':
            msg['header']['msg_type'] = 'display_data'

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg['msg_type'])
            return

        for cell_idx, output_indices in self._display_id_map[display_id].items():
            cell = self.nb['cells'][cell_idx]
            outputs = cell['outputs']
            for output_idx in output_indices:
                outputs[output_idx]['data'] = out['data']
                outputs[output_idx]['metadata'] = out['metadata']

    async def _async_poll_for_reply(
            self, msg_id: str, cell: NotebookNode, timeout: t.Optional[int],
            task_poll_output_msg: asyncio.Future,
            task_poll_kernel_alive: asyncio.Future) -> t.Dict:

        assert self.kc is not None
        new_timeout: t.Optional[float] = None
        if timeout is not None:
            deadline = monotonic() + timeout
            new_timeout = float(timeout)
        while True:
            try:
                msg = await ensure_async(
                    self.kc.shell_channel.get_msg(timeout=new_timeout))
                if msg['parent_header'].get('msg_id') == msg_id:
                    if self.record_timing:
                        cell['metadata']['execution'][
                            'shell.execute_reply'] = timestamp()
                    try:
                        await asyncio.wait_for(task_poll_output_msg,
                                               self.iopub_timeout)
                    except (asyncio.TimeoutError, Empty):
                        if self.raise_on_iopub_timeout:
                            task_poll_kernel_alive.cancel()
                            raise CellTimeoutError.error_from_timeout_and_cell(
                                "Timeout waiting for IOPub output",
                                self.iopub_timeout, cell)
                        else:
                            self.log.warning(
                                "Timeout waiting for IOPub output")
                    task_poll_kernel_alive.cancel()
                    return msg
                else:
                    if new_timeout is not None:
                        new_timeout = max(0, deadline - monotonic())
            except Empty:
                # received no message, check if kernel is still alive
                assert timeout is not None
                task_poll_kernel_alive.cancel()
                await self._async_check_alive()
                await self._async_handle_timeout(timeout, cell)

    async def _async_poll_output_msg(self, parent_msg_id: str,
                                     cell: NotebookNode,
                                     cell_index: int) -> None:

        assert self.kc is not None
        while True:
            msg = await ensure_async(
                self.kc.iopub_channel.get_msg(timeout=None))
            if msg['parent_header'].get('msg_id') == parent_msg_id:
                try:
                    # Will raise CellExecutionComplete when completed
                    self.process_message(msg, cell, cell_index)
                except CellExecutionComplete:
                    return

    async def _async_poll_kernel_alive(self) -> None:
        while True:
            await asyncio.sleep(1)
            try:
                await self._async_check_alive()
            except DeadKernelError:
                assert self.task_poll_for_reply is not None
                self.task_poll_for_reply.cancel()
                return

    def _get_timeout(self, cell: t.Optional[NotebookNode]) -> t.Optional[int]:
        if self.timeout_func is not None and cell is not None:
            timeout = self.timeout_func(cell)
        else:
            timeout = self.timeout

        if not timeout or timeout < 0:
            timeout = None

        return timeout
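
    # Illustrative sketch of how the two timeout settings interact
    # (assumptions: ``client`` is an instance of this class and the 'slow'
    # cell tag is hypothetical). Returning None or -1 disables the timeout:
    #
    #     client.timeout = 60                      # default, in seconds
    #     client.timeout_func = lambda cell: (
    #         600 if 'slow' in cell.metadata.get('tags', []) else 60)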

    async def _async_handle_timeout(self,
                                    timeout: int,
                                    cell: t.Optional[NotebookNode] = None
                                    ) -> None:

        self.log.error("Timeout waiting for execute reply (%is)." % timeout)
        if self.interrupt_on_timeout:
            self.log.error("Interrupting kernel")
            assert self.km is not None
            await ensure_async(self.km.interrupt_kernel())
        else:
            raise CellTimeoutError.error_from_timeout_and_cell(
                "Cell execution timed out", timeout, cell)

    async def _async_check_alive(self) -> None:
        assert self.kc is not None
        if not await ensure_async(self.kc.is_alive()):
            self.log.error("Kernel died while waiting for execute reply.")
            raise DeadKernelError("Kernel died")

    async def async_wait_for_reply(
            self,
            msg_id: str,
            cell: t.Optional[NotebookNode] = None) -> t.Optional[t.Dict]:

        assert self.kc is not None
        # wait for finish, with timeout
        timeout = self._get_timeout(cell)
        cumulative_time = 0
        while True:
            try:
                msg = await ensure_async(
                    self.kc.shell_channel.get_msg(
                        timeout=self.shell_timeout_interval))
            except Empty:
                await self._async_check_alive()
                cumulative_time += self.shell_timeout_interval
                if timeout and cumulative_time > timeout:
                    await self._async_handle_timeout(timeout, cell)
                    break
            else:
                if msg['parent_header'].get('msg_id') == msg_id:
                    return msg
        return None

    wait_for_reply = run_sync(async_wait_for_reply)
    # Backwards compatibility naming for papermill
    _wait_for_reply = wait_for_reply

    def _passed_deadline(self, deadline: t.Optional[int]) -> bool:
        if deadline is not None and deadline - monotonic() <= 0:
            return True
        return False

    def _check_raise_for_error(self, cell: NotebookNode,
                               exec_reply: t.Optional[t.Dict]) -> None:

        cell_allows_errors = (self.allow_errors or
                              "raises-exception" in cell.metadata.get("tags", []))

        if self.force_raise_errors or not cell_allows_errors:
            if exec_reply is not None and exec_reply['content']['status'] == 'error':
                raise CellExecutionError.from_cell_and_msg(
                    cell, exec_reply['content'])

    async def async_execute_cell(self,
                                 cell: NotebookNode,
                                 cell_index: int,
                                 execution_count: t.Optional[int] = None,
                                 store_history: bool = True) -> NotebookNode:
        """
        Executes a single code cell.

        To execute all cells see :meth:`execute`.

        Parameters
        ----------
        cell : nbformat.NotebookNode
            The cell which is currently being processed.
        cell_index : int
            The position of the cell within the notebook object.
        execution_count : int
            The execution count to be assigned to the cell (default: Use kernel response)
        store_history : bool
            Determines if history should be stored in the kernel (default: True).
            Specific to ipython kernels, which can store command histories.

        Raises
        ------
        CellExecutionError
            If execution failed and should raise an exception, this will be raised
            with defaults about the failure.

        Returns
        -------
        cell : NotebookNode
            The cell which was just processed.
        """
        assert self.kc is not None
        if cell.cell_type != 'code' or not cell.source.strip():
            self.log.debug("Skipping non-executing cell %s", cell_index)
            return cell

        if self.record_timing and 'execution' not in cell['metadata']:
            cell['metadata']['execution'] = {}

        self.log.debug("Executing cell:\n%s", cell.source)
        parent_msg_id = await ensure_async(
            self.kc.execute(cell.source,
                            store_history=store_history,
                            stop_on_error=not self.allow_errors))
        # We launched a code cell to execute
        self.code_cells_executed += 1
        exec_timeout = self._get_timeout(cell)

        cell.outputs = []
        self.clear_before_next_output = False

        task_poll_kernel_alive = asyncio.ensure_future(
            self._async_poll_kernel_alive())
        task_poll_output_msg = asyncio.ensure_future(
            self._async_poll_output_msg(parent_msg_id, cell, cell_index))
        self.task_poll_for_reply = asyncio.ensure_future(
            self._async_poll_for_reply(parent_msg_id, cell, exec_timeout,
                                       task_poll_output_msg,
                                       task_poll_kernel_alive))
        try:
            exec_reply = await self.task_poll_for_reply
        except asyncio.CancelledError:
            # can only be cancelled by task_poll_kernel_alive when the kernel is dead
            task_poll_output_msg.cancel()
            raise DeadKernelError("Kernel died")
        except Exception as e:
            # Best effort to cancel request if it hasn't been resolved
            try:
                # Check if the task_poll_output is doing the raising for us
                if not isinstance(e, CellControlSignal):
                    task_poll_output_msg.cancel()
            finally:
                raise

        if execution_count:
            cell['execution_count'] = execution_count
        self._check_raise_for_error(cell, exec_reply)
        self.nb['cells'][cell_index] = cell
        return cell

    execute_cell = run_sync(async_execute_cell)

    def process_message(self, msg: t.Dict, cell: NotebookNode,
                        cell_index: int) -> t.Optional[t.List]:
        """
        Processes a kernel message, updates cell state, and returns the
        resulting output object that was appended to cell.outputs.

        The input argument *cell* is modified in-place.

        Parameters
        ----------
        msg : dict
            The kernel message being processed.
        cell : nbformat.NotebookNode
            The cell which is currently being processed.
        cell_index : int
            The position of the cell within the notebook object.

        Returns
        -------
        output : dict
            The execution output payload (or None for no output).

        Raises
        ------
        CellExecutionComplete
          Once a message arrives which indicates computation completeness.

        """
        msg_type = msg['msg_type']
        self.log.debug("msg_type: %s", msg_type)
        content = msg['content']
        self.log.debug("content: %s", content)

        display_id = content.get('transient', {}).get('display_id', None)
        if display_id and msg_type in {
                'execute_result', 'display_data', 'update_display_data'
        }:
            self._update_display_id(display_id, msg)

        # set the prompt number for the input and the output
        if 'execution_count' in content:
            cell['execution_count'] = content['execution_count']

        if self.record_timing:
            if msg_type == 'status':
                if content['execution_state'] == 'idle':
                    cell['metadata']['execution'][
                        'iopub.status.idle'] = timestamp()
                elif content['execution_state'] == 'busy':
                    cell['metadata']['execution'][
                        'iopub.status.busy'] = timestamp()
            elif msg_type == 'execute_input':
                cell['metadata']['execution'][
                    'iopub.execute_input'] = timestamp()

        if msg_type == 'status':
            if content['execution_state'] == 'idle':
                raise CellExecutionComplete()
        elif msg_type == 'clear_output':
            self.clear_output(cell.outputs, msg, cell_index)
        elif msg_type.startswith('comm'):
            self.handle_comm_msg(cell.outputs, msg, cell_index)
        # Check for remaining messages we don't process
        elif msg_type not in ['execute_input', 'update_display_data']:
            # Assign output as our processed "result"
            return self.output(cell.outputs, msg, display_id, cell_index)
        return None

    def output(self, outs: t.List, msg: t.Dict, display_id: str,
               cell_index: int) -> t.Optional[t.List]:

        msg_type = msg['msg_type']

        parent_msg_id = msg['parent_header'].get('msg_id')
        if self.output_hook_stack[parent_msg_id]:
            # if we have a hook registered, it will override our
            # default output behaviour (e.g. OutputWidget)
            hook = self.output_hook_stack[parent_msg_id][-1]
            hook.output(outs, msg, display_id, cell_index)
            return None

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg_type)
            return None

        if self.clear_before_next_output:
            self.log.debug('Executing delayed clear_output')
            outs[:] = []
            self.clear_display_id_mapping(cell_index)
            self.clear_before_next_output = False

        if display_id:
            # record output index in:
            #   _display_id_map[display_id][cell_idx]
            cell_map = self._display_id_map.setdefault(display_id, {})
            output_idx_list = cell_map.setdefault(cell_index, [])
            output_idx_list.append(len(outs))

        outs.append(out)

        return out

    def clear_output(self, outs: t.List, msg: t.Dict, cell_index: int) -> None:

        content = msg['content']

        parent_msg_id = msg['parent_header'].get('msg_id')
        if self.output_hook_stack[parent_msg_id]:
            # if we have a hook registered, it will override our
            # default clear_output behaviour (e.g. OutputWidget)
            hook = self.output_hook_stack[parent_msg_id][-1]
            hook.clear_output(outs, msg, cell_index)
            return

        if content.get('wait'):
            self.log.debug('Wait to clear output')
            self.clear_before_next_output = True
        else:
            self.log.debug('Immediate clear output')
            outs[:] = []
            self.clear_display_id_mapping(cell_index)

    def clear_display_id_mapping(self, cell_index: int) -> None:

        for display_id, cell_map in self._display_id_map.items():
            if cell_index in cell_map:
                cell_map[cell_index] = []

    def handle_comm_msg(self, outs: t.List, msg: t.Dict,
                        cell_index: int) -> None:

        content = msg['content']
        data = content['data']
        if self.store_widget_state and 'state' in data:  # ignore custom messages
            self.widget_state.setdefault(content['comm_id'],
                                         {}).update(data['state'])
            if 'buffer_paths' in data and data['buffer_paths']:
                self.widget_buffers[
                    content['comm_id']] = self._get_buffer_data(msg)
        # There are cases where we need to mimic a frontend, to get similar behaviour as
        # when using the Output widget from Jupyter lab/notebook
        if msg['msg_type'] == 'comm_open':
            target = msg['content'].get('target_name')
            handler = self.comm_open_handlers.get(target)
            if handler:
                comm_id = msg['content']['comm_id']
                comm_object = handler(msg)
                if comm_object:
                    self.comm_objects[comm_id] = comm_object
            else:
                self.log.warning(
                    f'No handler found for comm target {target!r}')
        elif msg['msg_type'] == 'comm_msg':
            content = msg['content']
            comm_id = msg['content']['comm_id']
            if comm_id in self.comm_objects:
                self.comm_objects[comm_id].handle_msg(msg)

    def _serialize_widget_state(self, state: t.Dict) -> t.Dict[str, t.Any]:
        """Serialize a widget state, following format in @jupyter-widgets/schema."""
        return {
            'model_name': state.get('_model_name'),
            'model_module': state.get('_model_module'),
            'model_module_version': state.get('_model_module_version'),
            'state': state,
        }

    def _get_buffer_data(self, msg: t.Dict) -> t.List[t.Dict[str, str]]:
        encoded_buffers = []
        paths = msg['content']['data']['buffer_paths']
        buffers = msg['buffers']
        for path, buffer in zip(paths, buffers):
            encoded_buffers.append({
                'data': base64.b64encode(buffer).decode('utf-8'),
                'encoding': 'base64',
                'path': path,
            })
        return encoded_buffers

    def register_output_hook(self, msg_id: str, hook: OutputWidget) -> None:
        """Registers an override object that handles output/clear_output instead.

        Multiple hooks can be registered, where the last one will be used (stack based)
        """
        # mimics
        # https://jupyterlab.github.io/jupyterlab/services/interfaces/kernel.ikernelconnection.html#registermessagehook
        self.output_hook_stack[msg_id].append(hook)

    def remove_output_hook(self, msg_id: str, hook: OutputWidget) -> None:
        """Unregisters an override object that handles output/clear_output instead"""
        # mimics
        # https://jupyterlab.github.io/jupyterlab/services/interfaces/kernel.ikernelconnection.html#removemessagehook
        removed_hook = self.output_hook_stack[msg_id].pop()
        assert removed_hook == hook
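
    # Hedged usage sketch: hooks are kept per msg_id on a stack, so pair each
    # register with a remove in LIFO order (``msg_id`` and ``widget`` are
    # hypothetical names):
    #
    #     self.register_output_hook(msg_id, widget)
    #     try:
    #         ...  # output/clear_output for msg_id is routed to the widget
    #     finally:
    #         self.remove_output_hook(msg_id, widget)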

    def on_comm_open_jupyter_widget(self, msg: t.Dict):
        content = msg['content']
        data = content['data']
        state = data['state']
        comm_id = msg['content']['comm_id']
        module = self.widget_registry.get(state['_model_module'])
        if module:
            widget_class = module.get(state['_model_name'])
            if widget_class:
                return widget_class(comm_id, state, self.kc, self)
Exemple #16
0
class TextFileContentsManager(FileContentsManager, Configurable):
    """
    A FileContentsManager Class that reads and stores notebooks to classical
    Jupyter notebooks (.ipynb), R Markdown notebooks (.Rmd), Julia (.jl),
    Python (.py) or R scripts (.R)
    """

    nb_extensions = [ext for ext in NOTEBOOK_EXTENSIONS if ext != '.ipynb']

    def all_nb_extensions(self):
        """
        Notebook extensions, including ipynb
        :return:
        """
        return ['.ipynb'] + self.nb_extensions

    default_jupytext_formats = Unicode(
        u'',
        help='Save notebooks to these file extensions. '
        'Can be any of ipynb,Rmd,md,jl,py,R,nb.jl,nb.py,nb.R '
        'comma separated. If you want another format than the '
        'default one, append the format name to the extension, '
        'e.g. ipynb,py:percent to save the notebook to '
        'hydrogen/spyder/vscode compatible scripts',
        config=True)

    preferred_jupytext_formats_save = Unicode(
        u'',
        help='Preferred format when saving notebooks as text, per extension. '
        'Use "jl:percent,py:percent,R:percent" if you want to save '
        'Julia, Python and R scripts in the double percent format and '
        'only write "jupytext_formats": "py" in the notebook metadata.',
        config=True)

    preferred_jupytext_formats_read = Unicode(
        u'',
        help='Preferred format when reading notebooks from text, per '
        'extension. Use "py:sphinx" if you want to read all python '
        'scripts as Sphinx gallery scripts.',
        config=True)

    default_notebook_metadata_filter = Unicode(
        u'',
        help="Notebook metadata that should be saved in the text representations. "
        "Examples: 'all', '-all', 'widgets,nteract', 'kernelspec,jupytext-all'",
        config=True)

    default_cell_metadata_filter = Unicode(
        u'',
        help="Cell metadata that should be saved in the text representations. "
        "Examples: 'all', 'hide_input,hide_output'",
        config=True)

    freeze_metadata = Bool(
        False,
        help='Filter notebook and cell metadata that are not in the text notebook. '
        'Use this to avoid creating a YAML header when editing text files.',
        config=True)

    comment_magics = Enum(
        values=[True, False],
        allow_none=True,
        help='Should Jupyter magic commands be commented out in the text '
        'representation?',
        config=True)

    sphinx_convert_rst2md = Bool(
        False,
        help='When opening a Sphinx Gallery script, convert the '
        'reStructuredText to markdown',
        config=True)

    outdated_text_notebook_margin = Float(
        1.0,
        help='Refuse to overwrite inputs of an ipynb notebook with those of a '
        'text notebook when the text notebook plus this margin is older '
        'than the ipynb notebook',
        config=True)

    def replace_auto_ext(self, group, auto_ext):
        """Replace any .auto extension with the given extension; if there is
        none, remove that alternative format from the group"""
        result = []
        for fmt in group:
            if not fmt.endswith('.auto'):
                result.append(fmt)
            elif auto_ext:
                result.append(fmt.replace('.auto', auto_ext))
        return result
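
    # Illustration of the expected behaviour of the method above (values are
    # examples only):
    #
    #     replace_auto_ext(['.ipynb', '.auto'], '.py')  ->  ['.ipynb', '.py']
    #     replace_auto_ext(['.ipynb', '.auto'], None)   ->  ['.ipynb']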

    def format_group(self, fmt, nbk=None):
        """Return the group of extensions that contains 'fmt'"""
        if nbk:
            transition_to_jupytext_section_in_metadata(nbk.metadata,
                                                       fmt.endswith('.ipynb'))

        jupytext_formats = (
            (nbk.metadata.get('jupytext', {}).get('formats') if nbk else None)
            or self.default_jupytext_formats)

        try:
            jupytext_formats = check_formats(jupytext_formats)
        except ValueError as err:
            raise HTTPError(400, str(err))

        auto_ext = nbk.metadata.get('language_info',
                                    {}).get('file_extension') if nbk else None
        if auto_ext == '.r':
            auto_ext = '.R'
        # Find group that contains the current format
        for group in jupytext_formats:
            if auto_ext and fmt.replace(auto_ext, '.auto') in group:
                return self.replace_auto_ext(group, auto_ext)
            if fmt in group:
                return self.replace_auto_ext(group, auto_ext)

        # No such group, but 'ipynb'? Return current fmt + 'ipynb'
        if ['.ipynb'] in jupytext_formats:
            return ['.ipynb', fmt]

        return [fmt]

    def preferred_format(self, ext, preferred):
        """Returns the preferred format for that extension"""
        for fmt_ext, format_name in parse_formats(preferred):
            if fmt_ext == ext:
                return format_name
            if not (ext.endswith('.md') or ext.endswith('.Rmd')):
                if fmt_ext == '.auto':
                    return format_name
                if fmt_ext.endswith('.auto'):
                    base_ext, ext_ext = os.path.splitext(ext)
                    base_fmt, _ = os.path.splitext(fmt_ext)
                    if base_ext == base_fmt and ext_ext:
                        return format_name

        return None
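
    # Illustration (assumption: parse_formats maps 'py:percent' to
    # [('.py', 'percent')]):
    #
    #     preferred_format('.py', 'py:percent')   ->  'percent'
    #     preferred_format('.Rmd', 'py:percent')  ->  None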

    def _read_notebook(self, os_path, as_version=4):
        """Read a notebook from an os path."""
        _, fmt, ext = file_fmt_ext(os_path)
        if ext in self.nb_extensions:
            format_name = self.preferred_format(
                fmt, self.preferred_jupytext_formats_read)
            with mock.patch(
                    'nbformat.reads',
                    _jupytext_reads(fmt, format_name,
                                    self.sphinx_convert_rst2md,
                                    self.freeze_metadata)):
                return super(TextFileContentsManager,
                             self)._read_notebook(os_path, as_version)
        else:
            return super(TextFileContentsManager,
                         self)._read_notebook(os_path, as_version)

    def set_comment_magics_if_none(self, nb):
        """Set the 'comment_magics' metadata if the default is not None"""
        if (self.comment_magics is not None
                and 'comment_magics' not in nb.metadata.get('jupytext', {})):
            nb.metadata.setdefault('jupytext',
                                   {})['comment_magics'] = self.comment_magics

    def _save_notebook(self, os_path, nb):
        """Save a notebook to an os_path."""
        self.set_comment_magics_if_none(nb)
        os_file, fmt, _ = file_fmt_ext(os_path)
        for alt_fmt in self.format_group(fmt, nb):
            os_path_fmt = os_file + alt_fmt
            self.log.info("Saving %s", os.path.basename(os_path_fmt))
            alt_ext = '.' + alt_fmt.split('.')[-1]

            if alt_ext in self.nb_extensions:
                format_name = format_name_for_ext(nb.metadata, alt_fmt, self.default_jupytext_formats,
                                                  explicit_default=False) or \
                              self.preferred_format(alt_fmt, self.preferred_jupytext_formats_save)
                with mock.patch('nbformat.writes',
                                _jupytext_writes(alt_fmt, format_name)):
                    super(TextFileContentsManager,
                          self)._save_notebook(os_path_fmt, nb)
            else:
                super(TextFileContentsManager,
                      self)._save_notebook(os_path_fmt, nb)

    def get(self,
            path,
            content=True,
            type=None,
            format=None,
            load_alternative_format=True):
        """ Takes a path for an entity and returns its model"""
        path = path.strip('/')
        nb_file, fmt, ext = file_fmt_ext(path)

        if self.exists(path) and (type == 'notebook' or
                                  (type is None
                                   and ext in self.all_nb_extensions())):
            model = self._notebook_model(path, content=content)
            if fmt != ext and content:
                model['name'], _ = os.path.splitext(model['name'])
            if not content:
                return model

            if not load_alternative_format:
                return model

            fmt_group = self.format_group(fmt, model['content'])

            source_format = fmt
            outputs_format = fmt

            # Source format is first non ipynb format found on disk
            if fmt.endswith('.ipynb'):
                for alt_fmt in fmt_group:
                    if not alt_fmt.endswith('.ipynb') and self.exists(nb_file +
                                                                      alt_fmt):
                        source_format = alt_fmt
                        break
            # Outputs taken from ipynb if in group, if file exists
            else:
                for alt_fmt in fmt_group:
                    if alt_fmt.endswith('.ipynb') and self.exists(nb_file +
                                                                  alt_fmt):
                        outputs_format = alt_fmt
                        break

            if source_format != fmt:
                self.log.info(u'Reading SOURCE from {}'.format(
                    os.path.basename(nb_file + source_format)))
                model_outputs = model
                model = self.get(nb_file + source_format,
                                 content=content,
                                 type=type,
                                 format=format,
                                 load_alternative_format=False)
            elif outputs_format != fmt:
                self.log.info(u'Reading OUTPUTS from {}'.format(
                    os.path.basename(nb_file + outputs_format)))
                model_outputs = self.get(nb_file + outputs_format,
                                         content=content,
                                         type=type,
                                         format=format,
                                         load_alternative_format=False)
            else:
                model_outputs = None

            try:
                check_file_version(model['content'], nb_file + source_format,
                                   nb_file + outputs_format)
            except ValueError as err:
                raise HTTPError(400, str(err))

            # Make sure we're not overwriting ipynb cells with an outdated
            # text file
            try:
                if model_outputs and model_outputs['last_modified'] > model['last_modified'] + \
                        timedelta(seconds=self.outdated_text_notebook_margin):
                    raise HTTPError(
                        400, '''{out} (last modified {out_last})
                        seems more recent than {src} (last modified {src_last})
                        Please either:
                        - open {src} in a text editor, make sure it is up to date, and save it,
                        - or delete {src} if not up to date,
                        - or increase check margin by adding, say,
                            c.ContentsManager.outdated_text_notebook_margin = 5 # in seconds # or float("inf")
                        to your .jupyter/jupyter_notebook_config.py file
                        '''.format(src=nb_file + source_format,
                                   src_last=model['last_modified'],
                                   out=nb_file + outputs_format,
                                   out_last=model_outputs['last_modified']))
            except OverflowError:
                pass

            jupytext_metadata = model['content']['metadata'].setdefault(
                'jupytext', {})
            if self.default_notebook_metadata_filter:
                (jupytext_metadata.setdefault(
                    'metadata_filter',
                    {}).setdefault('notebook',
                                   self.default_notebook_metadata_filter))
            if self.default_cell_metadata_filter:
                (jupytext_metadata.setdefault(
                    'metadata_filter',
                    {}).setdefault('cells', self.default_cell_metadata_filter))

            for filter_level in ['notebook', 'cells']:
                level_filter = jupytext_metadata.get('metadata_filter',
                                                     {}).get(filter_level)
                if level_filter is not None:
                    jupytext_metadata['metadata_filter'][
                        filter_level] = metadata_filter_as_dict(level_filter)

            if model_outputs:
                combine_inputs_with_outputs(model['content'],
                                            model_outputs['content'])
            elif not fmt.endswith('.ipynb'):
                self.notary.sign(model['content'])
                self.mark_trusted_cells(model['content'], path)

            return model

        return super(TextFileContentsManager,
                     self).get(path, content, type, format)

    def trust_notebook(self, path):
        """Trust the current notebook"""
        if path.endswith('.ipynb'):
            super(TextFileContentsManager, self).trust_notebook(path)
        else:
            # Otherwise, we need to read the notebook to determine
            # which alternative format ends with '.ipynb':
            model = self.get(path)
            file, fmt, _ = file_fmt_ext(path)
            for alt_fmt in self.format_group(fmt, model['content']):
                if alt_fmt.endswith('.ipynb'):
                    super(TextFileContentsManager,
                          self).trust_notebook(file + alt_fmt)

    def rename_file(self, old_path, new_path):
        """Rename the current notebook, as well as its
         alternative representations"""
        old_file, org_fmt, _ = file_fmt_ext(old_path)
        new_file, new_fmt, _ = file_fmt_ext(new_path)

        if org_fmt == new_fmt:
            for alt_fmt in self.format_group(org_fmt):
                if self.file_exists(old_file + alt_fmt):
                    super(TextFileContentsManager,
                          self).rename_file(old_file + alt_fmt,
                                            new_file + alt_fmt)
        else:
            super(TextFileContentsManager,
                  self).rename_file(old_path, new_path)
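
# Hedged configuration sketch (assumption: this manager is importable as
# jupytext.TextFileContentsManager); in ~/.jupyter/jupyter_notebook_config.py:
#
#     c.NotebookApp.contents_manager_class = "jupytext.TextFileContentsManager"
#     c.ContentsManager.default_jupytext_formats = "ipynb,py:percent"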
Exemple #17
0
class FBDelegateBearerAuthenticator(FBAuthenticator):

    EXPECTED_ERROR_CODES = [400, 401, 403]

    # Override if necessary
    scope = List(Unicode(), ["email"],
                 config=True,
                 help="The OAuth scopes to request.")

    # Override to value of the endpoint you wish to call.
    endpoint = Unicode(config=True, help="Bearer endpoint to auth via.")

    auth_header = Unicode("Authorization",
                          config=True,
                          help="Header to use for Bearer validation.")

    auth_header_format = Unicode(
        r"Bearer {}",
        config=True,
        help="Header format string to use for Bearer.")

    auth_http_verb = Enum(
        ["POST", "GET"],
        "POST",
        config=True,
        help="Whether to use a POST or GET for the request.",
    )

    async def authorize(self, access_token, user_id):
        # call the endpoint, and verify we get a 200 response
        header_value = self.auth_header_format.format(access_token)
        headers = {}
        headers[self.auth_header] = header_value
        data = b"" if self.auth_http_verb == "POST" else None

        try:
            self.log.info(
                "Attempting to authorize user %s via endpoint %s",
                user_id,
                self.endpoint,
            )
            auth_req = urllib.request.Request(self.endpoint,
                                              data=data,
                                              headers=headers)
            with urllib.request.urlopen(auth_req) as response:
                # If we're here, the user has passed authorization via the delegate.
                body = response.read()
                self.log.info("Auth response for user %s: %s", user_id, body)
                return {
                    "name": user_id,
                    "auth_state": {
                        "access_token": access_token,
                        "fb_user": {
                            "username": user_id
                        },
                    },
                }
        except urllib.error.HTTPError as e:
            if e.code in self.EXPECTED_ERROR_CODES:
                self.log.warning("User failed delegate Auth Check",
                                 exc_info=True)
                raise HTTPError(
                    403, f"You are not authorized (delegate code: {e.code})")
            else:
                self.log.exception(
                    "Authorization failed with an unexpected HTTPError code")
                # We don't expect this code - treat as internal server error.
                raise HTTPError(
                    500, f"Authorization failed (delegate code: {e.code})")

        except Exception:
            # We don't expect this exception - treat as internal server error.
            self.log.exception(
                "Authorization failed with an unexpected exception")
            raise HTTPError(500, "Authorization failed")
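
# Hedged configuration sketch for jupyterhub_config.py (assumptions: the
# class is importable on the Hub's path and the endpoint URL is hypothetical):
#
#     c.JupyterHub.authenticator_class = FBDelegateBearerAuthenticator
#     c.FBDelegateBearerAuthenticator.endpoint = "https://example.com/auth/check"
#     c.FBDelegateBearerAuthenticator.auth_http_verb = "GET"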
Exemple #18
0
class NbGraderAPI(LoggingConfigurable):
    """A high-level API for using nbgrader."""

    coursedir = Instance(CourseDirectory, allow_none=True)
    authenticator = Instance(Authenticator, allow_none=True)
    exchange = Instance(ExchangeFactory, allow_none=True)

    # The log level for the application
    log_level = Enum(
        (0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'),
        default_value=logging.INFO,
        help="Set the log level by value or name."
    ).tag(config=True)

    timezone = Unicode(
        "UTC",
        help="Timezone for displaying timestamps"
    ).tag(config=True)

    timestamp_format = Unicode(
        "%Y-%m-%d %H:%M:%S %Z",
        help="Format string for displaying timestamps"
    ).tag(config=True)

    @observe('log_level')
    def _log_level_changed(self, change):
        """Adjust the log level when log_level is set."""
        new = change.new
        if isinstance(new, str):
            new = getattr(logging, new)
            self.log_level = new
        self.log.setLevel(new)

    def __init__(self, coursedir=None, authenticator=None, exchange=None, **kwargs):
        """Initialize the API.

        Arguments
        ---------
        coursedir: :class:`nbgrader.coursedir.CourseDirectory`
            (Optional) A course directory object.
        authenticator : :class:~`nbgrader.auth.BaseAuthenticator`
            (Optional) An authenticator instance for communicating with an
            external database.
        exchange : :class:~`nbgrader.exchange.ExchangeFactory`
            (Optional) A factory for creating the exchange classes used
            for distributing assignments and feedback.
        kwargs:
            Additional keyword arguments (e.g. ``parent``, ``config``)

        """
        self.log.setLevel(self.log_level)
        super(NbGraderAPI, self).__init__(**kwargs)

        if coursedir is None:
            self.coursedir = CourseDirectory(parent=self)
        else:
            self.coursedir = coursedir

        if authenticator is None:
            self.authenticator = Authenticator(parent=self)
        else:
            self.authenticator = authenticator

        if exchange is None:
            self.exchange = ExchangeFactory(parent=self)
        else:
            self.exchange = exchange

        if sys.platform != 'win32':
            lister = self.exchange.List(
                coursedir=self.coursedir,
                authenticator=self.authenticator,
                parent=self)
            self.course_id = self.coursedir.course_id
            if hasattr(lister, "root"):
                self.exchange_root = lister.root
            else:
                # For non-fs based exchanges
                self.exchange_root = ''

            try:
                lister.start()
            except ExchangeError:
                self.exchange_missing = True
            else:
                self.exchange_missing = False

        else:
            self.course_id = ''
            self.exchange_root = ''
            self.exchange_missing = True

    @property
    def exchange_is_functional(self):
        return self.course_id and not self.exchange_missing and sys.platform != 'win32'

    @property
    def gradebook(self):
        """An instance of :class:`nbgrader.api.Gradebook`.

        Note that each time this property is accessed, a new gradebook is
        created. The user is responsible for destroying the gradebook through
        :func:`~nbgrader.api.Gradebook.close`.

        """
        return Gradebook(self.coursedir.db_url, self.course_id)
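
    # Hedged usage sketch: each access to the property creates a new
    # Gradebook, so close it when done; the context-manager form used
    # elsewhere in this class does that automatically (``api`` is a
    # hypothetical instance, 'ps1' a hypothetical assignment name):
    #
    #     with api.gradebook as gb:
    #         score = gb.average_assignment_score('ps1')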

    def get_source_assignments(self):
        """Get the names of all assignments in the `source` directory.

        Returns
        -------
        assignments: set
            A set of assignment names

        """
        filenames = glob.glob(self.coursedir.format_path(
            self.coursedir.source_directory,
            student_id='.',
            assignment_id='*'))

        assignments = set([])
        for filename in filenames:
            # skip files that aren't directories
            if not os.path.isdir(filename):
                continue

            # parse out the assignment name
            regex = self.coursedir.format_path(
                self.coursedir.source_directory,
                student_id='.',
                assignment_id='(?P<assignment_id>.*)',
                escape=True)

            matches = re.match(regex, filename)
            if matches:
                assignments.add(matches.groupdict()['assignment_id'])

        return assignments
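
    # Illustration (assumption: the default directory layout, where
    # format_path expands to '<course_root>/source/./<assignment_id>'):
    #
    #     source/ps1/  and  source/ps2/  on disk
    #     get_source_assignments()  ->  {'ps1', 'ps2'}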

    def get_released_assignments(self):
        """Get the names of all assignments that have been released to the
        exchange directory. If the course id is blank, this returns an empty
        set.

        Returns
        -------
        assignments: set
            A set of assignment names

        """
        if self.exchange_is_functional:
            lister = self.exchange.List(
                coursedir=self.coursedir,
                authenticator=self.authenticator,
                parent=self)
            released = set([x['assignment_id'] for x in lister.start()])
        else:
            released = set([])

        return released

    def get_submitted_students(self, assignment_id):
        """Get the ids of students that have submitted a given assignment
        (determined by whether or not a submission exists in the `submitted`
        directory).

        Arguments
        ---------
        assignment_id: string
            The name of the assignment. May be * to select for all assignments.

        Returns
        -------
        students: set
            A set of student ids

        """
        # get the names of all student submissions in the `submitted` directory
        filenames = glob.glob(self.coursedir.format_path(
            self.coursedir.submitted_directory,
            student_id='*',
            assignment_id=assignment_id))

        students = set([])
        for filename in filenames:
            # skip files that aren't directories
            if not os.path.isdir(filename):
                continue

            # parse out the student id
            if assignment_id == "*":
                assignment_id = ".*"
            regex = self.coursedir.format_path(
                self.coursedir.submitted_directory,
                student_id='(?P<student_id>.*)',
                assignment_id=assignment_id,
                escape=True)

            matches = re.match(regex, filename)
            if matches:
                students.add(matches.groupdict()['student_id'])

        return students

    def get_submitted_timestamp(self, assignment_id, student_id):
        """Gets the timestamp of a submitted assignment.

        Arguments
        ---------
        assignment_id: string
            The assignment name
        student_id: string
            The student id

        Returns
        -------
        timestamp: datetime.datetime or None
            The timestamp of the submission, or None if the timestamp does
            not exist

        """
        assignment_dir = os.path.abspath(self.coursedir.format_path(
            self.coursedir.submitted_directory,
            student_id,
            assignment_id))

        timestamp_pth = os.path.join(assignment_dir, 'timestamp.txt')
        if os.path.exists(timestamp_pth):
            with open(timestamp_pth, 'r') as fh:
                return parse_utc(fh.read().strip())

    def get_autograded_students(self, assignment_id):
        """Get the ids of students whose submission for a given assignment
        has been autograded. This is determined based on satisfying all of the
        following criteria:

        1. There is a directory present in the `autograded` directory.
        2. The submission is present in the database.
        3. The timestamp of the autograded submission is the same as the
           timestamp of the original submission (in the `submitted` directory).

        Returns
        -------
        students: set
            A set of student ids

        """
        # get all autograded submissions
        with self.gradebook as gb:
            ag_timestamps = dict(
                gb.db
                .query(Student.id, SubmittedAssignment.timestamp)
                .join(SubmittedAssignment)
                .filter(SubmittedAssignment.name == assignment_id)
                .all())
            ag_students = set(ag_timestamps.keys())

        students = set([])
        for student_id in ag_students:
            # skip files that aren't directories
            filename = self.coursedir.format_path(
                self.coursedir.autograded_directory,
                student_id=student_id,
                assignment_id=assignment_id)
            if not os.path.isdir(filename):
                continue

            # get the timestamps and check whether the submitted timestamp
            # matches the autograded timestamp
            submitted_timestamp = self.get_submitted_timestamp(assignment_id, student_id)
            autograded_timestamp = ag_timestamps[student_id]
            if submitted_timestamp != autograded_timestamp:
                continue

            students.add(student_id)

        return students

    def get_assignment(self, assignment_id, released=None):
        """Get information about an assignment given its name.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        released: set
            (Optional) A set of names of released assignments, obtained via
            self.get_released_assignments().

        Returns
        -------
        assignment: dict
            A dictionary containing information about the assignment

        """
        # get the set of released assignments if not given
        if not released:
            released = self.get_released_assignments()

        # check whether there is a source version of the assignment
        sourcedir = os.path.abspath(self.coursedir.format_path(
            self.coursedir.source_directory,
            student_id='.',
            assignment_id=assignment_id))
        if not os.path.isdir(sourcedir):
            return

        # see if there is information about the assignment in the database
        try:
            with self.gradebook as gb:
                db_assignment = gb.find_assignment(assignment_id)
                assignment = db_assignment.to_dict()
                if db_assignment.duedate:
                    ts = as_timezone(db_assignment.duedate, self.timezone)
                    assignment["display_duedate"] = ts.strftime(self.timestamp_format)
                    assignment["duedate_notimezone"] = ts.replace(tzinfo=None).isoformat()
                else:
                    assignment["display_duedate"] = None
                    assignment["duedate_notimezone"] = None
                assignment["duedate_timezone"] = to_numeric_tz(self.timezone)
                assignment["average_score"] = gb.average_assignment_score(assignment_id)
                assignment["average_code_score"] = gb.average_assignment_code_score(assignment_id)
                assignment["average_written_score"] = gb.average_assignment_written_score(assignment_id)
                assignment["average_task_score"] = gb.average_assignment_task_score(assignment_id)

        except MissingEntry:
            assignment = {
                "id": None,
                "name": assignment_id,
                "duedate": None,
                "display_duedate": None,
                "duedate_notimezone": None,
                "duedate_timezone": to_numeric_tz(self.timezone),
                "average_score": 0,
                "average_code_score": 0,
                "average_written_score": 0,
                "average_task_score": 0,
                "max_score": 0,
                "max_code_score": 0,
                "max_written_score": 0,
                "max_task_score": 0
            }

        # get released status
        if not self.exchange_is_functional:
            assignment["releaseable"] = False
            assignment["status"] = "draft"
        else:
            assignment["releaseable"] = True
            if assignment_id in released:
                assignment["status"] = "released"
            else:
                assignment["status"] = "draft"

        # get source directory
        assignment["source_path"] = os.path.relpath(sourcedir, self.coursedir.root)

        # get release directory
        releasedir = os.path.abspath(self.coursedir.format_path(
            self.coursedir.release_directory,
            student_id='.',
            assignment_id=assignment_id))
        if os.path.exists(releasedir):
            assignment["release_path"] = os.path.relpath(releasedir, self.coursedir.root)
        else:
            assignment["release_path"] = None

        # number of submissions
        assignment["num_submissions"] = len(self.get_submitted_students(assignment_id))

        return assignment

    def get_assignments(self):
        """Get a list of information about all assignments.

        Returns
        -------
        assignments: list
            A list of dictionaries containing information about each assignment

        """
        released = self.get_released_assignments()

        assignments = []
        for x in self.get_source_assignments():
            assignments.append(self.get_assignment(x, released=released))

        assignments.sort(key=lambda x: (x["duedate"] if x["duedate"] is not None else "None", x["name"]))
        return assignments

    def get_notebooks(self, assignment_id):
        """Get a list of notebooks in an assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment

        Returns
        -------
        notebooks: list
            A list of dictionaries containing information about each notebook

        """
        with self.gradebook as gb:
            try:
                assignment = gb.find_assignment(assignment_id)
            except MissingEntry:
                assignment = None

            # if the assignment exists in the database
            if assignment and assignment.notebooks:
                notebooks = []
                for notebook in assignment.notebooks:
                    x = notebook.to_dict()
                    x["average_score"] = gb.average_notebook_score(notebook.name, assignment.name)
                    x["average_code_score"] = gb.average_notebook_code_score(notebook.name, assignment.name)
                    x["average_written_score"] = gb.average_notebook_written_score(notebook.name, assignment.name)
                    x["average_task_score"] = gb.average_notebook_task_score(notebook.name, assignment.name)
                    notebooks.append(x)

            # if it doesn't exist in the database
            else:
                sourcedir = self.coursedir.format_path(
                    self.coursedir.source_directory,
                    student_id='.',
                    assignment_id=assignment_id)
                escaped_sourcedir = self.coursedir.format_path(
                    self.coursedir.source_directory,
                    student_id='.',
                    assignment_id=assignment_id,
                    escape=True)

                notebooks = []
                for filename in glob.glob(os.path.join(sourcedir, "*.ipynb")):
                    regex = re.escape(os.path.sep).join([escaped_sourcedir, "(?P<notebook_id>.*).ipynb"])
                    matches = re.match(regex, filename)
                    notebook_id = matches.groupdict()['notebook_id']
                    notebooks.append({
                        "name": notebook_id,
                        "id": None,
                        "average_score": 0,
                        "average_code_score": 0,
                        "average_written_score": 0,
                        "average_task_score": 0,
                        "max_score": 0,
                        "max_code_score": 0,
                        "max_written_score": 0,
                        "max_task_score": 0,
                        "needs_manual_grade": False,
                        "num_submissions": 0
                    })

        return notebooks

    def get_submission(self, assignment_id, student_id, ungraded=None, students=None):
        """Get information about a student's submission of an assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        student_id: string
            The student's id
        ungraded: set
            (Optional) A set of student ids corresponding to students whose
            submissions have not yet been autograded.
        students: dict
            (Optional) A dictionary of dictionaries, keyed by student id,
            containing information about students.

        Returns
        -------
        submission: dict
            A dictionary containing information about the submission

        """
        # compute autograded unconditionally: it is referenced in the elif
        # below even when `ungraded` is passed in by the caller
        autograded = self.get_autograded_students(assignment_id)
        if ungraded is None:
            ungraded = self.get_submitted_students(assignment_id) - autograded
        if students is None:
            students = {x['id']: x for x in self.get_students()}

        if student_id in ungraded:
            ts = self.get_submitted_timestamp(assignment_id, student_id)
            if ts:
                timestamp = ts.isoformat()
                display_timestamp = as_timezone(ts, self.timezone).strftime(self.timestamp_format)
            else:
                timestamp = None
                display_timestamp = None

            submission = {
                "id": None,
                "name": assignment_id,
                "timestamp": timestamp,
                "display_timestamp": display_timestamp,
                "score": 0.0,
                "max_score": 0.0,
                "code_score": 0.0,
                "max_code_score": 0.0,
                "written_score": 0.0,
                "max_written_score": 0.0,
                "task_score": 0.0,
                "max_task_score": 0.0,
                "needs_manual_grade": False,
                "autograded": False,
                "submitted": True,
                "student": student_id,
            }

            if student_id not in students:
                submission["last_name"] = None
                submission["first_name"] = None
            else:
                submission["last_name"] = students[student_id]["last_name"]
                submission["first_name"] = students[student_id]["first_name"]

        elif student_id in autograded:
            with self.gradebook as gb:
                try:
                    db_submission = gb.find_submission(assignment_id, student_id)
                    submission = db_submission.to_dict()
                    if db_submission.timestamp:
                        submission["display_timestamp"] = as_timezone(
                            db_submission.timestamp, self.timezone).strftime(self.timestamp_format)
                    else:
                        submission["display_timestamp"] = None

                except MissingEntry:
                    return None

            submission["autograded"] = True
            submission["submitted"] = True

        else:
            submission = {
                "id": None,
                "name": assignment_id,
                "timestamp": None,
                "display_timestamp": None,
                "score": 0.0,
                "max_score": 0.0,
                "code_score": 0.0,
                "max_code_score": 0.0,
                "written_score": 0.0,
                "max_written_score": 0.0,
                "task_score": 0.0,
                "max_task_score": 0.0,
                "needs_manual_grade": False,
                "autograded": False,
                "submitted": False,
                "student": student_id,
            }

            if student_id not in students:
                submission["last_name"] = None
                submission["first_name"] = None
            else:
                submission["last_name"] = students[student_id]["last_name"]
                submission["first_name"] = students[student_id]["first_name"]

        return submission
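    # A minimal usage sketch (names are illustrative; assumes `api` is an
    # instance of this class):
    #
    #     submission = api.get_submission("ps1", "alice")
    #     if submission is not None and not submission["autograded"]:
    #         api.autograde("ps1", "alice")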

    def get_submissions(self, assignment_id):
        """Get a list of submissions of an assignment. Each submission
        corresponds to a student.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment

        Returns
        -------
        submissions: list
            A list of dictionaries containing information about each submission

        """
        with self.gradebook as gb:
            db_submissions = gb.submission_dicts(assignment_id)

        ungraded = self.get_submitted_students(assignment_id) - self.get_autograded_students(assignment_id)
        students = {x['id']: x for x in self.get_students()}
        submissions = []
        for submission in db_submissions:
            if submission["student"] in ungraded:
                continue
            ts = submission["timestamp"]
            if ts:
                submission["timestamp"] = ts.isoformat()
                submission["display_timestamp"] = as_timezone(
                    ts, self.timezone).strftime(self.timestamp_format)
            else:
                submission["timestamp"] = None
                submission["display_timestamp"] = None
            submission["autograded"] = True
            submission["submitted"] = True
            submissions.append(submission)

        for student_id in ungraded:
            submission = self.get_submission(
                assignment_id, student_id, ungraded=ungraded, students=students)
            submissions.append(submission)

        submissions.sort(key=lambda x: x["student"])
        return submissions

    def _filter_existing_notebooks(self, assignment_id, notebooks):
        """Filters a list of notebooks so that it only includes those notebooks
        which actually exist on disk.

        This functionality is necessary for cases where students delete or rename
        one or more notebooks in their assignment, but still submit the assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        notebooks: list
            List of :class:`~nbgrader.api.SubmittedNotebook` objects

        Returns
        -------
        submissions: list
            List of :class:`~nbgrader.api.SubmittedNotebook` objects

        """
        # Making a filesystem call for every notebook in the assignment
        # can be very slow on certain setups, such as using NFS, see
        # https://github.com/jupyter/nbgrader/issues/929
        #
        # If students are using the exchange and submitting with
        # ExchangeSubmit.strict == True, then all the notebooks we expect
        # should be here already so we don't need to filter for only
        # existing notebooks in that case.
        if self.exchange_is_functional:
            app = self.exchange.Submit(
                    coursedir=self.coursedir,
                    authenticator=self.authenticator,
                    parent=self)
            if app.strict:
                return sorted(notebooks, key=lambda x: x.id)

        submissions = list()
        for nb in notebooks:
            filename = os.path.join(
                os.path.abspath(self.coursedir.format_path(
                    self.coursedir.autograded_directory,
                    student_id=nb.student.id,
                    assignment_id=assignment_id)),
                "{}.ipynb".format(nb.name))

            if os.path.exists(filename):
                submissions.append(nb)

        return sorted(submissions, key=lambda x: x.id)

    def get_notebook_submission_indices(self, assignment_id, notebook_id):
        """Get a dictionary mapping unique submission ids to indices of the
        submissions relative to the full list of submissions.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        notebook_id: string
            The name of the notebook

        Returns
        -------
        indices: dict
            A dictionary mapping submission ids to the index of each submission

        """
        with self.gradebook as gb:
            notebooks = gb.notebook_submissions(notebook_id, assignment_id)
            submissions = self._filter_existing_notebooks(assignment_id, notebooks)
        return dict([(x.id, i) for i, x in enumerate(submissions)])

    def get_notebook_submissions(self, assignment_id, notebook_id):
        """Get a list of submissions for a particular notebook in an assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        notebook_id: string
            The name of the notebook

        Returns
        -------
        submissions: list
            A list of dictionaries containing information about each submission.

        """
        with self.gradebook as gb:
            try:
                gb.find_notebook(notebook_id, assignment_id)
            except MissingEntry:
                return []

            submissions = gb.notebook_submission_dicts(notebook_id, assignment_id)

        indices = self.get_notebook_submission_indices(assignment_id, notebook_id)
        for nb in submissions:
            nb['index'] = indices.get(nb['id'], None)

        submissions = [x for x in submissions if x['index'] is not None]
        submissions.sort(key=lambda x: x["id"])
        return submissions

    def get_student(self, student_id, submitted=None):
        """Get a dictionary containing information about the given student.

        Arguments
        ---------
        student_id: string
            The unique id of the student
        submitted: set
            (Optional) A set of unique ids of students who have submitted an assignment

        Returns
        -------
        student: dictionary
            A dictionary containing information about the student, or None if
            the student does not exist

        """
        if submitted is None:
            submitted = self.get_submitted_students("*")

        try:
            with self.gradebook as gb:
                student = gb.find_student(student_id).to_dict()

        except MissingEntry:
            if student_id in submitted:
                student = {
                    "id": student_id,
                    "last_name": None,
                    "first_name": None,
                    "email": None,
                    "lms_user_id": None,
                    "score": 0.0,
                    "max_score": 0.0
                }

            else:
                return None

        return student

    def get_students(self):
        """Get a list containing information about all the students in class.

        Returns
        -------
        students: list
            A list of dictionaries containing information about all the students

        """
        with self.gradebook as gb:
            in_db = set([x.id for x in gb.students])
            students = gb.student_dicts()

        submitted = self.get_submitted_students("*")
        for student_id in (submitted - in_db):
            students.append({
                "id": student_id,
                "last_name": None,
                "first_name": None,
                "email": None,
                "lms_user_id": None,
                "score": 0.0,
                "max_score": 0.0
            })

        students.sort(key=lambda x: (x["last_name"] or "None", x["first_name"] or "None", x["id"]))
        return students

    def get_student_submissions(self, student_id):
        """Get information about all submissions from a particular student.

        Arguments
        ---------
        student_id: string
            The unique id of the student

        Returns
        -------
        submissions: list
            A list of dictionaries containing information about all the student's
            submissions

        """
        # return just an empty list if the student doesn't exist
        submissions = []
        for assignment_id in self.get_source_assignments():
            submission = self.get_submission(assignment_id, student_id)
            submissions.append(submission)

        submissions.sort(key=lambda x: x["name"])
        return submissions

    def get_student_notebook_submissions(self, student_id, assignment_id):
        """Gets information about all notebooks within a submitted assignment.

        Arguments
        ---------
        student_id: string
            The unique id of the student
        assignment_id: string
            The name of the assignment

        Returns
        -------
        submissions: list
            A list of dictionaries containing information about the submissions

        """
        with self.gradebook as gb:
            try:
                assignment = gb.find_submission(assignment_id, student_id)
                student = assignment.student
            except MissingEntry:
                return []

            submissions = []
            for notebook in assignment.notebooks:
                filename = os.path.join(
                    os.path.abspath(self.coursedir.format_path(
                        self.coursedir.autograded_directory,
                        student_id=student_id,
                        assignment_id=assignment_id)),
                    "{}.ipynb".format(notebook.name))

                if os.path.exists(filename):
                    submissions.append(notebook.to_dict())
                else:
                    submissions.append({
                        "id": None,
                        "name": notebook.name,
                        "student": student_id,
                        "last_name": student.last_name,
                        "first_name": student.first_name,
                        "score": 0,
                        "max_score": notebook.max_score,
                        "code_score": 0,
                        "max_code_score": notebook.max_code_score,
                        "written_score": 0,
                        "max_written_score": notebook.max_written_score,
                        "task_score": 0,
                        "max_task_score": notebook.max_task_score,
                        "needs_manual_grade": False,
                        "failed_tests": False,
                        "flagged": False
                    })

        submissions.sort(key=lambda x: x["name"])
        return submissions

    def assign(self, *args, **kwargs):
        """Deprecated, please use `generate_assignment` instead."""
        msg = (
            "The `assign` method is deprecated, please use `generate_assignment` "
            "instead. This method will be removed in a future version of nbgrader.")
        warnings.warn(msg, DeprecationWarning)
        self.log.warning(msg)
        return self.generate_assignment(*args, **kwargs)

    def generate_assignment(self, assignment_id, force=True, create=True):
        """Run ``nbgrader generate_assignment`` for a particular assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        force: bool
            Whether to force creating the student version, even if it already
            exists.
        create: bool
            Whether to create the assignment in the database, if it doesn't
            already exist.

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        with temp_attrs(self.coursedir, assignment_id=assignment_id):
            app = GenerateAssignment(coursedir=self.coursedir, parent=self)
            app.force = force
            app.create_assignment = create
            return capture_log(app)
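    # A hedged sketch of the result shape shared by this method and the ones
    # below (exact keys depend on capture_log; `api` is an instance of this
    # class):
    #
    #     result = api.generate_assignment("ps1")
    #     if not result["success"]:
    #         print(result.get("error") or result.get("log"))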

    def unrelease(self, assignment_id):
        """Run ``nbgrader list --remove`` for a particular assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        if sys.platform != 'win32':
            with temp_attrs(self.coursedir, assignment_id=assignment_id):
                app = self.exchange.List(
                    coursedir=self.coursedir,
                    authenticator=self.authenticator,
                    parent=self)
                app.remove = True
                return capture_log(app)

    def release(self, *args, **kwargs):
        """Deprecated, please use `release_assignment` instead."""
        msg = (
            "The `release` method is deprecated, please use `release_assignment` "
            "instead. This method will be removed in a future version of nbgrader.")
        warnings.warn(msg, DeprecationWarning)
        self.log.warning(msg)
        return self.release_assignment(*args, **kwargs)

    def release_assignment(self, assignment_id):
        """Run ``nbgrader release_assignment`` for a particular assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        if sys.platform != 'win32':
            with temp_attrs(self.coursedir, assignment_id=assignment_id):
                app = self.exchange.ReleaseAssignment(
                    coursedir=self.coursedir,
                    authenticator=self.authenticator,
                    parent=self)
                return capture_log(app)

    def collect(self, assignment_id, update=True):
        """Run ``nbgrader collect`` for a particular assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        update: bool
            Whether to update already-collected assignments with newer
            submissions, if they exist

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        if sys.platform != 'win32':
            with temp_attrs(self.coursedir, assignment_id=assignment_id):
                app = self.exchange.Collect(
                    coursedir=self.coursedir,
                    authenticator=self.authenticator,
                    parent=self)
                app.update = update
                return capture_log(app)

    def autograde(self, assignment_id, student_id, force=True, create=True):
        """Run ``nbgrader autograde`` for a particular assignment and student.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        student_id: string
            The unique id of the student
        force: bool
            Whether to autograde the submission, even if it's already been
            autograded
        create: bool
            Whether to create students in the database if they don't already
            exist

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        with temp_attrs(self.coursedir, assignment_id=assignment_id, student_id=student_id):
            app = Autograde(coursedir=self.coursedir, parent=self)
            app.force = force
            app.create_student = create
            return capture_log(app)

    def generate_feedback(self, assignment_id, student_id=None, force=True):
        """Run ``nbgrader generate_feedback`` for a particular assignment and student.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        student_id: string
            The name of the student (optional). If not provided, then generate
            feedback from autograded submissions.
        force: bool
            Whether to force generating feedback, even if it already exists.

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        # Because we may be using HTMLExporter.template_file in other
        # parts of the UI, we need to make sure that the template
        # is explicitly 'feedback.tpl' here:
        c = Config()
        c.HTMLExporter.template_file = 'feedback.tpl'
        if student_id is not None:
            with temp_attrs(self.coursedir,
                            assignment_id=assignment_id,
                            student_id=student_id):
                app = GenerateFeedback(coursedir=self.coursedir, parent=self)
                app.update_config(c)
                app.force = force
                return capture_log(app)
        else:
            with temp_attrs(self.coursedir,
                            assignment_id=assignment_id):
                app = GenerateFeedback(coursedir=self.coursedir, parent=self)
                app.update_config(c)
                app.force = force
                return capture_log(app)

    def release_feedback(self, assignment_id, student_id=None):
        """Run ``nbgrader release_feedback`` for a particular assignment/student.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        student_id: string
            The name of the student (optional). If not provided, then release
            all generated feedback.

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        if student_id is not None:
            with temp_attrs(self.coursedir, assignment_id=assignment_id, student_id=student_id):
                app = self.exchange.ReleaseFeedback(
                    coursedir=self.coursedir,
                    authenticator=self.authenticator,
                    parent=self)
                return capture_log(app)
        else:
            with temp_attrs(self.coursedir, assignment_id=assignment_id, student_id='*'):
                app = self.exchange.ReleaseFeedback(
                    coursedir=self.coursedir,
                    authenticator=self.authenticator,
                    parent=self)
                return capture_log(app)

    def fetch_feedback(self, assignment_id, student_id):
        """Run ``nbgrader fetch_feedback`` for a particular assignment/student.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        student_id: string
            The name of the student.

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output
            - value (list of dict): all submitted assignments

        """
        with temp_attrs(self.coursedir, assignment_id=assignment_id, student_id=student_id):
            app = self.exchange.FetchFeedback(
                coursedir=self.coursedir,
                authenticator=self.authenticator,
                parent=self)
            ret_dic = capture_log(app)
            # assignment tab needs a 'value' field with the info needed to repopulate
            # the tables.
        with temp_attrs(self.coursedir, assignment_id='*', student_id=student_id):
            lister_rel = self.exchange.List(
                inbound=False, cached=True,
                coursedir=self.coursedir,
                authenticator=self.authenticator,
                config=self.config)
            assignments = lister_rel.start()
            ret_dic["value"] = sorted(assignments, key=lambda x: (x['course_id'], x['assignment_id']))
        return ret_dic
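
A hedged end-to-end sketch of the workflow exposed by the methods above
(assuming `api` is an instance of the enclosing API class, constructed with a
course directory and authenticator; assignment and student names are
illustrative):

    api.generate_assignment("ps1")   # build the student version
    api.release_assignment("ps1")    # push it to the exchange
    api.collect("ps1")               # gather submitted work
    for student_id in api.get_submitted_students("ps1"):
        api.autograde("ps1", student_id)
    api.generate_feedback("ps1")
    api.release_feedback("ps1")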
Example #19
class ExecuteReplyOkay(Reply):
    status = Enum(('ok', ))
    user_expressions = Dict()
Example #20
class DisplayIntegrator(Tool):
    name = "ctapipe-display-integration"
    description = __doc__

    event_index = Int(0, help='Event index to view.').tag(config=True)
    use_event_id = Bool(
        False,
        help='event_index will obtain an event using event_id instead of '
             'index.'
    ).tag(config=True)
    telescope = Int(
        None,
        allow_none=True,
        help='Telescope to view. Set to None to display the first '
             'telescope with data.'
    ).tag(config=True)
    channel = Enum([0, 1], 0, help='Channel to view').tag(config=True)

    extractor_product = tool_utils.enum_trait(
        ImageExtractor,
        default='NeighborPeakWindowSum'
    )

    aliases = Dict(
        dict(
            f='EventSource.input_url',
            max_events='EventSource.max_events',
            extractor='DisplayIntegrator.extractor_product',
            E='DisplayIntegrator.event_index',
            T='DisplayIntegrator.telescope',
            C='DisplayIntegrator.channel',
        )
    )
    flags = Dict(
        dict(
            id=(
                {
                    'DisplayIntegrator': {
                        'use_event_id': True
                    }
                }, 'event_index will obtain an event using '
                   'event_id instead of index.')
        )
    )
    classes = List(
        [
            EventSource,
            CameraDL1Calibrator,
        ] + tool_utils.classes_with_traits(ImageExtractor)
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.eventseeker = None
        self.dl0 = None
        self.extractor = None
        self.dl1 = None

    def setup(self):
        self.log_format = "%(levelname)s: %(message)s [%(name)s.%(funcName)s]"

        event_source = EventSource.from_config(parent=self)
        self.eventseeker = EventSeeker(event_source, parent=self)
        self.extractor = ImageExtractor.from_name(
            self.extractor_product,
            parent=self,
        )
        self.dl0 = CameraDL0Reducer(parent=self)
        self.dl1 = CameraDL1Calibrator(extractor=self.extractor, parent=self)

    def start(self):
        event_num = self.event_index
        if self.use_event_id:
            event_num = str(event_num)
        event = self.eventseeker[event_num]

        # Calibrate
        self.dl0.reduce(event)
        self.dl1.calibrate(event)

        # Select telescope
        tels = list(event.r0.tels_with_data)
        telid = self.telescope
        if telid is None:
            telid = tels[0]
        if telid not in tels:
            self.log.error(
                "[event] please specify one of the following "
                "telescopes for this event: {}".format(tels)
            )
            exit()

        extractor_name = self.extractor.__class__.__name__

        plot(event, telid, self.channel, extractor_name)

    def finish(self):
        pass
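
Assuming ctapipe's Tool.run(argv) entry point, the aliases above map short
flags onto traits, so an invocation might look like this (sketch only; the
input file name is invented):

    tool = DisplayIntegrator()
    tool.run(argv=["-f", "gamma_test.simtel.gz", "-E", "5", "-T", "1",
                   "--extractor", "NeighborPeakWindowSum"])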
Example #21
class Stream(Reference):
    name = Enum(('stdout', 'stderr'), default_value='stdout')
    text = Unicode()
Example #22
class JupytextConfiguration(Configurable):
    """Jupytext Configuration's options"""

    default_jupytext_formats = Unicode(
        u"",
        help="Save notebooks to these file extensions. "
        "Can be any of ipynb,Rmd,md,jl,py,R,nb.jl,nb.py,nb.R "
        "comma separated. If you want another format than the "
        "default one, append the format name to the extension, "
        "e.g. ipynb,py:percent to save the notebook to "
        "hydrogen/spyder/vscode compatible scripts",
        config=True,
    )

    preferred_jupytext_formats_save = Unicode(
        u"",
        help="Preferred format when saving notebooks as text, per extension. "
        'Use "jl:percent,py:percent,R:percent" if you want to save '
        "Julia, Python and R scripts in the double percent format and "
        'only write "jupytext_formats": "py" in the notebook metadata.',
        config=True,
    )

    preferred_jupytext_formats_read = Unicode(
        u"",
        help="Preferred format when reading notebooks from text, per "
        'extension. Use "py:sphinx" if you want to read all python '
        "scripts as Sphinx gallery scripts.",
        config=True,
    )

    default_notebook_metadata_filter = Unicode(
        u"",
        help="Cell metadata that should be save in the text representations. "
        "Examples: 'all', '-all', 'widgets,nteract', 'kernelspec,jupytext-all'",
        config=True,
    )

    default_cell_metadata_filter = Unicode(
        u"",
        help=
        "Notebook metadata that should be saved in the text representations. "
        "Examples: 'all', 'hide_input,hide_output'",
        config=True,
    )

    comment_magics = Enum(
        values=[True, False],
        allow_none=True,
        help=
        "Should Jupyter magic commands be commented out in the text representation?",
        config=True,
    )

    split_at_heading = Bool(
        False,
        help=
        "Split markdown cells on headings (Markdown and R Markdown formats only)",
        config=True,
    )

    sphinx_convert_rst2md = Bool(
        False,
        help=
        "When opening a Sphinx Gallery script, convert the reStructuredText to markdown",
        config=True,
    )

    outdated_text_notebook_margin = Float(
        1.0,
        help="Refuse to overwrite inputs of a ipynb notebooks with those of a "
        "text notebook when the text notebook plus margin is older than "
        "the ipynb notebook (NB: This option is ignored by Jupytext CLI)",
        config=True,
    )

    default_cell_markers = Unicode(
        u"",
        help=
        'Start and end cell markers for the light format, comma separated. Use "{{{,}}}" to mark cells '
        'as foldable regions in Vim, and "region,endregion" to mark cells as VS Code/PyCharm regions',
        config=True,
    )

    notebook_extensions = Unicode(
        u",".join(NOTEBOOK_EXTENSIONS),
        help="A comma separated list of notebook extensions",
        config=True,
    )

    def set_default_format_options(self, format_options, read=False):
        """Set default format option"""
        if self.default_notebook_metadata_filter:
            format_options.setdefault("notebook_metadata_filter",
                                      self.default_notebook_metadata_filter)
        if self.default_cell_metadata_filter:
            format_options.setdefault("cell_metadata_filter",
                                      self.default_cell_metadata_filter)
        if self.comment_magics is not None:
            format_options.setdefault("comment_magics", self.comment_magics)
        if self.split_at_heading:
            format_options.setdefault("split_at_heading",
                                      self.split_at_heading)
        if not read and self.default_cell_markers:
            format_options.setdefault("cell_markers",
                                      self.default_cell_markers)
        if read and self.sphinx_convert_rst2md:
            format_options.setdefault("rst2md", self.sphinx_convert_rst2md)

    def default_formats(self, path):
        """Return the default formats, if they apply to the current path #157"""
        from .paired_paths import (
            base_path,
            InconsistentPath,
        )

        formats = long_form_multiple_formats(self.default_jupytext_formats)
        for fmt in formats:
            try:
                base_path(path, fmt)
                return self.default_jupytext_formats
            except InconsistentPath:
                continue

        return None
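
A short usage sketch for the options above (assumed behaviour, based only on
the methods shown):

    config = JupytextConfiguration()
    config.default_jupytext_formats = "ipynb,py:percent"
    config.comment_magics = True

    format_options = {}
    config.set_default_format_options(format_options)
    # format_options is now {'comment_magics': True}

    # returns "ipynb,py:percent" when the path matches one of the formats
    config.default_formats("demo.py")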
Example #23
class NotebookNotary(LoggingConfigurable):
    """A class for computing and verifying notebook signatures."""

    data_dir = Unicode()

    def _data_dir_default(self):
        app = None
        try:
            if JupyterApp.initialized():
                app = JupyterApp.instance()
        except MultipleInstanceError:
            pass
        if app is None:
            # create an app, without the global instance
            app = JupyterApp()
            app.initialize(argv=[])
        return app.data_dir

    db_file = Unicode(
        config=True,
        help="""The sqlite file in which to store notebook signatures.
        By default, this will be in your Jupyter data directory.
        You can set it to ':memory:' to disable sqlite writing to the filesystem.
        """)

    def _db_file_default(self):
        if not self.data_dir:
            return ':memory:'
        return os.path.join(self.data_dir, u'nbsignatures.db')

    # 64k entries ~ 12MB
    cache_size = Integer(65535,
                         config=True,
                         help="""The number of notebook signatures to cache.
        When the number of signatures exceeds this value,
        the oldest 25% of signatures will be culled.
        """)
    db = Any()

    def _db_default(self):
        if sqlite3 is None:
            self.log.warn("Missing SQLite3, all notebooks will be untrusted!")
            return
        kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES
                      | sqlite3.PARSE_COLNAMES)
        try:
            db = sqlite3.connect(self.db_file, **kwargs)
            self.init_db(db)
        except (sqlite3.DatabaseError, sqlite3.OperationalError):
            if self.db_file != ':memory:':
                old_db_location = os.path.join(self.data_dir,
                                               self.db_file + ".bak")
                self.log.warn(
                    """The signatures database cannot be opened; maybe it is corrupted or encrypted.  You may need to rerun your notebooks to ensure that they are trusted to run Javascript.  The old signatures database has been renamed to %s and a new one has been created.""",
                    old_db_location)
                try:
                    os.rename(self.db_file, self.db_file + u'.bak')
                    db = sqlite3.connect(self.db_file, **kwargs)
                    self.init_db(db)
                except (sqlite3.DatabaseError, sqlite3.OperationalError):
                    self.log.warn(
                        """Failed commiting signatures database to disk.  You may need to move the database file to a non-networked file system, using config option `NotebookNotary.db_file`.  Using in-memory signatures database for the remainder of this session."""
                    )
                    self.db_file = ':memory:'
                    db = sqlite3.connect(self.db_file, **kwargs)
                    self.init_db(db)
            else:
                raise
        return db

    def init_db(self, db):
        db.execute("""
        CREATE TABLE IF NOT EXISTS nbsignatures
        (
            id integer PRIMARY KEY AUTOINCREMENT,
            algorithm text,
            signature text,
            path text,
            last_seen timestamp
        )""")
        db.execute("""
        CREATE INDEX IF NOT EXISTS algosig ON nbsignatures(algorithm, signature)
        """)
        db.commit()

    algorithm = Enum(algorithms,
                     default_value='sha256',
                     config=True,
                     help="""The hashing algorithm used to sign notebooks.""")

    def _algorithm_changed(self, name, old, new):
        self.digestmod = getattr(hashlib, self.algorithm)

    digestmod = Any()

    def _digestmod_default(self):
        return getattr(hashlib, self.algorithm)

    secret_file = Unicode(config=True,
                          help="""The file where the secret key is stored.""")

    def _secret_file_default(self):
        if not self.data_dir:
            return ''
        return os.path.join(self.data_dir, 'notebook_secret')

    secret = Bytes(config=True,
                   help="""The secret key with which notebooks are signed.""")

    def _secret_default(self):
        # note : this assumes an Application is running
        if os.path.exists(self.secret_file):
            with io.open(self.secret_file, 'rb') as f:
                return f.read()
        else:
            # base64.encodestring was removed in Python 3.9; encodebytes is
            # the modern equivalent
            secret = base64.encodebytes(os.urandom(1024))
            self._write_secret_file(secret)
            return secret

    def _write_secret_file(self, secret):
        """write my secret to my secret_file"""
        self.log.info("Writing notebook-signing key to %s", self.secret_file)
        with io.open(self.secret_file, 'wb') as f:
            f.write(secret)
        try:
            os.chmod(self.secret_file, 0o600)
        except OSError:
            self.log.warn("Could not set permissions on %s", self.secret_file)
        return secret

    def compute_signature(self, nb):
        """Compute a notebook's signature
        
        by hashing the entire contents of the notebook via HMAC digest.
        """
        hmac = HMAC(self.secret, digestmod=self.digestmod)
        # don't include the previous hash in the content to hash
        with signature_removed(nb):
            # sign the whole thing
            for b in yield_everything(nb):
                hmac.update(b)

        return hmac.hexdigest()

    def check_signature(self, nb):
        """Check a notebook's stored signature
        
        If a signature is stored in the notebook's metadata,
        a new signature is computed and compared with the stored value.
        
        Returns True if the signature is found and matches, False otherwise.
        
        The following conditions must all be met for a notebook to be trusted:
        - a signature is stored in the form 'scheme:hexdigest'
        - the stored scheme matches the requested scheme
        - the requested scheme is available from hashlib
        - the computed hash from notebook_signature matches the stored hash
        """
        if nb.nbformat < 3:
            return False
        if self.db is None:
            return False
        signature = self.compute_signature(nb)
        r = self.db.execute(
            """SELECT id FROM nbsignatures WHERE
            algorithm = ? AND
            signature = ?;
            """, (self.algorithm, signature)).fetchone()
        if r is None:
            return False
        self.db.execute(
            """UPDATE nbsignatures SET last_seen = ? WHERE
            algorithm = ? AND
            signature = ?;
            """,
            (datetime.utcnow(), self.algorithm, signature),
        )
        self.db.commit()
        return True

    def sign(self, nb):
        """Sign a notebook, indicating that its output is trusted on this machine
        
        Stores hash algorithm and hmac digest in a local database of trusted notebooks.
        """
        if nb.nbformat < 3:
            return
        signature = self.compute_signature(nb)
        self.store_signature(signature, nb)

    def store_signature(self, signature, nb):
        if self.db is None:
            return
        self.db.execute(
            """INSERT OR IGNORE INTO nbsignatures
            (algorithm, signature, last_seen) VALUES (?, ?, ?)""",
            (self.algorithm, signature, datetime.utcnow()))
        self.db.execute(
            """UPDATE nbsignatures SET last_seen = ? WHERE
            algorithm = ? AND
            signature = ?;
            """,
            (datetime.utcnow(), self.algorithm, signature),
        )
        self.db.commit()
        n, = self.db.execute("SELECT Count(*) FROM nbsignatures").fetchone()
        if n > self.cache_size:
            self.cull_db()

    def unsign(self, nb):
        """Ensure that a notebook is untrusted
        
        by removing its signature from the trusted database, if present.
        """
        signature = self.compute_signature(nb)
        self.db.execute(
            """DELETE FROM nbsignatures WHERE
                algorithm = ? AND
                signature = ?;
            """, (self.algorithm, signature))
        self.db.commit()

    def cull_db(self):
        """Cull oldest 25% of the trusted signatures when the size limit is reached"""
        self.db.execute(
            """DELETE FROM nbsignatures WHERE id IN (
            SELECT id FROM nbsignatures ORDER BY last_seen DESC LIMIT -1 OFFSET ?
        );
        """, (max(int(0.75 * self.cache_size), 1), ))

    def mark_cells(self, nb, trusted):
        """Mark cells as trusted if the notebook's signature can be verified
        
        Sets ``cell.metadata.trusted = True | False`` on all code cells,
        depending on whether the stored signature can be verified.
        
        This function is the inverse of check_cells
        """
        if nb.nbformat < 3:
            return

        for cell in yield_code_cells(nb):
            cell['metadata']['trusted'] = trusted

    def _check_cell(self, cell, nbformat_version):
        """Do we trust an individual cell?
        
        Return True if:
        
        - cell is explicitly trusted
        - cell has no potentially unsafe rich output
        
        If a cell has no output, or only simple print statements,
        it will always be trusted.
        """
        # explicitly trusted
        if cell['metadata'].pop("trusted", False):
            return True

        # explicitly safe output
        if nbformat_version >= 4:
            unsafe_output_types = ['execute_result', 'display_data']
            safe_keys = {"output_type", "execution_count", "metadata"}
        else:  # v3
            unsafe_output_types = ['pyout', 'display_data']
            safe_keys = {"output_type", "prompt_number", "metadata"}

        for output in cell['outputs']:
            output_type = output['output_type']
            if output_type in unsafe_output_types:
                # if there are any data keys not in the safe whitelist
                output_keys = set(output)
                if output_keys.difference(safe_keys):
                    return False

        return True
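    # For example (nbformat 4, illustrative): a {'output_type': 'stream', ...}
    # output is never considered unsafe, while a 'display_data' output whose
    # keys include 'data' (not in safe_keys) makes the cell untrusted.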

    def check_cells(self, nb):
        """Return whether all code cells are trusted
        
        If there are no code cells, return True.
        
        This function is the inverse of mark_cells.
        """
        if nb.nbformat < 3:
            return False
        trusted = True
        for cell in yield_code_cells(nb):
            # only distrust a cell if it actually has some output to distrust
            if not self._check_cell(cell, nb.nbformat):
                trusted = False

        return trusted
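
A hedged usage sketch for the class above (file name invented; nbformat is
the standard notebook reader):

    import nbformat

    nb = nbformat.read("demo.ipynb", as_version=4)
    notary = NotebookNotary()
    if notary.check_signature(nb):
        notary.mark_cells(nb, True)
    else:
        notary.sign(nb)   # trust this notebook on this machine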
Example #24
class ZMQTerminalInteractiveShell(SingletonConfigurable):
    readline_use = False

    pt_cli = None

    _executing = False
    _execution_state = Unicode('')
    _pending_clearoutput = False
    _eventloop = None
    own_kernel = False  # Changed by ZMQTerminalIPythonApp

    editing_mode = Unicode(
        'emacs',
        config=True,
        help="Shortcut style to use at the prompt. 'vi' or 'emacs'.",
    )

    highlighting_style = Unicode(
        '',
        config=True,
        help="The name of a Pygments style to use for syntax highlighting")

    highlighting_style_overrides = Dict(
        config=True, help="Override highlighting format for specific tokens")

    true_color = Bool(
        False,
        config=True,
        help=("Use 24bit colors instead of 256 colors in prompt highlighting. "
              "If your terminal supports true color, the following command "
              "should print 'TRUECOLOR' in orange: "
              "printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\""))

    history_load_length = Integer(
        1000, config=True, help="How many history items to load into memory")

    banner = Unicode(
        'Jupyter console {version}\n\n{kernel_banner}',
        config=True,
        help=(
            "Text to display before the first prompt. Will be formatted with "
            "variables {version} and {kernel_banner}."))

    kernel_timeout = Float(
        60,
        config=True,
        help="""Timeout for giving up on a kernel (in seconds).

        On first connect and restart, the console tests whether the
        kernel is running and responsive by sending kernel_info_requests.
        This sets the timeout in seconds for how long the kernel can take
        before being presumed dead.
        """)

    image_handler = Enum(('PIL', 'stream', 'tempfile', 'callable'),
                         'PIL',
                         config=True,
                         allow_none=True,
                         help="""
        Handler for image type output.  This is useful, for example,
        when connecting to the kernel in which pylab inline backend is
        activated.  There are four handlers defined.  'PIL': Use
        Python Imaging Library to popup image; 'stream': Use an
        external program to show the image.  Image will be fed into
        the STDIN of the program.  You will need to configure
        `stream_image_handler`; 'tempfile': Use an external program to
        show the image.  Image will be saved in a temporary file and
        the program is called with the temporary file.  You will need
        to configure `tempfile_image_handler`; 'callable': You can set
        any Python callable which is called with the image data.  You
        will need to configure `callable_image_handler`.
        """)

    stream_image_handler = List(config=True,
                                help="""
        Command to invoke an image viewer program when you are using
        'stream' image handler.  This option is a list of strings where
        the first element is the command itself and the remainder are the
        options for the command.  Raw image data is given as STDIN to
        the program.
        """)

    tempfile_image_handler = List(config=True,
                                  help="""
        Command to invoke an image viewer program when you are using
        'tempfile' image handler.  This option is a list of strings
        where the first element is the command itself and the remainder
        are the options for the command.  You can use {file} and
        {format} in the string to represent the location of the
        generated image file and image format.
        """)

    callable_image_handler = Any(config=True,
                                 help="""
        Callable object called via 'callable' image handler with one
        argument, `data`, which is `msg["content"]["data"]` where
        `msg` is the message from iopub channel.  For example, you can
        find base64 encoded PNG data as `data['image/png']`. If your function
        can't handle the data supplied, it should return `False` to indicate
        this.
        """)

    mime_preference = List(
        default_value=['image/png', 'image/jpeg', 'image/svg+xml'],
        config=True,
        help="""
        Preferred object representation MIME type in order.  First
        matched MIME type will be used.
        """)

    use_kernel_is_complete = Bool(
        True,
        config=True,
        help="""Whether to use the kernel's is_complete message
        handling. If False, then the frontend will use its
        own is_complete handler.
        """)
    kernel_is_complete_timeout = Float(
        1,
        config=True,
        help="""Timeout (in seconds) for giving up on a kernel's is_complete
        response.

        If the kernel does not respond at any point within this time,
        the kernel will no longer be asked if code is complete, and the
        console will default to the built-in is_complete test.
        """)

    # This is configurable on JupyterConsoleApp; this copy is not configurable
    # to avoid a duplicate config option.
    confirm_exit = Bool(True,
                        help="""Set to display confirmation dialog on exit.
        You can always use 'exit' or 'quit' to force a
        direct exit without any confirmation.
        """)

    highlight_matching_brackets = Bool(
        True,
        help="Highlight matching brackets.",
    ).tag(config=True)

    manager = Instance('jupyter_client.KernelManager', allow_none=True)
    client = Instance('jupyter_client.KernelClient', allow_none=True)

    def _client_changed(self, name, old, new):
        self.session_id = new.session.session

    session_id = Unicode()

    def _banner1_default(self):
        return "Jupyter Console {version}\n".format(version=__version__)

    simple_prompt = Bool(
        False,
        help="""Use simple fallback prompt. Features may be limited.""").tag(
            config=True)

    def __init__(self, **kwargs):
        # This is where traits with a config_key argument are updated
        # from the values on config.
        super(ZMQTerminalInteractiveShell, self).__init__(**kwargs)
        self.configurables = [self]

        self.init_history()
        self.init_completer()
        self.init_io()

        self.init_kernel_info()
        self.init_prompt_toolkit_cli()
        self.keep_running = True
        self.execution_count = 1

    def init_completer(self):
        """Initialize the completion machinery.

        This creates completion machinery that can be used by client code,
        either interactively in-process (typically triggered by the readline
        library), programmatically (such as in test suites) or out-of-process
        (typically over the network by remote frontends).
        """
        self.Completer = ZMQCompleter(self, self.client, config=self.config)

    def init_history(self):
        """Sets up the command history. """
        self.history_manager = ZMQHistoryManager(client=self.client)
        self.configurables.append(self.history_manager)

    def get_prompt_tokens(self, ec=None):
        if ec is None:
            ec = self.execution_count
        return [
            (Token.Prompt, 'In ['),
            (Token.PromptNum, str(ec)),
            (Token.Prompt, ']: '),
        ]

    def get_continuation_tokens(self, width):
        return [
            (Token.Prompt, (' ' * (width - 2)) + ': '),
        ]

    def get_out_prompt_tokens(self):
        return [(Token.OutPrompt, 'Out['),
                (Token.OutPromptNum, str(self.execution_count)),
                (Token.OutPrompt, ']: ')]

    def print_out_prompt(self):
        tokens = self.get_out_prompt_tokens()
        print_formatted_text(PygmentsTokens(tokens),
                             end='',
                             style=self.pt_cli.app.style)

    def get_remote_prompt_tokens(self):
        return [
            (Token.RemotePrompt, self.other_output_prefix),
        ]

    def print_remote_prompt(self, ec=None):
        tokens = self.get_remote_prompt_tokens() + self.get_prompt_tokens(
            ec=ec)
        print_formatted_text(PygmentsTokens(tokens),
                             end='',
                             style=self.pt_cli.app.style)

    kernel_info = {}

    def init_kernel_info(self):
        """Wait for a kernel to be ready, and store kernel info"""
        timeout = self.kernel_timeout
        tic = time.time()
        self.client.hb_channel.unpause()
        msg_id = self.client.kernel_info()
        while True:
            try:
                reply = self.client.get_shell_msg(timeout=1)
            except Empty:
                if (time.time() - tic) > timeout:
                    raise RuntimeError(
                        "Kernel didn't respond to kernel_info_request")
            else:
                if reply['parent_header'].get('msg_id') == msg_id:
                    self.kernel_info = reply['content']
                    return

    def show_banner(self):
        print(self.banner.format(version=__version__,
                                 kernel_banner=self.kernel_info.get(
                                     'banner', '')),
              end='',
              flush=True)

    def init_prompt_toolkit_cli(self):
        if self.simple_prompt or ('JUPYTER_CONSOLE_TEST' in os.environ):
            # Simple restricted interface for tests so we can find prompts with
            # pexpect. Multi-line input not supported.
            @asyncio.coroutine
            def prompt():
                prompt = 'In [%d]: ' % self.execution_count
                raw = yield from async_input(prompt)
                return raw

            self.prompt_for_code = prompt
            self.print_out_prompt = \
                lambda: print('Out[%d]: ' % self.execution_count, end='')
            return

        kb = KeyBindings()
        insert_mode = vi_insert_mode | emacs_insert_mode

        @kb.add("enter",
                filter=(has_focus(DEFAULT_BUFFER)
                        & ~has_selection
                        & insert_mode))
        def _(event):
            b = event.current_buffer
            d = b.document
            if not (d.on_last_line or d.cursor_position_row >=
                    d.line_count - d.empty_line_count_at_the_end()):
                b.newline()
                return

            # Pressing enter flushes any pending display. This also ensures
            # the displayed execution_count is correct.
            self.handle_iopub()

            more, indent = self.check_complete(d.text)

            if (not more) and b.accept_handler:
                b.validate_and_handle()
            else:
                b.insert_text('\n' + indent)

        @kb.add("c-c", filter=has_focus(DEFAULT_BUFFER))
        def _(event):
            event.current_buffer.reset()

        @kb.add("c-\\", filter=has_focus(DEFAULT_BUFFER))
        def _(event):
            raise EOFError

        @kb.add("c-z",
                filter=Condition(lambda: suspend_to_background_supported()))
        def _(event):
            event.cli.suspend_to_background()

        # Pre-populate history from IPython's history database
        history = InMemoryHistory()
        last_cell = u""
        for _, _, cell in self.history_manager.get_tail(
                self.history_load_length, include_latest=True):
            # Ignore blank lines and consecutive duplicates
            cell = cell.rstrip()
            if cell and (cell != last_cell):
                history.append_string(cell)
                last_cell = cell

        style_overrides = {
            Token.Prompt: '#009900',
            Token.PromptNum: '#00ff00 bold',
            Token.OutPrompt: '#ff2200',
            Token.OutPromptNum: '#ff0000 bold',
            Token.RemotePrompt: '#999900',
        }
        if self.highlighting_style:
            style_cls = get_style_by_name(self.highlighting_style)
        else:
            style_cls = get_style_by_name('default')
            # The default theme needs to be visible on both a dark background
            # and a light background, because we can't tell what the terminal
            # looks like. These tweaks to the default theme help with that.
            style_overrides.update({
                Token.Number: '#007700',
                Token.Operator: 'noinherit',
                Token.String: '#BB6622',
                Token.Name.Function: '#2080D0',
                Token.Name.Class: 'bold #2080D0',
                Token.Name.Namespace: 'bold #2080D0',
            })
        style_overrides.update(self.highlighting_style_overrides)
        style = merge_styles([
            style_from_pygments_cls(style_cls),
            style_from_pygments_dict(style_overrides),
        ])

        editing_mode = getattr(EditingMode, self.editing_mode.upper())
        langinfo = self.kernel_info.get('language_info', {})
        lexer = langinfo.get('pygments_lexer', langinfo.get('name', 'text'))

        # If enabled in the settings, highlight matching brackets
        # when the DEFAULT_BUFFER has the focus
        input_processors = [
            ConditionalProcessor(
                processor=HighlightMatchingBracketProcessor(chars='[](){}'),
                filter=has_focus(DEFAULT_BUFFER) & ~is_done
                & Condition(lambda: self.highlight_matching_brackets))
        ]

        # Tell prompt_toolkit to use the asyncio event loop.
        # Obsolete in prompt_toolkit.v3
        if not PTK3:
            use_asyncio_event_loop()

        self.pt_cli = PromptSession(
            message=(lambda: PygmentsTokens(self.get_prompt_tokens())),
            multiline=True,
            editing_mode=editing_mode,
            lexer=PygmentsLexer(get_pygments_lexer(lexer)),
            prompt_continuation=(
                lambda width, lineno, is_soft_wrap: PygmentsTokens(
                    self.get_continuation_tokens(width))),
            key_bindings=kb,
            history=history,
            completer=JupyterPTCompleter(self.Completer),
            enable_history_search=True,
            style=style,
            input_processors=input_processors,
            color_depth=(ColorDepth.TRUE_COLOR if self.true_color else None),
        )

    @asyncio.coroutine
    def prompt_for_code(self):
        if self.next_input:
            default = self.next_input
            self.next_input = None
        else:
            default = ''

        if PTK3:
            text = yield from self.pt_cli.prompt_async(default=default)
        else:
            text = yield from self.pt_cli.prompt(default=default, async_=True)

        return text

    def init_io(self):
        if sys.platform not in {'win32', 'cli'}:
            return

        import colorama
        colorama.init()

    def check_complete(self, code):
        if self.use_kernel_is_complete:
            msg_id = self.client.is_complete(code)
            try:
                return self.handle_is_complete_reply(
                    msg_id, timeout=self.kernel_is_complete_timeout)
            except SyntaxError:
                return False, ""
        else:
            lines = code.splitlines()
            if len(lines):
                more = (lines[-1] != "")
                return more, ""
            else:
                return False, ""
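        # Example (fallback heuristic): "for i in x:\n    pass\n\n".splitlines()
        # ends with an empty string, so more=False and the cell is submitted;
        # "for i in x:".splitlines() does not, so more=True and the frontend
        # keeps prompting for another line.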

    def ask_exit(self):
        self.keep_running = False

    # This is set from payloads in handle_execute_reply
    next_input = None

    def pre_prompt(self):
        if self.next_input:
            # We can't set the buffer here, because it will be reset just after
            # this. Adding a callable to pre_run_callables does what we need
            # after the buffer is reset.
            s = self.next_input

            def set_doc():
                self.pt_cli.app.buffer.document = Document(s)

            if hasattr(self.pt_cli, 'pre_run_callables'):
                self.pt_cli.app.pre_run_callables.append(set_doc)
            else:
                # Older version of prompt_toolkit; it's OK to set the document
                # directly here.
                set_doc()
            self.next_input = None

    @asyncio.coroutine
    def interact(self, loop=None, display_banner=None):
        while self.keep_running:
            print('\n', end='')

            try:
                code = yield from self.prompt_for_code()
            except EOFError:
                if (not self.confirm_exit) or \
                        ask_yes_no('Do you really want to exit ([y]/n)?', 'y', 'n'):
                    self.ask_exit()

            else:
                if code:
                    self.run_cell(code, store_history=True)

    def mainloop(self):
        self.keepkernel = not self.own_kernel
        loop = asyncio.get_event_loop()
        # An extra layer of protection in case someone mashing Ctrl-C breaks
        # out of our internal code.
        while True:
            try:
                tasks = [self.interact(loop=loop)]

                if self.include_other_output:
                    # only poll the iopub channel asynchronously if we
                    # wish to include external content
                    tasks.append(self.handle_external_iopub(loop=loop))

                main_task = asyncio.wait(tasks,
                                         loop=loop,
                                         return_when=asyncio.FIRST_COMPLETED)
                _, pending = loop.run_until_complete(main_task)

                for task in pending:
                    task.cancel()
                try:
                    loop.run_until_complete(asyncio.gather(*pending))
                except asyncio.CancelledError:
                    pass
                loop.stop()
                loop.close()
                break
            except KeyboardInterrupt:
                print("\nKeyboardInterrupt escaped interact()\n")

        if self._eventloop:
            self._eventloop.close()
        if self.keepkernel and not self.own_kernel:
            print('keeping kernel alive')
        elif self.keepkernel and self.own_kernel:
            print("owning kernel, cannot keep it alive")
            self.client.shutdown()
        else:
            print("Shutting down kernel")
            self.client.shutdown()

    def run_cell(self, cell, store_history=True):
        """Run a complete IPython cell.

        Parameters
        ----------
        cell : str
          The code (including IPython code such as %magic functions) to run.
        store_history : bool
          If True, the raw and translated cell will be stored in IPython's
          history. For user code calling back into IPython's machinery, this
          should be set to False.
        """
        if (not cell) or cell.isspace():
            # pressing enter flushes any pending display
            self.handle_iopub()
            return

        # flush stale replies, which could have been ignored due to missed heartbeats
        while self.client.shell_channel.msg_ready():
            self.client.shell_channel.get_msg()
        # execute takes 'silent', which is the inverse of store_history
        msg_id = self.client.execute(cell, not store_history)

        # first thing is wait for any side effects (output, stdin, etc.)
        self._executing = True
        self._execution_state = "busy"
        while self._execution_state != 'idle' and self.client.is_alive():
            try:
                self.handle_input_request(msg_id, timeout=0.05)
            except Empty:
                # display intermediate print statements, etc.
                self.handle_iopub(msg_id)
            except ZMQError as e:
                # Carry on if polling was interrupted by a signal
                if e.errno != errno.EINTR:
                    raise

        # after all of that is done, wait for the execute reply
        while self.client.is_alive():
            try:
                self.handle_execute_reply(msg_id, timeout=0.05)
            except Empty:
                pass
            else:
                break
        self._executing = False

    #-----------------
    # message handlers
    #-----------------

    def handle_execute_reply(self, msg_id, timeout=None):
        msg = self.client.shell_channel.get_msg(block=False, timeout=timeout)
        if msg["parent_header"].get("msg_id", None) == msg_id:

            self.handle_iopub(msg_id)

            content = msg["content"]
            status = content['status']

            if status == 'aborted':
                self.write('Aborted\n')
                return
            elif status == 'ok':
                # handle payloads
                for item in content.get("payload", []):
                    source = item['source']
                    if source == 'page':
                        page.page(item['data']['text/plain'])
                    elif source == 'set_next_input':
                        self.next_input = item['text']
                    elif source == 'ask_exit':
                        self.keepkernel = item.get('keepkernel', False)
                        self.ask_exit()

            elif status == 'error':
                pass

            self.execution_count = int(content["execution_count"] + 1)

    def handle_is_complete_reply(self, msg_id, timeout=None):
        """
        Wait for a repsonse from the kernel, and return two values:
            more? - (boolean) should the frontend ask for more input
            indent - an indent string to prefix the input
        Overloaded methods may want to examine the comeplete source. Its is
        in the self._source_lines_buffered list.
        """
        ## Get the is_complete response:
        msg = None
        try:
            msg = self.client.shell_channel.get_msg(block=True,
                                                    timeout=timeout)
        except Empty:
            warn('The kernel did not respond to an is_complete_request. '
                 'Setting `use_kernel_is_complete` to False.')
            self.use_kernel_is_complete = False
            return False, ""
        ## Handle response:
        if msg["parent_header"].get("msg_id", None) != msg_id:
            warn(
                'The kernel did not respond properly to an is_complete_request: %s.'
                % str(msg))
            return False, ""
        else:
            status = msg["content"].get("status", None)
            indent = msg["content"].get("indent", "")
        ## Return more? and indent string
        if status == "complete":
            return False, indent
        elif status == "incomplete":
            return True, indent
        elif status == "invalid":
            raise SyntaxError()
        elif status == "unknown":
            return False, indent
        else:
            warn('The kernel sent an invalid is_complete_reply status: "%s".' %
                 status)
            return False, indent

    include_other_output = Bool(False,
                                config=True,
                                help="""Whether to include output from clients
        other than this one sharing the same kernel.
        """)
    other_output_prefix = Unicode(
        "Remote ",
        config=True,
        help="""Prefix to add to outputs coming from clients other than this one.

        Only relevant if include_other_output is True.
        """)

    def from_here(self, msg):
        """Return whether a message is from this session"""
        return msg['parent_header'].get("session",
                                        self.session_id) == self.session_id

    def include_output(self, msg):
        """Return whether we should include a given output message"""
        from_here = self.from_here(msg)
        if msg['msg_type'] == 'execute_input':
            # only echo inputs not from here
            return self.include_other_output and not from_here

        if self.include_other_output:
            return True
        else:
            return from_here

    @asyncio.coroutine
    def handle_external_iopub(self, loop=None):
        while self.keep_running:
            # we need to check for keep_running from time to time as
            # we are blocking in an executor block which cannot be cancelled.
            poll_result = yield from loop.run_in_executor(
                None, self.client.iopub_channel.socket.poll, 500)
            if (poll_result):
                self.handle_iopub()

    def handle_iopub(self, msg_id=''):
        """Process messages on the IOPub channel

           This method consumes and processes messages on the IOPub channel,
           such as stdout, stderr, execute_result and status.

           It only displays output that is caused by this session.
        """
        while self.client.iopub_channel.msg_ready():
            sub_msg = self.client.iopub_channel.get_msg()
            msg_type = sub_msg['header']['msg_type']
            parent = sub_msg["parent_header"]

            # Update execution_count in case it changed in another session
            if msg_type == "execute_input":
                self.execution_count = int(
                    sub_msg["content"]["execution_count"]) + 1

            if self.include_output(sub_msg):
                if msg_type == 'status':
                    self._execution_state = sub_msg["content"][
                        "execution_state"]

                elif msg_type == 'stream':
                    if sub_msg["content"]["name"] == "stdout":
                        if self._pending_clearoutput:
                            print("\r", end="")
                            self._pending_clearoutput = False
                        print(sub_msg["content"]["text"], end="")
                        sys.stdout.flush()
                    elif sub_msg["content"]["name"] == "stderr":
                        if self._pending_clearoutput:
                            print("\r", file=sys.stderr, end="")
                            self._pending_clearoutput = False
                        print(sub_msg["content"]["text"],
                              file=sys.stderr,
                              end="")
                        sys.stderr.flush()

                elif msg_type == 'execute_result':
                    if self._pending_clearoutput:
                        print("\r", end="")
                        self._pending_clearoutput = False
                    self.execution_count = int(
                        sub_msg["content"]["execution_count"])
                    if not self.from_here(sub_msg):
                        sys.stdout.write(self.other_output_prefix)
                    format_dict = sub_msg["content"]["data"]
                    self.handle_rich_data(format_dict)

                    if 'text/plain' not in format_dict:
                        continue

                    # prompt_toolkit writes the prompt at a slightly lower level,
                    # so flush streams first to ensure correct ordering.
                    sys.stdout.flush()
                    sys.stderr.flush()
                    self.print_out_prompt()
                    text_repr = format_dict['text/plain']
                    if '\n' in text_repr:
                        # For multi-line results, start a new line after prompt
                        print()
                    print(text_repr)

                    # Remote: add new prompt
                    if not self.from_here(sub_msg):
                        sys.stdout.write('\n')
                        sys.stdout.flush()
                        self.print_remote_prompt()

                elif msg_type == 'display_data':
                    data = sub_msg["content"]["data"]
                    handled = self.handle_rich_data(data)
                    if not handled:
                        if not self.from_here(sub_msg):
                            sys.stdout.write(self.other_output_prefix)
                        # if it was an image, we handled it by now
                        if 'text/plain' in data:
                            print(data['text/plain'])

                # If execute input: print it
                elif msg_type == 'execute_input':
                    content = sub_msg['content']
                    ec = content.get('execution_count',
                                     self.execution_count - 1)

                    # New line
                    sys.stdout.write('\n')
                    sys.stdout.flush()

                    # With `Remote In [3]: `
                    self.print_remote_prompt(ec=ec)

                    # And the code
                    sys.stdout.write(content['code'] + '\n')

                elif msg_type == 'clear_output':
                    if sub_msg["content"]["wait"]:
                        self._pending_clearoutput = True
                    else:
                        print("\r", end="")

                elif msg_type == 'error':
                    for frame in sub_msg["content"]["traceback"]:
                        print(frame, file=sys.stderr)

    _imagemime = {
        'image/png': 'png',
        'image/jpeg': 'jpeg',
        'image/svg+xml': 'svg',
    }

    def handle_rich_data(self, data):
        for mime in self.mime_preference:
            if mime in data and mime in self._imagemime:
                if self.handle_image(data, mime):
                    return True
        return False
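    # Example: with mime_preference = ['image/png', 'image/svg+xml'] and a
    # message that carries both 'image/png' and 'text/plain' data, the PNG is
    # routed to the configured image handler; handle_rich_data() then returns
    # True so the caller can skip the 'text/plain' fallback.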

    def handle_image(self, data, mime):
        handler = getattr(self, 'handle_image_{0}'.format(self.image_handler),
                          None)
        if handler:
            return handler(data, mime)

    def handle_image_PIL(self, data, mime):
        if mime not in ('image/png', 'image/jpeg'):
            return False
        try:
            from PIL import Image, ImageShow
        except ImportError:
            return False
        raw = base64.decodebytes(data[mime].encode('ascii'))
        img = Image.open(BytesIO(raw))
        return ImageShow.show(img)

    def handle_image_stream(self, data, mime):
        raw = base64.decodebytes(data[mime].encode('ascii'))
        imageformat = self._imagemime[mime]
        fmt = dict(format=imageformat)
        args = [s.format(**fmt) for s in self.stream_image_handler]
        with open(os.devnull, 'w') as devnull:
            proc = subprocess.Popen(args,
                                    stdin=subprocess.PIPE,
                                    stdout=devnull,
                                    stderr=devnull)
            proc.communicate(raw)
        return (proc.returncode == 0)
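    # Example (a sketch): stream_image_handler = ['display', '-'] would pipe
    # the decoded image bytes to ImageMagick's viewer via stdin; any '{format}'
    # placeholder in the list is substituted with 'png', 'jpeg' or 'svg'
    # before the command is launched.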

    def handle_image_tempfile(self, data, mime):
        raw = base64.decodebytes(data[mime].encode('ascii'))
        imageformat = self._imagemime[mime]
        filename = 'tmp.{0}'.format(imageformat)
        with NamedFileInTemporaryDirectory(filename) as f, \
                open(os.devnull, 'w') as devnull:
            f.write(raw)
            f.flush()
            fmt = dict(file=f.name, format=imageformat)
            args = [s.format(**fmt) for s in self.tempfile_image_handler]
            rc = subprocess.call(args, stdout=devnull, stderr=devnull)
        return (rc == 0)
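    # Example (a sketch): tempfile_image_handler = ['xdg-open', '{file}'] would
    # write the image to tmp.<format> inside a temporary directory and replace
    # the '{file}' and '{format}' placeholders before invoking the viewer.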

    def handle_image_callable(self, data, mime):
        res = self.callable_image_handler(data)
        if res is not False:
            # If handler func returns e.g. None, assume it has handled the data.
            res = True
        return res

    def handle_input_request(self, msg_id, timeout=0.1):
        """ Method to capture raw_input
        """
        req = self.client.stdin_channel.get_msg(timeout=timeout)
        # in case any iopub came while we were waiting:
        self.handle_iopub(msg_id)
        if msg_id == req["parent_header"].get("msg_id"):
            # wrap SIGINT handler
            real_handler = signal.getsignal(signal.SIGINT)

            def double_int(sig, frame):
                # call real handler (forwards sigint to kernel),
                # then raise local interrupt, stopping local raw_input
                real_handler(sig, frame)
                raise KeyboardInterrupt

            signal.signal(signal.SIGINT, double_int)
            content = req['content']
            read = getpass if content.get('password', False) else input
            try:
                raw_data = read(content["prompt"])
            except EOFError:
                # turn EOFError into EOF character
                raw_data = '\x04'
            except KeyboardInterrupt:
                sys.stdout.write('\n')
                return
            finally:
                # restore SIGINT handler
                signal.signal(signal.SIGINT, real_handler)

            # only send stdin reply if there *was not* another request
            # or execution finished while we were reading.
            if not (self.client.stdin_channel.msg_ready()
                    or self.client.shell_channel.msg_ready()):
                self.client.input(raw_data)
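
# A minimal sketch of the kernel_info / is_complete round-trips used above,
# driven directly through jupyter_client (assumes ipykernel is installed so a
# local kernel can be started; error handling omitted):
from jupyter_client.manager import KernelManager

km = KernelManager()
km.start_kernel()
kc = km.client()
kc.start_channels()

msg_id = kc.is_complete('for i in range(3):')
reply = kc.get_shell_msg(timeout=5)
if reply['parent_header']['msg_id'] == msg_id:
    print(reply['content']['status'])              # expected: 'incomplete'
    print(repr(reply['content'].get('indent', '')))

kc.stop_channels()
km.shutdown_kernel()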
Example #25
class TerminalInteractiveShell(InteractiveShell):
    space_for_menu = Integer(
        6,
        help='Number of lines at the bottom of the screen '
        'to reserve for the completion menu').tag(config=True)

    pt_app = None
    debugger_history = None

    simple_prompt = Bool(
        _use_simple_prompt,
        help=
        """Use `raw_input` for the REPL, without completion and prompt colors.

            Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR. Known uses include:
            IPython own testing machinery, and emacs inferior-shell integration through elpy.

            This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT`
            environment variable is set, or if the current terminal is not a tty."""
    ).tag(config=True)

    @property
    def debugger_cls(self):
        return Pdb if self.simple_prompt else TerminalPdb

    confirm_exit = Bool(
        True,
        help="""
        Set to confirm when you try to exit IPython with an EOF (Control-D
        in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
        you can force a direct exit without any confirmation.""",
    ).tag(config=True)

    editing_mode = Unicode(
        'emacs',
        help="Shortcut style to use at the prompt. 'vi' or 'emacs'.",
    ).tag(config=True)

    mouse_support = Bool(
        False,
        help=
        "Enable mouse support in the prompt\n(Note: prevents selecting text with the mouse)"
    ).tag(config=True)

    # We don't load the list of styles for the help string, because loading
    # Pygments plugins takes time and can cause unexpected errors.
    highlighting_style = Union(
        [Unicode('legacy'), Type(klass=Style)],
        help="""The name or class of a Pygments style to use for syntax
        highlighting. To see available styles, run `pygmentize -L styles`."""
    ).tag(config=True)

    @validate('editing_mode')
    def _validate_editing_mode(self, proposal):
        if proposal['value'].lower() == 'vim':
            proposal['value'] = 'vi'
        elif proposal['value'].lower() == 'default':
            proposal['value'] = 'emacs'

        if hasattr(EditingMode, proposal['value'].upper()):
            return proposal['value'].lower()

        return self.editing_mode

    @observe('editing_mode')
    def _editing_mode(self, change):
        u_mode = getattr(EditingMode, change.new.upper())
        if self.pt_app:
            self.pt_app.editing_mode = u_mode

    @observe('highlighting_style')
    @observe('colors')
    def _highlighting_style_changed(self, change):
        self.refresh_style()

    def refresh_style(self):
        self._style = self._make_style_from_name_or_cls(
            self.highlighting_style)

    highlighting_style_overrides = Dict(
        help="Override highlighting format for specific tokens").tag(
            config=True)

    true_color = Bool(
        False,
        help=("Use 24bit colors instead of 256 colors in prompt highlighting. "
              "If your terminal supports true color, the following command "
              "should print 'TRUECOLOR' in orange: "
              "printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\"")).tag(
                  config=True)

    editor = Unicode(
        get_default_editor(),
        help="Set the editor used by IPython (default to $EDITOR/vi/notepad)."
    ).tag(config=True)

    prompts_class = Type(
        Prompts,
        help='Class used to generate Prompt token for prompt_toolkit').tag(
            config=True)

    prompts = Instance(Prompts)

    @default('prompts')
    def _prompts_default(self):
        return self.prompts_class(self)

#    @observe('prompts')
#    def _(self, change):
#        self._update_layout()

    @default('displayhook_class')
    def _displayhook_class_default(self):
        return RichPromptDisplayHook

    term_title = Bool(
        True, help="Automatically set the terminal title").tag(config=True)

    term_title_format = Unicode(
        "IPython: {cwd}",
        help=
        "Customize the terminal title format.  This is a python format string. "
        + "Available substitutions are: {cwd}.").tag(config=True)

    display_completions = Enum(
        ('column', 'multicolumn', 'readlinelike'),
        help=
        ("Options for displaying tab completions, 'column', 'multicolumn', and "
         "'readlinelike'. These options are for `prompt_toolkit`, see "
         "`prompt_toolkit` documentation for more information."),
        default_value='multicolumn').tag(config=True)

    highlight_matching_brackets = Bool(
        True,
        help="Highlight matching brackets.",
    ).tag(config=True)

    extra_open_editor_shortcuts = Bool(
        False,
        help=
        "Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. "
        "This is in addition to the F2 binding, which is always enabled.").tag(
            config=True)

    handle_return = Any(
        None,
        help="Provide an alternative handler to be called when the user presses "
        "Return. This is an advanced option intended for debugging, which "
        "may be changed or removed in later releases.").tag(config=True)

    enable_history_search = Bool(
        True,
        help="Allows to enable/disable the prompt toolkit history search").tag(
            config=True)

    @observe('term_title')
    def init_term_title(self, change=None):
        # Enable or disable the terminal title.
        if self.term_title:
            toggle_set_term_title(True)
            set_term_title(self.term_title_format.format(cwd=abbrev_cwd()))
        else:
            toggle_set_term_title(False)

    def init_display_formatter(self):
        super(TerminalInteractiveShell, self).init_display_formatter()
        # terminal only supports plain text
        self.display_formatter.active_types = ['text/plain']
        # disable `_ipython_display_`
        self.display_formatter.ipython_display_formatter.enabled = False

    def init_prompt_toolkit_cli(self):
        if self.simple_prompt:
            # Fall back to plain non-interactive output for tests.
            # This is very limited.
            def prompt():
                prompt_text = "".join(x[1]
                                      for x in self.prompts.in_prompt_tokens())
                lines = [input(prompt_text)]
                prompt_continuation = "".join(
                    x[1] for x in self.prompts.continuation_prompt_tokens())
                while self.check_complete('\n'.join(lines))[0] == 'incomplete':
                    lines.append(input(prompt_continuation))
                return '\n'.join(lines)

            self.prompt_for_code = prompt
            return

        # Set up keyboard shortcuts
        key_bindings = create_ipython_shortcuts(self)

        # Pre-populate history from IPython's history database
        history = InMemoryHistory()
        last_cell = u""
        for __, ___, cell in self.history_manager.get_tail(
                self.history_load_length, include_latest=True):
            # Ignore blank lines and consecutive duplicates
            cell = cell.rstrip()
            if cell and (cell != last_cell):
                history.append_string(cell)
                last_cell = cell

        self._style = self._make_style_from_name_or_cls(
            self.highlighting_style)
        self.style = DynamicStyle(lambda: self._style)

        editing_mode = getattr(EditingMode, self.editing_mode.upper())

        self.pt_app = PromptSession(
            editing_mode=editing_mode,
            key_bindings=key_bindings,
            history=history,
            completer=IPythonPTCompleter(shell=self),
            enable_history_search=self.enable_history_search,
            style=self.style,
            include_default_pygments_style=False,
            mouse_support=self.mouse_support,
            enable_open_in_editor=self.extra_open_editor_shortcuts,
            color_depth=(ColorDepth.TRUE_COLOR if self.true_color else None),
            **self._extra_prompt_options())

    def _make_style_from_name_or_cls(self, name_or_cls):
        """
        Small wrapper that make an IPython compatible style from a style name

        We need that to add style for prompt ... etc.
        """
        style_overrides = {}
        if name_or_cls == 'legacy':
            legacy = self.colors.lower()
            if legacy == 'linux':
                style_cls = get_style_by_name('monokai')
                style_overrides = _style_overrides_linux
            elif legacy == 'lightbg':
                style_overrides = _style_overrides_light_bg
                style_cls = get_style_by_name('pastie')
            elif legacy == 'neutral':
                # The default theme needs to be visible on both a dark background
                # and a light background, because we can't tell what the terminal
                # looks like. These tweaks to the default theme help with that.
                style_cls = get_style_by_name('default')
                style_overrides.update({
                    Token.Number: '#007700',
                    Token.Operator: 'noinherit',
                    Token.String: '#BB6622',
                    Token.Name.Function: '#2080D0',
                    Token.Name.Class: 'bold #2080D0',
                    Token.Name.Namespace: 'bold #2080D0',
                    Token.Prompt: '#009900',
                    Token.PromptNum: '#ansibrightgreen bold',
                    Token.OutPrompt: '#990000',
                    Token.OutPromptNum: '#ansibrightred bold',
                })

                # Hack: Due to limited color support on the Windows console
                # the prompt colors will be wrong without this
                if os.name == 'nt':
                    style_overrides.update({
                        Token.Prompt: '#ansidarkgreen',
                        Token.PromptNum: '#ansigreen bold',
                        Token.OutPrompt: '#ansidarkred',
                        Token.OutPromptNum: '#ansired bold',
                    })
            elif legacy == 'nocolor':
                style_cls = _NoStyle
                style_overrides = {}
            else:
                raise ValueError('Got unknown colors: ', legacy)
        else:
            if isinstance(name_or_cls, str):
                style_cls = get_style_by_name(name_or_cls)
            else:
                style_cls = name_or_cls
            style_overrides = {
                Token.Prompt: '#009900',
                Token.PromptNum: '#ansibrightgreen bold',
                Token.OutPrompt: '#990000',
                Token.OutPromptNum: '#ansibrightred bold',
            }
        style_overrides.update(self.highlighting_style_overrides)
        style = merge_styles([
            style_from_pygments_cls(style_cls),
            style_from_pygments_dict(style_overrides),
        ])

        return style

    @property
    def pt_complete_style(self):
        return {
            'multicolumn': CompleteStyle.MULTI_COLUMN,
            'column': CompleteStyle.COLUMN,
            'readlinelike': CompleteStyle.READLINE_LIKE,
        }[self.display_completions]

    def _extra_prompt_options(self):
        """
        Return the current layout option for the current Terminal InteractiveShell
        """
        def get_message():
            return PygmentsTokens(self.prompts.in_prompt_tokens())

        return {
            'complete_in_thread': False,
            'lexer': IPythonPTLexer(),
            'reserve_space_for_menu': self.space_for_menu,
            'message': get_message,
            'prompt_continuation': (
                lambda width, lineno, is_soft_wrap: PygmentsTokens(
                    self.prompts.continuation_prompt_tokens(width))),
            'multiline': True,
            'complete_style': self.pt_complete_style,

            # Highlight matching brackets, but only when this setting is
            # enabled, and only when the DEFAULT_BUFFER has the focus.
            'input_processors': [
                ConditionalProcessor(
                    processor=HighlightMatchingBracketProcessor(chars='[](){}'),
                    filter=HasFocus(DEFAULT_BUFFER) & ~IsDone()
                    & Condition(lambda: self.highlight_matching_brackets))
            ],
            'inputhook': self.inputhook,
        }

    def prompt_for_code(self):
        if self.rl_next_input:
            default = self.rl_next_input
            self.rl_next_input = None
        else:
            default = ''

        with patch_stdout(raw=True):
            text = self.pt_app.prompt(
                default=default,
                #                pre_run=self.pre_prompt,# reset_current_buffer=True,
                **self._extra_prompt_options())
        return text

    def enable_win_unicode_console(self):
        if sys.version_info >= (3, 6):
            # Since PEP 528, Python uses the unicode APIs for the Windows
            # console by default, so WUC shouldn't be needed.
            return

        import win_unicode_console
        win_unicode_console.enable()

    def init_io(self):
        if sys.platform not in {'win32', 'cli'}:
            return

        self.enable_win_unicode_console()

        import colorama
        colorama.init()

        # For some reason we make these wrappers around stdout/stderr.
        # For now, we need to reset them so all output gets coloured.
        # https://github.com/ipython/ipython/issues/8669
        # io.std* are deprecated, but don't show our own deprecation warnings
        # during initialization of the deprecated API.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            io.stdout = io.IOStream(sys.stdout)
            io.stderr = io.IOStream(sys.stderr)

    def init_magics(self):
        super(TerminalInteractiveShell, self).init_magics()
        self.register_magics(TerminalMagics)

    def init_alias(self):
        # The parent class defines aliases that can be safely used with any
        # frontend.
        super(TerminalInteractiveShell, self).init_alias()

        # Now define aliases that only make sense on the terminal, because they
        # need direct access to the console in a way that we can't emulate in
        # GUI or web frontend
        if os.name == 'posix':
            for cmd in ['clear', 'more', 'less', 'man']:
                self.alias_manager.soft_define_alias(cmd, cmd)

    def __init__(self, *args, **kwargs):
        super(TerminalInteractiveShell, self).__init__(*args, **kwargs)
        self.init_prompt_toolkit_cli()
        self.init_term_title()
        self.keep_running = True

        self.debugger_history = InMemoryHistory()

    def ask_exit(self):
        self.keep_running = False

    rl_next_input = None

    def interact(self, display_banner=DISPLAY_BANNER_DEPRECATED):

        if display_banner is not DISPLAY_BANNER_DEPRECATED:
            warn(
                'interact `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.',
                DeprecationWarning,
                stacklevel=2)

        self.keep_running = True
        while self.keep_running:
            print(self.separate_in, end='')

            try:
                code = self.prompt_for_code()
            except EOFError:
                if (not self.confirm_exit) \
                        or self.ask_yes_no('Do you really want to exit ([y]/n)?', 'y', 'n'):
                    self.ask_exit()

            else:
                if code:
                    self.run_cell(code, store_history=True)

    def mainloop(self, display_banner=DISPLAY_BANNER_DEPRECATED):
        # An extra layer of protection in case someone mashing Ctrl-C breaks
        # out of our internal code.
        if display_banner is not DISPLAY_BANNER_DEPRECATED:
            warn(
                'mainloop `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.',
                DeprecationWarning,
                stacklevel=2)
        while True:
            try:
                self.interact()
                break
            except KeyboardInterrupt as e:
                print("\n%s escaped interact()\n" % type(e).__name__)
            finally:
                # An interrupt during the eventloop will mess up the
                # internal state of the prompt_toolkit library.
                # Stopping the eventloop fixes this, see
                # https://github.com/ipython/ipython/pull/9867
                if hasattr(self, '_eventloop'):
                    self._eventloop.stop()

    _inputhook = None

    def inputhook(self, context):
        if self._inputhook is not None:
            self._inputhook(context)

    active_eventloop = None

    def enable_gui(self, gui=None):
        if gui:
            self.active_eventloop, self._inputhook =\
                get_inputhook_name_and_func(gui)
        else:
            self.active_eventloop = self._inputhook = None

    # Run !system commands directly, not through pipes, so terminal programs
    # work correctly.
    system = InteractiveShell.system_raw

    def auto_rewrite_input(self, cmd):
        """Overridden from the parent class to use fancy rewriting prompt"""
        if not self.show_rewritten_input:
            return

        tokens = self.prompts.rewrite_prompt_tokens()
        if self.pt_app:
            print_formatted_text(PygmentsTokens(tokens),
                                 end='',
                                 style=self.pt_app.app.style)
            print(cmd)
        else:
            prompt = ''.join(s for t, s in tokens)
            print(prompt, cmd, sep='')

    _prompts_before = None

    def switch_doctest_mode(self, mode):
        """Switch prompts to classic for %doctest_mode"""
        if mode:
            self._prompts_before = self.prompts
            self.prompts = ClassicPrompts(self)
        elif self._prompts_before:
            self.prompts = self._prompts_before
            self._prompts_before = None
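
# A hedged sketch of driving the shell above through traitlets configuration,
# as one would in an ipython_config.py (all option names are taken from the
# traits defined in this class; starting the shell this way is illustrative):
from traitlets.config import Config

c = Config()
c.TerminalInteractiveShell.editing_mode = 'vi'
c.TerminalInteractiveShell.true_color = True
c.TerminalInteractiveShell.highlighting_style = 'monokai'
c.TerminalInteractiveShell.display_completions = 'readlinelike'

shell = TerminalInteractiveShell.instance(config=c)
print(shell.editing_mode)   # 'vi'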
Example #26
class Map(DOMWidget, InteractMixin):
    _view_name = Unicode('LeafletMapView').tag(sync=True)
    _model_name = Unicode('LeafletMapModel').tag(sync=True)
    _view_module = Unicode('jupyter-leaflet').tag(sync=True)
    _model_module = Unicode('jupyter-leaflet').tag(sync=True)

    _view_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)
    _model_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)

    # URL of the window where the map is displayed
    window_url = Unicode(read_only=True).tag(sync=True)

    # Map options
    center = List(def_loc).tag(sync=True, o=True)
    zoom_start = CFloat(12).tag(sync=True, o=True)
    zoom = CFloat(12).tag(sync=True, o=True)
    max_zoom = CFloat(18).tag(sync=True, o=True)
    min_zoom = CFloat(1).tag(sync=True, o=True)
    interpolation = Unicode('bilinear').tag(sync=True, o=True)
    crs = Enum(values=allowed_crs, default_value='EPSG3857').tag(sync=True)

    # Specification of the basemap
    basemap = Union(
        (Dict(), Instance(TileLayer)),
        default_value=dict(
            url='https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
            max_zoom=19,
            attribution=
            'Map data (c) <a href="https://openstreetmap.org">OpenStreetMap</a> contributors'
        ))
    modisdate = Unicode('yesterday').tag(sync=True)

    # Interaction options
    dragging = Bool(True).tag(sync=True, o=True)
    touch_zoom = Bool(True).tag(sync=True, o=True)
    scroll_wheel_zoom = Bool(False).tag(sync=True, o=True)
    double_click_zoom = Bool(True).tag(sync=True, o=True)
    box_zoom = Bool(True).tag(sync=True, o=True)
    tap = Bool(True).tag(sync=True, o=True)
    tap_tolerance = Int(15).tag(sync=True, o=True)
    world_copy_jump = Bool(False).tag(sync=True, o=True)
    close_popup_on_click = Bool(True).tag(sync=True, o=True)
    bounce_at_zoom_limits = Bool(True).tag(sync=True, o=True)
    keyboard = Bool(True).tag(sync=True, o=True)
    keyboard_pan_offset = Int(80).tag(sync=True, o=True)
    keyboard_zoom_offset = Int(1).tag(sync=True, o=True)
    inertia = Bool(True).tag(sync=True, o=True)
    inertia_deceleration = Int(3000).tag(sync=True, o=True)
    inertia_max_speed = Int(1500).tag(sync=True, o=True)
    # inertia_threshold = Int(?, o=True).tag(sync=True)
    # fade_animation = Bool(?).tag(sync=True, o=True)
    # zoom_animation = Bool(?).tag(sync=True, o=True)
    zoom_animation_threshold = Int(4).tag(sync=True, o=True)
    # marker_zoom_animation = Bool(?).tag(sync=True, o=True)
    fullscreen = Bool(False).tag(sync=True, o=True)

    options = List(trait=Unicode()).tag(sync=True)

    style = InstanceDict(MapStyle).tag(sync=True, **widget_serialization)
    default_style = InstanceDict(MapStyle).tag(sync=True,
                                               **widget_serialization)
    dragging_style = InstanceDict(MapStyle).tag(sync=True,
                                                **widget_serialization)

    zoom_control = Bool(True)
    attribution_control = Bool(True)

    @default('dragging_style')
    def _default_dragging_style(self):
        return {'cursor': 'move'}

    @default('options')
    def _default_options(self):
        return [name for name in self.traits(o=True)]

    south = Float(def_loc[0], read_only=True).tag(sync=True)
    north = Float(def_loc[0], read_only=True).tag(sync=True)
    east = Float(def_loc[1], read_only=True).tag(sync=True)
    west = Float(def_loc[1], read_only=True).tag(sync=True)

    layers = Tuple().tag(trait=Instance(Layer),
                         sync=True,
                         **widget_serialization)

    @default('layers')
    def _default_layers(self):
        basemap = (self.basemap if isinstance(self.basemap, TileLayer)
                   else basemap_to_tiles(self.basemap, self.modisdate))

        basemap.base = True

        return (basemap, )

    bounds = Tuple(read_only=True)
    bounds_polygon = Tuple(read_only=True)

    @observe('south', 'north', 'east', 'west')
    def _observe_bounds(self, change):
        self.set_trait('bounds',
                       ((self.south, self.west), (self.north, self.east)))
        self.set_trait('bounds_polygon',
                       ((self.north, self.west), (self.north, self.east),
                        (self.south, self.east), (self.south, self.west)))

    def __init__(self, **kwargs):
        self.zoom_control_instance = None
        self.attribution_control_instance = None

        super(Map, self).__init__(**kwargs)
        self.on_msg(self._handle_leaflet_event)

        if self.zoom_control:
            self.zoom_control_instance = ZoomControl()
            self.add_control(self.zoom_control_instance)

        if self.attribution_control:
            self.attribution_control_instance = AttributionControl(
                position='bottomright')
            self.add_control(self.attribution_control_instance)

    @observe('zoom_control')
    def observe_zoom_control(self, change):
        if change['new']:
            self.zoom_control_instance = ZoomControl()
            self.add_control(self.zoom_control_instance)
        else:
            if self.zoom_control_instance is not None and self.zoom_control_instance in self.controls:
                self.remove_control(self.zoom_control_instance)

    @observe('attribution_control')
    def observe_attribution_control(self, change):
        if change['new']:
            self.attribution_control_instance = AttributionControl(
                position='bottomright')
            self.add_control(self.attribution_control_instance)
        else:
            if self.attribution_control_instance is not None and self.attribution_control_instance in self.controls:
                self.remove_control(self.attribution_control_instance)

    _layer_ids = List()

    @validate('layers')
    def _validate_layers(self, proposal):
        '''Validate layers list.

        Makes sure only one instance of any given layer can exist in the
        layers list.
        '''
        self._layer_ids = [l.model_id for l in proposal.value]
        if len(set(self._layer_ids)) != len(self._layer_ids):
            raise LayerException(
                'duplicate layer detected, only use each layer once')
        return proposal.value

    def add_layer(self, layer):
        if isinstance(layer, dict):
            layer = basemap_to_tiles(layer)
        if layer.model_id in self._layer_ids:
            raise LayerException('layer already on map: %r' % layer)
        self.layers = tuple([l for l in self.layers] + [layer])

    def remove_layer(self, layer):
        if layer.model_id not in self._layer_ids:
            raise LayerException('layer not on map: %r' % layer)
        self.layers = tuple(
            [l for l in self.layers if l.model_id != layer.model_id])

    def substitute_layer(self, old, new):
        if isinstance(new, dict):
            new = basemap_to_tiles(new)
        if old.model_id not in self._layer_ids:
            raise LayerException(
                'Could not substitute layer: layer not on map.')
        self.layers = tuple(
            [new if l.model_id == old.model_id else l for l in self.layers])

    def clear_layers(self):
        self.layers = ()

    controls = Tuple().tag(trait=Instance(Control),
                           sync=True,
                           **widget_serialization)
    _control_ids = List()

    @validate('controls')
    def _validate_controls(self, proposal):
        '''Validate controls list.

        Makes sure only one instance of any given control can exist in the
        controls list.
        '''
        self._control_ids = [c.model_id for c in proposal.value]
        if len(set(self._control_ids)) != len(self._control_ids):
            raise ControlException(
                'duplicate control detected, only use each control once')
        return proposal.value

    def add_control(self, control):
        if control.model_id in self._control_ids:
            raise ControlException('control already on map: %r' % control)
        self.controls = tuple([c for c in self.controls] + [control])

    def remove_control(self, control):
        if control.model_id not in self._control_ids:
            raise ControlException('control not on map: %r' % control)
        self.controls = tuple(
            [c for c in self.controls if c.model_id != control.model_id])

    def clear_controls(self):
        self.controls = ()

    def save(self, outfile, **kwargs):
        """Save the Map to an .html file.

        Parameters
        ----------
        outfile: str or file-like object
            The file to write the HTML output to.
        kwargs: keyword-arguments
            Extra parameters to pass to the ipywidgets.embed.embed_minimal_html function.
        """
        embed_minimal_html(outfile, views=[self], **kwargs)

    def __iadd__(self, item):
        if isinstance(item, Layer):
            self.add_layer(item)
        elif isinstance(item, Control):
            self.add_control(item)
        return self

    def __isub__(self, item):
        if isinstance(item, Layer):
            self.remove_layer(item)
        elif isinstance(item, Control):
            self.remove_control(item)
        return self

    def __add__(self, item):
        if isinstance(item, Layer):
            self.add_layer(item)
        elif isinstance(item, Control):
            self.add_control(item)
        return self

    # Event handling
    _interaction_callbacks = Instance(CallbackDispatcher, ())

    def _handle_leaflet_event(self, _, content, buffers):
        if content.get('event', '') == 'interaction':
            self._interaction_callbacks(**content)

    def on_interaction(self, callback, remove=False):
        self._interaction_callbacks.register_callback(callback, remove=remove)
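
# A minimal usage sketch for the Map widget above (assumes ipyleaflet is
# installed and a Jupyter frontend renders the widget; Marker comes from
# ipyleaflet and is not shown in this example; 'map.html' is an arbitrary
# output path):
from ipyleaflet import Marker

m = Map(center=(52.2, 21.0), zoom=10)
m += Marker(location=(52.2, 21.0))   # __iadd__ dispatches to add_layer()
m.save('map.html')                   # embeds the widget state in standalone HTML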
Example #27
class PointCloud(PluginBlock):
    _view_name = Unicode('PointCloudView').tag(sync=True)
    _model_name = Unicode('PointCloudModel').tag(sync=True)

    points_size = Float(3.).tag(sync=True)
    percentage_points = Float(1.).tag(sync=True)
    distribution = Enum(('ordered', 'random'),
                        default_value='ordered').tag(sync=True)
    mode = Enum(('volume', 'surface'), default_value='volume').tag(sync=True)

    def interact(self):
        if not self.initialized_widgets:
            self._init_pointcloud_widgets()
            self.initialized_widgets = True

        widgets = (self.points_size_wid, self.percentage_points_wid,
                   self.distribution_wid)
        if self.mode_wid is not None:
            widgets = widgets + (self.mode_wid, )

        return HBox(self._interact() + (VBox(widgets), ))

    def __init__(self, *args, **kwargs):
        super(PointCloud, self).__init__(*args, **kwargs)
        self.initialized_widgets = False
        self.points_size_wid = None
        self.percentage_points_wid = None
        self.distribution_wid = None
        self.mode_wid = None

    def _init_pointcloud_widgets(self):
        self.points_size_wid = FloatSlider(description='Size',
                                           min=1.,
                                           max=20.,
                                           value=self.points_size)
        self.percentage_points_wid = FloatSlider(description='Nb points',
                                                 step=0.01,
                                                 min=0.0,
                                                 max=1.0,
                                                 value=self.percentage_points,
                                                 readout_format='.2%')
        self.distribution_wid = ToggleButtons(description='Distribution',
                                              options=['ordered', 'random'],
                                              value=self.distribution)

        # Check if it's a volumetric mesh
        block = self
        while not isinstance(block, DataBlock):
            block = block._parent_block
        if len(block.mesh.tetrahedrons) != 0:
            self.mode_wid = ToggleButtons(description='Mode',
                                          options=['volume', 'surface'],
                                          value=self.mode)

            link((self, 'mode'), (self.mode_wid, 'value'))

        link((self, 'points_size'), (self.points_size_wid, 'value'))
        link((self, 'percentage_points'),
             (self.percentage_points_wid, 'value'))
        link((self, 'distribution'), (self.distribution_wid, 'value'))

    def _validate_parent(self, parent):
        block = parent
        while not isinstance(block, DataBlock):
            if isinstance(block, VectorField) or isinstance(block, PointCloud):
                raise RuntimeError(
                    'Cannot apply a PointCloud after a VectorField effect or a PointCloud effect'
                )
            block = block._parent_block
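
# A hedged usage sketch (the DataBlock/plugin wiring comes from the host
# library and is not shown here; displaying the widgets requires a Jupyter
# frontend):
#
#     cloud = PointCloud(points_size=5., distribution='random')
#     cloud.interact()   # returns an HBox combining the parent's widgets
#                        # with the sliders and toggles linked above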
Example #28
class JupytextConfiguration(Configurable):
    """Jupytext Configuration's options"""

    formats = Union(
        [Unicode(), List(Unicode()), Dict(Unicode)],
        help="Save notebooks to these file extensions. "
        "Can be any of ipynb,Rmd,md,jl,py,R,nb.jl,nb.py,nb.R "
        "comma separated. If you want another format than the "
        "default one, append the format name to the extension, "
        "e.g. ipynb,py:percent to save the notebook to "
        "hydrogen/spyder/vscode compatible scripts",
        config=True,
    )
    default_jupytext_formats = Unicode(
        help="Deprecated. Use 'formats' instead", config=True)

    preferred_jupytext_formats_save = Unicode(
        help="Preferred format when saving notebooks as text, per extension. "
        'Use "jl:percent,py:percent,R:percent" if you want to save '
        "Julia, Python and R scripts in the double percent format and "
        'only write "jupytext_formats": "py" in the notebook metadata.',
        config=True,
    )

    preferred_jupytext_formats_read = Unicode(
        help="Preferred format when reading notebooks from text, per "
        'extension. Use "py:sphinx" if you want to read all python '
        "scripts as Sphinx gallery scripts.",
        config=True,
    )

    notebook_metadata_filter = Unicode(
        help="Notebook metadata that should be saved in the text representations. "
        "Examples: 'all', '-all', 'widgets,nteract', 'kernelspec,jupytext-all'",
        config=True,
    )

    default_notebook_metadata_filter = Unicode(
        "",
        help="Deprecated. Use 'notebook_metadata_filter' instead",
        config=True)

    hide_notebook_metadata = Enum(
        values=[True, False],
        allow_none=True,
        help=
        "Should the notebook metadata be wrapped into an HTML comment in the Markdown format?",
        config=True,
    )

    root_level_metadata_as_raw_cell = Bool(
        True,
        help=
        "Should the root level metadata of text documents (like the fields 'title' or 'author' in "
        "R Markdown document) appear as a raw cell in the notebook (True), or go to the notebook"
        "metadata?",
        config=True,
    )

    cell_metadata_filter = Unicode(
        help="Cell metadata that should be saved in the text representations. "
        "Examples: 'all', 'hide_input,hide_output'",
        config=True,
    )

    default_cell_metadata_filter = Unicode(
        "", help="Deprecated. Use 'cell_metadata_filter' instead", config=True)

    comment_magics = Enum(
        values=[True, False],
        allow_none=True,
        help=
        "Should Jupyter magic commands be commented out in the text representation?",
        config=True,
    )

    split_at_heading = Bool(
        False,
        help=
        "Split markdown cells on headings (Markdown and R Markdown formats only)",
        config=True,
    )

    sphinx_convert_rst2md = Bool(
        False,
        help=
        "When opening a Sphinx Gallery script, convert the reStructuredText to markdown",
        config=True,
    )

    doxygen_equation_markers = Bool(
        False,
        help="Should equation markers use the DOxygen format? "
        "(see https://github.com/mwouts/jupytext/issues/517)",
        config=True,
    )

    outdated_text_notebook_margin = Float(
        1.0,
        help="Refuse to overwrite inputs of a ipynb notebooks with those of a "
        "text notebook when the text notebook plus margin is older than "
        "the ipynb notebook (NB: This option is ignored by Jupytext CLI)",
        config=True,
    )

    cell_markers = Unicode(
        help=
        'Start and end cell markers for the light format, comma separated. Use "{{{,}}}" to mark cells '
        'as foldable regions in Vim, and "region,endregion" to mark cells as VS Code/PyCharm regions',
        config=True,
    )

    default_cell_markers = Unicode(
        help="Deprecated. Use 'cell_markers' instead", config=True)

    notebook_extensions = Union(
        [List(Unicode(), NOTEBOOK_EXTENSIONS),
         Unicode()],
        help="A list of notebook extensions",
        config=True,
    )

    custom_cell_magics = Unicode(
        help=
        'A comma separated list of cell magics. Use e.g. custom_cell_magics = "configure,local" '
        'if you want code cells starting with the Spark magic cell commands "configure" and "local" '
        "to be commented out when converted to scripts.",
        config=True,
    )

    def set_default_format_options(self, format_options, read=False):
        """Set default format option"""
        if self.default_notebook_metadata_filter:
            warnings.warn(
                "The option 'default_notebook_metadata_filter' is deprecated. "
                "Please use 'notebook_metadata_filter' instead.",
                FutureWarning,
            )
            format_options.setdefault("notebook_metadata_filter",
                                      self.default_notebook_metadata_filter)
        if self.notebook_metadata_filter:
            format_options.setdefault("notebook_metadata_filter",
                                      self.notebook_metadata_filter)
        if self.default_cell_metadata_filter:
            warnings.warn(
                "The option 'default_cell_metadata_filter' is deprecated. "
                "Please use 'cell_metadata_filter' instead.",
                FutureWarning,
            )
            format_options.setdefault("cell_metadata_filter",
                                      self.default_cell_metadata_filter)
        if self.cell_metadata_filter:
            format_options.setdefault("cell_metadata_filter",
                                      self.cell_metadata_filter)
        if self.hide_notebook_metadata is not None:
            format_options.setdefault("hide_notebook_metadata",
                                      self.hide_notebook_metadata)
        if self.root_level_metadata_as_raw_cell is False:
            format_options.setdefault("root_level_metadata_as_raw_cell",
                                      self.root_level_metadata_as_raw_cell)
        if self.comment_magics is not None:
            format_options.setdefault("comment_magics", self.comment_magics)
        if self.split_at_heading:
            format_options.setdefault("split_at_heading",
                                      self.split_at_heading)
        if self.doxygen_equation_markers:
            format_options.setdefault("doxygen_equation_markers",
                                      self.doxygen_equation_markers)
        if not read:
            if self.default_cell_markers:
                warnings.warn(
                    "The option 'default_cell_markers' is deprecated. "
                    "Please use 'cell_markers' instead.",
                    FutureWarning,
                )
                format_options.setdefault("cell_markers",
                                          self.default_cell_markers)
            if self.cell_markers:
                format_options.setdefault("cell_markers", self.cell_markers)
        if read and self.sphinx_convert_rst2md:
            format_options.setdefault("rst2md", self.sphinx_convert_rst2md)
        if self.custom_cell_magics:
            format_options.setdefault("custom_cell_magics",
                                      self.custom_cell_magics)

    def default_formats(self, path):
        """Return the default formats, if they apply to the current path #157"""
        from .paired_paths import InconsistentPath, base_path

        if self.default_jupytext_formats:
            warnings.warn(
                "The option 'default_jupytext_formats' is deprecated. "
                "Please use 'formats' instead.",
                FutureWarning,
            )

        formats = self.formats or self.default_jupytext_formats
        for fmt in long_form_multiple_formats(formats):
            try:
                base_path(path, fmt)
                return formats
            except InconsistentPath:
                continue

        return None
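
A short, hedged sketch of how the options above combine, using only the methods defined in this class (the path is hypothetical):

config = JupytextConfiguration()
config.formats = "ipynb,py:percent"       # pair notebooks with percent scripts
config.notebook_metadata_filter = "-all"  # drop notebook metadata in text files

format_options = {}
config.set_default_format_options(format_options)
# format_options is now {'notebook_metadata_filter': '-all'}

# returns "ipynb,py:percent" when the path matches one of the paired formats
config.default_formats("notebooks/analysis.ipynb")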
Exemple #29
0
class Flow(Tool):
    '''
    A Flow-based framework. It executes steps in a sequential or
    multiprocess environment.
    Steps are user-defined Python classes, and the flow is configured in a
    JSON file. The multiprocess mode is based on the ZeroMQ library
    (http://zeromq.org) to pass messages between processes. ZMQ makes it
    possible to stay away from classic concurrency mechanisms such as
    mutexes, critical sections and semaphores, while remaining process safe.
    Passing data between steps is managed by the router.
    If a step is executed by several processes, the router uses the LRU
    (least recently used) pattern to choose the process that will receive
    the next data. The router also manages a queue for each step.
    '''
    description = 'run stages in multiprocess Flow based framework'
    gui = Bool(False, help='send status to GUI').tag(config=True)
    gui_address = Unicode('localhost:5565',
                          help='GUI address and port').tag(config=True)
    mode = Enum(['sequential', 'multiprocess'],
                default_value='sequential',
                help='Flow mode',
                allow_none=True).tag(config=True)
    producer_conf = Dict(help='producer description: name, module, class',
                         allow_none=False).tag(config=True)
    stagers_conf = List(help='list of stager descriptions, in a set order',
                        allow_none=False).tag(config=True)
    consumer_conf = Dict(default_value={
        'name': 'CONSUMER',
        'class': 'Producer',
        'module': 'producer',
        'prev': 'STAGE1'
    },
                         help='consumer description: name, module, class',
                         allow_none=False).tag(config=True)
    ports_list = list(range(5555, 5600, 1))
    zmq_ports = List(ports_list, help='ZMQ ports').tag(config=True)
    aliases = Dict({
        'gui_address': 'Flow.gui_address',
        'mode': 'Flow.mode',
        'gui': 'Flow.gui'
    })
    examples = ('prompt%> ctapipe-flow '
                '--config=examples/flow/switch.json')

    PRODUCER = 'PRODUCER'
    STAGER = 'STAGER'
    CONSUMER = 'CONSUMER'
    ROUTER = 'ROUTER'

    producer = None
    consumer = None
    stagers = list()
    router = None
    producer_step = None
    stager_steps = None
    consumer_step = None
    step_process = list()
    router_process = None
    ports = dict()

    def setup(self):
        if self.init() is False:
            self.log.error('Could not initialise Flow based framework')
            exit()

    def init(self):
        '''
        Create producer, stager and consumer instances according to
        the configuration

        Returns
        -------
        bool : True if the Flow based framework is correctly set up and all
            producer, stager and consumer instances are initialised,
            otherwise False
        '''
        # Verify that the configuration file exists
        if not path.isfile(self.config_file):
            self.log.error(
                'Could not open Flow based framework config_file {}'.format(
                    self.config_file))
            return False
        if not self.generate_steps():
            self.log.error("Error during steps generation")
            return False
        if self.gui:
            self.context = zmq.Context()
            self.socket_pub = self.context.socket(zmq.PUB)
            if not self.connect_gui():
                return False
        if self.mode == 'sequential':
            return self.init_sequential()
        elif self.mode == 'multiprocess':
            return self.init_multiprocess()
        else:
            self.log.error("{} is not a valid mode for"
                           "Flow based framework".format(self.mode))

    def init_multiprocess(self):
        """
        Initialise Flow for multiprocess mode

        Returns
        -------
        True if every initialisation is correct,
        otherwise False
        """
        if not self.configure_ports():
            return False
        if not self.configure_producer():
            return False
        router_names = self.add_consumer_to_router()
        if not self.configure_consumer():
            return False
        if not self.configure_stagers(router_names):
            return False
        gui_address = None
        if self.gui:
            gui_address = self.gui_address
        self.router = RouterQueue(connections=router_names,
                                  gui_address=gui_address)
        for step in self.stager_steps:
            for t in step.process:
                self.step_process.append(t)
        self.display_conf()
        return True

    def init_sequential(self):
        """
        Initialise Flow for sequential mode

        Returns
        -------
        True if every initialisation is correct,
        otherwise False
        """
        self.configure_ports()
        self.sequential_instances = dict()
        # set coroutines
        # producer
        conf = self.get_step_conf(self.producer_step.name)
        module = conf['module']
        class_name = conf['class']
        try:
            coroutine = dynamic_class_from_module(class_name, module, self)
        except DynamicClassError as e:
            self.log.error('{}'.format(e))
            return False

        self.producer = ProducerSequential(
            coroutine,
            name=self.producer_step.name,
            connections=self.producer_step.connections,
            main_connection_name=self.producer_step.main_connection_name)
        self.producer.init()
        self.producer_step.process.append(self.producer)
        self.sequential_instances[self.producer_step.name] = self.producer
        # stages
        for step in self.stager_steps:
            conf = self.get_step_conf(step.name)
            module = conf['module']
            class_name = conf['class']
            try:
                coroutine = dynamic_class_from_module(class_name, module, self)
            except DynamicClassError as e:
                self.log.error('{}'.format(e))
                return False

            stage = StagerSequential(
                coroutine,
                name=step.name,
                connections=step.connections,
                main_connection_name=step.main_connection_name)
            step.process.append(stage)
            self.sequential_instances[step.name] = stage
            self.stagers.append(stage)
            stage.init()
        # consumer
        conf = self.get_step_conf(self.consumer_step.name)
        module = conf['module']
        class_name = conf['class']
        try:
            coroutine = dynamic_class_from_module(class_name, module, self)
        except DynamicClassError as e:
            self.log.error('{}'.format(e))
            return False
        self.consumer = ConsumerSequential(coroutine, name=conf['name'])
        self.consumer_step.process.append(self.consumer)
        self.consumer.init()
        self.sequential_instances[self.consumer_step.name] = self.consumer
        self.display_conf()
        return True

    def configure_stagers(self, router_names):
        """ Creates Processes with users's coroutines for all stages        
        Parameters
        ----------
        router_names: List
            List to fill with routers name        
        Returns
        -------
        True if every instantiation is correct
        Otherwise False
        """
        # STAGERS
        for stager_step in self.stager_steps:
            # each stage needs a router to connect it to the previous stages
            name = stager_step.name + '_' + 'router'
            router_names[name] = [
                self.ports[stager_step.name + '_in'],
                self.ports[stager_step.name + '_out'], stager_step.queue_limit
            ]

            for i in range(stager_step.nb_process):
                conf = self.get_step_conf(stager_step.name)
                try:
                    stager_zmq = self.instantiation(
                        stager_step.name,
                        self.STAGER,
                        process_name=stager_step.name + '$$process_number$$' +
                        str(i),
                        port_in=stager_step.port_in,
                        connections=stager_step.connections,
                        main_connection_name=stager_step.main_connection_name,
                        config=conf)
                except FlowError as e:
                    self.log.error(e)
                    return False
                self.stagers.append(stager_zmq)
                stager_step.process.append(stager_zmq)
        return True

    def configure_consumer(self):
        """ Creates consumer Processes with users's coroutines        
        Returns
        -------
        True if every instantiation is correct
        Otherwise False
        """
        try:
            consumer_zmq = self.instantiation(
                self.consumer_step.name,
                self.CONSUMER,
                port_in=self.consumer_step.port_in,
                config=self.consumer_conf)
        except FlowError as e:
            self.log.error(e)
            return False
        self.consumer = consumer_zmq
        return True

    def add_consumer_to_router(self):
        """ Create router_names dictionary and
        Add consumer router ports        
        Returns
        -------
        The new router_names dictionary
        """
        # ROUTER
        router_names = dict()
        # each stage needs a router to connect it to the previous stages
        name = self.consumer_step.name + '_' + 'router'
        router_names[name] = [
            self.ports[self.consumer_step.name + '_in'],
            self.ports[self.consumer_step.name + '_out'],
            self.consumer_step.queue_limit
        ]
        return router_names

    def configure_producer(self):
        """ Creates producer Process with users's coroutines        
        Returns
        -------
        True if every instatiation is correct
        Otherwise False
        """
        # PRODUCER
        try:
            producer_zmq = self.instantiation(
                self.producer_step.name,
                self.PRODUCER,
                connections=self.producer_step.connections,
                main_connection_name=self.producer_step.main_connection_name,
                config=self.producer_conf)
        except FlowError as e:
            self.log.error(e)
            return False
        self.producer = producer_zmq
        return True

    def connect_gui(self):
        """ Connect ZMQ socket to send information to GUI        
        Returns
        -------
        True if everything correct
        Otherwise False
        """
        # Get port for GUI
        if self.gui_address is not None:
            try:
                self.socket_pub.connect('tcp://' + self.gui_address)
            except zmq.error.ZMQError as e:
                self.log.error('{} tcp://{}'.format(e, self.gui_address))
                return False
        return True

    def generate_steps(self):
        """ Generate Flow based framework steps from configuration

        Returns
        -------
        True if everything is correct,
        otherwise False
        """
        self.producer_step = self.get_pipe_steps(self.PRODUCER)
        self.stager_steps = self.get_pipe_steps(self.STAGER)
        self.consumer_step = self.get_pipe_steps(self.CONSUMER)
        if not self.producer_step:
            self.log.error("No producer in configuration")
            return False
        if not self.consumer_step:
            self.log.error("No consumer in configuration")
            return False
        return True

    def configure_ports(self):
        """
        Configures producer, stager and consumer ZMQ ports
        Returns
        -------
        True if everything is correct,
        otherwise False
        """
        # configure connections (zmq port) for producer (one per next step)
        try:
            for next_step_name in self.producer_step.next_steps_name:
                if next_step_name + '_in' not in self.ports:
                    self.ports[next_step_name + '_in'] = str(
                        self.zmq_ports.pop())
                self.producer_step.connections[next_step_name] = self.ports[
                    next_step_name + '_in']
            self.producer_step.main_connection_name = (
                self.producer_step.next_steps_name[0])

            # configure port_in and connections (zmq port)
            # for all stages (one per next step)
            for stage in self.stager_steps:
                if stage.name + '_out' not in self.ports:
                    self.ports[stage.name + '_out'] = str(self.zmq_ports.pop())
                stage.port_in = self.ports[stage.name + '_out']
                for next_step_name in stage.next_steps_name:
                    if next_step_name + '_in' not in self.ports:
                        self.ports[next_step_name + '_in'] = str(
                            self.zmq_ports.pop())
                    stage.connections[next_step_name] = self.ports[
                        next_step_name + '_in']
                stage.main_connection_name = stage.next_steps_name[0]

            # configure port-in  (zmq port) for consumer
            if self.consumer_step.name + '_out' not in self.ports:
                self.ports[self.consumer_step.name + '_out'] = str(
                    self.zmq_ports.pop())
            self.consumer_step.port_in = self.ports[self.consumer_step.name +
                                                    '_out']
            return True
        except IndexError:
            self.log.error("Not enough ZMQ ports. Consider adding some ports "
                           "to the configuration.")
            return False
        except Exception as e:
            self.log.error("Could not configure ZMQ ports. {}".format(e))
            return False

    def get_step_by_name(self, name):
        ''' Find a PipeStep in self.producer_step, self.stager_steps or
        self.consumer_step
        Parameters
        ----------
        name : str
            step name
        Returns
        -------
        PipeStep if found, otherwise None
        '''
        for step in (self.stager_steps +
                     [self.producer_step, self.consumer_step]):
            if step.name == name:
                return step
        return None

    def instantiation(self,
                      name,
                      stage_type,
                      process_name=None,
                      port_in=None,
                      connections=None,
                      main_connection_name=None,
                      config=None):
        '''
        Instantiate a Python object from a name found in the configuration
        Parameters
        ----------
        name : str
            stage name
        stage_type : str
        process_name : str
        port_in : str
            step ZMQ input port
        connections : dict
            key: step name, value: connection ZMQ ports
        main_connection_name : str
            main ZMQ connection name, used when the user does not specify one
        '''
        stage = self.get_step_conf(name)
        module = stage['module']
        class_name = stage['class']
        obj = dynamic_class_from_module(class_name, module, self)
        if obj is None:
            raise FlowError('Cannot create instance of ' + name)
        obj.name = name
        if stage_type == self.STAGER:
            process = StagerZmq(obj,
                                port_in,
                                process_name,
                                connections=connections,
                                main_connection_name=main_connection_name)
        elif stage_type == self.PRODUCER:
            process = ProducerZmq(obj,
                                  name,
                                  connections=connections,
                                  main_connection_name=main_connection_name)
        elif stage_type == self.CONSUMER:
            process = ConsumerZMQ(obj, port_in, name)
        else:
            raise FlowError('Cannot create instance of {}. Type {} '
                            'does not exist.'.format(name, stage_type))
        # set the coroutine socket to its stager or producer socket
        return process

    def get_pipe_steps(self, role):
        '''
        Create Flow based framework steps from the configuration,
        filtered by role
        Parameters
        ----------
        role: str
            role of the steps to add to the result
            Accepted values: self.PRODUCER, self.STAGER, self.CONSUMER
        Returns
        -------
        PRODUCER, CONSUMER: the single step with that role
        STAGER: list of the steps with that role
        '''
        # Create producer step
        try:
            if role == self.PRODUCER:
                prod_step = PipeStep(self.producer_conf['name'])
                prod_step.type = self.PRODUCER
                prod_step.next_steps_name = self.producer_conf[
                    'next_steps'].split(',')
                return prod_step
            elif role == self.STAGER:
                # Create stagers steps
                result = list()
                for stage_conf in self.stagers_conf:
                    try:
                        nb_process = int(stage_conf['nb_process'])
                    except Exception:
                        nb_process = 1
                    next_steps_name = stage_conf['next_steps'].split(',')
                    try:
                        queue_limit = stage_conf['queue_limit']
                    except Exception:
                        queue_limit = -1
                    stage_step = PipeStep(stage_conf['name'],
                                          next_steps_name=next_steps_name,
                                          nb_processes=nb_process,
                                          queue_limit=queue_limit)
                    stage_step.type = self.STAGER
                    result.append(stage_step)
                return result
            elif role == self.CONSUMER:
                # Create consumer step
                try:
                    queue_limit = self.consumer_conf['queue_limit']
                except KeyError:
                    queue_limit = -1
                cons_step = PipeStep(self.consumer_conf['name'],
                                     queue_limit=queue_limit)
                cons_step.type = self.CONSUMER
                return cons_step
            return None
        except KeyError:
            return None

    def def_step_for_gui(self):
        '''
        Create a list (levels_for_gui) containing all steps

        Returns
        -------
        the created list and the current time
        '''
        levels_for_gui = list()

        levels_for_gui.append(
            StagerRep(self.producer_step.name,
                      self.producer_step.next_steps_name,
                      nb_job_done=self.producer.nb_job_done,
                      running=self.producer.running,
                      step_type=StagerRep.PRODUCER))
        for step in self.stager_steps:
            nb_job_done = 0
            running = 0
            if self.mode == 'sequential':
                running = step.process[0].running
                nb_job_done = step.process[0].nb_job_done
                levels_for_gui.append(
                    StagerRep(step.name,
                              step.next_steps_name,
                              nb_job_done=nb_job_done,
                              running=running,
                              nb_process=len(step.process)))

            elif self.mode == 'multiprocess':
                for process in step.process:
                    nb_job_done += process.nb_job_done
                    running += process.running
                levels_for_gui.append(
                    StagerRep(process.name,
                              step.next_steps_name,
                              nb_job_done=nb_job_done,
                              running=running,
                              nb_process=len(step.process)))

        levels_for_gui.append(
            StagerRep(self.consumer_step.name,
                      nb_job_done=self.consumer.nb_job_done,
                      running=self.consumer.running,
                      step_type=StagerRep.CONSUMER))

        return (levels_for_gui, time())

    def display_conf(self):
        ''' Print steps and their next_steps
        '''
        self.log.info('')
        self.log.info(
            '------------------ Flow configuration ------------------')
        for step in ([self.producer_step] + self.stager_steps +
                     [self.consumer_step]):
            if self.mode == 'multiprocess':
                self.log.info('step {} (nb process {}) '.format(
                    step.name, str(step.nb_process)))
            else:
                self.log.info('step {}'.format(step.name))
            for next_step_name in step.next_steps_name:
                self.log.info('--> next {} '.format(next_step_name))
        self.log.info(
            '------------------ End Flow configuration ------------------')
        self.log.info('')

    def display_statistics(self):
        """
        Log each StagerRep statistic
        """
        steps, _ = self.def_step_for_gui()
        for step in steps:
            self.log.info(step.get_statistics())

    def start(self):
        """ run the Flow based framework steps
        """
        if self.mode == 'multiprocess':
            self.start_multiprocess()
        elif self.mode == 'sequential':
            self.start_sequential()

    def start_sequential(self):
        """ run the Flow based framework in sequential mode
        """
        if self.gui:
            self.socket_pub.send_multipart([b'MODE', dumps('sequential')])
        start_time = time()
        # self.producer.running = 0
        # Get producer instance's generator
        self.producer = self.sequential_instances[self.producer_step.name]
        # execute producer run coroutine
        prod_gen = self.producer.run()
        # only for gui
        if self.gui:
            self.producer.running = 1
            self.send_status_to_gui()
        # for each producer output
        for prod_result in prod_gen:
            if self.gui:
                self.producer.running = 0
                self.send_status_to_gui()
            # get next stage destination and input from producer output
            msg, destination = prod_result
            # run each steps until consumer return
            if msg is not None:
                destination, msg = self.run_generator(destination, msg)
            if self.gui:
                self.producer.running = 1
                self.send_status_to_gui()
        if self.gui:
            self.consumer.running = 0
            self.send_status_to_gui()
            # execute finish method for all steps
        for step in self.sequential_instances.values():
            step.finish()
        end_time = time()
        self.log.info('=== SEQUENTIAL MODE END ===')
        self.log.info('Compute time {} sec'.format(end_time - start_time))
        self.display_statistics()
        # send finish to GUI and close connections
        if self.gui:
            self.socket_pub.send_multipart([b'FINISH', dumps('finish')])
            self.socket_pub.close()
            self.context.destroy()
            self.context.term()

    def run_generator(self, destination, msg):
        """ Get step for destination. Create a genetor from its run method.
        re-enter in run_generator until Generator send values        
        Parameters
        ----------
        destination: str
            Next step name
        msg: a Pickle dumped msg        
        Returns
        -------
        Next destination and msg
        """
        stage = self.sequential_instances[destination]
        stage.running = 1
        if self.gui:
            self.send_status_to_gui()
        stage_gen = stage.run(msg)
        stage.running = 0
        if stage_gen:
            for result in stage_gen:
                if result:
                    msg, destination = result
                    destination, msg = self.run_generator(destination, msg)
                else:
                    msg = destination = None
        else:
            msg = destination = None
        return (msg, destination)

    def send_status_to_gui(self):
        """
        Update all StagerRep status and send them to GUI
        """
        self.socket_pub.send_multipart([b'MODE', dumps(self.mode)])
        levels_gui, conf_time = self.def_step_for_gui()
        self.socket_pub.send_multipart(
            [b'GUI_GRAPH', dumps([conf_time, levels_gui])])

    def start_multiprocess(self):
        ''' Start all Flow based framework processes.
        Regularly inform the GUI of the Flow based framework configuration,
        in case a new GUI instance was launched.
        Stop all processes without losing data
        '''
        # send the Flow based framework configuration to an optional GUI instance
        if self.gui:
            self.send_status_to_gui()
        start_time = time()
        # Start all process
        self.consumer.start()
        self.router.start()
        for stage in self.stagers:
            stage.start()
        self.producer.start()
        # Wait for the producer's run method to end
        self.wait_and_send_levels(self.producer)

        # Ensure that all queues are empty and all processes have been
        # waiting for new data for more than a specific time
        while not self.wait_all_stagers(1000):  # 1000 ms
            if self.gui:
                self.send_status_to_gui()
            sleep(1)

        # Now send stop to the stage processes and wait for them to join
        for worker in self.step_process:
            self.wait_and_send_levels(worker)
        # Stop consumer and router process
        self.wait_and_send_levels(self.consumer)
        self.wait_and_send_levels(self.router)
        if self.gui:
            self.send_status_to_gui()
        # Wait 1 s to be sure this message will be displayed
        end_time = time()
        self.log.info('=== MULTIPROCESS MODE END ===')
        self.log.info('Compute time {} sec'.format(end_time - start_time))
        self.display_statistics()

        sleep(1)
        if self.gui:
            self.socket_pub.send_multipart([b'FINISH', dumps('finish')])
            self.socket_pub.close()
            self.context.destroy()
            self.context.term()

    def wait_all_stagers(self, mintime):
        """ Verify id all steps (stage + consumers) are finished their
        jobs and waiting        
        Returns
        -------
        True if all stages queue are empty and all Processes
        wait since mintime
        Otherwise False
        """
        if self.router.total_queue_size == 0:
            for worker in self.step_process:
                if worker.wait_since < mintime:  # mintime in ms
                    return False
            return True
        return False

    def finish(self):
        self.log.info('===== Flow END ======')

    def wait_and_send_levels(self, processes_to_wait):
        '''
        Wait for a process to join, and regularly send the Flow based
        framework state to the GUI in case a GUI connects later
        Parameters
        ----------
        processes_to_wait : process
            process to join
        '''
        processes_to_wait.stop = 1

        while True:
            processes_to_wait.join(timeout=.1)
            if self.gui:
                self.send_status_to_gui()
            if not processes_to_wait.is_alive():
                return

    def get_step_conf(self, name):
        '''
        Search for a step by its name in the self.stagers_conf list,
        self.producer_conf and self.consumer_conf
        Parameters
        ----------
        name : str
            stage name

        Returns
        -------
        The matching step configuration, or None if not found
        '''
        if self.producer_conf['name'] == name:
            return self.producer_conf
        if self.consumer_conf['name'] == name:
            return self.consumer_conf
        for step in self.stagers_conf:
            if step['name'] == name:
                return step
        return None

    def get_stager_indice(self, name):
        '''
        Search for a step by its name in the self.stagers_conf list
        Parameters
        ----------
        name : str
            stage name
        Returns
        -------
        index in the list, -1 if not found
        '''
        for index, step in enumerate(self.stagers_conf):
            if step['name'] == name:
                return index
        return -1
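
A hedged sketch of the step configuration the traits above expect; the step, module and class names are hypothetical, but the keys match those read by get_pipe_steps and get_step_conf:

flow = Flow()
flow.producer_conf = {
    'name': 'PRODUCER',
    'module': 'my_producer',   # hypothetical module
    'class': 'MyProducer',     # hypothetical class
    'next_steps': 'STAGE1',
}
flow.stagers_conf = [{
    'name': 'STAGE1',
    'module': 'my_stage',
    'class': 'MyStage',
    'next_steps': 'CONSUMER',
    'nb_process': 2,           # optional, defaults to 1
    'queue_limit': 100,        # optional, defaults to -1
}]
flow.consumer_conf = {
    'name': 'CONSUMER',
    'module': 'my_consumer',
    'class': 'MyConsumer',
    'prev': 'STAGE1',
}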
Exemple #30
0
class ExecutePreprocessor(Preprocessor):
    """
    Executes all the cells in a notebook
    """

    timeout = Integer(30,
                      allow_none=True,
                      help=dedent("""
            The time to wait (in seconds) for output from executions.
            If a cell execution takes longer, an exception (TimeoutError
            on python 3+, RuntimeError on python 2) is raised.

            `None` or `-1` will disable the timeout. If `timeout_func` is set,
            it overrides `timeout`.
            """)).tag(config=True)

    timeout_func = Any(default_value=None,
                       allow_none=True,
                       help=dedent("""
            A callable which, when given the cell source as input,
            returns the time to wait (in seconds) for output from cell
            executions. If a cell execution takes longer, an exception
            (TimeoutError on python 3+, RuntimeError on python 2) is
            raised.

            Returning `None` or `-1` will disable the timeout for the cell.
            Not setting `timeout_func` will cause the preprocessor to
            default to using the `timeout` trait for all cells. The
            `timeout_func` trait overrides `timeout` if it is not `None`.
            """)).tag(config=True)

    interrupt_on_timeout = Bool(False,
                                help=dedent("""
            If execution of a cell times out, interrupt the kernel and
            continue executing other cells rather than throwing an error and
            stopping.
            """)).tag(config=True)

    startup_timeout = Integer(60,
                              help=dedent("""
            The time to wait (in seconds) for the kernel to start.
            If kernel startup takes longer, a RuntimeError is
            raised.
            """)).tag(config=True)

    allow_errors = Bool(False,
                        help=dedent("""
            If `False` (default), when a cell raises an error the
            execution is stopped and a `CellExecutionError`
            is raised.
            If `True`, execution errors are ignored and the execution
            is continued until the end of the notebook. Output from
            exceptions is included in the cell output in both cases.
            """)).tag(config=True)

    extra_arguments = List(Unicode())

    kernel_name = Unicode('',
                          help=dedent("""
            Name of kernel to use to execute the cells.
            If not set, use the kernel_spec embedded in the notebook.
            """)).tag(config=True)

    raise_on_iopub_timeout = Bool(False,
                                  help=dedent("""
            If `False` (default), then the kernel will continue waiting for
            iopub messages until it receives a kernel idle message, or until a
            timeout occurs, at which point the currently executing cell will be
            skipped. If `True`, then an error will be raised after the first
            timeout. This option generally does not need to be used, but may be
            useful in contexts where there is the possibility of executing
            notebooks with memory-consuming infinite loops.
            """)).tag(config=True)

    iopub_timeout = Integer(4,
                            allow_none=False,
                            help=dedent("""
            The time to wait (in seconds) for IOPub output. This generally
            doesn't need to be set, but on some slow networks (such as CI
            systems) the default timeout might not be long enough to get all
            messages.
            """)).tag(config=True)

    shutdown_kernel = Enum(['graceful', 'immediate'],
                           default_value='graceful',
                           help=dedent("""
            If `graceful` (default), then the kernel is given time to clean
            up after executing all cells, e.g., to execute its `atexit` hooks.
            If `immediate`, then the kernel is signaled to immediately
            terminate.
            """)).tag(config=True)

    kernel_manager_class = Type(config=True,
                                help='The kernel manager class to use.')

    @default('kernel_manager_class')
    def _km_default(self):
        """Use a dynamic default to avoid importing jupyter_client at startup"""
        try:
            from jupyter_client import KernelManager
        except ImportError:
            raise ImportError(
                "`nbconvert --execute` requires the jupyter_client package: `pip install jupyter_client`"
            )
        return KernelManager

    # mapping of locations of outputs with a given display_id
    # tracks cell index and output index within cell.outputs for
    # each appearance of the display_id
    # {
    #   'display_id': {
    #     cell_idx: [output_idx,]
    #   }
    # }
    _display_id_map = Dict()

    def preprocess(self, nb, resources):
        """
        Preprocess notebook executing each code cell.

        The input argument `nb` is modified in-place.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being executed.
        resources : dictionary
            Additional resources used in the conversion process. For example,
            passing ``{'metadata': {'path': run_path}}`` sets the
            execution path to ``run_path``.

        Returns
        -------
        nb : NotebookNode
            The executed notebook.
        resources : dictionary
            Additional resources used in the conversion process.
        """
        path = resources.get('metadata', {}).get('path', '')
        if path == '':
            path = None

        # clear display_id map
        self._display_id_map = {}

        # from jupyter_client.manager import start_new_kernel

        def start_new_kernel(startup_timeout=60,
                             kernel_name='python',
                             **kwargs):
            km = self.kernel_manager_class(kernel_name=kernel_name)
            km.start_kernel(**kwargs)
            kc = km.client()
            kc.start_channels()
            try:
                kc.wait_for_ready(timeout=startup_timeout)
            except RuntimeError:
                kc.stop_channels()
                km.shutdown_kernel()
                raise

            return km, kc

        kernel_name = nb.metadata.get('kernelspec', {}).get('name', 'python')
        if self.kernel_name:
            kernel_name = self.kernel_name
        self.log.info("Executing notebook with kernel: %s" % kernel_name)
        self.km, self.kc = start_new_kernel(
            startup_timeout=self.startup_timeout,
            kernel_name=kernel_name,
            extra_arguments=self.extra_arguments,
            cwd=path)
        self.kc.allow_stdin = False
        self.nb = nb

        try:
            nb, resources = super(ExecutePreprocessor,
                                  self).preprocess(nb, resources)
        finally:
            self.kc.stop_channels()
            self.km.shutdown_kernel(now=self.shutdown_kernel == 'immediate')

        delattr(self, 'nb')

        return nb, resources

    def preprocess_cell(self, cell, resources, cell_index):
        """
        Executes a single code cell. See base.py for details.

        To execute all cells see :meth:`preprocess`.
        """
        if cell.cell_type != 'code':
            return cell, resources

        reply, outputs = self.run_cell(cell, cell_index)
        cell.outputs = outputs

        if not self.allow_errors:
            for out in outputs:
                if out.output_type == 'error':
                    raise CellExecutionError.from_cell_and_msg(cell, out)
            if (reply is not None) and reply['content']['status'] == 'error':
                raise CellExecutionError.from_cell_and_msg(
                    cell, reply['content'])
        return cell, resources

    def _update_display_id(self, display_id, msg):
        """Update outputs with a given display_id"""
        if display_id not in self._display_id_map:
            self.log.debug("display id %r not in %s", display_id,
                           self._display_id_map)
            return

        if msg['header']['msg_type'] == 'update_display_data':
            msg['header']['msg_type'] = 'display_data'

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg['msg_type'])
            return

        for cell_idx, output_indices in (
                self._display_id_map[display_id].items()):
            cell = self.nb['cells'][cell_idx]
            outputs = cell['outputs']
            for output_idx in output_indices:
                outputs[output_idx]['data'] = out['data']
                outputs[output_idx]['metadata'] = out['metadata']

    def _wait_for_reply(self, msg_id, cell):
        # wait for finish, with timeout
        while True:
            try:
                if self.timeout_func is not None:
                    timeout = self.timeout_func(cell)
                else:
                    timeout = self.timeout

                if not timeout or timeout < 0:
                    timeout = None
                msg = self.kc.shell_channel.get_msg(timeout=timeout)
            except Empty:
                self.log.error("Timeout waiting for execute reply (%is)." %
                               self.timeout)
                if self.interrupt_on_timeout:
                    self.log.error("Interrupting kernel")
                    self.km.interrupt_kernel()
                    break
                else:
                    try:
                        exception = TimeoutError
                    except NameError:
                        exception = RuntimeError
                    raise exception("Cell execution timed out")

            if msg['parent_header'].get('msg_id') == msg_id:
                return msg
            else:
                # not our reply
                continue

    def run_cell(self, cell, cell_index=0):
        msg_id = self.kc.execute(cell.source)
        self.log.debug("Executing cell:\n%s", cell.source)
        exec_reply = self._wait_for_reply(msg_id, cell)

        outs = cell.outputs = []

        while True:
            try:
                # We've already waited for execute_reply, so all output
                # should already be waiting. However, on slow networks, like
                # in certain CI systems, waiting < 1 second might miss messages.
                # So long as the kernel sends a status:idle message when it
                # finishes, we won't actually have to wait this long, anyway.
                msg = self.kc.iopub_channel.get_msg(timeout=self.iopub_timeout)
            except Empty:
                self.log.warn("Timeout waiting for IOPub output")
                if self.raise_on_iopub_timeout:
                    raise RuntimeError("Timeout waiting for IOPub output")
                else:
                    break
            if msg['parent_header'].get('msg_id') != msg_id:
                # not an output from our execution
                continue

            msg_type = msg['msg_type']
            self.log.debug("output: %s", msg_type)
            content = msg['content']

            # set the prompt number for the input and the output
            if 'execution_count' in content:
                cell['execution_count'] = content['execution_count']

            if msg_type == 'status':
                if content['execution_state'] == 'idle':
                    break
                else:
                    continue
            elif msg_type == 'execute_input':
                continue
            elif msg_type == 'clear_output':
                outs[:] = []
                # clear display_id mapping for this cell
                for display_id, cell_map in self._display_id_map.items():
                    if cell_index in cell_map:
                        cell_map[cell_index] = []
                continue
            elif msg_type.startswith('comm'):
                continue

            display_id = None
            if msg_type in {
                    'execute_result', 'display_data', 'update_display_data'
            }:
                display_id = msg['content'].get('transient',
                                                {}).get('display_id', None)
                if display_id:
                    self._update_display_id(display_id, msg)
                if msg_type == 'update_display_data':
                    # update_display_data doesn't get recorded
                    continue

            try:
                out = output_from_msg(msg)
            except ValueError:
                self.log.error("unhandled iopub msg: " + msg_type)
                continue
            if display_id:
                # record output index in:
                #   _display_id_map[display_id][cell_idx]
                cell_map = self._display_id_map.setdefault(display_id, {})
                output_idx_list = cell_map.setdefault(cell_index, [])
                output_idx_list.append(len(outs))

            outs.append(out)

        return exec_reply, outs
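
A minimal usage sketch for the preprocessor above, following the standard nbformat/nbconvert pattern (the file names are hypothetical):

import nbformat
from nbconvert.preprocessors import ExecutePreprocessor

nb = nbformat.read('notebook.ipynb', as_version=4)
ep = ExecutePreprocessor(timeout=60, kernel_name='python3')
# execute the notebook in place; 'path' sets the kernel's working directory
ep.preprocess(nb, {'metadata': {'path': '.'}})
nbformat.write(nb, 'executed.ipynb')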