Example #1
class TerminalInteractiveShell(InteractiveShell):
    mime_renderers = Dict().tag(config=True)

    space_for_menu = Integer(6, help='Number of lines at the bottom of the screen '
                                     'to reserve for the tab completion menu, '
                                     'search history, etc. The height of '
                                     'these menus will be at most this value. '
                                     'Increase it if you prefer long and skinny '
                                     'menus, decrease it for short and wide ones.'
                            ).tag(config=True)

    pt_app = None
    debugger_history = None

    simple_prompt = Bool(_use_simple_prompt,
        help="""Use `raw_input` for the REPL, without completion and prompt colors.

            Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR. Known usage are:
            IPython own testing machinery, and emacs inferior-shell integration through elpy.

            This mode default to `True` if the `IPY_TEST_SIMPLE_PROMPT`
            environment variable is set, or the current terminal is not a tty."""
            ).tag(config=True)
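
    # Usage sketch (not part of the source): like every trait tagged
    # ``config=True`` here, this can be set from the command line or from a
    # config file, e.g.:
    #
    #     $ ipython --TerminalInteractiveShell.simple_prompt=True
    #
    #     # or in ipython_config.py:
    #     c.TerminalInteractiveShell.simple_prompt = True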

    @property
    def debugger_cls(self):
        return Pdb if self.simple_prompt else TerminalPdb

    confirm_exit = Bool(True,
        help="""
        Set to confirm when you try to exit IPython with an EOF (Control-D
        in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
        you can force a direct exit without any confirmation.""",
    ).tag(config=True)

    editing_mode = Unicode('emacs',
        help="Shortcut style to use at the prompt. 'vi' or 'emacs'.",
    ).tag(config=True)

    autoformatter = Unicode(None,
        help="Autoformatter to reformat Terminal code. Can be `'black'` or `None`",
        allow_none=True
    ).tag(config=True)

    mouse_support = Bool(False,
        help="Enable mouse support in the prompt\n(Note: prevents selecting text with the mouse)"
    ).tag(config=True)

    # We don't load the list of styles for the help string, because loading
    # Pygments plugins takes time and can cause unexpected errors.
    highlighting_style = Union([Unicode('legacy'), Type(klass=Style)],
        help="""The name or class of a Pygments style to use for syntax
        highlighting. To see available styles, run `pygmentize -L styles`."""
    ).tag(config=True)

    @validate('editing_mode')
    def _validate_editing_mode(self, proposal):
        if proposal['value'].lower() == 'vim':
            proposal['value'] = 'vi'
        elif proposal['value'].lower() == 'default':
            proposal['value'] = 'emacs'

        if hasattr(EditingMode, proposal['value'].upper()):
            return proposal['value'].lower()

        return self.editing_mode
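
    # Behavior sketch of the validator above (``shell`` is an assumed
    # TerminalInteractiveShell instance):
    #
    #     >>> shell.editing_mode = 'vim'    # normalized to 'vi'
    #     >>> shell.editing_mode
    #     'vi'
    #     >>> shell.editing_mode = 'bogus'  # unknown values are dropped and
    #     >>> shell.editing_mode            # the previous mode is kept
    #     'vi'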


    @observe('editing_mode')
    def _editing_mode(self, change):
        u_mode = change.new.upper()
        if self.pt_app:
            self.pt_app.editing_mode = u_mode

    @observe('autoformatter')
    def _autoformatter_changed(self, change):
        formatter = change.new
        if formatter is None:
            self.reformat_handler = lambda x: x
        elif formatter == 'black':
            self.reformat_handler = black_reformat_handler
        else:
            raise ValueError('Unknown autoformatter: %r' % formatter)

    @observe('highlighting_style')
    @observe('colors')
    def _highlighting_style_changed(self, change):
        self.refresh_style()

    def refresh_style(self):
        self._style = self._make_style_from_name_or_cls(self.highlighting_style)


    highlighting_style_overrides = Dict(
        help="Override highlighting format for specific tokens"
    ).tag(config=True)

    true_color = Bool(False,
        help=("Use 24bit colors instead of 256 colors in prompt highlighting. "
              "If your terminal supports true color, the following command "
              "should print 'TRUECOLOR' in orange: "
              "printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\"")
    ).tag(config=True)

    editor = Unicode(get_default_editor(),
        help="Set the editor used by IPython (default to $EDITOR/vi/notepad)."
    ).tag(config=True)

    prompts_class = Type(Prompts, help='Class used to generate Prompt token for prompt_toolkit').tag(config=True)

    prompts = Instance(Prompts)

    @default('prompts')
    def _prompts_default(self):
        return self.prompts_class(self)

#    @observe('prompts')
#    def _(self, change):
#        self._update_layout()
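
    # Configuration sketch (assumed): ``prompts_class`` lets users install a
    # custom prompt, e.g. in ipython_config.py:
    #
    #     from IPython.terminal.prompts import Prompts, Token
    #
    #     class MyPrompts(Prompts):
    #         def in_prompt_tokens(self):
    #             return [(Token.Prompt, '>>> ')]
    #
    #     c.TerminalInteractiveShell.prompts_class = MyPrompts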

    @default('displayhook_class')
    def _displayhook_class_default(self):
        return RichPromptDisplayHook

    term_title = Bool(True,
        help="Automatically set the terminal title"
    ).tag(config=True)

    term_title_format = Unicode("IPython: {cwd}",
        help="Customize the terminal title format.  This is a python format string. " +
             "Available substitutions are: {cwd}."
    ).tag(config=True)

    display_completions = Enum(('column', 'multicolumn', 'readlinelike'),
        help=("How to display tab completions: 'column', 'multicolumn', or "
              "'readlinelike'. These options are passed to `prompt_toolkit`; "
              "see the `prompt_toolkit` documentation for more information."
              ),
        default_value='multicolumn').tag(config=True)

    highlight_matching_brackets = Bool(True,
        help="Highlight matching brackets.",
    ).tag(config=True)

    extra_open_editor_shortcuts = Bool(False,
        help="Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. "
             "This is in addition to the F2 binding, which is always enabled."
    ).tag(config=True)

    handle_return = Any(None,
        help="Provide an alternative handler to be called when the user presses "
             "Return. This is an advanced option intended for debugging, which "
             "may be changed or removed in later releases."
    ).tag(config=True)

    enable_history_search = Bool(True,
        help="Allows to enable/disable the prompt toolkit history search"
    ).tag(config=True)

    prompt_includes_vi_mode = Bool(True,
        help="Display the current vi mode (when using vi editing mode)."
    ).tag(config=True)

    @observe('term_title')
    def init_term_title(self, change=None):
        # Enable or disable the terminal title.
        if self.term_title:
            toggle_set_term_title(True)
            set_term_title(self.term_title_format.format(cwd=abbrev_cwd()))
        else:
            toggle_set_term_title(False)

    def restore_term_title(self):
        if self.term_title:
            restore_term_title()

    def init_display_formatter(self):
        super(TerminalInteractiveShell, self).init_display_formatter()
        # terminal only supports plain text
        self.display_formatter.active_types = ['text/plain']
        # disable `_ipython_display_`
        self.display_formatter.ipython_display_formatter.enabled = False

    def init_prompt_toolkit_cli(self):
        if self.simple_prompt:
            # Fall back to plain non-interactive output for tests.
            # This is very limited.
            def prompt():
                prompt_text = "".join(x[1] for x in self.prompts.in_prompt_tokens())
                lines = [input(prompt_text)]
                prompt_continuation = "".join(x[1] for x in self.prompts.continuation_prompt_tokens())
                while self.check_complete('\n'.join(lines))[0] == 'incomplete':
                    lines.append(input(prompt_continuation))
                return '\n'.join(lines)
            self.prompt_for_code = prompt
            return

        # Set up keyboard shortcuts
        key_bindings = create_ipython_shortcuts(self)

        # Pre-populate history from IPython's history database
        history = InMemoryHistory()
        last_cell = u""
        for __, ___, cell in self.history_manager.get_tail(self.history_load_length,
                                                        include_latest=True):
            # Ignore blank lines and consecutive duplicates
            cell = cell.rstrip()
            if cell and (cell != last_cell):
                history.append_string(cell)
                last_cell = cell

        self._style = self._make_style_from_name_or_cls(self.highlighting_style)
        self.style = DynamicStyle(lambda: self._style)

        editing_mode = getattr(EditingMode, self.editing_mode.upper())

        self.pt_loop = asyncio.new_event_loop()
        self.pt_app = PromptSession(
                            editing_mode=editing_mode,
                            key_bindings=key_bindings,
                            history=history,
                            completer=IPythonPTCompleter(shell=self),
                            enable_history_search=self.enable_history_search,
                            style=self.style,
                            include_default_pygments_style=False,
                            mouse_support=self.mouse_support,
                            enable_open_in_editor=self.extra_open_editor_shortcuts,
                            color_depth=self.color_depth,
                            tempfile_suffix=".py",
                            **self._extra_prompt_options())

    def _make_style_from_name_or_cls(self, name_or_cls):
        """
        Small wrapper that make an IPython compatible style from a style name

        We need that to add style for prompt ... etc.
        """
        style_overrides = {}
        if name_or_cls == 'legacy':
            legacy = self.colors.lower()
            if legacy == 'linux':
                style_cls = get_style_by_name('monokai')
                style_overrides = _style_overrides_linux
            elif legacy == 'lightbg':
                style_overrides = _style_overrides_light_bg
                style_cls = get_style_by_name('pastie')
            elif legacy == 'neutral':
                # The default theme needs to be visible on both a dark background
                # and a light background, because we can't tell what the terminal
                # looks like. These tweaks to the default theme help with that.
                style_cls = get_style_by_name('default')
                style_overrides.update({
                    Token.Number: '#ansigreen',
                    Token.Operator: 'noinherit',
                    Token.String: '#ansiyellow',
                    Token.Name.Function: '#ansiblue',
                    Token.Name.Class: 'bold #ansiblue',
                    Token.Name.Namespace: 'bold #ansiblue',
                    Token.Name.Variable.Magic: '#ansiblue',
                    Token.Prompt: '#ansigreen',
                    Token.PromptNum: '#ansibrightgreen bold',
                    Token.OutPrompt: '#ansired',
                    Token.OutPromptNum: '#ansibrightred bold',
                })

                # Hack: Due to limited color support on the Windows console
                # the prompt colors will be wrong without this
                if os.name == 'nt':
                    style_overrides.update({
                        Token.Prompt: '#ansidarkgreen',
                        Token.PromptNum: '#ansigreen bold',
                        Token.OutPrompt: '#ansidarkred',
                        Token.OutPromptNum: '#ansired bold',
                    })
            elif legacy == 'nocolor':
                style_cls = _NoStyle
                style_overrides = {}
            else:
                raise ValueError('Got unknown colors: %r' % legacy)
        else:
            if isinstance(name_or_cls, str):
                style_cls = get_style_by_name(name_or_cls)
            else:
                style_cls = name_or_cls
            style_overrides = {
                Token.Prompt: '#ansigreen',
                Token.PromptNum: '#ansibrightgreen bold',
                Token.OutPrompt: '#ansired',
                Token.OutPromptNum: '#ansibrightred bold',
            }
        style_overrides.update(self.highlighting_style_overrides)
        style = merge_styles([
            style_from_pygments_cls(style_cls),
            style_from_pygments_dict(style_overrides),
        ])

        return style
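
    # Configuration sketch (assumed): because user overrides are merged last,
    # they win over the base Pygments style, e.g. in ipython_config.py:
    #
    #     from pygments.token import Token
    #     c.TerminalInteractiveShell.highlighting_style = 'monokai'
    #     c.TerminalInteractiveShell.highlighting_style_overrides = {
    #         Token.Comment: 'italic #888888',
    #     }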

    @property
    def pt_complete_style(self):
        return {
            'multicolumn': CompleteStyle.MULTI_COLUMN,
            'column': CompleteStyle.COLUMN,
            'readlinelike': CompleteStyle.READLINE_LIKE,
        }[self.display_completions]

    @property
    def color_depth(self):
        return (ColorDepth.TRUE_COLOR if self.true_color else None)

    def _extra_prompt_options(self):
        """
        Return the current layout options for the terminal InteractiveShell.
        """
        def get_message():
            return PygmentsTokens(self.prompts.in_prompt_tokens())

        if self.editing_mode == 'emacs':
            # With emacs mode the prompt is (usually) static, so we call the
            # function only once. With vi mode it can toggle between [ins]
            # and [nor], so we can't precompute it.
            # Here we favor the default keybinding, which almost everybody
            # uses, to decrease CPU usage.
            # If we hit issues with users with custom prompts, we can look at
            # how to work around this.
            get_message = get_message()

        options = {
                'complete_in_thread': False,
                'lexer': IPythonPTLexer(),
                'reserve_space_for_menu': self.space_for_menu,
                'message': get_message,
                'prompt_continuation': (
                    lambda width, lineno, is_soft_wrap:
                        PygmentsTokens(self.prompts.continuation_prompt_tokens(width))),
                'multiline': True,
                'complete_style': self.pt_complete_style,

                # Highlight matching brackets, but only when this setting is
                # enabled, and only when the DEFAULT_BUFFER has the focus.
                'input_processors': [ConditionalProcessor(
                        processor=HighlightMatchingBracketProcessor(chars='[](){}'),
                        filter=HasFocus(DEFAULT_BUFFER) & ~IsDone() &
                            Condition(lambda: self.highlight_matching_brackets))],
                }
        if not PTK3:
            options['inputhook'] = self.inputhook

        return options

    def prompt_for_code(self):
        if self.rl_next_input:
            default = self.rl_next_input
            self.rl_next_input = None
        else:
            default = ''

        # In order to make sure that asyncio code written in the
        # interactive shell doesn't interfere with the prompt, we run the
        # prompt in a different event loop.
        # If we don't do this, people could spawn a coroutine containing a
        # `while True:` loop, which would freeze the prompt.

        try:
            old_loop = asyncio.get_event_loop()
        except RuntimeError:
            # This happens when the user used `asyncio.run()`.
            old_loop = None

        asyncio.set_event_loop(self.pt_loop)
        try:
            with patch_stdout(raw=True):
                text = self.pt_app.prompt(
                    default=default,
                    **self._extra_prompt_options())
        finally:
            # Restore the original event loop.
            asyncio.set_event_loop(old_loop)

        return text

    def enable_win_unicode_console(self):
        # Since IPython 7.10 no longer supports Python < 3.6, and per PEP 528
        # Python uses the Unicode APIs for the Windows console by default,
        # WUC shouldn't be needed.
        from warnings import warn
        warn("`enable_win_unicode_console` is deprecated since IPython 7.10, does not do anything and will be removed in the future",
             DeprecationWarning,
             stacklevel=2)

    def init_io(self):
        if sys.platform not in {'win32', 'cli'}:
            return

        import colorama
        colorama.init()

        # For some reason we make these wrappers around stdout/stderr.
        # For now, we need to reset them so all output gets coloured.
        # https://github.com/ipython/ipython/issues/8669
        # io.std* are deprecated, but don't show our own deprecation warnings
        # during initialization of the deprecated API.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            io.stdout = io.IOStream(sys.stdout)
            io.stderr = io.IOStream(sys.stderr)

    def init_magics(self):
        super(TerminalInteractiveShell, self).init_magics()
        self.register_magics(TerminalMagics)

    def init_alias(self):
        # The parent class defines aliases that can be safely used with any
        # frontend.
        super(TerminalInteractiveShell, self).init_alias()

        # Now define aliases that only make sense on the terminal, because they
        # need direct access to the console in a way that we can't emulate in
        # GUI or web frontends.
        if os.name == 'posix':
            for cmd in ('clear', 'more', 'less', 'man'):
                self.alias_manager.soft_define_alias(cmd, cmd)


    def __init__(self, *args, **kwargs):
        super(TerminalInteractiveShell, self).__init__(*args, **kwargs)
        self.init_prompt_toolkit_cli()
        self.init_term_title()
        self.keep_running = True

        self.debugger_history = InMemoryHistory()

    def ask_exit(self):
        self.keep_running = False

    rl_next_input = None

    def interact(self, display_banner=DISPLAY_BANNER_DEPRECATED):

        if display_banner is not DISPLAY_BANNER_DEPRECATED:
            warn('interact `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.', DeprecationWarning, stacklevel=2)

        self.keep_running = True
        while self.keep_running:
            print(self.separate_in, end='')

            try:
                code = self.prompt_for_code()
            except EOFError:
                if (not self.confirm_exit) \
                        or self.ask_yes_no('Do you really want to exit ([y]/n)?','y','n'):
                    self.ask_exit()

            else:
                if code:
                    self.run_cell(code, store_history=True)

    def mainloop(self, display_banner=DISPLAY_BANNER_DEPRECATED):
        # An extra layer of protection in case someone mashing Ctrl-C breaks
        # out of our internal code.
        if display_banner is not DISPLAY_BANNER_DEPRECATED:
            warn('mainloop `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.', DeprecationWarning, stacklevel=2)
        while True:
            try:
                self.interact()
                break
            except KeyboardInterrupt as e:
                print("\n%s escaped interact()\n" % type(e).__name__)
            finally:
                # An interrupt during the eventloop will mess up the
                # internal state of the prompt_toolkit library.
                # Stopping the eventloop fixes this, see
                # https://github.com/ipython/ipython/pull/9867
                if hasattr(self, '_eventloop'):
                    self._eventloop.stop()

                self.restore_term_title()


    _inputhook = None
    def inputhook(self, context):
        if self._inputhook is not None:
            self._inputhook(context)

    active_eventloop = None
    def enable_gui(self, gui=None):
        if gui and (gui != 'inline'):
            self.active_eventloop, self._inputhook =\
                get_inputhook_name_and_func(gui)
        else:
            self.active_eventloop = self._inputhook = None

        # For prompt_toolkit 3.0. We have to create an asyncio event loop with
        # this inputhook.
        if PTK3:
            import asyncio
            from prompt_toolkit.eventloop import new_eventloop_with_inputhook

            if gui == 'asyncio':
                # When we integrate the asyncio event loop, run the UI in the
                # same event loop as the rest of the code. Don't use an actual
                # input hook. (Asyncio is not made for nesting event loops.)
                self.pt_loop = asyncio.get_event_loop()

            elif self._inputhook:
                # If an inputhook was set, create a new asyncio event loop with
                # this inputhook for the prompt.
                self.pt_loop = new_eventloop_with_inputhook(self._inputhook)
            else:
                # When there's no inputhook, run the prompt in a separate
                # asyncio event loop.
                self.pt_loop = asyncio.new_event_loop()
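
    # Usage sketch (assumed): this is what the ``%gui`` line magic calls
    # under the hood, e.g.:
    #
    #     >>> get_ipython().enable_gui('qt')   # integrate the Qt event loop
    #     >>> get_ipython().enable_gui(None)   # disable GUI integration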

    # Run !system commands directly, not through pipes, so terminal programs
    # work correctly.
    system = InteractiveShell.system_raw

    def auto_rewrite_input(self, cmd):
        """Overridden from the parent class to use fancy rewriting prompt"""
        if not self.show_rewritten_input:
            return

        tokens = self.prompts.rewrite_prompt_tokens()
        if self.pt_app:
            print_formatted_text(PygmentsTokens(tokens), end='',
                                 style=self.pt_app.app.style)
            print(cmd)
        else:
            prompt = ''.join(s for t, s in tokens)
            print(prompt, cmd, sep='')

    _prompts_before = None
    def switch_doctest_mode(self, mode):
        """Switch prompts to classic for %doctest_mode"""
        if mode:
            self._prompts_before = self.prompts
            self.prompts = ClassicPrompts(self)
        elif self._prompts_before:
            self.prompts = self._prompts_before
            self._prompts_before = None
Example #3
class IPCompleter(Completer):
    """Extension of the completer class with IPython-specific features"""
    
    @observe('greedy')
    def _greedy_changed(self, change):
        """update the splitter and readline delims when greedy is changed"""
        if change['new']:
            self.splitter.delims = GREEDY_DELIMS
        else:
            self.splitter.delims = DELIMS

        if self.readline:
            self.readline.set_completer_delims(self.splitter.delims)
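
    # Behavior sketch (assumed): toggling ``greedy`` swaps the delimiter set
    # used to split the input line:
    #
    #     >>> comp = IPCompleter(use_readline=False)
    #     >>> comp.greedy = True    # splitter.delims becomes GREEDY_DELIMS
    #     >>> comp.greedy = False   # back to the default DELIMS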
    
    merge_completions = Bool(True,
        help="""Whether to merge completion results into a single list
        
        If False, only the completion results from the first non-empty
        completer will be returned.
        """
    ).tag(config=True)
    omit__names = Enum((0,1,2), default_value=2,
        help="""Instruct the completer to omit private method names
        
        Specifically, when completing on ``object.<tab>``.
        
        When 2 [default]: all names that start with '_' will be excluded.
        
        When 1: all 'magic' names (``__foo__``) will be excluded.
        
        When 0: nothing will be excluded.
        """
    ).tag(config=True)
    limit_to__all__ = Bool(False,
        help="""
        DEPRECATED as of version 5.0.
        
        Instruct the completer to use __all__ for the completion
        
        Specifically, when completing on ``object.<tab>``.
        
        When True: only those names in obj.__all__ will be included.
        
        When False [default]: the __all__ attribute is ignored 
        """,
    ).tag(config=True)
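
    # Configuration sketch (assumed), e.g. in ipython_config.py:
    #
    #     c.IPCompleter.omit__names = 0          # also complete _private names
    #     c.IPCompleter.merge_completions = False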

    def __init__(self, shell=None, namespace=None, global_namespace=None,
                 use_readline=True, config=None, **kwargs):
        """IPCompleter() -> completer

        Return a completer object suitable for use by the readline library
        via readline.set_completer().

        Parameters
        ----------
        shell : InteractiveShell
            A pointer to the ipython shell itself.  This is needed because
            this completer knows about magic functions, and those can only be
            accessed via the ipython instance.

        namespace : dict, optional
            An optional dict where completions are performed.

        global_namespace : dict, optional
            Secondary optional dict for completions, to handle cases (such as
            IPython embedded inside functions) where both Python scopes are
            visible.

        use_readline : bool, optional
            If true, use the readline library.  This completer can still
            function without readline, though in that case callers must
            provide some extra information on each call about the current
            line."""

        self.magic_escape = ESC_MAGIC
        self.splitter = CompletionSplitter()

        # Readline configuration, only used by the rlcompleter method.
        if use_readline:
            # We store the right version of readline so that later code can
            # use it directly.
            import IPython.utils.rlineimpl as readline
            self.readline = readline
        else:
            self.readline = None

        # _greedy_changed() depends on splitter and readline being defined:
        Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
                            config=config, **kwargs)

        # List where completion matches will be stored
        self.matches = []
        self.shell = shell
        # Regexp to split filenames with spaces in them
        self.space_name_re = re.compile(r'([^\\] )')
        # Hold a local ref. to glob.glob for speed
        self.glob = glob.glob

        # Determine if we are running on 'dumb' terminals, like (X)Emacs
        # buffers, to avoid completion problems.
        term = os.environ.get('TERM','xterm')
        self.dumb_terminal = term in ['dumb','emacs']

        # Special handling of backslashes needed in win32 platforms
        if sys.platform == "win32":
            self.clean_glob = self._clean_glob_win32
        else:
            self.clean_glob = self._clean_glob

        # regexp to parse docstring for function signature
        self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
        self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
        # use this if the positional argument name is also needed:
        # = re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')

        # All active matcher routines for completion
        self.matchers = [
                         self.python_matches,
                         self.file_matches,
                         self.magic_matches,
                         self.python_func_kw_matches,
                         self.dict_key_matches,
                         ]

        # This is set externally by InteractiveShell
        self.custom_completers = None

    def all_completions(self, text):
        """
        Wrapper around the complete method for the benefit of emacs.
        """
        return self.complete(text)[1]

    def _clean_glob(self, text):
        return self.glob("%s*" % text)

    def _clean_glob_win32(self, text):
        return [f.replace("\\","/")
                for f in self.glob("%s*" % text)]

    def file_matches(self, text):
        """Match filenames, expanding ~USER type strings.

        Most of the seemingly convoluted logic in this completer is an
        attempt to handle filenames with spaces in them.  And yet it's not
        quite perfect, because Python's readline doesn't expose all of the
        GNU readline details needed for this to be done correctly.

        For a filename with a space in it, the printed completions will be
        only the parts after what's already been typed (instead of the
        full completions, as is normally done).  I don't think with the
        current (as of Python 2.3) Python readline it's possible to do
        better."""

        # chars that require escaping with backslash - i.e. chars
        # that readline treats incorrectly as delimiters, but we
        # don't want to treat as delimiters in filename matching
        # when escaped with backslash
        if text.startswith('!'):
            text = text[1:]
            text_prefix = u'!'
        else:
            text_prefix = u''

        text_until_cursor = self.text_until_cursor
        # track strings with open quotes
        open_quotes = has_open_quotes(text_until_cursor)

        if '(' in text_until_cursor or '[' in text_until_cursor:
            lsplit = text
        else:
            try:
                # arg_split ~ shlex.split, but with unicode bugs fixed by us
                lsplit = arg_split(text_until_cursor)[-1]
            except ValueError:
                # typically an unmatched ", or backslash without escaped char.
                if open_quotes:
                    lsplit = text_until_cursor.split(open_quotes)[-1]
                else:
                    return []
            except IndexError:
                # tab pressed on empty line
                lsplit = ""

        if not open_quotes and lsplit != protect_filename(lsplit):
            # if protectables are found, do matching on the whole escaped name
            has_protectables = True
            text0, text = text, lsplit
        else:
            has_protectables = False
            text = os.path.expanduser(text)

        if text == "":
            return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]

        # Compute the matches from the filesystem
        if sys.platform == 'win32':
            m0 = self.clean_glob(text)
        else:
            m0 = self.clean_glob(text.replace('\\', ''))

        if has_protectables:
            # If we had protectables, we need to revert our changes to the
            # beginning of filename so that we don't double-write the part
            # of the filename we have so far
            len_lsplit = len(lsplit)
            matches = [text_prefix + text0 +
                       protect_filename(f[len_lsplit:]) for f in m0]
        else:
            if open_quotes:
                # if we have a string with an open quote, we don't need to
                # protect the names at all (and we _shouldn't_, as it
                # would cause bugs when the filesystem call is made).
                matches = m0
            else:
                matches = [text_prefix +
                           protect_filename(f) for f in m0]

        # Mark directories in input list by appending '/' to their names.
        return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]

    def magic_matches(self, text):
        """Match magics"""
        # Get all shell magics now rather than statically, so magics loaded at
        # runtime show up too.
        lsm = self.shell.magics_manager.lsmagic()
        line_magics = lsm['line']
        cell_magics = lsm['cell']
        pre = self.magic_escape
        pre2 = pre+pre
        
        # Completion logic:
        # - user gives %%: only do cell magics
        # - user gives %: do both line and cell magics
        # - no prefix: do both
        # In other words, line magics are skipped if the user gives %% explicitly
        bare_text = text.lstrip(pre)
        comp = [pre2+m for m in cell_magics if m.startswith(bare_text)]
        if not text.startswith(pre2):
            comp += [pre+m for m in line_magics if m.startswith(bare_text)]
        return [cast_unicode_py2(c) for c in comp]
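
    # Behavior sketch (assumed, in a live session): a single escape matches
    # both kinds of magics, a double escape matches cell magics only:
    #
    #     >>> ip = get_ipython()
    #     >>> ip.Completer.magic_matches('%ti')
    #     ['%%time', '%%timeit', '%time', '%timeit', ...]
    #     >>> ip.Completer.magic_matches('%%ti')
    #     ['%%time', '%%timeit']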


    def python_matches(self, text):
        """Match attributes or global python names"""
        if "." in text:
            try:
                matches = self.attr_matches(text)
                if text.endswith('.') and self.omit__names:
                    if self.omit__names == 1:
                        # true if txt is _not_ a __ name, false otherwise:
                        no__name = (lambda txt:
                                    re.match(r'.*\.__.*?__',txt) is None)
                    else:
                        # true if txt is _not_ a _ name, false otherwise:
                        no__name = (lambda txt:
                                    re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
                    matches = filter(no__name, matches)
            except NameError:
                # catches <undefined attributes>.<tab>
                matches = []
        else:
            matches = self.global_matches(text)
        return matches

    def _default_arguments_from_docstring(self, doc):
        """Parse the first line of docstring for call signature.

        Docstring should be of the form 'min(iterable[, key=func])\n'.
        It can also parse cython docstring of the form
        'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
        """
        if doc is None:
            return []

        # only care about the first line
        line = doc.lstrip().splitlines()[0]

        #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
        #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
        sig = self.docstring_sig_re.search(line)
        if sig is None:
            return []
        # iterable[, key=func]' -> ['iterable[' ,' key=func]']
        sig = sig.groups()[0].split(',')
        ret = []
        for s in sig:
            #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
            ret += self.docstring_kwd_re.findall(s)
        return ret
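
    # Worked sketch of the regexes above: for 'min(iterable[, key=func])',
    # ``docstring_sig_re`` captures 'iterable[, key=func]'; splitting on ','
    # and applying ``docstring_kwd_re`` keeps only the keyword arguments:
    #
    #     >>> comp._default_arguments_from_docstring('min(iterable[, key=func])\n')
    #     ['key']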

    def _default_arguments(self, obj):
        """Return the list of default arguments of obj if it is callable,
        or empty list otherwise."""
        call_obj = obj
        ret = []
        if inspect.isbuiltin(obj):
            pass
        elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
            if inspect.isclass(obj):
                # for Cython embedsignature=True, the constructor docstring
                # belongs to the object itself, not __init__
                ret += self._default_arguments_from_docstring(
                            getattr(obj, '__doc__', ''))
                # for classes, check for __init__,__new__
                call_obj = (getattr(obj, '__init__', None) or
                       getattr(obj, '__new__', None))
            # for all others, check if they are __call__able
            elif hasattr(obj, '__call__'):
                call_obj = obj.__call__
        ret += self._default_arguments_from_docstring(
                 getattr(call_obj, '__doc__', ''))

        if PY3:
            _keeps = (inspect.Parameter.KEYWORD_ONLY,
                      inspect.Parameter.POSITIONAL_OR_KEYWORD)
            signature = inspect.signature
        else:
            import IPython.utils.signatures
            _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
                      IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
            signature = IPython.utils.signatures.signature

        try:
            sig = signature(call_obj)
            ret.extend(k for k, v in sig.parameters.items() if
                       v.kind in _keeps)
        except ValueError:
            pass

        return list(set(ret))

    def python_func_kw_matches(self, text):
        """Match named parameters (kwargs) of the last open function"""

        if "." in text: # a parameter cannot be dotted
            return []
        try: regexp = self.__funcParamsRegex
        except AttributeError:
            regexp = self.__funcParamsRegex = re.compile(r'''
                '.*?(?<!\\)' |    # single quoted strings or
                ".*?(?<!\\)" |    # double quoted strings or
                \w+          |    # identifier
                \S                # other characters
                ''', re.VERBOSE | re.DOTALL)
        # 1. find the nearest identifier that comes before an unclosed
        # parenthesis before the cursor
        # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
        tokens = regexp.findall(self.text_until_cursor)
        tokens.reverse()
        iterTokens = iter(tokens); openPar = 0

        for token in iterTokens:
            if token == ')':
                openPar -= 1
            elif token == '(':
                openPar += 1
                if openPar > 0:
                    # found the last unclosed parenthesis
                    break
        else:
            return []
        # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
        ids = []
        isId = re.compile(r'\w+$').match

        while True:
            try:
                ids.append(next(iterTokens))
                if not isId(ids[-1]):
                    ids.pop(); break
                if not next(iterTokens) == '.':
                    break
            except StopIteration:
                break
        # lookup the candidate callable matches either using global_matches
        # or attr_matches for dotted names
        if len(ids) == 1:
            callableMatches = self.global_matches(ids[0])
        else:
            callableMatches = self.attr_matches('.'.join(ids[::-1]))
        argMatches = []
        for callableMatch in callableMatches:
            try:
                namedArgs = self._default_arguments(eval(callableMatch,
                                                        self.namespace))
            except:
                continue

            for namedArg in namedArgs:
                if namedArg.startswith(text):
                    argMatches.append(u"%s=" %namedArg)
        return argMatches
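
    # Worked sketch (assumed): with ``def foo(a, bar=1): ...`` in the
    # namespace and the buffer reading ``foo(1, b``, step 1 finds ``foo`` as
    # the nearest callable before the unclosed parenthesis, and the named
    # arguments matching the typed prefix are returned:
    #
    #     >>> comp.python_func_kw_matches('b')
    #     ['bar=']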

    def dict_key_matches(self, text):
        "Match string keys in a dictionary, after e.g. 'foo[' "
        def get_keys(obj):
            # Objects can define their own completions by defining an
            # _ipy_key_completions_() method.
            method = get_real_method(obj, '_ipython_key_completions_')
            if method is not None:
                return method()

            # Special case some common in-memory dict-like types
            if isinstance(obj, dict) or\
               _safe_isinstance(obj, 'pandas', 'DataFrame'):
                try:
                    return list(obj.keys())
                except Exception:
                    return []
            elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
                 _safe_isinstance(obj, 'numpy', 'void'):
                return obj.dtype.names or []
            return []

        try:
            regexps = self.__dict_key_regexps
        except AttributeError:
            dict_key_re_fmt = r'''(?x)
            (  # match dict-referring expression wrt greedy setting
                %s
            )
            \[   # open bracket
            \s*  # and optional whitespace
            ([uUbB]?  # string prefix (r not handled)
                (?:   # unclosed string
                    '(?:[^']|(?<!\\)\\')*
                |
                    "(?:[^"]|(?<!\\)\\")*
                )
            )?
            $
            '''
            regexps = self.__dict_key_regexps = {
                False: re.compile(dict_key_re_fmt % '''
                                  # identifiers separated by .
                                  (?!\d)\w+
                                  (?:\.(?!\d)\w+)*
                                  '''),
                True: re.compile(dict_key_re_fmt % '''
                                 .+
                                 ''')
            }

        match = regexps[self.greedy].search(self.text_until_cursor)
        if match is None:
            return []

        expr, prefix = match.groups()
        try:
            obj = eval(expr, self.namespace)
        except Exception:
            try:
                obj = eval(expr, self.global_namespace)
            except Exception:
                return []

        keys = get_keys(obj)
        if not keys:
            return keys
        closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
        if not matches:
            return matches
        
        # get the cursor position of
        # - the text being completed
        # - the start of the key text
        # - the start of the completion
        text_start = len(self.text_until_cursor) - len(text)
        if prefix:
            key_start = match.start(2)
            completion_start = key_start + token_offset
        else:
            key_start = completion_start = match.end()
        
        # grab the leading prefix, to make sure all completions start with `text`
        if text_start > key_start:
            leading = ''
        else:
            leading = text[text_start:completion_start]
        
        # the index of the `[` character
        bracket_idx = match.end(1)

        # append closing quote and bracket as appropriate
        # this is *not* appropriate if the opening quote or bracket is outside
        # the text given to this method
        suf = ''
        continuation = self.line_buffer[len(self.text_until_cursor):]
        if key_start > text_start and closing_quote:
            # quotes were opened inside text, maybe close them
            if continuation.startswith(closing_quote):
                continuation = continuation[len(closing_quote):]
            else:
                suf += closing_quote
        if bracket_idx > text_start:
            # brackets were opened inside text, maybe close them
            if not continuation.startswith(']'):
                suf += ']'
        
        return [leading + k + suf for k in matches]
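
    # Worked sketch (assumed): with ``d = {'hello': 1}`` in the namespace and
    # the buffer reading ``d['he``, the regex captures the expression ``d``
    # and the unclosed string prefix ``'he``; the completion then closes the
    # quote and the bracket as appropriate, yielding roughly ``hello']``.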

    def unicode_name_matches(self, text):
        u"""Match Latex-like syntax for unicode characters base 
        on the name of the character.
        
        This does  \\GREEK SMALL LETTER ETA -> η

        Works only on valid python 3 identifier, or on combining characters that 
        will combine to form a valid identifier.
        
        Used on Python 3 only.
        """
        slashpos = text.rfind('\\')
        if slashpos > -1:
            s = text[slashpos+1:]
            try :
                unic = unicodedata.lookup(s)
                # allow combining chars
                if ('a'+unic).isidentifier():
                    return '\\'+s,[unic]
            except KeyError:
                pass
        return u'', []

    def latex_matches(self, text):
        u"""Match Latex syntax for unicode characters.
        
        This does both \\alp -> \\alpha and \\alpha -> α
        
        Used on Python 3 only.
        """
        slashpos = text.rfind('\\')
        if slashpos > -1:
            s = text[slashpos:]
            if s in latex_symbols:
                # Try to complete a full latex symbol to unicode
                # \\alpha -> α
                return s, [latex_symbols[s]]
            else:
                # If a user has partially typed a latex symbol, give them
                # a full list of options \al -> [\aleph, \alpha]
                matches = [k for k in latex_symbols if k.startswith(s)]
                return s, matches
        return u'', []
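
    # Behavior sketch for the two matchers above (outputs assumed):
    #
    #     >>> comp.unicode_name_matches('\\GREEK SMALL LETTER ETA')
    #     ('\\GREEK SMALL LETTER ETA', ['η'])
    #     >>> comp.latex_matches('\\alpha')
    #     ('\\alpha', ['α'])
    #     >>> comp.latex_matches('\\al')
    #     ('\\al', ['\\aleph', '\\alpha', ...])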

    def dispatch_custom_completer(self, text):
        if not self.custom_completers:
            return

        line = self.line_buffer
        if not line.strip():
            return None

        # Create a little structure to pass all the relevant information about
        # the current completion to any custom completer.
        event = Bunch()
        event.line = line
        event.symbol = text
        cmd = line.split(None,1)[0]
        event.command = cmd
        event.text_until_cursor = self.text_until_cursor

        # for foo etc, try also to find completer for %foo
        if not cmd.startswith(self.magic_escape):
            try_magic = self.custom_completers.s_matches(
                self.magic_escape + cmd)
        else:
            try_magic = []

        for c in itertools.chain(self.custom_completers.s_matches(cmd),
                 try_magic,
                 self.custom_completers.flat_matches(self.text_until_cursor)):
            try:
                res = c(event)
                if res:
                    # first, try case sensitive match
                    withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
                    if withcase:
                        return withcase
                    # if none, then case insensitive ones are ok too
                    text_low = text.lower()
                    return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
            except TryNext:
                pass

        return None
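
    # Registration sketch (assumed): custom completers are normally installed
    # through the shell's hook system, e.g.:
    #
    #     def apt_completer(self, event):
    #         return ['install', 'remove', 'update']
    #
    #     get_ipython().set_hook('complete_command', apt_completer,
    #                            str_key='apt')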

    @_strip_single_trailing_space
    def complete(self, text=None, line_buffer=None, cursor_pos=None):
        """Find completions for the given text and line context.

        Note that both the text and the line_buffer are optional, but at least
        one of them must be given.

        Parameters
        ----------
        text : string, optional
            Text to perform the completion on.  If not given, the line buffer
            is split using the instance's CompletionSplitter object.

        line_buffer : string, optional
            If not given, the completer attempts to obtain the current line
            buffer via readline.  This keyword allows clients requesting text
            completions in non-readline contexts to inform the completer of
            the entire text.

        cursor_pos : int, optional
            Index of the cursor in the full line buffer.  Should be provided
            by remote frontends where the kernel has no access to frontend
            state.

        Returns
        -------
        text : str
          Text that was actually used in the completion.

        matches : list
          A list of completion matches.
        """
        # if the cursor position isn't given, the only sane assumption we can
        # make is that it's at the end of the line (the common case)
        if cursor_pos is None:
            cursor_pos = len(line_buffer) if text is None else len(text)

        if self.use_main_ns:
            self.namespace = __main__.__dict__

        if PY3:

            base_text = text if not line_buffer else line_buffer[:cursor_pos]
            latex_text, latex_matches = self.latex_matches(base_text)
            if latex_matches:
                return latex_text, latex_matches
            name_text = ''
            name_matches = []
            for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
                name_text, name_matches = meth(base_text)
                if name_text:
                    return name_text, name_matches
        
        # if text is either None or an empty string, rely on the line buffer
        if not text:
            text = self.splitter.split_line(line_buffer, cursor_pos)

        # If no line buffer is given, assume the input text is all there was
        if line_buffer is None:
            line_buffer = text

        self.line_buffer = line_buffer
        self.text_until_cursor = self.line_buffer[:cursor_pos]

        # Start with a clean slate of completions
        self.matches[:] = []
        custom_res = self.dispatch_custom_completer(text)
        if custom_res is not None:
            # did custom completers produce something?
            self.matches = custom_res
        else:
            # Extend the list of completions with the results of each
            # matcher, so we return results to the user from all
            # namespaces.
            if self.merge_completions:
                self.matches = []
                for matcher in self.matchers:
                    try:
                        self.matches.extend(matcher(text))
                    except:
                        # Show the ugly traceback if the matcher causes an
                        # exception, but do NOT crash the kernel!
                        sys.excepthook(*sys.exc_info())
            else:
                for matcher in self.matchers:
                    self.matches = matcher(text)
                    if self.matches:
                        break
        # FIXME: we should extend our api to return a dict with completions for
        # different types of objects.  The rlcomplete() method could then
        # simply collapse the dict into a list for readline, but we'd have
        # richer completion semantics in other environments.
        self.matches = sorted(set(self.matches), key=completions_sorting_key)

        return text, self.matches
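
# Usage sketch (assumed, in a live session): ``complete`` returns the text
# that was actually completed on, plus the list of matches:
#
#     >>> ip = get_ipython()
#     >>> ip.Completer.complete(line_buffer='imp', cursor_pos=3)
#     ('imp', ['import', ...])
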
class TerminalInteractiveShell(InteractiveShell):
    space_for_menu = Integer(
        6,
        help='Number of lines at the bottom of the screen '
        'to reserve for the completion menu').tag(config=True)

    def _space_for_menu_changed(self, old, new):
        self._update_layout()

    pt_cli = None
    debugger_history = None
    _pt_app = None

    simple_prompt = Bool(
        _use_simple_prompt,
        help=
        """Use `raw_input` for the REPL, without completion and prompt colors.

            Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR. Known usage are:
            IPython own testing machinery, and emacs inferior-shell integration through elpy.

            This mode default to `True` if the `IPY_TEST_SIMPLE_PROMPT`
            environment variable is set, or the current terminal is not a tty."""
    ).tag(config=True)

    @property
    def debugger_cls(self):
        return Pdb if self.simple_prompt else TerminalPdb

    confirm_exit = Bool(
        True,
        help="""
        Set to confirm when you try to exit IPython with an EOF (Control-D
        in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
        you can force a direct exit without any confirmation.""",
    ).tag(config=True)

    editing_mode = Unicode(
        'emacs',
        help="Shortcut style to use at the prompt. 'vi' or 'emacs'.",
    ).tag(config=True)

    mouse_support = Bool(
        False,
        help=
        "Enable mouse support in the prompt\n(Note: prevents selecting text with the mouse)"
    ).tag(config=True)

    # We don't load the list of styles for the help string, because loading
    # Pygments plugins takes time and can cause unexpected errors.
    highlighting_style = Union(
        [Unicode('legacy'), Type(klass=Style)],
        help="""The name or class of a Pygments style to use for syntax
        highlighting. To see available styles, run `pygmentize -L styles`."""
    ).tag(config=True)

    @observe('highlighting_style')
    @observe('colors')
    def _highlighting_style_changed(self, change):
        self.refresh_style()

    def refresh_style(self):
        self._style = self._make_style_from_name_or_cls(
            self.highlighting_style)

    highlighting_style_overrides = Dict(
        help="Override highlighting format for specific tokens").tag(
            config=True)

    true_color = Bool(
        False,
        help=("Use 24bit colors instead of 256 colors in prompt highlighting. "
              "If your terminal supports true color, the following command "
              "should print 'TRUECOLOR' in orange: "
              "printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\"")).tag(
                  config=True)

    editor = Unicode(
        get_default_editor(),
        help="Set the editor used by IPython (default to $EDITOR/vi/notepad)."
    ).tag(config=True)

    prompts_class = Type(
        Prompts,
        help='Class used to generate Prompt token for prompt_toolkit').tag(
            config=True)

    prompts = Instance(Prompts)

    @default('prompts')
    def _prompts_default(self):
        return self.prompts_class(self)

    @observe('prompts')
    def _(self, change):
        self._update_layout()

    @default('displayhook_class')
    def _displayhook_class_default(self):
        return RichPromptDisplayHook

    term_title = Bool(
        True, help="Automatically set the terminal title").tag(config=True)

    term_title_format = Unicode(
        "IPython: {cwd}",
        help=
        "Customize the terminal title format.  This is a python format string. "
        + "Available substitutions are: {cwd}.").tag(config=True)

    display_completions = Enum(
        ('column', 'multicolumn', 'readlinelike'),
        help=("How to display tab completions: 'column', 'multicolumn', or "
              "'readlinelike'. These options are passed to `prompt_toolkit`; "
              "see the `prompt_toolkit` documentation for more information."),
        default_value='multicolumn').tag(config=True)

    highlight_matching_brackets = Bool(
        True,
        help="Highlight matching brackets.",
    ).tag(config=True)

    extra_open_editor_shortcuts = Bool(
        False,
        help=
        "Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. "
        "This is in addition to the F2 binding, which is always enabled.").tag(
            config=True)

    handle_return = Any(
        None,
        help="Provide an alternative handler to be called when the user presses "
        "Return. This is an advanced option intended for debugging, which "
        "may be changed or removed in later releases.").tag(config=True)

    @observe('term_title')
    def init_term_title(self, change=None):
        # Enable or disable the terminal title.
        if self.term_title:
            toggle_set_term_title(True)
            set_term_title(self.term_title_format.format(cwd=abbrev_cwd()))
        else:
            toggle_set_term_title(False)

    def init_display_formatter(self):
        super(TerminalInteractiveShell, self).init_display_formatter()
        # terminal only supports plain text
        self.display_formatter.active_types = ['text/plain']
        # disable `_ipython_display_`
        self.display_formatter.ipython_display_formatter.enabled = False

    def init_prompt_toolkit_cli(self):
        if self.simple_prompt:
            # Fall back to plain non-interactive output for tests.
            # This is very limited.
            def prompt():
                isp = self.input_splitter
                prompt_text = "".join(x[1]
                                      for x in self.prompts.in_prompt_tokens())
                prompt_continuation = "".join(
                    x[1] for x in self.prompts.continuation_prompt_tokens())
                while isp.push_accepts_more():
                    line = cast_unicode_py2(input(prompt_text))
                    isp.push(line)
                    prompt_text = prompt_continuation
                return isp.source_reset()

            self.prompt_for_code = prompt
            return

        # Set up keyboard shortcuts
        kbmanager = KeyBindingManager.for_prompt(
            enable_open_in_editor=self.extra_open_editor_shortcuts, )
        register_ipython_shortcuts(kbmanager.registry, self)

        # Pre-populate history from IPython's history database
        history = InMemoryHistory()
        last_cell = u""
        for __, ___, cell in self.history_manager.get_tail(
                self.history_load_length, include_latest=True):
            # Ignore blank lines and consecutive duplicates
            cell = cell.rstrip()
            if cell and (cell != last_cell):
                history.append(cell)
                last_cell = cell

        self._style = self._make_style_from_name_or_cls(
            self.highlighting_style)
        self.style = DynamicStyle(lambda: self._style)

        editing_mode = getattr(EditingMode, self.editing_mode.upper())

        def patch_stdout(**kwargs):
            return self.pt_cli.patch_stdout_context(**kwargs)

        self._pt_app = create_prompt_application(
            editing_mode=editing_mode,
            key_bindings_registry=kbmanager.registry,
            history=history,
            completer=IPythonPTCompleter(shell=self,
                                         patch_stdout=patch_stdout),
            enable_history_search=True,
            style=self.style,
            mouse_support=self.mouse_support,
            **self._layout_options())
        self._eventloop = create_eventloop(self.inputhook)
        self.pt_cli = CommandLineInterface(
            self._pt_app,
            eventloop=self._eventloop,
            output=create_output(true_color=self.true_color))

    def _make_style_from_name_or_cls(self, name_or_cls):
        """
        Small wrapper that make an IPython compatible style from a style name

        We need that to add style for prompt ... etc.
        """
        style_overrides = {}
        if name_or_cls == 'legacy':
            legacy = self.colors.lower()
            if legacy == 'linux':
                style_cls = get_style_by_name('monokai')
                style_overrides = _style_overrides_linux
            elif legacy == 'lightbg':
                style_overrides = _style_overrides_light_bg
                style_cls = get_style_by_name('pastie')
            elif legacy == 'neutral':
                # The default theme needs to be visible on both a dark background
                # and a light background, because we can't tell what the terminal
                # looks like. These tweaks to the default theme help with that.
                style_cls = get_style_by_name('default')
                style_overrides.update({
                    Token.Number: '#007700',
                    Token.Operator: 'noinherit',
                    Token.String: '#BB6622',
                    Token.Name.Function: '#2080D0',
                    Token.Name.Class: 'bold #2080D0',
                    Token.Name.Namespace: 'bold #2080D0',
                    Token.Prompt: '#009900',
                    Token.PromptNum: '#00ff00 bold',
                    Token.OutPrompt: '#990000',
                    Token.OutPromptNum: '#ff0000 bold',
                })

                # Hack: Due to limited color support on the Windows console
                # the prompt colors will be wrong without this
                if os.name == 'nt':
                    style_overrides.update({
                        Token.Prompt: '#ansidarkgreen',
                        Token.PromptNum: '#ansigreen bold',
                        Token.OutPrompt: '#ansidarkred',
                        Token.OutPromptNum: '#ansired bold',
                    })
            elif legacy == 'nocolor':
                style_cls = _NoStyle
                style_overrides = {}
            else:
                raise ValueError('Got unknown colors: %r' % legacy)
        else:
            if isinstance(name_or_cls, str):
                style_cls = get_style_by_name(name_or_cls)
            else:
                style_cls = name_or_cls
            style_overrides = {
                Token.Prompt: '#009900',
                Token.PromptNum: '#00ff00 bold',
                Token.OutPrompt: '#990000',
                Token.OutPromptNum: '#ff0000 bold',
            }
        style_overrides.update(self.highlighting_style_overrides)
        style = PygmentsStyle.from_defaults(pygments_style_cls=style_cls,
                                            style_dict=style_overrides)

        return style

    def _layout_options(self):
        """
        Return the current layout options for the Terminal InteractiveShell.
        """
        return {
            'lexer':
            IPythonPTLexer(),
            'reserve_space_for_menu':
            self.space_for_menu,
            'get_prompt_tokens':
            self.prompts.in_prompt_tokens,
            'get_continuation_tokens':
            self.prompts.continuation_prompt_tokens,
            'multiline':
            True,
            'display_completions_in_columns':
            (self.display_completions == 'multicolumn'),

            # Highlight matching brackets, but only when this setting is
            # enabled, and only when the DEFAULT_BUFFER has the focus.
            'extra_input_processors': [
                ConditionalProcessor(
                    processor=HighlightMatchingBracketProcessor(
                        chars='[](){}'),
                    filter=HasFocus(DEFAULT_BUFFER) & ~IsDone()
                    & Condition(lambda cli: self.highlight_matching_brackets))
            ],
        }

    def _update_layout(self):
        """
        Ask for a recomputation of the application layout if, for example,
        some configuration options have changed.
        """
        if self._pt_app:
            self._pt_app.layout = create_prompt_layout(
                **self._layout_options())

    def prompt_for_code(self):
        with self.pt_cli.patch_stdout_context(raw=True):
            document = self.pt_cli.run(pre_run=self.pre_prompt,
                                       reset_current_buffer=True)
        return document.text

    def enable_win_unicode_console(self):
        if sys.version_info >= (3, 6):
            # Since PEP 528, Python uses the unicode APIs for the Windows
            # console by default, so WUC shouldn't be needed.
            return

        import win_unicode_console
        win_unicode_console.enable()

    def init_io(self):
        if sys.platform not in {'win32', 'cli'}:
            return

        self.enable_win_unicode_console()

        import colorama
        colorama.init()

        # For some reason we make these wrappers around stdout/stderr.
        # For now, we need to reset them so all output gets coloured.
        # https://github.com/ipython/ipython/issues/8669
        # io.std* are deprecated, but don't show our own deprecation warnings
        # during initialization of the deprecated API.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            io.stdout = io.IOStream(sys.stdout)
            io.stderr = io.IOStream(sys.stderr)

    def init_magics(self):
        super(TerminalInteractiveShell, self).init_magics()
        self.register_magics(TerminalMagics)

    def init_alias(self):
        # The parent class defines aliases that can be safely used with any
        # frontend.
        super(TerminalInteractiveShell, self).init_alias()

        # Now define aliases that only make sense on the terminal, because they
        # need direct access to the console in a way that we can't emulate in
        # GUI or web frontend
        if os.name == 'posix':
            for cmd in ['clear', 'more', 'less', 'man']:
                self.alias_manager.soft_define_alias(cmd, cmd)

    def __init__(self, *args, **kwargs):
        super(TerminalInteractiveShell, self).__init__(*args, **kwargs)
        self.init_prompt_toolkit_cli()
        self.init_term_title()
        self.keep_running = True

        self.debugger_history = InMemoryHistory()

    def ask_exit(self):
        self.keep_running = False

    rl_next_input = None

    def pre_prompt(self):
        if self.rl_next_input:
            # We can't set the buffer here, because it will be reset just after
            # this. Adding a callable to pre_run_callables does what we need
            # after the buffer is reset.
            s = self.rl_next_input

            def set_doc():
                self.pt_cli.application.buffer.document = Document(s)

            if hasattr(self.pt_cli, 'pre_run_callables'):
                self.pt_cli.pre_run_callables.append(set_doc)
            else:
                # Older version of prompt_toolkit; it's OK to set the document
                # directly here.
                set_doc()
            self.rl_next_input = None

    def interact(self, display_banner=DISPLAY_BANNER_DEPRECATED):

        if display_banner is not DISPLAY_BANNER_DEPRECATED:
            warn(
                'interact `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.',
                DeprecationWarning,
                stacklevel=2)

        self.keep_running = True
        while self.keep_running:
            print(self.separate_in, end='')

            try:
                code = self.prompt_for_code()
            except EOFError:
                if (not self.confirm_exit) \
                        or self.ask_yes_no('Do you really want to exit ([y]/n)?','y','n'):
                    self.ask_exit()

            else:
                if code:
                    self.run_cell(code, store_history=True)

    def mainloop(self, display_banner=DISPLAY_BANNER_DEPRECATED):
        # An extra layer of protection in case someone mashing Ctrl-C breaks
        # out of our internal code.
        if display_banner is not DISPLAY_BANNER_DEPRECATED:
            warn(
                'mainloop `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.',
                DeprecationWarning,
                stacklevel=2)
        while True:
            try:
                self.interact()
                break
            except KeyboardInterrupt as e:
                print("\n%s escaped interact()\n" % type(e).__name__)
            finally:
                # An interrupt during the eventloop will mess up the
                # internal state of the prompt_toolkit library.
                # Stopping the eventloop fixes this, see
                # https://github.com/ipython/ipython/pull/9867
                if hasattr(self, '_eventloop'):
                    self._eventloop.stop()

    _inputhook = None

    def inputhook(self, context):
        if self._inputhook is not None:
            self._inputhook(context)

    active_eventloop = None

    def enable_gui(self, gui=None):
        if gui:
            self.active_eventloop, self._inputhook =\
                get_inputhook_name_and_func(gui)
        else:
            self.active_eventloop = self._inputhook = None

    # Run !system commands directly, not through pipes, so terminal programs
    # work correctly.
    system = InteractiveShell.system_raw

    def auto_rewrite_input(self, cmd):
        """Overridden from the parent class to use fancy rewriting prompt"""
        if not self.show_rewritten_input:
            return

        tokens = self.prompts.rewrite_prompt_tokens()
        if self.pt_cli:
            self.pt_cli.print_tokens(tokens)
            print(cmd)
        else:
            prompt = ''.join(s for t, s in tokens)
            print(prompt, cmd, sep='')

    _prompts_before = None

    def switch_doctest_mode(self, mode):
        """Switch prompts to classic for %doctest_mode"""
        if mode:
            self._prompts_before = self.prompts
            self.prompts = ClassicPrompts(self)
        elif self._prompts_before:
            self.prompts = self._prompts_before
            self._prompts_before = None
        self._update_layout()
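
A minimal configuration sketch (not part of the original example): every trait above tagged config=True can be set from an IPython profile's ipython_config.py, where `c = get_config()` is the canonical opening line. The values below are illustrative.

c = get_config()

c.TerminalInteractiveShell.editing_mode = 'vi'             # 'vim' and 'default' are normalized by the validator
c.TerminalInteractiveShell.highlighting_style = 'monokai'  # any Pygments style name, or a Style subclass
c.TerminalInteractiveShell.mouse_support = True            # note: prevents selecting text with the mouse
c.TerminalInteractiveShell.confirm_exit = False
c.TerminalInteractiveShell.space_for_menu = 8              # rows reserved for the completion menu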
Example #5
class NotebookNotary(LoggingConfigurable):
    """A class for computing and verifying notebook signatures."""

    data_dir = Unicode(
        help="""The storage directory for notary secret and database.""").tag(
            config=True)

    @default('data_dir')
    def _data_dir_default(self):
        app = None
        try:
            if JupyterApp.initialized():
                app = JupyterApp.instance()
        except MultipleInstanceError:
            pass
        if app is None:
            # create an app, without the global instance
            app = JupyterApp()
            app.initialize(argv=[])
        return app.data_dir

    store_factory = Callable(
        help="""A callable returning the storage backend for notebook signatures.
         The default uses an SQLite database.""").tag(config=True)

    @default('store_factory')
    def _store_factory_default(self):
        def factory():
            if sqlite3 is None:
                self.log.warning(
                    "Missing SQLite3, all notebooks will be untrusted!")
                return MemorySignatureStore()
            return SQLiteSignatureStore(self.db_file)

        return factory

    db_file = Unicode(
        help="""The sqlite file in which to store notebook signatures.
        By default, this will be in your Jupyter data directory.
        You can set it to ':memory:' to disable sqlite writing to the filesystem.
        """).tag(config=True)

    @default('db_file')
    def _db_file_default(self):
        if not self.data_dir:
            return ':memory:'
        return os.path.join(self.data_dir, u'nbsignatures.db')

    algorithm = Enum(
        algorithms,
        default_value='sha256',
        help="""The hashing algorithm used to sign notebooks.""").tag(
            config=True)

    @observe('algorithm')
    def _algorithm_changed(self, change):
        self.digestmod = getattr(hashlib, change.new)

    digestmod = Any()

    @default('digestmod')
    def _digestmod_default(self):
        return getattr(hashlib, self.algorithm)

    secret_file = Unicode(
        help="""The file where the secret key is stored.""").tag(config=True)

    @default('secret_file')
    def _secret_file_default(self):
        if not self.data_dir:
            return ''
        return os.path.join(self.data_dir, 'notebook_secret')

    secret = Bytes(
        help="""The secret key with which notebooks are signed.""").tag(
            config=True)

    @default('secret')
    def _secret_default(self):
        # note : this assumes an Application is running
        if os.path.exists(self.secret_file):
            with io.open(self.secret_file, 'rb') as f:
                return f.read()
        else:
            secret = encodebytes(os.urandom(1024))
            self._write_secret_file(secret)
            return secret

    def __init__(self, **kwargs):
        super(NotebookNotary, self).__init__(**kwargs)
        self.store = self.store_factory()

    def _write_secret_file(self, secret):
        """write my secret to my secret_file"""
        self.log.info("Writing notebook-signing key to %s", self.secret_file)
        with io.open(self.secret_file, 'wb') as f:
            f.write(secret)
        try:
            os.chmod(self.secret_file, 0o600)
        except OSError:
            self.log.warning("Could not set permissions on %s",
                             self.secret_file)
        return secret

    def compute_signature(self, nb):
        """Compute a notebook's signature

        by hashing the entire contents of the notebook via HMAC digest.
        """
        hmac = HMAC(self.secret, digestmod=self.digestmod)
        # don't include the previous hash in the content to hash
        with signature_removed(nb):
            # sign the whole thing
            for b in yield_everything(nb):
                hmac.update(b)

        return hmac.hexdigest()

    def check_signature(self, nb):
        """Check a notebook's stored signature

        If a signature is stored in the notebook's metadata,
        a new signature is computed and compared with the stored value.

        Returns True if the signature is found and matches, False otherwise.

        The following conditions must all be met for a notebook to be trusted:
        - a signature is stored in the form 'scheme:hexdigest'
        - the stored scheme matches the requested scheme
        - the requested scheme is available from hashlib
        - the computed hash from compute_signature matches the stored hash
        """
        if nb.nbformat < 3:
            return False
        signature = self.compute_signature(nb)
        return self.store.check_signature(signature, self.algorithm)

    def sign(self, nb):
        """Sign a notebook, indicating that its output is trusted on this machine

        Stores hash algorithm and hmac digest in a local database of trusted notebooks.
        """
        if nb.nbformat < 3:
            return
        signature = self.compute_signature(nb)
        self.store.store_signature(signature, self.algorithm)

    def unsign(self, nb):
        """Ensure that a notebook is untrusted

        by removing its signature from the trusted database, if present.
        """
        signature = self.compute_signature(nb)
        self.store.remove_signature(signature, self.algorithm)

    def mark_cells(self, nb, trusted):
        """Mark cells as trusted if the notebook's signature can be verified

        Sets ``cell.metadata.trusted = True | False`` on all code cells,
        depending on the *trusted* parameter. This will typically be the return
        value from ``self.check_signature(nb)``.

        This function is the inverse of check_cells.
        """
        if nb.nbformat < 3:
            return

        for cell in yield_code_cells(nb):
            cell['metadata']['trusted'] = trusted

    def _check_cell(self, cell, nbformat_version):
        """Do we trust an individual cell?

        Return True if:

        - cell is explicitly trusted
        - cell has no potentially unsafe rich output

        If a cell has no output, or only simple print statements,
        it will always be trusted.
        """
        # explicitly trusted
        if cell['metadata'].pop("trusted", False):
            return True

        # explicitly safe output
        if nbformat_version >= 4:
            unsafe_output_types = ['execute_result', 'display_data']
            safe_keys = {"output_type", "execution_count", "metadata"}
        else:  # v3
            unsafe_output_types = ['pyout', 'display_data']
            safe_keys = {"output_type", "prompt_number", "metadata"}

        for output in cell['outputs']:
            output_type = output['output_type']
            if output_type in unsafe_output_types:
                # if there are any data keys not in the safe whitelist
                output_keys = set(output)
                if output_keys.difference(safe_keys):
                    return False

        return True

    def check_cells(self, nb):
        """Return whether all code cells are trusted.

        A cell is trusted if the 'trusted' field in its metadata is truthy, or
        if it has no potentially unsafe outputs.
        If there are no code cells, return True.

        This function is the inverse of mark_cells.
        """
        if nb.nbformat < 3:
            return False
        trusted = True
        for cell in yield_code_cells(nb):
            # only distrust a cell if it actually has some output to distrust
            if not self._check_cell(cell, nb.nbformat):
                trusted = False

        return trusted
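
A short usage sketch (not part of the original example): signing a notebook and checking it back with the methods defined above. Assumes nbformat is installed and that 'example.ipynb' exists; the class itself lives in nbformat.sign.

import nbformat
from nbformat.sign import NotebookNotary

nb = nbformat.read('example.ipynb', as_version=4)
notary = NotebookNotary()

notary.sign(nb)                        # record scheme:hexdigest in the trusted database
trusted = notary.check_signature(nb)   # True: the stored signature now matches
notary.mark_cells(nb, trusted)         # stamp cell.metadata.trusted on every code cell
print(trusted, notary.check_cells(nb))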
Example #6
    class JupytextContentsManager(base_contents_manager_class, Configurable):
        """
        A FileContentsManager Class that reads and stores notebooks to classical
        Jupyter notebooks (.ipynb), R Markdown notebooks (.Rmd), Julia (.jl),
        Python (.py) or R scripts (.R)
        """

        # Dictionary: notebook path => (fmt, formats) where fmt is the current format, and formats the paired formats.
        paired_notebooks = dict()

        def all_nb_extensions(self):
            """All extensions that should be classified as notebooks"""
            return [ext if ext.startswith('.') else '.' + ext for ext in self.notebook_extensions.split(',')]

        default_jupytext_formats = Unicode(
            u'',
            help='Save notebooks to these file extensions. '
                 'Can be any of ipynb,Rmd,md,jl,py,R,nb.jl,nb.py,nb.R '
                 'comma separated. If you want another format than the '
                 'default one, append the format name to the extension, '
                 'e.g. ipynb,py:percent to save the notebook to '
                 'hydrogen/spyder/vscode compatible scripts',
            config=True)

        preferred_jupytext_formats_save = Unicode(
            u'',
            help='Preferred format when saving notebooks as text, per extension. '
                 'Use "jl:percent,py:percent,R:percent" if you want to save '
                 'Julia, Python and R scripts in the double percent format and '
                 'only write "jupytext_formats": "py" in the notebook metadata.',
            config=True)

        preferred_jupytext_formats_read = Unicode(
            u'',
            help='Preferred format when reading notebooks from text, per '
                 'extension. Use "py:sphinx" if you want to read all python '
                 'scripts as Sphinx gallery scripts.',
            config=True)

        default_notebook_metadata_filter = Unicode(
            u'',
            help="Cell metadata that should be save in the text representations. "
                 "Examples: 'all', '-all', 'widgets,nteract', 'kernelspec,jupytext-all'",
            config=True)

        default_cell_metadata_filter = Unicode(
            u'',
            help="Notebook metadata that should be saved in the text representations. "
                 "Examples: 'all', 'hide_input,hide_output'",
            config=True)

        comment_magics = Enum(
            values=[True, False],
            allow_none=True,
            help='Should Jupyter magic commands be commented out in the text representation?',
            config=True)

        split_at_heading = Bool(
            False,
            help='Split markdown cells on headings (Markdown and R Markdown formats only)',
            config=True)

        sphinx_convert_rst2md = Bool(
            False,
            help='When opening a Sphinx Gallery script, convert the reStructuredText to markdown',
            config=True)

        outdated_text_notebook_margin = Float(
            1.0,
            help='Refuse to overwrite inputs of an ipynb notebook with those of a '
                 'text notebook when the text notebook plus margin is older than '
                 'the ipynb notebook',
            config=True)

        default_cell_markers = Unicode(
            u'',
            help='Start and end cell markers for the light format, comma separated. Use "{{{,}}}" to mark cells '
                 'as foldable regions in Vim, and "region,endregion" to mark cells as Vscode/PyCharm regions',
            config=True)

        notebook_extensions = Unicode(
            u','.join(NOTEBOOK_EXTENSIONS),
            help='A comma separated list of notebook extensions',
            config=True)

        def drop_paired_notebook(self, path):
            """Remove the current notebook from the list of paired notebooks"""
            if path not in self.paired_notebooks:
                return

            fmt, formats = self.paired_notebooks.pop(path)
            prev_paired_paths = paired_paths(path, fmt, formats)
            for alt_path, _ in prev_paired_paths:
                if alt_path in self.paired_notebooks:
                    self.drop_paired_notebook(alt_path)

        def update_paired_notebooks(self, path, fmt, formats):
            """Update the list of paired notebooks to include/update the current pair"""
            if not formats:
                self.drop_paired_notebook(path)
                return

            new_paired_paths = paired_paths(path, fmt, formats)
            for alt_path, _ in new_paired_paths:
                self.drop_paired_notebook(alt_path)

            long_formats = long_form_multiple_formats(formats)
            if len(long_formats) == 1 and set(long_formats[0]) <= {'extension'}:
                return

            short_formats = short_form_multiple_formats(formats)
            for alt_path, alt_fmt in new_paired_paths:
                self.paired_notebooks[alt_path] = short_form_one_format(alt_fmt), short_formats

        def set_default_format_options(self, format_options, read=False):
            """Set default format option"""
            if self.default_notebook_metadata_filter:
                format_options.setdefault('notebook_metadata_filter', self.default_notebook_metadata_filter)
            if self.default_cell_metadata_filter:
                format_options.setdefault('cell_metadata_filter', self.default_cell_metadata_filter)
            if self.comment_magics is not None:
                format_options.setdefault('comment_magics', self.comment_magics)
            if self.split_at_heading:
                format_options.setdefault('split_at_heading', self.split_at_heading)
            if not read and self.default_cell_markers:
                format_options.setdefault('cell_markers', self.default_cell_markers)
            if read and self.sphinx_convert_rst2md:
                format_options.setdefault('rst2md', self.sphinx_convert_rst2md)

        def default_formats(self, path):
            """Return the default formats, if they apply to the current path #157"""
            formats = long_form_multiple_formats(self.default_jupytext_formats)
            for fmt in formats:
                try:
                    base_path(path, fmt)
                    return self.default_jupytext_formats
                except InconsistentPath:
                    continue

            return None

        def create_prefix_dir(self, path, fmt):
            """Create the prefix dir, if missing"""
            create_prefix_dir_from_path(self._get_os_path(path.strip('/')), fmt)

        def save(self, model, path=''):
            """Save the file model and return the model with no content."""
            if model['type'] != 'notebook':
                return super(JupytextContentsManager, self).save(model, path)

            nbk = model['content']
            try:
                metadata = nbk.get('metadata')
                rearrange_jupytext_metadata(metadata)
                jupytext_metadata = metadata.setdefault('jupytext', {})
                jupytext_formats = jupytext_metadata.get('formats') or self.default_formats(path)

                if not jupytext_formats:
                    text_representation = jupytext_metadata.get('text_representation', {})
                    ext = os.path.splitext(path)[1]
                    fmt = {'extension': ext}

                    if ext == text_representation.get('extension') and text_representation.get('format_name'):
                        fmt['format_name'] = text_representation.get('format_name')

                    jupytext_formats = [fmt]

                jupytext_formats = long_form_multiple_formats(jupytext_formats, metadata,
                                                              auto_ext_requires_language_info=False)

                # Set preferred formats if no format name is given yet
                jupytext_formats = [preferred_format(f, self.preferred_jupytext_formats_save) for f in jupytext_formats]

                base, fmt = find_base_path_and_format(path, jupytext_formats)
                self.update_paired_notebooks(path, fmt, jupytext_formats)
                self.set_default_format_options(jupytext_metadata)

                if not jupytext_metadata:
                    metadata.pop('jupytext')

                # Save as ipynb first
                return_value = None
                value = None
                for fmt in jupytext_formats[::-1]:
                    if fmt['extension'] != '.ipynb':
                        continue

                    alt_path = full_path(base, fmt)
                    self.create_prefix_dir(alt_path, fmt)
                    self.log.info("Saving %s", os.path.basename(alt_path))
                    value = super(JupytextContentsManager, self).save(model, alt_path)
                    if alt_path == path:
                        return_value = value

                # And then to the other formats, in reverse order so that
                # the first format is the most recent
                for fmt in jupytext_formats[::-1]:
                    if fmt['extension'] == '.ipynb':
                        continue

                    alt_path = full_path(base, fmt)
                    self.create_prefix_dir(alt_path, fmt)
                    if 'format_name' in fmt and fmt['extension'] not in ['.md', '.markdown', '.Rmd']:
                        self.log.info("Saving %s in format %s:%s",
                                      os.path.basename(alt_path), fmt['extension'][1:], fmt['format_name'])
                    else:
                        self.log.info("Saving %s", os.path.basename(alt_path))
                    with mock.patch('nbformat.writes', _jupytext_writes(fmt)):
                        value = super(JupytextContentsManager, self).save(model, alt_path)
                        if alt_path == path:
                            return_value = value

                # Update modified timestamp to match that of the pair #207
                return_value['last_modified'] = value['last_modified']
                return return_value

            except Exception as err:
                raise HTTPError(400, str(err))

        def get(self, path, content=True, type=None, format=None, load_alternative_format=True):
            """ Takes a path for an entity and returns its model"""
            os_path = self._get_os_path(path.strip('/'))
            ext = os.path.splitext(path)[1]

            # Not a notebook?
            if (not self.exists(path) or os.path.isdir(os_path) or
                    (type != 'notebook' if type else ext not in self.all_nb_extensions())):
                return super(JupytextContentsManager, self).get(path, content, type, format)

            fmt = preferred_format(ext, self.preferred_jupytext_formats_read)
            if ext == '.ipynb':
                model = self._notebook_model(path, content=content)
            else:
                self.set_default_format_options(fmt, read=True)
                with mock.patch('nbformat.reads', _jupytext_reads(fmt)):
                    model = self._notebook_model(path, content=content)

            if not load_alternative_format:
                return model

            if not content:
                # The modification time of a paired notebook, in this context (Jupyter
                # checks the timestamp before saving), is the most recent among all representations #118
                if path not in self.paired_notebooks:
                    return model

                fmt, formats = self.paired_notebooks.get(path)
                for alt_path, _ in paired_paths(path, fmt, formats):
                    if alt_path != path and self.exists(alt_path):
                        alt_model = self._notebook_model(alt_path, content=False)
                        if alt_model['last_modified'] > model['last_modified']:
                            model['last_modified'] = alt_model['last_modified']

                return model

            # We will now read a second file if this is a paired notebook.
            nbk = model['content']
            jupytext_formats = nbk.metadata.get('jupytext', {}).get('formats') or self.default_formats(path)
            jupytext_formats = long_form_multiple_formats(jupytext_formats, nbk.metadata,
                                                          auto_ext_requires_language_info=False)

            # Compute paired notebooks from formats
            alt_paths = [(path, fmt)]
            if jupytext_formats:
                try:
                    _, fmt = find_base_path_and_format(path, jupytext_formats)
                    alt_paths = paired_paths(path, fmt, jupytext_formats)
                    self.update_paired_notebooks(path, fmt, jupytext_formats)
                except InconsistentPath as err:
                    self.log.info("Unable to read paired notebook: %s", str(err))
            else:
                if path in self.paired_notebooks:
                    fmt, formats = self.paired_notebooks.get(path)
                    alt_paths = paired_paths(path, fmt, formats)

            if len(alt_paths) > 1 and ext == '.ipynb':
                # Apply default options (like saving and reloading would do)
                jupytext_metadata = model['content']['metadata'].get('jupytext', {})
                self.set_default_format_options(jupytext_metadata, read=True)
                if jupytext_metadata:
                    model['content']['metadata']['jupytext'] = jupytext_metadata

            org_model = model
            fmt_inputs = fmt
            path_inputs = path_outputs = path
            model_outputs = None

            # The source format is the first non-ipynb format found on disk
            if path.endswith('.ipynb'):
                for alt_path, alt_fmt in alt_paths:
                    if not alt_path.endswith('.ipynb') and self.exists(alt_path):
                        self.log.info(u'Reading SOURCE from {}'.format(alt_path))
                        path_inputs = alt_path
                        fmt_inputs = alt_fmt
                        model_outputs = model
                        model = self.get(alt_path, content=content, type='notebook', format=format,
                                         load_alternative_format=False)
                        break
            # Outputs are taken from the ipynb file in the group, if it exists
            else:
                for alt_path, _ in alt_paths:
                    if alt_path.endswith('.ipynb') and self.exists(alt_path):
                        self.log.info(u'Reading OUTPUTS from {}'.format(alt_path))
                        path_outputs = alt_path
                        model_outputs = self.get(alt_path, content=content, type='notebook', format=format,
                                                 load_alternative_format=False)
                        break

            try:
                check_file_version(model['content'], path_inputs, path_outputs)
            except Exception as err:
                raise HTTPError(400, str(err))

            # Before we combine the two files, we make sure we're not overwriting ipynb cells
            # with an outdated text file
            try:
                if model_outputs and model_outputs['last_modified'] > model['last_modified'] + \
                        timedelta(seconds=self.outdated_text_notebook_margin):
                    raise HTTPError(
                        400,
                        '''{out} (last modified {out_last})
                        seems more recent than {src} (last modified {src_last})
                        Please either:
                        - open {src} in a text editor, make sure it is up to date, and save it,
                        - or delete {src} if not up to date,
                        - or increase check margin by adding, say,
                            c.ContentsManager.outdated_text_notebook_margin = 5 # in seconds # or float("inf")
                        to your .jupyter/jupyter_notebook_config.py file
                        '''.format(src=path_inputs, src_last=model['last_modified'],
                                   out=path_outputs, out_last=model_outputs['last_modified']))
            except OverflowError:
                pass

            if model_outputs:
                combine_inputs_with_outputs(model['content'], model_outputs['content'], fmt_inputs)
            elif not path.endswith('.ipynb'):
                set_kernelspec_from_language(model['content'])

            # Trust code cells when they have no output
            for cell in model['content'].cells:
                if cell.cell_type == 'code' and not cell.outputs and cell.metadata.get('trusted') is False:
                    cell.metadata['trusted'] = True

            # Path and name of the notebook is the one of the original path
            model['path'] = org_model['path']
            model['name'] = org_model['name']

            return model

        def trust_notebook(self, path):
            """Trust the current notebook"""
            if path.endswith('.ipynb') or path not in self.paired_notebooks:
                super(JupytextContentsManager, self).trust_notebook(path)
                return

            fmt, formats = self.paired_notebooks[path]
            for alt_path, alt_fmt in paired_paths(path, fmt, formats):
                if alt_fmt['extension'] == '.ipynb':
                    super(JupytextContentsManager, self).trust_notebook(alt_path)

        def rename_file(self, old_path, new_path):
            """Rename the current notebook, as well as its alternative representations"""
            if old_path not in self.paired_notebooks:
                try:
                    # we do not know yet if this is a paired notebook (#190)
                    # -> to get this information we open the notebook
                    self.get(old_path, content=True)
                except Exception:
                    pass

            if old_path not in self.paired_notebooks:
                super(JupytextContentsManager, self).rename_file(old_path, new_path)
                return

            fmt, formats = self.paired_notebooks.get(old_path)
            old_alt_paths = paired_paths(old_path, fmt, formats)

            # Is the new file name consistent with suffix?
            try:
                new_base = base_path(new_path, fmt)
            except Exception as err:
                raise HTTPError(400, str(err))

            for old_alt_path, alt_fmt in old_alt_paths:
                new_alt_path = full_path(new_base, alt_fmt)
                if self.exists(old_alt_path):
                    super(JupytextContentsManager, self).rename_file(old_alt_path, new_alt_path)

            self.drop_paired_notebook(old_path)
            self.update_paired_notebooks(new_path, fmt, formats)
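
A hedged configuration sketch (not in the original example): since the traits above are declared with config=True, a pairing policy can be set in jupyter_notebook_config.py. The values are illustrative.

c = get_config()

c.ContentsManager.default_jupytext_formats = "ipynb,py:percent"   # pair every notebook with a percent script
c.ContentsManager.preferred_jupytext_formats_save = "py:percent"  # save .py files in the percent format
c.ContentsManager.comment_magics = True                           # comment out Jupyter magics in the text file
c.ContentsManager.outdated_text_notebook_margin = 5.0             # seconds of tolerance, see the save/get logic above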
Example #7
class DisplayIntegrator(Tool):
    name = "ctapipe-display-integration"
    description = __doc__

    event_index = Int(0, help="Event index to view.").tag(config=True)
    use_event_id = Bool(
        False,
        help="event_index will obtain an event using event_id instead of index.",
    ).tag(config=True)
    telescope = Int(
        None,
        allow_none=True,
        help="Telescope to view. Set to None to display the first"
        "telescope with data.",
    ).tag(config=True)
    channel = Enum([0, 1], 0, help="Channel to view").tag(config=True)

    extractor_product = traits.enum_trait(
        ImageExtractor, default="NeighborPeakWindowSum"
    )

    aliases = Dict(
        dict(
            f="EventSource.input_url",
            max_events="EventSource.max_events",
            extractor="DisplayIntegrator.extractor_product",
            E="DisplayIntegrator.event_index",
            T="DisplayIntegrator.telescope",
            C="DisplayIntegrator.channel",
        )
    )
    flags = Dict(
        dict(
            id=(
                {"DisplayDL1Calib": {"use_event_index": True}},
                "event_index will obtain an event using event_id instead of index.",
            )
        )
    )
    classes = List([EventSource] + traits.classes_with_traits(ImageExtractor))

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # make sure gzip files are seekable
        self.config.SimTelEventSource.back_seekable = True
        self.eventseeker = None
        self.extractor = None
        self.calibrator = None

    def setup(self):
        self.log_format = "%(levelname)s: %(message)s [%(name)s.%(funcName)s]"

        event_source = self.add_component(EventSource.from_config(parent=self))
        self.eventseeker = self.add_component(EventSeeker(event_source, parent=self))
        self.extractor = self.add_component(
            ImageExtractor.from_name(self.extractor_product, parent=self)
        )
        self.calibrator = self.add_component(
            CameraCalibrator(parent=self, image_extractor=self.extractor)
        )

    def start(self):
        event_num = self.event_index
        if self.use_event_id:
            event_num = str(event_num)
        event = self.eventseeker[event_num]

        # Calibrate
        self.calibrator(event)

        # Select telescope
        tels = list(event.r0.tels_with_data)
        telid = self.telescope
        if telid is None:
            telid = tels[0]
        if telid not in tels:
            self.log.error(
                "[event] please specify one of the following "
                "telescopes for this event: {}".format(tels)
            )
            exit()

        extractor_name = self.extractor.__class__.__name__

        plot(event, telid, self.channel, extractor_name)

    def finish(self):
        pass
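
A hedged run sketch (not from the original source): ctapipe tools chain setup/start/finish through Tool.run(), and the single-letter aliases above map command-line flags onto traits. The input file name and values are hypothetical.

tool = DisplayIntegrator()
tool.run(argv=[
    '-f', 'gamma_events.simtel.gz',       # EventSource.input_url, via the alias table
    '--extractor', 'LocalPeakWindowSum',  # DisplayIntegrator.extractor_product
    '-E', '2',                            # event_index
    '-T', '1',                            # telescope to display
])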
Example #8
class MeasureControl(Control):
    _view_name = Unicode('LeafletMeasureControlView').tag(sync=True)
    _model_name = Unicode('LeafletMeasureControlModel').tag(sync=True)

    _length_units = ['feet', 'meters', 'miles', 'kilometers']
    _area_units = ['acres', 'hectares', 'sqfeet', 'sqmeters', 'sqmiles']
    _custom_units_dict = {}
    _custom_units = Dict().tag(sync=True)

    position = Enum(['topright', 'topleft', 'bottomright', 'bottomleft'],
                    default_value='topright',
                    help="""Possible values are topleft, topright, bottomleft
                or bottomright""").tag(sync=True, o=True)

    primary_length_unit = Enum(
        values=_length_units,
        default_value='feet',
        help="""Possible values are feet, meters, miles, kilometers or any user
                defined unit""").tag(sync=True, o=True)

    secondary_length_unit = Enum(
        values=_length_units,
        default_value=None,
        allow_none=True,
        help="""Possible values are feet, meters, miles, kilometers or any user
                defined unit""").tag(sync=True, o=True)

    primary_area_unit = Enum(
        values=_area_units,
        default_value='acres',
        help="""Possible values are acres, hectares, sqfeet, sqmeters, sqmiles
                or any user defined unit""").tag(sync=True, o=True)

    secondary_area_unit = Enum(
        values=_area_units,
        default_value=None,
        allow_none=True,
        help="""Possible values are acres, hectares, sqfeet, sqmeters, sqmiles
                or any user defined unit""").tag(sync=True, o=True)

    active_color = Color('#ABE67E').tag(sync=True, o=True)
    completed_color = Color('#C8F2BE').tag(sync=True, o=True)

    popup_options = Dict({
        'className': 'leaflet-measure-resultpopup',
        'autoPanPadding': [10, 10]
    }).tag(sync=True, o=True)

    capture_z_index = Int(10000).tag(sync=True, o=True)

    def add_length_unit(self, name, factor, decimals=0):
        self._length_units.append(name)
        self._add_unit(name, factor, decimals)

    def add_area_unit(self, name, factor, decimals=0):
        self._area_units.append(name)
        self._add_unit(name, factor, decimals)

    def _add_unit(self, name, factor, decimals):
        self._custom_units_dict[name] = {
            'factor': factor,
            'display': name,
            'decimals': decimals
        }
        self._custom_units = dict(**self._custom_units_dict)
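
A short usage sketch (not part of the original example): attaching the control to an ipyleaflet Map and registering a custom length unit with the helper above. The factor is assumed to follow leaflet-measure's units-per-meter convention.

from ipyleaflet import Map

m = Map(center=(43.6, 1.44), zoom=10)
measure = MeasureControl(position='bottomleft',
                         primary_length_unit='kilometers',
                         active_color='orange')
measure.add_length_unit('leagues', 0.000207, decimals=4)  # roughly 4828 m per league
measure.secondary_length_unit = 'leagues'
m.add_control(measure)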
Example #9
class DisplayIntegrator(Tool):
    name = "DisplayIntegrator"
    description = "Calibrate dl0 data to dl1, and plot the various camera " \
                  "images that characterise the event and calibration. Also " \
                  "plot some examples of waveforms with the " \
                  "integration window."

    event_index = Int(0, help='Event index to view.').tag(config=True)
    use_event_id = Bool(False,
                        help='event_index will obtain an event using '
                        'event_id instead of '
                        'index.').tag(config=True)
    telescope = Int(None,
                    allow_none=True,
                    help='Telescope to view. Set to None to display the first '
                    'telescope with data.').tag(config=True)
    channel = Enum([0, 1], 0, help='Channel to view').tag(config=True)

    aliases = Dict(
        dict(r='EventFileReaderFactory.reader',
             f='EventFileReaderFactory.input_path',
             max_events='EventFileReaderFactory.max_events',
             extractor='ChargeExtractorFactory.extractor',
             window_width='ChargeExtractorFactory.window_width',
             window_shift='ChargeExtractorFactory.window_shift',
             sig_amp_cut_HG='ChargeExtractorFactory.sig_amp_cut_HG',
             sig_amp_cut_LG='ChargeExtractorFactory.sig_amp_cut_LG',
             lwt='ChargeExtractorFactory.lwt',
             clip_amplitude='CameraDL1Calibrator.clip_amplitude',
             radius='CameraDL1Calibrator.radius',
             E='DisplayIntegrator.event_index',
             T='DisplayIntegrator.telescope',
             C='DisplayIntegrator.channel',
             O='IntegratorPlotter.output_dir'))
    flags = Dict(
        dict(id=({
            'DisplayIntegrator': {
                'use_event_id': True
            }
        }, 'event_index will obtain an event using '
                 'event_id instead of index.')))
    classes = List([
        EventFileReaderFactory, ChargeExtractorFactory, CameraDL1Calibrator,
        IntegratorPlotter
    ])

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.file_reader = None
        self.r1 = None
        self.dl0 = None
        self.extractor = None
        self.dl1 = None
        self.plotter = None

    def setup(self):
        self.log_format = "%(levelname)s: %(message)s [%(name)s.%(funcName)s]"
        kwargs = dict(config=self.config, tool=self)

        reader_factory = EventFileReaderFactory(**kwargs)
        reader_class = reader_factory.get_class()
        self.file_reader = reader_class(**kwargs)

        extractor_factory = ChargeExtractorFactory(**kwargs)
        extractor_class = extractor_factory.get_class()
        self.extractor = extractor_class(**kwargs)

        r1_factory = CameraR1CalibratorFactory(origin=self.file_reader.origin,
                                               **kwargs)
        r1_class = r1_factory.get_class()
        self.r1 = r1_class(**kwargs)

        self.dl0 = CameraDL0Reducer(**kwargs)

        self.dl1 = CameraDL1Calibrator(extractor=self.extractor, **kwargs)

        self.plotter = IntegratorPlotter(**kwargs)

    def start(self):
        event = self.file_reader.get_event(self.event_index, self.use_event_id)

        # Calibrate
        self.r1.calibrate(event)
        self.dl0.reduce(event)
        self.dl1.calibrate(event)

        # Select telescope
        tels = list(event.r0.tels_with_data)
        telid = self.telescope
        if telid is None:
            telid = tels[0]
        if telid not in tels:
            self.log.error("[event] please specify one of the following "
                           "telescopes for this event: {}".format(tels))
            exit()

        extractor_name = self.extractor.name

        self.plotter.plot(self.file_reader, event, telid, self.channel,
                          extractor_name)

    def finish(self):
        pass
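
A hedged run sketch (not from the original source) for this older, factory-based variant, showing the --id flag (which switches -E to event-id lookup) alongside a couple of aliases. The file name and values are hypothetical.

tool = DisplayIntegrator()
tool.run(argv=[
    '-f', 'run101.simtel.gz',  # EventFileReaderFactory.input_path
    '--window_width', '7',     # ChargeExtractorFactory.window_width
    '--id',                    # flag: obtain the event by event_id rather than index
    '-E', '40121',
])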
Example #10
class Line(Mesh):
    # don't need a custom model since we aren't introducing new custom serialized properties,
    # just making the material property a more specific instance
    _view_name = Unicode('LineView', sync=True)
    type = Enum(['LineStrip', 'LinePieces'], 'LineStrip', sync=True)
    material = Instance(_LineMaterial, sync=True, **widget_serialization)
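
A hedged sketch (not from the original source), assuming the old pythreejs API this class belongs to, in which geometries and materials are themselves widgets:

from pythreejs import Geometry, LineBasicMaterial

line = Line(
    geometry=Geometry(vertices=[[0, 0, 0], [1, 1, 1], [1, 0, 0]]),
    material=LineBasicMaterial(color='red', linewidth=2),
    type='LinePieces',  # independent segments rather than a connected strip
)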
Example #11
class ExecutePreprocessor(Preprocessor):
    """
    Executes all the cells in a notebook
    """

    timeout = Integer(30,
                      allow_none=True,
                      help=dedent("""
            The time to wait (in seconds) for output from executions.
            If a cell execution takes longer, an exception (TimeoutError
            on python 3+, RuntimeError on python 2) is raised.

            `None` or `-1` will disable the timeout. If `timeout_func` is set,
            it overrides `timeout`.
            """)).tag(config=True)

    timeout_func = Any(default_value=None,
                       allow_none=True,
                       help=dedent("""
            A callable which, when given the cell source as input,
            returns the time to wait (in seconds) for output from cell
            executions. If a cell execution takes longer, an exception
            (TimeoutError on python 3+, RuntimeError on python 2) is
            raised.

            Returning `None` or `-1` will disable the timeout for the cell.
            Not setting `timeout_func` will cause the preprocessor to
            default to using the `timeout` trait for all cells. The
            `timeout_func` trait overrides `timeout` if it is not `None`.
            """)).tag(config=True)

    interrupt_on_timeout = Bool(False,
                                help=dedent("""
            If execution of a cell times out, interrupt the kernel and
            continue executing other cells rather than throwing an error and
            stopping.
            """)).tag(config=True)

    startup_timeout = Integer(60,
                              help=dedent("""
            The time to wait (in seconds) for the kernel to start.
            If kernel startup takes longer, a RuntimeError is
            raised.
            """)).tag(config=True)

    allow_errors = Bool(False,
                        help=dedent("""
            If `False` (default), when a cell raises an error the
            execution is stopped and a `CellExecutionError`
            is raised.
            If `True`, execution errors are ignored and the execution
            is continued until the end of the notebook. Output from
            exceptions is included in the cell output in both cases.
            """)).tag(config=True)

    force_raise_errors = Bool(False,
                              help=dedent("""
            If False (default), errors from executing the notebook can be
            allowed with a `raises-exception` tag on a single cell, or the
            `allow_errors` configurable option for all cells. An allowed error
            will be recorded in notebook output, and execution will continue.
            If an error occurs when it is not explicitly allowed, a
            `CellExecutionError` will be raised.
            If True, `CellExecutionError` will be raised for any error that occurs
            while executing the notebook. This overrides both the
            `allow_errors` option and the `raises-exception` cell tag.
            """)).tag(config=True)

    extra_arguments = List(Unicode())

    kernel_name = Unicode('',
                          help=dedent("""
            Name of kernel to use to execute the cells.
            If not set, use the kernel_spec embedded in the notebook.
            """)).tag(config=True)

    raise_on_iopub_timeout = Bool(False,
                                  help=dedent("""
            If `False` (default), then the kernel will continue waiting for
            iopub messages until it receives a kernel idle message, or until a
            timeout occurs, at which point the currently executing cell will be
            skipped. If `True`, then an error will be raised after the first
            timeout. This option generally does not need to be used, but may be
            useful in contexts where there is the possibility of executing
            notebooks with memory-consuming infinite loops.
            """)).tag(config=True)

    store_widget_state = Bool(True,
                              help=dedent("""
            If `True` (default), then the state of the Jupyter widgets created
            at the kernel will be stored in the metadata of the notebook.
            """)).tag(config=True)

    iopub_timeout = Integer(4,
                            allow_none=False,
                            help=dedent("""
            The time to wait (in seconds) for IOPub output. This generally
            doesn't need to be set, but on some slow networks (such as CI
            systems) the default timeout might not be long enough to get all
            messages.
            """)).tag(config=True)

    shutdown_kernel = Enum(['graceful', 'immediate'],
                           default_value='graceful',
                           help=dedent("""
            If `graceful` (default), then the kernel is given time to clean
            up after executing all cells, e.g., to execute its `atexit` hooks.
            If `immediate`, then the kernel is signaled to immediately
            terminate.
            """)).tag(config=True)

    kernel_manager_class = Type(config=True,
                                help='The kernel manager class to use.')

    @default('kernel_manager_class')
    def _kernel_manager_class_default(self):
        """Use a dynamic default to avoid importing jupyter_client at startup"""
        try:
            from jupyter_client import KernelManager
        except ImportError:
            raise ImportError(
                "`nbconvert --execute` requires the jupyter_client package: `pip install jupyter_client`"
            )
        return KernelManager

    _display_id_map = Dict(help=dedent("""
              mapping of locations of outputs with a given display_id
              tracks cell index and output index within cell.outputs for
              each appearance of the display_id
              {
                  'display_id': {
                      cell_idx: [output_idx,]
                  }
              }
              """))

    def start_new_kernel(self, **kwargs):
        """Creates a new kernel manager and kernel client.

        Parameters
        ----------
        kwargs :
            Any options for `self.kernel_manager_class.start_kernel()`. Because
            that defaults to KernelManager, this will likely include options
            accepted by `KernelManager.start_kernel()`, which includes `cwd`.

        Returns
        -------
        km : KernelManager
            A kernel manager as created by self.kernel_manager_class.
        kc : KernelClient
            Kernel client as created by the kernel manager `km`.
        """
        if not self.kernel_name:
            self.kernel_name = self.nb.metadata.get('kernelspec',
                                                    {}).get('name', 'python')
        km = self.kernel_manager_class(kernel_name=self.kernel_name,
                                       config=self.config)
        km.start_kernel(extra_arguments=self.extra_arguments, **kwargs)

        kc = km.client()
        kc.start_channels()
        try:
            kc.wait_for_ready(timeout=self.startup_timeout)
        except RuntimeError:
            kc.stop_channels()
            km.shutdown_kernel()
            raise
        kc.allow_stdin = False
        return km, kc

    @contextmanager
    def setup_preprocessor(self, nb, resources, km=None, **kwargs):
        """
        Context manager for setting up the class to execute a notebook.

        This assigns `nb` to `self.nb`, where it will be modified in-place. It also
        creates and assigns the kernel manager (`self.km`) and kernel client (`self.kc`).

        It is intended to yield to a block that will execute code.

        When control returns from the yield it stops the client's zmq channels, shuts
        down the kernel, and removes the now unused attributes.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being executed.
        resources : dictionary
            Additional resources used in the conversion process. For example,
            passing ``{'metadata': {'path': run_path}}`` sets the
            execution path to ``run_path``.
        km : KernelManager (optional)
            Optional kernel manager. If none is provided, a kernel manager will
            be created.

        Returns
        -------
        nb : NotebookNode
            The executed notebook.
        resources : dictionary
            Additional resources used in the conversion process.
        """
        path = resources.get('metadata', {}).get('path', '') or None
        self.nb = nb
        # clear display_id map
        self._display_id_map = {}
        self.widget_state = {}
        self.widget_buffers = {}

        if km is None:
            kwargs["cwd"] = path
            self.km, self.kc = self.start_new_kernel(**kwargs)
            try:
                # Yield unbound args for easier understanding and downstream consumption
                yield nb, self.km, self.kc
            finally:
                self.kc.stop_channels()
                self.km.shutdown_kernel(
                    now=self.shutdown_kernel == 'immediate')

                for attr in ['nb', 'km', 'kc']:
                    delattr(self, attr)
        else:
            self.km = km
            if not km.has_kernel:
                km.start_kernel(extra_arguments=self.extra_arguments, **kwargs)
            self.kc = km.client()

            self.kc.start_channels()
            try:
                self.kc.wait_for_ready(timeout=self.startup_timeout)
            except RuntimeError:
                self.kc.stop_channels()
                raise
            self.kc.allow_stdin = False
            try:
                yield nb, self.km, self.kc
            finally:
                for attr in ['nb', 'km', 'kc']:
                    delattr(self, attr)

    def preprocess(self, nb, resources, km=None):
        """
        Preprocess notebook executing each code cell.

        The input argument `nb` is modified in-place.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being executed.
        resources : dictionary
            Additional resources used in the conversion process. For example,
            passing ``{'metadata': {'path': run_path}}`` sets the
            execution path to ``run_path``.
        km: KernelManager (optional)
            Optional kernel manager. If none is provided, a kernel manager will
            be created.

        Returns
        -------
        nb : NotebookNode
            The executed notebook.
        resources : dictionary
            Additional resources used in the conversion process.
        """

        with self.setup_preprocessor(nb, resources, km=km):
            self.log.info("Executing notebook with kernel: %s" %
                          self.kernel_name)
            nb, resources = super(ExecutePreprocessor,
                                  self).preprocess(nb, resources)
            info_msg = self._wait_for_reply(self.kc.kernel_info())
            nb.metadata['language_info'] = info_msg['content']['language_info']
            self.set_widgets_metadata()

        return nb, resources

    def set_widgets_metadata(self):
        if self.widget_state:
            self.nb.metadata.widgets = {
                'application/vnd.jupyter.widget-state+json': {
                    'state': {
                        model_id: _serialize_widget_state(state)
                        for model_id, state in self.widget_state.items()
                        if '_model_name' in state
                    },
                    'version_major': 2,
                    'version_minor': 0,
                }
            }
            for key, widget in self.nb.metadata.widgets[
                    'application/vnd.jupyter.widget-state+json'][
                        'state'].items():
                buffers = self.widget_buffers.get(key)
                if buffers:
                    widget['buffers'] = buffers

    def preprocess_cell(self, cell, resources, cell_index):
        """
        Executes a single code cell. See base.py for details.

        To execute all cells see :meth:`preprocess`.
        """
        if cell.cell_type != 'code' or not cell.source.strip():
            return cell, resources

        reply, outputs = self.run_cell(cell, cell_index)
        # Backwards compatibility for processes that wrap run_cell
        cell.outputs = outputs

        cell_allows_errors = (self.allow_errors
                              or "raises-exception" in cell.metadata.get(
                                  "tags", []))

        if self.force_raise_errors or not cell_allows_errors:
            for out in cell.outputs:
                if out.output_type == 'error':
                    raise CellExecutionError.from_cell_and_msg(cell, out)
            if (reply is not None) and reply['content']['status'] == 'error':
                raise CellExecutionError.from_cell_and_msg(
                    cell, reply['content'])
        return cell, resources

    def _update_display_id(self, display_id, msg):
        """Update outputs with a given display_id"""
        if display_id not in self._display_id_map:
            self.log.debug("display id %r not in %s", display_id,
                           self._display_id_map)
            return

        if msg['header']['msg_type'] == 'update_display_data':
            msg['header']['msg_type'] = 'display_data'

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg['msg_type'])
            return

        for cell_idx, output_indices in self._display_id_map[display_id].items(
        ):
            cell = self.nb['cells'][cell_idx]
            outputs = cell['outputs']
            for output_idx in output_indices:
                outputs[output_idx]['data'] = out['data']
                outputs[output_idx]['metadata'] = out['metadata']

    def _poll_for_reply(self, msg_id, cell=None, timeout=None):
        try:
            # poll the shell channel for the execute reply, up to `timeout`
            msg = self.kc.shell_channel.get_msg(timeout=timeout)
            if msg['parent_header'].get('msg_id') == msg_id:
                return msg
        except Empty:
            # received no message, check if kernel is still alive
            self._check_alive()
            # kernel still alive; implicitly return None so the caller retries

    def _get_timeout(self, cell):
        if self.timeout_func is not None and cell is not None:
            timeout = self.timeout_func(cell)
        else:
            timeout = self.timeout

        if not timeout or timeout < 0:
            timeout = None

        return timeout

    def _handle_timeout(self):
        self.log.error("Timeout waiting for execute reply (%is)." %
                       self.timeout)
        if self.interrupt_on_timeout:
            self.log.error("Interrupting kernel")
            self.km.interrupt_kernel()
        else:
            raise TimeoutError("Cell execution timed out")

    def _check_alive(self):
        if not self.kc.is_alive():
            self.log.error("Kernel died while waiting for execute reply.")
            raise DeadKernelError("Kernel died")

    def _wait_for_reply(self, msg_id, cell=None):
        # wait for finish, with timeout
        timeout = self._get_timeout(cell)
        cumulative_time = 0
        timeout_interval = 5
        while True:
            try:
                msg = self.kc.shell_channel.get_msg(timeout=timeout_interval)
            except Empty:
                self._check_alive()
                cumulative_time += timeout_interval
                if timeout and cumulative_time > timeout:
                    self._handle_timeout()
                    break
            else:
                if msg['parent_header'].get('msg_id') == msg_id:
                    return msg

    def _timeout_with_deadline(self, timeout, deadline):
        if deadline is not None and deadline - monotonic() < timeout:
            timeout = deadline - monotonic()

        if timeout < 0:
            timeout = 0

        return timeout

    def _passed_deadline(self, deadline):
        if deadline is not None and deadline - monotonic() <= 0:
            self._handle_timeout()
            return True
        return False

    def run_cell(self, cell, cell_index=0):
        parent_msg_id = self.kc.execute(cell.source)
        self.log.debug("Executing cell:\n%s", cell.source)
        exec_timeout = self._get_timeout(cell)
        deadline = None
        if exec_timeout is not None:
            deadline = monotonic() + exec_timeout

        cell.outputs = []
        self.clear_before_next_output = False

        # This loop resolves #659. By polling iopub_channel's and shell_channel's
        # output we avoid dropping output and important signals (like idle) from
        # iopub_channel. Prior to this change, iopub_channel wasn't polled until
        # after exec_reply was obtained from shell_channel, leading to the
        # aforementioned dropped data.

        # These two variables are used to track what still needs polling:
        # more_output=true => continue to poll the iopub_channel
        more_output = True
        # polling_exec_reply=true => continue to poll the shell_channel
        polling_exec_reply = True

        while more_output or polling_exec_reply:
            if polling_exec_reply:
                if self._passed_deadline(deadline):
                    polling_exec_reply = False
                    continue

                # Avoid exceeding the execution timeout (deadline), but stop
                # after at most 1s so we can poll output from iopub_channel.
                timeout = self._timeout_with_deadline(1, deadline)
                exec_reply = self._poll_for_reply(parent_msg_id, cell, timeout)
                if exec_reply is not None:
                    polling_exec_reply = False

            if more_output:
                try:
                    timeout = self.iopub_timeout
                    if polling_exec_reply:
                        # Avoid exceeding the execution timeout (deadline) while
                        # polling for output.
                        timeout = self._timeout_with_deadline(
                            timeout, deadline)
                    msg = self.kc.iopub_channel.get_msg(timeout=timeout)
                except Empty:
                    if polling_exec_reply:
                        # Still waiting for execution to finish so we expect that
                        # output may not always be produced yet.
                        continue

                    if self.raise_on_iopub_timeout:
                        raise TimeoutError("Timeout waiting for IOPub output")
                    else:
                        self.log.warning("Timeout waiting for IOPub output")
                        more_output = False
                        continue
                if msg['parent_header'].get('msg_id') != parent_msg_id:
                    # not an output from our execution
                    continue

                try:
                    # Will raise CellExecutionComplete when completed
                    self.process_message(msg, cell, cell_index)
                except CellExecutionComplete:
                    more_output = False

        # Return cell.outputs still for backwards compatibility
        return exec_reply, cell.outputs

    def process_message(self, msg, cell, cell_index):
        """
        Processes a kernel message, updates cell state, and returns the
        resulting output object that was appended to cell.outputs.

        The input argument `cell` is modified in-place.

        Parameters
        ----------
        msg : dict
            The kernel message being processed.
        cell : nbformat.NotebookNode
            The cell which is currently being processed.
        cell_index : int
            The position of the cell within the notebook object.

        Returns
        -------
        output : dict
            The execution output payload (or None for no output).

        Raises
        ------
        CellExecutionComplete
          Once a message arrives which indicates computation completeness.

        """
        msg_type = msg['msg_type']
        self.log.debug("msg_type: %s", msg_type)
        content = msg['content']
        self.log.debug("content: %s", content)

        display_id = content.get('transient', {}).get('display_id', None)
        if display_id and msg_type in {
                'execute_result', 'display_data', 'update_display_data'
        }:
            self._update_display_id(display_id, msg)

        # set the prompt number for the input and the output
        if 'execution_count' in content:
            cell['execution_count'] = content['execution_count']

        if msg_type == 'status':
            if content['execution_state'] == 'idle':
                raise CellExecutionComplete()
        elif msg_type == 'clear_output':
            self.clear_output(cell.outputs, msg, cell_index)
        elif msg_type.startswith('comm'):
            self.handle_comm_msg(cell.outputs, msg, cell_index)
        # Check for remaining messages we don't process
        elif msg_type not in ['execute_input', 'update_display_data']:
            # Assign output as our processed "result"
            return self.output(cell.outputs, msg, display_id, cell_index)

    def output(self, outs, msg, display_id, cell_index):
        msg_type = msg['msg_type']

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg_type)
            return

        if self.clear_before_next_output:
            self.log.debug('Executing delayed clear_output')
            outs[:] = []
            self.clear_display_id_mapping(cell_index)
            self.clear_before_next_output = False

        if display_id:
            # record output index in:
            #   _display_id_map[display_id][cell_idx]
            cell_map = self._display_id_map.setdefault(display_id, {})
            output_idx_list = cell_map.setdefault(cell_index, [])
            output_idx_list.append(len(outs))

        outs.append(out)

        return out

    def clear_output(self, outs, msg, cell_index):
        content = msg['content']
        if content.get('wait'):
            self.log.debug('Wait to clear output')
            self.clear_before_next_output = True
        else:
            self.log.debug('Immediate clear output')
            outs[:] = []
            self.clear_display_id_mapping(cell_index)

    def clear_display_id_mapping(self, cell_index):
        for display_id, cell_map in self._display_id_map.items():
            if cell_index in cell_map:
                cell_map[cell_index] = []

    def handle_comm_msg(self, outs, msg, cell_index):
        content = msg['content']
        data = content['data']
        if self.store_widget_state and 'state' in data:  # ignore custom messages
            self.widget_state.setdefault(content['comm_id'],
                                         {}).update(data['state'])
            if 'buffer_paths' in data and data['buffer_paths']:
                self.widget_buffers[content['comm_id']] = _get_buffer_data(msg)
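
A minimal usage sketch for the preprocessor above, assuming the standard
nbformat entry points (file names are placeholders):

import nbformat

# Read a notebook, execute it in its own directory, and write the result.
nb = nbformat.read('example.ipynb', as_version=4)
ep = ExecutePreprocessor(timeout=600, kernel_name='python3')
ep.preprocess(nb, {'metadata': {'path': '.'}})
nbformat.write(nb, 'executed.ipynb')
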
Example #12
class Directions(GMapsWidgetMixin, widgets.Widget):
    """
    Directions layer.

    Add this to a :class:`gmaps.Figure` instance to draw directions.

    Use the :func:`gmaps.directions_layer` factory function to
    instantiate this class, rather than the constructor.

    :Examples:

    {examples}

    {params}
    """
    has_bounds = True
    _view_name = Unicode('DirectionsLayerView').tag(sync=True)
    _model_name = Unicode('DirectionsLayerModel').tag(sync=True)

    start = geotraitlets.Point().tag(sync=True)
    end = geotraitlets.Point().tag(sync=True)
    waypoints = geotraitlets.LocationArray().tag(sync=True)
    data = List(minlen=2, allow_none=True, default_value=None)
    data_bounds = List().tag(sync=True)
    avoid_ferries = Bool(default_value=False).tag(sync=True)
    avoid_highways = Bool(default_value=False).tag(sync=True)
    avoid_tolls = Bool(default_value=False).tag(sync=True)
    optimize_waypoints = Bool(default_value=False).tag(sync=True)
    travel_mode = Enum(ALLOWED_TRAVEL_MODES,
                       default_value=DEFAULT_TRAVEL_MODE).tag(sync=True)
    show_markers = Bool(default_value=True).tag(sync=True)
    show_route = Bool(default_value=True).tag(sync=True)
    stroke_color = geotraitlets.ColorAlpha(default_value=DEFAULT_STROKE_COLOR,
                                           allow_none=False).tag(sync=True)
    stroke_opacity = geotraitlets.Opacity(default_value=0.6,
                                          allow_none=False).tag(sync=True)
    stroke_weight = Float(min=0.0, allow_none=False,
                          default_value=6.0).tag(sync=True)

    layer_status = CUnicode().tag(sync=True)

    def __init__(self, start=None, end=None, waypoints=None, **kwargs):
        if kwargs.get('data') is not None:
            _warn_obsolete_data()
            # Keep for backwards compatibility with data argument
            data = kwargs['data']
            waypoints = kwargs.get('waypoints')
            if start is None and end is None and waypoints is None:
                start, end, waypoints = Directions._destructure_data(data)
                kwargs.update(
                    dict(start=start, end=end, waypoints=waypoints, data=None))
            else:
                raise ValueError(
                    'Cannot set both data and one of "start", "end", '
                    'or "waypoints".')
        else:
            if waypoints is None:
                waypoints = []
            kwargs.update(dict(start=start, end=end, waypoints=waypoints))
        super(Directions, self).__init__(**kwargs)

    @staticmethod
    def _destructure_data(data):
        start = data[0]
        end = data[-1]
        waypoints = data[1:-1]
        return start, end, waypoints

    @validate('waypoints')
    def _valid_waypoints(self, proposal):
        if proposal['value'] is None:
            _warn_obsolete_waypoints()
            proposal['value'] = []
        return proposal['value']

    @observe('data')
    def _on_data_change(self, change):
        data = change['new']
        if data is not None:
            _warn_obsolete_data()
            with self.hold_trait_notifications():
                self.start, self.end, self.waypoints = \
                        self._destructure_data(data)

    @observe('start', 'end', 'waypoints')
    def _calc_bounds(self, change):
        all_data = [self.start] + self.waypoints + [self.end]
        min_latitude = min(row[0] for row in all_data)
        min_longitude = min(row[1] for row in all_data)
        max_latitude = max(row[0] for row in all_data)
        max_longitude = max(row[1] for row in all_data)
        self.data_bounds = [(min_latitude, min_longitude),
                            (max_latitude, max_longitude)]
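
A short usage sketch for the layer above, following the docstring's advice to
use the gmaps.directions_layer factory function (the API key is a placeholder):

import gmaps

gmaps.configure(api_key='YOUR-API-KEY')  # placeholder key
fig = gmaps.figure()
geneva, zurich = (46.2, 6.1), (47.4, 8.5)
fig.add_layer(gmaps.directions_layer(geneva, zurich, travel_mode='DRIVING'))
fig  # render the map with the route in a Jupyter notebook
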
Example #13
class Thermostat(ipw.HBox):
    state = Enum(['Off', "Heating", "Cooling"],
                 default_value="Off").tag(sync=True)
    hsp = Float(70.0).tag(sync=True)
    csp = Float(73.0).tag(sync=True)
    temp = Float(72.0).tag(sync=True)
    oat = Float(0).tag(sync=True)
    occupied = Bool(False).tag(sync=True)

    def __init__(self):
        img_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "")
        img_tstat = os.path.join(img_path, "thermostat.png")
        with open(img_tstat, "rb") as img_file:
            self._img_tstat_open = img_file.read()
        self._tstat = ipw.Image(
            value=self._img_tstat_open,
            format='png',
            width=240,
            height=240,
        )
        form_item_layout = ipw.Layout(display='flex',
                                      flex_flow='row',
                                      width='100%',
                                      justify_content='flex-start')
        self.observe(self._updateslider, 'hsp')
        self.observe(self._updateslider, 'csp')

        self.spslider = ipw.FloatRangeSlider(min=60,
                                             max=85,
                                             step=1.0,
                                             value=[self.hsp, self.csp],
                                             continuous_update=True,
                                             orientation='horizontal')
        self.spslider.observe(self._updatesetpoints, 'value')

        self.tempsensor = ipw.Label(value='{0:.2f}'.format(self.temp))
        self.observe(self._updatetemp, 'temp')

        self.oatsensor = ipw.Label(value='{0:.2f}'.format(self.oat))

        self.statedisplay = ipw.Label(value=self.state)
        self.observe(self._updatestate, 'state')
        self.statedisplay.observe(self._updatestatedisplay, 'value')

        self.occupiedisplay = ipw.Label(value='{0}'.format(self.occupied))
        self.observe(self._update_occupancy, 'occupied')

        def occupancy_square_wave():
            i = 0
            while True:
                i = (i + 1) % 20
                time.sleep(1)
                self.occupied = i > 10

        occthread = threading.Thread(target=occupancy_square_wave)
        occthread.start()

        def thermostat_temp_wave():
            i = 0
            while True:
                i = (i + 1) % 180
                time.sleep(1)
                adjust = -.2 if self.state == 'Cooling' else .2 if self.state == 'Heating' else 0
                self.oat = 80 + 20 * math.sin(math.radians(i))
                self.temp = self.temp + adjust + (self.oat - self.temp) * .01

        tempthread = threading.Thread(target=thermostat_temp_wave)
        tempthread.start()

        form_items = [
            ipw.VBox([self._tstat]),
            ipw.VBox([
                ipw.Box(
                    [ipw.Label(value='Outside Temperature: '), self.oatsensor],
                    layout=form_item_layout),
                ipw.Box(
                    [ipw.Label(value='Inside Temperature: '), self.tempsensor],
                    layout=form_item_layout),
                ipw.Box([ipw.Label(value='Setpoints: '), self.spslider],
                        layout=form_item_layout),
                ipw.Box([ipw.Label(value='State: '), self.statedisplay],
                        layout=form_item_layout),
                ipw.Box([ipw.Label(value='Occupied? '), self.occupiedisplay],
                        layout=form_item_layout),
            ]),
        ]

        super(Thermostat, self).__init__()

        self.layout.display = 'flex'
        self.layout.flex_flow = 'row'
        self.layout.border = 'solid 2px'
        #self.layout.align_items = 'center'
        #self.width = '50%'
        self.children = form_items

    def _controlloop(self):
        # use hysteresis
        if self.state == 'Heating':
            hyst_hsp = self.hsp + 1
        else:
            hyst_hsp = self.hsp

        if self.state == 'Cooling':
            hyst_csp = self.csp - 1
        else:
            hyst_csp = self.csp

        if self.temp < hyst_hsp:
            self.state = 'Heating'
        elif self.temp > hyst_csp:
            self.state = 'Cooling'
        else:
            self.state = 'Off'

    def update_setpoints(self, hsp, csp):
        self.hsp, self.csp = hsp, csp
        self._controlloop()

    def update_temperature(self, newtemp):
        self.temp = newtemp
        self._controlloop()

    def _updatesetpoints(self, change):
        self.hsp, self.csp = change['new']
        self._controlloop()

    def _updatestate(self, change):
        self.statedisplay.value = self.state

    def _updatetemp(self, change):
        self.tempsensor.value = '{0:.2f}'.format(change['new'])
        self.oatsensor.value = '{0:.2f}'.format(self.oat)
        self._controlloop()

    def _update_occupancy(self, change):
        self.occupiedisplay.value = '{0}'.format(change['new'])

    def _updatestatedisplay(self, change):
        self.state = change['new']

    def _updateslider(self, change):
        if change['name'] == 'csp':
            old = self.spslider.value[0]
            self.spslider.value = (old, change['new'])
        elif change['name'] == 'hsp':
            old = self.spslider.value[1]
            self.spslider.value = (change['new'], old)
        self._controlloop()
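
A brief sketch of driving the widget above in a notebook; the observers wired
up in __init__ propagate these changes to the slider and labels:

tstat = Thermostat()
tstat.update_setpoints(hsp=68.0, csp=75.0)
tstat  # last expression displays the HBox in Jupyter
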
Example #14
class FixtureConfig(Global):
    log_level = Enum(
        ('DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'),
        'WARN',
    ).tag(config=True)
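
Since log_level is tagged config=True, it can presumably be set through a
traitlets Config object, as in this sketch (assuming Global is ultimately a
Configurable subclass):

from traitlets.config import Config

c = Config()
c.FixtureConfig.log_level = 'DEBUG'
fixture = FixtureConfig(config=c)
print(fixture.log_level)  # -> 'DEBUG'
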
Example #15
class FourExitsFixedPlacing(MultiAgentSimulation):
    size_leaders = Int(
        default_value=4,
        min=4, max=4,
        help='Number of active agents')
    size_herding = Int(
        default_value=100,
        min=0,
        help='Number of herding agents')
    agent_type = Enum(
        default_value=Circular,
        values=(Circular, ThreeCircle))
    body_type = Enum(
        default_value='adult',
        values=('adult',))
    exit_width = Float(
        default_value=1.25,
        min=0, max=10)

    def attributes(self, target: int = NO_TARGET, is_follower: bool = False):
        def wrapper():
            orientation = np.random.uniform(-np.pi, np.pi)
            d = dict(
                target=target,
                is_leader=not is_follower,
                is_follower=is_follower,
                body_type=self.body_type,
                orientation=orientation,
                velocity=np.zeros(2),
                angular_velocity=0.0,
                target_direction=np.zeros(2),
                target_orientation=orientation,
                familiar_exit=np.random.randint(0, len(self.field.targets)))
            return d
        return wrapper

    @default('logic')
    def _default_logic(self):
        return Reset(self) << \
            InsideDomain(self) << (
                Integrator(self) << (
                    Fluctuation(self),
                    Adjusting(self) << (
                        Navigation(self) << ExitDetection(self) << LeaderFollower(self),
                        Orientation(self)),
                    AgentAgentInteractions(self),
                    AgentObstacleInteractions(self)))

    @default('field')
    def _default_field(self):
        return fields.FourExitsField(exit_width=self.exit_width)

    @default('agents')
    def _default_agents(self):
        agents = Agents(agent_type=self.agent_type)
        obstacles = geom_to_linear_obstacles(self.field.obstacles)

        # Add new spawns to the field for the leaders
        self.field.spawns.extend([
            rectangle(25, 45, 10, 10),
            rectangle(80, 65, 10, 10),
            rectangle(75, 35, 10, 10),
            rectangle(35, 5, 10, 10),
        ])

        for i in range(self.size_leaders):
            group_leader = AgentGroup(
                agent_type=self.agent_type,
                size=1,
                attributes=self.attributes(target=i, is_follower=False))

            agents.add_non_overlapping_group(
                group_leader,
                position_gen=self.field.sample_spawn(i + 1),
                obstacles=obstacles)

        group_herding = AgentGroup(
            agent_type=self.agent_type,
            size=self.size_herding,
            attributes=self.attributes(target=NO_TARGET, is_follower=True))

        agents.add_non_overlapping_group(
            group_herding,
            position_gen=self.field.sample_spawn(0),
            obstacles=obstacles)

        return agents
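
A hedged sketch of running the simulation above; the stepping method name is
an assumption, as the real MultiAgentSimulation API may differ:

# Smaller crowd and wider exits, advanced for a fixed number of steps.
sim = FourExitsFixedPlacing(size_herding=50, exit_width=1.5)
for _ in range(100):
    sim.update()  # assumed stepping method
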
Example #16
class ExecutePreprocessor(Preprocessor):
    """
    Executes all the cells in a notebook
    """

    timeout = Integer(30,
                      allow_none=True,
                      help=dedent("""
            The time to wait (in seconds) for output from executions.
            If a cell execution takes longer, an exception (TimeoutError
            on python 3+, RuntimeError on python 2) is raised.

            `None` or `-1` will disable the timeout. If `timeout_func` is set,
            it overrides `timeout`.
            """)).tag(config=True)

    timeout_func = Any(default_value=None,
                       allow_none=True,
                       help=dedent("""
            A callable which, when given the cell source as input,
            returns the time to wait (in seconds) for output from cell
            executions. If a cell execution takes longer, an exception
            (TimeoutError on python 3+, RuntimeError on python 2) is
            raised.

            Returning `None` or `-1` will disable the timeout for the cell.
            Not setting `timeout_func` will cause the preprocessor to
            default to using the `timeout` trait for all cells. The
            `timeout_func` trait overrides `timeout` if it is not `None`.
            """)).tag(config=True)

    interrupt_on_timeout = Bool(False,
                                help=dedent("""
            If execution of a cell times out, interrupt the kernel and
            continue executing other cells rather than throwing an error and
            stopping.
            """)).tag(config=True)

    startup_timeout = Integer(60,
                              help=dedent("""
            The time to wait (in seconds) for the kernel to start.
            If kernel startup takes longer, a RuntimeError is
            raised.
            """)).tag(config=True)

    allow_errors = Bool(False,
                        help=dedent("""
            If `False` (default), when a cell raises an error the
            execution is stopped and a `CellExecutionError`
            is raised.
            If `True`, execution errors are ignored and the execution
            is continued until the end of the notebook. Output from
            exceptions is included in the cell output in both cases.
            """)).tag(config=True)

    extra_arguments = List(Unicode())

    kernel_name = Unicode('',
                          help=dedent("""
            Name of kernel to use to execute the cells.
            If not set, use the kernel_spec embedded in the notebook.
            """)).tag(config=True)

    raise_on_iopub_timeout = Bool(False,
                                  help=dedent("""
            If `False` (default), then the kernel will continue waiting for
            iopub messages until it receives a kernel idle message, or until a
            timeout occurs, at which point the currently executing cell will be
            skipped. If `True`, then an error will be raised after the first
            timeout. This option generally does not need to be used, but may be
            useful in contexts where there is the possibility of executing
            notebooks with memory-consuming infinite loops.
            """)).tag(config=True)

    iopub_timeout = Integer(4,
                            allow_none=False,
                            help=dedent("""
            The time to wait (in seconds) for IOPub output. This generally
            doesn't need to be set, but on some slow networks (such as CI
            systems) the default timeout might not be long enough to get all
            messages.
            """)).tag(config=True)

    shutdown_kernel = Enum(['graceful', 'immediate'],
                           default_value='graceful',
                           help=dedent("""
            If `graceful` (default), then the kernel is given time to clean
            up after executing all cells, e.g., to execute its `atexit` hooks.
            If `immediate`, then the kernel is signaled to immediately
            terminate.
            """)).tag(config=True)

    kernel_manager_class = Type(config=True,
                                help='The kernel manager class to use.')

    @default('kernel_manager_class')
    def _km_default(self):
        """Use a dynamic default to avoid importing jupyter_client at startup"""
        try:
            from jupyter_client import KernelManager
        except ImportError:
            raise ImportError(
                "`nbconvert --execute` requires the jupyter_client package: `pip install jupyter_client`"
            )
        return KernelManager

    # mapping of locations of outputs with a given display_id
    # tracks cell index and output index within cell.outputs for
    # each appearance of the display_id
    # {
    #   'display_id': {
    #     cell_idx: [output_idx,]
    #   }
    # }
    _display_id_map = Dict()

    def preprocess(self, nb, resources):
        """
        Preprocess notebook executing each code cell.

        The input argument `nb` is modified in-place.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being executed.
        resources : dictionary
            Additional resources used in the conversion process. For example,
            passing ``{'metadata': {'path': run_path}}`` sets the
            execution path to ``run_path``.

        Returns
        -------
        nb : NotebookNode
            The executed notebook.
        resources : dictionary
            Additional resources used in the conversion process.
        """
        path = resources.get('metadata', {}).get('path', '')
        if path == '':
            path = None

        # clear display_id map
        self._display_id_map = {}

        # from jupyter_client.manager import start_new_kernel

        def start_new_kernel(startup_timeout=60,
                             kernel_name='python',
                             **kwargs):
            km = self.kernel_manager_class(kernel_name=kernel_name)
            km.start_kernel(**kwargs)
            kc = km.client()
            kc.start_channels()
            try:
                kc.wait_for_ready(timeout=startup_timeout)
            except RuntimeError:
                kc.stop_channels()
                km.shutdown_kernel()
                raise

            return km, kc

        kernel_name = nb.metadata.get('kernelspec', {}).get('name', 'python')
        if self.kernel_name:
            kernel_name = self.kernel_name
        self.log.info("Executing notebook with kernel: %s" % kernel_name)
        self.km, self.kc = start_new_kernel(
            startup_timeout=self.startup_timeout,
            kernel_name=kernel_name,
            extra_arguments=self.extra_arguments,
            cwd=path)
        self.kc.allow_stdin = False
        self.nb = nb

        try:
            nb, resources = super(ExecutePreprocessor,
                                  self).preprocess(nb, resources)
        finally:
            self.kc.stop_channels()
            self.km.shutdown_kernel(now=self.shutdown_kernel == 'immediate')

        delattr(self, 'nb')

        return nb, resources

    def preprocess_cell(self, cell, resources, cell_index):
        """
        Executes a single code cell. See base.py for details.

        To execute all cells see :meth:`preprocess`.
        """
        if cell.cell_type != 'code':
            return cell, resources

        outputs = self.run_cell(cell, cell_index)
        cell.outputs = outputs

        if not self.allow_errors:
            for out in outputs:
                if out.output_type == 'error':
                    pattern = u"""\
                        An error occurred while executing the following cell:
                        ------------------
                        {cell.source}
                        ------------------

                        {out.ename}: {out.evalue}
                        """
                    msg = dedent(pattern).format(out=out, cell=cell)
                    raise CellExecutionError(msg)
        return cell, resources

    def _update_display_id(self, display_id, msg):
        """Update outputs with a given display_id"""
        if display_id not in self._display_id_map:
            self.log.debug("display id %r not in %s", display_id,
                           self._display_id_map)
            return

        if msg['header']['msg_type'] == 'update_display_data':
            msg['header']['msg_type'] = 'display_data'

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg['msg_type'])
            return

        for cell_idx, output_indices in self._display_id_map[display_id].items(
        ):
            cell = self.nb['cells'][cell_idx]
            outputs = cell['outputs']
            for output_idx in output_indices:
                outputs[output_idx]['data'] = out['data']
                outputs[output_idx]['metadata'] = out['metadata']

    def run_cell(self, cell, cell_index=0):
        msg_id = self.kc.execute(cell.source)
        self.log.debug("Executing cell:\n%s", cell.source)
        # wait for finish, with timeout
        while True:
            try:
                if self.timeout_func is not None:
                    timeout = self.timeout_func(cell)
                else:
                    timeout = self.timeout

                if not timeout or timeout < 0:
                    timeout = None
                msg = self.kc.shell_channel.get_msg(timeout=timeout)
            except Empty:
                self.log.error("Timeout waiting for execute reply (%is)." %
                               self.timeout)
                if self.interrupt_on_timeout:
                    self.log.error("Interrupting kernel")
                    self.km.interrupt_kernel()
                    break
                else:
                    try:
                        exception = TimeoutError
                    except NameError:
                        exception = RuntimeError
                    raise exception("Cell execution timed out")

            if msg['parent_header'].get('msg_id') == msg_id:
                break
            else:
                # not our reply
                continue

        outs = cell.outputs = []

        while True:
            try:
                # We've already waited for execute_reply, so all output
                # should already be waiting. However, on slow networks, like
                # in certain CI systems, waiting < 1 second might miss messages.
                # So long as the kernel sends a status:idle message when it
                # finishes, we won't actually have to wait this long, anyway.
                msg = self.kc.iopub_channel.get_msg(timeout=self.iopub_timeout)
            except Empty:
                self.log.warning("Timeout waiting for IOPub output")
                if self.raise_on_iopub_timeout:
                    raise RuntimeError("Timeout waiting for IOPub output")
                else:
                    break
            if msg['parent_header'].get('msg_id') != msg_id:
                # not an output from our execution
                continue

            msg_type = msg['msg_type']
            self.log.debug("output: %s", msg_type)
            content = msg['content']

            # set the prompt number for the input and the output
            if 'execution_count' in content:
                cell['execution_count'] = content['execution_count']

            if msg_type == 'status':
                if content['execution_state'] == 'idle':
                    break
                else:
                    continue
            elif msg_type == 'execute_input':
                continue
            elif msg_type == 'clear_output':
                outs[:] = []
                # clear display_id mapping for this cell
                for display_id, cell_map in self._display_id_map.items():
                    if cell_index in cell_map:
                        cell_map[cell_index] = []
                continue
            elif msg_type.startswith('comm'):
                continue

            display_id = None
            if msg_type in {
                    'execute_result', 'display_data', 'update_display_data'
            }:
                display_id = msg['content'].get('transient',
                                                {}).get('display_id', None)
                if display_id:
                    self._update_display_id(display_id, msg)
                if msg_type == 'update_display_data':
                    # update_display_data doesn't get recorded
                    continue

            try:
                out = output_from_msg(msg)
            except ValueError:
                self.log.error("unhandled iopub msg: " + msg_type)
                continue
            if display_id:
                # record output index in:
                #   _display_id_map[display_id][cell_idx]
                cell_map = self._display_id_map.setdefault(display_id, {})
                output_idx_list = cell_map.setdefault(cell_index, [])
                output_idx_list.append(len(outs))

            outs.append(out)

        return outs
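
The timeout traits above compose as in this sketch: when set, timeout_func
takes precedence over timeout and is called with the cell (per run_cell
above); the 'slow' tag check is an illustrative convention, not part of the API:

# Give cells tagged 'slow' five minutes, everything else thirty seconds,
# and interrupt the kernel on timeout instead of raising.
ep = ExecutePreprocessor(
    interrupt_on_timeout=True,
    timeout_func=lambda cell: 300 if 'slow' in cell.metadata.get('tags', []) else 30,
)
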
Example #17
class NotebookClient(LoggingConfigurable):
    """
    Encompasses a Client for executing cells in a notebook
    """

    timeout: int = Integer(
        None,
        allow_none=True,
        help=dedent("""
            The time to wait (in seconds) for output from executions.
            If a cell execution takes longer, a TimeoutError is raised.

            ``None`` or ``-1`` will disable the timeout. If ``timeout_func`` is set,
            it overrides ``timeout``.
            """),
    ).tag(config=True)

    timeout_func: t.Any = Any(
        default_value=None,
        allow_none=True,
        help=dedent("""
            A callable which, when given the cell source as input,
            returns the time to wait (in seconds) for output from cell
            executions. If a cell execution takes longer, a TimeoutError
            is raised.

            Returning ``None`` or ``-1`` will disable the timeout for the cell.
            Not setting ``timeout_func`` will cause the client to
            default to using the ``timeout`` trait for all cells. The
            ``timeout_func`` trait overrides ``timeout`` if it is not ``None``.
            """),
    ).tag(config=True)

    interrupt_on_timeout: bool = Bool(
        False,
        help=dedent("""
            If execution of a cell times out, interrupt the kernel and
            continue executing other cells rather than throwing an error and
            stopping.
            """),
    ).tag(config=True)

    startup_timeout: int = Integer(
        60,
        help=dedent("""
            The time to wait (in seconds) for the kernel to start.
            If kernel startup takes longer, a RuntimeError is
            raised.
            """),
    ).tag(config=True)

    allow_errors: bool = Bool(
        False,
        help=dedent("""
            If ``False`` (default), when a cell raises an error the
            execution is stopped and a `CellExecutionError`
            is raised, except if the error name is in
            ``allow_error_names``.
            If ``True``, execution errors are ignored and the execution
            is continued until the end of the notebook. Output from
            exceptions is included in the cell output in both cases.
            """),
    ).tag(config=True)

    allow_error_names: t.List[str] = List(
        Unicode(),
        help=dedent("""
            List of error names which won't stop the execution. Use this if the
            ``allow_errors`` option is too general and you want to allow only
            specific kinds of errors.
            """),
    ).tag(config=True)

    force_raise_errors: bool = Bool(
        False,
        help=dedent("""
            If False (default), errors from executing the notebook can be
            allowed with a ``raises-exception`` tag on a single cell, or the
            ``allow_errors`` or ``allow_error_names`` configurable options for
            all cells. An allowed error will be recorded in notebook output, and
            execution will continue. If an error occurs when it is not
            explicitly allowed, a `CellExecutionError` will be raised.
            If True, `CellExecutionError` will be raised for any error that occurs
            while executing the notebook. This overrides the ``allow_errors``
            and ``allow_error_names`` options and the ``raises-exception`` cell
            tag.
            """),
    ).tag(config=True)

    skip_cells_with_tag: str = Unicode(
        'skip-execution',
        help=dedent("""
            Name of the cell tag to use to denote a cell that should be skipped.
            """),
    ).tag(config=True)

    extra_arguments: t.List = List(Unicode()).tag(config=True)

    kernel_name: str = Unicode(
        '',
        help=dedent("""
            Name of kernel to use to execute the cells.
            If not set, use the kernel_spec embedded in the notebook.
            """),
    ).tag(config=True)

    raise_on_iopub_timeout: bool = Bool(
        False,
        help=dedent("""
            If ``False`` (default), then the kernel will continue waiting for
            iopub messages until it receives a kernel idle message, or until a
            timeout occurs, at which point the currently executing cell will be
            skipped. If ``True``, then an error will be raised after the first
            timeout. This option generally does not need to be used, but may be
            useful in contexts where there is the possibility of executing
            notebooks with memory-consuming infinite loops.
            """),
    ).tag(config=True)

    store_widget_state: bool = Bool(
        True,
        help=dedent("""
            If ``True`` (default), then the state of the Jupyter widgets created
            at the kernel will be stored in the metadata of the notebook.
            """),
    ).tag(config=True)

    record_timing: bool = Bool(
        True,
        help=dedent("""
            If ``True`` (default), then the execution timings of each cell will
            be stored in the metadata of the notebook.
            """),
    ).tag(config=True)

    iopub_timeout: int = Integer(
        4,
        allow_none=False,
        help=dedent("""
            The time to wait (in seconds) for IOPub output. This generally
            doesn't need to be set, but on some slow networks (such as CI
            systems) the default timeout might not be long enough to get all
            messages.
            """),
    ).tag(config=True)

    shell_timeout_interval: int = Integer(
        5,
        allow_none=False,
        help=dedent("""
            The time to wait (in seconds) for Shell output before retrying.
            This generally doesn't need to be set, but if one needs to check
            for dead kernels at a faster rate this can help.
            """),
    ).tag(config=True)

    shutdown_kernel = Enum(
        ['graceful', 'immediate'],
        default_value='graceful',
        help=dedent("""
            If ``graceful`` (default), then the kernel is given time to clean
            up after executing all cells, e.g., to execute its ``atexit`` hooks.
            If ``immediate``, then the kernel is signaled to immediately
            terminate.
            """),
    ).tag(config=True)

    ipython_hist_file: str = Unicode(
        default_value=':memory:',
        help=
        """Path to file to use for SQLite history database for an IPython kernel.

        The specific value ``:memory:`` (including the colons
        at both ends but not the back ticks) avoids creating a history file. Otherwise, IPython
        will create a history file for each kernel.

        When running kernels simultaneously (e.g. via multiprocessing), saving history to a single
        SQLite file can result in database errors, so using ``:memory:`` is recommended in
        non-interactive contexts.
        """,
    ).tag(config=True)

    kernel_manager_class: KernelManager = Type(
        config=True, help='The kernel manager class to use.')

    @default('kernel_manager_class')
    def _kernel_manager_class_default(self) -> KernelManager:
        """Use a dynamic default to avoid importing jupyter_client at startup"""
        from jupyter_client import AsyncKernelManager

        return AsyncKernelManager

    _display_id_map: t.Dict[str, t.Dict] = Dict(help=dedent("""
              mapping of locations of outputs with a given display_id
              tracks cell index and output index within cell.outputs for
              each appearance of the display_id
              {
                  'display_id': {
                      cell_idx: [output_idx,]
                  }
              }
              """))

    display_data_priority: t.List = List(
        [
            'text/html',
            'application/pdf',
            'text/latex',
            'image/svg+xml',
            'image/png',
            'image/jpeg',
            'text/markdown',
            'text/plain',
        ],
        help="""
            An ordered list of preferred output types; the first
            encountered will usually be used when converting,
            discarding the others.
            """,
    ).tag(config=True)

    resources: t.Dict = Dict(help=dedent("""
            Additional resources used in the conversion process. For example,
            passing ``{'metadata': {'path': run_path}}`` sets the
            execution path to ``run_path``.
            """))

    def __init__(self,
                 nb: NotebookNode,
                 km: t.Optional[KernelManager] = None,
                 **kw) -> None:
        """Initializes the execution manager.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being executed.
        km : KernelManager (optional)
            Optional kernel manager. If none is provided, a kernel manager will
            be created.
        """
        super().__init__(**kw)
        self.nb: NotebookNode = nb
        self.km: t.Optional[KernelManager] = km
        self.owns_km: bool = km is None  # whether the NotebookClient owns the kernel manager
        self.kc: t.Optional[KernelClient] = None
        self.reset_execution_trackers()
        self.widget_registry: t.Dict[str, t.Dict] = {
            '@jupyter-widgets/output': {
                'OutputModel': OutputWidget
            }
        }
        # comm_open_handlers should return an object with a .handle_msg(msg) method or None
        self.comm_open_handlers: t.Dict[str, t.Any] = {
            'jupyter.widget': self.on_comm_open_jupyter_widget
        }

    def reset_execution_trackers(self) -> None:
        """Resets any per-execution trackers."""
        self.task_poll_for_reply: t.Optional[asyncio.Future] = None
        self.code_cells_executed = 0
        self._display_id_map = {}
        self.widget_state: t.Dict[str, t.Dict] = {}
        self.widget_buffers: t.Dict[str, t.Dict[t.Tuple[str, ...],
                                                t.Dict[str, str]]] = {}
        # maps to a list of hooks, where the last one is used;
        # this supports nested use of output widgets.
        self.output_hook_stack: t.Any = collections.defaultdict(list)
        # our front-end mimicking Output widgets
        self.comm_objects: t.Dict[str, t.Any] = {}

    def create_kernel_manager(self) -> KernelManager:
        """Creates a new kernel manager.

        Returns
        -------
        km : KernelManager
            Kernel manager whose client class is asynchronous.
        """
        if not self.kernel_name:
            kn = self.nb.metadata.get('kernelspec', {}).get('name')
            if kn is not None:
                self.kernel_name = kn

        if not self.kernel_name:
            self.km = self.kernel_manager_class(config=self.config)
        else:
            self.km = self.kernel_manager_class(kernel_name=self.kernel_name,
                                                config=self.config)

        # If the current kernel manager is still using the default (synchronous) KernelClient class,
        # switch to the async version since that's what NBClient prefers.
        if self.km.client_class == 'jupyter_client.client.KernelClient':
            self.km.client_class = 'jupyter_client.asynchronous.AsyncKernelClient'

        return self.km

    async def _async_cleanup_kernel(self) -> None:
        assert self.km is not None
        now = self.shutdown_kernel == "immediate"
        try:
            # Queue the manager to kill the process, and recover gracefully if it's already dead.
            if await ensure_async(self.km.is_alive()):
                await ensure_async(self.km.shutdown_kernel(now=now))
        except RuntimeError as e:
            # The error isn't specialized, so we have to check the message
            if 'No kernel is running!' not in str(e):
                raise
        finally:
            # Remove any state left over even if we failed to stop the kernel
            await ensure_async(self.km.cleanup_resources())
            if getattr(self, "kc") and self.kc is not None:
                await ensure_async(self.kc.stop_channels())
                self.kc = None
                self.km = None

    _cleanup_kernel = run_sync(_async_cleanup_kernel)
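    # run_sync (nbclient.util in the real package) wraps the coroutine above so
    # synchronous callers get a blocking _cleanup_kernel with the same logic.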

    async def async_start_new_kernel(self, **kwargs) -> None:
        """Creates a new kernel.

        Parameters
        ----------
        kwargs :
            Any options for ``self.kernel_manager_class.start_kernel()``. Because
            that defaults to AsyncKernelManager, this will likely include options
            accepted by ``AsyncKernelManager.start_kernel()``, which includes ``cwd``.
        """
        assert self.km is not None
        resource_path = self.resources.get('metadata', {}).get('path') or None
        if resource_path and 'cwd' not in kwargs:
            kwargs["cwd"] = resource_path

        has_history_manager_arg = any(
            arg.startswith('--HistoryManager.hist_file')
            for arg in self.extra_arguments)
        if (hasattr(self.km, 'ipykernel') and self.km.ipykernel
                and self.ipython_hist_file and not has_history_manager_arg):
            self.extra_arguments += [
                f'--HistoryManager.hist_file={self.ipython_hist_file}'
            ]

        await ensure_async(
            self.km.start_kernel(extra_arguments=self.extra_arguments,
                                 **kwargs))

    start_new_kernel = run_sync(async_start_new_kernel)

    async def async_start_new_kernel_client(self) -> KernelClient:
        """Creates a new kernel client.

        Returns
        -------
        kc : KernelClient
            Kernel client as created by the kernel manager ``km``.
        """
        assert self.km is not None
        self.kc = self.km.client()
        await ensure_async(self.kc.start_channels())
        try:
            await ensure_async(
                self.kc.wait_for_ready(timeout=self.startup_timeout))
        except RuntimeError:
            await self._async_cleanup_kernel()
            raise
        self.kc.allow_stdin = False
        return self.kc

    start_new_kernel_client = run_sync(async_start_new_kernel_client)

    @contextmanager
    def setup_kernel(self, **kwargs) -> t.Generator:
        """
        Context manager for setting up the kernel to execute a notebook.

        This assigns the Kernel Manager (``self.km``) if missing, and the Kernel Client (``self.kc``).

        When control returns from the yield it stops the client's zmq channels, and shuts
        down the kernel.
        """
        # by default, cleanup the kernel client if we own the kernel manager
        # and keep it alive if we don't
        cleanup_kc = kwargs.pop('cleanup_kc', self.owns_km)

        # Can't use run_until_complete on an asynccontextmanager function :(
        if self.km is None:
            self.km = self.create_kernel_manager()

        if not self.km.has_kernel:
            self.start_new_kernel(**kwargs)
            self.start_new_kernel_client()
        try:
            yield
        finally:
            if cleanup_kc:
                self._cleanup_kernel()

    @asynccontextmanager
    async def async_setup_kernel(self, **kwargs) -> t.AsyncGenerator:
        """
        Context manager for setting up the kernel to execute a notebook.

        This assigns the Kernel Manager (``self.km``) if missing, and the Kernel Client (``self.kc``).

        When control returns from the yield it stops the client's zmq channels, and shuts
        down the kernel.

        Handlers for SIGINT and SIGTERM are also added to cleanup in case of unexpected shutdown.
        """
        # by default, cleanup the kernel client if we own the kernel manager
        # and keep it alive if we don't
        cleanup_kc = kwargs.pop('cleanup_kc', self.owns_km)
        if self.km is None:
            self.km = self.create_kernel_manager()

        # self._cleanup_kernel uses run_sync, which ensures the ioloop is running again.
        # This is necessary as the ioloop has stopped once atexit fires.
        atexit.register(self._cleanup_kernel)

        def on_signal():
            asyncio.ensure_future(self._async_cleanup_kernel())
            atexit.unregister(self._cleanup_kernel)

        loop = asyncio.get_event_loop()
        try:
            loop.add_signal_handler(signal.SIGINT, on_signal)
            loop.add_signal_handler(signal.SIGTERM, on_signal)
        except (NotImplementedError, RuntimeError):
            # NotImplementedError: Windows does not support signals.
            # RuntimeError: Raised when add_signal_handler is called outside the main thread
            pass

        if not self.km.has_kernel:
            await self.async_start_new_kernel(**kwargs)
            await self.async_start_new_kernel_client()
        try:
            yield
        finally:
            if cleanup_kc:
                await self._async_cleanup_kernel()

            atexit.unregister(self._cleanup_kernel)
            try:
                loop.remove_signal_handler(signal.SIGINT)
                loop.remove_signal_handler(signal.SIGTERM)
            except (NotImplementedError, RuntimeError):
                pass

    async def async_execute(self,
                            reset_kc: bool = False,
                            **kwargs) -> NotebookNode:
        """
        Executes each code cell.

        Parameters
        ----------
        kwargs :
            Any option for ``self.kernel_manager_class.start_kernel()``. Because
            that defaults to AsyncKernelManager, this will likely include options
            accepted by ``jupyter_client.AsyncKernelManager.start_kernel()``,
            which includes ``cwd``.

            ``reset_kc`` if True, the kernel client will be reset and a new one
            will be created (default: False).

        Returns
        -------
        nb : NotebookNode
            The executed notebook.
        """
        if reset_kc and self.owns_km:
            await self._async_cleanup_kernel()
        self.reset_execution_trackers()

        async with self.async_setup_kernel(**kwargs):
            assert self.kc is not None
            self.log.info("Executing notebook with kernel: %s" %
                          self.kernel_name)
            msg_id = await ensure_async(self.kc.kernel_info())
            info_msg = await self.async_wait_for_reply(msg_id)
            if info_msg is not None:
                if 'language_info' in info_msg['content']:
                    self.nb.metadata['language_info'] = info_msg['content'][
                        'language_info']
                else:
                    raise RuntimeError(
                        'Kernel info received message content has no "language_info" key. '
                        'Content is:\n' + str(info_msg['content']))
            for index, cell in enumerate(self.nb.cells):
                # Ignore `'execution_count' in content` as it's always 1
                # when store_history is False
                await self.async_execute_cell(
                    cell, index, execution_count=self.code_cells_executed + 1)
            self.set_widgets_metadata()

        return self.nb

    execute = run_sync(async_execute)

    def set_widgets_metadata(self) -> None:
        if self.widget_state:
            self.nb.metadata.widgets = {
                'application/vnd.jupyter.widget-state+json': {
                    'state': {
                        model_id: self._serialize_widget_state(state)
                        for model_id, state in self.widget_state.items()
                        if '_model_name' in state
                    },
                    'version_major': 2,
                    'version_minor': 0,
                }
            }
            for key, widget in self.nb.metadata.widgets[
                    'application/vnd.jupyter.widget-state+json'][
                        'state'].items():
                buffers = self.widget_buffers.get(key)
                if buffers:
                    widget['buffers'] = list(buffers.values())

    def _update_display_id(self, display_id: str, msg: t.Dict) -> None:
        """Update outputs with a given display_id"""
        if display_id not in self._display_id_map:
            self.log.debug("display id %r not in %s", display_id,
                           self._display_id_map)
            return

        if msg['header']['msg_type'] == 'update_display_data':
            msg['header']['msg_type'] = 'display_data'

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg['msg_type'])
            return

        for cell_idx, output_indices in self._display_id_map[display_id].items():
            cell = self.nb['cells'][cell_idx]
            outputs = cell['outputs']
            for output_idx in output_indices:
                outputs[output_idx]['data'] = out['data']
                outputs[output_idx]['metadata'] = out['metadata']

    async def _async_poll_for_reply(
        self,
        msg_id: str,
        cell: NotebookNode,
        timeout: t.Optional[int],
        task_poll_output_msg: asyncio.Future,
        task_poll_kernel_alive: asyncio.Future,
    ) -> t.Dict:

        assert self.kc is not None
        new_timeout: t.Optional[float] = None
        if timeout is not None:
            deadline = monotonic() + timeout
            new_timeout = float(timeout)
        while True:
            try:
                msg = await ensure_async(
                    self.kc.shell_channel.get_msg(timeout=new_timeout))
                if msg['parent_header'].get('msg_id') == msg_id:
                    if self.record_timing:
                        cell['metadata']['execution'][
                            'shell.execute_reply'] = timestamp()
                    try:
                        await asyncio.wait_for(task_poll_output_msg,
                                               self.iopub_timeout)
                    except (asyncio.TimeoutError, Empty):
                        if self.raise_on_iopub_timeout:
                            task_poll_kernel_alive.cancel()
                            raise CellTimeoutError.error_from_timeout_and_cell(
                                "Timeout waiting for IOPub output",
                                self.iopub_timeout, cell)
                        else:
                            self.log.warning(
                                "Timeout waiting for IOPub output")
                    task_poll_kernel_alive.cancel()
                    return msg
                else:
                    if new_timeout is not None:
                        new_timeout = max(0, deadline - monotonic())
            except Empty:
                # received no message, check if kernel is still alive
                assert timeout is not None
                task_poll_kernel_alive.cancel()
                await self._async_check_alive()
                await self._async_handle_timeout(timeout, cell)

    async def _async_poll_output_msg(self, parent_msg_id: str,
                                     cell: NotebookNode,
                                     cell_index: int) -> None:

        assert self.kc is not None
        while True:
            msg = await ensure_async(
                self.kc.iopub_channel.get_msg(timeout=None))
            if msg['parent_header'].get('msg_id') == parent_msg_id:
                try:
                    # Will raise CellExecutionComplete when completed
                    self.process_message(msg, cell, cell_index)
                except CellExecutionComplete:
                    return

    async def _async_poll_kernel_alive(self) -> None:
        while True:
            await asyncio.sleep(1)
            try:
                await self._async_check_alive()
            except DeadKernelError:
                assert self.task_poll_for_reply is not None
                self.task_poll_for_reply.cancel()
                return

    def _get_timeout(self, cell: t.Optional[NotebookNode]) -> t.Optional[int]:
        if self.timeout_func is not None and cell is not None:
            timeout = self.timeout_func(cell)
        else:
            timeout = self.timeout

        if not timeout or timeout < 0:
            timeout = None

        return timeout

    async def _async_handle_timeout(self,
                                    timeout: int,
                                    cell: t.Optional[NotebookNode] = None
                                    ) -> None:

        self.log.error("Timeout waiting for execute reply (%is)." % timeout)
        if self.interrupt_on_timeout:
            self.log.error("Interrupting kernel")
            assert self.km is not None
            await ensure_async(self.km.interrupt_kernel())
        else:
            raise CellTimeoutError.error_from_timeout_and_cell(
                "Cell execution timed out", timeout, cell)

    async def _async_check_alive(self) -> None:
        assert self.kc is not None
        if not await ensure_async(self.kc.is_alive()):
            self.log.error("Kernel died while waiting for execute reply.")
            raise DeadKernelError("Kernel died")

    async def async_wait_for_reply(
            self,
            msg_id: str,
            cell: t.Optional[NotebookNode] = None) -> t.Optional[t.Dict]:

        assert self.kc is not None
        # wait for finish, with timeout
        timeout = self._get_timeout(cell)
        cumulative_time = 0
        while True:
            try:
                msg = await ensure_async(
                    self.kc.shell_channel.get_msg(
                        timeout=self.shell_timeout_interval))
            except Empty:
                await self._async_check_alive()
                cumulative_time += self.shell_timeout_interval
                if timeout and cumulative_time > timeout:
                    await self._async_handle_timeout(timeout, cell)
                    break
            else:
                if msg['parent_header'].get('msg_id') == msg_id:
                    return msg
        return None

    wait_for_reply = run_sync(async_wait_for_reply)
    # Backwards compatibility naming for papermill
    _wait_for_reply = wait_for_reply

    def _passed_deadline(self, deadline: t.Optional[int]) -> bool:
        if deadline is not None and deadline - monotonic() <= 0:
            return True
        return False

    def _check_raise_for_error(self, cell: NotebookNode,
                               exec_reply: t.Optional[t.Dict]) -> None:

        if exec_reply is None:
            return None

        exec_reply_content = exec_reply['content']
        if exec_reply_content['status'] != 'error':
            return None

        cell_allows_errors = (not self.force_raise_errors) and (
            self.allow_errors
            or exec_reply_content.get('ename') in self.allow_error_names
            or "raises-exception" in cell.metadata.get("tags", []))

        if not cell_allows_errors:
            raise CellExecutionError.from_cell_and_msg(cell,
                                                       exec_reply_content)

    async def async_execute_cell(
        self,
        cell: NotebookNode,
        cell_index: int,
        execution_count: t.Optional[int] = None,
        store_history: bool = True,
    ) -> NotebookNode:
        """
        Executes a single code cell.

        To execute all cells see :meth:`execute`.

        Parameters
        ----------
        cell : nbformat.NotebookNode
            The cell which is currently being processed.
        cell_index : int
            The position of the cell within the notebook object.
        execution_count : int
            The execution count to be assigned to the cell (default: Use kernel response)
        store_history : bool
            Determines if history should be stored in the kernel (default: True).
            Specific to ipython kernels, which can store command histories.

        Returns
        -------
        cell : NotebookNode
            The cell which was just processed.

        Raises
        ------
        CellExecutionError
            If execution failed and should raise an exception, this will be raised
            with defaults about the failure.
        """
        assert self.kc is not None
        if cell.cell_type != 'code' or not cell.source.strip():
            self.log.debug("Skipping non-executing cell %s", cell_index)
            return cell

        if self.skip_cells_with_tag in cell.metadata.get("tags", []):
            self.log.debug("Skipping tagged cell %s", cell_index)
            return cell

        if self.record_timing and 'execution' not in cell['metadata']:
            cell['metadata']['execution'] = {}

        self.log.debug("Executing cell:\n%s", cell.source)

        cell_allows_errors = (not self.force_raise_errors) and (
            self.allow_errors
            or "raises-exception" in cell.metadata.get("tags", []))

        parent_msg_id = await ensure_async(
            self.kc.execute(cell.source,
                            store_history=store_history,
                            stop_on_error=not cell_allows_errors))
        # We launched a code cell to execute
        self.code_cells_executed += 1
        exec_timeout = self._get_timeout(cell)

        cell.outputs = []
        self.clear_before_next_output = False

        task_poll_kernel_alive = asyncio.ensure_future(
            self._async_poll_kernel_alive())
        task_poll_output_msg = asyncio.ensure_future(
            self._async_poll_output_msg(parent_msg_id, cell, cell_index))
        self.task_poll_for_reply = asyncio.ensure_future(
            self._async_poll_for_reply(parent_msg_id, cell, exec_timeout,
                                       task_poll_output_msg,
                                       task_poll_kernel_alive))
        try:
            exec_reply = await self.task_poll_for_reply
        except asyncio.CancelledError:
            # can only be cancelled by task_poll_kernel_alive when the kernel is dead
            task_poll_output_msg.cancel()
            raise DeadKernelError("Kernel died")
        except Exception as e:
            # Best effort to cancel request if it hasn't been resolved
            try:
                # Check if the task_poll_output is doing the raising for us
                if not isinstance(e, CellControlSignal):
                    task_poll_output_msg.cancel()
            finally:
                raise

        if execution_count:
            cell['execution_count'] = execution_count
        self._check_raise_for_error(cell, exec_reply)
        self.nb['cells'][cell_index] = cell
        return cell

    execute_cell = run_sync(async_execute_cell)
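
    # Hedged sketch: executing a single cell synchronously through the
    # run_sync wrapper (assumes an executor instance whose kernel client is
    # already started):
    #
    #     cell = client.nb.cells[0]
    #     client.execute_cell(cell, 0)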

    def process_message(self, msg: t.Dict, cell: NotebookNode,
                        cell_index: int) -> t.Optional[t.List]:
        """
        Processes a kernel message, updates cell state, and returns the
        resulting output object that was appended to cell.outputs.

        The input argument *cell* is modified in-place.

        Parameters
        ----------
        msg : dict
            The kernel message being processed.
        cell : nbformat.NotebookNode
            The cell which is currently being processed.
        cell_index : int
            The position of the cell within the notebook object.

        Returns
        -------
        output : dict
            The execution output payload (or None for no output).

        Raises
        ------
        CellExecutionComplete
          Once a message arrives which indicates computation completeness.

        """
        msg_type = msg['msg_type']
        self.log.debug("msg_type: %s", msg_type)
        content = msg['content']
        self.log.debug("content: %s", content)

        display_id = content.get('transient', {}).get('display_id', None)
        if display_id and msg_type in {
                'execute_result', 'display_data', 'update_display_data'
        }:
            self._update_display_id(display_id, msg)

        # set the prompt number for the input and the output
        if 'execution_count' in content:
            cell['execution_count'] = content['execution_count']

        if self.record_timing:
            if msg_type == 'status':
                if content['execution_state'] == 'idle':
                    cell['metadata']['execution'][
                        'iopub.status.idle'] = timestamp()
                elif content['execution_state'] == 'busy':
                    cell['metadata']['execution'][
                        'iopub.status.busy'] = timestamp()
            elif msg_type == 'execute_input':
                cell['metadata']['execution'][
                    'iopub.execute_input'] = timestamp()

        if msg_type == 'status':
            if content['execution_state'] == 'idle':
                raise CellExecutionComplete()
        elif msg_type == 'clear_output':
            self.clear_output(cell.outputs, msg, cell_index)
        elif msg_type.startswith('comm'):
            self.handle_comm_msg(cell.outputs, msg, cell_index)
        # Check for remaining messages we don't process
        elif msg_type not in ['execute_input', 'update_display_data']:
            # Assign output as our processed "result"
            return self.output(cell.outputs, msg, display_id, cell_index)
        return None

    def output(self, outs: t.List, msg: t.Dict, display_id: str,
               cell_index: int) -> t.Optional[t.List]:

        msg_type = msg['msg_type']

        parent_msg_id = msg['parent_header'].get('msg_id')
        if self.output_hook_stack[parent_msg_id]:
            # if we have a hook registered, it will override our
            # default output behaviour (e.g. OutputWidget)
            hook = self.output_hook_stack[parent_msg_id][-1]
            hook.output(outs, msg, display_id, cell_index)
            return None

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg_type)
            return None

        if self.clear_before_next_output:
            self.log.debug('Executing delayed clear_output')
            outs[:] = []
            self.clear_display_id_mapping(cell_index)
            self.clear_before_next_output = False

        if display_id:
            # record output index in:
            #   _display_id_map[display_id][cell_idx]
            cell_map = self._display_id_map.setdefault(display_id, {})
            output_idx_list = cell_map.setdefault(cell_index, [])
            output_idx_list.append(len(outs))

        outs.append(out)

        return out

    def clear_output(self, outs: t.List, msg: t.Dict, cell_index: int) -> None:

        content = msg['content']

        parent_msg_id = msg['parent_header'].get('msg_id')
        if self.output_hook_stack[parent_msg_id]:
            # if we have a hook registered, it will override our
            # default clear_output behaviour (e.g. OutputWidget)
            hook = self.output_hook_stack[parent_msg_id][-1]
            hook.clear_output(outs, msg, cell_index)
            return

        if content.get('wait'):
            self.log.debug('Wait to clear output')
            self.clear_before_next_output = True
        else:
            self.log.debug('Immediate clear output')
            outs[:] = []
            self.clear_display_id_mapping(cell_index)

    def clear_display_id_mapping(self, cell_index: int) -> None:

        for display_id, cell_map in self._display_id_map.items():
            if cell_index in cell_map:
                cell_map[cell_index] = []

    def handle_comm_msg(self, outs: t.List, msg: t.Dict,
                        cell_index: int) -> None:

        content = msg['content']
        data = content['data']
        if self.store_widget_state and 'state' in data:  # ignore custom messages
            self.widget_state.setdefault(content['comm_id'],
                                         {}).update(data['state'])
            if 'buffer_paths' in data and data['buffer_paths']:
                comm_id = content['comm_id']
                if comm_id not in self.widget_buffers:
                    self.widget_buffers[comm_id] = {}
                # for each comm, the path uniquely identifies a buffer
                new_buffers: t.Dict[t.Tuple[str, ...], t.Dict[str, str]] = {
                    tuple(k["path"]): k
                    for k in self._get_buffer_data(msg)
                }
                self.widget_buffers[comm_id].update(new_buffers)
        # There are cases where we need to mimic a frontend, to get behaviour
        # similar to the Output widget in JupyterLab/Notebook
        if msg['msg_type'] == 'comm_open':
            target = msg['content'].get('target_name')
            handler = self.comm_open_handlers.get(target)
            if handler:
                comm_id = msg['content']['comm_id']
                comm_object = handler(msg)
                if comm_object:
                    self.comm_objects[comm_id] = comm_object
            else:
                self.log.warning(
                    f'No handler found for comm target {target!r}')
        elif msg['msg_type'] == 'comm_msg':
            content = msg['content']
            comm_id = msg['content']['comm_id']
            if comm_id in self.comm_objects:
                self.comm_objects[comm_id].handle_msg(msg)

    def _serialize_widget_state(self, state: t.Dict) -> t.Dict[str, t.Any]:
        """Serialize a widget state, following format in @jupyter-widgets/schema."""
        return {
            'model_name': state.get('_model_name'),
            'model_module': state.get('_model_module'),
            'model_module_version': state.get('_model_module_version'),
            'state': state,
        }

    def _get_buffer_data(self, msg: t.Dict) -> t.List[t.Dict[str, str]]:
        encoded_buffers = []
        paths = msg['content']['data']['buffer_paths']
        buffers = msg['buffers']
        for path, buffer in zip(paths, buffers):
            encoded_buffers.append({
                'data': base64.b64encode(buffer).decode('utf-8'),
                'encoding': 'base64',
                'path': path,
            })
        return encoded_buffers
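
    # Hedged illustration: widget buffers travel base64-encoded; e.g. the two
    # bytes b'\x00\x01' encode to 'AAE=':
    #
    #     base64.b64encode(b'\x00\x01').decode('utf-8')  # -> 'AAE='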

    def register_output_hook(self, msg_id: str, hook: OutputWidget) -> None:
        """Registers an override object that handles output/clear_output instead.

        Multiple hooks can be registered, where the last one will be used (stack based)
        """
        # mimics
        # https://jupyterlab.github.io/jupyterlab/services/interfaces/kernel.ikernelconnection.html#registermessagehook
        self.output_hook_stack[msg_id].append(hook)

    def remove_output_hook(self, msg_id: str, hook: OutputWidget) -> None:
        """Unregisters an override object that handles output/clear_output instead"""
        # mimics
        # https://jupyterlab.github.io/jupyterlab/services/interfaces/kernel.ikernelconnection.html#removemessagehook
        removed_hook = self.output_hook_stack[msg_id].pop()
        assert removed_hook == hook

    def on_comm_open_jupyter_widget(self, msg: t.Dict):
        content = msg['content']
        data = content['data']
        state = data['state']
        comm_id = msg['content']['comm_id']
        module = self.widget_registry.get(state['_model_module'])
        if module:
            widget_class = module.get(state['_model_name'])
            if widget_class:
                return widget_class(comm_id, state, self.kc, self)
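
# Hedged usage sketch: this executor matches nbclient's NotebookClient API, so
# a typical end-to-end run (the notebook path is hypothetical) looks like:
#
#     import nbformat
#     from nbclient import NotebookClient
#
#     nb = nbformat.read("example.ipynb", as_version=4)
#     client = NotebookClient(nb, timeout=600, kernel_name="python3")
#     client.execute()  # the run_sync wrapper around async_execute
#     nbformat.write(nb, "executed.ipynb")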
Exemple #18
class NotebookNotary(LoggingConfigurable):
    """A class for computing and verifying notebook signatures."""
    
    data_dir = Unicode()
    @default('data_dir')
    def _data_dir_default(self):
        app = None
        try:
            if JupyterApp.initialized():
                app = JupyterApp.instance()
        except MultipleInstanceError:
            pass
        if app is None:
            # create an app, without the global instance
            app = JupyterApp()
            app.initialize(argv=[])
        return app.data_dir
    
    db_file = Unicode(
        help="""The sqlite file in which to store notebook signatures.
        By default, this will be in your Jupyter data directory.
        You can set it to ':memory:' to disable sqlite writing to the filesystem.
        """).tag(config=True)

    @default('db_file')
    def _db_file_default(self):
        if not self.data_dir:
            return ':memory:'
        return os.path.join(self.data_dir, u'nbsignatures.db')
    
    # 64k entries ~ 12MB
    cache_size = Integer(65535,
        help="""The number of notebook signatures to cache.
        When the number of signatures exceeds this value,
        the oldest 25% of signatures will be culled.
        """
    ).tag(config=True)
    db = Any()
    @default('db')
    def _db_default(self):
        if sqlite3 is None:
            self.log.warning("Missing SQLite3, all notebooks will be untrusted!")
            return
        kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
        try:
            db = sqlite3.connect(self.db_file, **kwargs)
            self.init_db(db)
        except (sqlite3.DatabaseError, sqlite3.OperationalError):
            if self.db_file != ':memory:':
                old_db_location = self.db_file + ".bak"
                self.log.warning("""The signatures database cannot be opened; maybe it is corrupted or encrypted.  You may need to rerun your notebooks to ensure that they are trusted to run Javascript.  The old signatures database has been renamed to %s and a new one has been created.""",
                    old_db_location)
                try:
                    os.rename(self.db_file, old_db_location)
                    db = sqlite3.connect(self.db_file, **kwargs)
                    self.init_db(db)
                except (sqlite3.DatabaseError, sqlite3.OperationalError):
                    self.log.warning("""Failed committing signatures database to disk.  You may need to move the database file to a non-networked file system, using config option `NotebookNotary.db_file`.  Using in-memory signatures database for the remainder of this session.""")
                    self.db_file = ':memory:'
                    db = sqlite3.connect(self.db_file, **kwargs)
                    self.init_db(db)
            else:
                raise
        return db
    
    def init_db(self, db):
        db.execute("""
        CREATE TABLE IF NOT EXISTS nbsignatures
        (
            id integer PRIMARY KEY AUTOINCREMENT,
            algorithm text,
            signature text,
            path text,
            last_seen timestamp
        )""")
        db.execute("""
        CREATE INDEX IF NOT EXISTS algosig ON nbsignatures(algorithm, signature)
        """)
        db.commit()
    
    algorithm = Enum(algorithms, default_value='sha256',
        help="""The hashing algorithm used to sign notebooks."""
    ).tag(config=True)
    @observe('algorithm')
    def _algorithm_changed(self, change):
        self.digestmod = getattr(hashlib, change.new)
    
    digestmod = Any()
    @default('digestmod')
    def _digestmod_default(self):
        return getattr(hashlib, self.algorithm)
    
    secret_file = Unicode(
        help="""The file where the secret key is stored."""
    ).tag(config=True)
    @default('secret_file')
    def _secret_file_default(self):
        if not self.data_dir:
            return ''
        return os.path.join(self.data_dir, 'notebook_secret')
    
    secret = Bytes(
        help="""The secret key with which notebooks are signed."""
    ).tag(config=True)
    @default('secret')
    def _secret_default(self):
        # note : this assumes an Application is running
        if os.path.exists(self.secret_file):
            with io.open(self.secret_file, 'rb') as f:
                return f.read()
        else:
            secret = base64.encodebytes(os.urandom(1024))
            self._write_secret_file(secret)
            return secret
    
    def _write_secret_file(self, secret):
        """write my secret to my secret_file"""
        self.log.info("Writing notebook-signing key to %s", self.secret_file)
        with io.open(self.secret_file, 'wb') as f:
            f.write(secret)
        try:
            os.chmod(self.secret_file, 0o600)
        except OSError:
            self.log.warning(
                "Could not set permissions on %s",
                self.secret_file
            )
        return secret
    
    def compute_signature(self, nb):
        """Compute a notebook's signature
        
        by hashing the entire contents of the notebook via HMAC digest.
        """
        hmac = HMAC(self.secret, digestmod=self.digestmod)
        # don't include the previous hash in the content to hash
        with signature_removed(nb):
            # sign the whole thing
            for b in yield_everything(nb):
                hmac.update(b)
        
        return hmac.hexdigest()
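
    # Hedged illustration of the underlying primitive (stdlib hmac):
    #
    #     import hashlib, hmac
    #     hmac.new(b'secret', b'notebook-bytes', hashlib.sha256).hexdigest()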
    
    def check_signature(self, nb):
        """Check a notebook's stored signature
        
        If a signature is stored in the notebook's metadata,
        a new signature is computed and compared with the stored value.
        
        Returns True if the signature is found and matches, False otherwise.
        
        The following conditions must all be met for a notebook to be trusted:
        - a signature is stored in the form 'scheme:hexdigest'
        - the stored scheme matches the requested scheme
        - the requested scheme is available from hashlib
        - the computed hash from notebook_signature matches the stored hash
        """
        if nb.nbformat < 3:
            return False
        if self.db is None:
            return False
        signature = self.compute_signature(nb)
        r = self.db.execute("""SELECT id FROM nbsignatures WHERE
            algorithm = ? AND
            signature = ?;
            """, (self.algorithm, signature)).fetchone()
        if r is None:
            return False
        self.db.execute("""UPDATE nbsignatures SET last_seen = ? WHERE
            algorithm = ? AND
            signature = ?;
            """,
            (datetime.utcnow(), self.algorithm, signature),
        )
        self.db.commit()
        return True
    
    def sign(self, nb):
        """Sign a notebook, indicating that its output is trusted on this machine
        
        Stores hash algorithm and hmac digest in a local database of trusted notebooks.
        """
        if nb.nbformat < 3:
            return
        signature = self.compute_signature(nb)
        self.store_signature(signature, nb)

    def store_signature(self, signature, nb):
        if self.db is None:
            return
        self.db.execute("""INSERT OR IGNORE INTO nbsignatures
            (algorithm, signature, last_seen) VALUES (?, ?, ?)""",
            (self.algorithm, signature, datetime.utcnow())
        )
        self.db.execute("""UPDATE nbsignatures SET last_seen = ? WHERE
            algorithm = ? AND
            signature = ?;
            """,
            (datetime.utcnow(), self.algorithm, signature),
        )
        self.db.commit()
        n, = self.db.execute("SELECT Count(*) FROM nbsignatures").fetchone()
        if n > self.cache_size:
            self.cull_db()
    
    def unsign(self, nb):
        """Ensure that a notebook is untrusted
        
        by removing its signature from the trusted database, if present.
        """
        signature = self.compute_signature(nb)
        self.db.execute("""DELETE FROM nbsignatures WHERE
                algorithm = ? AND
                signature = ?;
            """,
            (self.algorithm, signature)
        )
        self.db.commit()
    
    def cull_db(self):
        """Cull oldest 25% of the trusted signatures when the size limit is reached"""
        self.db.execute("""DELETE FROM nbsignatures WHERE id IN (
            SELECT id FROM nbsignatures ORDER BY last_seen DESC LIMIT -1 OFFSET ?
        );
        """, (max(int(0.75 * self.cache_size), 1),))
    
    def mark_cells(self, nb, trusted):
        """Mark cells as trusted if the notebook's signature can be verified
        
        Sets ``cell.metadata.trusted = True | False`` on all code cells,
        depending on whether the stored signature can be verified.
        
        This function is the inverse of check_cells.
        """
        if nb.nbformat < 3:
            return
        
        for cell in yield_code_cells(nb):
            cell['metadata']['trusted'] = trusted
    
    def _check_cell(self, cell, nbformat_version):
        """Do we trust an individual cell?
        
        Return True if:
        
        - cell is explicitly trusted
        - cell has no potentially unsafe rich output
        
        If a cell has no output, or only simple print statements,
        it will always be trusted.
        """
        # explicitly trusted
        if cell['metadata'].pop("trusted", False):
            return True
        
        # explicitly safe output
        if nbformat_version >= 4:
            unsafe_output_types = ['execute_result', 'display_data']
            safe_keys = {"output_type", "execution_count", "metadata"}
        else: # v3
            unsafe_output_types = ['pyout', 'display_data']
            safe_keys = {"output_type", "prompt_number", "metadata"}
        
        for output in cell['outputs']:
            output_type = output['output_type']
            if output_type in unsafe_output_types:
                # if there are any data keys not in the safe whitelist
                output_keys = set(output)
                if output_keys.difference(safe_keys):
                    return False
        
        return True
    
    def check_cells(self, nb):
        """Return whether all code cells are trusted
        
        If there are no code cells, return True.
        
        This function is the inverse of mark_cells.
        """
        if nb.nbformat < 3:
            return False
        trusted = True
        for cell in yield_code_cells(nb):
            # only distrust a cell if it actually has some output to distrust
            if not self._check_cell(cell, nb.nbformat):
                trusted = False

        return trusted
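
# Hedged usage sketch (this class matches nbformat.sign.NotebookNotary; the
# notebook path is hypothetical):
#
#     import nbformat
#     from nbformat.sign import NotebookNotary
#
#     nb = nbformat.read("example.ipynb", as_version=4)
#     notary = NotebookNotary(db_file=':memory:')
#     notary.sign(nb)                  # store the HMAC in the signatures db
#     assert notary.check_signature(nb)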
Exemple #19
class DisplayIntegrator(Tool):
    name = "ctapipe-display-integration"
    description = __doc__

    event_index = Int(0, help='Event index to view.').tag(config=True)
    use_event_id = Bool(
        False,
        help='event_index will obtain an event using event_id instead of '
        'index.').tag(config=True)
    telescope = Int(None,
                    allow_none=True,
                    help='Telescope to view. Set to None to display the first '
                    'telescope with data.').tag(config=True)
    channel = Enum([0, 1], 0, help='Channel to view').tag(config=True)

    extractor_product = tool_utils.enum_trait(
        ChargeExtractor, default='NeighbourPeakIntegrator')

    aliases = Dict(
        dict(
            f='EventSource.input_url',
            max_events='EventSource.max_events',
            extractor='DisplayIntegrator.extractor_product',
            E='DisplayIntegrator.event_index',
            T='DisplayIntegrator.telescope',
            C='DisplayIntegrator.channel',
        ))
    flags = Dict(
        dict(id=({
            'DisplayIntegrator': {
                'use_event_id': True
            }
        }, 'event_index will obtain an event using '
                 'event_id instead of index.')))
    classes = List([
        EventSource,
        CameraDL1Calibrator,
    ] + tool_utils.classes_with_traits(ChargeExtractor))

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.eventseeker = None
        self.r1 = None
        self.dl0 = None
        self.extractor = None
        self.dl1 = None

    def setup(self):
        self.log_format = "%(levelname)s: %(message)s [%(name)s.%(funcName)s]"

        event_source = EventSource.from_config(parent=self)
        self.eventseeker = EventSeeker(event_source, parent=self)
        self.extractor = ChargeExtractor.from_name(
            self.extractor_product,
            parent=self,
        )
        self.r1 = CameraR1Calibrator.from_eventsource(
            eventsource=event_source,
            parent=self,
        )

        self.dl0 = CameraDL0Reducer(parent=self)
        self.dl1 = CameraDL1Calibrator(extractor=self.extractor, parent=self)

    def start(self):
        event_num = self.event_index
        if self.use_event_id:
            event_num = str(event_num)
        event = self.eventseeker[event_num]

        # Calibrate
        self.r1.calibrate(event)
        self.dl0.reduce(event)
        self.dl1.calibrate(event)

        # Select telescope
        tels = list(event.r0.tels_with_data)
        telid = self.telescope
        if telid is None:
            telid = tels[0]
        if telid not in tels:
            self.log.error("[event] please specify one of the following "
                           "telescopes for this event: {}".format(tels))
            exit()

        extractor_name = self.extractor.__class__.__name__

        plot(event, telid, self.channel, extractor_name)

    def finish(self):
        pass
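
# Hedged usage sketch: as a ctapipe Tool this is normally run from the command
# line; programmatically (the input file is hypothetical):
#
#     tool = DisplayIntegrator()
#     tool.run(argv=['-f', 'events.simtel.gz', '-E', '3', '-T', '1'])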
Exemple #20
class Canvas(DOMWidget, FigureCanvasWebAggCore):

    _model_module = Unicode('jupyter-matplotlib').tag(sync=True)
    _model_module_version = Unicode(js_semver).tag(sync=True)
    _model_name = Unicode('MPLCanvasModel').tag(sync=True)

    _view_module = Unicode('jupyter-matplotlib').tag(sync=True)
    _view_module_version = Unicode(js_semver).tag(sync=True)
    _view_name = Unicode('MPLCanvasView').tag(sync=True)

    toolbar = Instance(Toolbar,
                       allow_none=True).tag(sync=True, **widget_serialization)
    toolbar_visible = Bool(True).tag(sync=True)
    toolbar_position = Enum(['top', 'bottom', 'left', 'right'],
                            default_value='left').tag(sync=True)

    header_visible = Bool(True).tag(sync=True)
    footer_visible = Bool(True).tag(sync=True)

    resizable = Bool(True).tag(sync=True)
    capture_scroll = Bool(False).tag(sync=True)

    _width = CInt().tag(sync=True)
    _height = CInt().tag(sync=True)

    _figure_label = Unicode('Figure').tag(sync=True)
    _message = Unicode().tag(sync=True)
    _cursor = Unicode('pointer').tag(sync=True)

    _image_mode = Unicode('full').tag(sync=True)

    _rubberband_x = CInt(0).tag(sync=True)
    _rubberband_y = CInt(0).tag(sync=True)
    _rubberband_width = CInt(0).tag(sync=True)
    _rubberband_height = CInt(0).tag(sync=True)

    _closed = Bool(True)

    # Must declare the superclass private members.
    _png_is_old = Bool()
    _force_full = Bool()
    _current_image_mode = Unicode()
    _dpi_ratio = Float(1.0)

    def __init__(self, figure, *args, **kwargs):
        DOMWidget.__init__(self, *args, **kwargs)
        FigureCanvasWebAggCore.__init__(self, figure, *args, **kwargs)

        self.on_msg(self._handle_message)

    def _handle_message(self, object, content, buffers):
        # Every content has a "type".
        if content['type'] == 'closing':
            self._closed = True
        elif content['type'] == 'initialized':
            _, _, w, h = self.figure.bbox.bounds
            self.manager.resize(w, h)
        else:
            self.manager.handle_json(content)

    def send_json(self, content):
        # Change in the widget state?
        if content['type'] == 'cursor':
            self._cursor = cursors_str[content['cursor']]

        elif content['type'] == 'message':
            self._message = content['message']

        elif content['type'] == 'figure_label':
            self._figure_label = content['label']

        elif content['type'] == 'resize':
            self._width = content['size'][0]
            self._height = content['size'][1]
            # Send resize message anyway
            self.send({'data': json.dumps(content)})

        elif content['type'] == 'image_mode':
            self._image_mode = content['mode']

        else:
            # Default: send the message to the front-end
            self.send({'data': json.dumps(content)})

    def send_binary(self, data):
        self.send({'data': '{"type": "binary"}'}, buffers=[data])

    def new_timer(self, *args, **kwargs):
        return TimerTornado(*args, **kwargs)
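
# Hedged usage sketch: this Canvas is created by the ipympl widget backend
# rather than instantiated directly; in a notebook with `%matplotlib widget`:
#
#     import matplotlib.pyplot as plt
#
#     fig, ax = plt.subplots()              # fig.canvas is a Canvas instance
#     fig.canvas.toolbar_visible = False    # traits sync to the front-end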
Exemple #21
class Map(DOMWidget, InteractMixin):
    _view_name = Unicode('LeafletMapView').tag(sync=True)
    _model_name = Unicode('LeafletMapModel').tag(sync=True)
    _view_module = Unicode('jupyter-leaflet').tag(sync=True)
    _model_module = Unicode('jupyter-leaflet').tag(sync=True)

    _view_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)
    _model_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)

    # Map options
    center = List(def_loc).tag(sync=True, o=True)
    zoom_start = Int(12).tag(sync=True, o=True)
    zoom = Int(12).tag(sync=True, o=True)
    max_zoom = Int(18).tag(sync=True, o=True)
    min_zoom = Int(1).tag(sync=True, o=True)
    interpolation = Unicode('bilinear').tag(sync=True, o=True)
    crs = Enum(values=allowed_crs, default_value='EPSG3857').tag(sync=True)

    # Specification of the basemap
    basemap = Union(
        (Dict(), Instance(TileLayer)),
        default_value=dict(
            url='https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
            max_zoom=19,
            attribution=
            'Map data (c) <a href="https://openstreetmap.org">OpenStreetMap</a> contributors'
        ))
    modisdate = Unicode('yesterday').tag(sync=True)

    # Interaction options
    dragging = Bool(True).tag(sync=True, o=True)
    touch_zoom = Bool(True).tag(sync=True, o=True)
    scroll_wheel_zoom = Bool(False).tag(sync=True, o=True)
    double_click_zoom = Bool(True).tag(sync=True, o=True)
    box_zoom = Bool(True).tag(sync=True, o=True)
    tap = Bool(True).tag(sync=True, o=True)
    tap_tolerance = Int(15).tag(sync=True, o=True)
    world_copy_jump = Bool(False).tag(sync=True, o=True)
    close_popup_on_click = Bool(True).tag(sync=True, o=True)
    bounce_at_zoom_limits = Bool(True).tag(sync=True, o=True)
    keyboard = Bool(True).tag(sync=True, o=True)
    keyboard_pan_offset = Int(80).tag(sync=True, o=True)
    keyboard_zoom_offset = Int(1).tag(sync=True, o=True)
    inertia = Bool(True).tag(sync=True, o=True)
    inertia_deceleration = Int(3000).tag(sync=True, o=True)
    inertia_max_speed = Int(1500).tag(sync=True, o=True)
    # inertia_threshold = Int(?, o=True).tag(sync=True)
    # fade_animation = Bool(?).tag(sync=True, o=True)
    # zoom_animation = Bool(?).tag(sync=True, o=True)
    zoom_animation_threshold = Int(4).tag(sync=True, o=True)
    # marker_zoom_animation = Bool(?).tag(sync=True, o=True)
    fullscreen = Bool(False).tag(sync=True, o=True)

    options = List(trait=Unicode()).tag(sync=True)

    style = InstanceDict(MapStyle).tag(sync=True, **widget_serialization)
    default_style = InstanceDict(MapStyle).tag(sync=True,
                                               **widget_serialization)
    dragging_style = InstanceDict(MapStyle).tag(sync=True,
                                                **widget_serialization)

    zoom_control = Bool(True)
    attribution_control = Bool(True)

    @default('dragging_style')
    def _default_dragging_style(self):
        return {'cursor': 'move'}

    @default('options')
    def _default_options(self):
        return [name for name in self.traits(o=True)]

    south = Float(def_loc[0], read_only=True).tag(sync=True)
    north = Float(def_loc[0], read_only=True).tag(sync=True)
    east = Float(def_loc[1], read_only=True).tag(sync=True)
    west = Float(def_loc[1], read_only=True).tag(sync=True)

    layers = Tuple().tag(trait=Instance(Layer),
                         sync=True,
                         **widget_serialization)

    @default('layers')
    def _default_layers(self):
        basemap = (self.basemap
                   if isinstance(self.basemap, TileLayer)
                   else basemap_to_tiles(self.basemap, self.modisdate))

        basemap.base = True

        return (basemap, )

    bounds = Tuple(read_only=True)
    bounds_polygon = Tuple(read_only=True)

    @observe('south', 'north', 'east', 'west')
    def _observe_bounds(self, change):
        self.set_trait('bounds',
                       ((self.south, self.west), (self.north, self.east)))
        self.set_trait('bounds_polygon',
                       ((self.north, self.west), (self.north, self.east),
                        (self.south, self.east), (self.south, self.west)))

    def __init__(self, **kwargs):
        self.zoom_control_instance = None
        self.attribution_control_instance = None

        super(Map, self).__init__(**kwargs)
        self.on_msg(self._handle_leaflet_event)

        if self.zoom_control:
            self.zoom_control_instance = ZoomControl()
            self.add_control(self.zoom_control_instance)

        if self.attribution_control:
            self.attribution_control_instance = AttributionControl(
                position='bottomright')
            self.add_control(self.attribution_control_instance)

    @observe('zoom_control')
    def observe_zoom_control(self, change):
        if change['new']:
            self.zoom_control_instance = ZoomControl()
            self.add_control(self.zoom_control_instance)
        else:
            if self.zoom_control_instance is not None and self.zoom_control_instance in self.controls:
                self.remove_control(self.zoom_control_instance)

    @observe('attribution_control')
    def observe_attribution_control(self, change):
        if change['new']:
            self.attribution_control_instance = AttributionControl(
                position='bottomright')
            self.add_control(self.attribution_control_instance)
        else:
            if self.attribution_control_instance is not None and self.attribution_control_instance in self.controls:
                self.remove_control(self.attribution_control_instance)

    _layer_ids = List()

    @validate('layers')
    def _validate_layers(self, proposal):
        '''Validate layers list.

        Makes sure only one instance of any given layer can exist in the
        layers list.
        '''
        self._layer_ids = [l.model_id for l in proposal.value]
        if len(set(self._layer_ids)) != len(self._layer_ids):
            raise LayerException(
                'duplicate layer detected, only use each layer once')
        return proposal.value

    def add_layer(self, layer):
        if isinstance(layer, dict):
            layer = basemap_to_tiles(layer)
        if layer.model_id in self._layer_ids:
            raise LayerException('layer already on map: %r' % layer)
        self.layers = tuple([l for l in self.layers] + [layer])

    def remove_layer(self, layer):
        if layer.model_id not in self._layer_ids:
            raise LayerException('layer not on map: %r' % layer)
        self.layers = tuple(
            [l for l in self.layers if l.model_id != layer.model_id])

    def substitute_layer(self, old, new):
        if isinstance(new, dict):
            new = basemap_to_tiles(new)
        if old.model_id not in self._layer_ids:
            raise LayerException(
                'Could not substitute layer: layer not on map.')
        self.layers = tuple(
            [new if l.model_id == old.model_id else l for l in self.layers])

    def clear_layers(self):
        self.layers = ()

    controls = Tuple().tag(trait=Instance(Control),
                           sync=True,
                           **widget_serialization)
    _control_ids = List()

    @validate('controls')
    def _validate_controls(self, proposal):
        '''Validate controls list.

        Makes sure only one instance of any given control can exist in the
        controls list.
        '''
        self._control_ids = [c.model_id for c in proposal.value]
        if len(set(self._control_ids)) != len(self._control_ids):
            raise ControlException(
                'duplicate control detected, only use each control once')
        return proposal.value

    def add_control(self, control):
        if control.model_id in self._control_ids:
            raise ControlException('control already on map: %r' % control)
        self.controls = tuple([c for c in self.controls] + [control])

    def remove_control(self, control):
        if control.model_id not in self._control_ids:
            raise ControlException('control not on map: %r' % control)
        self.controls = tuple(
            [c for c in self.controls if c.model_id != control.model_id])

    def clear_controls(self):
        self.controls = ()

    def __iadd__(self, item):
        if isinstance(item, Layer):
            self.add_layer(item)
        elif isinstance(item, Control):
            self.add_control(item)
        return self

    def __isub__(self, item):
        if isinstance(item, Layer):
            self.remove_layer(item)
        elif isinstance(item, Control):
            self.remove_control(item)
        return self

    def __add__(self, item):
        if isinstance(item, Layer):
            self.add_layer(item)
        elif isinstance(item, Control):
            self.add_control(item)
        return self

    # Event handling
    _interaction_callbacks = Instance(CallbackDispatcher, ())

    def _handle_leaflet_event(self, _, content, buffers):
        if content.get('event', '') == 'interaction':
            self._interaction_callbacks(**content)

    def on_interaction(self, callback, remove=False):
        self._interaction_callbacks.register_callback(callback, remove=remove)
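
# Hedged usage sketch (ipyleaflet API; the coordinates are arbitrary):
#
#     from ipyleaflet import Map, Marker
#
#     m = Map(center=(52.2, 21.0), zoom=10)
#     m.add_layer(Marker(location=(52.2, 21.0)))   # validated for duplicates
#     m.on_interaction(lambda **event: print(event.get('type')))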
Exemple #22
class Toolbar(DOMWidget, NavigationToolbar2WebAgg):

    _model_module = Unicode('jupyter-matplotlib').tag(sync=True)
    _model_module_version = Unicode(js_semver).tag(sync=True)
    _model_name = Unicode('ToolbarModel').tag(sync=True)

    _view_module = Unicode('jupyter-matplotlib').tag(sync=True)
    _view_module_version = Unicode(js_semver).tag(sync=True)
    _view_name = Unicode('ToolbarView').tag(sync=True)

    toolitems = List().tag(sync=True)
    orientation = Enum(['horizontal', 'vertical'],
                       default_value='vertical').tag(sync=True)
    button_style = CaselessStrEnum(
        values=['primary', 'success', 'info', 'warning', 'danger', ''],
        default_value='',
        help="""Use a predefined styling for the button.""").tag(sync=True)
    collapsed = Bool(True).tag(sync=True)

    _current_action = Enum(values=['pan', 'zoom', ''],
                           default_value='').tag(sync=True)

    def __init__(self, canvas, *args, **kwargs):
        DOMWidget.__init__(self, *args, **kwargs)
        NavigationToolbar2WebAgg.__init__(self, canvas, *args, **kwargs)

        self.on_msg(self.canvas._handle_message)

    def export(self):
        buf = io.BytesIO()
        self.canvas.figure.savefig(buf, format='png', dpi='figure')
        # Figure width in pixels
        pwidth = (self.canvas.figure.get_figwidth() *
                  self.canvas.figure.get_dpi())
        # Scale size to match widget on HiDPI monitors
        width = pwidth / self.canvas._dpi_ratio
        data = "<img src='data:image/png;base64,{0}' width={1}/>"
        data = data.format(b64encode(buf.getvalue()).decode('utf-8'), width)
        display(HTML(data))

    @default('toolitems')
    def _default_toolitems(self):
        icons = {
            'home': 'home',
            'back': 'arrow-left',
            'forward': 'arrow-right',
            'zoom_to_rect': 'square-o',
            'move': 'arrows',
            'download': 'floppy-o',
            'export': 'file-picture-o'
        }

        download_item = ('Download', 'Download plot', 'download',
                         'save_figure')

        toolitems = (NavigationToolbar2.toolitems + (download_item,))

        return [(text, tooltip, icons[icon_name], method_name)
                for text, tooltip, icon_name, method_name
                in toolitems
                if icon_name in icons]
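
# Hedged usage sketch: with an ipympl figure, export() embeds a static PNG of
# the current canvas into the notebook output:
#
#     fig.canvas.toolbar.export()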
Exemple #23
class NbGraderAPI(LoggingConfigurable):
    """A high-level API for using nbgrader."""

    coursedir = Instance(CourseDirectory, allow_none=True)
    authenticator = Instance(Authenticator, allow_none=True)
    exchange = Instance(ExchangeFactory, allow_none=True)

    # The log level for the application
    log_level = Enum(
        (0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'),
        default_value=logging.INFO,
        help="Set the log level by value or name.").tag(config=True)

    timezone = Unicode(
        "UTC", help="Timezone for displaying timestamps").tag(config=True)

    timestamp_format = Unicode(
        "%Y-%m-%d %H:%M:%S %Z",
        help="Format string for displaying timestamps").tag(config=True)

    @observe('log_level')
    def _log_level_changed(self, change):
        """Adjust the log level when log_level is set."""
        new = change.new
        if isinstance(new, str):
            new = getattr(logging, new)
            self.log_level = new
        self.log.setLevel(new)

    def __init__(self,
                 coursedir=None,
                 authenticator=None,
                 exchange=None,
                 **kwargs):
        """Initialize the API.

        Arguments
        ---------
        coursedir: :class:`nbgrader.coursedir.CourseDirectory`
            (Optional) A course directory object.
        authenticator : :class:`~nbgrader.auth.BaseAuthenticator`
            (Optional) An authenticator instance for communicating with an
            external database.
        kwargs:
            Additional keyword arguments (e.g. ``parent``, ``config``)

        """
        self.log.setLevel(self.log_level)
        super(NbGraderAPI, self).__init__(**kwargs)

        if coursedir is None:
            self.coursedir = CourseDirectory(parent=self)
        else:
            self.coursedir = coursedir

        if authenticator is None:
            self.authenticator = Authenticator(parent=self)
        else:
            self.authenticator = authenticator

        if exchange is None:
            self.exchange = ExchangeFactory(parent=self)
        else:
            self.exchange = exchange

        if sys.platform != 'win32':
            lister = self.exchange.List(coursedir=self.coursedir,
                                        authenticator=self.authenticator,
                                        parent=self)
            self.course_id = self.coursedir.course_id
            if hasattr(lister, "root"):
                self.exchange_root = lister.root
            else:
                # For non-fs based exchanges
                self.exchange_root = ''

            try:
                lister.start()
            except ExchangeError:
                self.exchange_missing = True
            else:
                self.exchange_missing = False

        else:
            self.course_id = ''
            self.exchange_root = ''
            self.exchange_missing = True

    @property
    def exchange_is_functional(self):
        return self.course_id and not self.exchange_missing and sys.platform != 'win32'

    @property
    def gradebook(self):
        """An instance of :class:`nbgrader.api.Gradebook`.

        Note that each time this property is accessed, a new gradebook is
        created. The user is responsible for destroying the gradebook through
        :func:`~nbgrader.api.Gradebook.close`.

        """
        return Gradebook(self.coursedir.db_url, self.course_id)
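
    # Hedged usage sketch: use the property as a context manager so the
    # Gradebook is closed automatically (this mirrors get_autograded_students
    # below):
    #
    #     api = NbGraderAPI()
    #     with api.gradebook as gb:
    #         assignment = gb.find_assignment('ps1')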

    def get_source_assignments(self):
        """Get the names of all assignments in the `source` directory.

        Returns
        -------
        assignments: set
            A set of assignment names

        """
        filenames = glob.glob(
            self.coursedir.format_path(self.coursedir.source_directory,
                                       student_id='.',
                                       assignment_id='*'))

        assignments = set([])
        for filename in filenames:
            # skip files that aren't directories
            if not os.path.isdir(filename):
                continue

            # parse out the assignment name
            regex = self.coursedir.format_path(
                self.coursedir.source_directory,
                student_id='.',
                assignment_id='(?P<assignment_id>.*)',
                escape=True)

            matches = re.match(regex, filename)
            if matches:
                assignments.add(matches.groupdict()['assignment_id'])

        return assignments

    def get_released_assignments(self):
        """Get the names of all assignments that have been released to the
        exchange directory. If the course id is blank, this returns an empty
        set.

        Returns
        -------
        assignments: set
            A set of assignment names

        """
        if self.exchange_is_functional:
            lister = self.exchange.List(coursedir=self.coursedir,
                                        authenticator=self.authenticator,
                                        parent=self)
            released = set([x['assignment_id'] for x in lister.start()])
        else:
            released = set([])

        return released

    def get_submitted_students(self, assignment_id):
        """Get the ids of students that have submitted a given assignment
        (determined by whether or not a submission exists in the `submitted`
        directory).

        Arguments
        ---------
        assignment_id: string
            The name of the assignment. May be * to select for all assignments.

        Returns
        -------
        students: set
            A set of student ids

        """
        # get the names of all student submissions in the `submitted` directory
        filenames = glob.glob(
            self.coursedir.format_path(self.coursedir.submitted_directory,
                                       student_id='*',
                                       assignment_id=assignment_id))

        students = set([])
        for filename in filenames:
            # skip files that aren't directories
            if not os.path.isdir(filename):
                continue

            # parse out the student id
            if assignment_id == "*":
                assignment_id = ".*"
            regex = self.coursedir.format_path(
                self.coursedir.submitted_directory,
                student_id='(?P<student_id>.*)',
                assignment_id=assignment_id,
                escape=True)

            matches = re.match(regex, filename)
            if matches:
                students.add(matches.groupdict()['student_id'])

        return students

    def get_submitted_timestamp(self, assignment_id, student_id):
        """Gets the timestamp of a submitted assignment.

        Arguments
        ---------
        assignment_id: string
            The assignment name
        student_id: string
            The student id

        Returns
        -------
        timestamp: datetime.datetime or None
            The timestamp of the submission, or None if the timestamp does
            not exist

        """
        assignment_dir = os.path.abspath(
            self.coursedir.format_path(self.coursedir.submitted_directory,
                                       student_id, assignment_id))

        timestamp_pth = os.path.join(assignment_dir, 'timestamp.txt')
        if os.path.exists(timestamp_pth):
            with open(timestamp_pth, 'r') as fh:
                return parse_utc(fh.read().strip())
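# A plausible stand-in for the `parse_utc` helper used above, assuming
# timestamp.txt stores a single UTC timestamp in an ISO-8601-like format
# (the real helper lives in nbgrader.utils; this sketch is illustrative):
from datetime import datetime, timezone

def parse_utc_sketch(text):
    # Accept bare 'YYYY-MM-DD HH:MM:SS[.ffffff]' strings as well as ones
    # with a trailing ' UTC' suffix; treat naive timestamps as UTC.
    text = text.strip()
    if text.endswith(' UTC'):
        text = text[:-4]
    ts = datetime.fromisoformat(text)
    return ts if ts.tzinfo else ts.replace(tzinfo=timezone.utc)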

    def get_autograded_students(self, assignment_id):
        """Get the ids of students whose submission for a given assignment
        has been autograded. This is determined based on satisfying all of the
        following criteria:

        1. There is a directory present in the `autograded` directory.
        2. The submission is present in the database.
        3. The timestamp of the autograded submission is the same as the
           timestamp of the original submission (in the `submitted` directory).

        Returns
        -------
        students: set
            A set of student ids

        """
        # get all autograded submissions
        with self.gradebook as gb:
            ag_timestamps = dict(gb.db\
                .query(Student.id, SubmittedAssignment.timestamp)\
                .join(SubmittedAssignment)\
                .filter(SubmittedAssignment.name == assignment_id)\
                .all())
            ag_students = set(ag_timestamps.keys())

        students = set([])
        for student_id in ag_students:
            # skip files that aren't directories
            filename = self.coursedir.format_path(
                self.coursedir.autograded_directory,
                student_id=student_id,
                assignment_id=assignment_id)
            if not os.path.isdir(filename):
                continue

            # get the timestamps and skip the submission if the submitted
            # timestamp does not match the autograded timestamp
            submitted_timestamp = self.get_submitted_timestamp(
                assignment_id, student_id)
            autograded_timestamp = ag_timestamps[student_id]
            if submitted_timestamp != autograded_timestamp:
                continue

            students.add(student_id)

        return students

    def get_assignment(self, assignment_id, released=None):
        """Get information about an assignment given its name.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        released: set
            (Optional) A set of names of released assignments, obtained via
            self.get_released_assignments().

        Returns
        -------
        assignment: dict
            A dictionary containing information about the assignment

        """
        # get the set of released assignments if not given
        if not released:
            released = self.get_released_assignments()

        # check whether there is a source version of the assignment
        sourcedir = os.path.abspath(
            self.coursedir.format_path(self.coursedir.source_directory,
                                       student_id='.',
                                       assignment_id=assignment_id))
        if not os.path.isdir(sourcedir):
            return

        # see if there is information about the assignment in the database
        try:
            with self.gradebook as gb:
                db_assignment = gb.find_assignment(assignment_id)
                assignment = db_assignment.to_dict()
                if db_assignment.duedate:
                    ts = as_timezone(db_assignment.duedate, self.timezone)
                    assignment["display_duedate"] = ts.strftime(
                        self.timestamp_format)
                    assignment["duedate_notimezone"] = ts.replace(
                        tzinfo=None).isoformat()
                else:
                    assignment["display_duedate"] = None
                    assignment["duedate_notimezone"] = None
                assignment["duedate_timezone"] = to_numeric_tz(self.timezone)
                assignment["average_score"] = gb.average_assignment_score(
                    assignment_id)
                assignment[
                    "average_code_score"] = gb.average_assignment_code_score(
                        assignment_id)
                assignment[
                    "average_written_score"] = gb.average_assignment_written_score(
                        assignment_id)
                assignment[
                    "average_task_score"] = gb.average_assignment_task_score(
                        assignment_id)

        except MissingEntry:
            assignment = {
                "id": None,
                "name": assignment_id,
                "duedate": None,
                "display_duedate": None,
                "duedate_notimezone": None,
                "duedate_timezone": to_numeric_tz(self.timezone),
                "average_score": 0,
                "average_code_score": 0,
                "average_written_score": 0,
                "average_task_score": 0,
                "max_score": 0,
                "max_code_score": 0,
                "max_written_score": 0,
                "max_task_score": 0
            }

        # get released status
        if not self.exchange_is_functional:
            assignment["releaseable"] = False
            assignment["status"] = "draft"
        else:
            assignment["releaseable"] = True
            if assignment_id in released:
                assignment["status"] = "released"
            else:
                assignment["status"] = "draft"

        # get source directory
        assignment["source_path"] = os.path.relpath(sourcedir,
                                                    self.coursedir.root)

        # get release directory
        releasedir = os.path.abspath(
            self.coursedir.format_path(self.coursedir.release_directory,
                                       student_id='.',
                                       assignment_id=assignment_id))
        if os.path.exists(releasedir):
            assignment["release_path"] = os.path.relpath(
                releasedir, self.coursedir.root)
        else:
            assignment["release_path"] = None

        # number of submissions
        assignment["num_submissions"] = len(
            self.get_submitted_students(assignment_id))

        return assignment
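# A hypothetical usage sketch of this API class, assuming it is nbgrader's
# NbGraderAPI (the method names above match nbgrader.apps.api); the course
# root and assignment name below are illustrative:
from nbgrader.apps.api import NbGraderAPI
from nbgrader.coursedir import CourseDirectory

api = NbGraderAPI(coursedir=CourseDirectory(root='/path/to/course'))
info = api.get_assignment('ps1')
if info is not None:
    # 'status' is 'draft' or 'released'; see get_assignment above
    print(info['status'], info['num_submissions'], info['source_path'])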

    def get_assignments(self):
        """Get a list of information about all assignments.

        Returns
        -------
        assignments: list
            A list of dictionaries containing information about each assignment

        """
        released = self.get_released_assignments()

        assignments = []
        for x in self.get_source_assignments():
            assignments.append(self.get_assignment(x, released=released))

        assignments.sort(key=lambda x: (x["duedate"] if x["duedate"] is
                                        not None else "None", x["name"]))
        return assignments

    def get_notebooks(self, assignment_id):
        """Get a list of notebooks in an assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment

        Returns
        -------
        notebooks: list
            A list of dictionaries containing information about each notebook

        """
        with self.gradebook as gb:
            try:
                assignment = gb.find_assignment(assignment_id)
            except MissingEntry:
                assignment = None

            # if the assignment exists in the database
            if assignment and assignment.notebooks:
                notebooks = []
                for notebook in assignment.notebooks:
                    x = notebook.to_dict()
                    x["average_score"] = gb.average_notebook_score(
                        notebook.name, assignment.name)
                    x["average_code_score"] = gb.average_notebook_code_score(
                        notebook.name, assignment.name)
                    x["average_written_score"] = gb.average_notebook_written_score(
                        notebook.name, assignment.name)
                    x["average_task_score"] = gb.average_notebook_task_score(
                        notebook.name, assignment.name)
                    notebooks.append(x)

            # if it doesn't exist in the database
            else:
                sourcedir = self.coursedir.format_path(
                    self.coursedir.source_directory,
                    student_id='.',
                    assignment_id=assignment_id)
                escaped_sourcedir = self.coursedir.format_path(
                    self.coursedir.source_directory,
                    student_id='.',
                    assignment_id=assignment_id,
                    escape=True)

                notebooks = []
                for filename in glob.glob(os.path.join(sourcedir, "*.ipynb")):
                    regex = re.escape(os.path.sep).join(
                        [escaped_sourcedir, "(?P<notebook_id>.*).ipynb"])
                    matches = re.match(regex, filename)
                    notebook_id = matches.groupdict()['notebook_id']
                    notebooks.append({
                        "name": notebook_id,
                        "id": None,
                        "average_score": 0,
                        "average_code_score": 0,
                        "average_written_score": 0,
                        "average_task_score": 0,
                        "max_score": 0,
                        "max_code_score": 0,
                        "max_written_score": 0,
                        "max_task_score": 0,
                        "needs_manual_grade": False,
                        "num_submissions": 0
                    })

        return notebooks

    def get_submission(self,
                       assignment_id,
                       student_id,
                       ungraded=None,
                       students=None):
        """Get information about a student's submission of an assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        student_id: string
            The student's id
        ungraded: set
            (Optional) A set of student ids corresponding to students whose
            submissions have not yet been autograded.
        students: dict
            (Optional) A dictionary of dictionaries, keyed by student id,
            containing information about students.

        Returns
        -------
        submission: dict
            A dictionary containing information about the submission

        """
        # compute `autograded` unconditionally: it is needed below even when
        # a precomputed `ungraded` set is passed in
        autograded = self.get_autograded_students(assignment_id)
        if ungraded is None:
            ungraded = self.get_submitted_students(assignment_id) - autograded
        if students is None:
            students = {x['id']: x for x in self.get_students()}

        if student_id in ungraded:
            ts = self.get_submitted_timestamp(assignment_id, student_id)
            if ts:
                timestamp = ts.isoformat()
                display_timestamp = as_timezone(ts, self.timezone).strftime(
                    self.timestamp_format)
            else:
                timestamp = None
                display_timestamp = None

            submission = {
                "id": None,
                "name": assignment_id,
                "timestamp": timestamp,
                "display_timestamp": display_timestamp,
                "score": 0.0,
                "max_score": 0.0,
                "code_score": 0.0,
                "max_code_score": 0.0,
                "written_score": 0.0,
                "max_written_score": 0.0,
                "task_score": 0.0,
                "max_task_score": 0.0,
                "needs_manual_grade": False,
                "autograded": False,
                "submitted": True,
                "student": student_id,
            }

            if student_id not in students:
                submission["last_name"] = None
                submission["first_name"] = None
            else:
                submission["last_name"] = students[student_id]["last_name"]
                submission["first_name"] = students[student_id]["first_name"]

        elif student_id in autograded:
            with self.gradebook as gb:
                try:
                    db_submission = gb.find_submission(assignment_id,
                                                       student_id)
                    submission = db_submission.to_dict()
                    if db_submission.timestamp:
                        submission["display_timestamp"] = as_timezone(
                            db_submission.timestamp,
                            self.timezone).strftime(self.timestamp_format)
                    else:
                        submission["display_timestamp"] = None

                except MissingEntry:
                    return None

            submission["autograded"] = True
            submission["submitted"] = True

        else:
            submission = {
                "id": None,
                "name": assignment_id,
                "timestamp": None,
                "display_timestamp": None,
                "score": 0.0,
                "max_score": 0.0,
                "code_score": 0.0,
                "max_code_score": 0.0,
                "written_score": 0.0,
                "max_written_score": 0.0,
                "task_score": 0.0,
                "max_task_score": 0.0,
                "needs_manual_grade": False,
                "autograded": False,
                "submitted": False,
                "student": student_id,
            }

            if student_id not in students:
                submission["last_name"] = None
                submission["first_name"] = None
            else:
                submission["last_name"] = students[student_id]["last_name"]
                submission["first_name"] = students[student_id]["first_name"]

        return submission

    def get_submissions(self, assignment_id):
        """Get a list of submissions of an assignment. Each submission
        corresponds to a student.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment

        Returns
        -------
        notebooks: list
            A list of dictionaries containing information about each submission

        """
        with self.gradebook as gb:
            db_submissions = gb.submission_dicts(assignment_id)

        ungraded = self.get_submitted_students(
            assignment_id) - self.get_autograded_students(assignment_id)
        students = {x['id']: x for x in self.get_students()}
        submissions = []
        for submission in db_submissions:
            if submission["student"] in ungraded:
                continue
            ts = submission["timestamp"]
            if ts:
                submission["timestamp"] = ts.isoformat()
                submission["display_timestamp"] = as_timezone(
                    ts, self.timezone).strftime(self.timestamp_format)
            else:
                submission["timestamp"] = None
                submission["display_timestamp"] = None
            submission["autograded"] = True
            submission["submitted"] = True
            submissions.append(submission)

        for student_id in ungraded:
            submission = self.get_submission(assignment_id,
                                             student_id,
                                             ungraded=ungraded,
                                             students=students)
            submissions.append(submission)

        submissions.sort(key=lambda x: x["student"])
        return submissions

    def _filter_existing_notebooks(self, assignment_id, notebooks):
        """Filters a list of notebooks so that it only includes those notebooks
        which actually exist on disk.

        This functionality is necessary for cases where students delete or
        rename one or more notebooks in their assignment, but still submit
        the assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        notebooks: list
            List of :class:`~nbgrader.api.SubmittedNotebook` objects

        Returns
        -------
        submissions: list
            List of :class:`~nbgrader.api.SubmittedNotebook` objects

        """
        # Making a filesystem call for every notebook in the assignment
        # can be very slow on certain setups, such as using NFS, see
        # https://github.com/jupyter/nbgrader/issues/929
        #
        # If students are using the exchange and submitting with
        # ExchangeSubmit.strict == True, then all the notebooks we expect
        # should be here already so we don't need to filter for only
        # existing notebooks in that case.
        if self.exchange_is_functional:
            app = self.exchange.Submit(coursedir=self.coursedir,
                                       authenticator=self.authenticator,
                                       parent=self)
            if app.strict:
                return sorted(notebooks, key=lambda x: x.id)

        submissions = list()
        for nb in notebooks:
            filename = os.path.join(
                os.path.abspath(
                    self.coursedir.format_path(
                        self.coursedir.autograded_directory,
                        student_id=nb.student.id,
                        assignment_id=assignment_id)),
                "{}.ipynb".format(nb.name))

            if os.path.exists(filename):
                submissions.append(nb)

        return sorted(submissions, key=lambda x: x.id)

    def get_notebook_submission_indices(self, assignment_id, notebook_id):
        """Get a dictionary mapping unique submission ids to indices of the
        submissions relative to the full list of submissions.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        notebook_id: string
            The name of the notebook

        Returns
        -------
        indices: dict
            A dictionary mapping submission ids to the index of each submission

        """
        with self.gradebook as gb:
            notebooks = gb.notebook_submissions(notebook_id, assignment_id)
            submissions = self._filter_existing_notebooks(
                assignment_id, notebooks)
        return dict([(x.id, i) for i, x in enumerate(submissions)])

    def get_notebook_submissions(self, assignment_id, notebook_id):
        """Get a list of submissions for a particular notebook in an assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        notebook_id: string
            The name of the notebook

        Returns
        -------
        submissions: list
            A list of dictionaries containing information about each submission.

        """
        with self.gradebook as gb:
            try:
                gb.find_notebook(notebook_id, assignment_id)
            except MissingEntry:
                return []

            submissions = gb.notebook_submission_dicts(notebook_id,
                                                       assignment_id)

        indices = self.get_notebook_submission_indices(assignment_id,
                                                       notebook_id)
        for nb in submissions:
            nb['index'] = indices.get(nb['id'], None)

        submissions = [x for x in submissions if x['index'] is not None]
        submissions.sort(key=lambda x: x["id"])
        return submissions

    def get_student(self, student_id, submitted=None):
        """Get a dictionary containing information about the given student.

        Arguments
        ---------
        student_id: string
            The unique id of the student
        submitted: set
            (Optional) A set of unique ids of students who have submitted an assignment

        Returns
        -------
        student: dictionary
            A dictionary containing information about the student, or None if
            the student does not exist

        """
        if submitted is None:
            submitted = self.get_submitted_students("*")

        try:
            with self.gradebook as gb:
                student = gb.find_student(student_id).to_dict()

        except MissingEntry:
            if student_id in submitted:
                student = {
                    "id": student_id,
                    "last_name": None,
                    "first_name": None,
                    "email": None,
                    "lms_user_id": None,
                    "score": 0.0,
                    "max_score": 0.0
                }

            else:
                return None

        return student

    def get_students(self):
        """Get a list containing information about all the students in class.

        Returns
        -------
        students: list
            A list of dictionaries containing information about all the students

        """
        with self.gradebook as gb:
            in_db = set([x.id for x in gb.students])
            students = gb.student_dicts()

        submitted = self.get_submitted_students("*")
        for student_id in (submitted - in_db):
            students.append({
                "id": student_id,
                "last_name": None,
                "first_name": None,
                "email": None,
                "lms_user_id": None,
                "score": 0.0,
                "max_score": 0.0
            })

        students.sort(key=lambda x: (x["last_name"] or "None", x["first_name"]
                                     or "None", x["id"]))
        return students

    def get_student_submissions(self, student_id):
        """Get information about all submissions from a particular student.

        Arguments
        ---------
        student_id: string
            The unique id of the student

        Returns
        -------
        submissions: list
            A list of dictionaries containing information about all the student's
            submissions

        """
        # return just an empty list if the student doesn't exist
        if self.get_student(student_id) is None:
            return []

        submissions = []
        for assignment_id in self.get_source_assignments():
            submission = self.get_submission(assignment_id, student_id)
            submissions.append(submission)

        submissions.sort(key=lambda x: x["name"])
        return submissions

    def get_student_notebook_submissions(self, student_id, assignment_id):
        """Gets information about all notebooks within a submitted assignment.

        Arguments
        ---------
        student_id: string
            The unique id of the student
        assignment_id: string
            The name of the assignment

        Returns
        -------
        submissions: list
            A list of dictionaries containing information about the submissions

        """
        with self.gradebook as gb:
            try:
                assignment = gb.find_submission(assignment_id, student_id)
                student = assignment.student
            except MissingEntry:
                return []

            submissions = []
            for notebook in assignment.notebooks:
                filename = os.path.join(
                    os.path.abspath(
                        self.coursedir.format_path(
                            self.coursedir.autograded_directory,
                            student_id=student_id,
                            assignment_id=assignment_id)),
                    "{}.ipynb".format(notebook.name))

                if os.path.exists(filename):
                    submissions.append(notebook.to_dict())
                else:
                    submissions.append({
                        "id": None,
                        "name": notebook.name,
                        "student": student_id,
                        "last_name": student.last_name,
                        "first_name": student.first_name,
                        "score": 0,
                        "max_score": notebook.max_score,
                        "code_score": 0,
                        "max_code_score": notebook.max_code_score,
                        "written_score": 0,
                        "max_written_score": notebook.max_written_score,
                        "task_score": 0,
                        "max_task_score": notebook.max_task_score,
                        "needs_manual_grade": False,
                        "failed_tests": False,
                        "flagged": False
                    })

        submissions.sort(key=lambda x: x["name"])
        return submissions

    def assign(self, *args, **kwargs):
        """Deprecated, please use `generate_assignment` instead."""
        msg = (
            "The `assign` method is deprecated, please use `generate_assignment` "
            "instead. This method will be removed in a future version of nbgrader."
        )
        warnings.warn(msg, DeprecationWarning)
        self.log.warning(msg)
        return self.generate_assignment(*args, **kwargs)

    def generate_assignment(self, assignment_id, force=True, create=True):
        """Run ``nbgrader generate_assignment`` for a particular assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        force: bool
            Whether to force creating the student version, even if it already
            exists.
        create: bool
            Whether to create the assignment in the database, if it doesn't
            already exist.

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        with temp_attrs(self.coursedir, assignment_id=assignment_id):
            app = GenerateAssignment(coursedir=self.coursedir, parent=self)
            app.force = force
            app.create_assignment = create
            return capture_log(app)
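# `temp_attrs` (used here and in the methods below) temporarily overrides
# attributes on the course directory and restores them on exit. A plausible
# sketch of such a helper (the real one lives in nbgrader.apps.api):
from contextlib import contextmanager

@contextmanager
def temp_attrs_sketch(obj, **attrs):
    old = {name: getattr(obj, name) for name in attrs}
    try:
        for name, value in attrs.items():
            setattr(obj, name, value)
        yield obj
    finally:
        for name, value in old.items():
            setattr(obj, name, value)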

    def unrelease(self, assignment_id):
        """Run ``nbgrader list --remove`` for a particular assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        if sys.platform != 'win32':
            with temp_attrs(self.coursedir, assignment_id=assignment_id):
                app = self.exchange.List(coursedir=self.coursedir,
                                         authenticator=self.authenticator,
                                         parent=self)
                app.remove = True
                return capture_log(app)

    def release(self, *args, **kwargs):
        """Deprecated, please use `release_assignment` instead."""
        msg = (
            "The `release` method is deprecated, please use `release_assignment` "
            "instead. This method will be removed in a future version of nbgrader."
        )
        warnings.warn(msg, DeprecationWarning)
        self.log.warning(msg)
        return self.release_assignment(*args, **kwargs)

    def release_assignment(self, assignment_id):
        """Run ``nbgrader release_assignment`` for a particular assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        if sys.platform != 'win32':
            with temp_attrs(self.coursedir, assignment_id=assignment_id):
                app = self.exchange.ReleaseAssignment(
                    coursedir=self.coursedir,
                    authenticator=self.authenticator,
                    parent=self)
                return capture_log(app)

    def collect(self, assignment_id, update=True):
        """Run ``nbgrader collect`` for a particular assignment.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        update: bool
            Whether to update already-collected assignments with newer
            submissions, if they exist

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        if sys.platform != 'win32':
            with temp_attrs(self.coursedir, assignment_id=assignment_id):
                app = self.exchange.Collect(coursedir=self.coursedir,
                                            authenticator=self.authenticator,
                                            parent=self)
                app.update = update
                return capture_log(app)

    def autograde(self, assignment_id, student_id, force=True, create=True):
        """Run ``nbgrader autograde`` for a particular assignment and student.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        student_id: string
            The unique id of the student
        force: bool
            Whether to autograde the submission, even if it's already been
            autograded
        create: bool
            Whether to create students in the database if they don't already
            exist

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        with temp_attrs(self.coursedir,
                        assignment_id=assignment_id,
                        student_id=student_id):
            app = Autograde(coursedir=self.coursedir, parent=self)
            app.force = force
            app.create_student = create
            return capture_log(app)

    def generate_feedback(self, assignment_id, student_id=None, force=True):
        """Run ``nbgrader generate_feedback`` for a particular assignment and student.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        student_id: string
            The name of the student (optional). If not provided, then generate
            feedback from autograded submissions.
        force: bool
            Whether to force generating feedback, even if it already exists.

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        # Because we may be using HTMLExporter.template_file in other
        # parts of the UI, we need to make sure that the template is
        # explicitly 'feedback.tpl' here:
        c = Config()
        c.HTMLExporter.template_file = 'feedback.tpl'
        if student_id is not None:
            with temp_attrs(self.coursedir,
                            assignment_id=assignment_id,
                            student_id=student_id):
                app = GenerateFeedback(coursedir=self.coursedir, parent=self)
                app.update_config(c)
                app.force = force
                return capture_log(app)
        else:
            with temp_attrs(self.coursedir, assignment_id=assignment_id):
                app = GenerateFeedback(coursedir=self.coursedir, parent=self)
                app.update_config(c)
                app.force = force
                return capture_log(app)
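# The Config override above follows the standard traitlets pattern: set
# <ClassName>.<trait_name> on a Config object and pass it to a configurable.
# A minimal self-contained illustration (DemoExporter is a stand-in, not
# nbconvert's HTMLExporter):
from traitlets import Unicode
from traitlets.config import Config, Configurable

class DemoExporter(Configurable):
    template_file = Unicode('default.tpl').tag(config=True)

c = Config()
c.DemoExporter.template_file = 'feedback.tpl'
print(DemoExporter(config=c).template_file)  # -> feedback.tpl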

    def release_feedback(self, assignment_id, student_id=None):
        """Run ``nbgrader release_feedback`` for a particular assignment/student.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        student_id: string
            The name of the student (optional). If not provided, then release
            all generated feedback.

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output

        """
        if student_id is not None:
            with temp_attrs(self.coursedir,
                            assignment_id=assignment_id,
                            student_id=student_id):
                app = self.exchange.ReleaseFeedback(
                    coursedir=self.coursedir,
                    authenticator=self.authenticator,
                    parent=self)
                return capture_log(app)
        else:
            with temp_attrs(self.coursedir,
                            assignment_id=assignment_id,
                            student_id='*'):
                app = self.exchange.ReleaseFeedback(
                    coursedir=self.coursedir,
                    authenticator=self.authenticator,
                    parent=self)
                return capture_log(app)

    def fetch_feedback(self, assignment_id, student_id):
        """Run ``nbgrader fetch_feedback`` for a particular assignment/student.

        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        student_id: string
            The name of the student.

        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):

            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output
            - value (list of dict): all submitted assignments

        """
        with temp_attrs(self.coursedir,
                        assignment_id=assignment_id,
                        student_id=student_id):
            app = self.exchange.FetchFeedback(coursedir=self.coursedir,
                                              authenticator=self.authenticator,
                                              parent=self)
            ret_dic = capture_log(app)
            # assignment tab needs a 'value' field with the info needed to repopulate
            # the tables.
        with temp_attrs(self.coursedir,
                        assignment_id='*',
                        student_id=student_id):
            lister_rel = self.exchange.List(inbound=False,
                                            cached=True,
                                            coursedir=self.coursedir,
                                            authenticator=self.authenticator,
                                            config=self.config)
            assignments = lister_rel.start()
            ret_dic["value"] = sorted(assignments,
                                      key=lambda x:
                                      (x['course_id'], x['assignment_id']))
        return ret_dic
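# A plausible sketch of the `capture_log` helper used throughout this
# example: run a configured app, capture its log output, and return the
# {'success', 'error', 'log'} dict shape described in the docstrings above
# (the real helper lives in nbgrader; the function name is illustrative):
import io
import logging
import traceback

def capture_log_sketch(app):
    stream = io.StringIO()
    handler = logging.StreamHandler(stream)
    app.log.addHandler(handler)
    try:
        app.start()
        result = {'success': True}
    except Exception:
        result = {'success': False, 'error': traceback.format_exc()}
    finally:
        app.log.removeHandler(handler)
    result['log'] = stream.getvalue()
    return result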
Example #24
class Figure(DOMWidget):

    """Main canvas for drawing a chart.

    The Figure object holds the list of Marks and Axes. It also holds an
    optional Interaction object that is responsible for figure-level mouse
    interactions, the "interaction layer".

    Besides, the Figure object has two reference scales, for positioning items
    in an absolute fashion in the figure canvas.

    Attributes
    ----------
    title: string (default: '')
        title of the figure
    axes: List of Axes (default: [])
        list containing the instances of the axes for the figure
    marks: List of Marks (default: [])
        list containing the marks which are to be appended to the figure
    interaction: Interaction or None (default: None)
        optional interaction layer for the figure
    scale_x: Scale
        Scale representing the x values of the figure
    scale_y: Scale
        Scale representing the y values of the figure
    padding_x: Float (default: 0.0)
        Padding to be applied in the horizontal direction of the figure
        around the data points, proportion of the horizontal length
    padding_y: Float (default: 0.025)
        Padding to be applied in the vertical direction of the figure
        around the data points, proportion of the vertical length
    legend_location: {'top-right', 'top', 'top-left', 'left', 'bottom-left', 'bottom', 'bottom-right', 'right'}
        location of the legend relative to the center of the figure
    background_style: Dict (default: {})
        CSS style to be applied to the background of the figure
    title_style: Dict (default: {})
        CSS style to be applied to the title of the figure
    animation_duration: nonnegative int (default: 0)
        Duration of transition on change of data attributes, in milliseconds.

    Layout Attributes

    fig_margin: dict (default: {top=60, bottom=60, left=60, right=60})
        Dictionary containing the top, bottom, left and right margins. The user
        is responsible for making sure that the width and height are greater
        than the sum of the margins.
    min_aspect_ratio: float
         minimum width / height ratio of the figure
    max_aspect_ratio: float
         maximum width / height ratio of the figure

    Methods
    -------

    save_png:
       Saves the figure as a png file

    Note
    ----

    The aspect ratios stand for width / height ratios.

     - If the available space is within bounds in terms of min and max aspect
       ratio, we use the entire available space.
     - If the available space is too oblong horizontally, we use the client
       height and the width that corresponds to max_aspect_ratio (maximize
       width under the constraints).
     - If the available space is too oblong vertically, we use the client width
       and the height that corresponds to min_aspect_ratio (maximize height
       under the constraint).
       This corresponds to maximizing the area under the constraints.

    The default min and max aspect ratios are 1.0 and 6.0, respectively.
    """
    title = Unicode().tag(sync=True, display_name='Title')
    axes = List(Instance(Axis)).tag(sync=True, **widget_serialization)
    marks = List(Instance(Mark)).tag(sync=True, **widget_serialization)
    interaction = Instance(Interaction, default_value=None, allow_none=True).tag(sync=True,
                           **widget_serialization)
    scale_x = Instance(Scale).tag(sync=True, **widget_serialization)
    scale_y = Instance(Scale).tag(sync=True, **widget_serialization)
    title_style = Dict(trait=Unicode()).tag(sync=True)
    background_style = Dict().tag(sync=True)

    # min width is based on hardcoded padding values
    layout = Instance(Layout, kw={
            'min_width': '125px'
        }, allow_none=True).tag(sync=True, **widget_serialization)
    min_aspect_ratio = Float(1.0).tag(sync=True)
    # Max aspect ratio is such that we can have 3 charts stacked vertically
    # on a 16:9 monitor: 16/9*3 ~ 5.333
    max_aspect_ratio = Float(6.0).tag(sync=True)

    fig_margin = Dict(dict(top=60, bottom=60, left=60, right=60)).tag(sync=True)
    padding_x = Float(0.0, min=0.0, max=1.0).tag(sync=True)
    padding_y = Float(0.025, min=0.0, max=1.0).tag(sync=True)
    legend_location = Enum(['top-right', 'top', 'top-left', 'left',
                            'bottom-left', 'bottom', 'bottom-right', 'right'],
                           default_value='top-right').tag(sync=True, display_name='Legend position')
    animation_duration = Int().tag(sync=True, display_name='Animation duration')

    @default('scale_x')
    def _default_scale_x(self):
        return LinearScale(min=0, max=1, allow_padding=False)

    @default('scale_y')
    def _default_scale_y(self):
        return LinearScale(min=0, max=1, allow_padding=False)

    def save_png(self):
        self.send({"type": "save_png"})

    @validate('min_aspect_ratio', 'max_aspect_ratio')
    def _validate_aspect_ratio(self, proposal):
        value = proposal['value']
        if proposal['trait'].name == 'min_aspect_ratio' and value > self.max_aspect_ratio:
            raise TraitError('setting min_aspect_ratio > max_aspect_ratio')
        if proposal['trait'].name == 'max_aspect_ratio' and value < self.min_aspect_ratio:
            raise TraitError('setting max_aspect_ratio < min_aspect_ratio')
        return value

    _view_name = Unicode('Figure').tag(sync=True)
    _model_name = Unicode('FigureModel').tag(sync=True)
    _view_module = Unicode('bqplot').tag(sync=True)
    _model_module = Unicode('bqplot').tag(sync=True)
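# A minimal usage sketch for the Figure class above (assumes bqplot and
# numpy are installed; meant to be run in a Jupyter notebook cell):
import numpy as np
from bqplot import Axis, Figure, LinearScale, Lines

xs, ys = LinearScale(), LinearScale()
line = Lines(x=np.arange(10), y=np.arange(10) ** 2,
             scales={'x': xs, 'y': ys})
fig = Figure(marks=[line],
             axes=[Axis(scale=xs, label='x'),
                   Axis(scale=ys, orientation='vertical', label='x squared')],
             title='A minimal bqplot figure')
fig  # displaying the widget renders the chart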
class Main(MLflowExperiment):
    #
    # Resume previous run parameters.
    #
    resume_path = Unicode(
        u"/dccstor/faceid/results/train_coco_resnet/0198_968f3cd/1174695/190117_081837/",
        config=True,
        help="Resume from checkpoint file (requires also using '--resume_epoch').")
    resume_epoch = Int(
        49,
        config=True,
        help="Epoch to resume (requires also using '--resume_path').")
    coco_path = Unicode(u"/tmp/aa/coco",
                        config=True,
                        help="path to local coco dataset path")
    init_inception = Bool(
        False,
        config=True,
        help="Initialize the inception networks using ALFASSY's network.")

    #
    # Network hyper parameters
    #
    base_network_name = Unicode("resnet50",
                                config=True,
                                help="Name of base network to use.")
    avgpool_kernel = Int(
        7,
        config=True,
        help=
        "Size of the last avgpool layer in the Resnet. Should match the cropsize."
    )
    classifier_name = Unicode("Inception3Classifier",
                              config=True,
                              help="Name of classifier to use.")
    sets_network_name = Unicode("SetOpsResModule",
                                config=True,
                                help="Name of setops module to use.")
    sets_block_name = Unicode("SetopResBlock_v1",
                              config=True,
                              help="Name of setops network to use.")
    sets_basic_block_name = Unicode(
        "SetopResBasicBlock",
        config=True,
        help="Name of the basic setops block to use (where applicable).")
    ops_layer_num = Int(1, config=True, help="Ops Module layer num.")
    ops_latent_dim = Int(8092, config=True, help="Ops Module latent dim.")
    setops_dropout = Float(0,
                           config=True,
                           help="Dropout ratio of setops module.")
    crop_size = Int(224,
                    config=True,
                    help="Size of input crop (Resnet 224, inception 299).")

    #
    # Run setup
    #
    batch_size = Int(16, config=True, help="Batch size.")
    num_workers = Int(8,
                      config=True,
                      help="Number of workers to use for data loading.")
    device = Unicode("cuda", config=True, help="Use `cuda` backend.")

    #
    # Training hyper parameters.
    #
    random_angle = Float(10, config=True, help="Angle of random augmentation.")
    random_scale = Float(0.3,
                         config=True,
                         help="Scale of radnom augmentation.")
    train_base = Bool(
        True, config=True,
        help="Whether to train also the base model.").tag(parameter=True)
    train_classifier = Bool(False,
                            config=True,
                            help="Whether to train also the classifier.")
    epochs = Int(50, config=True, help="Number of epochs to run.")
    optimizer_cls = Unicode("SGD",
                            config=True,
                            help="Type of optimizer to use.")
    focal_loss = Bool(False, config=True, help="Use Focal Loss.")
    recon_loss = Enum(("mse", "l1"),
                      config=True,
                      default_value="mse",
                      help="Type of reconstruction (embedding) loss: mse/l1.")

    lr1 = Float(0.0001, config=True, help="Learning rate start.")
    lr2 = Float(0.002, config=True, help="Learning rate end.")
    warmup_epochs = Int(3,
                        config=True,
                        help="Length (in epochs) of the LR warmup.")

    weight_decay = Float(
        0.0001, config=True,
        help="Weight decay (L2 regularization).").tag(parameter=True)
    recon_loss_weight = Float(
        1., config=True,
        help="Weight of reconstruction (embedding) loss.").tag(parameter=True)
    class_fake_loss_weight = Float(
        1., config=True,
        help="Weight of fake classification loss.").tag(parameter=True)
    class_S_loss_weight = Float(
        1., config=True,
        help="Weight of Substraction classification loss.").tag(parameter=True)
    class_U_loss_weight = Float(
        1., config=True,
        help="Weight of Union classification loss.").tag(parameter=True)
    class_I_loss_weight = Float(
        1., config=True,
        help="Weight of Intersection classification loss.").tag(parameter=True)
    # loss
    sym_class_toggle = Bool(
        True, config=True, help="Should we use symmetric classification loss?")
    sym_recon_toggle = Bool(
        True, config=True, help="Should we use symmetric reconstruction loss?")
    mc_toggle = Bool(True,
                     config=True,
                     help="Should we use anti mode collapse loss?")
    tautology_recon_toggle = Bool(
        True, config=True, help="Should we use tautology reconstruction loss?")
    tautology_class_toggle = Bool(
        True, config=True, help="Should we use tautology classification loss?")
    dataset_size_ratio = Int(
        4, config=True,
        help="Multiplier of training dataset.").tag(parameter=True)

    def run(self):
        # TODO: uncomment the next line to copy the coco data to /tmp/aa
        # copy_coco_data()

        #
        # create model
        #
        base_model, classifier, setops_model = self.setup_model()

        #
        # Create ignite trainers and evaluators.
        # Note: two evaluators are used; the first evaluates the model on the
        # training data. This separation ensures that checkpointing is driven
        # by the results of the validation evaluator.
        #
        trainer, train_loader = self.setup_training(base_model, classifier,
                                                    setops_model)

        #
        # kick everything off
        #
        trainer.run(train_loader, max_epochs=self.epochs)

    def setup_training(self, base_model, classifier, setops_model):

        #
        # Create the train and test dataset.
        #
        train_loader, train_subset_loader, val_loader = self.setup_datasets()

        logging.info("Setup logging and controls.")

        #
        # Setup metrics plotters.
        #
        mlflow_logger = MlflowLogger()

        #
        # Setup the optimizer.
        #
        logging.info("Setup optimizers and losses.")

        parameters = list(base_model.parameters())
        parameters += list(setops_model.parameters())
        if self.train_classifier:
            parameters += list(classifier.parameters())

        if self.optimizer_cls == "SGD":
            optimizer = torch.optim.SGD(parameters,
                                        lr=self.lr1,
                                        momentum=0.9,
                                        weight_decay=self.weight_decay)
        else:
            optimizer = torch.optim.Adam(parameters,
                                         lr=self.lr1,
                                         weight_decay=self.weight_decay)

        if self.focal_loss:
            attr_loss = FocalLoss().cuda()
        else:
            attr_loss = torch.nn.MultiLabelSoftMarginLoss().cuda()

        recon_loss = (torch.nn.MSELoss()
                      if self.recon_loss == "mse" else torch.nn.L1Loss())

        #
        # Setup the trainer object and its logging.
        #
        logging.info("Setup trainer")
        trainer = create_setops_trainer(base_model,
                                        classifier,
                                        setops_model,
                                        optimizer,
                                        criterion1=attr_loss,
                                        criterion2=recon_loss.cuda(),
                                        params_object=self,
                                        device=self.device)
        ProgressBar(bar_format=None).attach(trainer)

        mlflow_logger.attach(engine=trainer,
                             prefix="Train ",
                             plot_event=Events.ITERATION_COMPLETED,
                             update_period=LOG_INTERVAL,
                             output_transform=lambda x: x)

        #
        # Define the evaluation metrics.
        #
        logging.info("Setup evaluator")
        evaluation_losses = {
            'real class loss':
                Loss(torch.nn.MultiLabelSoftMarginLoss().cuda(), lambda o: (o["outputs"]["real class a"], o["targets"]["class a"])) + \
                Loss(torch.nn.MultiLabelSoftMarginLoss().cuda(), lambda o: (o["outputs"]["real class b"], o["targets"]["class b"])),
            'fake class loss':
                Loss(torch.nn.MultiLabelSoftMarginLoss().cuda(), lambda o: (o["outputs"]["fake class a"], o["targets"]["class a"])) + \
                Loss(torch.nn.MultiLabelSoftMarginLoss().cuda(), lambda o: (o["outputs"]["fake class b"], o["targets"]["class b"])),
            '{} fake loss'.format(self.recon_loss):
                (Loss(recon_loss.cuda(), lambda o: (o["outputs"]["fake embed a"], o["targets"]["embed a"])) +
                Loss(recon_loss.cuda(), lambda o: (o["outputs"]["fake embed b"], o["targets"]["embed b"]))) / 2,
        }
        labels_list = train_loader.dataset.labels_list
        mask = labels_list_to_1hot(labels_list, labels_list).astype(bool)
        evaluation_accuracies = {
            'real class acc':
            (MultiLabelSoftMarginIOUaccuracy(lambda o: (o["outputs"][
                "real class a"], o["targets"]["class a"])) +
             MultiLabelSoftMarginIOUaccuracy(lambda o: (o["outputs"][
                 "real class b"], o["targets"]["class b"]))) / 2,
            'fake class acc':
            (MultiLabelSoftMarginIOUaccuracy(lambda o: (o["outputs"][
                "fake class a"], o["targets"]["class a"])) +
             MultiLabelSoftMarginIOUaccuracy(lambda o: (o["outputs"][
                 "fake class b"], o["targets"]["class b"]))) / 2,
            'S class acc':
            (MultiLabelSoftMarginIOUaccuracy(lambda o: (o["outputs"][
                "a_S_b class"], o["targets"]["a_S_b class"])) +
             MultiLabelSoftMarginIOUaccuracy(lambda o: (o["outputs"][
                 "b_S_a class"], o["targets"]["b_S_a class"]))) / 2,
            'I class acc':
            (MultiLabelSoftMarginIOUaccuracy(lambda o: (o["outputs"][
                "a_I_b class"], o["targets"]["a_I_b class"])) +
             MultiLabelSoftMarginIOUaccuracy(lambda o: (o["outputs"][
                 "b_I_a class"], o["targets"]["a_I_b class"]))) / 2,
            'U class acc':
            (MultiLabelSoftMarginIOUaccuracy(lambda o: (o["outputs"][
                "a_U_b class"], o["targets"]["a_U_b class"])) +
             MultiLabelSoftMarginIOUaccuracy(lambda o: (o["outputs"][
                 "b_U_a class"], o["targets"]["a_U_b class"]))) / 2,
            'MSE fake acc':
            (EWMeanSquaredError(lambda o: (o["outputs"]["fake embed a"], o[
                "targets"]["embed a"])) + EWMeanSquaredError(lambda o: (o[
                    "outputs"]["fake embed b"], o["targets"]["embed b"]))) / 2,
            'real mAP':
            mAP(mask=mask,
                output_transform=lambda o:
                (o["outputs"]["real class a"], o["targets"]["class a"])),
            'fake mAP':
            mAP(mask=mask,
                output_transform=lambda o:
                (o["outputs"]["fake class a"], o["targets"]["class a"])),
            'S mAP':
            mAP(mask=mask,
                output_transform=lambda o:
                (o["outputs"]["a_S_b class"], o["targets"]["a_S_b class"])),
            'I mAP':
            mAP(mask=mask,
                output_transform=lambda o:
                (o["outputs"]["a_I_b class"], o["targets"]["a_I_b class"])),
            'U mAP':
            mAP(mask=mask,
                output_transform=lambda o:
                (o["outputs"]["a_U_b class"], o["targets"]["a_U_b class"])),
        }

        #
        # Setup the training evaluator object and its logging.
        #
        train_evaluator = create_setops_evaluator(
            base_model,
            classifier,
            setops_model,
            metrics=evaluation_accuracies.copy(),
            device=self.device)

        mlflow_logger.attach(engine=train_evaluator,
                             prefix="Train Eval ",
                             plot_event=Events.EPOCH_COMPLETED,
                             metric_names=list(evaluation_accuracies.keys()))
        ProgressBar(bar_format=None).attach(train_evaluator)

        #
        # Setup the evaluator object and its logging.
        #
        evaluator = create_setops_evaluator(base_model,
                                            classifier,
                                            setops_model,
                                            metrics={
                                                **evaluation_losses,
                                                **evaluation_accuracies
                                            },
                                            device=self.device)

        mlflow_logger.attach(engine=evaluator,
                             prefix="Eval ",
                             plot_event=Events.EPOCH_COMPLETED,
                             metric_names=list({
                                 **evaluation_losses,
                                 **evaluation_accuracies
                             }.keys()))
        ProgressBar(bar_format=None).attach(evaluator)

        #
        # Checkpoint of the model
        #
        self.setup_checkpoint(base_model, classifier, setops_model, evaluator)

        logging.info("Setup schedulers.")

        #
        # Update learning rate manually using the Visdom interface.
        #
        one_cycle_size = len(train_loader) * self.warmup_epochs * 2

        scheduler_1 = LinearCyclicalScheduler(optimizer,
                                              "lr",
                                              start_value=self.lr1,
                                              end_value=self.lr2,
                                              cycle_size=one_cycle_size)
        scheduler_2 = ReduceLROnPlateau(optimizer,
                                        factor=0.5,
                                        patience=4 * len(train_loader),
                                        cooldown=len(train_loader),
                                        output_transform=lambda x: x["main"])
        lr_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2],
                                       durations=[one_cycle_size // 2],
                                       save_history=True)
        trainer.add_event_handler(Events.ITERATION_COMPLETED, lr_scheduler)

        #
        # Evaluation
        #
        @trainer.on(Events.EPOCH_COMPLETED)
        def epoch_completed(engine):
            #
            # Re-randomize the indices of the training dataset.
            #
            train_loader.dataset.calc_indices()

            #
            # Run the evaluator on a subset of the training dataset.
            #
            logging.info("Evaluation on a subset of the training data.")
            train_evaluator.run(train_subset_loader)

            #
            # Run the evaluator on the validation set.
            #
            logging.info("Evaluation on the eval data.")
            evaluator.run(val_loader)

        return trainer, train_loader

    def setup_checkpoint(self, base_model, classifier, setops_model,
                         evaluator):
        """Save checkpoints of the models."""

        checkpoint_handler_acc = ModelCheckpoint(
            self.results_path,
            CKPT_PREFIX,
            score_function=lambda eng: round(
                (eng.state.metrics["fake class acc"] +
                 eng.state.metrics["S class acc"] +
                 eng.state.metrics["I class acc"] +
                 eng.state.metrics["U class acc"]) / 4, 3),
            score_name="val_acc",
            n_saved=2,
            require_empty=False)
        checkpoint_handler_last = ModelCheckpoint(self.results_path,
                                                  CKPT_PREFIX,
                                                  save_interval=2,
                                                  n_saved=2,
                                                  require_empty=False)
        evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED,
                                    handler=checkpoint_handler_acc,
                                    to_save={
                                        'base_model': base_model.state_dict(),
                                        'classifier': classifier.state_dict(),
                                        'setops_model': setops_model.state_dict(),
                                    })
        evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED,
                                    handler=checkpoint_handler_last,
                                    to_save={
                                        'base_model': base_model.state_dict(),
                                        'classifier': classifier.state_dict(),
                                        'setops_model': setops_model.state_dict(),
                                    })

    def setup_model(self):
        """Create or resume the models."""

        logging.info("Setup the models.")

        logging.info("{} model".format(self.base_network_name))
        if self.base_network_name.lower().startswith("resnet"):
            base_model, classifier = getattr(
                setops_models,
                self.base_network_name)(num_classes=80,
                                        avgpool_kernel=self.avgpool_kernel)
        else:
            base_model = getattr(setops_models, self.base_network_name)()
            classifier = getattr(setops_models,
                                 self.classifier_name)(num_classes=80)

            if self.init_inception:
                logging.info(
                    "Initialize inception model using Amit's networks.")

                checkpoint = torch.load(self.resume_path)

                base_model = Inception3(aux_logits=False, transform_input=True)
                base_model.load_state_dict({
                    k: v
                    for k, v in checkpoint["state_dict"].items()
                    if k in base_model.state_dict()
                })
                classifier.load_state_dict({
                    k: v
                    for k, v in checkpoint["state_dict"].items()
                    if k in classifier.state_dict()
                })

        setops_model_cls = getattr(setops_models, self.sets_network_name)
        setops_model = setops_model_cls(
            input_dim=2048,
            S_latent_dim=self.ops_latent_dim,
            S_layers_num=self.ops_layer_num,
            I_latent_dim=self.ops_latent_dim,
            I_layers_num=self.ops_layer_num,
            U_latent_dim=self.ops_latent_dim,
            U_layers_num=self.ops_layer_num,
            block_cls_name=self.sets_block_name,
            basic_block_cls_name=self.sets_basic_block_name,
            dropout_ratio=self.setops_dropout,
        )

        if self.resume_path:
            logging.info("Resuming the models.")
            models_path = Path(self.resume_path)
            if self.base_network_name.lower().startswith("resnet"):
                base_model.load_state_dict(
                    torch.load(
                        sorted(
                            models_path.glob(
                                "networks_base_model_{}*.pth".format(
                                    self.resume_epoch)))[-1]))
                classifier.load_state_dict(
                    torch.load(
                        sorted(
                            models_path.glob(
                                "networks_classifier_{}*.pth".format(
                                    self.resume_epoch)))[-1]))

            setops_models_paths = sorted(
                models_path.glob("networks_setops_model_{}*.pth".format(
                    self.resume_epoch)))
            if len(setops_models_paths) > 0:
                setops_model.load_state_dict(
                    torch.load(setops_models_paths[-1]).state_dict())

        return base_model, classifier, setops_model

    def setup_datasets(self):
        """Load the training datasets."""

        train_transform = transforms.Compose([
            transforms.Resize(self.crop_size),
            transforms.RandomRotation(degrees=self.random_angle,
                                      resample=Image.BILINEAR),
            transforms.RandomResizedCrop(size=self.crop_size,
                                         scale=(1 - self.random_scale,
                                                1 + self.random_scale),
                                         ratio=(1, 1)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        val_transform = transforms.Compose([
            transforms.Resize(self.crop_size),
            transforms.CenterCrop(self.crop_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

        train_dataset = CocoDatasetPairs(
            root_dir=self.coco_path,
            set_name='train2014',
            transform=train_transform,
            dataset_size_ratio=self.dataset_size_ratio)
        train_subset_dataset = Subset(
            train_dataset,
            range(0, len(train_dataset), 5 * self.dataset_size_ratio))
        val_dataset = CocoDatasetPairs(
            root_dir=self.coco_path,
            set_name='val2014',
            transform=val_transform,
        )

        train_loader = DataLoader(train_dataset,
                                  batch_size=self.batch_size,
                                  shuffle=True,
                                  num_workers=self.num_workers)
        train_subset_loader = DataLoader(train_subset_dataset,
                                         batch_size=self.batch_size,
                                         shuffle=False,
                                         num_workers=self.num_workers)
        val_loader = DataLoader(val_dataset,
                                batch_size=self.batch_size,
                                shuffle=False,
                                num_workers=self.num_workers)
        return train_loader, train_subset_loader, val_loader
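
A minimal, self-contained sketch (not part of the example above) of the same
warmup-then-handoff pattern built with ConcatScheduler: a linear ramp drives
the learning rate for half a cycle, then a second scheduler takes over. The
toy model and the numeric values are placeholders, and a cosine decay stands
in for the custom ReduceLROnPlateau wrapper used above.

import torch
from ignite.engine import Engine, Events
from ignite.contrib.handlers import (ConcatScheduler,
                                     CosineAnnealingScheduler,
                                     LinearCyclicalScheduler)

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)

def train_step(engine, batch):
    optimizer.zero_grad()
    loss = model(batch).sum()
    loss.backward()
    optimizer.step()
    return {"main": loss.item()}

trainer = Engine(train_step)

one_cycle_size = 200  # the example uses len(train_loader) * warmup_epochs * 2

# Ramp lr from 1e-4 to 1e-2 over the first half cycle (100 iterations) ...
warmup = LinearCyclicalScheduler(optimizer, "lr", start_value=1e-4,
                                 end_value=1e-2, cycle_size=one_cycle_size)
# ... then hand over to a decay schedule for the rest of training.
decay = CosineAnnealingScheduler(optimizer, "lr", start_value=1e-2,
                                 end_value=1e-5, cycle_size=one_cycle_size)
lr_scheduler = ConcatScheduler(schedulers=[warmup, decay],
                               durations=[one_cycle_size // 2],
                               save_history=True)
trainer.add_event_handler(Events.ITERATION_COMPLETED, lr_scheduler)

trainer.run([torch.randn(4, 10)] * 10, max_epochs=2)
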
Example #26
0
class ClosedRoom(MultiAgentSimulation):
    r"""Simulation for visualizing collision avoidance."""
    size_leaders = Int(
        default_value=0,
        min=0,
        help='Amount of active agents')
    size_herding = Int(
        default_value=150,
        min=0,
        help='Amount of herding agents')
    agent_type = Enum(
        default_value=Circular,
        values=(Circular, ThreeCircle))
    body_type = Enum(
        default_value='adult',
        values=('adult',))
    width = Float(
        default_value=20.0,
        min=0)
    height = Float(
        default_value=20.0,
        min=0)

    def attributes(self, is_leader):
        def wrapper():
            orientation = np.random.uniform(-np.pi, np.pi)
            d = dict(
                is_leader=is_leader,
                is_follower=not is_leader,
                body_type=self.body_type,
                orientation=orientation,
                velocity=1.0 * unit_vector(orientation),
                angular_velocity=0.0,
                target_direction=unit_vector(orientation),
                target_orientation=orientation)
            return d
        return wrapper

    @default('logic')
    def _default_logic(self):
        return Reset(self) << (
                Integrator(self) << (
                    Fluctuation(self),
                    Adjusting(self) << (
                        LeaderFollowerWithHerding(self),
                        Orientation(self),),
                    AgentAgentInteractions(self),
                    AgentObstacleInteractions(self)))

    @default('field')
    def _default_field(self):
        return fields.ClosedRoom(width=self.width, height=self.height)

    @default('agents')
    def _default_agents(self):
        agents = Agents(agent_type=self.agent_type)

        group_leader = AgentGroup(
            agent_type=self.agent_type,
            size=self.size_leaders,
            attributes=self.attributes(is_leader=True))

        agents.add_non_overlapping_group(
            group_leader, position_gen=self.field.sample_spawn(0))

        group_herding = AgentGroup(
            agent_type=self.agent_type,
            size=self.size_herding,
            attributes=self.attributes(is_leader=False))

        agents.add_non_overlapping_group(
            group_herding, position_gen=self.field.sample_spawn(0))

        return agents
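
The chained `<<` operators in _default_logic build the simulation's behavior
tree declaratively. As an illustration only (this is a toy, not the
crowddynamics implementation), the notation can be reproduced by overloading
__lshift__ so a node adopts a child or a tuple of children and returns
itself, which lets the nesting read top-down:

class Node:
    """Toy tree node mimicking the `parent << (child, ...)` notation."""
    def __init__(self, name):
        self.name = name
        self.children = []

    def __lshift__(self, other):
        # Accept a single node or a tuple of nodes as children.
        self.children.extend(other if isinstance(other, tuple) else (other,))
        return self

    def show(self, depth=0):
        lines = ["  " * depth + self.name]
        for child in self.children:
            lines.extend(child.show(depth + 1))
        return lines

tree = Node("Reset") << (
    Node("Integrator") << (
        Node("Fluctuation"),
        Node("Adjusting") << Node("Orientation")))
print("\n".join(tree.show()))
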
Example #27
0
class Figure(DOMWidget):
    """Main canvas for drawing a chart.

    The Figure object holds the list of Marks and Axes. It also holds an
    optional Interaction object that is responsible for figure-level mouse
    interactions, the "interaction layer".

    In addition, the Figure object has two reference scales for positioning
    items in an absolute fashion in the figure canvas.

    Attributes
    ----------

    title: string (default: '')
        title of the figure
    axes: List of Axes (default: [])
        list containing the instances of the axes for the figure
    marks: List of Marks (default: [])
        list containing the marks which are to be appended to the figure
    interaction: Interaction or None (default: None)
        optional interaction layer for the figure
    scale_x: Scale
        Scale representing the x values of the figure
    scale_y: Scale
        Scale representing the y values of the figure
    padding_x: Float (default: 0.0)
        Padding to be applied in the horizontal direction of the figure
        around the data points, proportion of the horizontal length
    padding_y: Float (default: 0.025)
        Padding to be applied in the vertical direction of the figure
        around the data points, proportion of the vertical length
    legend_location: {'top-right', 'top', 'top-left', 'left', 'bottom-left', 'bottom', 'bottom-right', 'right'}
        location of the legend relative to the center of the figure
    fig_color: Color (default: None)
        background color of the figure
    animation_duration: nonnegative int (default: 0)
        Duration of transition on change of data attributes, in milliseconds.


    Layout Attributes

    min_width: CFloat (default: 800.0)
        minimum width of the figure including the figure margins
    min_height: CFloat (default: 500.0)
        minimum height of the figure including the figure margins
    preserve_aspect: bool (default: False)
        Determines whether the aspect ratio for the figure specified by
        min_width and min_height is preserved during resizing. This does not
        guarantee that the data coordinates will have any specific aspect
        ratio.
    fig_margin: dict (default: {top=60, bottom=60, left=60, right=60})
        Dictionary containing the top, bottom, left and right margins. The user
        is responsible for making sure that the width and height are greater
        than the sum of the margins.

    """
    title = Unicode(sync=True, display_name='Title')
    axes = List(Instance(Axis), sync=True, **widget_serialization)
    marks = List(Instance(Mark), sync=True, **widget_serialization)
    interaction = Instance(Interaction,
                           allow_none=True,
                           sync=True,
                           **widget_serialization)
    scale_x = Instance(Scale, sync=True, **widget_serialization)
    scale_y = Instance(Scale, sync=True, **widget_serialization)
    fig_color = Color(None, allow_none=True, sync=True)

    min_width = CFloat(800.0, sync=True)
    min_height = CFloat(500.0, sync=True)
    preserve_aspect = Bool(False,
                           sync=True,
                           display_name='Preserve aspect ratio')

    fig_margin = Dict(dict(top=60, bottom=60, left=60, right=60), sync=True)
    padding_x = Float(default_value=0.0, min=0.0, max=1.0, sync=True)
    padding_y = Float(default_value=0.025, min=0.0, max=1.0, sync=True)
    legend_location = Enum([
        'top-right', 'top', 'top-left', 'left', 'bottom-left', 'bottom',
        'bottom-right', 'right'
    ],
                           default_value='top-right',
                           sync=True,
                           display_name='Legend position')
    animation_duration = Int(0, sync=True, display_name='Animation duration')

    def save(self):
        self.send({'type': 'save'})

    def _scale_x_default(self):
        return LinearScale(min=0, max=1)

    def _scale_y_default(self):
        return LinearScale(min=0, max=1)

    _view_name = Unicode('Figure', sync=True)
    _view_module = Unicode('nbextensions/bqplot/Figure', sync=True)
    _model_name = Unicode('FigureModel', sync=True)
    _model_module = Unicode('nbextensions/bqplot/FigureModel', sync=True)
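
A minimal usage sketch for the class above, assuming the public bqplot
package (the snippet itself targets an older nbextensions build):

import numpy as np
from bqplot import Axis, Figure, LinearScale, Lines

xs, ys = LinearScale(), LinearScale()
x = np.linspace(0, 2 * np.pi, 100)
line = Lines(x=x, y=np.sin(x), scales={'x': xs, 'y': ys})
ax_x = Axis(scale=xs, label='x')
ax_y = Axis(scale=ys, orientation='vertical', label='sin(x)')
fig = Figure(marks=[line], axes=[ax_x, ax_y], title='Sine',
             legend_location='top-right', padding_y=0.025)
fig        # display in a notebook; fig.save() sends a 'save' message
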
Example #28
0
class FourExitsRandomPlacing(MultiAgentSimulation):
    size_leaders = Int(
        default_value=4,
        min=0,
        help='Amount of active agents')
    size_herding = Int(
        default_value=100,
        min=0,
        help='Amount of herding agents')
    agent_type = Enum(
        default_value=Circular,
        values=(Circular, ThreeCircle))
    body_type = Enum(
        default_value='adult',
        values=('adult',))
    exit_width = Float(
        default_value=1.25,
        min=0, max=10)

    def attributes(self, has_target: bool=True, is_follower: bool=False):
        def wrapper():
            rand_target = np.random.randint(0, len(self.field.targets))
            target = rand_target if has_target else NO_TARGET
            orientation = np.random.uniform(-np.pi, np.pi)
            d = dict(
                target=target,
                is_leader=not is_follower,
                is_follower=is_follower,
                body_type=self.body_type,
                orientation=orientation,
                velocity=np.zeros(2),
                angular_velocity=0.0,
                target_direction=np.zeros(2),
                target_orientation=orientation,
                familiar_exit=np.random.randint(0, len(self.field.targets)))
            return d
        return wrapper

    @default('logic')
    def _default_logic(self):
        return Reset(self) << \
            InsideDomain(self) << (
                Integrator(self) << (
                    Fluctuation(self),
                    Adjusting(self) << (
                        Navigation(self) << ExitDetection(self) << LeaderFollower(self),
                        Orientation(self)),
                    AgentAgentInteractions(self),
                    AgentObstacleInteractions(self)))

    @default('field')
    def _default_field(self):
        return fields.FourExitsField(exit_width=self.exit_width)

    @default('agents')
    def _default_agents(self):
        agents = Agents(agent_type=self.agent_type)

        group_active = AgentGroup(
            agent_type=self.agent_type,
            size=self.size_leaders,
            attributes=self.attributes(has_target=True, is_follower=False))

        group_herding = AgentGroup(
            agent_type=self.agent_type,
            size=self.size_herding,
            attributes=self.attributes(has_target=False, is_follower=True))

        for group in (group_active, group_herding):
            agents.add_non_overlapping_group(
                group,
                position_gen=self.field.sample_spawn(0),
                obstacles=geom_to_linear_obstacles(self.field.obstacles))

        return agents
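
attributes() returns a zero-argument callable rather than a plain dict so
that AgentGroup can draw fresh random values for every agent it creates. A
standalone sketch of the same factory-closure pattern (make_attributes is a
hypothetical name, not part of the simulation API):

import numpy as np

def make_attributes(is_follower: bool = False):
    def wrapper():
        # Evaluated once per agent, so every call yields a new orientation.
        orientation = np.random.uniform(-np.pi, np.pi)
        return dict(is_follower=is_follower, orientation=orientation)
    return wrapper

gen = make_attributes(is_follower=True)
agents = [gen() for _ in range(3)]  # three agents, three distinct orientations
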
Example #29
0
class NotebookClient(LoggingConfigurable):
    """
    Encompasses a Client for executing cells in a notebook
    """

    timeout = Integer(
        None,
        allow_none=True,
        help=dedent("""
            The time to wait (in seconds) for output from executions.
            If a cell execution takes longer, a TimeoutError is raised.

            `None` or `-1` will disable the timeout. If `timeout_func` is set,
            it overrides `timeout`.
            """),
    ).tag(config=True)

    timeout_func = Any(
        default_value=None,
        allow_none=True,
        help=dedent("""
            A callable which, when given the cell source as input,
            returns the time to wait (in seconds) for output from cell
            executions. If a cell execution takes longer, a TimeoutError
            is raised.

            Returning `None` or `-1` will disable the timeout for the cell.
            Not setting `timeout_func` will cause the preprocessor to
            default to using the `timeout` trait for all cells. The
            `timeout_func` trait overrides `timeout` if it is not `None`.
            """),
    ).tag(config=True)

    interrupt_on_timeout = Bool(
        False,
        help=dedent("""
            If execution of a cell times out, interrupt the kernel and
            continue executing other cells rather than throwing an error and
            stopping.
            """),
    ).tag(config=True)

    startup_timeout = Integer(
        60,
        help=dedent("""
            The time to wait (in seconds) for the kernel to start.
            If kernel startup takes longer, a RuntimeError is
            raised.
            """),
    ).tag(config=True)

    allow_errors = Bool(
        False,
        help=dedent("""
            If `False` (default), when a cell raises an error the
            execution is stopped and a `CellExecutionError`
            is raised.
            If `True`, execution errors are ignored and the execution
            is continued until the end of the notebook. Output from
            exceptions is included in the cell output in both cases.
            """),
    ).tag(config=True)

    nest_asyncio = Bool(
        False,
        help=dedent("""
            If False (default), then blocking functions such as `execute`
            assume that no event loop is already running. These functions
            run their async counterparts (e.g. `async_execute`) in an event
            loop with `asyncio.run_until_complete`, which will fail if an
            event loop is already running. This can be the case if nbclient
            is used e.g. in a Jupyter Notebook. In that case, `nest_asyncio`
            should be set to True.
            """),
    ).tag(config=True)

    force_raise_errors = Bool(
        False,
        help=dedent("""
            If False (default), errors from executing the notebook can be
            allowed with a `raises-exception` tag on a single cell, or the
            `allow_errors` configurable option for all cells. An allowed error
            will be recorded in notebook output, and execution will continue.
            If an error occurs when it is not explicitly allowed, a
            `CellExecutionError` will be raised.
            If True, `CellExecutionError` will be raised for any error that occurs
            while executing the notebook. This overrides both the
            `allow_errors` option and the `raises-exception` cell tag.
            """),
    ).tag(config=True)

    extra_arguments = List(Unicode())

    kernel_name = Unicode(
        '',
        help=dedent("""
            Name of kernel to use to execute the cells.
            If not set, use the kernel_spec embedded in the notebook.
            """),
    ).tag(config=True)

    raise_on_iopub_timeout = Bool(
        False,
        help=dedent("""
            If `False` (default), then the kernel will continue waiting for
            iopub messages until it receives a kernel idle message, or until a
            timeout occurs, at which point the currently executing cell will be
            skipped. If `True`, then an error will be raised after the first
            timeout. This option generally does not need to be used, but may be
            useful in contexts where there is the possibility of executing
            notebooks with memory-consuming infinite loops.
            """),
    ).tag(config=True)

    store_widget_state = Bool(
        True,
        help=dedent("""
            If `True` (default), then the state of the Jupyter widgets created
            at the kernel will be stored in the metadata of the notebook.
            """),
    ).tag(config=True)

    record_timing = Bool(
        True,
        help=dedent("""
            If `True` (default), then the execution timings of each cell will
            be stored in the metadata of the notebook.
            """),
    ).tag(config=True)

    iopub_timeout = Integer(
        4,
        allow_none=False,
        help=dedent("""
            The time to wait (in seconds) for IOPub output. This generally
            doesn't need to be set, but on some slow networks (such as CI
            systems) the default timeout might not be long enough to get all
            messages.
            """),
    ).tag(config=True)

    shell_timeout_interval = Integer(
        5,
        allow_none=False,
        help=dedent("""
            The time to wait (in seconds) for Shell output before retrying.
            This generally doesn't need to be set, but if one needs to check
            for dead kernels at a faster rate this can help.
            """),
    ).tag(config=True)

    shutdown_kernel = Enum(
        ['graceful', 'immediate'],
        default_value='graceful',
        help=dedent("""
            If `graceful` (default), then the kernel is given time to clean
            up after executing all cells, e.g., to execute its `atexit` hooks.
            If `immediate`, then the kernel is signaled to immediately
            terminate.
            """),
    ).tag(config=True)

    ipython_hist_file = Unicode(
        default_value=':memory:',
        help=
        """Path to file to use for SQLite history database for an IPython kernel.

        The specific value `:memory:` (including the colons at both ends,
        but not the backticks) avoids creating a history file. Otherwise, IPython
        will create a history file for each kernel.

        When running kernels simultaneously (e.g. via multiprocessing), saving history to a single
        SQLite file can result in database errors, so using `:memory:` is recommended in
        non-interactive contexts.
        """,
    ).tag(config=True)

    kernel_manager_class = Type(config=True,
                                help='The kernel manager class to use.')

    @default('kernel_manager_class')
    def _kernel_manager_class_default(self):
        """Use a dynamic default to avoid importing jupyter_client at startup"""
        from jupyter_client import AsyncKernelManager

        return AsyncKernelManager

    _display_id_map = Dict(help=dedent("""
              mapping of locations of outputs with a given display_id;
              tracks the cell index and output index within cell.outputs for
              each appearance of the display_id:
              {
                  'display_id': {
                      cell_idx: [output_idx,]
                  }
              }
              """))

    display_data_priority = List(
        [
            'text/html',
            'application/pdf',
            'text/latex',
            'image/svg+xml',
            'image/png',
            'image/jpeg',
            'text/markdown',
            'text/plain',
        ],
        help="""
            An ordered list of preferred output types; the first type
            encountered will usually be used when converting, discarding
            the others.
            """,
    ).tag(config=True)

    resources = Dict(help=dedent("""
            Additional resources used in the conversion process. For example,
            passing ``{'metadata': {'path': run_path}}`` sets the
            execution path to ``run_path``.
            """))

    def __init__(self, nb, km=None, **kw):
        """Initializes the execution manager.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being executed.
        km : KernelManager (optional)
            Optional kernel manager. If none is provided, a kernel manager will
            be created.
        """
        super().__init__(**kw)
        self.nb = nb
        self.km = km
        self.reset_execution_trackers()

    def reset_execution_trackers(self):
        """Resets any per-execution trackers.
        """
        self.code_cells_executed = 0
        self._display_id_map = {}
        self.widget_state = {}
        self.widget_buffers = {}

    def start_kernel_manager(self):
        """Creates a new kernel manager.

        Returns
        -------
        kc : KernelClient
            Kernel client as created by the kernel manager `km`.
        """
        if not self.kernel_name:
            kn = self.nb.metadata.get('kernelspec', {}).get('name')
            if kn is not None:
                self.kernel_name = kn

        if not self.kernel_name:
            self.km = self.kernel_manager_class(config=self.config)
        else:
            self.km = self.kernel_manager_class(kernel_name=self.kernel_name,
                                                config=self.config)
        self.km.client_class = 'jupyter_client.asynchronous.AsyncKernelClient'
        return self.km

    async def _async_cleanup_kernel(self):
        try:
            # Send a polite shutdown request
            await ensure_async(self.kc.shutdown())
            try:
                # Ask the manager to kill the process; sometimes the built-in
                # and above shutdowns have not succeeded or run yet, so issue a
                # direct kill here and recover gracefully if it's already dead.
                await ensure_async(self.km.shutdown_kernel(now=True))
            except RuntimeError as e:
                # The error isn't specialized, so we have to check the message
                if 'No kernel is running!' not in str(e):
                    raise
        finally:
            # Remove any state left over even if we failed to stop the kernel
            await ensure_async(self.km.cleanup())
            await ensure_async(self.kc.stop_channels())
            self.kc = None

    _cleanup_kernel = run_sync(_async_cleanup_kernel)

    async def async_start_new_kernel_client(self, **kwargs):
        """Creates a new kernel client.

        Parameters
        ----------
        kwargs :
            Any options for `self.kernel_manager_class.start_kernel()`. Because
            that defaults to AsyncKernelManager, this will likely include options
            accepted by `AsyncKernelManager.start_kernel()`, which includes `cwd`.

        Returns
        -------
        kc : KernelClient
            Kernel client as created by the kernel manager `km`.
        """
        resource_path = self.resources.get('metadata', {}).get('path') or None
        if resource_path and 'cwd' not in kwargs:
            kwargs["cwd"] = resource_path

        if self.km.ipykernel and self.ipython_hist_file:
            self.extra_arguments += [
                '--HistoryManager.hist_file={}'.format(self.ipython_hist_file)
            ]

        await ensure_async(
            self.km.start_kernel(extra_arguments=self.extra_arguments,
                                 **kwargs))

        self.kc = self.km.client()
        await ensure_async(self.kc.start_channels())
        try:
            await ensure_async(
                self.kc.wait_for_ready(timeout=self.startup_timeout))
        except RuntimeError:
            await self._async_cleanup_kernel()
            raise
        self.kc.allow_stdin = False
        return self.kc

    start_new_kernel_client = run_sync(async_start_new_kernel_client)

    @contextmanager
    def setup_kernel(self, **kwargs):
        """
        Context manager for setting up the kernel to execute a notebook.

        This assigns the Kernel Manager (`self.km`) if missing and the Kernel Client (`self.kc`).

        When control returns from the yield it stops the client's zmq channels, and shuts
        down the kernel.
        """
        # Can't use run_until_complete on an asynccontextmanager function :(
        if self.km is None:
            self.start_kernel_manager()

        if not self.km.has_kernel:
            self.start_new_kernel_client(**kwargs)
        try:
            yield
        finally:
            self._cleanup_kernel()

    @asynccontextmanager
    async def async_setup_kernel(self, **kwargs):
        """
        Context manager for setting up the kernel to execute a notebook.

        This assigns the Kernel Manager (`self.km`) if missing and Kernel Client(`self.kc`).

        When control returns from the yield it stops the client's zmq channels, and shuts
        down the kernel.
        """
        reset_kc = kwargs.pop('reset_kc', False)
        if self.km is None:
            self.start_kernel_manager()

        if not self.km.has_kernel:
            await self.async_start_new_kernel_client(**kwargs)
        try:
            yield
        finally:
            if reset_kc:
                await self._async_cleanup_kernel()

    async def async_execute(self, **kwargs):
        """
        Executes each code cell.

        Parameters
        ----------
        kwargs :
            Any options for `self.kernel_manager_class.start_kernel()`. Because
            that defaults to AsyncKernelManager, this will likely include options
            accepted by `AsyncKernelManager.start_kernel()`, which includes `cwd`.
            If present, `reset_kc` is passed to `self.async_setup_kernel`:
            if True, the kernel client will be reset and a new one will be created
            and cleaned up after execution (default: False).

        Returns
        -------
        nb : NotebookNode
            The executed notebook.
        """
        reset_kc = kwargs.get('reset_kc', False)
        if reset_kc:
            await self._async_cleanup_kernel()
        self.reset_execution_trackers()

        async with self.async_setup_kernel(**kwargs):
            self.log.info("Executing notebook with kernel: %s",
                          self.kernel_name)
            for index, cell in enumerate(self.nb.cells):
                # Ignore `'execution_count' in content` as it's always 1
                # when store_history is False
                await self.async_execute_cell(
                    cell, index, execution_count=self.code_cells_executed + 1)
            msg_id = await ensure_async(self.kc.kernel_info())
            info_msg = await self.async_wait_for_reply(msg_id)
            self.nb.metadata['language_info'] = info_msg['content'][
                'language_info']
            self.set_widgets_metadata()

        return self.nb

    execute = run_sync(async_execute)

    def set_widgets_metadata(self):
        if self.widget_state:
            self.nb.metadata.widgets = {
                'application/vnd.jupyter.widget-state+json': {
                    'state': {
                        model_id: self._serialize_widget_state(state)
                        for model_id, state in self.widget_state.items()
                        if '_model_name' in state
                    },
                    'version_major': 2,
                    'version_minor': 0,
                }
            }
            for key, widget in self.nb.metadata.widgets[
                    'application/vnd.jupyter.widget-state+json'][
                        'state'].items():
                buffers = self.widget_buffers.get(key)
                if buffers:
                    widget['buffers'] = buffers

    def _update_display_id(self, display_id, msg):
        """Update outputs with a given display_id"""
        if display_id not in self._display_id_map:
            self.log.debug("display id %r not in %s", display_id,
                           self._display_id_map)
            return

        if msg['header']['msg_type'] == 'update_display_data':
            msg['header']['msg_type'] = 'display_data'

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg['msg_type'])
            return

        for cell_idx, output_indices in self._display_id_map[display_id].items():
            cell = self.nb['cells'][cell_idx]
            outputs = cell['outputs']
            for output_idx in output_indices:
                outputs[output_idx]['data'] = out['data']
                outputs[output_idx]['metadata'] = out['metadata']

    async def _async_poll_for_reply(self, msg_id, cell, timeout,
                                    task_poll_output_msg):
        if timeout is not None:
            deadline = monotonic() + timeout
        while True:
            try:
                msg = await ensure_async(
                    self.kc.shell_channel.get_msg(timeout=timeout))
                if msg['parent_header'].get('msg_id') == msg_id:
                    if self.record_timing:
                        cell['metadata']['execution'][
                            'shell.execute_reply'] = timestamp()
                    try:
                        await asyncio.wait_for(task_poll_output_msg,
                                               self.iopub_timeout)
                    except (asyncio.TimeoutError, Empty):
                        if self.raise_on_iopub_timeout:
                            raise CellTimeoutError.error_from_timeout_and_cell(
                                "Timeout waiting for IOPub output",
                                self.iopub_timeout, cell)
                        else:
                            self.log.warning(
                                "Timeout waiting for IOPub output")
                    return msg
                else:
                    if timeout is not None:
                        timeout = max(0, deadline - monotonic())
            except Empty:
                # received no message, check if kernel is still alive
                await self._async_check_alive()
                await self._async_handle_timeout(timeout, cell)

    async def _async_poll_output_msg(self, parent_msg_id, cell, cell_index):
        while True:
            msg = await ensure_async(
                self.kc.iopub_channel.get_msg(timeout=None))
            if msg['parent_header'].get('msg_id') == parent_msg_id:
                try:
                    # Will raise CellExecutionComplete when completed
                    self.process_message(msg, cell, cell_index)
                except CellExecutionComplete:
                    return

    def _get_timeout(self, cell):
        if self.timeout_func is not None and cell is not None:
            timeout = self.timeout_func(cell)
        else:
            timeout = self.timeout

        if not timeout or timeout < 0:
            timeout = None

        return timeout

    async def _async_handle_timeout(self, timeout, cell=None):
        self.log.error("Timeout waiting for execute reply (%s s).", timeout)
        if self.interrupt_on_timeout:
            self.log.error("Interrupting kernel")
            await ensure_async(self.km.interrupt_kernel())
        else:
            raise CellTimeoutError.error_from_timeout_and_cell(
                "Cell execution timed out", timeout, cell)

    async def _async_check_alive(self):
        if not await ensure_async(self.kc.is_alive()):
            self.log.error("Kernel died while waiting for execute reply.")
            raise DeadKernelError("Kernel died")

    async def async_wait_for_reply(self, msg_id, cell=None):
        # wait for finish, with timeout
        timeout = self._get_timeout(cell)
        cumulative_time = 0
        while True:
            try:
                msg = await ensure_async(
                    self.kc.shell_channel.get_msg(
                        timeout=self.shell_timeout_interval))
            except Empty:
                await self._async_check_alive()
                cumulative_time += self.shell_timeout_interval
                if timeout and cumulative_time > timeout:
                    await self._async_handle_timeout(timeout, cell)
                    break
            else:
                if msg['parent_header'].get('msg_id') == msg_id:
                    return msg

    wait_for_reply = run_sync(async_wait_for_reply)
    # Backwards compatibility naming for papermill
    _wait_for_reply = wait_for_reply

    def _timeout_with_deadline(self, timeout, deadline):
        if deadline is not None and deadline - monotonic() < timeout:
            timeout = deadline - monotonic()

        if timeout < 0:
            timeout = 0

        return timeout

    def _passed_deadline(self, deadline):
        if deadline is not None and deadline - monotonic() <= 0:
            return True
        return False

    def _check_raise_for_error(self, cell, exec_reply):
        cell_allows_errors = self.allow_errors or "raises-exception" in cell.metadata.get(
            "tags", [])

        if self.force_raise_errors or not cell_allows_errors:
            if (exec_reply is not None
                ) and exec_reply['content']['status'] == 'error':
                raise CellExecutionError.from_cell_and_msg(
                    cell, exec_reply['content'])

    async def async_execute_cell(self,
                                 cell,
                                 cell_index,
                                 execution_count=None,
                                 store_history=True):
        """
        Executes a single code cell.

        To execute all cells see :meth:`execute`.

        Parameters
        ----------
        cell : nbformat.NotebookNode
            The cell which is currently being processed.
        cell_index : int
            The position of the cell within the notebook object.
        execution_count : int
            The execution count to be assigned to the cell (default: Use kernel response)
        store_history : bool
            Determines if history should be stored in the kernel (default: True).
            Specific to ipython kernels, which can store command histories.

        Raises
        ------
        CellExecutionError
            If execution failed and should raise an exception, this will be raised
            with defaults about the failure.

        Returns
        -------
        cell : NotebookNode
            The cell which was just processed.
        """
        if cell.cell_type != 'code' or not cell.source.strip():
            self.log.debug("Skipping non-executing cell %s", cell_index)
            return cell

        if self.record_timing and 'execution' not in cell['metadata']:
            cell['metadata']['execution'] = {}

        self.log.debug("Executing cell:\n%s", cell.source)
        parent_msg_id = await ensure_async(
            self.kc.execute(cell.source,
                            store_history=store_history,
                            stop_on_error=not self.allow_errors))
        # We launched a code cell to execute
        self.code_cells_executed += 1
        exec_timeout = self._get_timeout(cell)

        cell.outputs = []
        self.clear_before_next_output = False

        task_poll_output_msg = asyncio.ensure_future(
            self._async_poll_output_msg(parent_msg_id, cell, cell_index))
        try:
            exec_reply = await self._async_poll_for_reply(
                parent_msg_id, cell, exec_timeout, task_poll_output_msg)
        except Exception as e:
            # Best effort to cancel request if it hasn't been resolved
            try:
                # Check if the task_poll_output is doing the raising for us
                if not isinstance(e, CellControlSignal):
                    task_poll_output_msg.cancel()
            finally:
                raise

        if execution_count:
            cell['execution_count'] = execution_count
        self._check_raise_for_error(cell, exec_reply)
        self.nb['cells'][cell_index] = cell
        return cell

    execute_cell = run_sync(async_execute_cell)

    def process_message(self, msg, cell, cell_index):
        """
        Processes a kernel message, updates cell state, and returns the
        resulting output object that was appended to cell.outputs.

        The input argument `cell` is modified in-place.

        Parameters
        ----------
        msg : dict
            The kernel message being processed.
        cell : nbformat.NotebookNode
            The cell which is currently being processed.
        cell_index : int
            The position of the cell within the notebook object.

        Returns
        -------
        output : dict
            The execution output payload (or None for no output).

        Raises
        ------
        CellExecutionComplete
          Once a message arrives which indicates computation completeness.

        """
        msg_type = msg['msg_type']
        self.log.debug("msg_type: %s", msg_type)
        content = msg['content']
        self.log.debug("content: %s", content)

        display_id = content.get('transient', {}).get('display_id', None)
        if display_id and msg_type in {
                'execute_result', 'display_data', 'update_display_data'
        }:
            self._update_display_id(display_id, msg)

        # set the prompt number for the input and the output
        if 'execution_count' in content:
            cell['execution_count'] = content['execution_count']

        if self.record_timing:
            if msg_type == 'status':
                if content['execution_state'] == 'idle':
                    cell['metadata']['execution'][
                        'iopub.status.idle'] = timestamp()
                elif content['execution_state'] == 'busy':
                    cell['metadata']['execution'][
                        'iopub.status.busy'] = timestamp()
            elif msg_type == 'execute_input':
                cell['metadata']['execution'][
                    'iopub.execute_input'] = timestamp()

        if msg_type == 'status':
            if content['execution_state'] == 'idle':
                raise CellExecutionComplete()
        elif msg_type == 'clear_output':
            self.clear_output(cell.outputs, msg, cell_index)
        elif msg_type.startswith('comm'):
            self.handle_comm_msg(cell.outputs, msg, cell_index)
        # Check for remaining messages we don't process
        elif msg_type not in ['execute_input', 'update_display_data']:
            # Assign output as our processed "result"
            return self.output(cell.outputs, msg, display_id, cell_index)

    def output(self, outs, msg, display_id, cell_index):
        msg_type = msg['msg_type']

        try:
            out = output_from_msg(msg)
        except ValueError:
            self.log.error("unhandled iopub msg: " + msg_type)
            return

        if self.clear_before_next_output:
            self.log.debug('Executing delayed clear_output')
            outs[:] = []
            self.clear_display_id_mapping(cell_index)
            self.clear_before_next_output = False

        if display_id:
            # record output index in:
            #   _display_id_map[display_id][cell_idx]
            cell_map = self._display_id_map.setdefault(display_id, {})
            output_idx_list = cell_map.setdefault(cell_index, [])
            output_idx_list.append(len(outs))

        outs.append(out)

        return out

    def clear_output(self, outs, msg, cell_index):
        content = msg['content']
        if content.get('wait'):
            self.log.debug('Wait to clear output')
            self.clear_before_next_output = True
        else:
            self.log.debug('Immediate clear output')
            outs[:] = []
            self.clear_display_id_mapping(cell_index)

    def clear_display_id_mapping(self, cell_index):
        for display_id, cell_map in self._display_id_map.items():
            if cell_index in cell_map:
                cell_map[cell_index] = []

    def handle_comm_msg(self, outs, msg, cell_index):
        content = msg['content']
        data = content['data']
        if self.store_widget_state and 'state' in data:  # ignore custom messages
            self.widget_state.setdefault(content['comm_id'],
                                         {}).update(data['state'])
            if 'buffer_paths' in data and data['buffer_paths']:
                self.widget_buffers[
                    content['comm_id']] = self._get_buffer_data(msg)

    def _serialize_widget_state(self, state):
        """Serialize a widget state, following format in @jupyter-widgets/schema."""
        return {
            'model_name': state.get('_model_name'),
            'model_module': state.get('_model_module'),
            'model_module_version': state.get('_model_module_version'),
            'state': state,
        }

    def _get_buffer_data(self, msg):
        encoded_buffers = []
        paths = msg['content']['data']['buffer_paths']
        buffers = msg['buffers']
        for path, buffer in zip(paths, buffers):
            encoded_buffers.append({
                'data': base64.b64encode(buffer).decode('utf-8'),
                'encoding': 'base64',
                'path': path,
            })
        return encoded_buffers
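
A typical driver for the client above, following nbclient's documented entry
points (the notebook paths are placeholders):

import nbformat
from nbclient import NotebookClient

nb = nbformat.read('notebook.ipynb', as_version=4)
client = NotebookClient(nb, timeout=600, kernel_name='python3')
client.execute()                      # runs every code cell in order
nbformat.write(nb, 'executed.ipynb')  # cells now carry outputs and timings
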
Example #30
0
class Status(Reference):
    execution_state = Enum(('busy', 'idle', 'starting'), default_value='busy')