Example #1
class Bar(Foo):
    b = Str('gotit', config=False)
    c = Float(config=True)
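Usage note: traits marked config=True can be set from a config object, while config=False traits ignore it. A minimal sketch, assuming Foo is a Configurable subclass and the traitlets.config package is available:

from traitlets.config import Config

c = Config()
c.Bar.c = 10.0       # targets the configurable Float trait
bar = Bar(config=c)  # bar.c == 10.0; bar.b keeps 'gotit' since config=False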
Example #2
class IPEngineApp(BaseParallelApplication):

    name = 'ipengine'
    description = _description
    examples = _examples
    config_file_name = Unicode(default_config_file_name)
    classes = List([ProfileDir, Session, EngineFactory, Kernel, MPI])

    startup_script = Unicode('', config=True,
        help='specify a script to be run at startup')
    startup_command = Unicode('', config=True,
            help='specify a command to be run at startup')

    url_file = Unicode('', config=True,
        help="""The full location of the file containing the connection information for
        the controller. If this is not given, the file must be in the
        security directory of the cluster directory.  This location is
        resolved using the `profile` or `profile_dir` options.""",
        )
    wait_for_url_file = Float(5, config=True,
        help="""The maximum number of seconds to wait for url_file to exist.
        This is useful for batch-systems and shared-filesystems where the
        controller and engine are started at the same time and it
        may take a moment for the controller to write the connector files.""")

    url_file_name = Unicode('ipcontroller-engine.json', config=True)

    def _cluster_id_changed(self, name, old, new):
        if new:
            base = 'ipcontroller-%s' % new
        else:
            base = 'ipcontroller'
        self.url_file_name = "%s-engine.json" % base

    log_url = Unicode('', config=True,
        help="""The URL for the iploggerapp instance, for forwarding
        logging to a central location.""")
    
    # an IPKernelApp instance, used to setup listening for shell frontends
    kernel_app = Instance(IPKernelApp)

    aliases = Dict(aliases)
    flags = Dict(flags)
    
    @property
    def kernel(self):
        """allow access to the Kernel object, so I look like IPKernelApp"""
        return self.engine.kernel

    def find_url_file(self):
        """Set the url file.

        Here we don't try to actually see if it exists or is valid, as that
        is handled by the connection logic.
        """
        config = self.config
        # Find the actual controller key file
        if not self.url_file:
            self.url_file = os.path.join(
                self.profile_dir.security_dir,
                self.url_file_name
            )
    
    def load_connector_file(self):
        """load config from a JSON connector file,
        at a *lower* priority than command-line/config files.
        """
        
        self.log.info("Loading url_file %r", self.url_file)
        config = self.config
        
        with open(self.url_file) as f:
            d = json.loads(f.read())
        
        if 'exec_key' in d:
            config.Session.key = cast_bytes(d['exec_key'])
        
        try:
            config.EngineFactory.location
        except AttributeError:
            config.EngineFactory.location = d['location']
        
        d['url'] = disambiguate_url(d['url'], config.EngineFactory.location)
        try:
            config.EngineFactory.url
        except AttributeError:
            config.EngineFactory.url = d['url']
        
        try:
            config.EngineFactory.sshserver
        except AttributeError:
            config.EngineFactory.sshserver = d['ssh']
    
    def bind_kernel(self, **kwargs):
        """Promote engine to listening kernel, accessible to frontends."""
        if self.kernel_app is not None:
            return
        
        self.log.info("Opening ports for direct connections as an IPython kernel")
        
        kernel = self.kernel
        
        kwargs.setdefault('config', self.config)
        kwargs.setdefault('log', self.log)
        kwargs.setdefault('profile_dir', self.profile_dir)
        kwargs.setdefault('session', self.engine.session)
        
        app = self.kernel_app = IPKernelApp(**kwargs)
        
        # allow IPKernelApp.instance():
        IPKernelApp._instance = app
        
        app.init_connection_file()
        # relevant contents of init_sockets:
        
        app.shell_port = app._bind_socket(kernel.shell_streams[0], app.shell_port)
        app.log.debug("shell ROUTER Channel on port: %i", app.shell_port)
        
        app.iopub_port = app._bind_socket(kernel.iopub_socket, app.iopub_port)
        app.log.debug("iopub PUB Channel on port: %i", app.iopub_port)
        
        kernel.stdin_socket = self.engine.context.socket(zmq.ROUTER)
        app.stdin_port = app._bind_socket(kernel.stdin_socket, app.stdin_port)
        app.log.debug("stdin ROUTER Channel on port: %i", app.stdin_port)
        
        # start the heartbeat, and log connection info:
        
        app.init_heartbeat()
        
        app.log_connection_info()
        app.write_connection_file()
        
    
    def init_engine(self):
        # This is the working dir by now.
        sys.path.insert(0, '')
        config = self.config
        # print config
        self.find_url_file()
        
        # was the url manually specified?
        keys = set(self.config.EngineFactory.keys())
        keys = keys.union(set(self.config.RegistrationFactory.keys()))
        
        if keys.intersection(set(['ip', 'url', 'port'])):
            # Connection info was specified, don't wait for the file
            url_specified = True
            self.wait_for_url_file = 0
        else:
            url_specified = False

        if self.wait_for_url_file and not os.path.exists(self.url_file):
            self.log.warn("url_file %r not found", self.url_file)
            self.log.warn("Waiting up to %.1f seconds for it to arrive.", self.wait_for_url_file)
            tic = time.time()
            while not os.path.exists(self.url_file) and (time.time()-tic < self.wait_for_url_file):
                # wait for url_file to exist, or until time limit
                time.sleep(0.1)
            
        if os.path.exists(self.url_file):
            self.load_connector_file()
        elif not url_specified:
            self.log.fatal("Fatal: url file never arrived: %s", self.url_file)
            self.exit(1)
        
        
        try:
            exec_lines = config.IPKernelApp.exec_lines
        except AttributeError:
            try:
                exec_lines = config.InteractiveShellApp.exec_lines
            except AttributeError:
                exec_lines = config.IPKernelApp.exec_lines = []
        try:
            exec_files = config.IPKernelApp.exec_files
        except AttributeError:
            try:
                exec_files = config.InteractiveShellApp.exec_files
            except AttributeError:
                exec_files = config.IPKernelApp.exec_files = []
        
        if self.startup_script:
            exec_files.append(self.startup_script)
        if self.startup_command:
            exec_lines.append(self.startup_command)

        # Create the underlying shell class and Engine
        # shell_class = import_item(self.master_config.Global.shell_class)
        # print self.config
        try:
            self.engine = EngineFactory(config=config, log=self.log)
        except:
            self.log.error("Couldn't start the Engine", exc_info=True)
            self.exit(1)
    
    def forward_logging(self):
        if self.log_url:
            self.log.info("Forwarding logging to %s", self.log_url)
            context = self.engine.context
            lsock = context.socket(zmq.PUB)
            lsock.connect(self.log_url)
            handler = EnginePUBHandler(self.engine, lsock)
            handler.setLevel(self.log_level)
            self.log.addHandler(handler)
    
    def init_mpi(self):
        global mpi
        self.mpi = MPI(config=self.config)

        mpi_import_statement = self.mpi.init_script
        if mpi_import_statement:
            try:
                self.log.info("Initializing MPI:")
                self.log.info(mpi_import_statement)
                exec(mpi_import_statement, globals())
            except:
                mpi = None
        else:
            mpi = None

    @catch_config_error
    def initialize(self, argv=None):
        super(IPEngineApp, self).initialize(argv)
        self.init_mpi()
        self.init_engine()
        self.forward_logging()
    
    def start(self):
        self.engine.start()
        try:
            self.engine.loop.start()
        except KeyboardInterrupt:
            self.log.critical("Engine Interrupted, shutting down...\n")
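A note on the try/except AttributeError pattern used in load_connector_file above: a Config object auto-creates capitalized sub-sections on attribute access, but reading an unset lowercase attribute raises AttributeError, so values from the connector file are applied only when nothing was already set at higher priority. A minimal sketch of that precedence pattern, assuming the traitlets.config package:

from traitlets.config import Config

config = Config()
try:
    config.EngineFactory.location  # AttributeError unless set earlier
except AttributeError:
    config.EngineFactory.location = 'value-from-connector-file'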
Example #3
class Bar(Foo):
    b = Unicode('gotit', config=False, help="The string b.")
    c = Float(config=True, help="The string c.")
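The help strings are surfaced by the config system's generated help. A sketch, assuming Foo derives from Configurable (class_print_help is a Configurable classmethod in traitlets):

Bar.class_print_help()  # prints help for the config=True traits, e.g. "The string c."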
Example #4
class A(HasTraits):
    i = Int(config_key='VALUE1', other_thing='VALUE2')
    f = Float(config_key='VALUE3', other_thing='VALUE2')
    j = Int(0)
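The extra keyword arguments here are stored as trait metadata, which HasTraits can query. A short sketch using the standard traitlets accessors:

a = A()
a.traits(config_key='VALUE1')         # {'i': <Int trait>} -- filtered by metadata
a.trait_metadata('f', 'other_thing')  # 'VALUE2'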
Example #5
class FloatTrait(HasTraits):

    value = Float(99.0)
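Float traits validate on assignment: ints are cast to float, and non-numeric values are rejected. A sketch, assuming the traitlets package:

from traitlets import TraitError

ft = FloatTrait()   # ft.value == 99.0 by default
ft.value = 10       # cast to 10.0
try:
    ft.value = 'ten'
except TraitError:  # non-numeric assignment raises TraitError
    pass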
Example #6
class Spawner(LoggingConfigurable):
    """Base class for spawning single-user notebook servers.
    
    Subclass this, and override the following methods:
    
    - load_state
    - get_state
    - start
    - stop
    - poll
    """

    db = Any()
    user = Any()
    hub = Any()
    api_token = Unicode()
    ip = Unicode(
        'localhost',
        config=True,
        help=
        "The IP address (or hostname) the single-user server should listen on")
    start_timeout = Integer(
        60,
        config=True,
        help="""Timeout (in seconds) before giving up on the spawner.
        
        This is the timeout for start to return, not the timeout for the server to respond.
        Callers of spawner.start will assume that startup has failed if it takes longer than this.
        start should return when the server process is started and its location is known.
        """)

    http_timeout = Integer(
        30,
        config=True,
        help="""Timeout (in seconds) before giving up on a spawned HTTP server

        Once a server has successfully been spawned, this is the amount of time
        we wait before assuming that the server is unable to accept
        connections.
        """)

    poll_interval = Integer(
        30,
        config=True,
        help="""Interval (in seconds) on which to poll the spawner.""")
    _callbacks = List()
    _poll_callback = Any()

    debug = Bool(False,
                 config=True,
                 help="Enable debug-logging of the single-user server")

    env_keep = List(
        [
            'PATH',
            'PYTHONPATH',
            'CONDA_ROOT',
            'CONDA_DEFAULT_ENV',
            'VIRTUAL_ENV',
            'LANG',
            'LC_ALL',
        ],
        config=True,
        help="Whitelist of environment variables for the subprocess to inherit"
    )
    env = Dict()

    def _env_default(self):
        env = {}
        for key in self.env_keep:
            if key in os.environ:
                env[key] = os.environ[key]
        env['JPY_API_TOKEN'] = self.api_token
        return env

    cmd = List(Unicode,
               default_value=['jupyterhub-singleuser'],
               config=True,
               help="""The command used for starting notebooks.""")
    args = List(
        Unicode,
        config=True,
        help="""Extra arguments to be passed to the single-user server""")

    notebook_dir = Unicode(
        '',
        config=True,
        help="""The notebook directory for the single-user server
        
        `~` will be expanded to the user's home directory
        """)

    def __init__(self, **kwargs):
        super(Spawner, self).__init__(**kwargs)
        if self.user.state:
            self.load_state(self.user.state)

    def load_state(self, state):
        """load state from the database
        
        This is the extensible part of state
        
        Override in a subclass if there is state to load.
        Should call `super`.
        
        See Also
        --------
        
        get_state, clear_state
        """
        pass

    def get_state(self):
        """store the state necessary for load_state
        
        A black box of extra state for custom spawners.
        Should call `super`.
        
        Returns
        -------
        
        state: dict
             a JSONable dict of state
        """
        state = {}
        return state

    def clear_state(self):
        """clear any state that should be cleared when the process stops
        
        State that should be preserved across server instances should not be cleared.
        
        Subclasses should call super, to ensure that state is properly cleared.
        """
        self.api_token = ''

    def get_args(self):
        """Return the arguments to be passed after self.cmd"""
        args = [
            '--user=%s' % self.user.name,
            '--port=%i' % self.user.server.port,
            '--cookie-name=%s' % self.user.server.cookie_name,
            '--base-url=%s' % self.user.server.base_url,
            '--hub-prefix=%s' % self.hub.server.base_url,
            '--hub-api-url=%s' % self.hub.api_url,
        ]
        if self.ip:
            args.append('--ip=%s' % self.ip)
        if self.notebook_dir:
            args.append('--notebook-dir=%s' % self.notebook_dir)
        if self.debug:
            args.append('--debug')
        args.extend(self.args)
        return args

    @gen.coroutine
    def start(self):
        """Start the single-user process"""
        raise NotImplementedError(
            "Override in subclass. Must be a Tornado gen.coroutine.")

    @gen.coroutine
    def stop(self, now=False):
        """Stop the single-user process"""
        raise NotImplementedError(
            "Override in subclass. Must be a Tornado gen.coroutine.")

    @gen.coroutine
    def poll(self):
        """Check if the single-user process is running

        return None if it is, an exit status (0 if unknown) if it is not.
        """
        raise NotImplementedError(
            "Override in subclass. Must be a Tornado gen.coroutine.")

    def add_poll_callback(self, callback, *args, **kwargs):
        """add a callback to fire when the subprocess stops
        
        as noticed by periodic poll_and_notify()
        """
        if args or kwargs:
            cb = callback
            callback = lambda: cb(*args, **kwargs)
        self._callbacks.append(callback)

    def stop_polling(self):
        """stop the periodic poll"""
        if self._poll_callback:
            self._poll_callback.stop()
            self._poll_callback = None

    def start_polling(self):
        """Start polling periodically
        
        callbacks registered via `add_poll_callback` will fire
        if/when the process stops.
        
        Explicit termination via the stop method will not trigger the callbacks.
        """
        if self.poll_interval <= 0:
            self.log.debug("Not polling subprocess")
            return
        else:
            self.log.debug("Polling subprocess every %is", self.poll_interval)

        self.stop_polling()

        self._poll_callback = PeriodicCallback(self.poll_and_notify,
                                               1e3 * self.poll_interval)
        self._poll_callback.start()

    @gen.coroutine
    def poll_and_notify(self):
        """Used as a callback to periodically poll the process,
        and notify any watchers
        """
        status = yield self.poll()
        if status is None:
            # still running, nothing to do here
            return

        self.stop_polling()

        add_callback = IOLoop.current().add_callback
        for callback in self._callbacks:
            add_callback(callback)

    death_interval = Float(0.1)

    @gen.coroutine
    def wait_for_death(self, timeout=10):
        """wait for the process to die, up to timeout seconds"""
        loop = IOLoop.current()
        for i in range(int(timeout / self.death_interval)):
            status = yield self.poll()
            if status is not None:
                break
            else:
                yield gen.Task(loop.add_timeout,
                               loop.time() + self.death_interval)
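As the class docstring says, concrete spawners override start/stop/poll. A hypothetical minimal subclass, just to illustrate the contract (assumes subprocess is imported; not how any real spawner is implemented):

class LocalDemoSpawner(Spawner):
    @gen.coroutine
    def start(self):
        # launch the single-user server as a local subprocess
        self.proc = subprocess.Popen(self.cmd + self.get_args(), env=self.env)

    @gen.coroutine
    def stop(self, now=False):
        self.proc.terminate()

    @gen.coroutine
    def poll(self):
        # None while running, exit status once stopped
        raise gen.Return(self.proc.poll())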
Example #7
class ZMQTerminalInteractiveShell(TerminalInteractiveShell):
    """A subclass of TerminalInteractiveShell that uses the 0MQ kernel"""
    _executing = False
    _execution_state = Unicode('')
    kernel_timeout = Float(
        60,
        config=True,
        help="""Timeout for giving up on a kernel (in seconds).
        
        On first connect and restart, the console tests whether the
        kernel is running and responsive by sending kernel_info_requests.
        This sets the timeout in seconds for how long the kernel can take
        before being presumed dead.
        """)

    image_handler = Enum(('PIL', 'stream', 'tempfile', 'callable'),
                         config=True,
                         help="""
        Handler for image type output.  This is useful, for example,
        when connecting to a kernel in which the pylab inline backend is
        activated.  There are four handlers defined.  'PIL': Use
        Python Imaging Library to pop up the image; 'stream': Use an
        external program to show the image.  Image will be fed into
        the STDIN of the program.  You will need to configure
        `stream_image_handler`; 'tempfile': Use an external program to
        show the image.  Image will be saved in a temporary file and
        the program is called with the temporary file.  You will need
        to configure `tempfile_image_handler`; 'callable': You can set
        any Python callable which is called with the image data.  You
        will need to configure `callable_image_handler`.
        """)

    stream_image_handler = List(config=True,
                                help="""
        Command to invoke an image viewer program when you are using
        the 'stream' image handler.  This option is a list of strings where
        the first element is the command itself and the remainder are the
        options for the command.  Raw image data is given as STDIN to
        the program.
        """)

    tempfile_image_handler = List(config=True,
                                  help="""
        Command to invoke an image viewer program when you are using
        the 'tempfile' image handler.  This option is a list of strings
        where the first element is the command itself and the remainder
        are the options for the command.  You can use {file} and
        {format} in the string to represent the location of the
        generated image file and image format.
        """)

    callable_image_handler = Any(config=True,
                                 help="""
        Callable object called via 'callable' image handler with one
        argument, `data`, which is `msg["content"]["data"]` where
        `msg` is the message from the iopub channel.  For example, you can
        find base64 encoded PNG data as `data['image/png']`.
        """)

    mime_preference = List(
        default_value=['image/png', 'image/jpeg', 'image/svg+xml'],
        config=True,
        allow_none=False,
        help="""
        Preferred object representation MIME type in order.  First
        matched MIME type will be used.
        """)

    manager = Instance('IPython.kernel.KernelManager')
    client = Instance('IPython.kernel.KernelClient')

    def _client_changed(self, name, old, new):
        self.session_id = new.session.session

    session_id = Unicode()

    def init_completer(self):
        """Initialize the completion machinery.

        This creates completion machinery that can be used by client code,
        either interactively in-process (typically triggered by the readline
        library), programmatically (such as in test suites) or out-of-process
        (typically over the network by remote frontends).
        """
        from IPython.core.completerlib import (module_completer,
                                               magic_run_completer,
                                               cd_completer)

        self.Completer = ZMQCompleter(self, self.client, config=self.config)

        self.set_hook('complete_command', module_completer, str_key='import')
        self.set_hook('complete_command', module_completer, str_key='from')
        self.set_hook('complete_command', magic_run_completer, str_key='%run')
        self.set_hook('complete_command', cd_completer, str_key='%cd')

        # Only configure readline if we truly are using readline.  IPython can
        # do tab-completion over the network, in GUIs, etc, where readline
        # itself may be absent
        if self.has_readline:
            self.set_readline_completer()

    def ask_exit(self):
        super(ZMQTerminalInteractiveShell, self).ask_exit()
        if self.exit_now and self.manager:
            self.client.shutdown()

    def run_cell(self, cell, store_history=True):
        """Run a complete IPython cell.
        
        Parameters
        ----------
        cell : str
          The code (including IPython code such as %magic functions) to run.
        store_history : bool
          If True, the raw and translated cell will be stored in IPython's
          history. For user code calling back into IPython's machinery, this
          should be set to False.
        """
        if (not cell) or cell.isspace():
            return

        if cell.strip() == 'exit':
            # explicitly handle 'exit' command
            return self.ask_exit()

        # flush stale replies, which could have been ignored, due to missed heartbeats
        while self.client.shell_channel.msg_ready():
            self.client.shell_channel.get_msg()
        # shell_channel.execute takes 'hidden', which is the inverse of store_hist
        msg_id = self.client.shell_channel.execute(cell, not store_history)

        # first thing is wait for any side effects (output, stdin, etc.)
        self._executing = True
        self._execution_state = "busy"
        while self._execution_state != 'idle' and self.client.is_alive():
            try:
                self.handle_stdin_request(msg_id, timeout=0.05)
            except Empty:
                # display intermediate print statements, etc.
                self.handle_iopub(msg_id)
                pass

        # after all of that is done, wait for the execute reply
        while self.client.is_alive():
            try:
                self.handle_execute_reply(msg_id, timeout=0.05)
            except Empty:
                pass
            else:
                break
        self._executing = False

    #-----------------
    # message handlers
    #-----------------

    def handle_execute_reply(self, msg_id, timeout=None):
        msg = self.client.shell_channel.get_msg(block=False, timeout=timeout)
        if msg["parent_header"].get("msg_id", None) == msg_id:

            self.handle_iopub(msg_id)

            content = msg["content"]
            status = content['status']

            if status == 'aborted':
                self.write('Aborted\n')
                return
            elif status == 'ok':
                # print execution payloads as well:
                for item in content["payload"]:
                    text = item.get('text', None)
                    if text:
                        page.page(text)

            elif status == 'error':
                for frame in content["traceback"]:
                    print(frame, file=io.stderr)

            self.execution_count = int(content["execution_count"] + 1)

    def handle_iopub(self, msg_id):
        """ Method to process subscribe channel's messages

           This method consumes and processes messages on the IOPub channel,
           such as stdout, stderr, pyout and status.
           
           It only displays output that is caused by the given msg_id
        """
        while self.client.iopub_channel.msg_ready():
            sub_msg = self.client.iopub_channel.get_msg()
            msg_type = sub_msg['header']['msg_type']
            parent = sub_msg["parent_header"]
            if (not parent) or msg_id == parent['msg_id']:
                if msg_type == 'status':
                    state = self._execution_state = sub_msg["content"][
                        "execution_state"]
                    # idle messages mean an individual sequence is complete,
                    # so break out of consumption to allow other things to take over.
                    if state == 'idle':
                        break

                elif msg_type == 'stream':
                    if sub_msg["content"]["name"] == "stdout":
                        print(sub_msg["content"]["data"],
                              file=io.stdout,
                              end="")
                        io.stdout.flush()
                    elif sub_msg["content"]["name"] == "stderr":
                        print(sub_msg["content"]["data"],
                              file=io.stderr,
                              end="")
                        io.stderr.flush()

                elif msg_type == 'pyout':
                    self.execution_count = int(
                        sub_msg["content"]["execution_count"])
                    format_dict = sub_msg["content"]["data"]
                    self.handle_rich_data(format_dict)
                    # taken from DisplayHook.__call__:
                    hook = self.displayhook
                    hook.start_displayhook()
                    hook.write_output_prompt()
                    hook.write_format_data(format_dict)
                    hook.log_output(format_dict)
                    hook.finish_displayhook()

                elif msg_type == 'display_data':
                    data = sub_msg["content"]["data"]
                    handled = self.handle_rich_data(data)
                    if not handled:
                        # if it was an image, we handled it by now
                        if 'text/plain' in data:
                            print(data['text/plain'])

    _imagemime = {
        'image/png': 'png',
        'image/jpeg': 'jpeg',
        'image/svg+xml': 'svg',
    }

    def handle_rich_data(self, data):
        for mime in self.mime_preference:
            if mime in data and mime in self._imagemime:
                self.handle_image(data, mime)
                return True

    def handle_image(self, data, mime):
        handler = getattr(self, 'handle_image_{0}'.format(self.image_handler),
                          None)
        if handler:
            handler(data, mime)

    def handle_image_PIL(self, data, mime):
        if mime not in ('image/png', 'image/jpeg'):
            return
        import PIL.Image
        raw = base64.decodestring(data[mime].encode('ascii'))
        img = PIL.Image.open(BytesIO(raw))
        img.show()

    def handle_image_stream(self, data, mime):
        raw = base64.decodestring(data[mime].encode('ascii'))
        imageformat = self._imagemime[mime]
        fmt = dict(format=imageformat)
        args = [s.format(**fmt) for s in self.stream_image_handler]
        with open(os.devnull, 'w') as devnull:
            proc = subprocess.Popen(args,
                                    stdin=subprocess.PIPE,
                                    stdout=devnull,
                                    stderr=devnull)
            proc.communicate(raw)

    def handle_image_tempfile(self, data, mime):
        raw = base64.decodestring(data[mime].encode('ascii'))
        imageformat = self._imagemime[mime]
        filename = 'tmp.{0}'.format(imageformat)
        with NamedFileInTemporaryDirectory(filename) as f, \
                    open(os.devnull, 'w') as devnull:
            f.write(raw)
            f.flush()
            fmt = dict(file=f.name, format=imageformat)
            args = [s.format(**fmt) for s in self.tempfile_image_handler]
            subprocess.call(args, stdout=devnull, stderr=devnull)

    def handle_image_callable(self, data, mime):
        self.callable_image_handler(data)

    def handle_stdin_request(self, msg_id, timeout=0.1):
        """ Method to capture raw_input
        """
        msg_rep = self.client.stdin_channel.get_msg(timeout=timeout)
        # in case any iopub came while we were waiting:
        self.handle_iopub(msg_id)
        if msg_id == msg_rep["parent_header"].get("msg_id"):
            # wrap SIGINT handler
            real_handler = signal.getsignal(signal.SIGINT)

            def double_int(sig, frame):
                # call real handler (forwards sigint to kernel),
                # then raise local interrupt, stopping local raw_input
                real_handler(sig, frame)
                raise KeyboardInterrupt

            signal.signal(signal.SIGINT, double_int)

            try:
                raw_data = input(msg_rep["content"]["prompt"])
            except EOFError:
                # turn EOFError into EOF character
                raw_data = '\x04'
            except KeyboardInterrupt:
                sys.stdout.write('\n')
                return
            finally:
                # restore SIGINT handler
                signal.signal(signal.SIGINT, real_handler)

            # only send stdin reply if there *was not* another request
            # or execution finished while we were reading.
            if not (self.client.stdin_channel.msg_ready()
                    or self.client.shell_channel.msg_ready()):
                self.client.stdin_channel.input(raw_data)

    def mainloop(self, display_banner=False):
        while True:
            try:
                self.interact(display_banner=display_banner)
                #self.interact_with_readline()
                # XXX for testing of a readline-decoupled repl loop, call
                # interact_with_readline above
                break
            except KeyboardInterrupt:
                # this should not be necessary, but KeyboardInterrupt
                # handling seems rather unpredictable...
                self.write("\nKeyboardInterrupt in interact()\n")

    def wait_for_kernel(self, timeout=None):
        """method to wait for a kernel to be ready"""
        tic = time.time()
        self.client.hb_channel.unpause()
        while True:
            msg_id = self.client.kernel_info()
            reply = None
            while True:
                try:
                    reply = self.client.get_shell_msg(timeout=1)
                except Empty:
                    break
                else:
                    if reply['parent_header'].get('msg_id') == msg_id:
                        return True
            if timeout is not None \
                and (time.time() - tic) > timeout \
                and not self.client.hb_channel.is_beating():
                # heart failed
                return False
        return True

    def interact(self, display_banner=None):
        """Closely emulate the interactive Python console."""

        # batch run -> do not interact
        if self.exit_now:
            return

        if display_banner is None:
            display_banner = self.display_banner

        if isinstance(display_banner, string_types):
            self.show_banner(display_banner)
        elif display_banner:
            self.show_banner()

        more = False

        # run a non-empty no-op, so that we don't get a prompt until
        # we know the kernel is ready. This keeps the connection
        # message above the first prompt.
        if not self.wait_for_kernel(self.kernel_timeout):
            error("Kernel did not respond\n")
            return

        if self.has_readline:
            self.readline_startup_hook(self.pre_readline)
            hlen_b4_cell = self.readline.get_current_history_length()
        else:
            hlen_b4_cell = 0
        # exit_now is set by a call to %Exit or %Quit, through the
        # ask_exit callback.

        while not self.exit_now:
            if not self.client.is_alive():
                # kernel died, prompt for action or exit

                action = "restart" if self.manager else "wait for restart"
                ans = self.ask_yes_no("kernel died, %s ([y]/n)?" % action,
                                      default='y')
                if ans:
                    if self.manager:
                        self.manager.restart_kernel(True)
                    self.wait_for_kernel(self.kernel_timeout)
                else:
                    self.exit_now = True
                continue
            try:
                # protect prompt block from KeyboardInterrupt
                # when sitting on ctrl-C
                self.hooks.pre_prompt_hook()
                if more:
                    try:
                        prompt = self.prompt_manager.render('in2')
                    except Exception:
                        self.showtraceback()
                    if self.autoindent:
                        self.rl_do_indent = True

                else:
                    try:
                        prompt = self.separate_in + self.prompt_manager.render(
                            'in')
                    except Exception:
                        self.showtraceback()

                line = self.raw_input(prompt)
                if self.exit_now:
                    # quick exit on sys.std[in|out] close
                    break
                if self.autoindent:
                    self.rl_do_indent = False

            except KeyboardInterrupt:
                #double-guard against keyboardinterrupts during kbdint handling
                try:
                    self.write('\nKeyboardInterrupt\n')
                    source_raw = self.input_splitter.raw_reset()
                    hlen_b4_cell = self._replace_rlhist_multiline(
                        source_raw, hlen_b4_cell)
                    more = False
                except KeyboardInterrupt:
                    pass
            except EOFError:
                if self.autoindent:
                    self.rl_do_indent = False
                    if self.has_readline:
                        self.readline_startup_hook(None)
                self.write('\n')
                self.exit()
            except bdb.BdbQuit:
                warn(
                    'The Python debugger has exited with a BdbQuit exception.\n'
                    'Because of how pdb handles the stack, it is impossible\n'
                    'for IPython to properly format this particular exception.\n'
                    'IPython will resume normal operation.')
            except:
                # exceptions here are VERY RARE, but they can be triggered
                # asynchronously by signal handlers, for example.
                self.showtraceback()
            else:
                try:
                    self.input_splitter.push(line)
                    more = self.input_splitter.push_accepts_more()
                except SyntaxError:
                    # Run the code directly - run_cell takes care of displaying
                    # the exception.
                    more = False
                if (self.SyntaxTB.last_syntax_error and self.autoedit_syntax):
                    self.edit_syntax_error()
                if not more:
                    source_raw = self.input_splitter.raw_reset()
                    hlen_b4_cell = self._replace_rlhist_multiline(
                        source_raw, hlen_b4_cell)
                    self.run_cell(source_raw)

        # Turn off the exit flag, so the mainloop can be restarted if desired
        self.exit_now = False
Example #8
class CorrModel(SpectraModel):
    """ """
    corr2d = Any
    corr3d = Any
    contour3d = Any

    plottype = Enum(
        ['sync', 'async', 'phase', 'modulous', 'sync_codist', 'async_codist'],
        sync=True)
    plot3d = Enum([
        'corr2d',
        'corr3d',
        'contour3d',
    ])
    scalea = Float(sync=True)
    scaleb = Float(1, sync=True)
    fill = Bool(False, sync=True)

    #Scaling
    specscale_position_start = Float(sync=True)
    specscale_position_end = Float(sync=True)
    specscale_start = Float(sync=True)
    specscale_end = Float(sync=True)
    specstep = Float(sync=True)
    specspacing = Int(1, sync=True)

    def _spec_modified_changed(self):
        self.corr2d = Corr2d(self.spec_modified)

    def _spec_corrmodified_changed(self):
        self.corr3d = Corr2d(self.spec_modified)

    def _spec_contourmodified_changed(self):
        self.contour3d = Corr2d(self.spec_modified)

    def _plottype_default(self):
        return 'sync'

    def _plottype_changed(self, name, old, new):
        self.draw(name, old, new)

    def _plot3d_default(self):
        return 'corr2d'

    def _plot3d_changed(self, name, old, new):
        self.draw(name, old, new)

    def _scalea_changed(self, name, old, new):
        self.corr2d.scale(alpha=self.scalea, beta=self.scaleb)
        self.draw(name, old, new)

    def _scaleb_changed(self, name, old, new):
        self.corr2d.scale(alpha=self.scalea, beta=self.scaleb)
        self.draw(name, old, new)

    def _fill_changed(self, name, old, new):
        self.draw(name, old, new)

        print(self.corr2d.sync)

    def draw(self, name=None, old=None, new=None):
        if name is not None and self.DONT_DRAW.match(name):
            return

        if self._FREEZE:
            return

        plot_and_message = ''

        # Better way would be a decorator or something that only goes into draw if not autoupdate
        if self.autoupdate:

            # Generate new figure object
            f = plt.figure(figsize=(self.figwidth, self.figheight))
            if self.plot3d in ['corr3d', 'contour3d']:
                projection = '3d'
            else:
                projection = None
            ax = f.add_subplot(111, projection=projection)

            ### UNCOMMENT ME FOR IMAGE
            #from skimage import data
            #from image_inspector.linetool import ThickLineTool

            #image = data.camera()

            #f, ax = plt.subplots()
            #ax.imshow(image, interpolation='nearest')
            #h, w = image.shape

            #def roi_changed(roi):
            #print(roi.shape, roi.geometry, roi.data.shape)

            ## line_tool = LineTool(ax)
            #line_tool = ThickLineTool(ax)
            #line_tool.connect_event('roi_changed', roi_changed)
            #line_tool.end_points = ([w/3, h/2], [2*w/3, h/2])

            if self._color_state or self.kind not in [
                    'spec', 'waterfall', 'contour', 'contour3d'
            ]:
                colorkwags = dict(color=self.color)
            else:
                colorkwags = dict(cmap=self.colormap, cbar=self.colorbar)

            #self.spec_modified.plot(ax=ax,
            #fig=f,
            #kind=self.kind,
            #norm=self.NORMUNITS_REV[self.norm_unit],
            #**colorkwags
            #)
            f.tight_layout()  #Padding around plot
            lines = ax.get_lines()

            #aka async.plot(kind='contour3d')
            pltkwds = dict(kind=self.plot3d, fill=self.fill, fig=f)
            pltkwds.update(colorkwags)

            # 3d correlation plots need an axes, but 2d plots don't because a 2d corr
            # plot is really 4 axes together...
            if projection == '3d':
                pltkwds['ax'] = ax
                #       pltkwds['elev'] = 100
                #       pltkwds['azim'] = 99
                #       pltkwds['projection'] = 'xz'  #xy, xz, zy, yx, zx, yz
                # Corr3d has special arguments called contourkwds (PROOF OF CONCEPT)
                pltkwds['contourkwargs'] = dict(fill=self.fill)

            getattr(self.corr2d, self.plottype).plot(**pltkwds)

            plt.close(f)

            #http://mpld3.github.io/modules/API.html
            if self.interactive:
                import mpld3
                if self.selectlines:
                    from line_plugin import HighlightLines

                    for idx, col in enumerate(self.spec_modified.columns):
                        name = 'COLUMN(%s): %s' % (idx, col)
                        tooltip = mpld3.plugins.LineLabelTooltip(
                            lines[idx], name)
                        #voffset=10, hoffset=10,  css=css)
                        mpld3.plugins.connect(f, tooltip)

                    mpld3.plugins.connect(f, HighlightLines(lines))

                plot_and_message += mpld3.fig_to_html(f)
            else:
                plot_and_message += mpl2html(f)

            self.fig_old = f

        else:
            plot_and_message += html_figure(self.fig_old)

        # VALUE IS WHAT GUI LOOKS UP!!!
        self.value = plot_and_message


### TO FIX ###
# When changing normunit and slicing, spec_modified doesn't update normunit, iunit, varunit
# 3D plot is not drawing; we think it is a bug
Example #9
class IPEngineApp(BaseParallelApplication):

    name = Unicode(u'ipengine')
    description = Unicode(_description)
    examples = _examples
    config_file_name = Unicode(default_config_file_name)
    classes = List([ProfileDir, Session, EngineFactory, Kernel, MPI])

    startup_script = Unicode(u'',
                             config=True,
                             help='specify a script to be run at startup')
    startup_command = Unicode('',
                              config=True,
                              help='specify a command to be run at startup')

    url_file = Unicode(
        u'',
        config=True,
        help=
        """The full location of the file containing the connection information for
        the controller. If this is not given, the file must be in the
        security directory of the cluster directory.  This location is
        resolved using the `profile` or `profile_dir` options.""",
    )
    wait_for_url_file = Float(
        5,
        config=True,
        help="""The maximum number of seconds to wait for url_file to exist.
        This is useful for batch-systems and shared-filesystems where the
        controller and engine are started at the same time and it
        may take a moment for the controller to write the connector files.""")

    url_file_name = Unicode(u'ipcontroller-engine.json')
    log_url = Unicode(
        '',
        config=True,
        help="""The URL for the iploggerapp instance, for forwarding
        logging to a central location.""")

    aliases = Dict(aliases)

    # def find_key_file(self):
    #     """Set the key file.
    #
    #     Here we don't try to actually see if it exists or is valid, as that
    #     is handled by the connection logic.
    #     """
    #     config = self.master_config
    #     # Find the actual controller key file
    #     if not config.Global.key_file:
    #         try_this = os.path.join(
    #             config.Global.profile_dir,
    #             config.Global.security_dir,
    #             config.Global.key_file_name
    #         )
    #         config.Global.key_file = try_this

    def find_url_file(self):
        """Set the url file.

        Here we don't try to actually see if it exists or is valid, as that
        is handled by the connection logic.
        """
        config = self.config
        # Find the actual controller key file
        if not self.url_file:
            self.url_file = os.path.join(self.profile_dir.security_dir,
                                         self.url_file_name)

    def init_engine(self):
        # This is the working dir by now.
        sys.path.insert(0, '')
        config = self.config
        # print config
        self.find_url_file()

        # was the url manually specified?
        keys = set(self.config.EngineFactory.keys())
        keys = keys.union(set(self.config.RegistrationFactory.keys()))

        if keys.intersection(set(['ip', 'url', 'port'])):
            # Connection info was specified, don't wait for the file
            url_specified = True
            self.wait_for_url_file = 0
        else:
            url_specified = False

        if self.wait_for_url_file and not os.path.exists(self.url_file):
            self.log.warn("url_file %r not found" % self.url_file)
            self.log.warn("Waiting up to %.1f seconds for it to arrive." %
                          self.wait_for_url_file)
            tic = time.time()
            while not os.path.exists(self.url_file) and (
                    time.time() - tic < self.wait_for_url_file):
                # wait for url_file to exist, or until time limit
                time.sleep(0.1)

        if os.path.exists(self.url_file):
            self.log.info("Loading url_file %r" % self.url_file)
            with open(self.url_file) as f:
                d = json.loads(f.read())
            if d['exec_key']:
                config.Session.key = asbytes(d['exec_key'])
            d['url'] = disambiguate_url(d['url'], d['location'])
            config.EngineFactory.url = d['url']
            config.EngineFactory.location = d['location']
        elif not url_specified:
            self.log.critical("Fatal: url file never arrived: %s" %
                              self.url_file)
            self.exit(1)

        try:
            exec_lines = config.Kernel.exec_lines
        except AttributeError:
            config.Kernel.exec_lines = []
            exec_lines = config.Kernel.exec_lines

        if self.startup_script:
            enc = sys.getfilesystemencoding() or 'utf8'
            cmd = "execfile(%r)" % self.startup_script.encode(enc)
            exec_lines.append(cmd)
        if self.startup_command:
            exec_lines.append(self.startup_command)

        # Create the underlying shell class and Engine
        # shell_class = import_item(self.master_config.Global.shell_class)
        # print self.config
        try:
            self.engine = EngineFactory(config=config, log=self.log)
        except:
            self.log.error("Couldn't start the Engine", exc_info=True)
            self.exit(1)

    def forward_logging(self):
        if self.log_url:
            self.log.info("Forwarding logging to %s" % self.log_url)
            context = self.engine.context
            lsock = context.socket(zmq.PUB)
            lsock.connect(self.log_url)
            self.log.removeHandler(self._log_handler)
            handler = EnginePUBHandler(self.engine, lsock)
            handler.setLevel(self.log_level)
            self.log.addHandler(handler)
            self._log_handler = handler

    #
    def init_mpi(self):
        global mpi
        self.mpi = MPI(config=self.config)

        mpi_import_statement = self.mpi.init_script
        if mpi_import_statement:
            try:
                self.log.info("Initializing MPI:")
                self.log.info(mpi_import_statement)
                exec(mpi_import_statement, globals())
            except:
                mpi = None
        else:
            mpi = None

    def initialize(self, argv=None):
        super(IPEngineApp, self).initialize(argv)
        self.init_mpi()
        self.init_engine()
        self.forward_logging()

    def start(self):
        self.engine.start()
        try:
            self.engine.loop.start()
        except KeyboardInterrupt:
            self.log.critical("Engine Interrupted, shutting down...\n")
Example #10
class ScalarSensorWidget(widgets.DOMWidget, AlignableWidget):
    _view_name = Unicode('ScalarSensorView', sync=True)
    value = Float(sync=True)
    sensor_type = Unicode(sync=True)
    sensor_unit = Unicode(sync=True)
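Traits marked sync=True are mirrored to the JavaScript view named by _view_name. A hypothetical notebook usage, assuming a 'ScalarSensorView' JS view has been registered on the front end:

w = ScalarSensorWidget(value=21.5, sensor_type='temperature', sensor_unit='C')
display(w)      # rendered by the registered JS view
w.value = 22.0  # trait changes are pushed to the front end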
Example #11
class SpectraModel(HTML, Box):
    """
    A notional "complex widget" that knows how to redraw itself when key
    properties change.
    """

    # CONSTANTS (These are not traits)
    classname = Unicode("btn btn-success", sync=True)
    title = Unicode("Popover Test", sync=True)
    CONTENT = Unicode(
        """Lovely popover :D. Color in green using class btn btn-success""",
        sync=True)
    html = Bool(sync=True)

    DONT_DRAW = re.compile(
        r'^(_.+|value|keys|comm|children|visible|parent|log|config|msg_throttle)$'
    )
    SPECUNITS = aunps_glass().specunits()
    VARUNITS = aunps_glass().varunits()
    NORMUNITS = NUdic
    SPECUNITS_REV = OrderedDict((v, k) for k, v in SPECUNITS.items())
    VARUNITS_REV = OrderedDict((v, k) for k, v in VARUNITS.items())
    NORMUNITS_REV = OrderedDict((v, k) for k, v in NORMUNITS.items())
    COLORS = ["b", "g", "r", "y", "k"]
    COLORMAPS = sorted(m for m in plt.cm.datad if not m.endswith("_r"))
    SLIDER_STEPS = Float(25)

    # IO traitlets
    load_spec = Bool(False, sync=True)
    load_file = Bool(True, sync=True)  #
    file_name = Unicode("<Import Variable Name>", sync=True)
    save_spec = Bool(False, sync=True)
    save_spec_as = Unicode("<Export Name>", sync=True)

    inbox = Bool(False, sync=True)
    outbox = Bool(False, sync=True)

    # Spectra traits
    spec = Instance(Spectra)
    testdataset = Unicode('<Dataset>', sync=True)
    spec_modified = Instance(Spectra)

    # Plotting Traits
    figwidth = Float(6.5)
    figheight = Float(6.5)
    interactive = Bool(False, sync=True)
    colorbar = Bool(False, sync=True)
    autoupdate = Bool(True, sync=True)
    colormap = Enum(COLORMAPS, sync=True)
    color = Enum(COLORS, default_value='k', sync=True)
    advancedbox = Bool(False, sync=True)
    cmapbox = Bool(False, sync=True)
    colorbox = Bool(False, sync=True)
    kind = Enum(PLOTPARSER.keys(), default_value='spec', sync=True)
    selectlines = Bool(False, sync=True)

    # Units
    spec_unit = Enum(SPECUNITS.values(), sync=True)
    var_unit = Enum(VARUNITS.values(), sync=True)
    iunit = Unicode
    norm_unit = Enum(NORMUNITS.values(), sync=True)

    # Message/warnings
    message = Unicode

    # Sampling/slicing
    #specslice_axis = Enum([0,1], default_value=0, sync=True)
    specslice_position_start = Float(sync=True)
    specslice_position_end = Float(sync=True)
    specslider_start = Float(sync=True)
    specslider_end = Float(sync=True)
    specstep = Float(sync=True)
    specspacing = Int(1, sync=True)

    timeslice_position_start = Float(sync=True)
    timeslice_position_end = Float(sync=True)
    timeslider_start = Float(sync=True)
    timeslider_end = Float(sync=True)
    timestep = Float(sync=True)
    timespacing = Int(1, sync=True)

    specbox = Bool(False, sync=True)
    timebox = Bool(False, sync=True)
    scalebox = Bool(False, sync=True)

    # User Defined Function
    user_f = Unicode(sync=True)

    def __init__(self, *args, **kwargs):

        # Initialize traits (_spec_changed calls initial draw)
        super(SpectraModel, self).__init__(*args, **kwargs)
        self._dom_classes += ("col-xs-9", )

    # DEFAULTS
    # --------
    def _spec_default(self):
        return getattr(skspec.data, 'aunps_water')()

    def _colormap_default(self):
        return pvconf.CMAP_1DSPECPLOT  #Use skspec config default (red/blue map)

    # Events
    # ------
    def _spec_changed(self, name, old, new):
        """Overall spectrum changes; triggers most events."""

        # Leave this at this position in loop
        self.spec_modified = self.spec
        # --------------

        self._FREEZE = True  #pause draws/slicing

        # Units
        self.spec_unit = self.spec.full_specunit
        self.var_unit = self.spec.full_varunit
        self.norm_unit = self.spec.full_norm
        self.iunit = self.spec.full_iunit

        # Spec slicing
        self.specslice_position_start = self.spec.index[0]
        self.specslice_position_end = self.spec.index[-1]
        self.specslider_start = self.spec.index[0]
        self.specslider_end = self.spec.index[-1]
        self.specstep = (self.spec.index.max() -
                         self.spec.index.min()) / self.SLIDER_STEPS
        self.specspacing = 1

        self.timeslice_position_start = self.spec.columns[0]
        self.timeslice_position_end = self.spec.columns[-1]
        self.timeslider_start = self.spec.columns[0]
        self.timeslider_end = self.spec.columns[-1]
        self.timestep = 10  #(self.spec.columns.max() - self.spec.columns.min())/self.SLIDER_STEPS
        self.timespacing = 1

        # Plot defaults to color map
        self._color_state = False

        self._FREEZE = False
        self.draw(name, old, new)

    def _autoupdate_changed(self, name, old, new):
        if self.autoupdate == True:
            self.draw(name, old, new)

    def _norm_unit_changed(self, name, old, new):
        self.spec_modified = self.spec_modified.as_norm(
            self.NORMUNITS_REV[new])
        self.draw(name, old, new)

    def _spec_unit_changed(self, name, old, new):
        self.spec_modified.specunit = self.SPECUNITS_REV[new]
        self.draw(name, old, new)

    def _var_unit_changed(self, name, old, new):
        self.spec_modified.varunit = self.VARUNITS_REV[new]
        self.draw(name, old, new)

    def _iunit_changed(self, name, old, new):
        self.spec_modified.iunit = new
        self.draw(name, old, new)

    # Plotting events
    # ---------------
    def _figwidth_changed(self, name, old, new):
        self.draw(name, old, new)

    def _figheight_changed(self, name, old, new):
        self.draw(name, old, new)

    def _colormap_changed(self, name, old, new):
        self._color_state = False
        self.draw(name, old, new)

    def _color_changed(self):
        """ Because this sets colorbar, might cause 2 redraws,
            so _FREEZE is used to prevent this.
            """
        self._FREEZE = True
        self.colorbar = False
        self._color_state = True
        self._FREEZE = False
        self.draw()

    def _colorbar_changed(self, name, old, new):
        self._color_state = False
        self.draw(name, old, new)

    def _interactive_changed(self, name, old, new):
        self.draw(name, old, new)

    # This should be phased out; plots should support colormap, area should handle accordingly
    def _kind_changed(self, name, old, new):
        self.draw(name, old, new)

    def _selectlines_changed(self, name, old, new):
        if self.interactive:
            self.draw(name, old, new)

    # IO Events
    # ---------
    # THIS SHOULD BE LOAD BUTTON CLICKED!!!!
    def _file_name_changed(self):
        try:
            self.spec = getattr(skspec.data, self.file_name)()
        except AttributeError:
            pass

    @log_message
    def save_plot(self):
        self.fig_old.savefig(self.save_spec_as + '.png')

    @log_message
    def save_to_ns(self):
        get_ipython().user_ns[self.save_spec_as] = self.spec_modified

    @log_message
    def load_from_ns(self):
        self.spec = get_ipython().user_ns[self.file_name].as_varunit("s")
        #def load_from_ns(self, var):
        #self.spec = get_ipython().user_ns[var]

    # Slicing events
    # --------------
    def _specslice_position_start_changed(self, name, old, new):
        if not self._FREEZE:
            self.slice_spectrum(name)
            self.draw(name, old, new)

    def _specslice_position_end_changed(self, name, old, new):
        if not self._FREEZE:
            self.slice_spectrum(name)
            self.draw(name, old, new)

    def _timeslice_position_start_changed(self, name, old, new):
        if not self._FREEZE:
            self.slice_spectrum(name)
            self.draw(name, old, new)

    def _timeslice_position_end_changed(self, name, old, new):
        if not self._FREEZE:
            self.slice_spectrum(name)
            self.draw(name, old, new)

    def _timespacing_changed(self, name, old, new):
        """ Don't let user set less than 1 or more than dataset size"""
        # Will have to update when add var/spec slicing
        #axis = self.slice_axis
        if self.timespacing < 1:
            self.timespacing = 1
        elif self.timespacing > self.spec_modified.shape[1]:
            self.timespacing = self.spec_modified.shape[1]

        self.slice_spectrum(name)
        self.draw(name, old, new)

    def _specspacing_changed(self, name, old, new):
        """ Don't let user set less than 1 or more than dataset size"""
        # Will have to update when add var/spec slicing
        #axis = self.slice_axis
        if self.specspacing < 1:
            self.specspacing = 1
        elif self.specspacing > self.spec_modified.shape[0]:
            self.specspacing = self.spec_modified.shape[0]

        self.slice_spectrum(name)
        self.draw(name, old, new)

    # Draw/Slice updates
    # ------------------
    @log_message
    def slice_spectrum(self, name=None):
        """ Slice and resample spectra """
        self.spec_modified = self.spec.nearby[
            self.specslice_position_start:self.specslice_position_end:self.specspacing,
            self.timeslice_position_start:self.timeslice_position_end:self.timespacing]

    @log_message
    def apply_userf(self, name=None):
        import numpy as np  # made available to the eval'd user function
        self.spec_modified = self.spec_modified.apply(eval(self.user_f))
        self.draw(name)

    @log_message
    def draw(self, name=None, old=None, new=None):
        if name is not None and self.DONT_DRAW.match(name):
            return

        if self._FREEZE:
            return

        plot_and_message = ''

            # A cleaner approach would be a decorator that only regenerates
            # the figure when autoupdate is set
        if self.autoupdate:

            # Generate new figure object
            f = plt.figure(figsize=(self.figwidth, self.figheight))
            if PLOTPARSER.is_3d(self.kind):
                projection = '3d'
            else:
                projection = None
            ax = f.add_subplot(111, projection=projection)

            if self._color_state or self.kind not in [
                    'spec', 'waterfall', 'contour', 'contour3d'
            ]:
                colorkwargs = dict(color=self.color)
            else:
                colorkwargs = dict(cmap=self.colormap, cbar=self.colorbar)

            self.spec_modified.plot(ax=ax,
                                    fig=f,
                                    kind=self.kind,
                                    norm=self.NORMUNITS_REV[self.norm_unit],
                                    **colorkwargs)
            f.tight_layout()  # padding around the plot
            lines = ax.get_lines()
            plt.close(f)

            #http://mpld3.github.io/modules/API.html
            if self.interactive:
                import mpld3
                if self.selectlines:
                    from line_plugin import HighlightLines

                    for idx, col in enumerate(self.spec_modified.columns):
                        name = 'COLUMN(%s): %s' % (idx, col)
                        tooltip = mpld3.plugins.LineLabelTooltip(
                            lines[idx], name)
                        #voffset=10, hoffset=10,  css=css)
                        mpld3.plugins.connect(f, tooltip)

                    mpld3.plugins.connect(f, HighlightLines(lines))

                plot_and_message += mpld3.fig_to_html(f)
            else:
                plot_and_message += mpl2html(f)

            self.fig_old = f

        else:
            plot_and_message += html_figure(self.fig_old)

        # VALUE IS WHAT GUI LOOKS UP!!!
        self.value = plot_and_message
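
The non-interactive branch above hands the finished figure to mpl2html, while the interactive branch uses mpld3.fig_to_html. Below is a minimal sketch of the static path, assuming the common base64-embedded-PNG approach; the helper name fig_to_html_static and the exact markup are illustrative, not the original mpl2html implementation.

import base64
from io import BytesIO

import matplotlib
matplotlib.use('Agg')  # headless backend for this sketch
import matplotlib.pyplot as plt


def fig_to_html_static(fig):
    """Embed a matplotlib figure in HTML as a base64-encoded PNG."""
    buf = BytesIO()
    fig.savefig(buf, format='png', bbox_inches='tight')
    data = base64.b64encode(buf.getvalue()).decode('ascii')
    return '<img src="data:image/png;base64,%s"/>' % data


fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
html = fig_to_html_static(fig)  # interactive path would use mpld3.fig_to_html(fig)
plt.close(fig)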
Example #12
class ZMQCompleter(IPCompleter):
    """Client-side completion machinery.

    How it works: self.complete will be called multiple times, with
    state=0,1,2,... When state=0 it should compute ALL the completion matches,
    and then return them for each value of state."""

    timeout = Float(5.0, config=True, help='timeout before completion abort')

    def __init__(self, shell, client, config=None):
        super(ZMQCompleter, self).__init__(config=config)

        self.shell = shell
        self.client = client
        self.matches = []
        # don't do any splitting client-side,
        # rely on the kernel for that
        self.splitter.delims = '\r\n'
        if self.readline:
            self.readline.set_completer_delims('\r\n')

    def complete_request(self, text):
        line = str_to_unicode(readline.get_line_buffer())
        byte_cursor_pos = readline.get_endidx()

        # get_endidx is a byte offset
        # account for multi-byte characters to get correct cursor_pos
        bytes_before_cursor = cast_bytes(line)[:byte_cursor_pos]
        cursor_pos = len(cast_unicode(bytes_before_cursor))

        # send completion request to kernel
        # give the kernel up to self.timeout seconds to respond
        msg_id = self.client.complete(
            code=line,
            cursor_pos=cursor_pos,
        )

        msg = self.client.shell_channel.get_msg(timeout=self.timeout)
        if msg['parent_header']['msg_id'] == msg_id:
            content = msg['content']
            cursor_start = content['cursor_start']
            matches = [line[:cursor_start] + m for m in content['matches']]
            if content["cursor_end"] < cursor_pos:
                extra = line[content["cursor_end"]:cursor_pos]
                matches = [m + extra for m in matches]
            matches = [unicode_to_str(m) for m in matches]
            return matches
        return []

    def rlcomplete(self, text, state):
        if state == 0:
            try:
                self.matches = self.complete_request(text)
            except Empty:
                #print('WARNING: Kernel timeout on tab completion.')
                pass

        try:
            return self.matches[state]
        except IndexError:
            return None

    def complete(self, text, line, cursor_pos=None):
        return self.rlcomplete(text, 0)
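
ZMQCompleter's docstring describes the readline completion contract that rlcomplete implements: the completer is called repeatedly with state=0,1,2,... until it returns None. A self-contained sketch of that contract, with made-up sample words:

class DummyCompleter(object):
    """Illustrative only; mirrors the state protocol used by rlcomplete."""

    def __init__(self, words):
        self.words = words
        self.matches = []

    def rlcomplete(self, text, state):
        if state == 0:  # compute the full match list once, on the first call
            self.matches = [w for w in self.words if w.startswith(text)]
        try:
            return self.matches[state]
        except IndexError:
            return None  # signals readline that there are no more matches


c = DummyCompleter(['import', 'input', 'int'])
print([c.rlcomplete('in', s) for s in range(4)])  # ['input', 'int', None, None]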
Example #13
class Modeler(Device):
    name = 'model'
    path = 'msmaccelerator.model.modeler.Modeler'
    short_description = 'Run the modeler, building an MSM on the available data'
    long_description = '''This device will connect to the msmaccelerator server,
        request the currently available data and build an MSM. That MSM will be
        used by the server to drive future rounds of adaptive sampling.
        Currently, you can use either RMSD (built-in) or a custom distance metric
        (provide the pickle file) with the k-centers clustering algorithm.'''

    stride = Int(1,
                 config=True,
                 help='''Subsample data by taking only
        every stride-th point''')
    topology_pdb = FilePath(config=True,
                            extension='.pdb',
                            help='''PDB file
        giving the topology of the system''')
    lag_time = Int(1,
                   config=True,
                   help='''Lag time for building the
        model, in units of the stride. Currently, we are not doing the step
        in MSMBuilder that is referred to as "assignment", where you assign
        the remaining data that was not used during clustering to the cluster
        centers that were identified.''')
    rmsd_atom_indices = FilePath(
        'AtomIndices.dat',
        extension='.dat',
        config=True,
        help='''File containing the indices of atoms to use in the RMSD
        computation. Using a PDB as input, this file can be created with
        the MSMBuilder script CreateAtomIndices.py''')
    clustering_distance_cutoff = Float(0.2,
                                       config=True,
                                       help='''Distance cutoff for
        clustering, in nanometers. We will continue to create new clusters
        until each data point is within this cutoff from its cluster center.'''
                                       )
    symmetrize = Enum(
        ['MLE', 'Transpose', None],
        default='MLE',
        config=True,
        help='''Symmetrization method for constructing the reversible counts
        matrix.''')
    ergodic_trimming = Bool(False,
                            config=True,
                            help='''Do ergodic trimming when
        constructing the Markov state model. This is generally a good idea for
        building MSMs in the high-data regime where you wish to prevent transitions
        that appear nonergodic because they've been undersampled from influencing
        your model, but is inappropriate in the sparse-data regime when you're
        using min-counts sampling, because these are precisely the states that
        you're most interested in.''')
    use_custom_metric = Bool(False,
                             config=True,
                             help='''Should we use
         a custom distance metric for clustering instead of RMSD?''')
    custom_metric_path = Unicode('metric.pickl',
                                 config=True,
                                 help='''File
         containing a pickled metric for use in clustering.''')
    clusterer = Enum(['kcenters', 'hybrid', 'ward'],
                     default='kcenters',
                     config=True,
                     help='''The method used for clustering structures in
        the MSM.''')

    aliases = dict(
        stride='Modeler.stride',
        lag_time='Modeler.lag_time',
        rmsd_atom_indices='Modeler.rmsd_atom_indices',
        clustering_distance_cutoff='Modeler.clustering_distance_cutoff',
        topology_pdb='Modeler.topology_pdb',
        symmetrize='Modeler.symmetrize',
        trim='Modeler.ergodic_trimming',
        zmq_url='Device.zmq_url',
        zmq_port='Device.zmq_port')

    def on_startup_message(self, msg):
        """This method is called when the device receives its startup message
        from the server
        """
        assert msg.header.msg_type in ['construct_model'], 'only allowed methods'
        return getattr(self, msg.header.msg_type)(msg.header, msg.content)

    def construct_model(self, header, content):
        """All the model building code. This code is what's called by the
        server after registration."""
        # the message needs to not contain unicode
        assert content.output.protocol == 'localfs', "I'm currently only equipped for localfs output"

        # load up all of the trajectories
        trajs = self.load_trajectories(content.traj_fns)

        # run clustering
        assignments, generator_indices = self.cluster(trajs)

        # build the MSM
        counts, rev_counts, t_matrix, populations, mapping = self.build_msm(
            assignments)

        # save the results to disk
        msm = MarkovStateModel(counts=counts,
                               reversible_counts=rev_counts,
                               transition_matrix=t_matrix,
                               populations=populations,
                               mapping=mapping,
                               generator_indices=generator_indices,
                               traj_filenames=content.traj_fns,
                               assignments_stride=self.stride,
                               lag_time=self.lag_time,
                               assignments=assignments)
        msm.save(content.output.path)

        # tell the server that we're done
        self.send_recv(msg_type='modeler_done',
                       content={
                           'status': 'success',
                           'output': {
                               'protocol': 'localfs',
                               'path': content.output.path
                           },
                       })

    def load_trajectories(self, traj_fns):
        """Load up the trajectories, taking into account both the stride and
        the atom indices"""

        trajs = []
        if os.path.exists(self.rmsd_atom_indices):
            self.log.info('Loading atom indices from %s',
                          self.rmsd_atom_indices)
            atom_indices = np.loadtxt(self.rmsd_atom_indices, dtype=np.int)
        else:
            self.log.info('Skipping loading atom_indices. Using all.')
            atom_indices = None

        for traj_fn in traj_fns:
            # use the mdtraj dcd reader, but then monkey-patch
            # the coordinate array into shim for the msmbuilder clustering
            # code that wants the trajectory to act like a dict with the XYZList
            # key.
            self.log.info('Loading traj %s', traj_fn)
            if not os.path.exists(traj_fn):
                self.log.error(
                    'Traj file reported by server does not exist: %s' %
                    traj_fn)
                continue

            t = mdtraj.trajectory.load(traj_fn,
                                       atom_indices=atom_indices,
                                       top=self.topology_pdb)
            t2 = ShimTrajectory(t.xyz[::self.stride, :])

            trajs.append(t2)

        if len(trajs) == 0:
            raise ValueError('No trajectories found!')

        self.log.info('loaded %s trajectories', len(trajs))
        self.log.info('loaded %s total frames...', sum(len(t) for t in trajs))
        self.log.info('loaded %s atoms', t2['XYZList'].shape[1])

        return trajs

    def cluster(self, trajectories):
        """Cluster the trajectories into microstates.

        Returns
        -------
        assignments : np.ndarray, dtype=int, shape=[n_trajs, max_n_frames]
            assignments is a 2d array giving the microstate that each frame
            from the simulation is assigned to. The indexing semantics are
            a little bit nontrivial because of the striding and the lag time.
            They are that assignments[i,j]=k means that in the `ith` trajectory,
            the `j*self.stride`th frame is assigned to microstate `k`.
        generator_indices : np.ndarray, dtype=int, shape=[n_clusters, 2]
            This array gives the indices of the clusters centers, with respect
            to their position in the trajectories on disk. The semantics are
            that generator_indices[i, :]=[k,l] means that the `ith` cluster's center
            is in trajectory `k`, in its `l`th frame. Because of the striding,
            `l` will always be a multiple of `self.stride`.
        """
        if self.use_custom_metric:
            metric_path = self.custom_metric_path
            self.log.info("Loading custom metric: %s" % metric_path)
            # use a context manager so the pickle file is closed after loading
            with open(metric_path, 'rb') as pickle_file:
                metric = pickle.load(pickle_file)
        else:
            metric = msmbuilder.metrics.RMSD()

        if self.clusterer == 'kcenters':
            # Use k-centers clustering
            clusterer = msmbuilder.clustering.KCenters(
                metric,
                trajectories,
                distance_cutoff=self.clustering_distance_cutoff)
            assignments = clusterer.get_assignments()
        elif self.clusterer == 'ward':
            # Use ward clustering
            clusterer = msmbuilder.clustering.Hierarchical(metric,
                                                           trajectories,
                                                           method='ward')
            assignments = clusterer.get_assignments(
                self.clustering_distance_cutoff)
        elif self.clusterer == 'hybrid':
            # Use hybrid k-medoids clustering
            clusterer = msmbuilder.clustering.HybridKMedoids(
                metric,
                trajectories,
                k=None,
                distance_cutoff=self.clustering_distance_cutoff)
            assignments = clusterer.get_assignments()
        else:
            # an unknown clusterer would leave `clusterer` undefined below
            raise ValueError('Please choose an actual clusterer, not %r' %
                             self.clusterer)

        # if we get the generators as a trajectory, it will only
        # have the reduced set of atoms.

        # the clusterer contains indices with respect to the concatenated trajectory
        # inside the clusterer object. we need to reindex to get the
        # traj/frame index of each generator
        # print 'generator longindices', clusterer._generator_indices
        # print 'traj lengths         ', clusterer._traj_lengths
        generator_indices = reindex_list(clusterer._generator_indices,
                                         clusterer._traj_lengths)
        # print 'generator indices', generator_indices

        # but these indices are still with respect to the traj/frame
        # after striding, so we need to unstride them
        generator_indices[:, 1] *= self.stride

        # print generator_indices

        return assignments, generator_indices

    def build_msm(self, assignments):
        """Build the MSM from the microstate assigned trajectories"""
        counts = msmbuilder.MSMLib.get_count_matrix_from_assignments(
            assignments, lag_time=self.lag_time)

        result = msmbuilder.MSMLib.build_msm(
            counts,
            symmetrize=self.symmetrize,
            ergodic_trimming=self.ergodic_trimming)
        # unpack the results
        rev_counts, t_matrix, populations, mapping = result
        return counts, rev_counts, t_matrix, populations, mapping
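
build_msm delegates the counting to msmbuilder.MSMLib; as a toy illustration (not MSMBuilder's implementation), a lag-time count matrix over one discrete assignment trajectory can be tallied like this:

import numpy as np


def count_matrix(assignment, n_states, lag_time=1):
    """Count transitions i -> j separated by `lag_time` steps."""
    C = np.zeros((n_states, n_states), dtype=int)
    for t in range(len(assignment) - lag_time):
        C[assignment[t], assignment[t + lag_time]] += 1
    return C


traj = np.array([0, 0, 1, 1, 2, 0])  # hypothetical microstate assignments
print(count_matrix(traj, n_states=3, lag_time=1))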
Example #14
class MappingKernelManager(MultiKernelManager):
    """A KernelManager that handles notebok mapping and HTTP error handling"""

    kernel_argv = List(Unicode)

    time_to_dead = Float(3.0,
                         config=True,
                         help="""Kernel heartbeat interval in seconds.""")
    first_beat = Float(
        5.0,
        config=True,
        help="Delay (in seconds) before sending first heartbeat.")

    max_msg_size = Integer(65536,
                           config=True,
                           help="""
        The max raw message size accepted from the browser
        over a WebSocket connection.
    """)

    _notebook_mapping = Dict()

    #-------------------------------------------------------------------------
    # Methods for managing kernels and sessions
    #-------------------------------------------------------------------------

    def kernel_for_notebook(self, notebook_id):
        """Return the kernel_id for a notebook_id or None."""
        return self._notebook_mapping.get(notebook_id)

    def set_kernel_for_notebook(self, notebook_id, kernel_id):
        """Associate a notebook with a kernel."""
        if notebook_id is not None:
            self._notebook_mapping[notebook_id] = kernel_id

    def notebook_for_kernel(self, kernel_id):
        """Return the notebook_id for a kernel_id or None."""
        notebook_ids = [
            k for k, v in self._notebook_mapping.iteritems() if v == kernel_id
        ]
        if len(notebook_ids) == 1:
            return notebook_ids[0]
        else:
            return None

    def delete_mapping_for_kernel(self, kernel_id):
        """Remove the kernel/notebook mapping for kernel_id."""
        notebook_id = self.notebook_for_kernel(kernel_id)
        if notebook_id is not None:
            del self._notebook_mapping[notebook_id]

    def start_kernel(self, notebook_id=None, **kwargs):
        """Start a kernel for a notebok an return its kernel_id.

        Parameters
        ----------
        notebook_id : uuid
            The uuid of the notebook to associate the new kernel with. If this
            is not None, this kernel will be persistent whenever the notebook
            requests a kernel.
        """
        kernel_id = self.kernel_for_notebook(notebook_id)
        if kernel_id is None:
            kwargs['extra_arguments'] = self.kernel_argv
            kernel_id = super(MappingKernelManager,
                              self).start_kernel(**kwargs)
            self.set_kernel_for_notebook(notebook_id, kernel_id)
            self.log.info("Kernel started: %s" % kernel_id)
            self.log.debug("Kernel args: %r" % kwargs)
        else:
            self.log.info("Using existing kernel: %s" % kernel_id)
        return kernel_id

    def shutdown_kernel(self, kernel_id, now=False):
        """Shutdown a kernel and remove its notebook association."""
        self._check_kernel_id(kernel_id)
        super(MappingKernelManager, self).shutdown_kernel(kernel_id, now=now)
        self.delete_mapping_for_kernel(kernel_id)
        self.log.info("Kernel shutdown: %s" % kernel_id)

    def interrupt_kernel(self, kernel_id):
        """Interrupt a kernel."""
        self._check_kernel_id(kernel_id)
        super(MappingKernelManager, self).interrupt_kernel(kernel_id)
        self.log.info("Kernel interrupted: %s" % kernel_id)

    def restart_kernel(self, kernel_id):
        """Restart a kernel while keeping clients connected."""
        self._check_kernel_id(kernel_id)
        super(MappingKernelManager, self).restart_kernel(kernel_id)
        self.log.info("Kernel restarted: %s" % kernel_id)

    def create_iopub_stream(self, kernel_id):
        """Create a new iopub stream."""
        self._check_kernel_id(kernel_id)
        return super(MappingKernelManager, self).create_iopub_stream(kernel_id)

    def create_shell_stream(self, kernel_id):
        """Create a new shell stream."""
        self._check_kernel_id(kernel_id)
        return super(MappingKernelManager, self).create_shell_stream(kernel_id)

    def create_hb_stream(self, kernel_id):
        """Create a new hb stream."""
        self._check_kernel_id(kernel_id)
        return super(MappingKernelManager, self).create_hb_stream(kernel_id)

    def _check_kernel_id(self, kernel_id):
        """Check a that a kernel_id exists and raise 404 if not."""
        if kernel_id not in self:
            raise web.HTTPError(404, u'Kernel does not exist: %s' % kernel_id)
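
The notebook/kernel bookkeeping above reduces to a forward dict plus a linear reverse scan. A stand-alone sketch with made-up identifiers:

mapping = {}  # notebook_id -> kernel_id


def set_kernel(notebook_id, kernel_id):
    if notebook_id is not None:
        mapping[notebook_id] = kernel_id


def notebook_for(kernel_id):
    hits = [nb for nb, k in mapping.items() if k == kernel_id]
    return hits[0] if len(hits) == 1 else None


set_kernel('nb-1', 'k-1')
print(notebook_for('k-1'))  # 'nb-1'
print(notebook_for('k-2'))  # None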
Example #15
class ClusterManager(LoggingConfigurable):

    profiles = Dict()

    delay = Float(
        1.,
        config=True,
        help="delay (in s) between starting the controller and the engines")

    loop = Instance('zmq.eventloop.ioloop.IOLoop')

    def _loop_default(self):
        from zmq.eventloop.ioloop import IOLoop
        return IOLoop.instance()

    def build_launchers(self, profile_dir):
        from IPython.parallel.apps.ipclusterapp import IPClusterStart

        class DummyIPClusterStart(IPClusterStart):
            """Dummy subclass to skip init steps that conflict with global app.
    
            Instantiating and initializing this class should result in fully configured
            launchers, but no other side effects or state.
            """
            def init_signal(self):
                pass

            def reinit_logging(self):
                pass

        starter = DummyIPClusterStart(log=self.log)
        starter.initialize(['--profile-dir', profile_dir])
        cl = starter.controller_launcher
        esl = starter.engine_launcher
        n = starter.n
        return cl, esl, n

    def get_profile_dir(self, name, path):
        p = ProfileDir.find_profile_dir_by_name(path, name=name)
        return p.location

    def update_profiles(self):
        """List all profiles in the ipython_dir and cwd.
        """

        stale = set(self.profiles)
        for path in [get_ipython_dir(), py3compat.getcwd()]:
            for profile in list_profiles_in(path):
                if profile in stale:
                    stale.remove(profile)
                pd = self.get_profile_dir(profile, path)
                if profile not in self.profiles:
                    self.log.debug("Adding cluster profile '%s'", profile)
                    self.profiles[profile] = {
                        'profile': profile,
                        'profile_dir': pd,
                        'status': 'stopped'
                    }
        for profile in stale:
            # remove profiles that no longer exist
            self.log.debug("Profile '%s' no longer exists", profile)
            self.profiles.pop(profile)

    def list_profiles(self):
        self.update_profiles()
        # sorted list, but ensure that 'default' always comes first
        default_first = lambda name: name if name != 'default' else ''
        result = [
            self.profile_info(p)
            for p in sorted(self.profiles, key=default_first)
        ]
        return result

    def check_profile(self, profile):
        if profile not in self.profiles:
            raise web.HTTPError(404, u'profile not found')

    def profile_info(self, profile):
        self.check_profile(profile)
        result = {}
        data = self.profiles.get(profile)
        result['profile'] = profile
        result['profile_dir'] = data['profile_dir']
        result['status'] = data['status']
        if 'n' in data:
            result['n'] = data['n']
        return result

    def start_cluster(self, profile, n=None):
        """Start a cluster for a given profile."""
        self.check_profile(profile)
        data = self.profiles[profile]
        if data['status'] == 'running':
            raise web.HTTPError(409, u'cluster already running')
        cl, esl, default_n = self.build_launchers(data['profile_dir'])
        n = n if n is not None else default_n

        def clean_data():
            data.pop('controller_launcher', None)
            data.pop('engine_set_launcher', None)
            data.pop('n', None)
            data['status'] = 'stopped'

        def engines_stopped(r):
            self.log.debug('Engines stopped')
            if cl.running:
                cl.stop()
            clean_data()

        esl.on_stop(engines_stopped)

        def controller_stopped(r):
            self.log.debug('Controller stopped')
            if esl.running:
                esl.stop()
            clean_data()

        cl.on_stop(controller_stopped)
        loop = self.loop

        def start():
            """start the controller, then the engines after a delay"""
            cl.start()
            loop.add_timeout(self.loop.time() + self.delay,
                             lambda: esl.start(n))

        self.loop.add_callback(start)

        self.log.debug('Cluster started')
        data['controller_launcher'] = cl
        data['engine_set_launcher'] = esl
        data['n'] = n
        data['status'] = 'running'
        return self.profile_info(profile)

    def stop_cluster(self, profile):
        """Stop a cluster for a given profile."""
        self.check_profile(profile)
        data = self.profiles[profile]
        if data['status'] == 'stopped':
            raise web.HTTPError(409, u'cluster not running')
        data = self.profiles[profile]
        cl = data['controller_launcher']
        esl = data['engine_set_launcher']
        if cl.running:
            cl.stop()
        if esl.running:
            esl.stop()
        # Return a temp info dict, the real one is updated in the on_stop
        # logic above.
        result = {
            'profile': data['profile'],
            'profile_dir': data['profile_dir'],
            'status': 'stopped'
        }
        return result

    def stop_all_clusters(self):
        for p in self.profiles.keys():
            self.stop_cluster(p)
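
start_cluster starts the controller immediately and schedules the engines self.delay seconds later on the IOLoop. A runnable sketch of that delayed-start pattern using tornado's IOLoop directly; the launcher callbacks here are stubs, not the real launchers:

from tornado.ioloop import IOLoop


def start_controller():
    print('controller started')


def start_engines():
    print('engines started')
    IOLoop.current().stop()  # end the demo loop


loop = IOLoop.current()
delay = 1.0  # seconds, mirroring ClusterManager.delay
loop.add_callback(start_controller)
loop.add_timeout(loop.time() + delay, start_engines)
loop.start()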
Example #16
class KernelRestarter(LoggingConfigurable):
    """Monitor and autorestart a kernel."""

    kernel_manager = Instance('IPython.kernel.KernelManager')

    time_to_dead = Float(3.0,
                         config=True,
                         help="""Kernel heartbeat interval in seconds.""")

    restart_limit = Integer(
        5,
        config=True,
        help=
        """The number of consecutive autorestarts before the kernel is presumed dead."""
    )
    _restarting = Bool(False)
    _restart_count = Integer(0)

    callbacks = Dict()

    def _callbacks_default(self):
        return dict(restart=[], dead=[])

    def start(self):
        """Start the polling of the kernel."""
        raise NotImplementedError("Must be implemented in a subclass")

    def stop(self):
        """Stop the kernel polling."""
        raise NotImplementedError("Must be implemented in a subclass")

    def add_callback(self, f, event='restart'):
        """register a callback to fire on a particular event

        Possible values for event:

          'restart' (default): kernel has died, and will be restarted.
          'dead': restart has failed, kernel will be left dead.

        """
        self.callbacks[event].append(f)

    def remove_callback(self, f, event='restart'):
        """unregister a callback to fire on a particular event

        Possible values for event:

          'restart' (default): kernel has died, and will be restarted.
          'dead': restart has failed, kernel will be left dead.

        """
        try:
            self.callbacks[event].remove(f)
        except ValueError:
            pass

    def _fire_callbacks(self, event):
        """fire our callbacks for a particular event"""
        for callback in self.callbacks[event]:
            try:
                callback()
            except Exception as e:
                self.log.error("KernelRestarter: %s callback %r failed",
                               event,
                               callback,
                               exc_info=True)

    def poll(self):
        self.log.debug('Polling kernel...')
        if not self.kernel_manager.is_alive():
            if self._restarting:
                self._restart_count += 1
            else:
                self._restart_count = 1

            if self._restart_count >= self.restart_limit:
                self.log.warn("KernelRestarter: restart failed")
                self._fire_callbacks('dead')
                self._restarting = False
                self._restart_count = 0
                self.stop()
            else:
                self.log.info('KernelRestarter: restarting kernel (%i/%i)',
                              self._restart_count, self.restart_limit)
                self._fire_callbacks('restart')
                self.kernel_manager.restart_kernel(now=True)
                self._restarting = True
        else:
            if self._restarting:
                self.log.debug("KernelRestarter: restart apparently succeeded")
            self._restarting = False
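
The heart of poll() is the consecutive-failure accounting: each dead poll while a restart is pending increments the count, and any healthy poll resets it. A toy trace of that logic, purely illustrative:

restart_limit = 5
restarting, count = False, 0


def poll(alive):
    global restarting, count
    if not alive:
        count = count + 1 if restarting else 1
        if count >= restart_limit:
            print('dead after %d attempts' % count)
            restarting, count = False, 0
        else:
            print('restarting (%d/%d)' % (count, restart_limit))
            restarting = True
    else:
        if restarting:
            print('restart apparently succeeded')
        restarting = False


for alive in [False, False, True, False]:
    poll(alive)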
Example #17
class Kernel(Configurable):

    #---------------------------------------------------------------------------
    # Kernel interface
    #---------------------------------------------------------------------------

    # attribute to override with a GUI
    eventloop = Any(None)

    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
    session = Instance(Session)
    profile_dir = Instance('IPython.core.profiledir.ProfileDir')
    shell_socket = Instance('zmq.Socket')
    iopub_socket = Instance('zmq.Socket')
    stdin_socket = Instance('zmq.Socket')
    log = Instance(logging.Logger)

    # Private interface

    # Time to sleep after flushing the stdout/err buffers in each execute
    # cycle.  While this introduces a hard limit on the minimal latency of the
    # execute cycle, it helps prevent output synchronization problems for
    # clients.
    # Units are in seconds.  The minimum zmq latency on local host is probably
    # ~150 microseconds, set this to 500us for now.  We may need to increase it
    # a little if it's not enough after more interactive testing.
    _execute_sleep = Float(0.0005, config=True)

    # Frequency of the kernel's event loop.
    # Units are in seconds, kernel subclasses for GUI toolkits may need to
    # adapt to milliseconds.
    _poll_interval = Float(0.05, config=True)

    # If the shutdown was requested over the network, we leave here the
    # necessary reply message so it can be sent by our registered atexit
    # handler.  This ensures that the reply is only sent to clients truly at
    # the end of our shutdown process (which happens after the underlying
    # IPython shell's own shutdown).
    _shutdown_message = None

    # This is a dict of the port numbers that the kernel is listening on. It is set
    # by record_ports and used by connect_request.
    _recorded_ports = Dict()

    def __init__(self, **kwargs):
        super(Kernel, self).__init__(**kwargs)

        # Before we even start up the shell, register *first* our exit handlers
        # so they come before the shell's
        atexit.register(self._at_shutdown)

        # Initialize the InteractiveShell subclass
        self.shell = ZMQInteractiveShell.instance(
            config=self.config,
            profile_dir=self.profile_dir,
        )
        self.shell.displayhook.session = self.session
        self.shell.displayhook.pub_socket = self.iopub_socket
        self.shell.display_pub.session = self.session
        self.shell.display_pub.pub_socket = self.iopub_socket

        # TMP - hack while developing
        self.shell._reply_content = None

        # Build dict of handlers for message types
        msg_types = [
            'execute_request', 'complete_request', 'object_info_request',
            'history_request', 'connect_request', 'shutdown_request'
        ]
        self.handlers = {}
        for msg_type in msg_types:
            self.handlers[msg_type] = getattr(self, msg_type)

    def do_one_iteration(self):
        """Do one iteration of the kernel's evaluation loop.
        """
        try:
            ident, msg = self.session.recv(self.shell_socket, zmq.NOBLOCK)
        except Exception:
            self.log.warn("Invalid Message:", exc_info=True)
            return
        if msg is None:
            return

        msg_type = msg['header']['msg_type']

        # This assert will raise in versions of zeromq 2.0.7 and lesser.
        # We now require 2.0.8 or above, so we can uncomment for safety.
        # print(ident,msg, file=sys.__stdout__)
        assert ident is not None, "Missing message part."

        # Print some info about this message and leave a '--->' marker, so it's
        # easier to trace visually the message chain when debugging.  Each
        # handler prints its message at the end.
        self.log.debug('\n*** MESSAGE TYPE:' + str(msg_type) + '***')
        self.log.debug('   Content: ' + str(msg['content']) + '\n   --->\n   ')

        # Find and call actual handler for message
        handler = self.handlers.get(msg_type, None)
        if handler is None:
            self.log.error("UNKNOWN MESSAGE TYPE:" + str(msg))
        else:
            handler(ident, msg)

        # Check whether we should exit, in case the incoming message set the
        # exit flag on
        if self.shell.exit_now:
            self.log.debug('\nExiting IPython kernel...')
            # We do a normal, clean exit, which allows any actions registered
            # via atexit (such as history saving) to take place.
            sys.exit(0)

    def start(self):
        """ Start the kernel main loop.
        """
        # a KeyboardInterrupt (SIGINT) can occur on any python statement, so
        # let's ignore (SIG_IGN) them until we're in a place to handle them properly
        signal(SIGINT, SIG_IGN)
        poller = zmq.Poller()
        poller.register(self.shell_socket, zmq.POLLIN)
        # loop while self.eventloop has not been overridden
        while self.eventloop is None:
            try:
                # scale by extra factor of 10, because there is no
                # reason for this to be anything less than ~ 0.1s
                # since it is a real poller and will respond
                # to events immediately

                # double nested try/except, to properly catch KeyboardInterrupt
                # due to pyzmq Issue #130
                try:
                    poller.poll(10 * 1000 * self._poll_interval)
                    # restore raising of KeyboardInterrupt
                    signal(SIGINT, default_int_handler)
                    self.do_one_iteration()
                except:
                    raise
                finally:
                    # prevent raising of KeyboardInterrupt
                    signal(SIGINT, SIG_IGN)
            except KeyboardInterrupt:
                # Ctrl-C shouldn't crash the kernel
                io.raw_print("KeyboardInterrupt caught in kernel")
        # stop ignoring sigint, now that we are out of our own loop,
        # we don't want to prevent future code from handling it
        signal(SIGINT, default_int_handler)
        if self.eventloop is not None:
            try:
                self.eventloop(self)
            except KeyboardInterrupt:
                # Ctrl-C shouldn't crash the kernel
                io.raw_print("KeyboardInterrupt caught in kernel")

    def record_ports(self, ports):
        """Record the ports that this kernel is using.

        The creator of the Kernel instance must call this method if they
        want the :meth:`connect_request` method to return the port numbers.
        """
        self._recorded_ports = ports

    #---------------------------------------------------------------------------
    # Kernel request handlers
    #---------------------------------------------------------------------------

    def _publish_pyin(self, code, parent):
        """Publish the code request on the pyin stream."""

        self.session.send(self.iopub_socket,
                          u'pyin', {u'code': code},
                          parent=parent)

    def execute_request(self, ident, parent):

        self.session.send(self.iopub_socket,
                          u'status', {u'execution_state': u'busy'},
                          parent=parent)

        try:
            content = parent[u'content']
            code = content[u'code']
            silent = content[u'silent']
        except:
            self.log.error("Got bad msg: ")
            self.log.error(str(Message(parent)))
            return

        shell = self.shell  # we'll need this a lot here

        # Replace raw_input. Note that it is not sufficient to replace
        # raw_input in the user namespace.
        if content.get('allow_stdin', False):
            raw_input = lambda prompt='': self._raw_input(
                prompt, ident, parent)
        else:
            raw_input = lambda prompt='': self._no_raw_input()

        if py3compat.PY3:
            __builtin__.input = raw_input
        else:
            __builtin__.raw_input = raw_input

        # Set the parent message of the display hook and out streams.
        shell.displayhook.set_parent(parent)
        shell.display_pub.set_parent(parent)
        sys.stdout.set_parent(parent)
        sys.stderr.set_parent(parent)

        # Re-broadcast our input for the benefit of listening clients, and
        # start computing output
        if not silent:
            self._publish_pyin(code, parent)

        reply_content = {}
        try:
            if silent:
                # run_code uses 'exec' mode, so no displayhook will fire, and it
                # doesn't call logging or history manipulations.  Print
                # statements in that code will obviously still execute.
                shell.run_code(code)
            else:
                # FIXME: the shell calls the exception handler itself.
                shell.run_cell(code, store_history=True)
        except:
            status = u'error'
            # FIXME: this code right now isn't being used yet by default,
            # because the run_cell() call above directly fires off exception
            # reporting.  This code, therefore, is only active in the scenario
            # where runlines itself has an unhandled exception.  We need to
            # uniformize this, for all exception construction to come from a
            # single location in the codebase.
            etype, evalue, tb = sys.exc_info()
            tb_list = traceback.format_exception(etype, evalue, tb)
            reply_content.update(shell._showtraceback(etype, evalue, tb_list))
        else:
            status = u'ok'

        reply_content[u'status'] = status

        # Return the execution counter so clients can display prompts
        reply_content['execution_count'] = shell.execution_count - 1

        # FIXME - fish exception info out of shell, possibly left there by
        # runlines.  We'll need to clean up this logic later.
        if shell._reply_content is not None:
            reply_content.update(shell._reply_content)
            # reset after use
            shell._reply_content = None

        # At this point, we can tell whether the main code execution succeeded
        # or not.  If it did, we proceed to evaluate user_variables/expressions
        if reply_content['status'] == 'ok':
            reply_content[u'user_variables'] = \
                         shell.user_variables(content[u'user_variables'])
            reply_content[u'user_expressions'] = \
                         shell.user_expressions(content[u'user_expressions'])
        else:
            # If there was an error, don't even try to compute variables or
            # expressions
            reply_content[u'user_variables'] = {}
            reply_content[u'user_expressions'] = {}

        # Payloads should be retrieved regardless of outcome, so we can both
        # recover partial output (that could have been generated early in a
        # block, before an error) and clear the payload system always.
        reply_content[u'payload'] = shell.payload_manager.read_payload()
        # Be aggressive about clearing the payload because we don't want
        # it to sit in memory until the next execute_request comes in.
        shell.payload_manager.clear_payload()

        # Flush output before sending the reply.
        sys.stdout.flush()
        sys.stderr.flush()
        # FIXME: on rare occasions, the flush doesn't seem to make it to the
        # clients... This seems to mitigate the problem, but we definitely need
        # to better understand what's going on.
        if self._execute_sleep:
            time.sleep(self._execute_sleep)

        # Send the reply.
        reply_content = json_clean(reply_content)
        reply_msg = self.session.send(self.shell_socket,
                                      u'execute_reply',
                                      reply_content,
                                      parent,
                                      ident=ident)
        self.log.debug(str(reply_msg))

        if reply_msg['content']['status'] == u'error':
            self._abort_queue()

        self.session.send(self.iopub_socket,
                          u'status', {u'execution_state': u'idle'},
                          parent=parent)

    def complete_request(self, ident, parent):
        txt, matches = self._complete(parent)
        matches = {'matches': matches, 'matched_text': txt, 'status': 'ok'}
        matches = json_clean(matches)
        completion_msg = self.session.send(self.shell_socket, 'complete_reply',
                                           matches, parent, ident)
        self.log.debug(str(completion_msg))

    def object_info_request(self, ident, parent):
        object_info = self.shell.object_inspect(parent['content']['oname'])
        # Before we send this object over, we scrub it for JSON usage
        oinfo = json_clean(object_info)
        msg = self.session.send(self.shell_socket, 'object_info_reply', oinfo,
                                parent, ident)
        self.log.debug(msg)

    def history_request(self, ident, parent):
        # We need to pull these out, as passing **kwargs doesn't work with
        # unicode keys before Python 2.6.5.
        hist_access_type = parent['content']['hist_access_type']
        raw = parent['content']['raw']
        output = parent['content']['output']
        if hist_access_type == 'tail':
            n = parent['content']['n']
            hist = self.shell.history_manager.get_tail(n,
                                                       raw=raw,
                                                       output=output,
                                                       include_latest=True)

        elif hist_access_type == 'range':
            session = parent['content']['session']
            start = parent['content']['start']
            stop = parent['content']['stop']
            hist = self.shell.history_manager.get_range(session,
                                                        start,
                                                        stop,
                                                        raw=raw,
                                                        output=output)

        elif hist_access_type == 'search':
            pattern = parent['content']['pattern']
            hist = self.shell.history_manager.search(pattern,
                                                     raw=raw,
                                                     output=output)

        else:
            hist = []
        content = {'history': list(hist)}
        content = json_clean(content)
        msg = self.session.send(self.shell_socket, 'history_reply', content,
                                parent, ident)
        self.log.debug(str(msg))

    def connect_request(self, ident, parent):
        if self._recorded_ports is not None:
            content = self._recorded_ports.copy()
        else:
            content = {}
        msg = self.session.send(self.shell_socket, 'connect_reply', content,
                                parent, ident)
        self.log.debug(msg)

    def shutdown_request(self, ident, parent):
        self.shell.exit_now = True
        self._shutdown_message = self.session.msg(u'shutdown_reply',
                                                  parent['content'], parent)
        sys.exit(0)

    #---------------------------------------------------------------------------
    # Protected interface
    #---------------------------------------------------------------------------

    def _abort_queue(self):
        while True:
            try:
                ident, msg = self.session.recv(self.shell_socket, zmq.NOBLOCK)
            except Exception:
                self.log.warn("Invalid Message:", exc_info=True)
                continue
            if msg is None:
                break
            else:
                assert ident is not None, \
                       "Unexpected missing message part."

            self.log.debug("Aborting:\n" + str(Message(msg)))
            msg_type = msg['header']['msg_type']
            reply_type = msg_type.split('_')[0] + '_reply'
            reply_msg = self.session.send(self.shell_socket,
                                          reply_type, {'status': 'aborted'},
                                          msg,
                                          ident=ident)
            self.log.debug(reply_msg)
            # We need to wait a bit for requests to come in. This can probably
            # be set shorter for true asynchronous clients.
            time.sleep(0.1)

    def _no_raw_input(self):
        """Raise StdinNotImplentedError if active frontend doesn't support
        stdin."""
        raise StdinNotImplementedError("raw_input was called, but this "
                                       "frontend does not support stdin.")

    def _raw_input(self, prompt, ident, parent):
        # Flush output before making the request.
        sys.stderr.flush()
        sys.stdout.flush()

        # Send the input request.
        content = json_clean(dict(prompt=prompt))
        self.session.send(self.stdin_socket,
                          u'input_request',
                          content,
                          parent,
                          ident=ident)

        # Await a response.
        while True:
            try:
                ident, reply = self.session.recv(self.stdin_socket, 0)
            except Exception:
                self.log.warn("Invalid Message:", exc_info=True)
            else:
                break
        try:
            value = reply['content']['value']
        except:
            self.log.error("Got bad raw_input reply: ")
            self.log.error(str(Message(parent)))
            value = ''
        if value == '\x04':
            # EOF
            raise EOFError
        return value

    def _complete(self, msg):
        c = msg['content']
        try:
            cpos = int(c['cursor_pos'])
        except:
            # If we don't get something that we can convert to an integer, at
            # least attempt the completion guessing the cursor is at the end of
            # the text, if there's any, and otherwise of the line
            cpos = len(c['text'])
            if cpos == 0:
                cpos = len(c['line'])
        return self.shell.complete(c['text'], c['line'], cpos)

    def _object_info(self, context):
        symbol, leftover = self._symbol_from_context(context)
        if symbol is not None and not leftover:
            doc = getattr(symbol, '__doc__', '')
        else:
            doc = ''
        object_info = dict(docstring=doc)
        return object_info

    def _symbol_from_context(self, context):
        if not context:
            return None, context

        base_symbol_string = context[0]
        symbol = self.shell.user_ns.get(base_symbol_string, None)
        if symbol is None:
            symbol = __builtin__.__dict__.get(base_symbol_string, None)
        if symbol is None:
            return None, context

        context = context[1:]
        for i, name in enumerate(context):
            new_symbol = getattr(symbol, name, None)
            if new_symbol is None:
                return symbol, context[i:]
            else:
                symbol = new_symbol

        return symbol, []

    def _at_shutdown(self):
        """Actions taken at shutdown by the kernel, called by python's atexit.
        """
        # io.rprint("Kernel at_shutdown") # dbg
        if self._shutdown_message is not None:
            self.session.send(self.shell_socket, self._shutdown_message)
            self.session.send(self.iopub_socket, self._shutdown_message)
            self.log.debug(str(self._shutdown_message))
            # A very short sleep to give zmq time to flush its message buffers
            # before Python truly shuts down.
            time.sleep(0.01)
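
Both Kernel classes in this listing dispatch incoming messages through a dict mapping msg_type strings to bound methods. The pattern in isolation; the class and message contents below are made up:

class MiniKernel(object):
    def __init__(self):
        msg_types = ['execute_request', 'shutdown_request']
        # bind each message type to the handler method of the same name
        self.handlers = dict((t, getattr(self, t)) for t in msg_types)

    def execute_request(self, msg):
        print('executing %r' % msg['content']['code'])

    def shutdown_request(self, msg):
        print('shutting down')


k = MiniKernel()
k.handlers['execute_request']({'content': {'code': '1+1'}})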
Example #18
class Kernel(HasTraits):

    # Private interface

    # Time to sleep after flushing the stdout/err buffers in each execute
    # cycle.  While this introduces a hard limit on the minimal latency of the
    # execute cycle, it helps prevent output synchronization problems for
    # clients.
    # Units are in seconds.  The minimum zmq latency on local host is probably
    # ~150 microseconds, set this to 500us for now.  We may need to increase it
    # a little if it's not enough after more interactive testing.
    _execute_sleep = Float(0.0005, config=True)

    # This is a dict of the port numbers that the kernel is listening on. It is set
    # by record_ports and used by connect_request.
    _recorded_ports = Dict()

    #---------------------------------------------------------------------------
    # Kernel interface
    #---------------------------------------------------------------------------

    session = Instance(Session)
    shell_socket = Instance('zmq.Socket')
    iopub_socket = Instance('zmq.Socket')
    stdin_socket = Instance('zmq.Socket')
    log = Instance('logging.Logger')

    def __init__(self, **kwargs):
        super(Kernel, self).__init__(**kwargs)
        self.user_ns = {}
        self.history = []
        self.compiler = CommandCompiler()
        self.completer = KernelCompleter(self.user_ns)

        # Build dict of handlers for message types
        msg_types = [
            'execute_request', 'complete_request', 'object_info_request',
            'shutdown_request'
        ]
        self.handlers = {}
        for msg_type in msg_types:
            self.handlers[msg_type] = getattr(self, msg_type)

    def start(self):
        """ Start the kernel main loop.
        """
        while True:
            ident, msg = self.session.recv(self.shell_socket, 0)
            assert ident is not None, "Missing message part."
            omsg = Message(msg)
            self.log.debug(str(omsg))
            handler = self.handlers.get(omsg.msg_type, None)
            if handler is None:
                self.log.error("UNKNOWN MESSAGE TYPE: %s" % omsg)
            else:
                handler(ident, omsg)

    def record_ports(self, ports):
        """Record the ports that this kernel is using.

        The creator of the Kernel instance must call this method if they
        want the :meth:`connect_request` method to return the port numbers.
        """
        self._recorded_ports = ports

    #---------------------------------------------------------------------------
    # Kernel request handlers
    #---------------------------------------------------------------------------

    def execute_request(self, ident, parent):
        try:
            code = parent[u'content'][u'code']
        except:
            self.log.error("Got bad msg: %s" % Message(parent))
            return
        pyin_msg = self.session.send(self.iopub_socket,
                                     u'pyin', {u'code': code},
                                     parent=parent)

        try:
            comp_code = self.compiler(code, '<zmq-kernel>')

            # Replace raw_input. Note that it is not sufficient to replace
            # raw_input in the user namespace.
            raw_input = lambda prompt='': self._raw_input(
                prompt, ident, parent)
            __builtin__.raw_input = raw_input

            # Set the parent message of the display hook and out streams.
            sys.displayhook.set_parent(parent)
            sys.stdout.set_parent(parent)
            sys.stderr.set_parent(parent)

            exec comp_code in self.user_ns, self.user_ns
        except:
            etype, evalue, tb = sys.exc_info()
            tb = traceback.format_exception(etype, evalue, tb)
            exc_content = {
                u'status': u'error',
                u'traceback': tb,
                u'ename': unicode(etype.__name__),
                u'evalue': unicode(evalue)
            }
            exc_msg = self.session.send(self.iopub_socket, u'pyerr',
                                        exc_content, parent)
            reply_content = exc_content
        else:
            reply_content = {'status': 'ok', 'payload': {}}

        # Flush output before sending the reply.
        sys.stderr.flush()
        sys.stdout.flush()
        # FIXME: on rare occasions, the flush doesn't seem to make it to the
        # clients... This seems to mitigate the problem, but we definitely need
        # to better understand what's going on.
        if self._execute_sleep:
            time.sleep(self._execute_sleep)

        # Send the reply.
        reply_msg = self.session.send(self.shell_socket,
                                      u'execute_reply',
                                      reply_content,
                                      parent,
                                      ident=ident)
        self.log.debug(Message(reply_msg))
        if reply_msg['content']['status'] == u'error':
            self._abort_queue()

    def complete_request(self, ident, parent):
        matches = {'matches': self._complete(parent), 'status': 'ok'}
        completion_msg = self.session.send(self.shell_socket, 'complete_reply',
                                           matches, parent, ident)
        self.log.debug(completion_msg)

    def object_info_request(self, ident, parent):
        context = parent['content']['oname'].split('.')
        object_info = self._object_info(context)
        msg = self.session.send(self.shell_socket, 'object_info_reply',
                                object_info, parent, ident)
        self.log.debug(msg)

    def shutdown_request(self, ident, parent):
        content = dict(parent['content'])
        msg = self.session.send(self.shell_socket, 'shutdown_reply', content,
                                parent, ident)
        msg = self.session.send(self.iopub_socket, 'shutdown_reply', content,
                                parent, ident)
        self.log.debug(msg)
        time.sleep(0.1)
        sys.exit(0)

    #---------------------------------------------------------------------------
    # Protected interface
    #---------------------------------------------------------------------------

    def _abort_queue(self):
        while True:
            ident, msg = self.session.recv(self.shell_socket, zmq.NOBLOCK)
            if msg is None:
                # msg=None on EAGAIN
                break
            else:
                assert ident is not None, "Missing message part."
            self.log.debug("Aborting: %s" % Message(msg))
            msg_type = msg['msg_type']
            reply_type = msg_type.split('_')[0] + '_reply'
            reply_msg = self.session.send(self.shell_socket,
                                          reply_type, {'status': 'aborted'},
                                          msg,
                                          ident=ident)
            self.log.debug(Message(reply_msg))
            # We need to wait a bit for requests to come in. This can probably
            # be set shorter for true asynchronous clients.
            time.sleep(0.1)

    def _raw_input(self, prompt, ident, parent):
        # Flush output before making the request.
        sys.stderr.flush()
        sys.stdout.flush()

        # Send the input request.
        content = dict(prompt=prompt)
        msg = self.session.send(self.stdin_socket, u'input_request', content,
                                parent)

        # Await a response.
        ident, reply = self.session.recv(self.stdin_socket, 0)
        try:
            value = reply['content']['value']
        except:
            self.log.error("Got bad raw_input reply: %s" % Message(parent))
            value = ''
        return value

    def _complete(self, msg):
        # messages are plain dicts here, as in the handlers above
        content = msg['content']
        return self.completer.complete(content['line'], content['text'])

    def _object_info(self, context):
        symbol, leftover = self._symbol_from_context(context)
        if symbol is not None and not leftover:
            doc = getattr(symbol, '__doc__', '')
        else:
            doc = ''
        object_info = dict(docstring=doc)
        return object_info

    def _symbol_from_context(self, context):
        if not context:
            return None, context

        base_symbol_string = context[0]
        symbol = self.user_ns.get(base_symbol_string, None)
        if symbol is None:
            symbol = __builtin__.__dict__.get(base_symbol_string, None)
        if symbol is None:
            return None, context

        context = context[1:]
        for i, name in enumerate(context):
            new_symbol = getattr(symbol, name, None)
            if new_symbol is None:
                return symbol, context[i:]
            else:
                symbol = new_symbol

        return symbol, []
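
A note on the helper above: _symbol_from_context resolves a dotted name one
attribute at a time, returning the deepest symbol reached plus any parts it
could not resolve. A minimal standalone sketch of the same strategy (the
resolve name is ours, not part of the listing, and Python 3's builtins module
stands in for the __builtin__ lookup above):

import builtins

def resolve(namespace, dotted_name):
    """Return (symbol, leftover_parts), mirroring _symbol_from_context."""
    parts = dotted_name.split('.')
    # base name: user namespace first, then builtins
    symbol = namespace.get(parts[0], getattr(builtins, parts[0], None))
    if symbol is None:
        return None, parts
    for i, name in enumerate(parts[1:]):
        new_symbol = getattr(symbol, name, None)
        if new_symbol is None:
            # stop at the deepest symbol that resolved
            return symbol, parts[i + 1:]
        symbol = new_symbol
    return symbol, []

# e.g. resolve({}, 'len') -> (<built-in function len>, [])
#      resolve({}, 'len.nope') -> (<built-in function len>, ['nope'])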
Example #19
class Kernel(SingletonConfigurable):

    #---------------------------------------------------------------------------
    # Kernel interface
    #---------------------------------------------------------------------------

    # attribute to override with a GUI
    eventloop = Any(None)

    def _eventloop_changed(self, name, old, new):
        """schedule call to eventloop from IOLoop"""
        loop = ioloop.IOLoop.instance()
        loop.add_callback(self.enter_eventloop)

    session = Instance(Session)
    profile_dir = Instance('IPython.core.profiledir.ProfileDir')
    shell_streams = List()
    control_stream = Instance(ZMQStream)
    iopub_socket = Instance(zmq.Socket)
    stdin_socket = Instance(zmq.Socket)
    log = Instance(logging.Logger)

    # identities:
    int_id = Integer(-1)
    ident = Unicode()

    def _ident_default(self):
        return unicode_type(uuid.uuid4())

    # Private interface

    _darwin_app_nap = Bool(
        True,
        config=True,
        help="""Whether to use appnope for compatiblity with OS X App Nap.
        
        Only affects OS X >= 10.9.
        """)

    # track associations with current request
    _allow_stdin = Bool(False)
    _parent_header = Dict()
    _parent_ident = Any(b'')
    # Time to sleep after flushing the stdout/err buffers in each execute
    # cycle.  While this introduces a hard limit on the minimal latency of the
    # execute cycle, it helps prevent output synchronization problems for
    # clients.
    # Units are in seconds.  The minimum zmq latency on local host is probably
    # ~150 microseconds, set this to 500us for now.  We may need to increase it
    # a little if it's not enough after more interactive testing.
    _execute_sleep = Float(0.0005, config=True)

    # Frequency of the kernel's event loop.
    # Units are in seconds, kernel subclasses for GUI toolkits may need to
    # adapt to milliseconds.
    _poll_interval = Float(0.05, config=True)

    # If the shutdown was requested over the network, we leave here the
    # necessary reply message so it can be sent by our registered atexit
    # handler.  This ensures that the reply is only sent to clients truly at
    # the end of our shutdown process (which happens after the underlying
    # IPython shell's own shutdown).
    _shutdown_message = None

    # This is a dict of the ports that the kernel is listening on. It is set
    # by record_ports and used by connect_request.
    _recorded_ports = Dict()

    # set of aborted msg_ids
    aborted = Set()

    # Track execution count here. For IPython, we override this to use the
    # execution count we store in the shell.
    execution_count = 0

    def __init__(self, **kwargs):
        super(Kernel, self).__init__(**kwargs)

        # Build dict of handlers for message types
        msg_types = [
            'execute_request',
            'complete_request',
            'inspect_request',
            'history_request',
            'kernel_info_request',
            'connect_request',
            'shutdown_request',
            'apply_request',
            'is_complete_request',
        ]
        self.shell_handlers = {}
        for msg_type in msg_types:
            self.shell_handlers[msg_type] = getattr(self, msg_type)

        control_msg_types = msg_types + ['clear_request', 'abort_request']
        self.control_handlers = {}
        for msg_type in control_msg_types:
            self.control_handlers[msg_type] = getattr(self, msg_type)

    def dispatch_control(self, msg):
        """dispatch control requests"""
        idents, msg = self.session.feed_identities(msg, copy=False)
        try:
            msg = self.session.deserialize(msg, content=True, copy=False)
        except:
            self.log.error("Invalid Control Message", exc_info=True)
            return

        self.log.debug("Control received: %s", msg)

        # Set the parent message for side effects.
        self.set_parent(idents, msg)
        self._publish_status(u'busy')

        header = msg['header']
        msg_type = header['msg_type']

        handler = self.control_handlers.get(msg_type, None)
        if handler is None:
            self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type)
        else:
            try:
                handler(self.control_stream, idents, msg)
            except Exception:
                self.log.error("Exception in control handler:", exc_info=True)

        sys.stdout.flush()
        sys.stderr.flush()
        self._publish_status(u'idle')

    def dispatch_shell(self, stream, msg):
        """dispatch shell requests"""
        # flush control requests first
        if self.control_stream:
            self.control_stream.flush()

        idents, msg = self.session.feed_identities(msg, copy=False)
        try:
            msg = self.session.deserialize(msg, content=True, copy=False)
        except:
            self.log.error("Invalid Message", exc_info=True)
            return

        # Set the parent message for side effects.
        self.set_parent(idents, msg)
        self._publish_status(u'busy')

        header = msg['header']
        msg_id = header['msg_id']
        msg_type = msg['header']['msg_type']

        # Print some info about this message and leave a '--->' marker, so it's
        # easier to trace visually the message chain when debugging.  Each
        # handler prints its message at the end.
        self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type)
        self.log.debug('   Content: %s\n   --->\n   ', msg['content'])

        if msg_id in self.aborted:
            self.aborted.remove(msg_id)
            # is it safe to assume a msg_id will not be resubmitted?
            reply_type = msg_type.split('_')[0] + '_reply'
            status = {'status': 'aborted'}
            md = {'engine': self.ident}
            md.update(status)
            self.session.send(stream,
                              reply_type,
                              metadata=md,
                              content=status,
                              parent=msg,
                              ident=idents)
            return

        handler = self.shell_handlers.get(msg_type, None)
        if handler is None:
            self.log.error("UNKNOWN MESSAGE TYPE: %r", msg_type)
        else:
            # ensure default_int_handler during handler call
            sig = signal(SIGINT, default_int_handler)
            self.log.debug("%s: %s", msg_type, msg)
            try:
                handler(stream, idents, msg)
            except Exception:
                self.log.error("Exception in message handler:", exc_info=True)
            finally:
                signal(SIGINT, sig)

        sys.stdout.flush()
        sys.stderr.flush()
        self._publish_status(u'idle')

    def enter_eventloop(self):
        """enter eventloop"""
        self.log.info("entering eventloop %s", self.eventloop)
        for stream in self.shell_streams:
            # flush any pending replies,
            # which may be skipped by entering the eventloop
            stream.flush(zmq.POLLOUT)
        # restore default_int_handler
        signal(SIGINT, default_int_handler)
        while self.eventloop is not None:
            try:
                self.eventloop(self)
            except KeyboardInterrupt:
                # Ctrl-C shouldn't crash the kernel
                self.log.error("KeyboardInterrupt caught in kernel")
                continue
            else:
                # eventloop exited cleanly, this means we should stop (right?)
                self.eventloop = None
                break
        self.log.info("exiting eventloop")

    def start(self):
        """register dispatchers for streams"""
        if self.control_stream:
            self.control_stream.on_recv(self.dispatch_control, copy=False)

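        # make_dispatcher exists to bind `stream` per closure: a plain lambda
        # in the loop below would late-bind and every dispatcher would end up
        # targeting the last stream in self.shell_streams.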
        def make_dispatcher(stream):
            def dispatcher(msg):
                return self.dispatch_shell(stream, msg)

            return dispatcher

        for s in self.shell_streams:
            s.on_recv(make_dispatcher(s), copy=False)

        # publish idle status
        self._publish_status('starting')

    def do_one_iteration(self):
        """step eventloop just once"""
        if self.control_stream:
            self.control_stream.flush()
        for stream in self.shell_streams:
            # handle at most one request per iteration
            stream.flush(zmq.POLLIN, 1)
            stream.flush(zmq.POLLOUT)

    def record_ports(self, ports):
        """Record the ports that this kernel is using.

        The creator of the Kernel instance must call this method if they
        want the :meth:`connect_request` method to return the port numbers.
        """
        self._recorded_ports = ports

    #---------------------------------------------------------------------------
    # Kernel request handlers
    #---------------------------------------------------------------------------

    def _make_metadata(self, other=None):
        """init metadata dict, for execute/apply_reply"""
        new_md = {
            'dependencies_met': True,
            'engine': self.ident,
            'started': datetime.now(),
        }
        if other:
            new_md.update(other)
        return new_md

    def _publish_execute_input(self, code, parent, execution_count):
        """Publish the code request on the iopub stream."""

        self.session.send(self.iopub_socket,
                          u'execute_input', {
                              u'code': code,
                              u'execution_count': execution_count
                          },
                          parent=parent,
                          ident=self._topic('execute_input'))

    def _publish_status(self, status, parent=None):
        """send status (busy/idle) on IOPub"""
        self.session.send(
            self.iopub_socket,
            u'status',
            {u'execution_state': status},
            parent=parent or self._parent_header,
            ident=self._topic('status'),
        )

    def set_parent(self, ident, parent):
        """Set the current parent_header
        
        Side effects (IOPub messages) and replies are associated with
        the request that caused them via the parent_header.
        
        The parent identity is used to route input_request messages
        on the stdin channel.
        """
        self._parent_ident = ident
        self._parent_header = parent

    def send_response(self,
                      stream,
                      msg_or_type,
                      content=None,
                      ident=None,
                      buffers=None,
                      track=False,
                      header=None,
                      metadata=None):
        """Send a response to the message we're currently processing.
        
        This accepts all the parameters of :meth:`IPython.kernel.zmq.session.Session.send`
        except ``parent``.
        
        This relies on :meth:`set_parent` having been called for the current
        message.
        """
        return self.session.send(stream, msg_or_type, content,
                                 self._parent_header, ident, buffers, track,
                                 header, metadata)

    def execute_request(self, stream, ident, parent):
        """handle an execute_request"""

        try:
            content = parent[u'content']
            code = py3compat.cast_unicode_py2(content[u'code'])
            silent = content[u'silent']
            store_history = content.get(u'store_history', not silent)
            user_expressions = content.get('user_expressions', {})
            allow_stdin = content.get('allow_stdin', False)
        except:
            self.log.error("Got bad msg: ")
            self.log.error("%s", parent)
            return

        md = self._make_metadata(parent['metadata'])

        # Re-broadcast our input for the benefit of listening clients, and
        # start computing output
        if not silent:
            self.execution_count += 1
            self._publish_execute_input(code, parent, self.execution_count)

        reply_content = self.do_execute(code, silent, store_history,
                                        user_expressions, allow_stdin)

        # Flush output before sending the reply.
        sys.stdout.flush()
        sys.stderr.flush()
        # FIXME: on rare occasions, the flush doesn't seem to make it to the
        # clients... This seems to mitigate the problem, but we definitely need
        # to better understand what's going on.
        if self._execute_sleep:
            time.sleep(self._execute_sleep)

        # Send the reply.
        reply_content = json_clean(reply_content)

        md['status'] = reply_content['status']
        if reply_content['status'] == 'error' and \
                        reply_content['ename'] == 'UnmetDependency':
            md['dependencies_met'] = False

        reply_msg = self.session.send(stream,
                                      u'execute_reply',
                                      reply_content,
                                      parent,
                                      metadata=md,
                                      ident=ident)

        self.log.debug("%s", reply_msg)

        if not silent and reply_msg['content']['status'] == u'error':
            self._abort_queues()

    def do_execute(self,
                   code,
                   silent,
                   store_history=True,
                   user_expressions=None,
                   allow_stdin=False):
        """Execute user code. Must be overridden by subclasses.
        """
        raise NotImplementedError

    def complete_request(self, stream, ident, parent):
        content = parent['content']
        code = content['code']
        cursor_pos = content['cursor_pos']

        matches = self.do_complete(code, cursor_pos)
        matches = json_clean(matches)
        completion_msg = self.session.send(stream, 'complete_reply', matches,
                                           parent, ident)
        self.log.debug("%s", completion_msg)

    def do_complete(self, code, cursor_pos):
        """Override in subclasses to find completions.
        """
        return {
            'matches': [],
            'cursor_end': cursor_pos,
            'cursor_start': cursor_pos,
            'metadata': {},
            'status': 'ok'
        }

    def inspect_request(self, stream, ident, parent):
        content = parent['content']

        reply_content = self.do_inspect(content['code'], content['cursor_pos'],
                                        content.get('detail_level', 0))
        # Before we send this object over, we scrub it for JSON usage
        reply_content = json_clean(reply_content)
        msg = self.session.send(stream, 'inspect_reply', reply_content, parent,
                                ident)
        self.log.debug("%s", msg)

    def do_inspect(self, code, cursor_pos, detail_level=0):
        """Override in subclasses to allow introspection.
        """
        return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': False}

    def history_request(self, stream, ident, parent):
        content = parent['content']

        reply_content = self.do_history(**content)

        reply_content = json_clean(reply_content)
        msg = self.session.send(stream, 'history_reply', reply_content, parent,
                                ident)
        self.log.debug("%s", msg)

    def do_history(self,
                   hist_access_type,
                   output,
                   raw,
                   session=None,
                   start=None,
                   stop=None,
                   n=None,
                   pattern=None,
                   unique=False):
        """Override in subclasses to access history.
        """
        return {'history': []}

    def connect_request(self, stream, ident, parent):
        if self._recorded_ports is not None:
            content = self._recorded_ports.copy()
        else:
            content = {}
        msg = self.session.send(stream, 'connect_reply', content, parent,
                                ident)
        self.log.debug("%s", msg)

    @property
    def kernel_info(self):
        return {
            'protocol_version': release.kernel_protocol_version,
            'implementation': self.implementation,
            'implementation_version': self.implementation_version,
            'language': self.language,
            'language_version': self.language_version,
            'banner': self.banner,
        }

    def kernel_info_request(self, stream, ident, parent):
        msg = self.session.send(stream, 'kernel_info_reply', self.kernel_info,
                                parent, ident)
        self.log.debug("%s", msg)

    def shutdown_request(self, stream, ident, parent):
        content = self.do_shutdown(parent['content']['restart'])
        self.session.send(stream,
                          u'shutdown_reply',
                          content,
                          parent,
                          ident=ident)
        # same content, but different msg_id for broadcasting on IOPub
        self._shutdown_message = self.session.msg(u'shutdown_reply', content,
                                                  parent)

        self._at_shutdown()
        # call sys.exit after a short delay
        loop = ioloop.IOLoop.instance()
        loop.add_timeout(time.time() + 0.1, loop.stop)

    def do_shutdown(self, restart):
        """Override in subclasses to do things when the frontend shuts down the
        kernel.
        """
        return {'status': 'ok', 'restart': restart}

    def is_complete_request(self, stream, ident, parent):
        content = parent['content']
        code = content['code']

        reply_content = self.do_is_complete(code)
        reply_content = json_clean(reply_content)
        reply_msg = self.session.send(stream, 'is_complete_reply',
                                      reply_content, parent, ident)
        self.log.debug("%s", reply_msg)

    def do_is_complete(self, code):
        """Override in subclasses to find completions.
        """
        return {
            'status': 'unknown',
        }

    #---------------------------------------------------------------------------
    # Engine methods
    #---------------------------------------------------------------------------

    def apply_request(self, stream, ident, parent):
        try:
            content = parent[u'content']
            bufs = parent[u'buffers']
            msg_id = parent['header']['msg_id']
        except:
            self.log.error("Got bad msg: %s", parent, exc_info=True)
            return

        md = self._make_metadata(parent['metadata'])

        reply_content, result_buf = self.do_apply(content, bufs, msg_id, md)

        # put 'ok'/'error' status in header, for scheduler introspection:
        md['status'] = reply_content['status']

        # flush i/o
        sys.stdout.flush()
        sys.stderr.flush()

        self.session.send(stream,
                          u'apply_reply',
                          reply_content,
                          parent=parent,
                          ident=ident,
                          buffers=result_buf,
                          metadata=md)

    def do_apply(self, content, bufs, msg_id, reply_metadata):
        """Override in subclasses to support the IPython parallel framework.
        """
        raise NotImplementedError

    #---------------------------------------------------------------------------
    # Control messages
    #---------------------------------------------------------------------------

    def abort_request(self, stream, ident, parent):
        """abort a specific msg by id"""
        msg_ids = parent['content'].get('msg_ids', None)
        if isinstance(msg_ids, string_types):
            msg_ids = [msg_ids]
        if not msg_ids:
            # no explicit ids: abort everything currently queued
            self._abort_queues()
        # msg_ids may be None (see the .get above); guard the iteration
        for mid in msg_ids or []:
            self.aborted.add(str(mid))

        content = dict(status='ok')
        reply_msg = self.session.send(stream,
                                      'abort_reply',
                                      content=content,
                                      parent=parent,
                                      ident=ident)
        self.log.debug("%s", reply_msg)

    def clear_request(self, stream, idents, parent):
        """Clear our namespace."""
        content = self.do_clear()
        self.session.send(stream,
                          'clear_reply',
                          ident=idents,
                          parent=parent,
                          content=content)

    def do_clear(self):
        """Override in subclasses to clear the namespace
        
        This is only required for IPython.parallel.
        """
        raise NotImplementedError

    #---------------------------------------------------------------------------
    # Protected interface
    #---------------------------------------------------------------------------

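    # e.g. _topic('status') -> b'kernel.<uuid>.status', or b'engine.3.status'
    # when running as an IPython.parallel engine (int_id >= 0)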
    def _topic(self, topic):
        """prefixed topic for IOPub messages"""
        if self.int_id >= 0:
            base = "engine.%i" % self.int_id
        else:
            base = "kernel.%s" % self.ident

        return py3compat.cast_bytes("%s.%s" % (base, topic))

    def _abort_queues(self):
        for stream in self.shell_streams:
            if stream:
                self._abort_queue(stream)

    def _abort_queue(self, stream):
        poller = zmq.Poller()
        poller.register(stream.socket, zmq.POLLIN)
        while True:
            idents, msg = self.session.recv(stream, zmq.NOBLOCK, content=True)
            if msg is None:
                return

            self.log.info("Aborting:")
            self.log.info("%s", msg)
            msg_type = msg['header']['msg_type']
            reply_type = msg_type.split('_')[0] + '_reply'

            status = {'status': 'aborted'}
            md = {'engine': self.ident}
            md.update(status)
            reply_msg = self.session.send(stream,
                                          reply_type,
                                          metadata=md,
                                          content=status,
                                          parent=msg,
                                          ident=idents)
            self.log.debug("%s", reply_msg)
            # We need to wait a bit for requests to come in. This can probably
            # be set shorter for true asynchronous clients.
            poller.poll(50)

    def _no_raw_input(self):
        """Raise StdinNotImplentedError if active frontend doesn't support
        stdin."""
        raise StdinNotImplementedError("raw_input was called, but this "
                                       "frontend does not support stdin.")

    def getpass(self, prompt=''):
        """Forward getpass to frontends
        
        Raises
        ------
        StdinNotImplementedError if active frontend doesn't support stdin.
        """
        if not self._allow_stdin:
            raise StdinNotImplementedError(
                "getpass was called, but this frontend does not support input requests."
            )
        return self._input_request(
            prompt,
            self._parent_ident,
            self._parent_header,
            password=True,
        )

    def raw_input(self, prompt=''):
        """Forward raw_input to frontends
        
        Raises
        ------
        StdinNotImplementedError if active frontend doesn't support stdin.
        """
        if not self._allow_stdin:
            raise StdinNotImplementedError(
                "raw_input was called, but this frontend does not support input requests."
            )
        return self._input_request(
            prompt,
            self._parent_ident,
            self._parent_header,
            password=False,
        )

    def _input_request(self, prompt, ident, parent, password=False):
        # Flush output before making the request.
        sys.stderr.flush()
        sys.stdout.flush()
        # flush the stdin socket, to purge stale replies
        while True:
            try:
                self.stdin_socket.recv_multipart(zmq.NOBLOCK)
            except zmq.ZMQError as e:
                if e.errno == zmq.EAGAIN:
                    break
                else:
                    raise

        # Send the input request.
        content = json_clean(dict(prompt=prompt, password=password))
        self.session.send(self.stdin_socket,
                          u'input_request',
                          content,
                          parent,
                          ident=ident)

        # Await a response.
        while True:
            try:
                ident, reply = self.session.recv(self.stdin_socket, 0)
            except Exception:
                self.log.warn("Invalid Message:", exc_info=True)
            except KeyboardInterrupt:
                # re-raise KeyboardInterrupt, to truncate traceback
                raise KeyboardInterrupt
            else:
                break
        try:
            value = py3compat.unicode_to_str(reply['content']['value'])
        except:
            self.log.error("Bad input_reply: %s", parent)
            value = ''
        if value == '\x04':
            # EOF
            raise EOFError
        return value

    def _at_shutdown(self):
        """Actions taken at shutdown by the kernel, called by python's atexit.
        """
        # io.rprint("Kernel at_shutdown") # dbg
        if self._shutdown_message is not None:
            self.session.send(self.iopub_socket,
                              self._shutdown_message,
                              ident=self._topic('shutdown'))
            self.log.debug("%s", self._shutdown_message)
        [s.flush(zmq.POLLOUT) for s in self.shell_streams]
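
The Kernel above is a base class: do_execute, do_complete, and friends are
stubs meant to be overridden. A minimal sketch of a concrete subclass
(EchoKernel and its metadata values are ours; it assumes only the do_execute
contract and the send_response helper shown in the listing):

class EchoKernel(Kernel):
    # metadata consumed by the kernel_info property above
    implementation = 'echo'
    implementation_version = '0.1'
    language = 'text'
    language_version = '0.1'
    banner = 'Echo kernel: repeats input on stdout'

    def do_execute(self, code, silent, store_history=True,
                   user_expressions=None, allow_stdin=False):
        if not silent:
            # send_response reuses the parent header set by dispatch_shell
            self.send_response(self.iopub_socket, 'stream',
                               {'name': 'stdout', 'text': code})
        return {'status': 'ok',
                'execution_count': self.execution_count,
                'payload': [],
                'user_expressions': {}}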
Example #20
class JupyterHub(Application):
    """An Application for starting a Multi-User Jupyter Notebook server."""
    name = 'jupyterhub'

    description = """Start a multi-user Jupyter Notebook server
    
    Spawns a configurable-http-proxy and multi-user Hub,
    which authenticates users and spawns single-user Notebook servers
    on behalf of users.
    """

    examples = """
    
    generate default config file:
    
        jupyterhub --generate-config -f /etc/jupyterhub/jupyterhub.py
    
    spawn the server on 10.0.1.2:443 with https:
    
        jupyterhub --ip 10.0.1.2 --port 443 --ssl-key my_ssl.key --ssl-cert my_ssl.cert
    """

    aliases = Dict(aliases)
    flags = Dict(flags)

    subcommands = {'token': (NewToken, "Generate an API token for a user")}

    classes = List([
        Spawner,
        LocalProcessSpawner,
        Authenticator,
        PAMAuthenticator,
    ])

    config_file = Unicode(
        'jupyterhub_config.py',
        config=True,
        help="The config file to load",
    )
    generate_config = Bool(
        False,
        config=True,
        help="Generate default config file",
    )
    answer_yes = Bool(
        False,
        config=True,
        help="Answer yes to any questions (e.g. confirm overwrite)")
    pid_file = Unicode('',
                       config=True,
                       help="""File to write PID
        Useful for daemonizing jupyterhub.
        """)
    cookie_max_age_days = Float(
        14,
        config=True,
        help="""Number of days for a login cookie to be valid.
        Default is two weeks.
        """)
    last_activity_interval = Integer(
        300,
        config=True,
        help=
        "Interval (in seconds) at which to update last-activity timestamps.")
    proxy_check_interval = Integer(
        30,
        config=True,
        help="Interval (in seconds) at which to check if the proxy is running."
    )

    data_files_path = Unicode(
        DATA_FILES_PATH,
        config=True,
        help=
        "The location of jupyterhub data files (e.g. /usr/local/share/jupyter/hub)"
    )

    ssl_key = Unicode(
        '',
        config=True,
        help="""Path to SSL key file for the public facing interface of the proxy
        
        Use with ssl_cert
        """)
    ssl_cert = Unicode(
        '',
        config=True,
        help=
        """Path to SSL certificate file for the public facing interface of the proxy
        
        Use with ssl_key
        """)
    ip = Unicode('', config=True, help="The public facing ip of the proxy")
    port = Integer(8000,
                   config=True,
                   help="The public facing port of the proxy")
    base_url = URLPrefix('/',
                         config=True,
                         help="The base URL of the entire application")

    jinja_environment_options = Dict(
        config=True,
        help="Supply extra arguments that will be passed to Jinja environment."
    )

    proxy_cmd = Unicode('configurable-http-proxy',
                        config=True,
                        help="""The command to start the http proxy.
        
        Only override if configurable-http-proxy is not on your PATH
        """)
    debug_proxy = Bool(False,
                       config=True,
                       help="show debug output in configurable-http-proxy")
    proxy_auth_token = Unicode(config=True,
                               help="""The Proxy Auth token.

        Loaded from the CONFIGPROXY_AUTH_TOKEN env variable by default.
        """)

    def _proxy_auth_token_default(self):
        token = os.environ.get('CONFIGPROXY_AUTH_TOKEN', None)
        if not token:
            self.log.warn('\n'.join([
                "",
                "Generating CONFIGPROXY_AUTH_TOKEN. Restarting the Hub will require restarting the proxy.",
                "Set CONFIGPROXY_AUTH_TOKEN env or JupyterHub.proxy_auth_token config to avoid this message.",
                "",
            ]))
            token = orm.new_token()
        return token

    proxy_api_ip = Unicode('localhost',
                           config=True,
                           help="The ip for the proxy API handlers")
    proxy_api_port = Integer(config=True,
                             help="The port for the proxy API handlers")

    def _proxy_api_port_default(self):
        return self.port + 1

    hub_port = Integer(8081, config=True, help="The port for this process")
    hub_ip = Unicode('localhost', config=True, help="The ip for this process")

    hub_prefix = URLPrefix(
        '/hub/',
        config=True,
        help="The prefix for the hub server. Must not be '/'")

    def _hub_prefix_default(self):
        return url_path_join(self.base_url, '/hub/')

    def _hub_prefix_changed(self, name, old, new):
        if new == '/':
            raise TraitError("'/' is not a valid hub prefix")
        if not new.startswith(self.base_url):
            self.hub_prefix = url_path_join(self.base_url, new)

    cookie_secret = Bytes(config=True,
                          env='JPY_COOKIE_SECRET',
                          help="""The cookie secret to use to encrypt cookies.

        Loaded from the JPY_COOKIE_SECRET env variable by default.
        """)

    cookie_secret_file = Unicode(
        'jupyterhub_cookie_secret',
        config=True,
        help="""File in which to store the cookie secret.""")

    authenticator_class = Type(PAMAuthenticator,
                               Authenticator,
                               config=True,
                               help="""Class for authenticating users.
        
        This should be a class with the following form:
        
        - constructor takes one kwarg: `config`, the IPython config object.
        
        - is a tornado.gen.coroutine
        - returns username on success, None on failure
        - takes two arguments: (handler, data),
          where `handler` is the calling web.RequestHandler,
          and `data` is the POST form data from the login page.
        """)

    authenticator = Instance(Authenticator)

    def _authenticator_default(self):
        return self.authenticator_class(parent=self, db=self.db)

    # class for spawning single-user servers
    spawner_class = Type(
        LocalProcessSpawner,
        Spawner,
        config=True,
        help="""The class to use for spawning single-user servers.
        
        Should be a subclass of Spawner.
        """)

    db_url = Unicode(
        'sqlite:///jupyterhub.sqlite',
        config=True,
        help="url for the database. e.g. `sqlite:///jupyterhub.sqlite`")

    def _db_url_changed(self, name, old, new):
        if '://' not in new:
            # assume sqlite, if given as a plain filename
            self.db_url = 'sqlite:///%s' % new

    db_kwargs = Dict(
        config=True,
        help="""Include any kwargs to pass to the database connection.
        See sqlalchemy.create_engine for details.
        """)

    reset_db = Bool(False, config=True, help="Purge and reset the database.")
    debug_db = Bool(
        False,
        config=True,
        help="log all database transactions. This has A LOT of output")
    db = Any()
    session_factory = Any()

    admin_access = Bool(
        False,
        config=True,
        help="""Grant admin users permission to access single-user servers.
        
        Users should be properly informed if this is enabled.
        """)
    admin_users = Set(config=True,
                      help="""set of usernames of admin users

        If unspecified, only the user that launches the server will be admin.
        """)
    tornado_settings = Dict(config=True)

    cleanup_servers = Bool(
        True,
        config=True,
        help="""Whether to shutdown single-user servers when the Hub shuts down.
        
        Disable if you want to be able to teardown the Hub while leaving the single-user servers running.
        
        If both this and cleanup_proxy are False, sending SIGINT to the Hub will
        only shutdown the Hub, leaving everything else running.
        
        The Hub should be able to resume from database state.
        """)

    cleanup_proxy = Bool(
        True,
        config=True,
        help="""Whether to shutdown the proxy when the Hub shuts down.
        
        Disable if you want to be able to teardown the Hub while leaving the proxy running.
        
        Only valid if the proxy was started by the Hub process.
        
        If both this and cleanup_servers are False, sending SIGINT to the Hub will
        only shutdown the Hub, leaving everything else running.
        
        The Hub should be able to resume from database state.
        """)

    handlers = List()

    _log_formatter_cls = CoroutineLogFormatter
    http_server = None
    proxy_process = None
    io_loop = None

    def _log_level_default(self):
        return logging.INFO

    def _log_datefmt_default(self):
        """Exclude date from default date format"""
        return "%Y-%m-%d %H:%M:%S"

    def _log_format_default(self):
        """override default log format to include time"""
        return "%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s"

    extra_log_file = Unicode("",
                             config=True,
                             help="Set a logging.FileHandler on this file.")
    extra_log_handlers = List(
        Instance(logging.Handler),
        config=True,
        help="Extra log handlers to set on JupyterHub logger",
    )

    def init_logging(self):
        # This prevents double log messages because tornado uses a root logger
        # that self.log is a child of. The logging module dispatches log
        # messages to a logger and all of its ancestors until propagate is set
        # to False.
        self.log.propagate = False

        if self.extra_log_file:
            self.extra_log_handlers.append(
                logging.FileHandler(self.extra_log_file))

        _formatter = self._log_formatter_cls(
            fmt=self.log_format,
            datefmt=self.log_datefmt,
        )
        for handler in self.extra_log_handlers:
            if handler.formatter is None:
                handler.setFormatter(_formatter)
            self.log.addHandler(handler)

        # hook up tornado 3's loggers to our app handlers
        for log in (app_log, access_log, gen_log):
            # ensure all log statements identify the application they come from
            log.name = self.log.name
        logger = logging.getLogger('tornado')
        logger.propagate = True
        logger.parent = self.log
        logger.setLevel(self.log.level)

    def init_ports(self):
        if self.hub_port == self.port:
            raise TraitError(
                "The hub and proxy cannot both listen on port %i" % self.port)
        if self.hub_port == self.proxy_api_port:
            raise TraitError(
                "The hub and proxy API cannot both listen on port %i" %
                self.hub_port)
        if self.proxy_api_port == self.port:
            raise TraitError(
                "The proxy's public and API ports cannot both be %i" %
                self.port)

    @staticmethod
    def add_url_prefix(prefix, handlers):
        """add a url prefix to handlers"""
        for i, tup in enumerate(handlers):
            lis = list(tup)
            lis[0] = url_path_join(prefix, tup[0])
            handlers[i] = tuple(lis)
        return handlers

    def init_handlers(self):
        h = []
        h.extend(handlers.default_handlers)
        h.extend(apihandlers.default_handlers)
        # load handlers from the authenticator
        h.extend(self.authenticator.get_handlers(self))

        self.handlers = self.add_url_prefix(self.hub_prefix, h)

        # some extra handlers, outside hub_prefix
        self.handlers.extend([
            (r"%s" % self.hub_prefix.rstrip('/'), web.RedirectHandler, {
                "url": self.hub_prefix,
                "permanent": False,
            }),
            (r"(?!%s).*" % self.hub_prefix, handlers.PrefixRedirectHandler),
            (r'(.*)', handlers.Template404),
        ])

    def _check_db_path(self, path):
        """More informative log messages for failed filesystem access"""
        path = os.path.abspath(path)
        parent, fname = os.path.split(path)
        user = getuser()
        if not os.path.isdir(parent):
            self.log.error("Directory %s does not exist", parent)
        if os.path.exists(parent) and not os.access(parent, os.W_OK):
            self.log.error("%s cannot create files in %s", user, parent)
        if os.path.exists(path) and not os.access(path, os.W_OK):
            self.log.error("%s cannot edit %s", user, path)

    def init_secrets(self):
        trait_name = 'cookie_secret'
        trait = self.traits()[trait_name]
        env_name = trait.get_metadata('env')
        secret_file = os.path.abspath(
            os.path.expanduser(self.cookie_secret_file))
        secret = self.cookie_secret
        secret_from = 'config'
        # load priority: 1. config, 2. env, 3. file
        if not secret and os.environ.get(env_name):
            secret_from = 'env'
            self.log.info("Loading %s from env[%s]", trait_name, env_name)
            secret = binascii.a2b_hex(os.environ[env_name])
        if not secret and os.path.exists(secret_file):
            secret_from = 'file'
            perm = os.stat(secret_file).st_mode
            if perm & 0o077:
                self.log.error("Bad permissions on %s", secret_file)
            else:
                self.log.info("Loading %s from %s", trait_name, secret_file)
                with open(secret_file) as f:
                    b64_secret = f.read()
                try:
                    secret = binascii.a2b_base64(b64_secret)
                except Exception as e:
                    self.log.error("%s does not contain b64 key: %s",
                                   secret_file, e)
        if not secret:
            secret_from = 'new'
            self.log.debug("Generating new %s", trait_name)
            secret = os.urandom(SECRET_BYTES)

        if secret_file and secret_from == 'new':
            # if we generated a new secret, store it in the secret_file
            self.log.info("Writing %s to %s", trait_name, secret_file)
            b64_secret = binascii.b2a_base64(secret).decode('ascii')
            with open(secret_file, 'w') as f:
                f.write(b64_secret)
            try:
                os.chmod(secret_file, 0o600)
            except OSError:
                self.log.warn("Failed to set permissions on %s", secret_file)
        # store the loaded trait value
        self.cookie_secret = secret

    def init_db(self):
        """Create the database connection"""
        self.log.debug("Connecting to db: %s", self.db_url)
        try:
            self.session_factory = orm.new_session_factory(self.db_url,
                                                           reset=self.reset_db,
                                                           echo=self.debug_db,
                                                           **self.db_kwargs)
            self.db = scoped_session(self.session_factory)()
        except OperationalError as e:
            self.log.error("Failed to connect to db: %s", self.db_url)
            self.log.debug("Database error was:", exc_info=True)
            if self.db_url.startswith('sqlite:///'):
                self._check_db_path(self.db_url.split(':///', 1)[1])
            self.exit(1)

    def init_hub(self):
        """Load the Hub config into the database"""
        self.hub = self.db.query(orm.Hub).first()
        if self.hub is None:
            self.hub = orm.Hub(server=orm.Server(
                ip=self.hub_ip,
                port=self.hub_port,
                base_url=self.hub_prefix,
                cookie_name='jupyter-hub-token',
            ))
            self.db.add(self.hub)
        else:
            server = self.hub.server
            server.ip = self.hub_ip
            server.port = self.hub_port
            server.base_url = self.hub_prefix

        self.db.commit()

    @gen.coroutine
    def init_users(self):
        """Load users into and from the database"""
        db = self.db

        if not self.admin_users:
            # add current user as admin if there aren't any others
            admins = db.query(orm.User).filter(orm.User.admin == True)
            if admins.first() is None:
                self.admin_users.add(getuser())

        new_users = []

        for name in self.admin_users:
            # ensure anyone specified as admin in config is admin in db
            user = orm.User.find(db, name)
            if user is None:
                user = orm.User(name=name, admin=True)
                new_users.append(user)
                db.add(user)
            else:
                user.admin = True

        # the admin_users config variable will never be used after this point.
        # only the database values will be referenced.

        whitelist = self.authenticator.whitelist

        if not whitelist:
            self.log.info(
                "Not using whitelist. Any authenticated user will be allowed.")

        # add whitelisted users to the db
        for name in whitelist:
            user = orm.User.find(db, name)
            if user is None:
                user = orm.User(name=name)
                new_users.append(user)
                db.add(user)

        if whitelist:
            # fill the whitelist with any users loaded from the db,
            # so we are consistent in both directions.
            # This lets whitelist be used to set up initial list,
            # but changes to the whitelist can occur in the database,
            # and persist across sessions.
            for user in db.query(orm.User):
                whitelist.add(user.name)

        # The whitelist set and the users in the db are now the same.
        # From this point on, any user changes should be done simultaneously
        # to the whitelist set and user db, unless the whitelist is empty (all users allowed).

        db.commit()

        for user in new_users:
            yield gen.maybe_future(self.authenticator.add_user(user))
        db.commit()

        user_summaries = ['']

        def _user_summary(user):
            parts = ['{0: >8}'.format(user.name)]
            if user.admin:
                parts.append('admin')
            if user.server:
                parts.append('running at %s' % user.server)
            return ' '.join(parts)

        @gen.coroutine
        def user_stopped(user):
            status = yield user.spawner.poll()
            self.log.warn(
                "User %s server stopped with exit code: %s",
                user.name,
                status,
            )
            yield self.proxy.delete_user(user)
            yield user.stop()

        for user in db.query(orm.User):
            if not user.state:
                # without spawner state, server isn't valid
                user.server = None
                user_summaries.append(_user_summary(user))
                continue
            self.log.debug("Loading state for %s from db", user.name)
            user.spawner = spawner = self.spawner_class(
                user=user,
                hub=self.hub,
                config=self.config,
                db=self.db,
            )
            status = yield spawner.poll()
            if status is None:
                self.log.info("%s still running", user.name)
                spawner.add_poll_callback(user_stopped, user)
                spawner.start_polling()
            else:
                # user not running. This is expected if server is None,
                # but indicates the user's server died while the Hub wasn't running
                # if user.server is defined.
                log = self.log.warn if user.server else self.log.debug
                log("%s not running.", user.name)
                user.server = None

            user_summaries.append(_user_summary(user))

        self.log.debug("Loaded users: %s", '\n'.join(user_summaries))
        db.commit()

    def init_proxy(self):
        """Load the Proxy config into the database"""
        self.proxy = self.db.query(orm.Proxy).first()
        if self.proxy is None:
            self.proxy = orm.Proxy(
                public_server=orm.Server(),
                api_server=orm.Server(),
            )
            self.db.add(self.proxy)
            self.db.commit()
        self.proxy.auth_token = self.proxy_auth_token  # not persisted
        self.proxy.log = self.log
        self.proxy.public_server.ip = self.ip
        self.proxy.public_server.port = self.port
        self.proxy.api_server.ip = self.proxy_api_ip
        self.proxy.api_server.port = self.proxy_api_port
        self.proxy.api_server.base_url = '/api/routes/'
        self.db.commit()

    @gen.coroutine
    def start_proxy(self):
        """Actually start the configurable-http-proxy"""
        # check for proxy
        if self.proxy.public_server.is_up() or self.proxy.api_server.is_up():
            # check for *authenticated* access to the proxy (auth token can change)
            try:
                yield self.proxy.get_routes()
            except (HTTPError, OSError, socket.error) as e:
                if isinstance(e, HTTPError) and e.code == 403:
                    msg = "Did CONFIGPROXY_AUTH_TOKEN change?"
                else:
                    msg = "Is something else using %s?" % self.proxy.public_server.bind_url
                self.log.error(
                    "Proxy appears to be running at %s, but I can't access it (%s)\n%s",
                    self.proxy.public_server.bind_url, e, msg)
                self.exit(1)
                return
            else:
                self.log.info("Proxy already running at: %s",
                              self.proxy.public_server.bind_url)
            self.proxy_process = None
            return

        env = os.environ.copy()
        env['CONFIGPROXY_AUTH_TOKEN'] = self.proxy.auth_token
        cmd = [
            self.proxy_cmd,
            '--ip',
            self.proxy.public_server.ip,
            '--port',
            str(self.proxy.public_server.port),
            '--api-ip',
            self.proxy.api_server.ip,
            '--api-port',
            str(self.proxy.api_server.port),
            '--default-target',
            self.hub.server.host,
        ]
        if self.debug_proxy:
            cmd.extend(['--log-level', 'debug'])
        if self.ssl_key:
            cmd.extend(['--ssl-key', self.ssl_key])
        if self.ssl_cert:
            cmd.extend(['--ssl-cert', self.ssl_cert])
        self.log.info("Starting proxy @ %s", self.proxy.public_server.bind_url)
        self.log.debug("Proxy cmd: %s", cmd)
        try:
            self.proxy_process = Popen(cmd, env=env)
        except FileNotFoundError as e:
            self.log.error(
                "Failed to find proxy %r\n"
                "The proxy can be installed with `npm install -g configurable-http-proxy`"
                % self.proxy_cmd)
            self.exit(1)

        def _check():
            status = self.proxy_process.poll()
            if status is not None:
                e = RuntimeError("Proxy failed to start with exit code %i" %
                                 status)
                # py2-compatible `raise e from None`
                e.__cause__ = None
                raise e

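        # Give each endpoint up to ~10s to come up, re-checking between
        # attempts that the proxy process hasn't already died; the final
        # wait_up(1) re-raises TimeoutError if the server never bound.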
        for server in (self.proxy.public_server, self.proxy.api_server):
            for i in range(10):
                _check()
                try:
                    yield server.wait_up(1)
                except TimeoutError:
                    continue
                else:
                    break
            yield server.wait_up(1)
        self.log.debug("Proxy started and appears to be up")

    @gen.coroutine
    def check_proxy(self):
        if self.proxy_process.poll() is None:
            return
        self.log.error(
            "Proxy stopped with exit code %r",
            'unknown' if self.proxy_process is None
            else self.proxy_process.poll())
        yield self.start_proxy()
        self.log.info("Setting up routes on new proxy")
        yield self.proxy.add_all_users()
        self.log.info("New proxy back up, and good to go")

    def init_tornado_settings(self):
        """Set up the tornado settings dict."""
        base_url = self.hub.server.base_url
        template_path = os.path.join(self.data_files_path, 'templates')
        jinja_env = Environment(loader=FileSystemLoader(template_path),
                                **self.jinja_environment_options)

        login_url = self.authenticator.login_url(base_url)
        logout_url = self.authenticator.logout_url(base_url)

        # if running from git, disable caching of require.js
        # otherwise cache based on server start time
        parent = os.path.dirname(os.path.dirname(jupyterhub.__file__))
        if os.path.isdir(os.path.join(parent, '.git')):
            version_hash = ''
        else:
            version_hash = datetime.now().strftime("%Y%m%d%H%M%S")

        settings = dict(
            log_function=log_request,
            config=self.config,
            log=self.log,
            db=self.db,
            proxy=self.proxy,
            hub=self.hub,
            admin_users=self.admin_users,
            admin_access=self.admin_access,
            authenticator=self.authenticator,
            spawner_class=self.spawner_class,
            base_url=self.base_url,
            cookie_secret=self.cookie_secret,
            cookie_max_age_days=self.cookie_max_age_days,
            login_url=login_url,
            logout_url=logout_url,
            static_path=os.path.join(self.data_files_path, 'static'),
            static_url_prefix=url_path_join(self.hub.server.base_url,
                                            'static/'),
            static_handler_class=CacheControlStaticFilesHandler,
            template_path=template_path,
            jinja2_env=jinja_env,
            version_hash=version_hash,
        )
        # allow configured settings to have priority
        settings.update(self.tornado_settings)
        self.tornado_settings = settings

    def init_tornado_application(self):
        """Instantiate the tornado Application object"""
        self.tornado_application = web.Application(self.handlers,
                                                   **self.tornado_settings)

    def write_pid_file(self):
        pid = os.getpid()
        if self.pid_file:
            self.log.debug("Writing PID %i to %s", pid, self.pid_file)
            with open(self.pid_file, 'w') as f:
                f.write('%i' % pid)

    @gen.coroutine
    @catch_config_error
    def initialize(self, *args, **kwargs):
        super().initialize(*args, **kwargs)
        if self.generate_config or self.subapp:
            return
        self.load_config_file(self.config_file)
        self.init_logging()
        if 'JupyterHubApp' in self.config:
            self.log.warn(
                "Use JupyterHub in config, not JupyterHubApp. Outdated config:\n%s",
                '\n'.join('JupyterHubApp.{key} = {value!r}'.format(key=key,
                                                                   value=value)
                          for key, value in self.config.JupyterHubApp.items()))
            cfg = self.config.copy()
            cfg.JupyterHub.merge(cfg.JupyterHubApp)
            self.update_config(cfg)
        self.write_pid_file()
        self.init_ports()
        self.init_secrets()
        self.init_db()
        self.init_hub()
        self.init_proxy()
        yield self.init_users()
        self.init_handlers()
        self.init_tornado_settings()
        self.init_tornado_application()

    @gen.coroutine
    def cleanup(self):
        """Shutdown our various subprocesses and cleanup runtime files."""

        futures = []
        if self.cleanup_servers:
            self.log.info("Cleaning up single-user servers...")
            # request (async) process termination
            for user in self.db.query(orm.User):
                if user.spawner is not None:
                    futures.append(user.stop())
        else:
            self.log.info("Leaving single-user servers running")

        # clean up proxy while SUS are shutting down
        if self.cleanup_proxy:
            if self.proxy_process:
                self.log.info("Cleaning up proxy[%i]...",
                              self.proxy_process.pid)
                if self.proxy_process.poll() is None:
                    try:
                        self.proxy_process.terminate()
                    except Exception as e:
                        self.log.error("Failed to terminate proxy process: %s",
                                       e)
            else:
                self.log.info("I didn't start the proxy, I can't clean it up")
        else:
            self.log.info("Leaving proxy running")

        # wait for the stop requests to finish:
        for f in futures:
            try:
                yield f
            except Exception as e:
                self.log.error("Failed to stop user: %s", e)

        self.db.commit()

        if self.pid_file and os.path.exists(self.pid_file):
            self.log.info("Cleaning up PID file %s", self.pid_file)
            os.remove(self.pid_file)

        # finally stop the loop once we are all cleaned up
        self.log.info("...done")

    def write_config_file(self):
        """Write our default config to a .py config file"""
        if os.path.exists(self.config_file) and not self.answer_yes:
            answer = ''

            def ask():
                prompt = "Overwrite %s with default config? [y/N]" % self.config_file
                try:
                    return input(prompt).lower() or 'n'
                except KeyboardInterrupt:
                    print('')  # empty line
                    return 'n'

            answer = ask()
            while not answer.startswith(('y', 'n')):
                print("Please answer 'yes' or 'no'")
                answer = ask()
            if answer.startswith('n'):
                return

        config_text = self.generate_config_file()
        if isinstance(config_text, bytes):
            config_text = config_text.decode('utf8')
        print("Writing default config to: %s" % self.config_file)
        with open(self.config_file, mode='w') as f:
            f.write(config_text)

    @gen.coroutine
    def update_last_activity(self):
        """Update User.last_activity timestamps from the proxy"""
        routes = yield self.proxy.get_routes()
        for prefix, route in routes.items():
            if 'user' not in route:
                # not a user route, ignore it
                continue
            user = orm.User.find(self.db, route['user'])
            if user is None:
                self.log.warn("Found no user for route: %s", route)
                continue
            try:
                dt = datetime.strptime(route['last_activity'], ISO8601_ms)
            except Exception:
                dt = datetime.strptime(route['last_activity'], ISO8601_s)
            user.last_activity = max(user.last_activity, dt)

        self.db.commit()
        yield self.proxy.check_routes(routes)

    @gen.coroutine
    def start(self):
        """Start the whole thing"""
        self.io_loop = loop = IOLoop.current()

        if self.subapp:
            self.subapp.start()
            loop.stop()
            return

        if self.generate_config:
            self.write_config_file()
            loop.stop()
            return

        # start the proxy
        try:
            yield self.start_proxy()
        except Exception as e:
            self.log.critical("Failed to start proxy", exc_info=True)
            self.exit(1)
            return

        loop.add_callback(self.proxy.add_all_users)

        if self.proxy_process:
            # only check / restart the proxy if we started it in the first place.
            # this means a restarted Hub cannot restart a Proxy that its
            # predecessor started.
            pc = PeriodicCallback(self.check_proxy,
                                  1e3 * self.proxy_check_interval)
            pc.start()

        if self.last_activity_interval:
            pc = PeriodicCallback(self.update_last_activity,
                                  1e3 * self.last_activity_interval)
            pc.start()

        # start the webserver
        self.http_server = tornado.httpserver.HTTPServer(
            self.tornado_application, xheaders=True)
        try:
            self.http_server.listen(self.hub_port, address=self.hub_ip)
        except Exception:
            self.log.error("Failed to bind hub to %s" %
                           self.hub.server.bind_url)
            raise
        else:
            self.log.info("Hub API listening on %s" % self.hub.server.bind_url)

        # register cleanup on both TERM and INT
        atexit.register(self.atexit)
        signal.signal(signal.SIGTERM, self.sigterm)

    def sigterm(self, signum, frame):
        self.log.critical("Received SIGTERM, shutting down")
        self.io_loop.stop()
        self.atexit()

    _atexit_ran = False

    def atexit(self):
        """atexit callback"""
        if self._atexit_ran:
            return
        self._atexit_ran = True
        # run the cleanup step (in a new loop, because the interrupted one is unclean)
        IOLoop.clear_current()
        loop = IOLoop()
        loop.make_current()
        loop.run_sync(self.cleanup)

    def stop(self):
        if not self.io_loop:
            return
        if self.http_server:
            self.io_loop.add_callback(self.http_server.stop)
        self.io_loop.add_callback(self.io_loop.stop)

    @gen.coroutine
    def launch_instance_async(self, argv=None):
        try:
            yield self.initialize(argv)
            yield self.start()
        except Exception as e:
            self.log.exception("")
            self.exit(1)

    @classmethod
    def launch_instance(cls, argv=None):
        self = cls.instance()
        loop = IOLoop.current()
        loop.add_callback(self.launch_instance_async, argv)
        try:
            loop.start()
        except KeyboardInterrupt:
            print("\nInterrupted")
Example #21
class MappingKernelManager(MultiKernelManager):
    """A KernelManager that handles notebok mapping and HTTP error handling"""

    kernel_argv = List(Unicode)
    kernel_manager = Instance(KernelManager)
    time_to_dead = Float(3.0,
                         config=True,
                         help="""Kernel heartbeat interval in seconds.""")
    max_msg_size = Int(65536,
                       config=True,
                       help="""
        The max raw message size accepted from the browser
        over a WebSocket connection.
    """)

    _notebook_mapping = Dict()

    #-------------------------------------------------------------------------
    # Methods for managing kernels and sessions
    #-------------------------------------------------------------------------

    def kernel_for_notebook(self, notebook_id):
        """Return the kernel_id for a notebook_id or None."""
        return self._notebook_mapping.get(notebook_id)

    def set_kernel_for_notebook(self, notebook_id, kernel_id):
        """Associate a notebook with a kernel."""
        if notebook_id is not None:
            self._notebook_mapping[notebook_id] = kernel_id

    def notebook_for_kernel(self, kernel_id):
        """Return the notebook_id for a kernel_id or None."""
        notebook_ids = [
            k for k, v in self._notebook_mapping.items() if v == kernel_id
        ]
        if len(notebook_ids) == 1:
            return notebook_ids[0]
        else:
            return None

    def delete_mapping_for_kernel(self, kernel_id):
        """Remove the kernel/notebook mapping for kernel_id."""
        notebook_id = self.notebook_for_kernel(kernel_id)
        if notebook_id is not None:
            del self._notebook_mapping[notebook_id]

    def start_kernel(self, notebook_id=None):
        """Start a kernel for a notebok an return its kernel_id.

        Parameters
        ----------
        notebook_id : uuid
            The uuid of the notebook to associate the new kernel with. If this
            is not None, the same kernel will be reused whenever the notebook
            requests one.
        """
        kernel_id = self.kernel_for_notebook(notebook_id)
        if kernel_id is None:
            kwargs = dict()
            kwargs['extra_arguments'] = self.kernel_argv
            kernel_id = super(MappingKernelManager,
                              self).start_kernel(**kwargs)
            self.set_kernel_for_notebook(notebook_id, kernel_id)
            self.log.info("Kernel started: %s" % kernel_id)
            self.log.debug("Kernel args: %r" % kwargs)
        else:
            self.log.info("Using existing kernel: %s" % kernel_id)
        return kernel_id

    def kill_kernel(self, kernel_id):
        """Kill a kernel and remove its notebook association."""
        self._check_kernel_id(kernel_id)
        super(MappingKernelManager, self).kill_kernel(kernel_id)
        self.delete_mapping_for_kernel(kernel_id)
        self.log.info("Kernel killed: %s" % kernel_id)

    def interrupt_kernel(self, kernel_id):
        """Interrupt a kernel."""
        self._check_kernel_id(kernel_id)
        super(MappingKernelManager, self).interrupt_kernel(kernel_id)
        self.log.info("Kernel interrupted: %s" % kernel_id)

    def restart_kernel(self, kernel_id):
        """Restart a kernel while keeping clients connected."""
        self._check_kernel_id(kernel_id)
        km = self.get_kernel(kernel_id)
        km.restart_kernel(now=True)
        self.log.info("Kernel restarted: %s" % kernel_id)
        return kernel_id

        # the following remains, in case the KM restart machinery is
        # somehow unacceptable
        # Get the notebook_id to preserve the kernel/notebook association.
        notebook_id = self.notebook_for_kernel(kernel_id)
        # Create the new kernel first so we can move the clients over.
        new_kernel_id = self.start_kernel()
        # Now kill the old kernel.
        self.kill_kernel(kernel_id)
        # Now save the new kernel/notebook association. We have to save it
        # after the old kernel is killed as that will delete the mapping.
        self.set_kernel_for_notebook(notebook_id, new_kernel_id)
        self.log.info("Kernel restarted: %s" % new_kernel_id)
        return new_kernel_id

    def create_iopub_stream(self, kernel_id):
        """Create a new iopub stream."""
        self._check_kernel_id(kernel_id)
        return super(MappingKernelManager, self).create_iopub_stream(kernel_id)

    def create_shell_stream(self, kernel_id):
        """Create a new shell stream."""
        self._check_kernel_id(kernel_id)
        return super(MappingKernelManager, self).create_shell_stream(kernel_id)

    def create_hb_stream(self, kernel_id):
        """Create a new hb stream."""
        self._check_kernel_id(kernel_id)
        return super(MappingKernelManager, self).create_hb_stream(kernel_id)

    def _check_kernel_id(self, kernel_id):
        """Check a that a kernel_id exists and raise 404 if not."""
        if kernel_id not in self:
            raise web.HTTPError(404, u'Kernel does not exist: %s' % kernel_id)
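A minimal sketch of the notebook/kernel mapping helpers above, assuming the manager can be instantiated outside a running notebook server; the ids are made up, and only the pure mapping methods are exercised (start_kernel/kill_kernel need real kernel machinery):

mkm = MappingKernelManager()
mkm.set_kernel_for_notebook('nb-1', 'kernel-1')
print(mkm.kernel_for_notebook('nb-1'))      # 'kernel-1'
print(mkm.notebook_for_kernel('kernel-1'))  # 'nb-1'
mkm.delete_mapping_for_kernel('kernel-1')
print(mkm.kernel_for_notebook('nb-1'))      # None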
Example #22
class FlightAttitudeWidget(DOMWidget):
    _view_module = Unicode('nbextensions/flightwidgets/flightattitude',
                           sync=True)
    _view_name = Unicode('FlightAttitudeView', sync=True)
    pitch = Float(0., sync=True)
    roll = Float(0., sync=True)
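A hypothetical usage sketch: in a live notebook with the flightwidgets extension installed, assigning the sync=True traits pushes the new values to the JavaScript view (the values here are illustrative):

w = FlightAttitudeWidget()
w.pitch = 5.0   # mirrored to the browser view
w.roll = -2.5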
Example #23
class A(HasTraits):
    i = Int()
    x = Float()
Example #24
class Kernel(Configurable):

    #---------------------------------------------------------------------------
    # Kernel interface
    #---------------------------------------------------------------------------

    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
    session = Instance(Session)
    reply_socket = Instance('zmq.Socket')
    pub_socket = Instance('zmq.Socket')
    req_socket = Instance('zmq.Socket')

    # Private interface

    # Time to sleep after flushing the stdout/err buffers in each execute
    # cycle.  While this introduces a hard limit on the minimal latency of the
    # execute cycle, it helps prevent output synchronization problems for
    # clients.
    # Units are in seconds.  The minimum zmq latency on local host is probably
    # ~150 microseconds, set this to 500us for now.  We may need to increase it
    # a little if it's not enough after more interactive testing.
    _execute_sleep = Float(0.0005, config=True)

    # Frequency of the kernel's event loop.
    # Units are in seconds, kernel subclasses for GUI toolkits may need to
    # adapt to milliseconds.
    _poll_interval = Float(0.05, config=True)

    # If the shutdown was requested over the network, we leave here the
    # necessary reply message so it can be sent by our registered atexit
    # handler.  This ensures that the reply is only sent to clients truly at
    # the end of our shutdown process (which happens after the underlying
    # IPython shell's own shutdown).
    _shutdown_message = None

    # This is a dict of port numbers that the kernel is listening on. It is set
    # by record_ports and used by connect_request.
    _recorded_ports = None

    def __init__(self, **kwargs):
        super(Kernel, self).__init__(**kwargs)

        # Before we even start up the shell, register *first* our exit handlers
        # so they come before the shell's
        atexit.register(self._at_shutdown)

        # Initialize the InteractiveShell subclass
        self.shell = ZMQInteractiveShell.instance()
        self.shell.displayhook.session = self.session
        self.shell.displayhook.pub_socket = self.pub_socket

        # TMP - hack while developing
        self.shell._reply_content = None

        # Build dict of handlers for message types
        msg_types = [
            'execute_request', 'complete_request', 'object_info_request',
            'history_request', 'connect_request', 'shutdown_request'
        ]
        self.handlers = {}
        for msg_type in msg_types:
            self.handlers[msg_type] = getattr(self, msg_type)

    def do_one_iteration(self):
        """Do one iteration of the kernel's evaluation loop.
        """
        try:
            ident = self.reply_socket.recv(zmq.NOBLOCK)
        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                return
            else:
                raise
        # This assert would raise in versions of zeromq 2.0.7 and lesser.
        # We now require 2.0.8 or above, so it is safe to leave enabled.
        assert self.reply_socket.rcvmore(), "Missing message part."
        msg = self.reply_socket.recv_json()

        # Print some info about this message and leave a '--->' marker, so it's
        # easier to trace visually the message chain when debugging.  Each
        # handler prints its message at the end.
        # Eventually we'll move these from stdout to a logger.
        io.raw_print('\n*** MESSAGE TYPE:', msg['msg_type'], '***')
        io.raw_print('   Content: ',
                     msg['content'],
                     '\n   --->\n   ',
                     sep='',
                     end='')

        # Find and call actual handler for message
        handler = self.handlers.get(msg['msg_type'], None)
        if handler is None:
            io.raw_print_err("UNKNOWN MESSAGE TYPE:", msg)
        else:
            handler(ident, msg)

        # Check whether we should exit, in case the incoming message set the
        # exit flag on
        if self.shell.exit_now:
            io.raw_print('\nExiting IPython kernel...')
            # We do a normal, clean exit, which allows any actions registered
            # via atexit (such as history saving) to take place.
            sys.exit(0)
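The constructor above builds its handler table by looking up bound methods named after the message types. A self-contained sketch of that getattr-based dispatch pattern (class and message names are illustrative):

class Dispatcher(object):
    msg_types = ['greet_request', 'shutdown_request']

    def __init__(self):
        # map each message type to the same-named bound method
        self.handlers = {t: getattr(self, t) for t in self.msg_types}

    def greet_request(self, msg):
        return 'hello, %s' % msg

    def shutdown_request(self, msg):
        return 'shutting down: %s' % msg

d = Dispatcher()
print(d.handlers['greet_request']('world'))  # hello, world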
Example #25
class DictDB(BaseDB):
    """Basic in-memory dict-based object for saving Task Records.

    This is the first object to present the DB interface
    for logging tasks out of memory.

    The interface is based on MongoDB, so adding a MongoDB
    backend should be straightforward.
    """

    _records = Dict()
    _culled_ids = set()  # set of ids which have been culled
    _buffer_bytes = Integer(0)  # running total of the bytes in the DB

    size_limit = Integer(
        1024**3,
        config=True,
        help="""The maximum total size (in bytes) of the buffers stored in the db
        
        When the db exceeds this size, the oldest records will be culled until
        the total size is under size_limit * (1-cull_fraction).
        default: 1 GB
        """)
    record_limit = Integer(1024,
                           config=True,
                           help="""The maximum number of records in the db
        
        When the history exceeds this size, the first record_limit * cull_fraction
        records will be culled.
        """)
    cull_fraction = Float(
        0.1,
        config=True,
        help=
        """The fraction by which the db should culled when one of the limits is exceeded
        
        In general, the db size will spend most of its time with a size in the range:
        
        [limit * (1-cull_fraction), limit]
        
        for each of size_limit and record_limit.
        """)

    def _match_one(self, rec, tests):
        """Check if a specific record matches tests."""
        for key, test in iteritems(tests):
            if not test(rec.get(key, None)):
                return False
        return True

    def _match(self, check):
        """Find all the matches for a check dict."""
        matches = []
        tests = {}
        for k, v in iteritems(check):
            if isinstance(v, dict):
                tests[k] = CompositeFilter(v)
            else:
                # bind v per iteration: a bare lambda would close over the loop
                # variable, and every test would compare against the last value
                tests[k] = lambda o, v=v: o == v

        for rec in itervalues(self._records):
            if self._match_one(rec, tests):
                matches.append(copy(rec))
        return matches

    def _extract_subdict(self, rec, keys):
        """extract subdict of keys"""
        d = {}
        d['msg_id'] = rec['msg_id']
        for key in keys:
            d[key] = rec[key]
        return copy(d)

    # methods for monitoring size / culling history

    def _add_bytes(self, rec):
        for key in ('buffers', 'result_buffers'):
            for buf in rec.get(key) or []:
                self._buffer_bytes += len(buf)

        self._maybe_cull()

    def _drop_bytes(self, rec):
        for key in ('buffers', 'result_buffers'):
            for buf in rec.get(key) or []:
                self._buffer_bytes -= len(buf)

    def _cull_oldest(self, n=1):
        """cull the oldest N records"""
        for msg_id in self.get_history()[:n]:
            self.log.debug("Culling record: %r", msg_id)
            self._culled_ids.add(msg_id)
            self.drop_record(msg_id)

    def _maybe_cull(self):
        # cull by count:
        if len(self._records) > self.record_limit:
            to_cull = int(self.cull_fraction * self.record_limit)
            self.log.info("%i records exceeds limit of %i, culling oldest %i",
                          len(self._records), self.record_limit, to_cull)
            self._cull_oldest(to_cull)

        # cull by size:
        if self._buffer_bytes > self.size_limit:
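            # shrink below size_limit * (1 - cull_fraction), as documented on the trait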
            limit = self.size_limit * (1 - self.cull_fraction)

            before = self._buffer_bytes
            before_count = len(self._records)
            culled = 0
            while self._buffer_bytes > limit:
                self._cull_oldest(1)
                culled += 1

            self.log.info(
                "%i records with total buffer size %i exceeds limit: %i. Culled oldest %i records.",
                before_count, before, self.size_limit, culled)

    def _check_dates(self, rec):
        for key in ('submitted', 'started', 'completed'):
            value = rec.get(key, None)
            if value is not None and not isinstance(value, datetime):
                raise ValueError("%s must be None or datetime, not %r" %
                                 (key, value))

    # public API methods:

    def add_record(self, msg_id, rec):
        """Add a new Task Record, by msg_id."""
        if msg_id in self._records:
            raise KeyError("Already have msg_id %r" % (msg_id))
        self._check_dates(rec)
        self._records[msg_id] = rec
        self._add_bytes(rec)
        self._maybe_cull()

    def get_record(self, msg_id):
        """Get a specific Task Record, by msg_id."""
        if msg_id in self._culled_ids:
            raise KeyError("Record %r has been culled for size" % msg_id)
        if msg_id not in self._records:
            raise KeyError("No such msg_id %r" % (msg_id))
        return copy(self._records[msg_id])

    def update_record(self, msg_id, rec):
        """Update the data in an existing record."""
        if msg_id in self._culled_ids:
            raise KeyError("Record %r has been culled for size" % msg_id)
        self._check_dates(rec)
        _rec = self._records[msg_id]
        self._drop_bytes(_rec)
        _rec.update(rec)
        self._add_bytes(_rec)

    def drop_matching_records(self, check):
        """Remove a record from the DB."""
        matches = self._match(check)
        for rec in matches:
            self._drop_bytes(rec)
            del self._records[rec['msg_id']]

    def drop_record(self, msg_id):
        """Remove a record from the DB."""
        rec = self._records[msg_id]
        self._drop_bytes(rec)
        del self._records[msg_id]

    def find_records(self, check, keys=None):
        """Find records matching a query dict, optionally extracting subset of keys.

        Returns dict keyed by msg_id of matching records.

        Parameters
        ----------

        check: dict
            mongodb-style query argument
        keys: list of strs [optional]
            if specified, the subset of keys to extract.  msg_id will *always* be
            included.
        """
        matches = self._match(check)
        if keys:
            return [self._extract_subdict(rec, keys) for rec in matches]
        else:
            return matches

    def get_history(self):
        """get all msg_ids, ordered by time submitted."""
        msg_ids = self._records.keys()
        # Remove any that do not have a submitted timestamp.
        # This is extremely unlikely to happen,
        # but it seems to come up in some tests on VMs.
        msg_ids = [
            m for m in msg_ids if self._records[m].get('submitted') is not None
        ]
        return sorted(msg_ids, key=lambda m: self._records[m]['submitted'])
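A hedged usage sketch of the DictDB API above, assuming the class can be constructed standalone with its default config; the record contents are illustrative, and a real Task Record carries many more fields:

from datetime import datetime

db = DictDB()
rec = {'msg_id': 'abc', 'submitted': datetime.now(), 'buffers': [b'payload']}
db.add_record('abc', rec)
print(db.get_record('abc')['msg_id'])      # 'abc'
print(db.find_records({'msg_id': 'abc'}))  # [<copy of rec>]
db.drop_record('abc')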
Example #26
class EngineFactory(RegistrationFactory):
    """IPython engine"""

    # configurables:
    out_stream_factory=Type('IPython.kernel.zmq.iostream.OutStream', config=True,
        help="""The OutStream for handling stdout/err.
        Typically 'IPython.kernel.zmq.iostream.OutStream'""")
    display_hook_factory=Type('IPython.kernel.zmq.displayhook.ZMQDisplayHook', config=True,
        help="""The class for handling displayhook.
        Typically 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'""")
    location=Unicode(config=True,
        help="""The location (an IP address) of the controller.  This is
        used for disambiguating URLs, to determine whether
        loopback should be used to connect or the public address.""")
    timeout=Float(5.0, config=True,
        help="""The time (in seconds) to wait for the Controller to respond
        to registration requests before giving up.""")
    max_heartbeat_misses=Integer(50, config=True,
        help="""The maximum number of times a check for the heartbeat ping of a 
        controller can be missed before shutting down the engine.
        
        If set to 0, the check is disabled.""")
    sshserver=Unicode(config=True,
        help="""The SSH server to use for tunneling connections to the Controller.""")
    sshkey=Unicode(config=True,
        help="""The SSH private key file to use when tunneling connections to the Controller.""")
    paramiko=Bool(sys.platform == 'win32', config=True,
        help="""Whether to use paramiko instead of openssh for tunnels.""")
    
    @property
    def tunnel_mod(self):
        from zmq.ssh import tunnel
        return tunnel


    # not configurable:
    connection_info = Dict()
    user_ns = Dict()
    id = Integer(allow_none=True)
    registrar = Instance('zmq.eventloop.zmqstream.ZMQStream')
    kernel = Instance(Kernel)
    hb_check_period=Integer()
    
    # States for the heartbeat monitoring
    # Initial values for monitored and pinged must satisfy "monitored > pinged == False" so that 
    # during the first check no "missed" ping is reported. Must be floats for Python 3 compatibility.
    _hb_last_pinged = 0.0
    _hb_last_monitored = 0.0
    _hb_missed_beats = 0
    # The zmq Stream which receives the pings from the Heart
    _hb_listener = None

    bident = CBytes()
    ident = Unicode()
    def _ident_changed(self, name, old, new):
        self.bident = cast_bytes(new)
    using_ssh=Bool(False)


    def __init__(self, **kwargs):
        super(EngineFactory, self).__init__(**kwargs)
        self.ident = self.session.session

    def init_connector(self):
        """construct connection function, which handles tunnels."""
        self.using_ssh = bool(self.sshkey or self.sshserver)

        if self.sshkey and not self.sshserver:
            # We are using ssh directly to the controller, tunneling localhost to localhost
            self.sshserver = self.url.split('://')[1].split(':')[0]

        if self.using_ssh:
            if self.tunnel_mod.try_passwordless_ssh(self.sshserver, self.sshkey, self.paramiko):
                password=False
            else:
                password = getpass("SSH Password for %s: "%self.sshserver)
        else:
            password = False

        def connect(s, url):
            url = disambiguate_url(url, self.location)
            if self.using_ssh:
                self.log.debug("Tunneling connection to %s via %s", url, self.sshserver)
                return self.tunnel_mod.tunnel_connection(s, url, self.sshserver,
                            keyfile=self.sshkey, paramiko=self.paramiko,
                            password=password,
                )
            else:
                return s.connect(url)

        def maybe_tunnel(url):
            """like connect, but don't complete the connection (for use by heartbeat)"""
            url = disambiguate_url(url, self.location)
            if self.using_ssh:
                self.log.debug("Tunneling connection to %s via %s", url, self.sshserver)
                url, tunnelobj = self.tunnel_mod.open_tunnel(url, self.sshserver,
                            keyfile=self.sshkey, paramiko=self.paramiko,
                            password=password,
                )
            return str(url)
        return connect, maybe_tunnel

    def register(self):
        """send the registration_request"""

        self.log.info("Registering with controller at %s"%self.url)
        ctx = self.context
        connect,maybe_tunnel = self.init_connector()
        reg = ctx.socket(zmq.DEALER)
        reg.setsockopt(zmq.IDENTITY, self.bident)
        connect(reg, self.url)
        self.registrar = zmqstream.ZMQStream(reg, self.loop)


        content = dict(uuid=self.ident)
        self.registrar.on_recv(lambda msg: self.complete_registration(msg, connect, maybe_tunnel))
        # print (self.session.key)
        self.session.send(self.registrar, "registration_request", content=content)

    def _report_ping(self, msg):
        """Callback for when the heartmonitor.Heart receives a ping"""
        #self.log.debug("Received a ping: %s", msg)
        self._hb_last_pinged = time.time()

    def complete_registration(self, msg, connect, maybe_tunnel):
        # print msg
        self.loop.remove_timeout(self._abort_timeout)
        ctx = self.context
        loop = self.loop
        identity = self.bident
        idents,msg = self.session.feed_identities(msg)
        msg = self.session.deserialize(msg)
        content = msg['content']
        info = self.connection_info
        
        def url(key):
            """get zmq url for given channel"""
            return str(info["interface"] + ":%i" % info[key])
        
        if content['status'] == 'ok':
            self.id = int(content['id'])

            # launch heartbeat
            # possibly forward hb ports with tunnels
            hb_ping = maybe_tunnel(url('hb_ping'))
            hb_pong = maybe_tunnel(url('hb_pong'))
            
            hb_monitor = None
            if self.max_heartbeat_misses > 0:
                # Add a monitor socket which will record the last time a ping was seen
                mon = self.context.socket(zmq.SUB)
                mport = mon.bind_to_random_port('tcp://%s' % localhost())
                mon.setsockopt(zmq.SUBSCRIBE, b"")
                self._hb_listener = zmqstream.ZMQStream(mon, self.loop)
                self._hb_listener.on_recv(self._report_ping)
            
            
                hb_monitor = "tcp://%s:%i" % (localhost(), mport)

            heart = Heart(hb_ping, hb_pong, hb_monitor, heart_id=identity)
            heart.start()

            # create Shell Connections (MUX, Task, etc.):
            shell_addrs = url('mux'), url('task')

            # Use only one shell stream for mux and tasks
            stream = zmqstream.ZMQStream(ctx.socket(zmq.ROUTER), loop)
            stream.setsockopt(zmq.IDENTITY, identity)
            shell_streams = [stream]
            for addr in shell_addrs:
                connect(stream, addr)

            # control stream:
            control_addr = url('control')
            control_stream = zmqstream.ZMQStream(ctx.socket(zmq.ROUTER), loop)
            control_stream.setsockopt(zmq.IDENTITY, identity)
            connect(control_stream, control_addr)

            # create iopub stream:
            iopub_addr = url('iopub')
            iopub_socket = ctx.socket(zmq.PUB)
            iopub_socket.setsockopt(zmq.IDENTITY, identity)
            connect(iopub_socket, iopub_addr)

            # disable history:
            self.config.HistoryManager.hist_file = ':memory:'
            
            # Redirect input streams and set a display hook.
            if self.out_stream_factory:
                sys.stdout = self.out_stream_factory(self.session, iopub_socket, u'stdout')
                sys.stdout.topic = cast_bytes('engine.%i.stdout' % self.id)
                sys.stderr = self.out_stream_factory(self.session, iopub_socket, u'stderr')
                sys.stderr.topic = cast_bytes('engine.%i.stderr' % self.id)
            if self.display_hook_factory:
                sys.displayhook = self.display_hook_factory(self.session, iopub_socket)
                sys.displayhook.topic = cast_bytes('engine.%i.execute_result' % self.id)

            self.kernel = Kernel(parent=self, int_id=self.id, ident=self.ident, session=self.session,
                    control_stream=control_stream, shell_streams=shell_streams, iopub_socket=iopub_socket,
                    loop=loop, user_ns=self.user_ns, log=self.log)
            
            self.kernel.shell.display_pub.topic = cast_bytes('engine.%i.displaypub' % self.id)
            
                
            # periodically check the heartbeat pings of the controller
            # Should be started here and not in "start()" so that the right period can be taken 
            # from the hubs HeartBeatMonitor.period
            if self.max_heartbeat_misses > 0:
                # Use a slightly bigger check period than the hub signal period to avoid unnecessary warnings
                self.hb_check_period = int(content['hb_period'])+10
                self.log.info("Starting to monitor the heartbeat signal from the hub every %i ms." , self.hb_check_period)
                self._hb_reporter = ioloop.PeriodicCallback(self._hb_monitor, self.hb_check_period, self.loop)
                self._hb_reporter.start()
            else:
                self.log.info("Monitoring of the heartbeat signal from the hub is not enabled.")

            
            # FIXME: This is a hack until IPKernelApp and IPEngineApp can be fully merged
            app = IPKernelApp(parent=self, shell=self.kernel.shell, kernel=self.kernel, log=self.log)
            app.init_profile_dir()
            app.init_code()
            
            self.kernel.start()
        else:
            self.log.fatal("Registration Failed: %s"%msg)
            raise Exception("Registration Failed: %s"%msg)

        self.log.info("Completed registration with id %i"%self.id)


    def abort(self):
        self.log.fatal("Registration timed out after %.1f seconds"%self.timeout)
        if self.url.startswith('127.'):
            self.log.fatal("""
            If the controller and engines are not on the same machine,
            you will have to instruct the controller to listen on an external IP (in ipcontroller_config.py):
                c.HubFactory.ip='*' # for all interfaces, internal and external
                c.HubFactory.ip='192.168.1.101' # or any interface that the engines can see
            or tunnel connections via ssh.
            """)
        self.session.send(self.registrar, "unregistration_request", content=dict(id=self.id))
        time.sleep(1)
        sys.exit(255)

    def _hb_monitor(self):
        """Callback to monitor the heartbeat from the controller"""
        self._hb_listener.flush()
        if self._hb_last_monitored > self._hb_last_pinged:
            self._hb_missed_beats += 1
            self.log.warn("No heartbeat in the last %s ms (%s time(s) in a row).", self.hb_check_period, self._hb_missed_beats)
        else:
            #self.log.debug("Heartbeat received (after missing %s beats).", self._hb_missed_beats)
            self._hb_missed_beats = 0

        if self._hb_missed_beats >= self.max_heartbeat_misses:
            self.log.fatal("Maximum number of heartbeats misses reached (%s times %s ms), shutting down.",
                           self.max_heartbeat_misses, self.hb_check_period)
            self.session.send(self.registrar, "unregistration_request", content=dict(id=self.id))
            self.loop.stop()

        self._hb_last_monitored = time.time()
            
        
    def start(self):
        loop = self.loop
        def _start():
            self.register()
            self._abort_timeout = loop.add_timeout(loop.time() + self.timeout, self.abort)
        self.loop.add_callback(_start)
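The `_ident_changed` hook above uses the IPython-era magic-method convention for trait-change handlers; in current traitlets the equivalent pattern uses the @observe decorator. A minimal sketch of the same ident -> bident synchronization (class and trait names are illustrative):

from traitlets import HasTraits, Unicode, Bytes, observe

class Named(HasTraits):
    ident = Unicode()
    bident = Bytes()

    @observe('ident')
    def _on_ident(self, change):
        # keep the bytes identity in sync with the unicode one
        self.bident = change['new'].encode('utf8')

n = Named()
n.ident = u'engine-1'
print(n.bident)  # b'engine-1'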
Example #27
class MyConfigurable(Configurable):
    a = Int(1, config=True, help="The integer a.")
    b = Float(1.0, config=True, help="The float b.")
    c = Unicode('no config')
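A minimal sketch of how the config=True traits above are driven from a Config object, assuming the modern traitlets.config package (IPython.config in the era of this snippet); c, lacking config=True, keeps its default:

from traitlets.config import Config

cfg = Config()
cfg.MyConfigurable.a = 5
cfg.MyConfigurable.b = 2.5

obj = MyConfigurable(config=cfg)
print(obj.a, obj.b, obj.c)  # 5 2.5 no config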
Example #28
class DisplayHook(Configurable):
    """The custom IPython displayhook to replace sys.displayhook.

    This class does many things, but the basic idea is that it is a callable
    that gets called anytime user code returns a value.
    """

    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
    cull_fraction = Float(0.2)

    def __init__(self, shell=None, cache_size=1000, **kwargs):
        super(DisplayHook, self).__init__(shell=shell, **kwargs)
        cache_size_min = 3
        if cache_size <= 0:
            self.do_full_cache = 0
            cache_size = 0
        elif cache_size < cache_size_min:
            self.do_full_cache = 0
            cache_size = 0
            warn('caching was disabled (min value for cache size is %s).' %
                 cache_size_min, level=3)
        else:
            self.do_full_cache = 1

        self.cache_size = cache_size

        # we need a reference to the user-level namespace
        self.shell = shell
        
        self._, self.__, self.___ = '', '', ''

        # these are deliberately global:
        to_user_ns = {'_': self._, '__': self.__, '___': self.___}
        self.shell.user_ns.update(to_user_ns)

    @property
    def prompt_count(self):
        return self.shell.execution_count

    #-------------------------------------------------------------------------
    # Methods used in __call__. Override these methods to modify the behavior
    # of the displayhook.
    #-------------------------------------------------------------------------

    def check_for_underscore(self):
        """Check if the user has set the '_' variable by hand."""
        # If something injected a '_' variable in __builtin__, delete
        # ipython's automatic one so we don't clobber that.  gettext() in
        # particular uses _, so we need to stay away from it.
        if '_' in builtin_mod.__dict__:
            try:
                del self.shell.user_ns['_']
            except KeyError:
                pass

    def quiet(self):
        """Should we silence the display hook because of ';'?"""
        # do not print output if input ends in ';'
        try:
            cell = self.shell.history_manager.input_hist_parsed[self.prompt_count]
            return cell.rstrip().endswith(';')
        except IndexError:
            # some uses of ipshellembed may fail here
            return False

    def start_displayhook(self):
        """Start the displayhook, initializing resources."""
        pass

    def write_output_prompt(self):
        """Write the output prompt.

        The default implementation simply writes the prompt to
        ``io.stdout``.
        """
        # Use write, not print which adds an extra space.
        io.stdout.write(self.shell.separate_out)
        outprompt = self.shell.prompt_manager.render('out')
        if self.do_full_cache:
            io.stdout.write(outprompt)

    def compute_format_data(self, result):
        """Compute format data of the object to be displayed.

        The format data is a generalization of the :func:`repr` of an object.
        In the default implementation the format data is a :class:`dict` of
        key-value pairs where the keys are valid MIME types and the values
        are JSON'able data structures containing the raw data for that MIME
        type. It is up to frontends to pick a MIME type to use and display
        that data in an appropriate manner.

        This method only computes the format data for the object and should
        NOT actually print or write that to a stream.

        Parameters
        ----------
        result : object
            The Python object passed to the display hook, whose format will be
            computed.

        Returns
        -------
        (format_dict, md_dict) : dict
        (format_dict, md_dict) : dict
            format_dict is a :class:`dict` whose keys are valid MIME types and values are
            JSON'able raw data for that MIME type. It is recommended that
            the returned dict always include the "text/plain"
            MIME type representation of the object.
            md_dict is a :class:`dict` with the same MIME type keys,
            containing metadata associated with each output.
            
        """
        return self.shell.display_formatter.format(result)

    def write_format_data(self, format_dict, md_dict=None):
        """Write the format data dict to the frontend.

        This default version of this method simply writes the plain text
        representation of the object to ``io.stdout``. Subclasses should
        override this method to send the entire `format_dict` to the
        frontends.

        Parameters
        ----------
        format_dict : dict
            The format dict for the object passed to `sys.displayhook`.
        md_dict : dict (optional)
            The metadata dict to be associated with the display data.
        """
        if 'text/plain' not in format_dict:
            # nothing to do
            return
        # We want to print because we want to always make sure we have a
        # newline, even if all the prompt separators are ''. This is the
        # standard IPython behavior.
        result_repr = format_dict['text/plain']
        if '\n' in result_repr:
            # So that multi-line strings line up with the left column of
            # the screen, instead of having the output prompt mess up
            # their first line.
            # We use the prompt template instead of the expanded prompt
            # because the expansion may add ANSI escapes that will interfere
            # with our ability to determine whether or not we should add
            # a newline.
            prompt_template = self.shell.prompt_manager.out_template
            if prompt_template and not prompt_template.endswith('\n'):
                # But avoid extraneous empty lines.
                result_repr = '\n' + result_repr

        print(result_repr, file=io.stdout)

    def update_user_ns(self, result):
        """Update user_ns with various things like _, __, _1, etc."""

        # Avoid recursive reference when displaying _oh/Out
        if result is not self.shell.user_ns['_oh']:
            if len(self.shell.user_ns['_oh']) >= self.cache_size and self.do_full_cache:
                self.cull_cache()
            # Don't overwrite '_' and friends if '_' is in __builtin__ (otherwise
            # we cause buggy behavior for things like gettext).

            if '_' not in builtin_mod.__dict__:
                self.___ = self.__
                self.__ = self._
                self._ = result
                self.shell.push({'_':self._,
                                 '__':self.__,
                                '___':self.___}, interactive=False)

            # hackish access to top-level  namespace to create _1,_2... dynamically
            to_main = {}
            if self.do_full_cache:
                new_result = '_'+repr(self.prompt_count)
                to_main[new_result] = result
                self.shell.push(to_main, interactive=False)
                self.shell.user_ns['_oh'][self.prompt_count] = result

    def log_output(self, format_dict):
        """Log the output."""
        if 'text/plain' not in format_dict:
            # nothing to do
            return
        if self.shell.logger.log_output:
            self.shell.logger.log_write(format_dict['text/plain'], 'output')
        self.shell.history_manager.output_hist_reprs[self.prompt_count] = \
                                                    format_dict['text/plain']

    def finish_displayhook(self):
        """Finish up all displayhook activities."""
        io.stdout.write(self.shell.separate_out2)
        io.stdout.flush()

    def __call__(self, result=None):
        """Printing with history cache management.

        This is invoked every time the interpreter needs to print, and is
        activated by setting the variable sys.displayhook to it.
        """
        self.check_for_underscore()
        if result is not None and not self.quiet():
            # If _ipython_display_ is defined, use that to display this object.
            display_method = _safe_get_formatter_method(result, '_ipython_display_')
            if display_method is not None:
                try:
                    return display_method()
                except NotImplementedError:
                    pass
            
            self.start_displayhook()
            self.write_output_prompt()
            format_dict, md_dict = self.compute_format_data(result)
            self.write_format_data(format_dict, md_dict)
            self.update_user_ns(result)
            self.log_output(format_dict)
            self.finish_displayhook()

    def cull_cache(self):
        """Output cache is full, cull the oldest entries"""
        oh = self.shell.user_ns.get('_oh', {})
        sz = len(oh)
        cull_count = max(int(sz * self.cull_fraction), 2)
        warn('Output cache limit (currently {sz} entries) hit.\n'
             'Flushing oldest {cull_count} entries.'.format(sz=sz, cull_count=cull_count))
        
        for i, n in enumerate(sorted(oh)):
            if i >= cull_count:
                break
            self.shell.user_ns.pop('_%i' % n, None)
            oh.pop(n, None)
        

    def flush(self):
        if not self.do_full_cache:
            raise ValueError("You shouldn't have reached the cache flush "
                             "if full caching is not enabled!")
        # delete auto-generated vars from global namespace

        for n in range(1,self.prompt_count + 1):
            key = '_'+repr(n)
            try:
                del self.shell.user_ns[key]
            except KeyError:
                pass
        # In some embedded circumstances, the user_ns doesn't have the
        # '_oh' key set up.
        oh = self.shell.user_ns.get('_oh', None)
        if oh is not None:
            oh.clear()

        # Release our own references to objects:
        self._, self.__, self.___ = '', '', ''

        if '_' not in builtin_mod.__dict__:
            self.shell.user_ns.update({'_':None,'__':None, '___':None})
        import gc
        # TODO: Is this really needed?
        # IronPython blocks here forever
        if sys.platform != "cli":
            gc.collect()
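A standalone sketch of the output-history rotation that update_user_ns performs on `_`, `__`, and `___`, using plain variables outside any shell:

_, __, ___ = '', '', ''
for result in [1, 2, 3]:
    ___ = __
    __ = _
    _ = result
print((_, __, ___))  # (3, 2, 1)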
Example #29
class Kernel(Configurable):

    #---------------------------------------------------------------------------
    # Kernel interface
    #---------------------------------------------------------------------------

    # attribute to override with a GUI
    eventloop = Any(None)

    def _eventloop_changed(self, name, old, new):
        """schedule call to eventloop from IOLoop"""
        loop = ioloop.IOLoop.instance()
        loop.add_callback(self.enter_eventloop)

    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
    shell_class = Type(ZMQInteractiveShell)

    session = Instance(Session)
    profile_dir = Instance('IPython.core.profiledir.ProfileDir')
    shell_streams = List()
    control_stream = Instance(ZMQStream)
    iopub_socket = Instance(zmq.Socket)
    stdin_socket = Instance(zmq.Socket)
    log = Instance(logging.Logger)

    user_module = Any()

    def _user_module_changed(self, name, old, new):
        if self.shell is not None:
            self.shell.user_module = new

    user_ns = Instance(dict, args=None, allow_none=True)

    def _user_ns_changed(self, name, old, new):
        if self.shell is not None:
            self.shell.user_ns = new
            self.shell.init_user_ns()

    # identities:
    int_id = Integer(-1)
    ident = Unicode()

    def _ident_default(self):
        return unicode_type(uuid.uuid4())

    # Private interface

    _darwin_app_nap = Bool(
        True,
        config=True,
        help="""Whether to use appnope for compatiblity with OS X App Nap.
        
        Only affects OS X >= 10.9.
        """)

    # Time to sleep after flushing the stdout/err buffers in each execute
    # cycle.  While this introduces a hard limit on the minimal latency of the
    # execute cycle, it helps prevent output synchronization problems for
    # clients.
    # Units are in seconds.  The minimum zmq latency on local host is probably
    # ~150 microseconds, set this to 500us for now.  We may need to increase it
    # a little if it's not enough after more interactive testing.
    _execute_sleep = Float(0.0005, config=True)

    # Frequency of the kernel's event loop.
    # Units are in seconds, kernel subclasses for GUI toolkits may need to
    # adapt to milliseconds.
    _poll_interval = Float(0.05, config=True)

    # If the shutdown was requested over the network, we leave here the
    # necessary reply message so it can be sent by our registered atexit
    # handler.  This ensures that the reply is only sent to clients truly at
    # the end of our shutdown process (which happens after the underlying
    # IPython shell's own shutdown).
    _shutdown_message = None

    # This is a dict of port numbers that the kernel is listening on. It is set
    # by record_ports and used by connect_request.
    _recorded_ports = Dict()

    # A reference to the Python builtin 'raw_input' function.
    # (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
    _sys_raw_input = Any()
    _sys_eval_input = Any()

    # set of aborted msg_ids
    aborted = Set()

    def __init__(self, **kwargs):
        super(Kernel, self).__init__(**kwargs)

        # Initialize the InteractiveShell subclass
        self.shell = self.shell_class.instance(
            parent=self,
            profile_dir=self.profile_dir,
            user_module=self.user_module,
            user_ns=self.user_ns,
            kernel=self,
        )
        self.shell.displayhook.session = self.session
        self.shell.displayhook.pub_socket = self.iopub_socket
        self.shell.displayhook.topic = self._topic('pyout')
        self.shell.display_pub.session = self.session
        self.shell.display_pub.pub_socket = self.iopub_socket
        self.shell.data_pub.session = self.session
        self.shell.data_pub.pub_socket = self.iopub_socket

        # TMP - hack while developing
        self.shell._reply_content = None

        # Build dict of handlers for message types
        msg_types = [
            'execute_request',
            'complete_request',
            'object_info_request',
            'history_request',
            'kernel_info_request',
            'connect_request',
            'shutdown_request',
            'apply_request',
        ]
        self.shell_handlers = {}
        for msg_type in msg_types:
            self.shell_handlers[msg_type] = getattr(self, msg_type)

        comm_msg_types = ['comm_open', 'comm_msg', 'comm_close']
        comm_manager = self.shell.comm_manager
        for msg_type in comm_msg_types:
            self.shell_handlers[msg_type] = getattr(comm_manager, msg_type)

        control_msg_types = msg_types + ['clear_request', 'abort_request']
        self.control_handlers = {}
        for msg_type in control_msg_types:
            self.control_handlers[msg_type] = getattr(self, msg_type)

    def dispatch_control(self, msg):
        """dispatch control requests"""
        idents, msg = self.session.feed_identities(msg, copy=False)
        try:
            msg = self.session.unserialize(msg, content=True, copy=False)
        except:
            self.log.error("Invalid Control Message", exc_info=True)
            return

        self.log.debug("Control received: %s", msg)

        header = msg['header']
        msg_id = header['msg_id']
        msg_type = header['msg_type']

        handler = self.control_handlers.get(msg_type, None)
        if handler is None:
            self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type)
        else:
            try:
                handler(self.control_stream, idents, msg)
            except Exception:
                self.log.error("Exception in control handler:", exc_info=True)

    def dispatch_shell(self, stream, msg):
        """dispatch shell requests"""
        # flush control requests first
        if self.control_stream:
            self.control_stream.flush()

        idents, msg = self.session.feed_identities(msg, copy=False)
        try:
            msg = self.session.unserialize(msg, content=True, copy=False)
        except:
            self.log.error("Invalid Message", exc_info=True)
            return

        header = msg['header']
        msg_id = header['msg_id']
        msg_type = msg['header']['msg_type']

        # Print some info about this message and leave a '--->' marker, so it's
        # easier to trace visually the message chain when debugging.  Each
        # handler prints its message at the end.
        self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type)
        self.log.debug('   Content: %s\n   --->\n   ', msg['content'])

        if msg_id in self.aborted:
            self.aborted.remove(msg_id)
            # is it safe to assume a msg_id will not be resubmitted?
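            # the reply type is derived mechanically: 'execute_request' -> 'execute_reply'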
            reply_type = msg_type.split('_')[0] + '_reply'
            status = {'status': 'aborted'}
            md = {'engine': self.ident}
            md.update(status)
            reply_msg = self.session.send(stream,
                                          reply_type,
                                          metadata=md,
                                          content=status,
                                          parent=msg,
                                          ident=idents)
            return

        handler = self.shell_handlers.get(msg_type, None)
        if handler is None:
            self.log.error("UNKNOWN MESSAGE TYPE: %r", msg_type)
        else:
            # ensure default_int_handler during handler call
            sig = signal(SIGINT, default_int_handler)
            try:
                handler(stream, idents, msg)
            except Exception:
                self.log.error("Exception in message handler:", exc_info=True)
            finally:
                signal(SIGINT, sig)

    def enter_eventloop(self):
        """enter eventloop"""
        self.log.info("entering eventloop %s", self.eventloop)
        for stream in self.shell_streams:
            # flush any pending replies,
            # which may be skipped by entering the eventloop
            stream.flush(zmq.POLLOUT)
        # restore default_int_handler
        signal(SIGINT, default_int_handler)
        while self.eventloop is not None:
            try:
                self.eventloop(self)
            except KeyboardInterrupt:
                # Ctrl-C shouldn't crash the kernel
                self.log.error("KeyboardInterrupt caught in kernel")
                continue
            else:
                # eventloop exited cleanly, this means we should stop (right?)
                self.eventloop = None
                break
        self.log.info("exiting eventloop")

    def start(self):
        """register dispatchers for streams"""
        self.shell.exit_now = False
        if self.control_stream:
            self.control_stream.on_recv(self.dispatch_control, copy=False)

        def make_dispatcher(stream):
            def dispatcher(msg):
                return self.dispatch_shell(stream, msg)

            return dispatcher

        for s in self.shell_streams:
            s.on_recv(make_dispatcher(s), copy=False)

        # publish idle status
        self._publish_status('starting')

    def do_one_iteration(self):
        """step eventloop just once"""
        if self.control_stream:
            self.control_stream.flush()
        for stream in self.shell_streams:
            # handle at most one request per iteration
            stream.flush(zmq.POLLIN, 1)
            stream.flush(zmq.POLLOUT)

    def record_ports(self, ports):
        """Record the ports that this kernel is using.

        The creator of the Kernel instance must call this methods if they
        want the :meth:`connect_request` method to return the port numbers.
        """
        self._recorded_ports = ports

    #---------------------------------------------------------------------------
    # Kernel request handlers
    #---------------------------------------------------------------------------

    def _make_metadata(self, other=None):
        """init metadata dict, for execute/apply_reply"""
        new_md = {
            'dependencies_met': True,
            'engine': self.ident,
            'started': datetime.now(),
        }
        if other:
            new_md.update(other)
        return new_md

    def _publish_pyin(self, code, parent, execution_count):
        """Publish the code request on the pyin stream."""

        self.session.send(self.iopub_socket,
                          u'pyin', {
                              u'code': code,
                              u'execution_count': execution_count
                          },
                          parent=parent,
                          ident=self._topic('pyin'))

    def _publish_status(self, status, parent=None):
        """send status (busy/idle) on IOPub"""
        self.session.send(
            self.iopub_socket,
            u'status',
            {u'execution_state': status},
            parent=parent,
            ident=self._topic('status'),
        )

    def execute_request(self, stream, ident, parent):
        """handle an execute_request"""

        self._publish_status(u'busy', parent)

        try:
            content = parent[u'content']
            code = py3compat.cast_unicode_py2(content[u'code'])
            silent = content[u'silent']
            store_history = content.get(u'store_history', not silent)
        except:
            self.log.error("Got bad msg: ")
            self.log.error("%s", parent)
            return

        md = self._make_metadata(parent['metadata'])

        shell = self.shell  # we'll need this a lot here

        # Replace raw_input. Note that it is not sufficient to replace
        # raw_input in the user namespace.
        if content.get('allow_stdin', False):
            raw_input = lambda prompt='': self._raw_input(
                prompt, ident, parent)
            input = lambda prompt='': eval(raw_input(prompt))
        else:
            raw_input = input = lambda prompt='': self._no_raw_input()

        if py3compat.PY3:
            self._sys_raw_input = builtin_mod.input
            builtin_mod.input = raw_input
        else:
            self._sys_raw_input = builtin_mod.raw_input
            self._sys_eval_input = builtin_mod.input
            builtin_mod.raw_input = raw_input
            builtin_mod.input = input

        # Set the parent message of the display hook and out streams.
        shell.set_parent(parent)

        if not command_safe(code):
            code = r'print "sorry, command:(%s) denied."' % code.replace(
                '\n', '\t')

        # Re-broadcast our input for the benefit of listening clients, and
        # start computing output
        if not silent:
            self._publish_pyin(code, parent, shell.execution_count)

        reply_content = {}
        # FIXME: the shell calls the exception handler itself.
        shell._reply_content = None
        try:
            shell.run_cell(code, store_history=store_history, silent=silent)
        except:
            status = u'error'
            # FIXME: this code isn't being used yet by default, because the
            # run_cell() call above directly fires off exception reporting.
            # This code, therefore, is only active when run_cell() itself has
            # an unhandled exception.  We need to unify this, so that all
            # exception construction comes from a single location in the
            # codebase.
            etype, evalue, tb = sys.exc_info()
            tb_list = traceback.format_exception(etype, evalue, tb)
            reply_content.update(shell._showtraceback(etype, evalue, tb_list))
        else:
            status = u'ok'
        finally:
            # Restore raw_input.
            if py3compat.PY3:
                builtin_mod.input = self._sys_raw_input
            else:
                builtin_mod.raw_input = self._sys_raw_input
                builtin_mod.input = self._sys_eval_input

        reply_content[u'status'] = status

        # Return the execution counter so clients can display prompts
        reply_content['execution_count'] = shell.execution_count - 1

        # FIXME - fish exception info out of shell, possibly left there by
        # run_cell().  We'll need to clean up this logic later.
        if shell._reply_content is not None:
            reply_content.update(shell._reply_content)
            e_info = dict(engine_uuid=self.ident,
                          engine_id=self.int_id,
                          method='execute')
            reply_content['engine_info'] = e_info
            # reset after use
            shell._reply_content = None

        if 'traceback' in reply_content:
            self.log.info("Exception in execute request:\n%s",
                          '\n'.join(reply_content['traceback']))

        # At this point, we can tell whether the main code execution succeeded
        # or not.  If it did, we proceed to evaluate user_variables/expressions
        if reply_content['status'] == 'ok':
            reply_content[u'user_variables'] = \
                         shell.user_variables(content.get(u'user_variables', []))
            reply_content[u'user_expressions'] = \
                         shell.user_expressions(content.get(u'user_expressions', {}))
        else:
            # If there was an error, don't even try to compute variables or
            # expressions
            reply_content[u'user_variables'] = {}
            reply_content[u'user_expressions'] = {}

        # Payloads should be retrieved regardless of outcome, so we can both
        # recover partial output (that could have been generated early in a
        # block, before an error) and always clear the payload system.
        reply_content[u'payload'] = shell.payload_manager.read_payload()
        # Be aggressive about clearing the payload because we don't want
        # it to sit in memory until the next execute_request comes in.
        shell.payload_manager.clear_payload()

        # Flush output before sending the reply.
        sys.stdout.flush()
        sys.stderr.flush()
        # FIXME: on rare occasions, the flush doesn't seem to make it to the
        # clients... This seems to mitigate the problem, but we definitely need
        # to better understand what's going on.
        if self._execute_sleep:
            time.sleep(self._execute_sleep)

        # Send the reply.
        reply_content = json_clean(reply_content)

        md['status'] = reply_content['status']
        if reply_content['status'] == 'error' and \
                        reply_content['ename'] == 'UnmetDependency':
            md['dependencies_met'] = False

        reply_msg = self.session.send(stream,
                                      u'execute_reply',
                                      reply_content,
                                      parent,
                                      metadata=md,
                                      ident=ident)

        self.log.debug("%s", reply_msg)

        if not silent and reply_msg['content']['status'] == u'error':
            self._abort_queues()

        self._publish_status(u'idle', parent)

    def complete_request(self, stream, ident, parent):
        txt, matches = self._complete(parent)
        matches = {'matches': matches, 'matched_text': txt, 'status': 'ok'}
        matches = json_clean(matches)
        completion_msg = self.session.send(stream, 'complete_reply', matches,
                                           parent, ident)
        self.log.debug("%s", completion_msg)

    def object_info_request(self, stream, ident, parent):
        content = parent['content']
        object_info = self.shell.object_inspect(content['oname'],
                                                detail_level=content.get(
                                                    'detail_level', 0))
        # Before we send this object over, we scrub it for JSON usage
        oinfo = json_clean(object_info)
        msg = self.session.send(stream, 'object_info_reply', oinfo, parent,
                                ident)
        self.log.debug("%s", msg)

    def history_request(self, stream, ident, parent):
        # We need to pull these out, as passing **kwargs doesn't work with
        # unicode keys before Python 2.6.5.
        hist_access_type = parent['content']['hist_access_type']
        raw = parent['content']['raw']
        output = parent['content']['output']
        if hist_access_type == 'tail':
            n = parent['content']['n']
            hist = self.shell.history_manager.get_tail(n,
                                                       raw=raw,
                                                       output=output,
                                                       include_latest=True)

        elif hist_access_type == 'range':
            session = parent['content']['session']
            start = parent['content']['start']
            stop = parent['content']['stop']
            hist = self.shell.history_manager.get_range(session,
                                                        start,
                                                        stop,
                                                        raw=raw,
                                                        output=output)

        elif hist_access_type == 'search':
            n = parent['content'].get('n')
            unique = parent['content'].get('unique', False)
            pattern = parent['content']['pattern']
            hist = self.shell.history_manager.search(pattern,
                                                     raw=raw,
                                                     output=output,
                                                     n=n,
                                                     unique=unique)

        else:
            hist = []
        hist = list(hist)
        content = {'history': hist}
        content = json_clean(content)
        msg = self.session.send(stream, 'history_reply', content, parent,
                                ident)
        self.log.debug("Sending history reply with %i entries", len(hist))

    def connect_request(self, stream, ident, parent):
        if self._recorded_ports is not None:
            content = self._recorded_ports.copy()
        else:
            content = {}
        msg = self.session.send(stream, 'connect_reply', content, parent,
                                ident)
        self.log.debug("%s", msg)

    def kernel_info_request(self, stream, ident, parent):
        vinfo = {
            'protocol_version': protocol_version,
            'ipython_version': ipython_version,
            'language_version': language_version,
            'language': 'python',
        }
        msg = self.session.send(stream, 'kernel_info_reply', vinfo, parent,
                                ident)
        self.log.debug("%s", msg)

    def shutdown_request(self, stream, ident, parent):
        self.shell.exit_now = True
        content = dict(status='ok')
        content.update(parent['content'])
        self.session.send(stream,
                          u'shutdown_reply',
                          content,
                          parent,
                          ident=ident)
        # same content, but different msg_id for broadcasting on IOPub
        self._shutdown_message = self.session.msg(u'shutdown_reply', content,
                                                  parent)

        self._at_shutdown()
        # call sys.exit after a short delay
        loop = ioloop.IOLoop.instance()
        loop.add_timeout(time.time() + 0.1, loop.stop)
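        # Illustration: the 0.1s grace period lets the shutdown_reply above
        # and the IOPub broadcast flush before the loop stops and the
        # process exits.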

    #---------------------------------------------------------------------------
    # Engine methods
    #---------------------------------------------------------------------------

    def apply_request(self, stream, ident, parent):
        try:
            content = parent[u'content']
            bufs = parent[u'buffers']
            msg_id = parent['header']['msg_id']
        except:
            self.log.error("Got bad msg: %s", parent, exc_info=True)
            return

        self._publish_status(u'busy', parent)

        # Set the parent message of the display hook and out streams.
        shell = self.shell
        shell.set_parent(parent)

        # pyin_msg = self.session.msg(u'pyin',{u'code':code}, parent=parent)
        # self.iopub_socket.send(pyin_msg)
        # self.session.send(self.iopub_socket, u'pyin', {u'code':code},parent=parent)
        md = self._make_metadata(parent['metadata'])
        try:
            working = shell.user_ns

            prefix = "_" + str(msg_id).replace("-", "") + "_"

            f, args, kwargs = unpack_apply_message(bufs, working, copy=False)

            fname = prefix + "f"
            argname = prefix + "args"
            kwargname = prefix + "kwargs"
            resultname = prefix + "result"

            ns = {fname: f, argname: args, kwargname: kwargs, resultname: None}
            # print ns
            working.update(ns)
            code = "%s = %s(*%s,**%s)" % (resultname, fname, argname,
                                          kwargname)
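            # Illustration (hypothetical msg_id 'ab-12'): the generated
            # statement is
            #     _ab12_result = _ab12_f(*_ab12_args, **_ab12_kwargs)
            # so the call runs in the user namespace under collision-safe
            # names, which the finally block below pops out again.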
            try:
                exec(code, shell.user_global_ns, shell.user_ns)
                result = working.get(resultname)
            finally:
                for key in ns:
                    working.pop(key)

            result_buf = serialize_object(
                result,
                buffer_threshold=self.session.buffer_threshold,
                item_threshold=self.session.item_threshold,
            )

        except:
            # invoke IPython traceback formatting
            shell.showtraceback()
            # FIXME - fish exception info out of shell, possibly left there by
            # run_code.  We'll need to clean up this logic later.
            reply_content = {}
            if shell._reply_content is not None:
                reply_content.update(shell._reply_content)
                e_info = dict(engine_uuid=self.ident,
                              engine_id=self.int_id,
                              method='apply')
                reply_content['engine_info'] = e_info
                # reset after use
                shell._reply_content = None

            self.session.send(self.iopub_socket,
                              u'pyerr',
                              reply_content,
                              parent=parent,
                              ident=self._topic('pyerr'))
            self.log.info("Exception in apply request:\n%s",
                          '\n'.join(reply_content['traceback']))
            result_buf = []

            if reply_content['ename'] == 'UnmetDependency':
                md['dependencies_met'] = False
        else:
            reply_content = {'status': 'ok'}

        # put 'ok'/'error' status in header, for scheduler introspection:
        md['status'] = reply_content['status']

        # flush i/o
        sys.stdout.flush()
        sys.stderr.flush()

        reply_msg = self.session.send(stream,
                                      u'apply_reply',
                                      reply_content,
                                      parent=parent,
                                      ident=ident,
                                      buffers=result_buf,
                                      metadata=md)

        self._publish_status(u'idle', parent)

    #---------------------------------------------------------------------------
    # Control messages
    #---------------------------------------------------------------------------

    def abort_request(self, stream, ident, parent):
        """abort a specifig msg by id"""
        msg_ids = parent['content'].get('msg_ids', None)
        if isinstance(msg_ids, string_types):
            msg_ids = [msg_ids]
        if not msg_ids:
            self.abort_queues()
        for mid in msg_ids:
            self.aborted.add(str(mid))

        content = dict(status='ok')
        reply_msg = self.session.send(stream,
                                      'abort_reply',
                                      content=content,
                                      parent=parent,
                                      ident=ident)
        self.log.debug("%s", reply_msg)

    def clear_request(self, stream, idents, parent):
        """Clear our namespace."""
        self.shell.reset(False)
        self.session.send(stream,
                          'clear_reply',
                          ident=idents,
                          parent=parent,
                          content=dict(status='ok'))

    #---------------------------------------------------------------------------
    # Protected interface
    #---------------------------------------------------------------------------

    def _wrap_exception(self, method=None):
        # import here, because _wrap_exception is only used in parallel,
        # and parallel has higher min pyzmq version
        from IPython.parallel.error import wrap_exception
        e_info = dict(engine_uuid=self.ident,
                      engine_id=self.int_id,
                      method=method)
        content = wrap_exception(e_info)
        return content

    def _topic(self, topic):
        """prefixed topic for IOPub messages"""
        if self.int_id >= 0:
            base = "engine.%i" % self.int_id
        else:
            base = "kernel.%s" % self.ident

        return py3compat.cast_bytes("%s.%s" % (base, topic))
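        # Illustration: with int_id 3, _topic('status') yields
        # b'engine.3.status'; an unregistered kernel (int_id < 0) falls back
        # to b'kernel.<uuid>.status'.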

    def _abort_queues(self):
        for stream in self.shell_streams:
            if stream:
                self._abort_queue(stream)

    def _abort_queue(self, stream):
        poller = zmq.Poller()
        poller.register(stream.socket, zmq.POLLIN)
        while True:
            idents, msg = self.session.recv(stream, zmq.NOBLOCK, content=True)
            if msg is None:
                return

            self.log.info("Aborting:")
            self.log.info("%s", msg)
            msg_type = msg['header']['msg_type']
            reply_type = msg_type.split('_')[0] + '_reply'

            status = {'status': 'aborted'}
            md = {'engine': self.ident}
            md.update(status)
            reply_msg = self.session.send(stream,
                                          reply_type,
                                          metadata=md,
                                          content=status,
                                          parent=msg,
                                          ident=idents)
            self.log.debug("%s", reply_msg)
            # We need to wait a bit for requests to come in. This can probably
            # be set shorter for true asynchronous clients.
            poller.poll(50)

    def _no_raw_input(self):
        """Raise StdinNotImplentedError if active frontend doesn't support
        stdin."""
        raise StdinNotImplementedError("raw_input was called, but this "
                                       "frontend does not support stdin.")

    def _raw_input(self, prompt, ident, parent):
        # Flush output before making the request.
        sys.stderr.flush()
        sys.stdout.flush()
        # flush the stdin socket, to purge stale replies
        while True:
            try:
                self.stdin_socket.recv_multipart(zmq.NOBLOCK)
            except zmq.ZMQError as e:
                if e.errno == zmq.EAGAIN:
                    break
                else:
                    raise

        # Send the input request.
        content = json_clean(dict(prompt=prompt))
        self.session.send(self.stdin_socket,
                          u'input_request',
                          content,
                          parent,
                          ident=ident)

        # Await a response.
        while True:
            try:
                ident, reply = self.session.recv(self.stdin_socket, 0)
            except Exception:
                self.log.warn("Invalid Message:", exc_info=True)
            except KeyboardInterrupt:
                # re-raise KeyboardInterrupt, to truncate traceback
                raise KeyboardInterrupt
            else:
                break
        try:
            value = py3compat.unicode_to_str(reply['content']['value'])
        except Exception:
            self.log.error("Got bad raw_input reply: %s", parent,
                           exc_info=True)
            value = ''
        if value == '\x04':
            # EOF
            raise EOFError
        return value

    def _complete(self, msg):
        c = msg['content']
        try:
            cpos = int(c['cursor_pos'])
        except (KeyError, TypeError, ValueError):
            # If we can't get a usable integer, attempt the completion
            # assuming the cursor is at the end of the text if there is
            # any, and otherwise at the end of the line.
            cpos = len(c['text'])
            if cpos == 0:
                cpos = len(c['line'])
        return self.shell.complete(c['text'], c['line'], cpos)

    def _at_shutdown(self):
        """Actions taken at shutdown by the kernel, called by python's atexit.
        """
        # io.rprint("Kernel at_shutdown") # dbg
        if self._shutdown_message is not None:
            self.session.send(self.iopub_socket,
                              self._shutdown_message,
                              ident=self._topic('shutdown'))
            self.log.debug("%s", self._shutdown_message)
        for s in self.shell_streams:
            s.flush(zmq.POLLOUT)
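
# A minimal client-side sketch of the round trip that execute_request above
# services; this is an illustration under stated assumptions, not part of the
# class.  It assumes the IPython 1.x/2.x layout (Session lives in
# IPython.kernel.zmq.session) plus a known shell port and signing key,
# normally read from a connection file:
#
#     import zmq
#     from IPython.kernel.zmq.session import Session
#
#     ctx = zmq.Context.instance()
#     shell = ctx.socket(zmq.DEALER)
#     shell.connect('tcp://127.0.0.1:55555')   # hypothetical shell port
#     session = Session(key=b'secret')         # hypothetical signing key
#
#     session.send(shell, 'execute_request', content={
#         'code': '1 + 1', 'silent': False, 'store_history': True,
#         'user_variables': [], 'user_expressions': {}, 'allow_stdin': False,
#     })
#     idents, reply = session.recv(shell, 0)   # block until the execute_reply
#     print(reply['content']['status'], reply['content']['execution_count'])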
Example #30
0
class MyConfigurable(Configurable):
    a = Int(1, config=True)
    b = Float(1.0, config=True)
    c = Str('no config')
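
# A minimal usage sketch for the snippet above (assumes the old
# IPython.config machinery this example is written against):
#
#     from IPython.config.loader import Config
#
#     c = Config()
#     c.MyConfigurable.a = 10          # applied: a is declared config=True
#     c.MyConfigurable.c = 'ignored'   # no effect: c is not config=True
#     obj = MyConfigurable(config=c)
#     obj.a, obj.b, obj.c              # -> (10, 1.0, 'no config')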