def do_one_iteration(self):
    """Run a single pass of the kernel's evaluation loop.

    Polls the reply socket without blocking.  If no request is pending,
    returns immediately; otherwise dispatches the message to the handler
    registered for its type, then honors a pending exit request.
    """
    ident, msg = self.session.recv(self.reply_socket, zmq.NOBLOCK)
    if msg is None:
        # Nothing waiting on the socket; try again on the next poll.
        return

    # This assert will raise in versions of zeromq 2.0.7 and lesser.
    # We now require 2.0.8 or above, so we can uncomment for safety.
    # print(ident,msg, file=sys.__stdout__)
    assert ident is not None, "Missing message part."

    # Trace the message visually with a '--->' marker so the message chain
    # is easy to follow when debugging; each handler prints its own reply
    # at the end.  Eventually these prints move from stdout to a logger.
    io.raw_print('\n*** MESSAGE TYPE:', msg['msg_type'], '***')
    io.raw_print(' Content: ', msg['content'],
                 '\n --->\n ', sep='', end='')

    # Look up and invoke the handler for this message type.
    handler = self.handlers.get(msg['msg_type'])
    if handler is not None:
        handler(ident, msg)
    else:
        io.raw_print_err("UNKNOWN MESSAGE TYPE:", msg)

    # The handler we just ran may have set the exit flag.
    if self.shell.exit_now:
        io.raw_print('\nExiting IPython kernel...')
        # A normal, clean exit lets atexit-registered actions (such as
        # history saving) take place.
        sys.exit(0)
def _raw_input(self, prompt, ident, parent): # Flush output before making the request. sys.stderr.flush() sys.stdout.flush() # Send the input request. content = dict(prompt=prompt) msg = self.session.send(self.req_socket, 'input_request', content, parent) # Await a response. ident, reply = self.session.recv(self.req_socket, 0) try: value = reply['content']['value'] except: io.raw_print_err("Got bad raw_input reply: ") io.raw_print_err(Message(parent)) value = '' return value
def _raw_input(self, prompt, ident, parent): # Flush output before making the request. sys.stderr.flush() sys.stdout.flush() # Send the input request. content = dict(prompt=prompt) msg = self.session.msg(u'input_request', content, parent) self.req_socket.send_json(msg) # Await a response. reply = self.req_socket.recv_json() try: value = reply['content']['value'] except: io.raw_print_err("Got bad raw_input reply: ") io.raw_print_err(Message(parent)) value = '' return value
def _raw_input(self, prompt, ident, parent): # Flush output before making the request. sys.stderr.flush() sys.stdout.flush() # Send the input request. content = dict(prompt=prompt) msg = self.session.msg(u"input_request", content, parent) self.req_socket.send_json(msg) # Await a response. reply = self.req_socket.recv_json() try: value = reply["content"]["value"] except: io.raw_print_err("Got bad raw_input reply: ") io.raw_print_err(Message(parent)) value = "" return value
class Kernel(Configurable):
    """Kernel that executes code requests received over ZMQ sockets."""

    #---------------------------------------------------------------------------
    # Kernel interface
    #---------------------------------------------------------------------------

    # The interactive shell that does the actual execution (assigned in
    # __init__).
    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
    # Builds/serializes the messages exchanged on the sockets below.
    session = Instance(Session)
    # Socket on which client requests arrive and replies are sent back.
    reply_socket = Instance('zmq.Socket')
    # Socket used to broadcast status messages and display output to clients.
    pub_socket = Instance('zmq.Socket')
    # Socket used to make requests of the frontend (e.g. raw_input).
    req_socket = Instance('zmq.Socket')

    # Private interface

    # Time to sleep after flushing the stdout/err buffers in each execute
    # cycle.  While this introduces a hard limit on the minimal latency of
    # the execute cycle, it helps prevent output synchronization problems
    # for clients.
    # Units are in seconds.  The minimum zmq latency on local host is
    # probably ~150 microseconds, set this to 500us for now.  We may need to
    # increase it a little if it's not enough after more interactive testing.
    _execute_sleep = Float(0.0005, config=True)

    # Frequency of the kernel's event loop.
    # Units are in seconds, kernel subclasses for GUI toolkits may need to
    # adapt to milliseconds.
    _poll_interval = Float(0.05, config=True)

    # If the shutdown was requested over the network, we leave here the
    # necessary reply message so it can be sent by our registered atexit
    # handler.  This ensures that the reply is only sent to clients truly
    # at the end of our shutdown process (which happens after the
    # underlying IPython shell's own shutdown).
    _shutdown_message = None

    # This is a dict of port number that the kernel is listening on.  It is
    # set by record_ports and used by connect_request.
# Ports the kernel is listening on; set by record_ports, read by
# connect_request (see the class-level comment).
_recorded_ports = None

def __init__(self, **kwargs):
    """Register exit handlers, build the shell and the handler table."""
    super(Kernel, self).__init__(**kwargs)

    # Before we even start up the shell, register *first* our exit handlers
    # so they come before the shell's.
    atexit.register(self._at_shutdown)

    # Initialize the InteractiveShell subclass and wire its displayhook to
    # our session and pub socket so displayed values reach the clients.
    self.shell = ZMQInteractiveShell.instance()
    self.shell.displayhook.session = self.session
    self.shell.displayhook.pub_socket = self.pub_socket

    # TMP - hack while developing
    self.shell._reply_content = None

    # Build dict of handlers for message types; each type maps to the
    # method of the same name on this object.
    msg_types = [ 'execute_request', 'complete_request',
                  'object_info_request', 'history_request',
                  'connect_request', 'shutdown_request' ]
    self.handlers = {}
    for msg_type in msg_types:
        self.handlers[msg_type] = getattr(self, msg_type)

def do_one_iteration(self):
    """Do one iteration of the kernel's evaluation loop.
    """
    # Non-blocking read of the identity frame; EAGAIN means no request is
    # pending, so we simply return and poll again later.
    try:
        ident = self.reply_socket.recv(zmq.NOBLOCK)
    except zmq.ZMQError, e:
        if e.errno == zmq.EAGAIN:
            return
        else:
            raise

    # This assert will raise in versions of zeromq 2.0.7 and lesser.
    # We now require 2.0.8 or above, so we can uncomment for safety.
    assert self.reply_socket.rcvmore(), "Missing message part."

    # The JSON message body follows the identity frame.
    msg = self.reply_socket.recv_json()

    # Print some info about this message and leave a '--->' marker, so it's
    # easier to trace visually the message chain when debugging.  Each
    # handler prints its message at the end.
    # Eventually we'll move these from stdout to a logger.
    io.raw_print('\n*** MESSAGE TYPE:', msg['msg_type'], '***')
    io.raw_print(' Content: ', msg['content'],
                 '\n --->\n ', sep='', end='')

    # Find and call actual handler for message
    handler = self.handlers.get(msg['msg_type'], None)
    if handler is None:
        io.raw_print_err("UNKNOWN MESSAGE TYPE:", msg)
    else:
        handler(ident, msg)

    # Check whether we should exit, in case the incoming message set the
    # exit flag on
    if self.shell.exit_now:
        io.raw_print('\nExiting IPython kernel...')
        # We do a normal, clean exit, which allows any actions registered
        # via atexit (such as history saving) to take place.
        sys.exit(0)
def execute_request(self, ident, parent):
    """Handle an 'execute_request' message: run the code, send the reply.

    Publishes a busy/idle status pair around the execution, runs the code
    in the shell, and sends an 'execute_reply' carrying status, the
    execution counter, user_variables/user_expressions and any payloads.
    """
    # Let all clients know we are busy before doing any work.
    status_msg = self.session.msg(
        u'status',
        {u'execution_state': u'busy'},
        parent=parent
    )
    self.pub_socket.send_json(status_msg)

    # Pull the code to run out of the request; a malformed request is
    # reported and dropped.
    try:
        content = parent[u'content']
        code = content[u'code']
        silent = content[u'silent']
    except:
        io.raw_print_err("Got bad msg: ")
        io.raw_print_err(Message(parent))
        return

    shell = self.shell # we'll need this a lot here

    # Replace raw_input.  Note that it is not sufficient to replace
    # raw_input in the user namespace.
    raw_input = lambda prompt='': self._raw_input(prompt, ident, parent)
    __builtin__.raw_input = raw_input

    # Set the parent message of the display hook and out streams.
    shell.displayhook.set_parent(parent)
    sys.stdout.set_parent(parent)
    sys.stderr.set_parent(parent)

    # Re-broadcast our input for the benefit of listening clients, and
    # start computing output
    if not silent:
        self._publish_pyin(code, parent)

    reply_content = {}
    try:
        if silent:
            # run_code uses 'exec' mode, so no displayhook will fire, and it
            # doesn't call logging or history manipulations. Print
            # statements in that code will obviously still execute.
            shell.run_code(code)
        else:
            # FIXME: the shell calls the exception handler itself.
            shell._reply_content = None
            shell.run_cell(code)
    except:
        status = u'error'
        # FIXME: this code right now isn't being used yet by default,
        # because the runlines() call above directly fires off exception
        # reporting. This code, therefore, is only active in the scenario
        # where runlines itself has an unhandled exception. We need to
        # uniformize this, for all exception construction to come from a
        # single location in the codbase.
        etype, evalue, tb = sys.exc_info()
        tb_list = traceback.format_exception(etype, evalue, tb)
        reply_content.update(shell._showtraceback(etype, evalue, tb_list))
    else:
        status = u'ok'

    reply_content[u'status'] = status

    # Return the execution counter so clients can display prompts
    reply_content['execution_count'] = shell.execution_count - 1

    # FIXME - fish exception info out of shell, possibly left there by
    # runlines. We'll need to clean up this logic later.
    if shell._reply_content is not None:
        reply_content.update(shell._reply_content)

    # At this point, we can tell whether the main code execution succeeded
    # or not. If it did, we proceed to evaluate user_variables/expressions
    if reply_content['status'] == 'ok':
        reply_content[u'user_variables'] = \
            shell.user_variables(content[u'user_variables'])
        reply_content[u'user_expressions'] = \
            shell.user_expressions(content[u'user_expressions'])
    else:
        # If there was an error, don't even try to compute variables or
        # expressions
        reply_content[u'user_variables'] = {}
        reply_content[u'user_expressions'] = {}

    # Payloads should be retrieved regardless of outcome, so we can both
    # recover partial output (that could have been generated early in a
    # block, before an error) and clear the payload system always.
    reply_content[u'payload'] = shell.payload_manager.read_payload()
    # Be agressive about clearing the payload because we don't want
    # it to sit in memory until the next execute_request comes in.
    shell.payload_manager.clear_payload()

    # Send the reply.
    reply_msg = self.session.msg(u'execute_reply', reply_content, parent)
    io.raw_print(reply_msg)

    # Flush output before sending the reply.
    sys.stdout.flush()
    sys.stderr.flush()

    # FIXME: on rare occasions, the flush doesn't seem to make it to the
    # clients... This seems to mitigate the problem, but we definitely need
    # to better understand what's going on.
    if self._execute_sleep:
        time.sleep(self._execute_sleep)

    # Identity frame first (SNDMORE), then the JSON reply body on the same
    # socket.
    self.reply_socket.send(ident, zmq.SNDMORE)
    self.reply_socket.send_json(reply_msg)

    # On error, abort the remaining queued requests (presumably so queued
    # input is not executed after a failure -- see _abort_queue; TODO
    # confirm against its definition).
    if reply_msg['content']['status'] == u'error':
        self._abort_queue()

    # Announce that we are idle again.
    status_msg = self.session.msg(
        u'status',
        {u'execution_state': u'idle'},
        parent=parent
    )
    self.pub_socket.send_json(status_msg)
def execute_request(self, ident, parent):
    """Handle an 'execute_request' message: run the code, send the reply.

    Publishes a busy/idle status pair around the execution, runs the code
    in the shell, and sends an 'execute_reply' carrying status, the
    execution counter, user_variables/user_expressions and any payloads.
    """
    # Let all clients know we are busy before doing any work.
    status_msg = self.session.msg(
        u"status",
        {u"execution_state": u"busy"},
        parent=parent
    )
    self.pub_socket.send_json(status_msg)

    # Pull the code to run out of the request; a malformed request is
    # reported and dropped.
    try:
        content = parent[u"content"]
        code = content[u"code"]
        silent = content[u"silent"]
    except:
        io.raw_print_err("Got bad msg: ")
        io.raw_print_err(Message(parent))
        return

    shell = self.shell # we'll need this a lot here

    # Replace raw_input.  Note that it is not sufficient to replace
    # raw_input in the user namespace.
    raw_input = lambda prompt="": self._raw_input(prompt, ident, parent)
    __builtin__.raw_input = raw_input

    # Set the parent message of the display hook and out streams.
    shell.displayhook.set_parent(parent)
    sys.stdout.set_parent(parent)
    sys.stderr.set_parent(parent)

    # Re-broadcast our input for the benefit of listening clients, and
    # start computing output
    if not silent:
        self._publish_pyin(code, parent)

    reply_content = {}
    try:
        if silent:
            # runcode uses 'exec' mode, so no displayhook will fire, and it
            # doesn't call logging or history manipulations. Print
            # statements in that code will obviously still execute.
            shell.runcode(code)
        else:
            # FIXME: runlines calls the exception handler itself.
            shell._reply_content = None
            # For now leave this here until we're sure we can stop using it
            # shell.runlines(code)
            # Experimental: cell mode!  Test more before turning into
            # default and removing the hacks around runlines.
            shell.run_cell(code)
    except:
        status = u"error"
        # FIXME: this code right now isn't being used yet by default,
        # because the runlines() call above directly fires off exception
        # reporting. This code, therefore, is only active in the scenario
        # where runlines itself has an unhandled exception. We need to
        # uniformize this, for all exception construction to come from a
        # single location in the codbase.
        etype, evalue, tb = sys.exc_info()
        tb_list = traceback.format_exception(etype, evalue, tb)
        reply_content.update(shell._showtraceback(etype, evalue, tb_list))
    else:
        status = u"ok"

    reply_content[u"status"] = status

    # Compute the execution counter so clients can display prompts
    reply_content["execution_count"] = shell.displayhook.prompt_count

    # FIXME - fish exception info out of shell, possibly left there by
    # runlines. We'll need to clean up this logic later.
    if shell._reply_content is not None:
        reply_content.update(shell._reply_content)

    # At this point, we can tell whether the main code execution succeeded
    # or not. If it did, we proceed to evaluate user_variables/expressions
    if reply_content["status"] == "ok":
        reply_content[u"user_variables"] = \
            shell.user_variables(content[u"user_variables"])
        reply_content[u"user_expressions"] = \
            shell.user_expressions(content[u"user_expressions"])
    else:
        # If there was an error, don't even try to compute variables or
        # expressions
        reply_content[u"user_variables"] = {}
        reply_content[u"user_expressions"] = {}

    # Payloads should be retrieved regardless of outcome, so we can both
    # recover partial output (that could have been generated early in a
    # block, before an error) and clear the payload system always.
    reply_content[u"payload"] = shell.payload_manager.read_payload()
    # Be agressive about clearing the payload because we don't want
    # it to sit in memory until the next execute_request comes in.
    shell.payload_manager.clear_payload()

    # Send the reply.
    reply_msg = self.session.msg(u"execute_reply", reply_content, parent)
    io.raw_print(reply_msg)

    # Flush output before sending the reply.
    sys.stdout.flush()
    sys.stderr.flush()

    # FIXME: on rare occasions, the flush doesn't seem to make it to the
    # clients... This seems to mitigate the problem, but we definitely need
    # to better understand what's going on.
    if self._execute_sleep:
        time.sleep(self._execute_sleep)

    # Identity frame first (SNDMORE), then the JSON reply body on the same
    # socket.
    self.reply_socket.send(ident, zmq.SNDMORE)
    self.reply_socket.send_json(reply_msg)

    # On error, abort the remaining queued requests (presumably so queued
    # input is not executed after a failure -- see _abort_queue; TODO
    # confirm against its definition).
    if reply_msg["content"]["status"] == u"error":
        self._abort_queue()

    # Announce that we are idle again.
    status_msg = self.session.msg(
        u"status",
        {u"execution_state": u"idle"},
        parent=parent
    )
    self.pub_socket.send_json(status_msg)