def exec_code(kid, code):
    """Execute arbitrary `code` in the kernel with id `kid`.

    Args:
        kid: kernel id used to locate the kernel's connection file.
        code: source string to execute in the remote kernel.

    Returns:
        tuple: (output, error). `output` is the first stdout text seen on
        the iopub channel (or None); when the shell reply status is not
        'ok', the captured text is returned as `error` and `output` is None.
    """
    from queue import Empty  # local import keeps this block self-contained

    # Load connection info and init communications.
    cf = find_connection_file(kid)
    with jupyter_lock:
        km = BlockingKernelClient(connection_file=cf)
        km.load_connection_file()
        km.start_channels()
        msg_id = km.execute(code, store_history=False)
        reply = km.get_shell_msg(msg_id, timeout=60)
        output, error = None, None
        try:
            while km.is_alive():
                try:
                    msg = km.get_iopub_msg(timeout=10)
                except Empty:
                    # No iopub traffic in this window; keep polling while
                    # the kernel is alive instead of propagating the
                    # timeout (which previously leaked open channels).
                    continue
                if ("content" in msg
                        and "name" in msg["content"]
                        and msg["content"]["name"] == "stdout"):
                    output = msg["content"]["text"]
                    break
        finally:
            # Always tear the channels down, even on timeout/interrupt.
            km.stop_channels()

        if reply["content"]["status"] != "ok":
            logging.error(f"Status is {reply['content']['status']}")
            logging.error(output)
            error = output
            output = None

    return output, error
def exec_code(kid, var, code):
    """Execute `code` in the kernel with id `kid` and capture its stdout.

    Args:
        kid: kernel id used to locate the kernel's connection file.
        var: unused; kept for backward compatibility with existing callers.
        code: source string to execute.

    Returns:
        tuple: (output, error). `output` is the first stdout text seen on
        iopub (None if none arrived); when the shell reply status is not
        'ok', that text is moved into `error` and `output` is None.
    """
    # load connection info and init communication
    cf = find_connection_file(kid)

    # `with` releases the lock on every exit path (the original used
    # manual acquire()/release(); `global` was also unnecessary since the
    # lock is only read, never rebound).
    with jupyter_lock:
        km = BlockingKernelClient(connection_file=cf)
        km.load_connection_file()
        km.start_channels()
        msg_id = km.execute(code, store_history=False)
        reply = km.get_shell_msg(msg_id, timeout=10)

        output = None
        try:
            while km.is_alive():
                try:
                    msg = km.get_iopub_msg(timeout=10)
                except Empty:
                    # No message in this window; keep polling.
                    continue
                if 'content' not in msg:
                    continue
                if msg['content'].get('name') == 'stdout':
                    output = msg['content']['text']
                    break
        except KeyboardInterrupt:
            logging.error('Keyboard interrupt')
        finally:
            km.stop_channels()

        error = ''
        if reply['content']['status'] != 'ok':
            logging.error('Status is ' + reply['content']['status'])
            logging.error(str(output))
            error = output
            output = None

    return output, error
def run(self):
    """Forward every iopub message from the current kernel to `received`."""
    connection_file = find_connection_file()
    kernel_client = BlockingKernelClient(connection_file=connection_file)
    kernel_client.load_connection_file()
    # Only the iopub channel is needed here; leave the others closed.
    kernel_client.start_channels(shell=False, iopub=True, stdin=False,
                                 control=False, hb=False)
    while True:
        self.received.emit(kernel_client.get_iopub_msg())
def run(self):
    """Pump kernel iopub messages to `pub_q` and run commands from `cmd_q`.

    Each command string taken from `cmd_q` is executed on the kernel and
    the shell reply is pushed onto `ctrl_q`. A `None` command shuts the
    thread down.
    """
    cf = find_connection_file()
    client = BlockingKernelClient(connection_file=cf)
    client.load_connection_file()
    # control=True because shell replies are fetched below; stdin/hb stay
    # closed.
    client.start_channels(shell=False, iopub=True, stdin=False,
                          control=True, hb=False)
    while True:
        try:
            msg = client.get_iopub_msg(TIMEOUT)
            self.pub_q.put(msg)
        except Empty:
            pass
        # qsize() is documented as unreliable (racy); get_nowait() + Empty
        # is the supported non-blocking poll.
        try:
            cmd = self.cmd_q.get_nowait()
        except Empty:
            continue
        if cmd is None:
            print('Client thread closing')
            break
        client.execute(cmd)
        self.ctrl_q.put(client.get_shell_msg())
class Kernel(object):
    """Wrapper around a child IPython (optionally PySpark) kernel process."""

    def __init__(self, active_dir, pyspark):
        # kernel config is stored in a dot file with the active directory
        config = os.path.join(active_dir, ".kernel-%s.json" % str(uuid.uuid4()))

        # right now we're spawning a child process for IPython. we can
        # probably work directly with the IPython kernel API, but the docs
        # don't really explain how to do it.
        log_file = None
        if pyspark:
            os.environ["IPYTHON_OPTS"] = "kernel -f %s" % config
            pyspark = os.path.join(os.environ.get("SPARK_HOME"), "bin/pyspark")
            spark_log = os.environ.get("SPARK_LOG", None)
            if spark_log:
                log_file = open(spark_log, "w")
            spark_opts = os.environ.get("SPARK_OPTS", "")
            # $SPARK_HOME/bin/pyspark <SPARK_OPTS>
            args = [pyspark] + spark_opts.split()
            p = subprocess.Popen(args, stdout=log_file, stderr=log_file)
        else:
            args = [sys.executable, '-m', 'IPython', 'kernel', '-f', config]
            p = subprocess.Popen(args, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)

        # when __this__ process exits, we're going to remove the ipython
        # config file and kill the ipython subprocess
        atexit.register(p.terminate)

        def remove_config():
            # the kernel may already have removed its own connection file;
            # an unguarded os.remove raised OSError at interpreter exit
            if os.path.isfile(config):
                os.remove(config)
        atexit.register(remove_config)

        def close_file():
            if log_file:
                log_file.close()
        atexit.register(close_file)

        # i found that if i tried to connect to the kernel immediately, it
        # wasn't up and running. 1.5 seconds was arbitrarily chosen (but
        # seems to work)
        time.sleep(1.5)

        # fire up the kernel with the appropriate config
        self.client = BlockingKernelClient(connection_file=config)
        self.client.load_connection_file()
        self.client.start_channels()

        # load our monkeypatches...
        self.client.execute("%matplotlib inline")
        self.client.execute(autocomplete_patch)
        self.client.execute(vars_patch)

    def _run_code(self, code, timeout=0.1):
        # this function executes some code and waits for it to completely
        # finish before returning. i don't think that this is neccessarily
        # the best way to do this, but the IPython documentation isn't very
        # helpful for this particular topic.
        #
        # 1) execute code and grab the ID for that execution thread
        # 2) look for messages coming from the "iopub" channel (this is
        #    just a stream of output)
        # 3) when we get a message that is one of the following, save the
        #    relevant data to `output`:
        #       - execute_result - content from repr
        #       - stream - content from stdout
        #       - error - ansii encoded stacktrace
        # the final piece is that we check for when the message indicates
        # that the kernel is idle and the message's parent is the original
        # execution ID (msg_id) associated with our executing code. if this
        # is the case, we return the accumulated output and exit.
        msg_id = self.client.execute(code)

        output = {
            "msg_id": msg_id,
            "output": None,
            "image": None,
            "error": None
        }

        while True:
            try:
                reply = self.client.get_iopub_msg(timeout=timeout)
            except Empty:
                continue

            if "execution_state" in reply['content']:
                if reply['content']['execution_state'] == "idle" and reply['parent_header']['msg_id'] == msg_id:
                    if reply['parent_header']['msg_type'] == "execute_request":
                        return output
            elif reply['header']['msg_type'] == "execute_result":
                output['output'] = reply['content']['data'].get('text/plain', '')
            elif reply['header']['msg_type'] == "display_data":
                output['image'] = reply['content']['data'].get('image/png', '')
            elif reply['header']['msg_type'] == "stream":
                output['output'] = reply['content'].get('text', '')
            elif reply['header']['msg_type'] == "error":
                output['error'] = "\n".join(reply['content']['traceback'])

    def execute(self, code):
        """Run `code` in the kernel and return its collected output dict."""
        return self._run_code(code)

    def complete(self, code):
        # i couldn't figure out how to get the autocomplete working with
        # the ipython kernel (i couldn't get a completion_reply from the
        # iopub), so we're using jedi to do the autocompletion. the
        # __autocomplete is defined in `autocomplete_patch` above.
        return self.execute("__autocomplete('%s')" % code)

    def get_dataframes(self):
        """Ask the kernel for its current variables via `__get_variables`."""
        return self.execute("__get_variables()")
class Kernel(object):
    """Wrapper around a child IPython kernel process."""

    def __init__(self, active_dir):
        # kernel config is stored in a temp file
        config = os.path.join(active_dir, ".kernel-%s.json" % str(uuid.uuid4()))
        args = [sys.executable, '-m', 'IPython', 'kernel', '-f', config]
        p = subprocess.Popen(args, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # when __this__ process exits, we're going to remove the ipython
        # config file and kill the ipython subprocess
        atexit.register(p.terminate)

        def remove_config():
            # the kernel may already have removed its own connection file;
            # an unguarded os.remove raised OSError at interpreter exit
            if os.path.isfile(config):
                os.remove(config)
        atexit.register(remove_config)

        # i found that if i tried to connect to the kernel immediately, it
        # wasn't up and running. 1.5 seconds was arbitrarily chosen (but
        # seems to work)
        time.sleep(1.5)

        # fire up the kernel with the appropriate config
        self.client = BlockingKernelClient(connection_file=config)
        self.client.load_connection_file()
        self.client.start_channels()

        # load our monkeypatches...
        self.client.execute("%matplotlib inline")
        self.client.execute(autocomplete_patch)
        self.client.execute(vars_patch)

    def _run_code(self, code, timeout=0.1):
        # this function executes some code and waits for it to completely
        # finish before returning. i don't think that this is neccessarily
        # the best way to do this, but the IPython documentation isn't very
        # helpful for this particular topic.
        #
        # 1) execute code and grab the ID for that execution thread
        # 2) look for messages coming from the "iopub" channel (this is
        #    just a stream of output)
        # 3) when we get a message that is one of the following, save the
        #    relevant data to `data`:
        #       - execute_result - content from repr
        #       - stream - content from stdout
        #       - error - ansii encoded stacktrace
        # the final piece is that we check for when the message indicates
        # that the kernel is idle and the message's parent is the original
        # execution ID (msg_id) associated with our executing code. if this
        # is the case, we return the data and the msg_id and exit.
        msg_id = self.client.execute(code)

        data = None
        image = None
        while True:
            try:
                reply = self.client.get_iopub_msg(timeout=timeout)
            except Empty:
                continue

            if "execution_state" in reply['content']:
                if reply['content']['execution_state'] == "idle" and reply['parent_header']['msg_id'] == msg_id:
                    if reply['parent_header']['msg_type'] == "execute_request":
                        return {
                            "msg_id": msg_id,
                            "output": data,
                            "image": image
                        }
            elif reply['header']['msg_type'] == "execute_result":
                data = reply['content']['data'].get('text/plain', '')
            elif reply['header']['msg_type'] == "display_data":
                image = reply['content']['data'].get('image/png', '')
            elif reply['header']['msg_type'] == "stream":
                data = reply['content'].get('text', '')
            elif reply['header']['msg_type'] == "error":
                data = "\n".join(reply['content']['traceback'])

    def execute(self, code):
        """Run `code` in the kernel and return its collected output dict."""
        return self._run_code(code)

    def complete(self, code):
        # i couldn't figure out how to get the autocomplete working with
        # the ipython kernel (i couldn't get a completion_reply from the
        # iopub), so we're using jedi to do the autocompletion. the
        # __autocomplete is defined in `autocomplete_patch` above.
        return self.execute("__autocomplete('%s')" % code)
# simplest usage - execute statments and check if OK msgid = client.execute('a = 2') ret = client.get_shell_msg() status = ret['content']['status'] if status == 'ok': print('statement executed ok') elif status == 'error': ename = ret['content']['ename'] print('there was a %s exception, which will also appear on the ' 'iopub channel' % ename) # listen to what's going on in the kernel with blocking calls, # and take different actions depending on what's arriving while True: try: msg = client.get_iopub_msg(timeout=.1) msg_type = msg['header']['msg_type'] if msg_type == 'status': color_print('status now', msg['content']['execution_state'], color='OKCYAN') elif msg_type == 'execute_input': color_print('input [%u]: ' % msg['content']['execution_count'], '"%s"' % msg['content']['code'], color='OKGREEN') elif msg_type == 'execute_result': color_print('output [%u]: ' % msg['content']['execution_count'], '"%s"' % msg['content']['data']['text/plain'], color='HEADER') elif msg_type == 'error': ename = msg['content']['ename']
class Kernel(object):
    """Wrapper around a child IPython kernel process that streams execution
    status and results to stdout as JSON lines (one object per message)."""

    # kernel config is stored in a dot file with the active directory
    def __init__(self, config, active_dir, pyspark):
        # right now we're spawning a child process for IPython. we can
        # probably work directly with the IPython kernel API, but the docs
        # don't really explain how to do it.
        log_file = None
        if pyspark:
            os.environ["IPYTHON_OPTS"] = "kernel -f %s" % config
            pyspark = os.path.join(os.environ.get("SPARK_HOME"), "bin/pyspark")
            spark_log = os.environ.get("SPARK_LOG", None)
            if spark_log:
                log_file = open(spark_log, "w")
            spark_opts = os.environ.get("SPARK_OPTS", "")
            # $SPARK_HOME/bin/pyspark <SPARK_OPTS>
            args = [pyspark] + spark_opts.split()
            p = subprocess.Popen(args, stdout=log_file, stderr=log_file)
        else:
            args = [sys.executable, '-m', 'IPython', 'kernel', '-f', config]
            p = subprocess.Popen(args, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)

        # when __this__ process exits, we're going to remove the ipython
        # config file and kill the ipython subprocess
        atexit.register(p.terminate)

        def remove_config():
            if os.path.isfile(config):
                os.remove(config)
        atexit.register(remove_config)

        # we can't connect to the kernel immediately, so wait until its
        # connection file exists before moving on
        while not os.path.isfile(config):
            time.sleep(0.1)

        def close_file():
            if log_file:
                log_file.close()
        atexit.register(close_file)

        # fire up the kernel with the appropriate config
        self.client = BlockingKernelClient(connection_file=config)
        self.client.load_connection_file()
        self.client.start_channels()

        # load our monkeypatches...
        self.client.execute("%matplotlib inline")
        python_patch_file = os.path.join(dirname, "langs", "python-patch.py")
        self.client.execute("%run " + python_patch_file)

    def _run_code(self, execution_id, code, timeout=0.1):
        # executes `code` in the kernel and streams progress to stdout:
        # 1) execute code and grab the msg_id for that execution
        # 2) emit a "started" request record
        # 3) read iopub messages; for execute_result / display_data /
        #    stream / error messages, fold the relevant payload into
        #    `output` and emit it as a JSON line
        # 4) when the kernel reports idle with our msg_id as parent, emit a
        #    "complete" record and return
        msg_id = self.client.execute(code, allow_stdin=False)

        request = {
            "id": execution_id,
            "msg_id": msg_id,
            "code": code,
            "status": "started"
        }
        sys.stdout.write(json.dumps(request) + '\n')
        sys.stdout.flush()

        output = {
            "id": execution_id,
            "msg_id": msg_id,
            "output": "",
            "stream": None,
            "image": None,
            "error": None
        }

        while True:
            try:
                reply = self.client.get_iopub_msg(timeout=timeout)
            except Empty:
                continue

            if "execution_state" in reply['content']:
                if reply['content']['execution_state'] == "idle" and reply['parent_header']['msg_id'] == msg_id:
                    if reply['parent_header']['msg_type'] == "execute_request":
                        request["status"] = "complete"
                        sys.stdout.write(json.dumps(request) + '\n')
                        sys.stdout.flush()
                        return
            elif reply['header']['msg_type'] == "execute_result":
                output['output'] = reply['content']['data'].get('text/plain', '')
                output['stream'] = reply['content']['data'].get('text/plain', '')
            elif reply['header']['msg_type'] == "display_data":
                if 'image/png' in reply['content']['data']:
                    output['image'] = reply['content']['data']['image/png']
                elif 'text/html' in reply['content']['data']:
                    output['html'] = reply['content']['data']['text/html']
            elif reply['header']['msg_type'] == "stream":
                output['output'] += reply['content'].get('text', '')
                output['stream'] = reply['content'].get('text', '')
            elif reply['header']['msg_type'] == "error":
                output['error'] = "\n".join(reply['content']['traceback'])

            # TODO: if we have something non-trivial to send back...
            sys.stdout.write(json.dumps(output) + '\n')
            sys.stdout.flush()
            # reset the per-message fields so the next emission only
            # carries new incremental data
            output['stream'] = None
            output['image'] = None
            output['html'] = None

    def _complete(self, execution_id, code, timeout=0.5):
        # Call ipython kernel complete, wait for the shell reply with the
        # matching msg_id, and construct the UI payload. The reply content
        # looks like: {'matches': ['elif', 'else'], 'status': 'ok',
        # 'cursor_start': 0, 'cursor_end': 2, 'metadata': {}}.
        msg_id = self.client.complete(code)
        request = {
            "id": execution_id,
            "msg_id": msg_id,
            "code": code,
            "status": "started"
        }
        sys.stdout.write(json.dumps(request) + '\n')
        sys.stdout.flush()

        output = {
            "id": execution_id,
            "msg_id": msg_id,
            "output": None,
            "image": None,
            "error": None
        }

        while True:
            try:
                reply = self.client.get_shell_msg(timeout=timeout)
            except Empty:
                continue

            if "matches" in reply['content'] and reply['msg_type'] == "complete_reply" and reply['parent_header']['msg_id'] == msg_id:
                results = []
                for completion in reply['content']['matches']:
                    result = {
                        "value": completion,
                        "dtype": "---"
                    }
                    if "." in code:
                        result['text'] = result['value']
                        result["dtype"] = "function"
                    else:
                        result['text'] = result['value']
                        result["dtype"] = ""
                    results.append(result)
                output['output'] = results
                output['status'] = "complete"
                sys.stdout.write(json.dumps(output) + '\n')
                sys.stdout.flush()
                return

    def execute(self, execution_id, code, complete=False):
        """Run or complete `code`; re-applies kernel patches after a reset."""
        if complete == True:
            return self._complete(execution_id, code)
        else:
            result = self._run_code(execution_id, code)
            if re.match("%?reset", code):
                # load our monkeypatches... (the original referenced a
                # module-level `k.client` here — a latent NameError inside
                # the class; `self.client` is the same kernel client)
                self.client.execute("%matplotlib inline")
                # NOTE(review): `vars_patch` must be defined at module
                # level — confirm it exists in this variant, which loads
                # patches via %run in __init__.
                self.client.execute(vars_patch)
            return result

    def get_packages(self):
        """Ask the kernel for its installed packages via `__get_packages`."""
        # the original called self.execute("__get_packages()") with a
        # single argument, which always raised TypeError (execute requires
        # an execution_id); generate a fresh id for this internal request.
        return self.execute(str(uuid.uuid4()), "__get_packages()")
class Kernel(object):
    """Wrapper around a child IPython kernel process."""

    def __init__(self, active_dir):
        # kernel config is stored in a dot file with the active directory
        config = os.path.join(active_dir, ".kernel-%s.json" % str(uuid.uuid4()))

        # right now we're spawning a child process for IPython. we can
        # probably work directly with the IPython kernel API, but the docs
        # don't really explain how to do it.
        args = [sys.executable, '-m', 'IPython', 'kernel', '-f', config]
        p = subprocess.Popen(args, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # when __this__ process exits, we're going to remove the ipython
        # config file and kill the ipython subprocess
        atexit.register(p.terminate)

        def remove_config():
            # the kernel may already have removed its own connection file;
            # an unguarded os.remove raised OSError at interpreter exit
            if os.path.isfile(config):
                os.remove(config)
        atexit.register(remove_config)

        # i found that if i tried to connect to the kernel immediately, it
        # wasn't up and running. 1.5 seconds was arbitrarily chosen (but
        # seems to work)
        time.sleep(1.5)

        # fire up the kernel with the appropriate config
        self.client = BlockingKernelClient(connection_file=config)
        self.client.load_connection_file()
        self.client.start_channels()

        # load our monkeypatches...
        self.client.execute("%matplotlib inline")
        self.client.execute(autocomplete_patch)
        self.client.execute(vars_patch)

    def _run_code(self, code, timeout=0.1):
        # this function executes some code and waits for it to completely
        # finish before returning. i don't think that this is neccessarily
        # the best way to do this, but the IPython documentation isn't very
        # helpful for this particular topic.
        #
        # 1) execute code and grab the ID for that execution thread
        # 2) look for messages coming from the "iopub" channel (this is
        #    just a stream of output)
        # 3) when we get a message that is one of the following, save the
        #    relevant data to `data`:
        #       - execute_result - content from repr
        #       - stream - content from stdout
        #       - error - ansii encoded stacktrace
        # the final piece is that we check for when the message indicates
        # that the kernel is idle and the message's parent is the original
        # execution ID (msg_id) associated with our executing code. if this
        # is the case, we return the data and the msg_id and exit.
        msg_id = self.client.execute(code)

        data = None
        image = None
        while True:
            try:
                reply = self.client.get_iopub_msg(timeout=timeout)
            except Empty:
                continue

            if "execution_state" in reply['content']:
                if reply['content']['execution_state'] == "idle" and reply[
                        'parent_header']['msg_id'] == msg_id:
                    if reply['parent_header']['msg_type'] == "execute_request":
                        return {
                            "msg_id": msg_id,
                            "output": data,
                            "image": image
                        }
            elif reply['header']['msg_type'] == "execute_result":
                data = reply['content']['data'].get('text/plain', '')
            elif reply['header']['msg_type'] == "display_data":
                image = reply['content']['data'].get('image/png', '')
            elif reply['header']['msg_type'] == "stream":
                data = reply['content'].get('text', '')
            elif reply['header']['msg_type'] == "error":
                data = "\n".join(reply['content']['traceback'])

    def execute(self, code):
        """Run `code` in the kernel and return its collected output dict."""
        return self._run_code(code)

    def complete(self, code):
        # i couldn't figure out how to get the autocomplete working with
        # the ipython kernel (i couldn't get a completion_reply from the
        # iopub), so we're using jedi to do the autocompletion. the
        # __autocomplete is defined in `autocomplete_patch` above.
        return self.execute("__autocomplete('%s')" % code)

    def get_dataframes(self):
        """Ask the kernel for its current variables via `__get_variables`."""
        return self.execute("__get_variables()")
msg_id = client.execute("1 + 10") # client.wait_for_ready() res = client.get_shell_msg(msg_id, timeout=1) print(res) print("----------------------------------------") msg = res["msg_id"] for i in range(10): if not client.is_alive(): print("not alived") break try: res = client.get_iopub_msg(msg_id, timeout=1) except Empty as e: print("!", repr(e)) time.sleep(0.1) continue if res["msg_type"] != "status": print("!!", res) break print(res) if res["content"]["execution_state"] != "busy": print(res) print("ok") break
# msg_id = client.execute("print('hello')") msg_id = client.execute("1 + 10") # client.wait_for_ready() res = client.get_shell_msg(msg_id, timeout=1) print(res) print("----------------------------------------") msg = res["msg_id"] for i in range(10): if not client.is_alive(): print("not alived") break try: res = client.get_iopub_msg(msg_id, timeout=1) except Empty as e: print("!", repr(e)) time.sleep(0.1) continue if res["msg_type"] != "status": print("!!", res) break print(res) if res["content"]["execution_state"] != "busy": print(res) print("ok") break
class DaisyWorkflow_client:
    """Client-side proxy for a Daisy workflow living in a remote IPython
    kernel.

    Commands are sent as source strings over a jupyter_client
    BlockingKernelClient; results are recovered by parsing the repr text
    that comes back on the iopub channel (see execute_status).
    """

    def __init__(self, connection_file=None, executable=False):
        # connection_file: path to the remote kernel's connection JSON.
        # executable: stored flag; not otherwise used in this class.
        import os
        self.alg_keys = []   # cached algorithm names from the remote side
        self.dat_keys = []   # cached data keys from the remote side
        #super().__init__()
        self.kc = BlockingKernelClient()
        if connection_file == None:
            raise Exception(
                'Please Specific Connection file to Remote IPython Kernel first'
            )
        if os.access(connection_file, os.R_OK) == False:
            raise Exception('The connection file can no be read!')
        self.kc.load_connection_file(connection_file)
        try:
            self.kc.start_channels()
        except RuntimeError:
            raise Exception(
                'Can not start channels, Please CHECK REMOTE KERNEL STATUS')
        self.executable = executable
        self.remote_name = None      # variable name of the workflow remotely
        self.alg_clients = {}        # algorithm_name -> DaisyAlgorithm_client

    def initialize(self,
                   class_name=None,
                   workflow_name=None,
                   workflow_cfgfile=None,
                   algorithms_cfgfile=None):
        """Import and construct the workflow class inside the remote kernel,
        then initialize it from a local JSON config file.

        NOTE(review): `algorithms_cfgfile` is accepted but never used here.
        `randint` must be imported at module level (from random).
        """
        import os, json
        if class_name == None:
            raise Exception('Please Specific Workflow class name first')
        cmd = "from Daisy.Workflow import " + class_name
        self.kc.execute_interactive(cmd)
        if workflow_name == None:
            workflow_name = class_name
        self.remote_name = workflow_name
        cmd = self.remote_name + " = " + class_name + "('" + workflow_name + "')"
        self.kc.execute_interactive(cmd)
        if workflow_cfgfile == None:
            raise Exception('Please Specific Workflow Config file first')
        if os.access(workflow_cfgfile, os.R_OK) == False:
            raise Exception('The Workflow Config file can no be read!')
        with open(workflow_cfgfile, 'r') as json_file:
            string = json_file.read()
            wf_cfg = json.loads(string)
        # push the config dict into the remote kernel under a throwaway
        # name, pass it to initialize(), then delete it remotely
        temp_name = 'cfg_dict' + str(randint(1, 1000000))
        cmd = temp_name + ' = ' + str(wf_cfg)
        self.kc.execute_interactive(cmd)
        cmd = self.remote_name + ".initialize(workflow_engine='PyWorkflowEngine', workflow_environment = " + temp_name + ")"
        self.kc.execute_interactive(cmd)
        self.kc.execute_interactive('del ' + temp_name)

    def setLogLevel(self, level):
        # intentionally a no-op in the client proxy
        pass
        #Sniper.setLogLevel(level)
        #super().setLogLevel(level)

    def data_keys(self):
        """Return the remote workflow's data keys, parsed from the repr of
        the remote list (text between '[' and ']', split on commas)."""
        cmd = self.remote_name + ".data_keys()"
        msg_id = self.kc.execute(cmd)
        exe_msg = self.execute_status(msg_id)
        dat_keys = []
        if 'data' in exe_msg:
            msg = exe_msg['data']
            # strip the surrounding brackets of the repr'd list
            msg = msg[msg.find("[") + 1:msg.rfind("]")]
            items = msg.split(',')
            #items = exe_msg['data'].split('\n')
            for i in items:
                # take the text between the first and last quote
                begin = i.find("'")
                end = i.rfind("'")
                dat_keys.append(i[begin + 1:end])
        self.dat_keys = dat_keys
        return self.dat_keys

    def algorithm_keys(self):
        """Return the remote workflow's algorithm names (same repr-parsing
        scheme as data_keys)."""
        cmd = self.remote_name + ".algorithm_keys()"
        msg_id = self.kc.execute(cmd)
        exe_msg = self.execute_status(msg_id)
        alg_keys = []
        if 'data' in exe_msg:
            msg = exe_msg['data']
            msg = msg[msg.find("[") + 1:msg.rfind("]")]
            items = msg.split(',')
            for i in items:
                begin = i.find("'")
                end = i.rfind("'")
                alg_keys.append(i[begin + 1:end])
        self.alg_keys = alg_keys
        return self.alg_keys

    def get_data(self, data_name):
        # data objects live only in the remote kernel; fetching them over
        # the wire is not supported
        raise Exception('Cannot get DataObject from Server')
        #return self.engine.datastore[data_name]

    def get_algorithm(self, algorithm_name):
        """Return (and cache) a client proxy for a remote algorithm, or
        False when the name is unknown remotely."""
        if algorithm_name in self.alg_clients.keys():
            return self.alg_clients[algorithm_name]
        cmd = self.remote_name
        if algorithm_name in self.alg_keys:
            cmd = cmd + ".get_algorithm('" + algorithm_name + "')"
        elif algorithm_name in self.algorithm_keys():
            # cache miss: refresh the key list from the remote side
            cmd = cmd + ".get_algorithm('" + algorithm_name + "')"
        else:
            return False
        msg_id = self.kc.execute(cmd)
        exe_msg = self.execute_status(msg_id)
        self.alg_clients[algorithm_name] = DaisyAlgorithm_client(
            self.kc, exe_msg['code'])
        print(exe_msg['data'])
        return self.alg_clients[algorithm_name]

    def execute(self):
        # intentionally a no-op in the client proxy
        pass
        #raise Exception('Must')

    def finalize(self):
        """Finalize the remote workflow object."""
        cmd = self.remote_name
        cmd = cmd + ".finalize()"
        self.kc.execute_interactive(cmd)
        #exe_msg = self.execute_status(msg_id)
        #print(exe_msg)
        #self.engine.finalize()

    def execute_status(self, msg_id):
        """Collect iopub output for `msg_id` into a dict.

        Keys that may appear: 'stdout'/'stderr' (stream text), 'error'
        (traceback), 'data' (text/plain repr), 'code'. Stops on error, on
        having both code and data, or on a 5 s iopub timeout.

        NOTE(review): the bare `except:` also swallows KeyboardInterrupt
        and hides real errors as "timeout" — consider `except Empty:`.
        """
        code_flag = False
        data_flag = False
        exe_msg = {'msg_id': msg_id}
        while True:
            try:
                kc_msg = self.kc.get_iopub_msg(timeout=5)
                # skip messages that belong to other executions
                if 'parent_header' in kc_msg and kc_msg['parent_header'][
                        'msg_id'] != exe_msg['msg_id']:
                    continue
                #_output_hook_default(kc_msg)
                msg_type = kc_msg['header']['msg_type']
                msg_cont = kc_msg['content']
                if msg_type == 'stream':
                    exe_msg[msg_cont['name']] = msg_cont['text']
                elif msg_type == 'error':
                    exe_msg['error'] = msg_cont['traceback']
                    print(msg_cont['traceback'])
                    break
                elif msg_type in ('display_data', 'execute_result'):
                    if 'data' in msg_cont:
                        data_flag = True
                        exe_msg['data'] = msg_cont['data'].get(
                            'text/plain', '')
                    if 'code' in msg_cont:
                        code_flag = True
                        exe_msg['code'] = msg_cont['code']
                if code_flag and data_flag:
                    break
            except:
                print('timeout kc.get_iopub_msg')
                break
        return exe_msg
class ToreeClient:
    """Blocking client for an Apache Toree kernel.

    Wraps a jupyter_client BlockingKernelClient and exposes a synchronous
    `eval` that submits code and gathers stream/execute_result output
    until the kernel reports idle.
    """

    def __init__(self, connectionFileLocation):
        # connectionFileLocation: path to the kernel's connection JSON
        self.client = BlockingKernelClient(
            connection_file=connectionFileLocation)
        self.client.load_connection_file(
            connection_file=connectionFileLocation)

    def is_alive(self):
        """True when the kernel process is believed to be running."""
        return self.client.is_alive()

    def is_ready(self):
        """True when the kernel answers a trivial evaluation ('1' -> '1').

        NOTE(review): the bare `except:` also hides real failures
        (including KeyboardInterrupt) as "not ready".
        """
        try:
            result = self.eval('1')
            if result == '1':
                return True
            else:
                return False
        except:
            return False

    def wait_for_ready(self, timeout=TIMEOUT):
        # Wait for initialization, by receiving an 'idle' message
        # Flush Shell channel
        abs_timeout = time.time() + timeout
        while True:
            try:
                msg = self.client.shell_channel.get_msg(block=True,
                                                        timeout=0.2)
            except Empty:
                break
            # Check if current time is ready check time plus timeout
            if time.time() > abs_timeout:
                raise RuntimeError("Kernel didn't respond in %d seconds" %
                                   timeout)

        # Flush IOPub channel
        while True:
            try:
                msg = self.client.iopub_channel.get_msg(block=True,
                                                        timeout=0.2)
            except Empty:
                break
            # Check if current time is ready check time plus timeout
            if time.time() > abs_timeout:
                raise RuntimeError("Kernel didn't respond in %d seconds" %
                                   timeout)

    def eval(self, code, timeout=TIMEOUT):
        """Execute `code` synchronously and return its accumulated output.

        Returns the joined stream/text results as a string, or an
        HtmlOutput wrapper when the kernel returned text/html. Raises on
        dead kernel, iopub timeout, or an error reply.

        NOTE(review): the local variable `type` shadows the builtin, and
        the bare `except:` clauses around get_msg calls hide non-timeout
        failures.
        """
        # Validate that remote kernel is available before submitting request
        if self.client.is_alive() == False:
            raise Exception(
                'Problem connecting to remote kernel: Kernel is NOT alive')

        debug_print('-----------------------------------------')
        debug_print('Executing: ')
        debug_pprint(code)

        # submit request and retrieve the message id for the execution
        msg_id = self.client.execute(code=code, allow_stdin=False)
        debug_print('Message id for code execution:' + msg_id)

        # now the kernel should be 'busy' with [parent_header][msg_id]
        # being the current message
        try:
            busy_msg = self.client.iopub_channel.get_msg(block=True,
                                                         timeout=timeout)
        except:
            raise Exception('Error: Timeout retrieving busy status message')

        debug_print('Current kernel status (%s): %s' %
                    (busy_msg['parent_header']['msg_id'],
                     busy_msg['content']['execution_state']))

        if busy_msg['content']['execution_state'] == 'busy':
            debug_print('busy_message received as expected')
        else:
            debug_print('Error: did not receive busy message for request %s' %
                        msg_id)
            debug_pprint(busy_msg)

        # Check message reply status (ok / error)
        debug_print('Waiting for status reply')
        reply = self.client.get_shell_msg(block=True, timeout=timeout)
        debug_print('message reply: %s' % reply['content']['status'])
        debug_pprint(reply)

        type = ''
        results = []
        while True:
            try:
                msg = self.client.get_iopub_msg(timeout=timeout)
            except:
                raise Exception("Error: Timeout executing request")
            debug_print('message')
            debug_pprint(msg)

            # validate that the responses are still related to current request
            if msg['parent_header']['msg_id'] != msg_id:
                debug_print('Warning: Invalid message id received ' +
                            msg['parent_header']['msg_id'] + ' expected ' +
                            msg_id)
                continue

            # validate execute_inputs are from current code
            elif msg['msg_type'] == 'execute_input':
                debug_print('current message status: ' + msg['msg_type'])
                debug_print('current message content code: ' +
                            msg['content']['code'])
                if msg['content']['code'] == code:
                    continue

            # Stream results are being returned, accumulate them to results
            elif msg['msg_type'] == 'stream':
                type = 'stream'
                results.append(msg['content']['text'])
                continue

            # Execute_Results are being returned:
            # They can be text/plain or text/html
            # accumulate them to results
            elif msg['msg_type'] == 'execute_result':
                debug_print('Received results of type: %s ' %
                            msg['content']['data'])
                if 'text/plain' in msg['content']['data']:
                    type = 'text'
                    results.append(msg['content']['data']['text/plain'])
                elif 'text/html' in msg['content']['data']:
                    type = 'html'
                    results.append(msg['content']['data']['text/html'])
                continue

            # When idle, responses have all been processed/returned
            elif msg['msg_type'] == 'status':
                debug_print('current message status: ' +
                            msg['content']['execution_state'])
                if msg['content']['execution_state'] == 'idle':
                    break

            else:
                debug_print('Message ignored: %s' % msg['msg_type'])

        if reply['content']['status'] == 'ok':
            debug_print('Returning sucessful invocation result')
            if type == 'html':
                html = ''.join(results)
                htmlWrapper = HtmlOutput(html)
                return htmlWrapper
            else:
                return ''.join(results)
        else:
            debug_print('Returning failed invocation exception')
            error = ''
            if 'ename' in reply['content']:
                error = reply['content']['ename']
            error_message = ''
            if 'evalue' in reply['content']:
                error_message = reply['content']['evalue']
            raise Exception('Error: %s - %s' % (error, error_message))