def exec_code(kid, code):
    """
    Execute arbitrary ``code`` in the kernel with id ``kid``.

    Returns:
        tuple: ``(output, error)`` — the kernel's stdout text and the error,
        if any. When the shell reply status is not ``ok``, the captured text
        is moved into ``error`` and ``output`` is set to ``None``.
    """
    # Load connection info and init communications.
    cf = find_connection_file(kid)

    with jupyter_lock:
        km = BlockingKernelClient(connection_file=cf)
        km.load_connection_file()
        km.start_channels()

        msg_id = km.execute(code, store_history=False)
        reply = km.get_shell_msg(msg_id, timeout=60)

        output, error = None, None
        try:
            # Drain iopub until the first stdout message for this execution.
            while km.is_alive():
                msg = km.get_iopub_msg(timeout=10)
                if ("content" in msg
                        and "name" in msg["content"]
                        and msg["content"]["name"] == "stdout"):
                    output = msg["content"]["text"]
                    break
        finally:
            # Fix: the original leaked the ZMQ channels when get_iopub_msg
            # timed out (queue.Empty propagated before stop_channels ran).
            # Always release the channels.
            km.stop_channels()

        if reply["content"]["status"] != "ok":
            logging.error(f"Status is {reply['content']['status']}")
            logging.error(output)
            error = output
            output = None

    return output, error
def exec_code(kid, var, code):
    """Run ``code`` on the kernel with id ``kid``; return ``(output, error)``."""
    # Resolve the kernel's connection file from its id.
    cf = find_connection_file(kid)  # str(port))

    with jupyter_lock:
        client = BlockingKernelClient(connection_file=cf)
        client.load_connection_file()
        client.start_channels()

        msg_id = client.execute(code, store_history=False)
        reply = client.get_shell_msg(msg_id, timeout=10)

        state = 'busy'
        output = None
        idle_count = 0
        try:
            while client.is_alive():
                try:
                    msg = client.get_iopub_msg(timeout=10)
                except Empty:
                    continue
                if 'content' not in msg:
                    continue
                content = msg['content']
                if content.get('name') == 'stdout':
                    # First stdout chunk is taken as the result.
                    output = content['text']
                    break
                if 'execution_state' in content:
                    state = content['execution_state']
                    if state == 'idle':
                        idle_count += 1
        except KeyboardInterrupt:
            logging.error('Keyboard interrupt')
        finally:
            client.stop_channels()

        error = ''
        if reply['content']['status'] != 'ok':
            logging.error('Status is ' + reply['content']['status'])
            logging.error(str(output))
            error = output
            output = None

    return output, error
def test_start_ipython_scheduler(loop, zmq_ctx):
    from jupyter_client import BlockingKernelClient

    # Boot a one-worker cluster, start IPython inside the scheduler and
    # drive it through a blocking kernel client.
    with cluster(1) as (s, [a]), Client(s["address"], loop=loop) as e:
        connection_info = e.start_ipython_scheduler()
        kernel_client = BlockingKernelClient()
        kernel_client.load_connection_info(connection_info)
        kernel_client.start_channels()
        request_id = kernel_client.execute("scheduler")
        response = kernel_client.get_shell_msg(timeout=10)
        kernel_client.stop_channels()
def test_start_ipython_scheduler(loop, zmq_ctx):
    from jupyter_client import BlockingKernelClient

    # Start IPython inside the scheduler and talk to it over a kernel
    # client authenticated with the session key the scheduler hands back.
    with cluster(1) as (s, [a]):
        with Executor(('127.0.0.1', s['port']), loop=loop) as executor:
            connection_info = executor.start_ipython_scheduler()
            session_key = connection_info.pop('key')
            kernel_client = BlockingKernelClient(**connection_info)
            kernel_client.session.key = session_key
            kernel_client.start_channels()
            request_id = kernel_client.execute("scheduler")
            response = kernel_client.get_shell_msg(timeout=10)
            kernel_client.stop_channels()
def test_start_ipython_scheduler(loop, zmq_ctx):
    from jupyter_client import BlockingKernelClient

    # State checking is disabled for this cluster; connect a kernel client
    # to the scheduler-embedded IPython using the returned session key.
    with cluster(1, should_check_state=False) as (s, [a]):
        with Client(s['address'], loop=loop) as client:
            connection_info = client.start_ipython_scheduler()
            session_key = connection_info.pop('key')
            kernel_client = BlockingKernelClient(**connection_info)
            kernel_client.session.key = session_key
            kernel_client.start_channels()
            request_id = kernel_client.execute("scheduler")
            response = kernel_client.get_shell_msg(timeout=10)
            kernel_client.stop_channels()
def test_start_ipython_scheduler(loop, zmq_ctx):
    from jupyter_client import BlockingKernelClient

    # Address the scheduler by (host, port) tuple, then run a statement on
    # its embedded IPython kernel.
    with cluster(1) as (s, [a]):
        with Client(('127.0.0.1', s['port']), loop=loop) as client:
            connection_info = client.start_ipython_scheduler()
            session_key = connection_info.pop('key')
            kernel_client = BlockingKernelClient(**connection_info)
            kernel_client.session.key = session_key
            kernel_client.start_channels()
            request_id = kernel_client.execute("scheduler")
            response = kernel_client.get_shell_msg(timeout=10)
            kernel_client.stop_channels()
def run(self):
    """Bridge a running Jupyter kernel and this object's queues.

    Forwards iopub traffic to ``self.pub_q``, executes commands taken from
    ``self.cmd_q`` (``None`` is the shutdown sentinel) and pushes each
    shell reply onto ``self.ctrl_q``.
    """
    # Attach to the most recently started kernel.
    connection_file = find_connection_file()
    client = BlockingKernelClient(connection_file=connection_file)
    client.load_connection_file()
    # Channel selection mirrors what this loop actually uses: iopub for
    # output, control for requests; heartbeat/stdin/shell stay off.
    client.start_channels(shell=False, iopub=True, stdin=False,
                          control=True, hb=False)

    while True:
        # Forward any pending iopub message; a timeout just means idle.
        try:
            self.pub_q.put(client.get_iopub_msg(TIMEOUT))
        except Empty:
            pass

        # Drain one command, if available.
        if self.cmd_q.qsize():
            command = self.cmd_q.get()
            if command is None:
                print('Client thread closing')
                break
            client.execute(command)
            self.ctrl_q.put(client.get_shell_msg())
def test_start_ipython_workers(loop, zmq_ctx):
    from jupyter_client import BlockingKernelClient

    # Launch IPython in every worker, connect to the first one and check
    # that a trivial execution round-trips with status 'ok'.
    with cluster(1) as (s, [a]), Client(s["address"], loop=loop) as e:
        worker_infos = e.start_ipython_workers()
        connection_info = first(worker_infos.values())
        kernel_client = BlockingKernelClient()
        kernel_client.load_connection_info(connection_info)
        kernel_client.start_channels()
        kernel_client.wait_for_ready(timeout=10)
        request_id = kernel_client.execute("worker")
        response = kernel_client.get_shell_msg(timeout=10)
        assert response["parent_header"]["msg_id"] == request_id
        assert response["content"]["status"] == "ok"
        kernel_client.stop_channels()
def test_start_ipython_workers(loop, zmq_ctx):
    from jupyter_client import BlockingKernelClient

    # Launch IPython in every worker; authenticate the kernel client with
    # the session key and verify a round-trip execution succeeds.
    with cluster(1) as (s, [a]):
        with Client(('127.0.0.1', s['port']), loop=loop) as client:
            worker_infos = client.start_ipython_workers()
            connection_info = first(worker_infos.values())
            session_key = connection_info.pop('key')
            kernel_client = BlockingKernelClient(**connection_info)
            kernel_client.session.key = session_key
            kernel_client.start_channels()
            kernel_client.wait_for_ready(timeout=10)
            request_id = kernel_client.execute("worker")
            response = kernel_client.get_shell_msg(timeout=10)
            assert response['parent_header']['msg_id'] == request_id
            assert response['content']['status'] == 'ok'
            kernel_client.stop_channels()
def test_start_ipython_workers(loop, zmq_ctx):
    from jupyter_client import BlockingKernelClient

    # Same worker round-trip as above, but through the Executor front end.
    with cluster(1) as (s, [a]):
        with Executor(('127.0.0.1', s['port']), loop=loop) as executor:
            worker_infos = executor.start_ipython_workers()
            connection_info = first(worker_infos.values())
            session_key = connection_info.pop('key')
            kernel_client = BlockingKernelClient(**connection_info)
            kernel_client.session.key = session_key
            kernel_client.start_channels()
            kernel_client.wait_for_ready(timeout=10)
            request_id = kernel_client.execute("worker")
            response = kernel_client.get_shell_msg(timeout=10)
            assert response['parent_header']['msg_id'] == request_id
            assert response['content']['status'] == 'ok'
            kernel_client.stop_channels()
class Kernel(object):
    """Spawn a child IPython (or PySpark-embedded) kernel and run code on it.

    The kernel's connection file is written into ``active_dir`` and cleaned
    up (together with the subprocess) via ``atexit`` handlers.
    """

    def __init__(self, active_dir, pyspark):
        # kernel config is stored in a dot file with the active directory
        config = os.path.join(active_dir, ".kernel-%s.json" % str(uuid.uuid4()))

        # right now we're spawning a child process for IPython. we can
        # probably work directly with the IPython kernel API, but the docs
        # don't really explain how to do it.
        log_file = None
        if pyspark:
            # IPYTHON_OPTS tells pyspark's driver to boot an IPython kernel
            # writing its connection info to `config`.
            os.environ["IPYTHON_OPTS"] = "kernel -f %s" % config
            # NOTE(review): os.environ.get("SPARK_HOME") returns None when
            # SPARK_HOME is unset, making os.path.join raise — this assumes
            # SPARK_HOME is always exported; confirm with callers.
            pyspark = os.path.join(os.environ.get("SPARK_HOME"), "bin/pyspark")
            spark_log = os.environ.get("SPARK_LOG", None)
            if spark_log:
                log_file = open(spark_log, "w")
            spark_opts = os.environ.get("SPARK_OPTS", "")
            args = [pyspark] + spark_opts.split()  # $SPARK_HOME/bin/pyspark <SPARK_OPTS>
            p = subprocess.Popen(args, stdout=log_file, stderr=log_file)
        else:
            args = [sys.executable, '-m', 'IPython', 'kernel', '-f', config]
            p = subprocess.Popen(args, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)

        # when __this__ process exits, we're going to remove the ipython config
        # file and kill the ipython subprocess
        atexit.register(p.terminate)

        def remove_config():
            os.remove(config)
        atexit.register(remove_config)

        def close_file():
            if log_file:
                log_file.close()
        atexit.register(close_file)

        # i found that if i tried to connect to the kernel immediately, it wasn't
        # up and running. 1.5 seconds was arbitrarily chosen (but seems to work)
        time.sleep(1.5)

        # fire up the kernel with the appropriate config
        self.client = BlockingKernelClient(connection_file=config)
        self.client.load_connection_file()
        self.client.start_channels()

        # load our monkeypatches... (autocomplete_patch and vars_patch are
        # code strings defined elsewhere in this module)
        self.client.execute("%matplotlib inline")
        self.client.execute(autocomplete_patch)
        self.client.execute(vars_patch)

    def _run_code(self, code, timeout=0.1):
        """Execute ``code`` and block until the kernel reports idle.

        Returns a dict with keys ``msg_id``, ``output`` (text), ``image``
        (base64 png, if any) and ``error`` (joined traceback, if any).
        """
        # this function executes some code and waits for it to completely finish
        # before returning. i don't think that this is neccessarily the best
        # way to do this, but the IPython documentation isn't very helpful for
        # this particular topic.
        #
        # 1) execute code and grab the ID for that execution thread
        # 2) look for messages coming from the "iopub" channel (this is just a
        #    stream of output)
        # 3) when we get a message that is one of the following, save relevant
        #    data to `data`:
        #       - execute_result - content from repr
        #       - stream - content from stdout
        #       - error - ansii encoded stacktrace
        # the final piece is that we check for when the message indicates that
        # the kernel is idle and the message's parent is the original execution
        # ID (msg_id) that's associated with our executing code. if this is the
        # case, we'll return the data and the msg_id and exit
        msg_id = self.client.execute(code)

        output = {
            "msg_id": msg_id,
            "output": None,
            "image": None,
            "error": None
        }
        while True:
            try:
                reply = self.client.get_iopub_msg(timeout=timeout)
            except Empty:
                # nothing published yet; keep polling
                continue

            if "execution_state" in reply['content']:
                # idle + matching parent msg_id means our execution finished
                if reply['content']['execution_state'] == "idle" and reply['parent_header']['msg_id'] == msg_id:
                    if reply['parent_header']['msg_type'] == "execute_request":
                        return output
            elif reply['header']['msg_type'] == "execute_result":
                output['output'] = reply['content']['data'].get('text/plain', '')
            elif reply['header']['msg_type'] == "display_data":
                output['image'] = reply['content']['data'].get('image/png', '')
            elif reply['header']['msg_type'] == "stream":
                output['output'] = reply['content'].get('text', '')
            elif reply['header']['msg_type'] == "error":
                output['error'] = "\n".join(reply['content']['traceback'])

    def execute(self, code):
        """Public entry point: run ``code`` and return its result dict."""
        return self._run_code(code)

    def complete(self, code):
        # i couldn't figure out how to get the autocomplete working with the
        # ipython kernel (i couldn't get a completion_reply from the iopub), so
        # we're using jedi to do the autocompletion. the __autocomplete is
        # defined in `autocomplete_patch` above.
        return self.execute("__autocomplete('%s')" % code)

    def get_dataframes(self):
        """Return the kernel-side variable listing (see vars_patch)."""
        return self.execute("__get_variables()")
class Kernel(object):
    """Own a child IPython kernel subprocess and synchronously run code on it."""

    def __init__(self, active_dir):
        # kernel config is stored in a temp file
        config = os.path.join(active_dir, ".kernel-%s.json" % str(uuid.uuid4()))
        args = [sys.executable, '-m', 'IPython', 'kernel', '-f', config]
        p = subprocess.Popen(args, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # when __this__ process exits, we're going to remove the ipython config
        # file and kill the ipython subprocess
        atexit.register(p.terminate)

        def remove_config():
            os.remove(config)
        atexit.register(remove_config)

        # i found that if i tried to connect to the kernel immediately, it wasn't up
        # and running. 1.5 seconds was arbitrarily chosen (but seems to work)
        time.sleep(1.5)

        # fire up the kernel with the appropriate config
        self.client = BlockingKernelClient(connection_file=config)
        self.client.load_connection_file()
        self.client.start_channels()

        # load our monkeypatches... (autocomplete_patch and vars_patch are
        # code strings defined elsewhere in this module)
        self.client.execute("%matplotlib inline")
        self.client.execute(autocomplete_patch)
        self.client.execute(vars_patch)

    def _run_code(self, code, timeout=0.1):
        """Execute ``code`` and block until the kernel reports idle.

        Returns a dict with the execution's ``msg_id``, its text ``output``
        and any ``image`` (base64 png) produced.
        """
        # this function executes some code and waits for it to completely finish before
        # returning. i don't think that this is neccessarily the best way to do this, but
        # the IPython documentation isn't very helpful for this particular topic.
        #
        # 1) execute code and grab the ID for that execution thread
        # 2) look for messages coming from the "iopub" channel (this is just a stream of output)
        # 3) when we get a message that is one of the following, save relevant data to `data`:
        #       - execute_result - content from repr
        #       - stream - content from stdout
        #       - error - ansii encoded stacktrace
        # the final piece is that we check for when the message indicates that the kernel is idle
        # and the message's parent is the original execution ID (msg_id) that's associated with
        # our executing code. if this is the case, we'll return the data and the msg_id and exit
        msg_id = self.client.execute(code)

        data = None
        image = None
        while True:
            try:
                reply = self.client.get_iopub_msg(timeout=timeout)
            except Empty:
                # nothing published yet; keep polling
                continue

            if "execution_state" in reply['content']:
                # idle + matching parent msg_id means our execution finished
                if reply['content']['execution_state'] == "idle" and reply['parent_header']['msg_id'] == msg_id:
                    if reply['parent_header']['msg_type'] == "execute_request":
                        return {
                            "msg_id": msg_id,
                            "output": data,
                            "image": image
                        }
            elif reply['header']['msg_type'] == "execute_result":
                data = reply['content']['data'].get('text/plain', '')
            elif reply['header']['msg_type'] == "display_data":
                image = reply['content']['data'].get('image/png', '')
            elif reply['header']['msg_type'] == "stream":
                data = reply['content'].get('text', '')
            elif reply['header']['msg_type'] == "error":
                data = "\n".join(reply['content']['traceback'])

    def execute(self, code):
        """Public entry point: run ``code`` and return its result dict."""
        return self._run_code(code)

    def complete(self, code):
        # i couldn't figure out how to get the autocomplete working with the ipython
        # kernel (i couldn't get a completion_reply from the iopub), so we're using
        # jedi to do the autocompletion. the __autocomplete is defined in `autocomplete_patch`
        # above.
        return self.execute("__autocomplete('%s')" % code)
class JupyterRAMUsage(Stat):
    """
    tag: ``jupyter.ram_usage``

    settings:

    .. code-block:: javascript

        {
            "connection info": "",
            "query interval [s]": 1
        }

    Tracks the RAM usage of all variables in a user-specified jupyter
    notebook. If no connection info is given in the settings, take the
    kernel with the latest start date.

    ``connection info`` must be a string containing the info displayed when
    running ``%connect_info`` in a jupyter notebook (you can directly
    copy-paste it).

    ``query interval [s]`` specifies how often the thread running in the
    jupyter notebook should read the variables. The lower this is, the
    higher the resolution of the stat but it might start affecting the
    speed of your notebook when too low.

    Note that RAM tracked in this way is not equal to the actual RAM the OS
    needs because some further optimization is done by e. g. numpy to
    reduce the OS memory usage.
    """
    name = 'RAM Usage of objects in a Python Jupyter Notebook [MB]'
    base_tag = 'ram_usage'
    default_settings = {
        'connection info': '',
        # how often the memory usage is read in the jupyter notebook
        'query interval [s]': 1.
    }

    @classmethod
    def _read_latest_connection_file(cls):
        """
        Reads the latest jupyter kernel connection file.
        https://jupyter.readthedocs.io/en/latest/projects/jupyter-directories.html.
        """
        runtime_dir = jupyter_runtime_dir()
        files = glob.glob(os.path.join(runtime_dir, 'kernel-*.json'))
        if len(files) == 0:
            return None

        # use the latest connection file
        connection_file = max(files, key=os.path.getctime)
        with open(connection_file, 'r') as f:
            return json.load(f)

    @classmethod
    def get_connection_info(cls):
        """
        Get the target kernel connection info.
        Returns a dictionary of the connection info supplied in the settings,
        or the latest started kernel if none is given.
        Returns `None` if no kernel has been found.
        """
        if len(cls.settings['connection info']) == 0:
            return cls._read_latest_connection_file()

        return json.loads(cls.settings['connection info'])

    @classmethod
    def check_availability(cls):
        # the stat is not available if no suitable connection info
        # can be found
        if cls.get_connection_info() is None:
            raise exceptions.StatNotAvailableError(
                'Could not find any running kernel.')

    def __init__(self, fps):
        self.config = self.get_connection_info()

        # per-user data directory holding the csv the notebook writes into
        data_dir = appdirs.user_data_dir('permon', 'bminixhofer')
        os.makedirs(data_dir, exist_ok=True)
        self.usage_file = os.path.join(data_dir, 'jupyter_ram_usage.csv')
        # truncate/create the file so get_stat never reads stale data
        open(self.usage_file, 'w').close()

        # self.setup_code is the code that is run in the notebook when the
        # stat is instantiated. It starts a thread which reads the memory
        # usage of all public variables in a set interval and saves it to a
        # csv file in the user data directory
        self.setup_code = f"""
if '_permon_running' not in globals() or not _permon_running:
    import threading
    import csv
    import sys
    import time
    from pympler import asizeof
    from types import ModuleType

    def _permon_get_ram_usage_per_object():
        while _permon_running:
            ram_usage = []
            global_vars = [key for key in globals() if not key.startswith('_')]
            for name in global_vars:
                value = globals()[name] if name in globals() else None
                if isinstance(value, ModuleType):
                    continue
                try:
                    ram_usage.append((name, asizeof.asizeof(value)))
                except TypeError:
                    continue

            with open('{self.usage_file}', 'w') as f:
                writer = csv.writer(f, delimiter=',')
                for name, ram in ram_usage:
                    writer.writerow([name, ram])
            time.sleep({self.settings['query interval [s]']})

    _permon_thread = threading.Thread(target=_permon_get_ram_usage_per_object)
    _permon_running = True
    _permon_thread.start()
"""
        # flipping the flag makes the notebook-side thread exit its loop
        self.teardown_code = """
_permon_running = False
"""
        self.client = BlockingKernelClient()
        self.client.load_connection_info(self.config)
        self.client.start_channels()
        self.client.execute(self.setup_code)

        super(JupyterRAMUsage, self).__init__(fps=fps)

    def __del__(self):
        # stop the thread running in the jupyter notebook
        # and stop the connection to the kernel upon deletion
        self.client.execute(self.teardown_code)
        self.client.stop_channels()

    def get_stat(self):
        # reads the csv file the setup code has written to
        ram_usage = []
        with open(self.usage_file, 'r') as f:
            reader = csv.reader(f)
            for row in reader:
                # bytes -> MB
                ram_usage.append((row[0], float(row[1]) / 1000**2))

        # sort the ram_usage list so that the largest variables come first
        ram_usage = sorted(ram_usage, key=lambda x: x[1], reverse=True)
        # return the sum of RAM usage and the variables taking up the most RAM
        return sum(x[1] for x in ram_usage), ram_usage[:5]

    @property
    def minimum(self):
        return 0

    @property
    def maximum(self):
        # no fixed upper bound for memory usage
        return None
'BOLD': '\033[1m', 'UNDERLINE': '\033[4m', } print(lookup[color], end='') print(*args, end='') print(lookup['ENDC']) # setup by automatically finding a running kernel cf = find_connection_file() client = BlockingKernelClient(connection_file=cf) client.load_connection_file() client.start_channels() # simplest usage - execute statments and check if OK msgid = client.execute('a = 2') ret = client.get_shell_msg() status = ret['content']['status'] if status == 'ok': print('statement executed ok') elif status == 'error': ename = ret['content']['ename'] print('there was a %s exception, which will also appear on the ' 'iopub channel' % ename) # listen to what's going on in the kernel with blocking calls, # and take different actions depending on what's arriving while True: try: msg = client.get_iopub_msg(timeout=.1) msg_type = msg['header']['msg_type']
def main():
    """Execute code on the current Jupyter kernel.

    The code comes from the command-line arguments (joined with ``;``) or,
    when none are given, from stdin.
    """
    client = BlockingKernelClient()
    # Attach to the most recently started kernel's connection file.
    client.load_connection_file(find_connection_file())
    # Fix: channels must be started before execute() can send a request;
    # the original never called start_channels(), so execute() failed.
    client.start_channels()
    try:
        code = "; ".join(sys.argv[1:]) if len(sys.argv) > 1 else sys.stdin.read()
        client.execute(code, allow_stdin=False)
    finally:
        client.stop_channels()
class ToreeClient:
    """Blocking client for evaluating code on a remote (Toree) kernel."""

    def __init__(self, connectionFileLocation):
        # NOTE(review): channels are never started in this class; callers
        # appear responsible for start_channels() — confirm before use.
        self.client = BlockingKernelClient(
            connection_file=connectionFileLocation)
        self.client.load_connection_file(
            connection_file=connectionFileLocation)

    def is_alive(self):
        """Return True if the remote kernel process is responsive."""
        return self.client.is_alive()

    def is_ready(self):
        """Return True when the kernel successfully evaluates ``'1'``."""
        try:
            # A round-trip evaluation is the readiness probe.
            return self.eval('1') == '1'
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still propagates.
            return False

    def _flush_channel(self, channel, abs_timeout, timeout):
        # Drain one channel until it is empty; fail if messages keep
        # arriving past the absolute deadline.
        while True:
            try:
                channel.get_msg(block=True, timeout=0.2)
            except Empty:
                break
            # Check if current time is ready check time plus timeout
            if time.time() > abs_timeout:
                raise RuntimeError("Kernel didn't respond in %d seconds"
                                   % timeout)

    def wait_for_ready(self, timeout=TIMEOUT):
        """Wait for initialization by flushing the shell and iopub channels."""
        abs_timeout = time.time() + timeout
        self._flush_channel(self.client.shell_channel, abs_timeout, timeout)
        self._flush_channel(self.client.iopub_channel, abs_timeout, timeout)

    def eval(self, code, timeout=TIMEOUT):
        """Execute ``code`` remotely and return its accumulated output.

        Returns plain text, or an HtmlOutput wrapper for html results.
        Raises Exception on an unavailable kernel, a timeout, or a failed
        execution (carrying the kernel-reported ename/evalue).
        """
        # Validate that remote kernel is available before submitting request
        if not self.client.is_alive():
            raise Exception(
                'Problem connecting to remote kernel: Kernel is NOT alive')

        debug_print('-----------------------------------------')
        debug_print('Executing: ')
        debug_pprint(code)

        # submit request and retrieve the message id for the execution
        msg_id = self.client.execute(code=code, allow_stdin=False)
        debug_print('Message id for code execution:' + msg_id)

        # now the kernel should be 'busy' with [parent_header][msg_id]
        # being the current message
        try:
            busy_msg = self.client.iopub_channel.get_msg(block=True,
                                                         timeout=timeout)
        except Exception:
            # narrowed from bare `except:` (kept converting to Exception)
            raise Exception('Error: Timeout retrieving busy status message')

        debug_print('Current kernel status (%s): %s' %
                    (busy_msg['parent_header']['msg_id'],
                     busy_msg['content']['execution_state']))
        if busy_msg['content']['execution_state'] == 'busy':
            debug_print('busy_message received as expected')
        else:
            debug_print('Error: did not receive busy message for request %s'
                        % msg_id)
            debug_pprint(busy_msg)

        # Check message reply status (ok / error)
        debug_print('Waiting for status reply')
        reply = self.client.get_shell_msg(block=True, timeout=timeout)
        debug_print('message reply: %s' % reply['content']['status'])
        debug_pprint(reply)

        # `result_type` was previously named `type`, shadowing the builtin
        result_type = ''
        results = []
        while True:
            try:
                msg = self.client.get_iopub_msg(timeout=timeout)
            except Exception:
                raise Exception("Error: Timeout executing request")
            debug_print('message')
            debug_pprint(msg)

            # validate that the responses are still related to current request
            if msg['parent_header']['msg_id'] != msg_id:
                debug_print('Warning: Invalid message id received ' +
                            msg['parent_header']['msg_id'] +
                            ' expected ' + msg_id)
                continue

            # validate execute_inputs are from current code
            elif msg['msg_type'] == 'execute_input':
                debug_print('current message status: ' + msg['msg_type'])
                debug_print('current message content code: ' +
                            msg['content']['code'])
                if msg['content']['code'] == code:
                    continue

            # Stream results are being returned, accumulate them to results
            elif msg['msg_type'] == 'stream':
                result_type = 'stream'
                results.append(msg['content']['text'])
                continue

            # Execute_Results are being returned:
            # They can be text/plain or text/html
            # accumulate them to results
            elif msg['msg_type'] == 'execute_result':
                debug_print('Received results of type: %s ' %
                            msg['content']['data'])
                if 'text/plain' in msg['content']['data']:
                    result_type = 'text'
                    results.append(msg['content']['data']['text/plain'])
                elif 'text/html' in msg['content']['data']:
                    result_type = 'html'
                    results.append(msg['content']['data']['text/html'])
                continue

            # When idle, responses have all been processed/returned
            elif msg['msg_type'] == 'status':
                debug_print('current message status: ' +
                            msg['content']['execution_state'])
                if msg['content']['execution_state'] == 'idle':
                    break

            else:
                debug_print('Message ignored: %s' % msg['msg_type'])

        if reply['content']['status'] == 'ok':
            debug_print('Returning sucessful invocation result')
            if result_type == 'html':
                html = ''.join(results)
                htmlWrapper = HtmlOutput(html)
                return htmlWrapper
            else:
                return ''.join(results)
        else:
            debug_print('Returning failed invocation exception')
            error = ''
            if 'ename' in reply['content']:
                error = reply['content']['ename']
            error_message = ''
            if 'evalue' in reply['content']:
                error_message = reply['content']['evalue']
            raise Exception('Error: %s - %s' % (error, error_message))
class Kernel(object):
    """Spawn a child IPython kernel subprocess and synchronously run code on it."""

    def __init__(self, active_dir):
        # kernel config is stored in a dot file with the active directory
        config = os.path.join(active_dir, ".kernel-%s.json" % str(uuid.uuid4()))

        # right now we're spawning a child process for IPython. we can
        # probably work directly with the IPython kernel API, but the docs
        # don't really explain how to do it.
        args = [sys.executable, '-m', 'IPython', 'kernel', '-f', config]
        p = subprocess.Popen(args, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # when __this__ process exits, we're going to remove the ipython config
        # file and kill the ipython subprocess
        atexit.register(p.terminate)

        def remove_config():
            os.remove(config)
        atexit.register(remove_config)

        # i found that if i tried to connect to the kernel immediately, it wasn't
        # up and running. 1.5 seconds was arbitrarily chosen (but seems to work)
        time.sleep(1.5)

        # fire up the kernel with the appropriate config
        self.client = BlockingKernelClient(connection_file=config)
        self.client.load_connection_file()
        self.client.start_channels()

        # load our monkeypatches... (autocomplete_patch and vars_patch are
        # code strings defined elsewhere in this module)
        self.client.execute("%matplotlib inline")
        self.client.execute(autocomplete_patch)
        self.client.execute(vars_patch)

    def _run_code(self, code, timeout=0.1):
        """Execute ``code`` and block until the kernel reports idle.

        Returns a dict with the execution's ``msg_id``, its text ``output``
        and any ``image`` (base64 png) produced.
        """
        # this function executes some code and waits for it to completely finish
        # before returning. i don't think that this is neccessarily the best
        # way to do this, but the IPython documentation isn't very helpful for
        # this particular topic.
        #
        # 1) execute code and grab the ID for that execution thread
        # 2) look for messages coming from the "iopub" channel (this is just a
        #    stream of output)
        # 3) when we get a message that is one of the following, save relevant
        #    data to `data`:
        #       - execute_result - content from repr
        #       - stream - content from stdout
        #       - error - ansii encoded stacktrace
        # the final piece is that we check for when the message indicates that
        # the kernel is idle and the message's parent is the original execution
        # ID (msg_id) that's associated with our executing code. if this is the
        # case, we'll return the data and the msg_id and exit
        msg_id = self.client.execute(code)

        data = None
        image = None
        while True:
            try:
                reply = self.client.get_iopub_msg(timeout=timeout)
            except Empty:
                # nothing published yet; keep polling
                continue

            if "execution_state" in reply['content']:
                # idle + matching parent msg_id means our execution finished
                if reply['content']['execution_state'] == "idle" and reply[
                        'parent_header']['msg_id'] == msg_id:
                    if reply['parent_header']['msg_type'] == "execute_request":
                        return {
                            "msg_id": msg_id,
                            "output": data,
                            "image": image
                        }
            elif reply['header']['msg_type'] == "execute_result":
                data = reply['content']['data'].get('text/plain', '')
            elif reply['header']['msg_type'] == "display_data":
                image = reply['content']['data'].get('image/png', '')
            elif reply['header']['msg_type'] == "stream":
                data = reply['content'].get('text', '')
            elif reply['header']['msg_type'] == "error":
                data = "\n".join(reply['content']['traceback'])

    def execute(self, code):
        """Public entry point: run ``code`` and return its result dict."""
        return self._run_code(code)

    def complete(self, code):
        # i couldn't figure out how to get the autocomplete working with the
        # ipython kernel (i couldn't get a completion_reply from the iopub), so
        # we're using jedi to do the autocompletion. the __autocomplete is
        # defined in `autocomplete_patch` above.
        return self.execute("__autocomplete('%s')" % code)

    def get_dataframes(self):
        """Return the kernel-side variable listing (see vars_patch)."""
        return self.execute("__get_variables()")
logging.basicConfig(level=logging.DEBUG) f = connect.find_connection_file() print("loaded") client = BlockingKernelClient() client.load_connection_file(f) print("prepared") # msg_id = client.execute("print('hello')") msg_id = client.execute("1 + 10") # client.wait_for_ready() res = client.get_shell_msg(msg_id, timeout=1) print(res) print("----------------------------------------") msg = res["msg_id"] for i in range(10): if not client.is_alive(): print("not alived") break try: res = client.get_iopub_msg(msg_id, timeout=1)
from jupyter_client import connect from jupyter_client import BlockingKernelClient logging.basicConfig(level=logging.DEBUG) f = connect.find_connection_file() print("loaded") client = BlockingKernelClient() client.load_connection_file(f) print("prepared") # msg_id = client.execute("print('hello')") msg_id = client.execute("1 + 10") # client.wait_for_ready() res = client.get_shell_msg(msg_id, timeout=1) print(res) print("----------------------------------------") msg = res["msg_id"] for i in range(10): if not client.is_alive(): print("not alived") break try: res = client.get_iopub_msg(msg_id, timeout=1) except Empty as e:
class SendToIPython(object):
    """Neovim plugin that sends buffer lines to a running Jupyter kernel."""

    def __init__(self, nvim):
        self.nvim = nvim
        self.client = None
        # Directory where Jupyter stores kernel-*.json connection files.
        self.kerneldir = Path(jupyter_runtime_dir())

    @neovim.function('RunningKernels', sync=True)
    def running_kernels(self, args):
        """Return kernel connection-file names, newest first."""
        l = self.kerneldir.glob('kernel-*.json')
        l = sorted(l, reverse=True, key=lambda f: f.stat().st_ctime)
        return [f.name for f in l]

    @neovim.command('SendTo', complete='customlist,RunningKernels', nargs='?')
    def send_to(self, args):
        """Select the kernel to send to (defaults to the newest running one)."""
        cfs = args or self.running_kernels(None)
        if not cfs:
            self.nvim.command('echom "No kernel found"')
            return

        # Drop any previously selected kernel's channels first.
        if self.client is not None:
            self.client.stop_channels()

        cf = cfs[0]
        self.client = BlockingKernelClient()
        self.client.load_connection_file(self.kerneldir / cf)
        self.client.start_channels()

        # run function once to register it for the `funcref` function
        self.nvim.command('call SendLinesToJupyter()')
        self.nvim.command(
            'let g:send_target = {"send": funcref("SendLinesToJupyter")}')
        self.nvim.command('echom "Sending to %s"' % cf)

    @neovim.function('SendLinesToJupyter')
    def send_lines(self, args):
        """Execute the given list of lines on the selected kernel."""
        if args:
            self.client.execute('\n'.join(args[0]))

    @neovim.function('SendComplete', sync=True)
    def complete(self, args):
        """Omni-completion entry point backed by the kernel's completer."""
        findstart, base = args
        if self.client is None:
            return -3  # no client setup yet: cancel silently and leave completion mode
        if findstart:
            line = self.nvim.current.line
            if not line:
                return -2  # empty line: cancel silently but stay in completion mode
            pos = self.nvim.current.window.cursor[1]
            try:
                # NOTE(review): `timeout` is a module-level name not visible
                # here — confirm it is defined at import time.
                reply = self.client.complete(line, pos, reply=True,
                                             timeout=timeout)['content']
            except TimeoutError:
                return -2

            self.completions = [{
                'word': w,
                'info': ' '
            } for w in reply['matches']]
            return reply['cursor_start']
        else:
            # TODO: use vim's complete_add/complete_check for async operation
            get_info(self.client, self.completions)
            return {'words': self.completions, 'refresh': 'always'}

    @neovim.function('SendCanComplete', sync=True)
    def can_complete(self, args):
        """Completion is possible only with a live, selected kernel."""
        return args[
            0] != '' and self.client is not None and self.client.is_alive()
class Kernel(object):
    """Spawns an IPython (optionally PySpark) kernel subprocess and talks to it.

    The kernel config is stored in a dot file within the active directory.
    Results of executions/completions are streamed to stdout as JSON lines.
    """

    def __init__(self, config, active_dir, pyspark):
        # Right now we're spawning a child process for IPython. We could
        # probably work directly with the IPython kernel API, but the docs
        # don't really explain how to do it.
        log_file = None
        if pyspark:
            os.environ["IPYTHON_OPTS"] = "kernel -f %s" % config
            pyspark = os.path.join(os.environ.get("SPARK_HOME"), "bin/pyspark")
            spark_log = os.environ.get("SPARK_LOG", None)
            if spark_log:
                log_file = open(spark_log, "w")
            spark_opts = os.environ.get("SPARK_OPTS", "")
            # $SPARK_HOME/bin/pyspark <SPARK_OPTS>
            args = [pyspark] + spark_opts.split()
            p = subprocess.Popen(args, stdout=log_file, stderr=log_file)
        else:
            args = [sys.executable, '-m', 'IPython', 'kernel', '-f', config]
            p = subprocess.Popen(args, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)

        # When __this__ process exits, remove the ipython config file and
        # kill the ipython subprocess.
        atexit.register(p.terminate)

        def remove_config():
            if os.path.isfile(config):
                os.remove(config)
        atexit.register(remove_config)

        # Connecting to the kernel immediately fails, so wait until the
        # connection file exists before moving on.
        while not os.path.isfile(config):
            time.sleep(0.1)

        def close_file():
            if log_file:
                log_file.close()
        atexit.register(close_file)

        # fire up the client with the appropriate config
        self.client = BlockingKernelClient(connection_file=config)
        self.client.load_connection_file()
        self.client.start_channels()

        # load our monkeypatches...
        self.client.execute("%matplotlib inline")
        python_patch_file = os.path.join(dirname, "langs", "python-patch.py")
        self.client.execute("%run " + python_patch_file)

    def _run_code(self, execution_id, code, timeout=0.1):
        """Execute `code` and stream its output to stdout until completion.

        Protocol:
          1) execute code and grab the msg_id for that execution thread
          2) read messages from the "iopub" channel (a stream of output)
          3) for relevant message types, save data into `output`:
             - execute_result: content from repr
             - stream: content from stdout
             - error: ansii encoded stacktrace
          4) return once the kernel reports "idle" for a message whose
             parent is our original execution msg_id.
        """
        msg_id = self.client.execute(code, allow_stdin=False)
        request = {
            "id": execution_id,
            "msg_id": msg_id,
            "code": code,
            "status": "started"
        }
        sys.stdout.write(json.dumps(request) + '\n')
        sys.stdout.flush()
        # BUG FIX: "html" is assigned and reset below but was missing from the
        # initial dict, making the first emitted payload's schema inconsistent.
        output = {
            "id": execution_id,
            "msg_id": msg_id,
            "output": "",
            "stream": None,
            "image": None,
            "html": None,
            "error": None
        }
        while True:
            try:
                reply = self.client.get_iopub_msg(timeout=timeout)
            except Empty:
                continue

            if "execution_state" in reply['content']:
                if reply['content']['execution_state'] == "idle" and \
                        reply['parent_header']['msg_id'] == msg_id:
                    if reply['parent_header']['msg_type'] == "execute_request":
                        request["status"] = "complete"
                        sys.stdout.write(json.dumps(request) + '\n')
                        sys.stdout.flush()
                        return
            elif reply['header']['msg_type'] == "execute_result":
                output['output'] = reply['content']['data'].get('text/plain', '')
                output['stream'] = reply['content']['data'].get('text/plain', '')
            elif reply['header']['msg_type'] == "display_data":
                if 'image/png' in reply['content']['data']:
                    output['image'] = reply['content']['data']['image/png']
                elif 'text/html' in reply['content']['data']:
                    output['html'] = reply['content']['data']['text/html']
            elif reply['header']['msg_type'] == "stream":
                output['output'] += reply['content'].get('text', '')
                output['stream'] = reply['content'].get('text', '')
            elif reply['header']['msg_type'] == "error":
                output['error'] = "\n".join(reply['content']['traceback'])

            # TODO: if we have something non-trivial to send back...
            sys.stdout.write(json.dumps(output) + '\n')
            sys.stdout.flush()

            # TODO: should probably get rid of all this
            output['stream'] = None
            output['image'] = None
            output['html'] = None

    def _complete(self, execution_id, code, timeout=0.5):
        """Request kernel completion for `code` and emit a UI payload.

        Waits on the shell channel for the complete_reply whose parent is
        our request's msg_id, then writes the matches to stdout as JSON.
        Example reply content: {'matches': ['elif', 'else'], 'status': 'ok',
        'cursor_start': 0, 'cursor_end': 2, 'metadata': {}}.
        """
        msg_id = self.client.complete(code)
        request = {
            "id": execution_id,
            "msg_id": msg_id,
            "code": code,
            "status": "started"
        }
        sys.stdout.write(json.dumps(request) + '\n')
        sys.stdout.flush()
        output = {
            "id": execution_id,
            "msg_id": msg_id,
            "output": None,
            "image": None,
            "error": None
        }
        while True:
            try:
                reply = self.client.get_shell_msg(timeout=timeout)
            except Empty:
                continue

            if "matches" in reply['content'] and \
                    reply['msg_type'] == "complete_reply" and \
                    reply['parent_header']['msg_id'] == msg_id:
                results = []
                for completion in reply['content']['matches']:
                    result = {
                        "value": completion,
                        "dtype": "---"
                    }
                    if "." in code:
                        result['text'] = result['value']
                        result["dtype"] = "function"
                    else:
                        result['text'] = result['value']
                        result["dtype"] = ""
                    results.append(result)
                output['output'] = results
                output['status'] = "complete"
                sys.stdout.write(json.dumps(output) + '\n')
                sys.stdout.flush()
                return

    def execute(self, execution_id, code, complete=False):
        """Run `code` (or complete it when `complete` is True) in the kernel."""
        if complete:
            return self._complete(execution_id, code)
        result = self._run_code(execution_id, code)
        if re.match("%?reset", code):
            # A reset wipes the kernel namespace; re-load our monkeypatches.
            # BUG FIX: the original reached through a module global `k`
            # instead of this instance's own client.
            self.client.execute("%matplotlib inline")
            self.client.execute(vars_patch)
        return result

    def get_packages(self):
        # BUG FIX: execute() requires (execution_id, code); the original
        # passed only the code string, which raised a TypeError.
        return self.execute(None, "__get_packages()")
class DaisyWorkflow_client:
    """Client-side proxy for a Daisy Workflow living in a remote IPython kernel.

    Commands are sent over a BlockingKernelClient; list-valued results are
    recovered by parsing the text/plain repr the kernel echoes back.
    """

    def __init__(self, connection_file=None, executable=False):
        import os
        self.alg_keys = []
        self.dat_keys = []
        #super().__init__()
        self.kc = BlockingKernelClient()
        if connection_file is None:
            raise Exception(
                'Please Specific Connection file to Remote IPython Kernel first'
            )
        if not os.access(connection_file, os.R_OK):
            raise Exception('The connection file can no be read!')
        self.kc.load_connection_file(connection_file)
        try:
            self.kc.start_channels()
        except RuntimeError:
            raise Exception(
                'Can not start channels, Please CHECK REMOTE KERNEL STATUS')
        self.executable = executable
        self.remote_name = None
        self.alg_clients = {}

    def initialize(self,
                   class_name=None,
                   workflow_name=None,
                   workflow_cfgfile=None,
                   algorithms_cfgfile=None):
        """Instantiate and configure the remote workflow object."""
        import os, json
        if class_name is None:
            raise Exception('Please Specific Workflow class name first')
        cmd = "from Daisy.Workflow import " + class_name
        self.kc.execute_interactive(cmd)

        if workflow_name is None:
            workflow_name = class_name
        self.remote_name = workflow_name
        cmd = self.remote_name + " = " + class_name + "('" + workflow_name + "')"
        self.kc.execute_interactive(cmd)

        if workflow_cfgfile is None:
            raise Exception('Please Specific Workflow Config file first')
        if not os.access(workflow_cfgfile, os.R_OK):
            raise Exception('The Workflow Config file can no be read!')
        with open(workflow_cfgfile, 'r') as json_file:
            string = json_file.read()
        wf_cfg = json.loads(string)

        # Ship the config dict across under a collision-resistant temp name,
        # initialize with it, then clean the temp name up again.
        temp_name = 'cfg_dict' + str(randint(1, 1000000))
        cmd = temp_name + ' = ' + str(wf_cfg)
        self.kc.execute_interactive(cmd)
        cmd = self.remote_name + ".initialize(workflow_engine='PyWorkflowEngine', workflow_environment = " + temp_name + ")"
        self.kc.execute_interactive(cmd)
        self.kc.execute_interactive('del ' + temp_name)

    def setLogLevel(self, level):
        pass
        #Sniper.setLogLevel(level)
        #super().setLogLevel(level)

    @staticmethod
    def _parse_key_list(msg):
        """Parse the repr of a list of quoted strings (e.g. "['a', 'b']").

        Returns the items between the single quotes of each comma-separated
        entry. Shared by data_keys() and algorithm_keys(), which previously
        duplicated this logic inline.
        """
        msg = msg[msg.find("[") + 1:msg.rfind("]")]
        keys = []
        for item in msg.split(','):
            begin = item.find("'")
            end = item.rfind("'")
            keys.append(item[begin + 1:end])
        return keys

    def data_keys(self):
        """Fetch and cache the remote workflow's data keys."""
        cmd = self.remote_name + ".data_keys()"
        msg_id = self.kc.execute(cmd)
        exe_msg = self.execute_status(msg_id)
        dat_keys = []
        if 'data' in exe_msg:
            dat_keys = self._parse_key_list(exe_msg['data'])
        self.dat_keys = dat_keys
        return self.dat_keys

    def algorithm_keys(self):
        """Fetch and cache the remote workflow's algorithm keys."""
        cmd = self.remote_name + ".algorithm_keys()"
        msg_id = self.kc.execute(cmd)
        exe_msg = self.execute_status(msg_id)
        alg_keys = []
        if 'data' in exe_msg:
            alg_keys = self._parse_key_list(exe_msg['data'])
        self.alg_keys = alg_keys
        return self.alg_keys

    def get_data(self, data_name):
        raise Exception('Cannot get DataObject from Server')
        #return self.engine.datastore[data_name]

    def get_algorithm(self, algorithm_name):
        """Return (and cache) a client proxy for a remote algorithm, or False."""
        if algorithm_name in self.alg_clients.keys():
            return self.alg_clients[algorithm_name]
        cmd = self.remote_name
        if algorithm_name in self.alg_keys:
            cmd = cmd + ".get_algorithm('" + algorithm_name + "')"
        elif algorithm_name in self.algorithm_keys():
            # Refresh the key cache before giving up on an unknown name.
            cmd = cmd + ".get_algorithm('" + algorithm_name + "')"
        else:
            return False
        msg_id = self.kc.execute(cmd)
        exe_msg = self.execute_status(msg_id)
        self.alg_clients[algorithm_name] = DaisyAlgorithm_client(
            self.kc, exe_msg['code'])
        print(exe_msg['data'])
        return self.alg_clients[algorithm_name]

    def execute(self):
        pass
        #raise Exception('Must')

    def finalize(self):
        """Finalize the remote workflow."""
        cmd = self.remote_name
        cmd = cmd + ".finalize()"
        self.kc.execute_interactive(cmd)
        #exe_msg = self.execute_status(msg_id)
        #print(exe_msg)
        #self.engine.finalize()

    def execute_status(self, msg_id):
        """Collect iopub output for `msg_id` into a dict.

        Reads messages until an error arrives, both a 'code' and 'data'
        payload have been seen, or a read times out. Messages belonging to
        other executions are skipped.
        """
        code_flag = False
        data_flag = False
        exe_msg = {'msg_id': msg_id}
        while True:
            # BUG FIX: the original used a bare `except:` here, which also
            # swallowed KeyboardInterrupt/SystemExit; narrowed to Exception.
            try:
                kc_msg = self.kc.get_iopub_msg(timeout=5)
                if 'parent_header' in kc_msg and kc_msg['parent_header'][
                        'msg_id'] != exe_msg['msg_id']:
                    continue
                msg_type = kc_msg['header']['msg_type']
                msg_cont = kc_msg['content']
                if msg_type == 'stream':
                    exe_msg[msg_cont['name']] = msg_cont['text']
                elif msg_type == 'error':
                    exe_msg['error'] = msg_cont['traceback']
                    print(msg_cont['traceback'])
                    break
                elif msg_type in ('display_data', 'execute_result'):
                    if 'data' in msg_cont:
                        data_flag = True
                        exe_msg['data'] = msg_cont['data'].get(
                            'text/plain', '')
                    if 'code' in msg_cont:
                        code_flag = True
                        exe_msg['code'] = msg_cont['code']
                if code_flag and data_flag:
                    break
            except Exception:
                print('timeout kc.get_iopub_msg')
                break
        return exe_msg