def register_worker_magic(connection_info, magic_name="worker"):
    """Register a %worker magic, given connection_info.

    Both a line and cell magic are registered, which run the given cell
    in a remote kernel.
    """
    ip = get_ipython()
    client = BlockingKernelClient()
    client.load_connection_info(connection_info)
    client.start_channels()

    def remote(line, cell=None):
        """Run the current cell on a remote IPython kernel"""
        # A line magic passes only `line`; treat it as the code to run.
        code = line if cell is None else cell
        run_cell_remote(ip, client, code)

    # preserve reference on the client, largely for mocking
    remote.client = client
    for kind in ("line", "cell"):
        ip.register_magic_function(remote, magic_kind=kind, magic_name=magic_name)
def __init__(self, active_dir):
    """Spawn an IPython kernel in a subprocess and connect a blocking client.

    Parameters
    ----------
    active_dir : str
        Directory in which the kernel's connection file is written
        (a dot file with a random UUID suffix).
    """
    # kernel config is stored in a dot file with the active directory
    config = os.path.join(active_dir, ".kernel-%s.json" % str(uuid.uuid4()))

    # right now we're spawning a child process for IPython. we can
    # probably work directly with the IPython kernel API, but the docs
    # don't really explain how to do it.
    args = [sys.executable, '-m', 'IPython', 'kernel', '-f', config]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # when __this__ process exits, we're going to remove the ipython config
    # file and kill the ipython subprocess
    atexit.register(p.terminate)

    def remove_config():
        # FIX: guard against the file already being gone (removed by the
        # kernel or a prior cleanup) so the atexit hook can't raise OSError.
        if os.path.isfile(config):
            os.remove(config)
    atexit.register(remove_config)

    # i found that if i tried to connect to the kernel immediately, it wasn't
    # up and running. 1.5 seconds was arbitrarily chosen (but seems to work)
    time.sleep(1.5)

    # fire up the kernel with the appropriate config
    self.client = BlockingKernelClient(connection_file=config)
    self.client.load_connection_file()
    self.client.start_channels()

    # load our monkeypatches...
    self.client.execute("%matplotlib inline")
    self.client.execute(autocomplete_patch)
    self.client.execute(vars_patch)
def main(kid):
    """Connect to kernel `kid` and inject a juneau_connect() helper into it."""
    # Load connection info and init communications.
    connection_file = find_connection_file(kid)
    client = BlockingKernelClient(connection_file=connection_file)
    client.load_connection_file()
    client.start_channels()

    # juneau_connect() can be used to directly connect the notebook to the
    # source database. Note that this includes the full "root" credentials.
    # FIXME: allow for user-specific credentials on SQL tables. The DBMS may also not be at localhost.
    code = f"""
from sqlalchemy import create_engine

def juneau_connect():
    engine = create_engine(
        "postgresql://{config.sql.name}:{config.sql.password}@{config.sql.host}/{config.sql.dbname}",
        connect_args={{
            "options": "-csearch_path='{config.sql.dbs}'"
        }}
    )
    return engine.connect()
"""
    client.execute_interactive(code, timeout=TIMEOUT)
    client.stop_channels()
def register_worker_magic(connection_info, magic_name='worker'):
    """Register a %worker magic, given connection_info.

    Both a line and cell magic are registered, which run the given cell
    in a remote kernel.
    """
    ip = get_ipython()
    info = dict(connection_info)  # copy, so popping doesn't mutate the caller's dict
    key = info.pop('key')
    # FIX: construct the client from the key-less copy `info`, not the
    # original `connection_info` -- the original still contains 'key',
    # which BlockingKernelClient does not accept as a keyword argument.
    kc = BlockingKernelClient(**info)
    kc.session.key = key
    kc.start_channels()

    def remote(line, cell=None):
        """Run the current cell on a remote IPython kernel"""
        if cell is None:
            # both line and cell magic
            cell = line
        run_cell_remote(ip, kc, cell)

    remote.client = kc  # preserve reference on kc, largely for mocking
    ip.register_magic_function(remote, magic_kind='line', magic_name=magic_name)
    ip.register_magic_function(remote, magic_kind='cell', magic_name=magic_name)
def exec_code(kid, code):
    """
    Executes arbitrary `code` in the kernel with id `kid`.

    Returns:
        - tuple: the output of the code and the error, if any.
    """
    # Load connection info and init communications.
    cf = find_connection_file(kid)

    with jupyter_lock:
        km = BlockingKernelClient(connection_file=cf)
        km.load_connection_file()
        km.start_channels()
        km.execute(code, store_history=False)
        # FIX: get_shell_msg takes a timeout, not a msg_id -- the previous
        # positional msg_id was silently interpreted as the `block` flag.
        reply = km.get_shell_msg(timeout=60)
        output, error = None, None
        try:
            while km.is_alive():
                try:
                    msg = km.get_iopub_msg(timeout=10)
                except Empty:
                    # FIX: a quiet IOPub channel raises queue.Empty; previously
                    # this propagated and the channels were never stopped.
                    continue
                if ("content" in msg
                        and "name" in msg["content"]
                        and msg["content"]["name"] == "stdout"):
                    output = msg["content"]["text"]
                    break
        finally:
            # FIX: always release the channels, even on error/interrupt.
            km.stop_channels()
        if reply["content"]["status"] != "ok":
            logging.error(f"Status is {reply['content']['status']}")
            logging.error(output)
            error = output
            output = None
    return output, error
def exec_code(kid, var, code):
    """Execute `code` in the kernel with id `kid` and capture its stdout.

    Parameters
    ----------
    kid : str
        Kernel id used to locate the connection file.
    var :
        Unused in this function; kept for interface compatibility with callers.
    code : str
        Source code to run in the remote kernel.

    Returns
    -------
    tuple
        (output, error): the first stdout text seen on IOPub, and the error
        text (set to that output) when the shell reply status is not 'ok'.
    """
    # load connection info and init communication
    cf = find_connection_file(kid)  # str(port))

    global jupyter_lock
    # Serialize all kernel communication across threads.
    jupyter_lock.acquire()
    try:
        km = BlockingKernelClient(connection_file=cf)
        km.load_connection_file()
        km.start_channels()

        # logging.debug('Executing:\n' + str(code))
        msg_id = km.execute(code, store_history=False)
        reply = km.get_shell_msg(msg_id, timeout=10)
        # logging.info('Execution reply:\n' + str(reply))
        state = 'busy'

        output = None
        idle_count = 0
        try:
            # Poll IOPub until the first stdout message arrives (or the
            # kernel dies). Stops on the FIRST stdout chunk only.
            while km.is_alive():
                try:
                    msg = km.get_iopub_msg(timeout=10)
                    # logging.debug('Read ' + str(msg))
                    if not 'content' in msg:
                        continue
                    if 'name' in msg['content'] and msg['content'][
                            'name'] == 'stdout':
                        # logging.debug('Got data '+ msg['content']['text'])
                        output = msg['content']['text']
                        break
                    if 'execution_state' in msg['content']:
                        # logging.debug('Got state')
                        state = msg['content']['execution_state']
                        # idle_count is tracked but never read -- presumably
                        # a leftover from an earlier exit condition.
                        if state == 'idle':
                            idle_count = idle_count + 1
                except Empty:
                    # IOPub was quiet for 10s; keep polling while alive.
                    pass
        except KeyboardInterrupt:
            logging.error('Keyboard interrupt')
            pass
        finally:
            # logging.info('Kernel IO finished')
            km.stop_channels()

        # logging.info(str(output))
        error = ''
        if reply['content']['status'] != 'ok':
            logging.error('Status is ' + reply['content']['status'])
            logging.error(str(output))
            error = output
            output = None
    finally:
        jupyter_lock.release()
    return output, error
def create_kernel_client(self, ci):
    """Build a blocking client from connection info `ci` with only the shell channel open."""
    client = BlockingKernelClient()
    client.load_connection_info(ci)
    # Only the shell channel is needed; skip iopub/stdin/heartbeat.
    client.start_channels(shell=True, iopub=False, stdin=False, hb=False)
    return client
def _make_blocking_client(self):
    """Create a blocking twin of self.kernel_client, sharing its session key."""
    source = self.kernel_client
    if source is None:
        return
    blocking = BlockingKernelClient(**source.get_connection_info())
    blocking.session.key = source.session.key
    # Only the shell channel is started here.
    blocking.shell_channel.start()
    self._blocking_client = blocking
def setup_kernel(cmd):
    """start an embedded kernel in a subprocess, and wait for it to be ready

    NOTE(review): this is a generator that yields a connected
    BlockingKernelClient (presumably wrapped by contextlib.contextmanager
    at the decoration site) -- the "Returns kernel_manager" note below
    looks stale; confirm against the caller.

    Returns
    -------
    kernel_manager: connected KernelManager instance
    """

    def connection_file_ready(connection_file):
        """Check if connection_file is a readable json file."""
        if not os.path.exists(connection_file):
            return False
        try:
            # A partially-written file fails json.load, so this also
            # guards against reading mid-write.
            with open(connection_file) as f:
                json.load(f)
            return True
        except ValueError:
            return False

    kernel = Popen([sys.executable, '-c', cmd], stdout=PIPE, stderr=PIPE)
    try:
        connection_file = os.path.join(
            paths.jupyter_runtime_dir(),
            'kernel-%i.json' % kernel.pid,
        )
        # wait for connection file to exist, timeout after 5s
        tic = time.time()
        while not connection_file_ready(connection_file) \
                and kernel.poll() is None \
                and time.time() < tic + SETUP_TIMEOUT:
            time.sleep(0.1)

        # Wait 100ms for the writing to finish
        time.sleep(0.1)

        if kernel.poll() is not None:
            # Kernel died during startup: surface its stderr.
            o, e = kernel.communicate()
            e = py3compat.cast_unicode(e)
            raise IOError("Kernel failed to start:\n%s" % e)

        if not os.path.exists(connection_file):
            if kernel.poll() is None:
                kernel.terminate()
            raise IOError("Connection file %r never arrived" % connection_file)

        client = BlockingKernelClient(connection_file=connection_file)
        client.load_connection_file()
        client.start_channels()
        client.wait_for_ready()
        try:
            yield client
        finally:
            client.stop_channels()
    finally:
        # Always reap the kernel subprocess.
        kernel.terminate()
def run(self):
    """Forward every IOPub message from the current kernel to `received`."""
    connection_file = find_connection_file()
    client = BlockingKernelClient(connection_file=connection_file)
    client.load_connection_file()
    # Listen on IOPub only; every other channel stays closed.
    client.start_channels(shell=False, iopub=True, stdin=False,
                          control=False, hb=False)
    while True:
        self.received.emit(client.get_iopub_msg())
def __init__(self, active_dir, pyspark):
    """Spawn an IPython (or PySpark) kernel subprocess and connect to it.

    Parameters
    ----------
    active_dir : str
        Directory where the kernel's connection file is written.
    pyspark : bool
        When truthy, launch the kernel through $SPARK_HOME/bin/pyspark
        instead of a plain IPython kernel.
    """
    # kernel config is stored in a dot file with the active directory
    config = os.path.join(active_dir, ".kernel-%s.json" % str(uuid.uuid4()))

    # right now we're spawning a child process for IPython. we can
    # probably work directly with the IPython kernel API, but the docs
    # don't really explain how to do it.
    log_file = None
    if pyspark:
        os.environ["IPYTHON_OPTS"] = "kernel -f %s" % config
        # NOTE(review): assumes SPARK_HOME is set -- os.path.join raises
        # TypeError on None; confirm callers guarantee the env var.
        pyspark = os.path.join(os.environ.get("SPARK_HOME"), "bin/pyspark")
        spark_log = os.environ.get("SPARK_LOG", None)
        if spark_log:
            log_file = open(spark_log, "w")
        spark_opts = os.environ.get("SPARK_OPTS", "")
        args = [pyspark] + spark_opts.split()  # $SPARK_HOME/bin/pyspark <SPARK_OPTS>
        p = subprocess.Popen(args, stdout=log_file, stderr=log_file)
    else:
        args = [sys.executable, '-m', 'IPython', 'kernel', '-f', config]
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # when __this__ process exits, we're going to remove the ipython config
    # file and kill the ipython subprocess
    atexit.register(p.terminate)

    def remove_config():
        if os.path.isfile(config):
            os.remove(config)
    atexit.register(remove_config)

    # connecting to the kernel immediately can fail, so wait until the
    # config file exists before moving on
    # FIX: idiomatic truth test instead of comparing `== False`
    while not os.path.isfile(config):
        time.sleep(0.1)

    def close_file():
        if log_file:
            log_file.close()
    atexit.register(close_file)

    # fire up the kernel with the appropriate config
    self.client = BlockingKernelClient(connection_file=config)
    self.client.load_connection_file()
    self.client.start_channels()

    # load our monkeypatches...
    self.client.execute("%matplotlib inline")
    self.client.execute(vars_patch)
def test_start_ipython_scheduler(loop, zmq_ctx):
    from jupyter_client import BlockingKernelClient

    with cluster(1) as (s, [a]):
        with Client(s["address"], loop=loop) as e:
            info = e.start_ipython_scheduler()
            client = BlockingKernelClient()
            client.load_connection_info(info)
            client.start_channels()
            # Smoke-test: the `scheduler` name must resolve in the kernel.
            msg_id = client.execute("scheduler")
            reply = client.get_shell_msg(timeout=10)
            client.stop_channels()
def send_to(self, args):
    """Resolve a connection file from `args` and ensure a client is cached for it."""
    if args and args[0].endswith('(newest)'):
        # Strip the "(newest)" suffix appended by the kernel picker.
        args[0] = args[0][:-len('(newest)')]
    cf = find_connection_file(*args)
    if cf not in self.clients:
        fresh = BlockingKernelClient()
        fresh.load_connection_file(cf)
        fresh.start_channels()
        self.clients[cf] = fresh
    return cf
def __init__(self, fps):
    """Connect to a Jupyter kernel and start remote per-variable RAM sampling.

    Parameters
    ----------
    fps : passed through to the superclass constructor.
    """
    self.config = self.get_connection_info()

    # The sampling thread inside the kernel writes its CSV here.
    data_dir = appdirs.user_data_dir('permon', 'bminixhofer')
    os.makedirs(data_dir, exist_ok=True)
    self.usage_file = os.path.join(data_dir, 'jupyter_ram_usage.csv')
    # Truncate/create the file so stale readings are never served.
    open(self.usage_file, 'w').close()

    # self.setup_code is the code that is run in the notebook when the
    # stat is instantiated. It starts a thread which reads the memory
    # usage of all public variables in a set interval and saves it to a
    # csv file in the user data directory
    self.setup_code = f"""
if '_permon_running' not in globals() or not _permon_running:
    import threading
    import csv
    import sys
    import time
    from pympler import asizeof
    from types import ModuleType

    def _permon_get_ram_usage_per_object():
        while _permon_running:
            ram_usage = []
            global_vars = [key for key in globals() if not key.startswith('_')]
            for name in global_vars:
                value = globals()[name] if name in globals() else None
                if isinstance(value, ModuleType):
                    continue
                try:
                    ram_usage.append((name, asizeof.asizeof(value)))
                except TypeError:
                    continue

            with open('{self.usage_file}', 'w') as f:
                writer = csv.writer(f, delimiter=',')
                for name, ram in ram_usage:
                    writer.writerow([name, ram])
            time.sleep({self.settings['query interval [s]']})

    _permon_thread = threading.Thread(target=_permon_get_ram_usage_per_object)
    _permon_running = True
    _permon_thread.start()
"""
    # Flipping the flag lets the remote sampling thread exit on its own.
    self.teardown_code = """
_permon_running = False
"""
    self.client = BlockingKernelClient()
    self.client.load_connection_info(self.config)
    self.client.start_channels()
    self.client.execute(self.setup_code)
    super(JupyterRAMUsage, self).__init__(fps=fps)
def test_start_ipython_scheduler(loop, zmq_ctx):
    from jupyter_client import BlockingKernelClient

    with cluster(1) as (s, [a]):
        with Executor(('127.0.0.1', s['port']), loop=loop) as e:
            info = e.start_ipython_scheduler()
            # The session key travels separately from the socket info.
            session_key = info.pop('key')
            client = BlockingKernelClient(**info)
            client.session.key = session_key
            client.start_channels()
            msg_id = client.execute("scheduler")
            reply = client.get_shell_msg(timeout=10)
            client.stop_channels()
def is_runing(cf):
    """
    Check if kernel is alive.

    Parameters
    ----------
    cf : str
        Path to the kernel's connection file.

    Returns
    -------
    bool
        True when the kernel's IOPub port accepts connections on localhost.
    """
    kc = BlockingKernelClient()
    kc.load_connection_file(cf)
    port = kc.get_connection_info()['iopub_port']
    # if check_server(port):
    # FIX: return the predicate directly instead of if/else True/False.
    return is_open("127.0.0.1", port)
def test_start_ipython_scheduler(loop, zmq_ctx):
    from jupyter_client import BlockingKernelClient

    with cluster(1, should_check_state=False) as (s, [a]):
        with Client(s['address'], loop=loop) as e:
            info = e.start_ipython_scheduler()
            # The session key travels separately from the socket info.
            session_key = info.pop('key')
            client = BlockingKernelClient(**info)
            client.session.key = session_key
            client.start_channels()
            msg_id = client.execute("scheduler")
            reply = client.get_shell_msg(timeout=10)
            client.stop_channels()
def connect(connection_file):
    """Return a cached kernel client for `connection_file`, creating one on first use."""
    if connection_file not in clients:
        # FIX: parenthesized single-argument print works as a statement on
        # Python 2 and as a function call on Python 3.
        print("[nyroglancer] connecting to: " + connection_file)
        kernel_client = BlockingKernelClient(connection_file=connection_file)
        kernel_client.load_connection_file()
        kernel_client.start_channels()
        clients[connection_file] = kernel_client
        return kernel_client
    return clients[connection_file]
def main(kid, var):
    """Connect to kernel `kid` and print the variable `var` as JSON from inside it.

    Parameters
    ----------
    kid : str
        Kernel id used to locate the connection file.
    var : str
        Name of the remote variable to serialize.
    """
    # Load connection info and init communications.
    cf = find_connection_file(kid)  # str(port))
    km = BlockingKernelClient(connection_file=cf)
    km.load_connection_file()
    km.start_channels()

    # NOTE(review): .to_json exists on pd.DataFrame but not on np.ndarray
    # or list, so those two type branches would raise AttributeError in the
    # remote kernel -- confirm whether they should be converted first.
    code = f"""
import pandas as pd
import numpy as np
if type({var}) in [pd.DataFrame, np.ndarray, list]:
    print({var}.to_json(orient='split', index=False))
"""
    km.execute_interactive(code, timeout=TIMEOUT)
    km.stop_channels()
def test_start_ipython_workers(loop, zmq_ctx):
    from jupyter_client import BlockingKernelClient

    with cluster(1) as (s, [a]):
        with Client(s["address"], loop=loop) as e:
            # One worker in the cluster -> take the sole connection info.
            info = first(e.start_ipython_workers().values())
            client = BlockingKernelClient()
            client.load_connection_info(info)
            client.start_channels()
            client.wait_for_ready(timeout=10)
            msg_id = client.execute("worker")
            reply = client.get_shell_msg(timeout=10)
            assert reply["parent_header"]["msg_id"] == msg_id
            assert reply["content"]["status"] == "ok"
            client.stop_channels()
def test_start_ipython_workers(loop, zmq_ctx):
    from jupyter_client import BlockingKernelClient

    with cluster(1) as (s, [a]):
        with Executor(('127.0.0.1', s['port']), loop=loop) as e:
            # One worker in the cluster -> take the sole connection info.
            info = first(e.start_ipython_workers().values())
            session_key = info.pop('key')
            client = BlockingKernelClient(**info)
            client.session.key = session_key
            client.start_channels()
            client.wait_for_ready(timeout=10)
            msg_id = client.execute("worker")
            reply = client.get_shell_msg(timeout=10)
            assert reply['parent_header']['msg_id'] == msg_id
            assert reply['content']['status'] == 'ok'
            client.stop_channels()
def setup_kernel(cmd):
    """start an embedded kernel in a subprocess, and wait for it to be ready

    This function was taken from the ipykernel project.
    We plan to remove it when dropping support for python 2.

    Yields
    ------
    client: jupyter_client.BlockingKernelClient connected to the kernel
    """
    kernel = Popen([sys.executable, '-c', cmd], stdout=PIPE, stderr=PIPE)
    try:
        connection_file = os.path.join(
            paths.jupyter_runtime_dir(),
            'kernel-%i.json' % kernel.pid,
        )
        # wait for connection file to exist, timeout after 5s
        tic = time.time()
        while not os.path.exists(connection_file) \
                and kernel.poll() is None \
                and time.time() < tic + SETUP_TIMEOUT:
            time.sleep(0.1)

        if kernel.poll() is not None:
            # Kernel died during startup: surface its stderr.
            o, e = kernel.communicate()
            if not PY3 and isinstance(e, bytes):
                e = e.decode()
            raise IOError("Kernel failed to start:\n%s" % e)

        if not os.path.exists(connection_file):
            if kernel.poll() is None:
                kernel.terminate()
            raise IOError("Connection file %r never arrived" % connection_file)

        client = BlockingKernelClient(connection_file=connection_file)
        client.load_connection_file()
        client.start_channels()
        client.wait_for_ready()
        try:
            yield client
        finally:
            client.stop_channels()
    finally:
        # NOTE(review): the kernel is only terminated on Python 3 here
        # (`if not PY2`), unlike the unconditional terminate in similar
        # helpers -- confirm the asymmetry is intentional.
        if not PY2:
            kernel.terminate()
def connect_kernel(cf):
    """
    Connect a kernel.
    """
    if is_runing(cf):
        # Attach to the kernel that is already up; no manager needed.
        km = None
        kc = BlockingKernelClient(connection_file=cf)
        kc.load_connection_file(cf)
    else:
        # Spawn a fresh kernel and take a blocking client from its manager.
        km = manager.KernelManager(connection_file=cf)
        km.start_kernel()
        kc = km.blocking_client()
        init_kernel(kc)
    return km, kc
def setup():
    """Start an ipykernel subprocess, connect the global client, and load Splonky.

    Returns
    -------
    str
        The OS process id parsed from the kernel's connection file name.

    Raises
    ------
    Exception
        If the core Splonky libraries fail to load in the kernel.
    """
    global client
    kernel = Popen([sys.executable, '-m', 'ipykernel'], stdout=PIPE, stderr=PIPE)
    connection_file = os.path.join(
        paths.jupyter_runtime_dir(),
        'kernel-%i.json' % kernel.pid,
    )
    # Give the kernel time to write its connection file before reading it.
    sleep(1)
    client = BlockingKernelClient(connection_file=connection_file)
    client.load_connection_file()
    client.start_channels()
    client.wait_for_ready()
    loaded = client.execute_interactive(load_splonky)
    if loaded['content']['status'] == 'error':
        raise Exception("Could not load core Splonky libraries")
    # FIX: raw string -- '\/' and '\d' are invalid escape sequences in a
    # normal string literal (DeprecationWarning today, SyntaxError later).
    os_process_id = re.findall(r'.*\/kernel-(\d+)\.json$', connection_file)[0]
    return os_process_id
def send_to(self, args):
    """Attach the plugin's client to the first matching kernel and wire up Vim."""
    candidates = args or self.running_kernels(None)
    if not candidates:
        self.nvim.command('echom "No kernel found"')
        return
    # Drop any previous connection before opening the new one.
    if self.client is not None:
        self.client.stop_channels()
    cf = candidates[0]
    self.client = BlockingKernelClient()
    self.client.load_connection_file(self.kerneldir / cf)
    self.client.start_channels()
    # run function once to register it for the `funcref` function
    self.nvim.command('call SendLinesToJupyter()')
    self.nvim.command(
        'let g:send_target = {"send": funcref("SendLinesToJupyter")}')
    self.nvim.command('echom "Sending to %s"' % cf)
def remote_magic(line, cell=None):
    """A magic for running code on a specified remote worker

    The connection_info dict of the worker will be looked up as the first
    positional arg to the magic. The rest of the line (or the entire cell
    for a %%cell magic) will be passed to the remote kernel.

    Usage:

        info = e.start_ipython(worker)[worker]
        %remote info print(worker.data)
    """
    # get connection info from IPython's user namespace
    ip = get_ipython()
    parts = line.split(None, 1)
    info_name = parts[0]
    if info_name not in ip.user_ns:
        raise NameError(info_name)
    connection_info = dict(ip.user_ns[info_name])

    if not cell:
        # line magic: the remainder of the line is the code to run
        if len(parts) == 1:
            raise ValueError("I need some code to run!")
        cell = parts[1]

    # turn info dict to hashable str for use as lookup key in _clients cache
    cache_key = ",".join(map(str, sorted(connection_info.items())))
    session_key = connection_info.pop("key")

    kc = remote_magic._clients.get(cache_key)
    if kc is None:
        kc = BlockingKernelClient(**connection_info)
        kc.session.key = session_key
        kc.start_channels()
        kc.wait_for_ready(timeout=10)
        remote_magic._clients[cache_key] = kc

    # actually run the code
    run_cell_remote(ip, kc, cell)
def __init__(self, connection_file=None, executable=False):
    """Connect a blocking client to an existing remote IPython kernel.

    Parameters
    ----------
    connection_file : str
        Path to the kernel's JSON connection file (required, must be readable).
    executable : bool
        Stored on the instance; semantics defined by callers.

    Raises
    ------
    Exception
        If no connection file is given, it cannot be read, or the
        channels fail to start.
    """
    import os
    self.alg_keys = []
    self.dat_keys = []
    # super().__init__()
    self.kc = BlockingKernelClient()
    # FIX: idiomatic `is None` / `not ...` instead of `== None` / `== False`.
    if connection_file is None:
        raise Exception(
            'Please Specific Connection file to Remote IPython Kernel first')
    if not os.access(connection_file, os.R_OK):
        raise Exception('The connection file can no be read!')
    self.kc.load_connection_file(connection_file)
    try:
        self.kc.start_channels()
    except RuntimeError:
        raise Exception(
            'Can not start channels, Please CHECK REMOTE KERNEL STATUS')
    self.executable = executable
    self.remote_name = None
    self.alg_clients = {}
def setup_kernel(cmd):
    """start an embedded kernel in a subprocess, and wait for it to be ready

    NOTE(review): this is a generator that yields a connected client
    (presumably used via contextlib.contextmanager at the decoration
    site) -- the "Returns kernel_manager" note below looks stale.

    Returns
    -------
    kernel_manager: connected KernelManager instance
    """
    kernel = Popen([sys.executable, '-c', cmd], stdout=PIPE, stderr=PIPE,
                   env=env)
    # Connection file lives in the profile's security dir, keyed by pid.
    connection_file = os.path.join(IPYTHONDIR,
                                   'profile_default',
                                   'security',
                                   'kernel-%i.json' % kernel.pid)
    # wait for connection file to exist, timeout after 5s
    tic = time.time()
    while not os.path.exists(connection_file) \
            and kernel.poll() is None \
            and time.time() < tic + SETUP_TIMEOUT:
        time.sleep(0.1)

    if kernel.poll() is not None:
        # Kernel died during startup: surface its stderr.
        o, e = kernel.communicate()
        e = py3compat.cast_unicode(e)
        raise IOError("Kernel failed to start:\n%s" % e)

    if not os.path.exists(connection_file):
        if kernel.poll() is None:
            kernel.terminate()
        raise IOError("Connection file %r never arrived" % connection_file)

    client = BlockingKernelClient(connection_file=connection_file)
    client.load_connection_file()
    client.start_channels()
    client.wait_for_ready()
    try:
        yield client
    finally:
        client.stop_channels()
        kernel.terminate()
def run(self):
    """Pump IOPub messages into pub_q and run commands from cmd_q until a None sentinel."""
    connection_file = find_connection_file()
    client = BlockingKernelClient(connection_file=connection_file)
    client.load_connection_file()
    # IOPub for output, control for shutdown; shell/stdin/heartbeat unused.
    client.start_channels(shell=False, iopub=True, stdin=False,
                          control=True, hb=False)
    while True:
        try:
            self.pub_q.put(client.get_iopub_msg(TIMEOUT))
        except Empty:
            pass
        if self.cmd_q.qsize():
            cmd = self.cmd_q.get()
            if cmd is None:
                # Sentinel: shut the worker thread down.
                print('Client thread closing')
                break
            client.execute(cmd)
            self.ctrl_q.put(client.get_shell_msg())
def execute_from_command_line():
    """Forward a Django management command to a running devkernel.

    Parses a jupyter-style ``--existing [name]`` argument out of sys.argv,
    then either execs ``jupyter console`` (for shell/shell_plus commands)
    or runs the command remotely in the existing kernel and exits with
    its status.
    """
    if sys.argv.count('--existing') != 1:
        raise ValueError(f'{sys.argv}\n'
                         f'--existing argument must occur once only.')

    kernel_arg_index = sys.argv.index('--existing')
    try:
        kernel_name = sys.argv[kernel_arg_index + 1]
    except IndexError:
        # Following the command-line API of jupyter console, qtconsole etc, the --existing argument
        # can be used without a value, meaning use the kernel whose connection file has most
        # recently been accessed. We support that here when --existing is the last element of the
        # command line. Otherwise, the behavior of the no-argument-value form can be achieved with
        # --existing ''.
        kernel_name = None
    else:
        sys.argv.pop(kernel_arg_index + 1)
    # Remove the --existing flag itself so the remote command never sees it.
    sys.argv.pop(kernel_arg_index)

    if {'shell', 'shell_plus'} & set(sys.argv):
        # Special case: use `jupyter console` for management commands requesting a python shell.
        argv = [
            'jupyter', 'console', '--Completer.use_jedi=False', '--existing'
        ]
        if kernel_name:
            argv.append(kernel_name)
        # Replaces the current process; nothing below runs in this branch.
        os.execlp(argv[0], *argv)

    connection_file = find_connection_file(
        kernel_name) if kernel_name else find_connection_file()
    kernel_client = BlockingKernelClient(connection_file=connection_file)
    kernel_client.load_connection_file()
    # Run the command inside the kernel; the remaining argv is passed as JSON.
    response = kernel_client.execute_interactive(f"""
from devkernel.kernel import execute_from_command_line
execute_from_command_line('{json.dumps(sys.argv)}')
""")
    exit_status = 0 if response['metadata']['status'] == 'ok' else 1
    sys.exit(exit_status)