def init_tornado_settings(self):
    """Set up the tornado settings dict."""
    base_url = self.base_url
    # note: the trailing comma makes this a one-element tuple;
    # FileSystemLoader accepts a sequence of template search paths
    template_path = os.path.join(self.data_files_path, 'templates'),
    jinja_env = Environment(
        loader=FileSystemLoader(template_path),
        **self.jinja_environment_options
    )
    settings = dict(
        config=self.config,
        log=self.log,
        db=self.db,
        hub=self.hub,
        admin_users=self.admin_users,
        authenticator=import_item(self.authenticator)(config=self.config),
        spawner_class=import_item(self.spawner_class),
        base_url=base_url,
        cookie_secret=self.cookie_secret,
        login_url=url_path_join(self.hub.server.base_url, 'login'),
        static_path=os.path.join(self.data_files_path, 'static'),
        static_url_prefix=url_path_join(self.hub.server.base_url, 'static/'),
        template_path=template_path,
        jinja2_env=jinja_env,
    )
    # allow configured settings to have priority
    settings.update(self.tornado_settings)
    self.tornado_settings = settings

def init_io(self):
    """Redirect input streams and set a display hook."""
    if self.outstream_class:
        outstream_factory = import_item(str(self.outstream_class))
        sys.stdout = outstream_factory(self.session, self.iopub_socket, u'stdout')
        sys.stderr = outstream_factory(self.session, self.iopub_socket, u'stderr')
    if self.displayhook_class:
        displayhook_factory = import_item(str(self.displayhook_class))
        sys.displayhook = displayhook_factory(self.session, self.iopub_socket)

def init_configurables(self):
    # force Session default to be secure
    default_secure(self.config)

    kls = import_item(self.kernel_manager_class)
    self.kernel_manager = kls(
        parent=self, log=self.log, kernel_argv=self.kernel_argv,
        connection_dir=self.profile_dir.security_dir,
    )
    kls = import_item(self.notebook_manager_class)
    self.notebook_manager = kls(parent=self, log=self.log)
    kls = import_item(self.session_manager_class)
    self.session_manager = kls(parent=self, log=self.log)

def init_configurables(self):
    # force Session default to be secure
    default_secure(self.config)

    kls = import_item(self.kernel_spec_manager_class)
    self.kernel_spec_manager = kls(ipython_dir=self.ipython_dir)

    kls = import_item(self.kernel_manager_class)
    self.kernel_manager = kls(
        parent=self, log=self.log, ipython_kernel_argv=self.ipython_kernel_argv,
        connection_dir=self.profile_dir.security_dir,
    )
    kls = import_item(self.contents_manager_class)
    self.contents_manager = kls(parent=self, log=self.log)

    kls = import_item(self.session_manager_class)
    self.session_manager = kls(parent=self, log=self.log,
                               kernel_manager=self.kernel_manager,
                               contents_manager=self.contents_manager)

    kls = import_item(self.cluster_manager_class)
    self.cluster_manager = kls(parent=self, log=self.log)
    self.cluster_manager.update_profiles()

    self.login_handler_class = import_item(self.login_handler)
    self.logout_handler_class = import_item(self.logout_handler)

    kls = import_item(self.config_manager_class)
    self.config_manager = kls(parent=self, log=self.log,
                              profile_dir=self.profile_dir.location)

def yaml(self, line, cell):
    line = line.strip()
    args = magic_arguments.parse_argstring(self.yaml, line)

    display(Javascript(
        """
        require(
            [
                "notebook/js/codecell",
                "codemirror/mode/yaml/yaml"
            ],
            function(cc){
                cc.CodeCell.options_default.highlight_modes.magic_yaml =
                    { reg: ["^%%yaml"] }
            }
        );
        """))

    loader = get_ipython().user_global_ns.get(args.loader, None)
    if loader is None:
        loader = import_item(args.loader)

    try:
        val = yaml.load(cell, Loader=loader)
    except yaml.YAMLError as err:
        print(err)
        return
    if args.var_name is not None:
        get_ipython().user_ns[args.var_name] = val
    else:
        return val

def test_for(item, min_version=None, callback=extract_version):
    """Test to see if item is importable, and optionally check against a minimum
    version.

    If min_version is given, the default behavior is to check against the
    `__version__` attribute of the item, but specifying `callback` allows you to
    extract the value you are interested in. e.g::

        In [1]: import sys

        In [2]: from IPython.testing.iptest import test_for

        In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info)
        Out[3]: True

    """
    try:
        check = import_item(item)
    except (ImportError, RuntimeError):
        # GTK reports Runtime error if it can't be initialized even if it's
        # importable.
        return False
    else:
        if min_version:
            if callback:
                # extra processing step to get version to compare
                check = callback(check)
            return check >= min_version
        else:
            return True

def get_instances(cls, name=None, root=None, klass=None):
    """Get all instances of cls and its subclasses.

    Parameters
    ----------
    name : str
        Limit to components with this name.
    root : Component or subclass
        Limit to components having this root.
    klass : class or str
        Limits to instances of the class or its subclasses.  If a str
        is given it must be in the form 'foo.bar.MyClass'.  The str form
        of this argument is useful for forward declarations.
    """
    if klass is not None:
        if isinstance(klass, basestring):
            klass = import_item(klass)
        # Limit search to instances of klass for performance
        if issubclass(klass, Component):
            return klass.get_instances(name=name, root=root)
    instances = cls.__instance_refs.values()
    if name is not None:
        instances = [i for i in instances if i.name == name]
    if klass is not None:
        instances = [i for i in instances if isinstance(i, klass)]
    if root is not None:
        instances = [i for i in instances if i.root == root]
    return instances

def get_validator(version=None, version_minor=None):
    """Load the JSON schema into a Validator"""
    if version is None:
        from .. import current_nbformat
        version = current_nbformat

    v = import_item("IPython.nbformat.v%s" % version)
    current_minor = v.nbformat_minor
    if version_minor is None:
        version_minor = current_minor

    version_tuple = (version, version_minor)

    if version_tuple not in validators:
        try:
            v.nbformat_schema
        except AttributeError:
            # no validator
            return None
        schema_path = os.path.join(os.path.dirname(v.__file__), v.nbformat_schema)
        with open(schema_path) as f:
            schema_json = json.load(f)

        if current_minor < version_minor:
            # notebook from the future, relax all `additionalProperties: False`
            # requirements
            schema_json = _relax_additional_properties(schema_json)
            # and allow undefined cell types and outputs
            schema_json = _allow_undefined(schema_json)

        validators[version_tuple] = Validator(schema_json)
    return validators[version_tuple]

def construct(self):
    # This is the working dir by now.
    sys.path.insert(0, '')

    self.start_mpi()
    self.start_logging()

    # Create the underlying shell class and EngineService
    shell_class = import_item(self.master_config.Global.shell_class)
    self.engine_service = EngineService(shell_class, mpi=mpi)

    self.exec_lines()

    # Create the service hierarchy
    self.main_service = service.MultiService()
    self.engine_service.setServiceParent(self.main_service)
    self.tub_service = Tub()
    self.tub_service.setServiceParent(self.main_service)
    # This needs to be called before the connection is initiated
    self.main_service.startService()

    # This initiates the connection to the controller and calls
    # register_engine to tell the controller we are ready to do work
    self.engine_connector = EngineConnector(self.tub_service)
    log.msg("Using furl file: %s" % self.master_config.Global.furl_file)
    reactor.callWhenRunning(self.call_connect)

def _resolve_func_name(self, func_name):
    if callable(func_name):
        return func_name
    elif isinstance(func_name, basestring):
        return import_item(func_name)
    else:
        raise TypeError('func_name must be a str or callable, got: %r' % func_name)

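# The resolver above illustrates the pattern shared by most snippets in this
# section: accept either a callable or a dotted import string and normalize
# to a callable via import_item. A minimal standalone sketch of that behavior
# (the `resolve` name is illustrative; import_item lives in
# IPython.utils.importstring, or traitlets.utils.importstring in later versions):
from IPython.utils.importstring import import_item

def resolve(func_name):
    # callables pass through untouched; strings are imported by dotted path
    return func_name if callable(func_name) else import_item(func_name)

assert resolve(len) is len
assert resolve('os.path.join').__name__ == 'join'
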
def __init__(self, *args, **kwargs):
    super(MetaManager, self).__init__(*args, **kwargs)
    self.app = kwargs['parent']
    self.managers = {}

    server_home = FileContentsManager()
    server_home.root_dir = self.root_dir
    self.managers['server-home'] = server_home

    if self.enable_custom_handlers:
        enable_custom_handlers()

    for alias, path in self.file_dirs.items():
        fb = FileContentsManager()
        fb.root_dir = path
        self.managers[alias] = fb

    for alias, path in self.bundle_dirs.items():
        fb = BundleNotebookManager()
        fb.root_dir = path
        self.managers[alias] = fb

    for user, pw in self.github_accounts:
        gh = notebook_gisthub(user, pw)
        gbm = GistNotebookManager(gisthub=gh)
        self.managers['gist:' + user] = gbm

    self.middleware = {}
    for name, middleware in self.manager_middleware.items():
        cls = import_item(middleware)
        self.middleware[name] = cls(parent=self, log=self.log)

    self.root = RootManager(meta_manager=self)

def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # self.app = kwargs['parent']
    self.managers = {}

    if self.enable_custom_handlers:
        enable_custom_handlers()

    for alias, path in self.bundle_dirs.items():
        fb = BundleNotebookManager(root_dir=path, trash_dir=self.trash_dir)
        self.managers[alias] = fb

    for alias, workarea_paths in self.workarea_dirs.items():
        fb = WorkareaManager(workarea_paths=workarea_paths)
        self.managers[alias] = fb

    for user, pw in self.github_accounts:
        gh = notebook_gisthub(user, pw)
        gbm = GistNotebookManager(gisthub=gh)
        self.managers['gist:' + user] = gbm

    self.middleware = {}
    for name, middleware in self.manager_middleware.items():
        cls = import_item(middleware)
        self.middleware[name] = cls(parent=self, log=self.log)

    self.root = RootManager(meta_manager=self)

def init_schedulers(self):
    children = self.children
    mq = import_item(str(self.mq_class))

    hub = self.factory
    # maybe_inproc = 'inproc://monitor' if self.use_threads else self.monitor_url
    # IOPub relay (in a Process)
    q = mq(zmq.PUB, zmq.SUB, zmq.PUB, 'N/A', 'iopub')
    q.bind_in(hub.client_info['iopub'])
    q.bind_out(hub.engine_info['iopub'])
    q.setsockopt_out(zmq.SUBSCRIBE, '')
    q.connect_mon(hub.monitor_url)
    q.daemon = True
    children.append(q)

    # Multiplexer Queue (in a Process)
    q = mq(zmq.XREP, zmq.XREP, zmq.PUB, 'in', 'out')
    q.bind_in(hub.client_info['mux'])
    q.setsockopt_in(zmq.IDENTITY, 'mux')
    q.bind_out(hub.engine_info['mux'])
    q.connect_mon(hub.monitor_url)
    q.daemon = True
    children.append(q)

    # Control Queue (in a Process)
    q = mq(zmq.XREP, zmq.XREP, zmq.PUB, 'incontrol', 'outcontrol')
    q.bind_in(hub.client_info['control'])
    q.setsockopt_in(zmq.IDENTITY, 'control')
    q.bind_out(hub.engine_info['control'])
    q.connect_mon(hub.monitor_url)
    q.daemon = True
    children.append(q)

    try:
        scheme = self.config.TaskScheduler.scheme_name
    except AttributeError:
        scheme = TaskScheduler.scheme_name.get_default_value()
    # Task Queue (in a Process)
    if scheme == 'pure':
        self.log.warn("task::using pure XREQ Task scheduler")
        q = mq(zmq.XREP, zmq.XREQ, zmq.PUB, 'intask', 'outtask')
        # q.setsockopt_out(zmq.HWM, hub.hwm)
        q.bind_in(hub.client_info['task'][1])
        q.setsockopt_in(zmq.IDENTITY, 'task')
        q.bind_out(hub.engine_info['task'])
        q.connect_mon(hub.monitor_url)
        q.daemon = True
        children.append(q)
    elif scheme == 'none':
        self.log.warn("task::using no Task scheduler")
    else:
        self.log.info("task::using Python %s Task scheduler" % scheme)
        sargs = (hub.client_info['task'][1], hub.engine_info['task'],
                 hub.monitor_url, hub.client_info['notification'])
        kwargs = dict(logname='scheduler', loglevel=self.log_level,
                      log_url=self.log_url, config=dict(self.config))
        q = Process(target=launch_scheduler, args=sargs, kwargs=kwargs)
        q.daemon = True
        children.append(q)

def _unpacker_changed(self, name, old, new):
    if new.lower() == 'json':
        self.pack = json_packer
        self.unpack = json_unpacker
    elif new.lower() == 'pickle':
        self.pack = pickle_packer
        self.unpack = pickle_unpacker
    else:
        self.unpack = import_item(str(new))

def __init__(self, **kwargs):
    super(SessionFactory, self).__init__(**kwargs)
    exec_key = self.exec_key or None
    # set the packers:
    if not self.packer:
        packer_f = unpacker_f = None
    elif self.packer.lower() == 'json':
        packer_f = ss.json_packer
        unpacker_f = ss.json_unpacker
    elif self.packer.lower() == 'pickle':
        packer_f = ss.pickle_packer
        unpacker_f = ss.pickle_unpacker
    else:
        packer_f = import_item(self.packer)
        unpacker_f = import_item(self.unpacker)

    # construct the session
    self.session = ss.StreamSession(self.username, self.ident,
                                    packer=packer_f, unpacker=unpacker_f,
                                    key=exec_key)

def construct_schedulers(self):
    children = self.children
    mq = import_item(self.mq_class)

    # maybe_inproc = 'inproc://monitor' if self.usethreads else self.monitor_url
    # IOPub relay (in a Process)
    q = mq(zmq.PUB, zmq.SUB, zmq.PUB, "N/A", "iopub")
    q.bind_in(self.client_info["iopub"])
    q.bind_out(self.engine_info["iopub"])
    q.setsockopt_out(zmq.SUBSCRIBE, "")
    q.connect_mon(self.monitor_url)
    q.daemon = True
    children.append(q)

    # Multiplexer Queue (in a Process)
    q = mq(zmq.XREP, zmq.XREP, zmq.PUB, "in", "out")
    q.bind_in(self.client_info["mux"])
    q.setsockopt_in(zmq.IDENTITY, "mux")
    q.bind_out(self.engine_info["mux"])
    q.connect_mon(self.monitor_url)
    q.daemon = True
    children.append(q)

    # Control Queue (in a Process)
    q = mq(zmq.XREP, zmq.XREP, zmq.PUB, "incontrol", "outcontrol")
    q.bind_in(self.client_info["control"])
    q.setsockopt_in(zmq.IDENTITY, "control")
    q.bind_out(self.engine_info["control"])
    q.connect_mon(self.monitor_url)
    q.daemon = True
    children.append(q)

    # Task Queue (in a Process)
    if self.scheme == "pure":
        self.log.warn("task::using pure XREQ Task scheduler")
        q = mq(zmq.XREP, zmq.XREQ, zmq.PUB, "intask", "outtask")
        q.setsockopt_out(zmq.HWM, self.hwm)
        q.bind_in(self.client_info["task"][1])
        q.setsockopt_in(zmq.IDENTITY, "task")
        q.bind_out(self.engine_info["task"])
        q.connect_mon(self.monitor_url)
        q.daemon = True
        children.append(q)
    elif self.scheme == "none":
        self.log.warn("task::using no Task scheduler")
    else:
        self.log.info("task::using Python %s Task scheduler" % self.scheme)
        sargs = (
            self.client_info["task"][1],
            self.engine_info["task"],
            self.monitor_url,
            self.client_info["notification"],
        )
        kwargs = dict(scheme=self.scheme, logname=self.log.name,
                      loglevel=self.log.level, config=self.config)
        q = Process(target=launch_scheduler, args=sargs, kwargs=kwargs)
        q.daemon = True
        children.append(q)

def init_kernel(self):
    """Create the Kernel object itself"""
    kernel_factory = import_item(str(self.kernel_class))

    self.kernel = kernel_factory(config=self.config, session=self.session,
                                 shell_socket=self.shell_socket,
                                 iopub_socket=self.iopub_socket,
                                 stdin_socket=self.stdin_socket,
                                 log=self.log)
    self.kernel.record_ports(self.ports)

def _wrap_remote_reference(rr):
    d = rr.callRemote('get_client_name')
    d.addCallback(lambda name: import_item(name))

    def adapt(client_interface):
        client = client_interface(rr)
        client.tub = self.tub  # `self` is captured from the enclosing method's scope
        return client

    d.addCallback(adapt)
    return d

def construct_schedulers(self):
    children = self.children
    mq = import_item(self.mq_class)

    # maybe_inproc = 'inproc://monitor' if self.usethreads else self.monitor_url
    # IOPub relay (in a Process)
    q = mq(zmq.PUB, zmq.SUB, zmq.PUB, 'N/A', 'iopub')
    q.bind_in(self.client_info['iopub'])
    q.bind_out(self.engine_info['iopub'])
    q.setsockopt_out(zmq.SUBSCRIBE, '')
    q.connect_mon(self.monitor_url)
    q.daemon = True
    children.append(q)

    # Multiplexer Queue (in a Process)
    q = mq(zmq.XREP, zmq.XREP, zmq.PUB, 'in', 'out')
    q.bind_in(self.client_info['mux'])
    q.setsockopt_in(zmq.IDENTITY, 'mux')
    q.bind_out(self.engine_info['mux'])
    q.connect_mon(self.monitor_url)
    q.daemon = True
    children.append(q)

    # Control Queue (in a Process)
    q = mq(zmq.XREP, zmq.XREP, zmq.PUB, 'incontrol', 'outcontrol')
    q.bind_in(self.client_info['control'])
    q.setsockopt_in(zmq.IDENTITY, 'control')
    q.bind_out(self.engine_info['control'])
    q.connect_mon(self.monitor_url)
    q.daemon = True
    children.append(q)

    # Task Queue (in a Process)
    if self.scheme == 'pure':
        self.log.warn("task::using pure XREQ Task scheduler")
        q = mq(zmq.XREP, zmq.XREQ, zmq.PUB, 'intask', 'outtask')
        q.bind_in(self.client_info['task'][1])
        q.setsockopt_in(zmq.IDENTITY, 'task')
        q.bind_out(self.engine_info['task'])
        q.connect_mon(self.monitor_url)
        q.daemon = True
        children.append(q)
    elif self.scheme == 'none':
        self.log.warn("task::using no Task scheduler")
    else:
        self.log.info("task::using Python %s Task scheduler" % self.scheme)
        sargs = (self.client_info['task'][1], self.engine_info['task'],
                 self.monitor_url, self.client_info['notification'])
        kwargs = dict(scheme=self.scheme, logname=self.log.name,
                      loglevel=self.log.level, config=dict(self.config))
        q = Process(target=launch_scheduler, args=sargs, kwargs=kwargs)
        q.daemon = True
        children.append(q)

def start_launchers(self, controller=True):
    config = self.master_config

    # Create the launchers. In both cases, we set the work_dir of
    # the launcher to the cluster_dir. This is where the launcher's
    # subprocesses will be launched. It is not where the controller
    # and engine will be launched.
    if controller:
        cl_class = import_item(config.Global.controller_launcher)
        self.controller_launcher = cl_class(
            work_dir=self.cluster_dir, config=config, logname=self.log.name
        )
        # Setup the observing of stopping. If the controller dies, shut
        # everything down as that will be completely fatal for the engines.
        self.controller_launcher.on_stop(self.stop_launchers)
        # But, we don't monitor the stopping of engines. An engine dying
        # is just fine and in principle a user could start a new engine.
        # Also, if we did monitor engine stopping, it is difficult to
        # know what to do when only some engines die. Currently, the
        # observing of engine stopping is inconsistent. Some launchers
        # might trigger on a single engine stopping, others wait until
        # all stop. TODO: think more about how to handle this.
    else:
        self.controller_launcher = None

    el_class = import_item(config.Global.engine_launcher)
    self.engine_launcher = el_class(
        work_dir=self.cluster_dir, config=config, logname=self.log.name
    )

    # Setup signals
    signal.signal(signal.SIGINT, self.sigint_handler)

    # Start the controller and engines
    self._stopping = False  # Make sure stop_launchers is not called 2x.
    if controller:
        self.start_controller()
    dc = ioloop.DelayedCallback(self.start_engines,
                                1000 * config.Global.delay * controller,
                                self.loop)
    dc.start()
    self.startup_message()

def _import_app(self, app_path):
    """import an app class"""
    app = None
    name = app_path.rsplit('.', 1)[-1]
    try:
        app = import_item(app_path)
    except ImportError:
        self.log.info("Couldn't import %s, config file will be excluded", name)
    except Exception:
        self.log.warning('Unexpected error importing %s', name, exc_info=True)
    return app

def _unpacker_changed(self, name, old, new):
    if new.lower() == "json":
        self.pack = json_packer
        self.unpack = json_unpacker
        self.packer = new
    elif new.lower() == "pickle":
        self.pack = pickle_packer
        self.unpack = pickle_unpacker
        self.packer = new
    else:
        self.unpack = import_item(str(new))

def build_launcher(self, clsname):
    """import and instantiate a Launcher based on importstring"""
    if '.' not in clsname:
        # not a module, presume it's the raw name in apps.launcher
        clsname = 'IPython.parallel.apps.launcher.' + clsname
    # print repr(clsname)
    klass = import_item(clsname)

    launcher = klass(
        work_dir=self.profile_dir.location,
        config=self.config,
        log=self.log
    )
    return launcher

def client(self, **kwargs):
    """Create a client configured to connect to our kernel"""
    if self.client_factory is None:
        self.client_factory = import_item(self.client_class)

    kw = {}
    kw.update(self.get_connection_info())
    kw.update(dict(
        connection_file=self.connection_file,
        session=self.session,
        parent=self,
    ))

    # add kwargs last, for manual overrides
    kw.update(kwargs)
    return self.client_factory(**kw)

def adapt_to_interfaces(self, d):
    """Run through the interfaces, adapt and register."""
    for ifname, ifconfig in self.interfaces.items():
        ff = self._get_security_file(ifconfig.furl_file)
        log.msg("Adapting [%s] to interface: %s" %
                (self.adaptee.__class__.__name__, ifname))
        log.msg("Saving FURL for interface [%s] to file: %s" % (ifname, ff))
        check_furl_file_security(ff, self.secure)
        adaptee = self.adaptee
        for i in ifconfig.interface_chain:
            adaptee = import_item(i)(adaptee)
        d.addCallback(self.register, adaptee, furl_file=ff)

def init_configurables(self):
    # force Session default to be secure
    default_secure(self.config)

    self.kernel_manager = MappingKernelManager(
        config=self.config, log=self.log, kernel_argv=self.kernel_argv,
        connection_dir=self.profile_dir.security_dir,
    )
    kls = import_item(self.notebook_manager_class)
    self.notebook_manager = kls(config=self.config, log=self.log)
    self.notebook_manager.load_notebook_names()
    self.cluster_manager = ClusterManager(config=self.config, log=self.log)
    self.cluster_manager.update_profiles()

def get_ipython_module_path(module_str):
    """Find the path to an IPython module in this version of IPython.

    This will always find the version of the module that is in this importable
    IPython package. This will always return the path to the ``.py``
    version of the module.
    """
    if module_str == 'IPython':
        return os.path.join(get_ipython_package_dir(), '__init__.py')
    mod = import_item(module_str)
    the_path = mod.__file__.replace('.pyc', '.py')
    the_path = the_path.replace('.pyo', '.py')
    # cast_unicode is a no-op on text; bytes paths are decoded with fs_encoding
    return py3compat.cast_unicode(the_path, fs_encoding)

def initialize_subcommand(self, subc, argv=None):
    """Initialize a subcommand with argv."""
    subapp, help = self.subcommands.get(subc)

    if isinstance(subapp, string_types):
        subapp = import_item(subapp)

    # clear existing instances
    self.__class__.clear_instance()
    # instantiate
    self.subapp = subapp.instance(config=self.config)
    # and initialize subapp
    self.subapp.initialize(argv)

def __init__(self, **kwargs):
    super(MixedContentsManager, self).__init__(**kwargs)
    self.managers = {}

    # check consistency of scheme
    if not len(set(map(lambda x: x['root'], self.filesystem_scheme))) == len(self.filesystem_scheme):
        raise ValueError('Scheme should not mount two contents managers on the same mountpoint')

    kwargs.update({'parent': self})
    for scheme in self.filesystem_scheme:
        manager_class = import_item(scheme['contents'])
        self.managers[scheme['root']] = manager_class(**kwargs)

def build_launcher(self, clsname):
    """import and instantiate a Launcher based on importstring"""
    if '.' not in clsname:
        # not a module, presume it's the raw name in apps.launcher
        clsname = 'IPython.parallel.apps.launcher.' + clsname
    # print repr(clsname)
    try:
        klass = import_item(clsname)
    except (ImportError, KeyError):
        self.log.fatal("Could not import launcher class: %r" % clsname)
        self.exit(1)

    launcher = klass(work_dir=u'.', config=self.config, log=self.log)
    return launcher

def register_target(self, target_name, f):
    """Register a callable f for a given target name

    f will be called with two arguments when a comm_open message is received
    with `target`:

    - the Comm instance
    - the `comm_open` message itself.

    f can be a Python callable or an import string for one.
    """
    if isinstance(f, string_types):
        f = import_item(f)

    self.targets[target_name] = f

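# A hedged usage sketch for register_target above. The `comm_manager`
# instance, target name, and handler module path are illustrative, not from
# the source; the point is the two registration forms the docstring allows.
def on_my_comm_opened(comm, msg):
    # called with the Comm instance and the comm_open message
    comm.send({'status': 'ok'})

comm_manager.register_target('my_comm', on_my_comm_opened)
# equivalent, resolved through import_item when the target is registered:
comm_manager.register_target('my_comm', 'mypackage.handlers.on_my_comm_opened')
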
def init_kernel(self):
    """Create the Kernel object itself"""
    shell_stream = ZMQStream(self.shell_socket)

    kernel_factory = import_item(str(self.kernel_class))
    kernel = kernel_factory(config=self.config, session=self.session,
                            shell_streams=[shell_stream],
                            iopub_socket=self.iopub_socket,
                            stdin_socket=self.stdin_socket,
                            log=self.log,
                            profile_dir=self.profile_dir,
                            )
    kernel.record_ports(self.ports)
    self.kernel = kernel

def _import_mapping(mapping, original=None):
    """import any string-keys in a type mapping"""
    log = get_logger()
    log.debug("Importing canning map")
    for key, value in list(mapping.items()):
        if isinstance(key, string_types):
            try:
                cls = import_item(key)
            except Exception:
                if original and key not in original:
                    # only message on user-added classes
                    log.error("canning class not importable: %r", key, exc_info=True)
                mapping.pop(key)
            else:
                mapping[cls] = mapping.pop(key)

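# Hedged sketch of the pattern _import_mapping serves, assuming the helper
# above and its implicit imports (string_types, get_logger) are in scope.
# The mapping entries are illustrative: declaring keys as dotted strings
# defers the import, and the helper later swaps each string key for the
# class it names.
serializers = {
    'collections.OrderedDict': 'serialize_as_dict',  # resolved lazily
}
_import_mapping(serializers)

from collections import OrderedDict
# the string key has been replaced by the imported class itself
assert serializers[OrderedDict] == 'serialize_as_dict'
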
def register_preprocessor(self, preprocessor, enabled=False):
    """
    Register a preprocessor.
    Preprocessors are classes that act upon the notebook before it is
    passed into the Jinja templating engine.  Preprocessors are also
    capable of passing additional information to the Jinja
    templating engine.

    Parameters
    ----------
    preprocessor : preprocessor
    """
    if preprocessor is None:
        raise TypeError('preprocessor')
    isclass = isinstance(preprocessor, type)
    constructed = not isclass

    # Handle preprocessor's registration based on its type
    if constructed and isinstance(preprocessor, py3compat.string_types):
        # Preprocessor is a string, import the namespace and recursively call
        # this register_preprocessor method
        preprocessor_cls = import_item(preprocessor)
        return self.register_preprocessor(preprocessor_cls, enabled)

    if constructed and hasattr(preprocessor, '__call__'):
        # Preprocessor is a function, no need to construct it.
        # Register and return the preprocessor.
        if enabled:
            preprocessor.enabled = True
        self._preprocessors.append(preprocessor)
        return preprocessor

    elif isclass and isinstance(preprocessor, MetaHasTraits):
        # Preprocessor is configurable.  Make sure to pass in new default for
        # the enabled flag if one was specified.
        self.register_preprocessor(preprocessor(parent=self), enabled)

    elif isclass:
        # Preprocessor is not configurable, construct it
        self.register_preprocessor(preprocessor(), enabled)

    else:
        # Preprocessor is an instance of something without a __call__
        # attribute.
        raise TypeError('preprocessor')

def register_filter(self, name, jinja_filter):
    """
    Register a filter.
    A filter is a function that accepts and acts on one string.
    The filters are accessible within the Jinja templating engine.

    Parameters
    ----------
    name : str
        name to give the filter in the Jinja engine
    filter : filter
    """
    if jinja_filter is None:
        raise TypeError('filter')
    isclass = isinstance(jinja_filter, type)
    constructed = not isclass

    # Handle filter's registration based on its type
    if constructed and isinstance(jinja_filter, py3compat.string_types):
        # filter is a string, import the namespace and recursively call
        # this register_filter method
        filter_cls = import_item(jinja_filter)
        return self.register_filter(name, filter_cls)

    if constructed and hasattr(jinja_filter, '__call__'):
        # filter is a function, no need to construct it.
        self.environment.filters[name] = jinja_filter
        return jinja_filter

    elif isclass and isinstance(jinja_filter, MetaHasTraits):
        # filter is configurable.  Make sure to pass in new default for
        # the enabled flag if one was specified.
        filter_instance = jinja_filter(parent=self)
        self.register_filter(name, filter_instance)

    elif isclass:
        # filter is not configurable, construct it
        filter_instance = jinja_filter()
        self.register_filter(name, filter_instance)

    else:
        # filter is an instance of something without a __call__
        # attribute.
        raise TypeError('filter')

def build_launcher(self, clsname, kind=None):
    """import and instantiate a Launcher based on importstring"""
    if '.' not in clsname:
        # not a module, presume it's the raw name in apps.launcher
        if kind and kind not in clsname:
            # doesn't match necessary full class name, assume it's
            # just 'PBS' or 'MPIExec' prefix:
            clsname = clsname + kind + 'Launcher'
        clsname = 'IPython.parallel.apps.launcher.' + clsname
    try:
        klass = import_item(clsname)
    except (ImportError, KeyError):
        self.log.fatal("Could not import launcher class: %r" % clsname)
        self.exit(1)

    launcher = klass(
        work_dir=u'.',
        config=self.config,
        log=self.log,
        profile_dir=self.profile_dir.location,
        cluster_id=self.cluster_id,
    )
    return launcher