class EnterpriseGatewayConfigMixin(Configurable):
    """Mixin defining the configurable traits of Jupyter Enterprise Gateway.

    Each trait may be set via config file/CLI or an ``EG_*`` environment
    variable; most also honor a legacy ``KG_*`` (Kernel Gateway) fallback.
    """

    # Server IP / PORT binding
    port_env = 'EG_PORT'
    port_default_value = 8888
    port = Integer(port_default_value, config=True,
                   help='Port on which to listen (EG_PORT env var)')

    @default('port')
    def port_default(self):
        return int(
            os.getenv(self.port_env,
                      os.getenv('KG_PORT', self.port_default_value)))

    port_retries_env = 'EG_PORT_RETRIES'
    port_retries_default_value = 50
    port_retries = Integer(
        port_retries_default_value,
        config=True,
        help="""Number of ports to try if the specified port is not available
        (EG_PORT_RETRIES env var)""")

    @default('port_retries')
    def port_retries_default(self):
        return int(
            os.getenv(
                self.port_retries_env,
                os.getenv('KG_PORT_RETRIES', self.port_retries_default_value)))

    ip_env = 'EG_IP'
    ip_default_value = '127.0.0.1'
    ip = Unicode(ip_default_value, config=True,
                 help='IP address on which to listen (EG_IP env var)')

    @default('ip')
    def ip_default(self):
        return os.getenv(self.ip_env, os.getenv('KG_IP', self.ip_default_value))

    # Base URL
    base_url_env = 'EG_BASE_URL'
    base_url_default_value = '/'
    base_url = Unicode(
        base_url_default_value,
        config=True,
        help='The base path for mounting all API resources (EG_BASE_URL env var)')

    @default('base_url')
    def base_url_default(self):
        return os.getenv(self.base_url_env,
                         os.getenv('KG_BASE_URL', self.base_url_default_value))

    # Token authorization
    auth_token_env = 'EG_AUTH_TOKEN'
    auth_token = Unicode(
        config=True,
        help='Authorization token required for all requests (EG_AUTH_TOKEN env var)')

    @default('auth_token')
    def _auth_token_default(self):
        return os.getenv(self.auth_token_env, os.getenv('KG_AUTH_TOKEN', ''))

    # Begin CORS headers
    allow_credentials_env = 'EG_ALLOW_CREDENTIALS'
    allow_credentials = Unicode(
        config=True,
        help='Sets the Access-Control-Allow-Credentials header. (EG_ALLOW_CREDENTIALS env var)')

    @default('allow_credentials')
    def allow_credentials_default(self):
        return os.getenv(self.allow_credentials_env,
                         os.getenv('KG_ALLOW_CREDENTIALS', ''))

    allow_headers_env = 'EG_ALLOW_HEADERS'
    allow_headers = Unicode(
        config=True,
        help='Sets the Access-Control-Allow-Headers header. (EG_ALLOW_HEADERS env var)')

    @default('allow_headers')
    def allow_headers_default(self):
        return os.getenv(self.allow_headers_env,
                         os.getenv('KG_ALLOW_HEADERS', ''))

    allow_methods_env = 'EG_ALLOW_METHODS'
    allow_methods = Unicode(
        config=True,
        help='Sets the Access-Control-Allow-Methods header. (EG_ALLOW_METHODS env var)')

    @default('allow_methods')
    def allow_methods_default(self):
        return os.getenv(self.allow_methods_env,
                         os.getenv('KG_ALLOW_METHODS', ''))

    allow_origin_env = 'EG_ALLOW_ORIGIN'
    allow_origin = Unicode(
        config=True,
        help='Sets the Access-Control-Allow-Origin header. (EG_ALLOW_ORIGIN env var)')

    @default('allow_origin')
    def allow_origin_default(self):
        return os.getenv(self.allow_origin_env,
                         os.getenv('KG_ALLOW_ORIGIN', ''))

    expose_headers_env = 'EG_EXPOSE_HEADERS'
    expose_headers = Unicode(
        config=True,
        help='Sets the Access-Control-Expose-Headers header. (EG_EXPOSE_HEADERS env var)')

    @default('expose_headers')
    def expose_headers_default(self):
        return os.getenv(self.expose_headers_env,
                         os.getenv('KG_EXPOSE_HEADERS', ''))

    trust_xheaders_env = 'EG_TRUST_XHEADERS'
    trust_xheaders = CBool(
        False,
        config=True,
        help="""Use x-* header values for overriding the remote-ip, useful when
        application is behind a proxy. (EG_TRUST_XHEADERS env var)""")

    @default('trust_xheaders')
    def trust_xheaders_default(self):
        return strtobool(
            os.getenv(self.trust_xheaders_env,
                      os.getenv('KG_TRUST_XHEADERS', 'False')))

    certfile_env = 'EG_CERTFILE'
    certfile = Unicode(
        None,
        config=True,
        allow_none=True,
        help='The full path to an SSL/TLS certificate file. (EG_CERTFILE env var)')

    @default('certfile')
    def certfile_default(self):
        return os.getenv(self.certfile_env, os.getenv('KG_CERTFILE'))

    keyfile_env = 'EG_KEYFILE'
    keyfile = Unicode(
        None,
        config=True,
        allow_none=True,
        help='The full path to a private key file for usage with SSL/TLS. (EG_KEYFILE env var)')

    @default('keyfile')
    def keyfile_default(self):
        return os.getenv(self.keyfile_env, os.getenv('KG_KEYFILE'))

    client_ca_env = 'EG_CLIENT_CA'
    client_ca = Unicode(
        None,
        config=True,
        allow_none=True,
        help="""The full path to a certificate authority certificate for SSL/TLS
        client authentication. (EG_CLIENT_CA env var)""")

    @default('client_ca')
    def client_ca_default(self):
        return os.getenv(self.client_ca_env, os.getenv('KG_CLIENT_CA'))

    ssl_version_env = 'EG_SSL_VERSION'
    # NOTE(review): this constant is not wired in as the trait default below
    # (the trait defaults to None) -- retained as-is; confirm intent upstream.
    ssl_version_default_value = ssl.PROTOCOL_TLSv1_2
    ssl_version = Integer(
        None,
        config=True,
        allow_none=True,
        help="""Sets the SSL version to use for the web socket
        connection. (EG_SSL_VERSION env var)""")

    @default('ssl_version')
    def ssl_version_default(self):
        ssl_from_env = os.getenv(self.ssl_version_env, os.getenv('KG_SSL_VERSION'))
        return ssl_from_env if ssl_from_env is None else int(ssl_from_env)

    max_age_env = 'EG_MAX_AGE'
    max_age = Unicode(
        config=True,
        help='Sets the Access-Control-Max-Age header. (EG_MAX_AGE env var)')

    @default('max_age')
    def max_age_default(self):
        return os.getenv(self.max_age_env, os.getenv('KG_MAX_AGE', ''))
    # End CORS headers

    max_kernels_env = 'EG_MAX_KERNELS'
    max_kernels = Integer(
        None,
        config=True,
        allow_none=True,
        help="""Limits the number of kernel instances allowed to run by this gateway.
        Unbounded by default. (EG_MAX_KERNELS env var)""")

    @default('max_kernels')
    def max_kernels_default(self):
        val = os.getenv(self.max_kernels_env, os.getenv('KG_MAX_KERNELS'))
        return val if val is None else int(val)

    default_kernel_name_env = 'EG_DEFAULT_KERNEL_NAME'
    default_kernel_name = Unicode(
        config=True,
        help='Default kernel name when spawning a kernel (EG_DEFAULT_KERNEL_NAME env var)')

    @default('default_kernel_name')
    def default_kernel_name_default(self):
        # defaults to Jupyter's default kernel name on empty string
        return os.getenv(self.default_kernel_name_env,
                         os.getenv('KG_DEFAULT_KERNEL_NAME', ''))

    list_kernels_env = 'EG_LIST_KERNELS'
    list_kernels = Bool(
        config=True,
        help="""Permits listing of the running kernels using API endpoints /api/kernels
        and /api/sessions. (EG_LIST_KERNELS env var) Note: Jupyter Notebook
        allows this by default but Jupyter Enterprise Gateway does not.""")

    @default('list_kernels')
    def list_kernels_default(self):
        return os.getenv(self.list_kernels_env,
                         os.getenv('KG_LIST_KERNELS', 'False')).lower() == 'true'

    env_whitelist_env = 'EG_ENV_WHITELIST'
    env_whitelist = List(
        config=True,
        help="""Environment variables allowed to be set when a client requests a
        new kernel. Use '*' to allow all environment variables sent in the request.
        (EG_ENV_WHITELIST env var)""")

    @default('env_whitelist')
    def env_whitelist_default(self):
        return os.getenv(self.env_whitelist_env,
                         os.getenv('KG_ENV_WHITELIST', '')).split(',')

    env_process_whitelist_env = 'EG_ENV_PROCESS_WHITELIST'
    env_process_whitelist = List(
        config=True,
        help="""Environment variables allowed to be inherited
        from the spawning process by the kernel. (EG_ENV_PROCESS_WHITELIST env var)""")

    @default('env_process_whitelist')
    def env_process_whitelist_default(self):
        return os.getenv(self.env_process_whitelist_env,
                         os.getenv('KG_ENV_PROCESS_WHITELIST', '')).split(',')

    kernel_headers_env = 'EG_KERNEL_HEADERS'
    kernel_headers = List(
        config=True,
        help="""Request headers to make available to kernel launch framework.
        (EG_KERNEL_HEADERS env var)""")

    @default('kernel_headers')
    def kernel_headers_default(self):
        # No KG_ fallback here; an absent env var yields an empty list.
        default_headers = os.getenv(self.kernel_headers_env)
        return default_headers.split(',') if default_headers else []

    # Remote hosts
    remote_hosts_env = 'EG_REMOTE_HOSTS'
    remote_hosts_default_value = 'localhost'
    remote_hosts = List(
        default_value=[remote_hosts_default_value],
        config=True,
        help="""Bracketed comma-separated list of hosts on which DistributedProcessProxy
        kernels will be launched e.g., ['host1','host2'].
        (EG_REMOTE_HOSTS env var - non-bracketed, just comma-separated)""")

    @default('remote_hosts')
    def remote_hosts_default(self):
        return os.getenv(self.remote_hosts_env,
                         self.remote_hosts_default_value).split(',')

    # Yarn endpoint
    yarn_endpoint_env = 'EG_YARN_ENDPOINT'
    yarn_endpoint = Unicode(
        None,
        config=True,
        allow_none=True,
        help="""The http url specifying the YARN Resource Manager. Note: If this value is NOT set,
        the YARN library will use the files within the local HADOOP_CONFIG_DIR to determine the
        active resource manager. (EG_YARN_ENDPOINT env var)""")

    @default('yarn_endpoint')
    def yarn_endpoint_default(self):
        return os.getenv(self.yarn_endpoint_env)

    # Alt Yarn endpoint
    alt_yarn_endpoint_env = 'EG_ALT_YARN_ENDPOINT'
    alt_yarn_endpoint = Unicode(
        None,
        config=True,
        allow_none=True,
        help="""The http url specifying the alternate YARN Resource Manager.  This value should
        be set when YARN Resource Managers are configured for high availability.  Note: If both
        YARN endpoints are NOT set, the YARN library will use the files within the local
        HADOOP_CONFIG_DIR to determine the active resource manager.
        (EG_ALT_YARN_ENDPOINT env var)""")

    @default('alt_yarn_endpoint')
    def alt_yarn_endpoint_default(self):
        return os.getenv(self.alt_yarn_endpoint_env)

    yarn_endpoint_security_enabled_env = 'EG_YARN_ENDPOINT_SECURITY_ENABLED'
    yarn_endpoint_security_enabled_default_value = False
    yarn_endpoint_security_enabled = Bool(
        yarn_endpoint_security_enabled_default_value,
        config=True,
        help="""Is YARN Kerberos/SPNEGO Security enabled (True/False).
        (EG_YARN_ENDPOINT_SECURITY_ENABLED env var)""")

    @default('yarn_endpoint_security_enabled')
    def yarn_endpoint_security_enabled_default(self):
        # BUG FIX: a plain bool() on the raw env string treated any non-empty
        # value -- including "False" -- as True.  Compare the string instead,
        # consistent with the other boolean env-var defaults in this class.
        return os.getenv(
            self.yarn_endpoint_security_enabled_env,
            str(self.yarn_endpoint_security_enabled_default_value)).lower() == 'true'

    # Conductor endpoint
    conductor_endpoint_env = 'EG_CONDUCTOR_ENDPOINT'
    conductor_endpoint_default_value = None
    conductor_endpoint = Unicode(
        conductor_endpoint_default_value,
        allow_none=True,
        config=True,
        help="""The http url for accessing the Conductor REST API.
        (EG_CONDUCTOR_ENDPOINT env var)""")

    @default('conductor_endpoint')
    def conductor_endpoint_default(self):
        return os.getenv(self.conductor_endpoint_env,
                         self.conductor_endpoint_default_value)

    _log_formatter_cls = LogFormatter  # traitlet default is LevelFormatter

    @default('log_format')
    def _default_log_format(self):
        """override default log format to include milliseconds"""
        return u"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s"

    # Impersonation enabled
    impersonation_enabled_env = 'EG_IMPERSONATION_ENABLED'
    impersonation_enabled = Bool(
        False,
        config=True,
        help="""Indicates whether impersonation will be performed during kernel launch.
        (EG_IMPERSONATION_ENABLED env var)""")

    @default('impersonation_enabled')
    def impersonation_enabled_default(self):
        # The comparison already yields a bool; no extra bool() wrap needed.
        return os.getenv(self.impersonation_enabled_env, 'false').lower() == 'true'

    # Unauthorized users
    unauthorized_users_env = 'EG_UNAUTHORIZED_USERS'
    unauthorized_users_default_value = 'root'
    unauthorized_users = Set(
        default_value={unauthorized_users_default_value},
        config=True,
        help="""Comma-separated list of user names (e.g., ['root','admin']) against which
        KERNEL_USERNAME will be compared.  Any match (case-sensitive) will prevent the
        kernel's launch and result in an HTTP 403 (Forbidden) error.
        (EG_UNAUTHORIZED_USERS env var - non-bracketed, just comma-separated)""")

    @default('unauthorized_users')
    def unauthorized_users_default(self):
        return os.getenv(self.unauthorized_users_env,
                         self.unauthorized_users_default_value).split(',')

    # Authorized users
    authorized_users_env = 'EG_AUTHORIZED_USERS'
    authorized_users = Set(
        config=True,
        help="""Comma-separated list of user names (e.g., ['bob','alice']) against which
        KERNEL_USERNAME will be compared.  Any match (case-sensitive) will allow the kernel's
        launch, otherwise an HTTP 403 (Forbidden) error will be raised.  The set of unauthorized
        users takes precedence.  This option should be used carefully as it can dramatically limit
        who can launch kernels.  (EG_AUTHORIZED_USERS env var - non-bracketed,
        just comma-separated)""")

    @default('authorized_users')
    def authorized_users_default(self):
        au_env = os.getenv(self.authorized_users_env)
        return au_env.split(',') if au_env is not None else []

    # Authorized origin
    authorized_origin_env = 'EG_AUTHORIZED_ORIGIN'
    authorized_origin = Unicode(
        config=True,
        help="""Hostname (e.g. 'localhost', 'reverse.proxy.net') which the handler will match
        against the request's SSL certificate.  An HTTP 403 (Forbidden) error will be raised on
        a failed match.  This option requires TLS to be enabled.  It does not support IP
        addresses. (EG_AUTHORIZED_ORIGIN env var)""")

    # Port range
    port_range_env = 'EG_PORT_RANGE'
    port_range_default_value = "0..0"
    port_range = Unicode(
        port_range_default_value,
        config=True,
        help="""Specifies the lower and upper port numbers from which ports are created.
        The bounded values are separated by '..' (e.g., 33245..34245 specifies a range of 1000
        ports to be randomly selected).  A range of zero (e.g., 33245..33245 or 0..0) disables
        port-range enforcement. (EG_PORT_RANGE env var)""")

    @default('port_range')
    def port_range_default(self):
        return os.getenv(self.port_range_env, self.port_range_default_value)

    # Max Kernels per User
    max_kernels_per_user_env = 'EG_MAX_KERNELS_PER_USER'
    max_kernels_per_user_default_value = -1
    max_kernels_per_user = Integer(
        max_kernels_per_user_default_value,
        config=True,
        help="""Specifies the maximum number of kernels a user can have active
        simultaneously.  A value of -1 disables enforcement.
        (EG_MAX_KERNELS_PER_USER env var)""")

    @default('max_kernels_per_user')
    def max_kernels_per_user_default(self):
        return int(
            os.getenv(self.max_kernels_per_user_env,
                      self.max_kernels_per_user_default_value))

    ws_ping_interval_env = 'EG_WS_PING_INTERVAL_SECS'
    ws_ping_interval_default_value = 30
    ws_ping_interval = Integer(
        ws_ping_interval_default_value,
        config=True,
        help="""Specifies the ping interval (in seconds) that should be used by zmq port
        associated with spawned kernels.  Set this variable to 0 to disable ping mechanism.
        (EG_WS_PING_INTERVAL_SECS env var)""")

    @default('ws_ping_interval')
    def ws_ping_interval_default(self):
        return int(
            os.getenv(self.ws_ping_interval_env,
                      self.ws_ping_interval_default_value))

    # Dynamic Update Interval
    dynamic_config_interval_env = 'EG_DYNAMIC_CONFIG_INTERVAL'
    dynamic_config_interval_default_value = 0
    dynamic_config_interval = Integer(
        dynamic_config_interval_default_value,
        min=0,
        config=True,
        help="""Specifies the number of seconds configuration files are polled for
        changes.  A value of 0 or less disables dynamic config updates.
        (EG_DYNAMIC_CONFIG_INTERVAL env var)""")

    @default('dynamic_config_interval')
    def dynamic_config_interval_default(self):
        return int(
            os.getenv(self.dynamic_config_interval_env,
                      self.dynamic_config_interval_default_value))

    @observe('dynamic_config_interval')
    def dynamic_config_interval_changed(self, event):
        prev_val = event['old']
        self.dynamic_config_interval = event['new']
        if self.dynamic_config_interval != prev_val:
            # Values are different.  Stop the current poller.  If new value is > 0, start a poller.
            if self.dynamic_config_poller:
                self.dynamic_config_poller.stop()
                self.dynamic_config_poller = None

            if self.dynamic_config_interval <= 0:
                self.log.warning(
                    "Dynamic configuration updates have been disabled and cannot be re-enabled "
                    "without restarting Enterprise Gateway!")
            # The interval has been changed, but still positive
            elif prev_val > 0 and hasattr(self, "init_dynamic_configs"):
                self.init_dynamic_configs()  # Restart the poller

    dynamic_config_poller = None

    kernel_spec_manager = Instance(
        "jupyter_client.kernelspec.KernelSpecManager", allow_none=True)

    kernel_spec_manager_class = Type(
        default_value="jupyter_client.kernelspec.KernelSpecManager",
        config=True,
        help="""
        The kernel spec manager class to use. Must be a subclass
        of `jupyter_client.kernelspec.KernelSpecManager`.
        """)

    kernel_spec_cache_class = Type(
        default_value="enterprise_gateway.services.kernelspecs.KernelSpecCache",
        config=True,
        help="""
        The kernel spec cache class to use. Must be a subclass
        of `enterprise_gateway.services.kernelspecs.KernelSpecCache`.
        """)

    kernel_manager_class = Type(
        klass="enterprise_gateway.services.kernels.remotemanager.RemoteMappingKernelManager",
        default_value="enterprise_gateway.services.kernels.remotemanager.RemoteMappingKernelManager",
        config=True,
        help="""
        The kernel manager class to use. Must be a subclass
        of `enterprise_gateway.services.kernels.RemoteMappingKernelManager`.
        """)

    kernel_session_manager_class = Type(
        klass="enterprise_gateway.services.sessions.kernelsessionmanager.KernelSessionManager",
        default_value="enterprise_gateway.services.sessions.kernelsessionmanager.FileKernelSessionManager",
        config=True,
        help="""
        The kernel session manager class to use. Must be a subclass
        of `enterprise_gateway.services.sessions.KernelSessionManager`.
        """)
class Transfer(ReactWidget):
    """Widget model for a two-column transfer/selection box.

    All traits are synchronized with the front-end model.
    """

    _model_name = Unicode('TransferModel').tag(sync=True)
    # Items available for transfer.
    data_source = List(help="data_source").tag(sync=True)
    # Whether a search box is rendered in each column.
    show_search = CBool(False, allow_none=True, help="show_search").tag(sync=True)
    # Keys of the items shown in the target (right-hand) column.
    target_keys = List(help="target_keys").tag(sync=True)
class Tag(ReactWidget):
    """Widget model for a small categorization tag.

    All traits are synchronized with the front-end model.
    """

    _model_name = Unicode('TagModel').tag(sync=True)
    # Whether the tag shows a close ("x") affordance.
    closable = CBool(False, help="closable").tag(sync=True)
    # Tag color.
    color = Unicode(help="color").tag(sync=True)
    # Whether the tag is currently displayed.
    visible = CBool(True, help="visible").tag(sync=True)
class Switch(ButtonBase, ValueMixin):
    """Widget model for an on/off toggle switch.

    All traits are synchronized with the front-end model.
    """

    _model_name = Unicode('SwitchModel').tag(sync=True)
    # Current state; None means "not yet determined".
    checked = CBool(None, allow_none=True, help="checked or not").tag(sync=True)
    # Labels rendered inside the switch for each state.
    checked_children = Unicode('On', help="content to be shown when the state is checked").tag(sync=True)
    un_checked_children = Unicode('Off', help="content to be shown when the state is unchecked").tag(sync=True)
    # Visual size of the control.
    size = Unicode('default', help="size of the widget").tag(sync=True)
class Radio(ReactWidget, ValueMixin):
    """Widget model for a single radio button.

    All traits are synchronized with the front-end model.
    """

    _model_name = Unicode('RadioModel').tag(sync=True)
    # Initial checked state.
    default_checked = CBool(help="checked or not").tag(sync=True)
    # Visual size of the control.
    size = Unicode('default', help="size of the widget").tag(sync=True)
class MenuSubMenu(ReactWidget):
    """Widget model for a nested sub-menu entry within a menu.

    All traits are synchronized with the front-end model.
    """

    _model_name = Unicode('MenuSubMenuModel').tag(sync=True)
    # Whether the sub-menu is disabled.
    disabled = CBool(False, help='disabled').tag(sync=True)
    # Unique key identifying this entry.
    key = Unicode('', help='key').tag(sync=True)
    # Displayed title of the sub-menu.
    title = Unicode('', help='title').tag(sync=True)
class InputGroup(ReactWidget):
    """Widget model grouping several input controls together.

    All traits are synchronized with the front-end model.
    """

    _model_name = Unicode('InputGroupModel').tag(sync=True)
    # Whether child inputs are rendered flush against each other.
    compact = CBool(False, help="compact").tag(sync=True)
    # Visual size of the group.
    size = Unicode('default', help="size of the widget").tag(sync=True)
class ZMQInteractiveShell(InteractiveShell):
    """A subclass of InteractiveShell for ZMQ."""

    displayhook_class = Type(ZMQShellDisplayHook)
    display_pub_class = Type(ZMQDisplayPublisher)
    data_pub_class = Type('ipykernel.datapub.ZMQDataPublisher')
    kernel = Any()
    parent_header = Any()

    @default('banner1')
    def _default_banner1(self):
        return default_banner

    # Override the traitlet in the parent class, because there's no point using
    # readline for the kernel. Can be removed when the readline code is moved
    # to the terminal frontend.
    colors_force = CBool(True)
    readline_use = CBool(False)
    # autoindent has no meaning in a zmqshell, and attempting to enable it
    # will print a warning in the absence of readline.
    autoindent = CBool(False)

    exiter = Instance(ZMQExitAutocall)

    @default('exiter')
    def _default_exiter(self):
        return ZMQExitAutocall(self)

    @observe('exit_now')
    def _update_exit_now(self, change):
        """stop eventloop when exit_now fires"""
        if change['new']:
            ioloop = self.kernel.io_loop
            ioloop.call_later(0.1, ioloop.stop)

    keepkernel_on_exit = None

    # Over ZeroMQ, GUI control isn't done with PyOS_InputHook as there is no
    # interactive input being read; we provide event loop support in ipkernel
    def enable_gui(self, gui):
        from .eventloops import enable_gui as real_enable_gui
        try:
            real_enable_gui(gui)
            self.active_eventloop = gui
        except ValueError as e:
            raise UsageError("%s" % e)

    def init_environment(self):
        """Configure the user's environment."""
        environ = os.environ
        # These two ensure 'ls' produces nice coloring on BSD-derived systems
        environ['TERM'] = 'xterm-color'
        environ['CLICOLOR'] = '1'
        # Since normal pagers don't work at all (over pexpect we don't have
        # single-key control of the subprocess), try to disable paging in
        # subprocesses as much as possible.
        environ['PAGER'] = 'cat'
        environ['GIT_PAGER'] = 'cat'

    def init_hooks(self):
        super(ZMQInteractiveShell, self).init_hooks()
        self.set_hook('show_in_pager', page.as_hook(payloadpage.page), 99)

    def init_data_pub(self):
        """Delay datapub init until request, for deprecation warnings"""
        pass

    @property
    def data_pub(self):
        # Lazily construct the (deprecated) data publisher on first access.
        if not hasattr(self, '_data_pub'):
            warnings.warn("InteractiveShell.data_pub is deprecated outside IPython parallel.",
                          DeprecationWarning, stacklevel=2)
            self._data_pub = self.data_pub_class(parent=self)
            self._data_pub.session = self.display_pub.session
            self._data_pub.pub_socket = self.display_pub.pub_socket
        return self._data_pub

    @data_pub.setter
    def data_pub(self, pub):
        self._data_pub = pub

    def ask_exit(self):
        """Engage the exit actions."""
        self.exit_now = (not self.keepkernel_on_exit)
        self.payload_manager.write_payload(dict(
            source='ask_exit',
            keepkernel=self.keepkernel_on_exit,
        ))

    def run_cell(self, *args, **kwargs):
        # Reset the stored traceback before each cell executes.
        self._last_traceback = None
        return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)

    def _showtraceback(self, etype, evalue, stb):
        # try to preserve ordering of tracebacks and print statements
        sys.stdout.flush()
        sys.stderr.flush()

        exc_content = {
            u'traceback': stb,
            u'ename': unicode_type(etype.__name__),
            u'evalue': py3compat.safe_unicode(evalue),
        }

        dh = self.displayhook
        # Send exception info over pub socket for other clients than the caller
        # to pick up
        topic = dh.topic.replace(b'execute_result', b'error') if dh.topic else None
        dh.session.send(dh.pub_socket, u'error', json_clean(exc_content),
                        dh.parent_header, ident=topic)

        # FIXME - Once we rely on Python 3, the traceback is stored on the
        # exception object, so we shouldn't need to store it here.
        self._last_traceback = stb

    def set_next_input(self, text, replace=False):
        """Send the specified text to the frontend to be presented at the next
        input cell."""
        self.payload_manager.write_payload(dict(
            source='set_next_input',
            text=text,
            replace=replace,
        ))

    def set_parent(self, parent):
        """Set the parent header for associating output with its triggering input"""
        self.parent_header = parent
        self.displayhook.set_parent(parent)
        self.display_pub.set_parent(parent)
        if hasattr(self, '_data_pub'):
            self.data_pub.set_parent(parent)
        # stdout/stderr may be plain streams without set_parent; ignore those.
        try:
            sys.stdout.set_parent(parent)
        except AttributeError:
            pass
        try:
            sys.stderr.set_parent(parent)
        except AttributeError:
            pass

    def get_parent(self):
        return self.parent_header

    def init_magics(self):
        super(ZMQInteractiveShell, self).init_magics()
        self.register_magics(KernelMagics)
        self.magics_manager.register_alias('ed', 'edit')

    def init_virtualenv(self):
        # Overridden not to do virtualenv detection, because it's probably
        # not appropriate in a kernel. To use a kernel in a virtualenv, install
        # it inside the virtualenv.
        # https://ipython.readthedocs.io/en/latest/install/kernel_install.html
        pass
class PyDevTerminalInteractiveShell(TerminalInteractiveShell):
    """IPython terminal shell customized to run embedded in the PyDev console."""

    banner1 = Unicode(
        default_pydev_banner,
        config=True,
        help="""The part of the banner to be printed before the profile""")

    # TODO term_title: (can PyDev's title be changed???, see terminal.py for where
    # to inject code, in particular set_term_title as used by %cd)
    # for now, just disable term_title
    term_title = CBool(False)

    # Note in version 0.11 there is no guard in the IPython code about displaying a
    # warning, so with 0.11 you get:
    #  WARNING: Readline services not available or not loaded.
    #  WARNING: The auto-indent feature requires the readline library
    # Disable readline, readline type code is all handled by PyDev (on Java side)
    readline_use = CBool(False)
    # autoindent has no meaning in PyDev (PyDev always handles that on the Java side),
    # and attempting to enable it will print a warning in the absence of readline.
    autoindent = CBool(False)

    # Force console to not give warning about color scheme choice and default to NoColor.
    # TODO It would be nice to enable colors in PyDev but:
    # - The PyDev Console (Eclipse Console) does not support the full range of colors, so the
    #   effect isn't as nice anyway at the command line
    # - If done, the color scheme should default to LightBG, but actually be dependent on
    #   any settings the user has (such as if a dark theme is in use, then Linux is probably
    #   a better theme).
    colors_force = CBool(True)
    colors = Unicode("NoColor")

    # Since IPython 5 the terminal interface is not compatible with Emacs
    # `inferior-shell` and the `simple_prompt` flag is needed
    simple_prompt = CBool(True)

    if INLINE_OUTPUT_SUPPORTED:
        displayhook_class = Type(PyDevDisplayHook)
        display_pub_class = Type(PyDevDisplayPub)

    def __init__(self, *args, **kwargs):
        super(PyDevTerminalInteractiveShell, self).__init__(*args, **kwargs)
        if INLINE_OUTPUT_SUPPORTED:
            try:
                self.enable_matplotlib('inline')
            except:
                sys.stderr.write("Failed to enable inline matplotlib plots\n")
                sys.stderr.flush()
            patch_stdout()

    # In the PyDev Console, GUI control is done via hookable XML-RPC server
    @staticmethod
    def enable_gui(gui=None, app=None):
        """Switch amongst GUI input hooks by name.
        """
        # Deferred import
        # NOTE(review): collapsed source made the nesting ambiguous; this
        # reconstruction keeps the whole body under the guard, matching the
        # upstream pydevd layout -- confirm against the original file.
        if not INLINE_OUTPUT_SUPPORTED:
            from pydev_ipython.inputhook import enable_gui as real_enable_gui
            try:
                return real_enable_gui(gui, app)
            except ValueError as e:
                raise UsageError("%s" % e)

    def init_display_formatter(self):
        if INLINE_OUTPUT_SUPPORTED:
            self.display_formatter = DisplayFormatter(parent=self)
            self.configurables.append(self.display_formatter)
            self.display_formatter.ipython_display_formatter.enabled = True
        else:
            super(PyDevTerminalInteractiveShell, self).init_display_formatter()

    #-------------------------------------------------------------------------
    # Things related to hooks
    #-------------------------------------------------------------------------

    def init_hooks(self):
        super(PyDevTerminalInteractiveShell, self).init_hooks()
        self.set_hook('show_in_pager', show_in_pager)

    #-------------------------------------------------------------------------
    # Things related to exceptions
    #-------------------------------------------------------------------------

    def showtraceback(self, exc_tuple=None, *args, **kwargs):
        # IPython does a lot of clever stuff with Exceptions. However mostly
        # it is related to IPython running in a terminal instead of an IDE.
        # (e.g. it prints out snippets of code around the stack trace)
        # PyDev does a lot of clever stuff too, so leave exception handling
        # with default print_exc that PyDev can parse and do its clever stuff
        # with (e.g. it puts links back to the original source code)
        try:
            if exc_tuple is None:
                etype, value, tb = sys.exc_info()
            else:
                etype, value, tb = exc_tuple
        except ValueError:
            return

        if tb is not None:
            traceback.print_exception(etype, value, tb)

        sys.last_type, sys.last_value, sys.last_traceback = etype, value, tb

    def init_completer(self):
        init_shell_completer(self)

    #-------------------------------------------------------------------------
    # Things related to aliases
    #-------------------------------------------------------------------------

    def init_alias(self):
        # InteractiveShell defines alias's we want, but TerminalInteractiveShell defines
        # ones we don't. So don't use super and instead go right to InteractiveShell
        InteractiveShell.init_alias(self)

    #-------------------------------------------------------------------------
    # Things related to exiting
    #-------------------------------------------------------------------------

    def ask_exit(self):
        """ Ask the shell to exit. Can be overiden and used as a callback. """
        # TODO PyDev's console does not have support from the Python side to exit
        # the console. If user forces the exit (with sys.exit()) then the console
        # simply reports errors. e.g.:
        # >>> import sys
        # >>> sys.exit()
        # Failed to create input stream: Connection refused
        # >>>
        # Console already exited with value: 0 while waiting for an answer.
        # Error stream:
        # Output stream:
        # >>>
        #
        # Alternatively if you use the non-IPython shell this is what happens
        # >>> exit()
        # <type 'exceptions.SystemExit'>:None
        # >>>
        # <type 'exceptions.SystemExit'>:None
        # >>>
        #
        # super(PyDevTerminalInteractiveShell, self).ask_exit()
        print('To exit the PyDev Console, terminate the console within IDE.')

    #-------------------------------------------------------------------------
    # Things related to magics
    #-------------------------------------------------------------------------

    def init_magics(self):
        super(PyDevTerminalInteractiveShell, self).init_magics()
class KernelGatewayApp(JupyterApp):
    """Application that provisions Jupyter kernels and proxies HTTP/Websocket
    traffic to the kernels.

    - reads command line and environment variable settings
    - initializes managers and routes
    - creates a Tornado HTTP server
    - starts the Tornado event loop
    """
    name = 'jupyter-kernel-gateway'
    version = __version__
    description = """
        Jupyter Kernel Gateway

        Provisions Jupyter kernels and proxies HTTP/Websocket traffic to them.
    """
    # Also include when generating help options
    classes = [NotebookHTTPPersonality, JupyterWebsocketPersonality]

    # Enable some command line shortcuts
    aliases = aliases

    # Server IP / PORT binding.
    # Pattern used throughout this class: each configurable trait has a
    # companion ``*_env`` class attribute naming the environment variable
    # that seeds it, and a ``@default``-decorated method that reads that
    # variable, falling back to the hard-coded default.
    port_env = 'KG_PORT'
    port_default_value = 8888
    port = Integer(port_default_value, config=True,
                   help="Port on which to listen (KG_PORT env var)")

    @default('port')
    def port_default(self):
        # Environment variable wins over the hard-coded default.
        return int(os.getenv(self.port_env, self.port_default_value))

    port_retries_env = 'KG_PORT_RETRIES'
    port_retries_default_value = 50
    port_retries = Integer(
        port_retries_default_value, config=True,
        help="Number of ports to try if the specified port is not available (KG_PORT_RETRIES env var)"
    )

    @default('port_retries')
    def port_retries_default(self):
        return int(os.getenv(self.port_retries_env, self.port_retries_default_value))

    ip_env = 'KG_IP'
    ip_default_value = '127.0.0.1'
    ip = Unicode(ip_default_value, config=True,
                 help="IP address on which to listen (KG_IP env var)")

    @default('ip')
    def ip_default(self):
        return os.getenv(self.ip_env, self.ip_default_value)

    # Base URL
    base_url_env = 'KG_BASE_URL'
    base_url_default_value = '/'
    base_url = Unicode(
        base_url_default_value, config=True,
        help="""The base path for mounting all API resources (KG_BASE_URL env var)"""
    )

    @default('base_url')
    def base_url_default(self):
        return os.getenv(self.base_url_env, self.base_url_default_value)

    # Token authorization
    auth_token_env = 'KG_AUTH_TOKEN'
    auth_token = Unicode(
        config=True,
        help='Authorization token required for all requests (KG_AUTH_TOKEN env var)'
    )

    @default('auth_token')
    def _auth_token_default(self):
        # Empty string means "no token required".
        return os.getenv(self.auth_token_env, '')

    # CORS headers: each trait below maps 1:1 onto an Access-Control-* header
    # that init_webapp() copies into the Tornado settings as kg_* keys.
    allow_credentials_env = 'KG_ALLOW_CREDENTIALS'
    allow_credentials = Unicode(
        config=True,
        help='Sets the Access-Control-Allow-Credentials header. (KG_ALLOW_CREDENTIALS env var)'
    )

    @default('allow_credentials')
    def allow_credentials_default(self):
        return os.getenv(self.allow_credentials_env, '')

    allow_headers_env = 'KG_ALLOW_HEADERS'
    allow_headers = Unicode(
        config=True,
        help='Sets the Access-Control-Allow-Headers header. (KG_ALLOW_HEADERS env var)'
    )

    @default('allow_headers')
    def allow_headers_default(self):
        return os.getenv(self.allow_headers_env, '')

    allow_methods_env = 'KG_ALLOW_METHODS'
    allow_methods = Unicode(
        config=True,
        help='Sets the Access-Control-Allow-Methods header. (KG_ALLOW_METHODS env var)'
    )

    @default('allow_methods')
    def allow_methods_default(self):
        return os.getenv(self.allow_methods_env, '')

    allow_origin_env = 'KG_ALLOW_ORIGIN'
    allow_origin = Unicode(
        config=True,
        help='Sets the Access-Control-Allow-Origin header. (KG_ALLOW_ORIGIN env var)'
    )

    @default('allow_origin')
    def allow_origin_default(self):
        return os.getenv(self.allow_origin_env, '')

    expose_headers_env = 'KG_EXPOSE_HEADERS'
    expose_headers = Unicode(
        config=True,
        help='Sets the Access-Control-Expose-Headers header. (KG_EXPOSE_HEADERS env var)'
    )

    @default('expose_headers')
    def expose_headers_default(self):
        return os.getenv(self.expose_headers_env, '')

    trust_xheaders_env = 'KG_TRUST_XHEADERS'
    trust_xheaders = CBool(
        False, config=True,
        help='Use x-* header values for overriding the remote-ip, useful when application is behing a proxy. (KG_TRUST_XHEADERS env var)'
    )

    @default('trust_xheaders')
    def trust_xheaders_default(self):
        # strtobool accepts 'y'/'yes'/'true'/'1' etc. (case-insensitive).
        return strtobool(os.getenv(self.trust_xheaders_env, 'False'))

    max_age_env = 'KG_MAX_AGE'
    max_age = Unicode(
        config=True,
        help='Sets the Access-Control-Max-Age header. (KG_MAX_AGE env var)'
    )

    @default('max_age')
    def max_age_default(self):
        return os.getenv(self.max_age_env, '')

    max_kernels_env = 'KG_MAX_KERNELS'
    max_kernels = Integer(
        None, config=True, allow_none=True,
        help='Limits the number of kernel instances allowed to run by this gateway. Unbounded by default. (KG_MAX_KERNELS env var)'
    )

    @default('max_kernels')
    def max_kernels_default(self):
        # None (env var unset) means "unbounded".
        val = os.getenv(self.max_kernels_env)
        return val if val is None else int(val)

    seed_uri_env = 'KG_SEED_URI'
    seed_uri = Unicode(
        None, config=True, allow_none=True,
        help='Runs the notebook (.ipynb) at the given URI on every kernel launched. No seed by default. (KG_SEED_URI env var)'
    )

    @default('seed_uri')
    def seed_uri_default(self):
        return os.getenv(self.seed_uri_env)

    prespawn_count_env = 'KG_PRESPAWN_COUNT'
    prespawn_count = Integer(
        None, config=True, allow_none=True,
        help='Number of kernels to prespawn using the default language. No prespawn by default. (KG_PRESPAWN_COUNT env var)'
    )

    @default('prespawn_count')
    def prespawn_count_default(self):
        val = os.getenv(self.prespawn_count_env)
        return val if val is None else int(val)

    default_kernel_name_env = 'KG_DEFAULT_KERNEL_NAME'
    default_kernel_name = Unicode(
        config=True,
        help='Default kernel name when spawning a kernel (KG_DEFAULT_KERNEL_NAME env var)'
    )

    @default('default_kernel_name')
    def default_kernel_name_default(self):
        # defaults to Jupyter's default kernel name on empty string
        return os.getenv(self.default_kernel_name_env, '')

    force_kernel_name_env = 'KG_FORCE_KERNEL_NAME'
    force_kernel_name = Unicode(
        config=True,
        help='Override any kernel name specified in a notebook or request (KG_FORCE_KERNEL_NAME env var)'
    )

    @default('force_kernel_name')
    def force_kernel_name_default(self):
        return os.getenv(self.force_kernel_name_env, '')

    env_process_whitelist_env = 'KG_ENV_PROCESS_WHITELIST'
    env_process_whitelist = List(
        config=True,
        help="""Environment variables allowed to be inherited
        from the spawning process by the kernel"""
    )

    @default('env_process_whitelist')
    def env_process_whitelist_default(self):
        # NOTE(review): an unset env var yields [''] rather than [] here;
        # downstream consumers presumably tolerate the empty entry — confirm.
        return os.getenv(self.env_process_whitelist_env, '').split(',')

    api_env = 'KG_API'
    api_default_value = 'kernel_gateway.jupyter_websocket'
    api = Unicode(
        api_default_value, config=True,
        help="""Controls which API to expose, that of a Jupyter notebook server, the seed
        notebook's, or one provided by another module, respectively using values
        'kernel_gateway.jupyter_websocket', 'kernel_gateway.notebook_http', or
        another fully qualified module name (KG_API env var)
        """
    )

    @default('api')
    def api_default(self):
        return os.getenv(self.api_env, self.api_default_value)

    @observe('api')
    def api_changed(self, event):
        # Validate eagerly so a bad module name fails at configuration time,
        # not at first request.
        try:
            self._load_api_module(event['new'])
        except ImportError:
            # re-raise with more sensible message to help the user
            raise ImportError('API module {} not found'.format(event['new']))

    certfile_env = 'KG_CERTFILE'
    certfile = Unicode(
        None, config=True, allow_none=True,
        help="""The full path to an SSL/TLS certificate file. (KG_CERTFILE env var)"""
    )

    @default('certfile')
    def certfile_default(self):
        return os.getenv(self.certfile_env)

    keyfile_env = 'KG_KEYFILE'
    keyfile = Unicode(
        None, config=True, allow_none=True,
        help="""The full path to a private key file for usage with SSL/TLS. (KG_KEYFILE env var)"""
    )

    @default('keyfile')
    def keyfile_default(self):
        return os.getenv(self.keyfile_env)

    client_ca_env = 'KG_CLIENT_CA'
    client_ca = Unicode(
        None, config=True, allow_none=True,
        help="""The full path to a certificate authority certificate for SSL/TLS client authentication. (KG_CLIENT_CA env var)"""
    )

    @default('client_ca')
    def client_ca_default(self):
        return os.getenv(self.client_ca_env)

    ssl_version_env = 'KG_SSL_VERSION'
    ssl_version_default_value = ssl.PROTOCOL_TLSv1_2
    ssl_version = Integer(
        None, config=True, allow_none=True,
        help="""Sets the SSL version to use for the web socket connection. (KG_SSL_VERSION env var)"""
    )

    @default('ssl_version')
    def ssl_version_default(self):
        # None here means "fall back to ssl_version_default_value", applied
        # later in _build_ssl_options().
        ssl_from_env = os.getenv(self.ssl_version_env)
        return ssl_from_env if ssl_from_env is None else int(ssl_from_env)

    # Pluggable manager classes; instances are created in init_configurables().
    kernel_spec_manager = Instance(KernelSpecManager, allow_none=True)

    kernel_spec_manager_class = Type(
        default_value=KernelSpecManager,
        config=True,
        help="""
        The kernel spec manager class to use. Should be a subclass
        of `jupyter_client.kernelspec.KernelSpecManager`.
        """
    )

    kernel_manager_class = Type(
        klass=MappingKernelManager,
        default_value=SeedingMappingKernelManager,
        config=True,
        help="""The kernel manager class to use."""
    )

    def _load_api_module(self, module_name):
        """Tries to import the given module name.

        Parameters
        ----------
        module_name: str
            Module name to import

        Returns
        -------
        module
            Module with the given name loaded using importlib.import_module
        """
        # some compatibility allowances
        if module_name == 'jupyter-websocket':
            module_name = 'kernel_gateway.jupyter_websocket'
        elif module_name == 'notebook-http':
            module_name = 'kernel_gateway.notebook_http'
        return importlib.import_module(module_name)

    def _load_notebook(self, uri):
        """Loads a notebook from the local filesystem or HTTP(S) URL.

        Raises
        ------
        RuntimeError if there is no kernel spec matching the one specified in
        the notebook or forced via configuration.

        Returns
        -------
        object
            Notebook object from nbformat
        """
        parts = urlparse(uri)

        if parts.scheme not in ('http', 'https'):
            # Local file: strip scheme/netloc so e.g. file:// URIs resolve.
            path = parts._replace(scheme='', netloc='').geturl()
            with open(path) as nb_fh:
                notebook = nbformat.read(nb_fh, 4)
        else:
            # Remote file
            import requests
            resp = requests.get(uri)
            resp.raise_for_status()
            notebook = nbformat.reads(resp.text, 4)

        # Error if no kernel spec can handle the language requested
        kernel_name = self.force_kernel_name if self.force_kernel_name \
            else notebook['metadata']['kernelspec']['name']
        self.kernel_spec_manager.get_kernel_spec(kernel_name)

        return notebook

    def initialize(self, argv=None):
        """Initializes the base class, configurable manager instances, the
        Tornado web app, and the tornado HTTP server.

        Parameters
        ----------
        argv
            Command line arguments
        """
        super(KernelGatewayApp, self).initialize(argv)
        self.init_configurables()
        self.init_webapp()
        self.init_http_server()

    def init_configurables(self):
        """Initializes all configurable objects including a kernel manager, kernel
        spec manager, session manager, and personality.

        Any kernel pool configured by the personality will be its responsibility
        to shut down.

        Optionally, loads a notebook and prespawns the configured number of
        kernels.
        """
        # Provisional spec manager so _load_notebook() below can validate the
        # seed notebook's kernel spec; replaced by the configurable class
        # a few lines further down.
        self.kernel_spec_manager = KernelSpecManager(parent=self)

        self.seed_notebook = None
        if self.seed_uri is not None:
            # Note: must be set before instantiating a SeedingMappingKernelManager
            self.seed_notebook = self._load_notebook(self.seed_uri)

        # Only pass a default kernel name when one is provided. Otherwise,
        # adopt whatever default the kernel manager wants to use.
        kwargs = {}
        if self.default_kernel_name:
            kwargs['default_kernel_name'] = self.default_kernel_name

        self.kernel_spec_manager = self.kernel_spec_manager_class(
            parent=self,
        )

        self.kernel_manager = self.kernel_manager_class(
            parent=self,
            log=self.log,
            connection_dir=self.runtime_dir,
            kernel_spec_manager=self.kernel_spec_manager,
            **kwargs
        )

        self.session_manager = SessionManager(
            log=self.log,
            kernel_manager=self.kernel_manager
        )
        self.contents_manager = None

        # Sanity-check prespawn against the kernel cap before the personality
        # starts spawning anything.
        if self.prespawn_count:
            if self.max_kernels and self.prespawn_count > self.max_kernels:
                raise RuntimeError(
                    'cannot prespawn {}; more than max kernels {}'.format(
                        self.prespawn_count, self.max_kernels))

        # The API module must expose a create_personality() factory.
        api_module = self._load_api_module(self.api)
        func = getattr(api_module, 'create_personality')
        self.personality = func(parent=self, log=self.log)

        self.personality.init_configurables()

    def init_webapp(self):
        """Initializes Tornado web application with uri handlers.

        Adds the various managers and web-front configuration values to the
        Tornado settings for reference by the handlers.
        """
        # Enable the same pretty logging the notebook uses
        enable_pretty_logging()

        # Configure the tornado logging level too
        logging.getLogger().setLevel(self.log_level)

        handlers = self.personality.create_request_handlers()

        self.web_app = web.Application(
            handlers=handlers,
            kernel_manager=self.kernel_manager,
            session_manager=self.session_manager,
            contents_manager=self.contents_manager,
            kernel_spec_manager=self.kernel_spec_manager,
            kg_auth_token=self.auth_token,
            kg_allow_credentials=self.allow_credentials,
            kg_allow_headers=self.allow_headers,
            kg_allow_methods=self.allow_methods,
            kg_allow_origin=self.allow_origin,
            kg_expose_headers=self.expose_headers,
            kg_max_age=self.max_age,
            kg_max_kernels=self.max_kernels,
            kg_env_process_whitelist=self.env_process_whitelist,
            kg_api=self.api,
            kg_personality=self.personality,
            # Also set the allow_origin setting used by notebook so that the
            # check_origin method used everywhere respects the value
            allow_origin=self.allow_origin,
            # Set base_url for use in request handlers
            base_url=self.base_url,
            # Always allow remote access (has been limited to localhost >= notebook 5.6)
            allow_remote_access=True
        )

        # promote the current personality's "config" tagged traitlet values to webapp settings
        for trait_name, trait_value in self.personality.class_traits(config=True).items():
            kg_name = 'kg_' + trait_name
            # a personality's traitlets may not overwrite the kernel gateway's
            if kg_name not in self.web_app.settings:
                self.web_app.settings[kg_name] = trait_value.get(obj=self.personality)
            else:
                self.log.warning(
                    'The personality trait name, %s, conflicts with a kernel gateway trait.',
                    trait_name)

    def _build_ssl_options(self):
        """Build a dictionary of SSL options for the tornado HTTP server.

        Taken directly from jupyter/notebook code.
        """
        ssl_options = {}
        if self.certfile:
            ssl_options['certfile'] = self.certfile
        if self.keyfile:
            ssl_options['keyfile'] = self.keyfile
        if self.client_ca:
            ssl_options['ca_certs'] = self.client_ca
        if self.ssl_version:
            ssl_options['ssl_version'] = self.ssl_version
        if not ssl_options:
            # None indicates no SSL config
            ssl_options = None
        else:
            # SSL requested but no explicit version: apply the class default.
            ssl_options.setdefault('ssl_version', self.ssl_version_default_value)
            if ssl_options.get('ca_certs', False):
                # A client CA implies mutual TLS: require client certificates.
                ssl_options.setdefault('cert_reqs', ssl.CERT_REQUIRED)
        return ssl_options

    def init_http_server(self):
        """Initializes a HTTP server for the Tornado web application on the
        configured interface and port.

        Tries to find an open port if the one configured is not available using
        the same logic as the Jupyer Notebook server.
        """
        ssl_options = self._build_ssl_options()
        self.http_server = httpserver.HTTPServer(self.web_app,
                                                 xheaders=self.trust_xheaders,
                                                 ssl_options=ssl_options)

        # Walk candidate ports; in-use and permission errors move on to the
        # next candidate, anything else is fatal.
        for port in random_ports(self.port, self.port_retries + 1):
            try:
                self.http_server.listen(port, self.ip)
            except socket.error as e:
                if e.errno == errno.EADDRINUSE:
                    self.log.info(
                        'The port %i is already in use, trying another port.'
                        % port)
                    continue
                elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
                    self.log.warning("Permission to listen on port %i denied" % port)
                    continue
                else:
                    raise
            else:
                self.port = port
                break
        else:
            # for/else: every candidate port failed.
            self.log.critical(
                'ERROR: the notebook server could not be started because '
                'no available port could be found.')
            self.exit(1)

    def start(self):
        """Starts an IO loop for the application."""
        super(KernelGatewayApp, self).start()

        self.log.info('Jupyter Kernel Gateway at http{}://{}:{}'.format(
            's' if self.keyfile else '', self.ip, self.port
        ))

        self.io_loop = ioloop.IOLoop.current()

        if sys.platform != 'win32':
            # SIGHUP (e.g. terminal hangup) should not kill the gateway.
            signal.signal(signal.SIGHUP, signal.SIG_IGN)

        signal.signal(signal.SIGTERM, self._signal_stop)

        try:
            self.io_loop.start()
        except KeyboardInterrupt:
            self.log.info("Interrupted...")
        finally:
            self.shutdown()

    def stop(self):
        """
        Stops the HTTP server and IO loop associated with the application.
        """
        # Schedule on the loop so the stop happens on the IO-loop thread.
        def _stop():
            self.http_server.stop()
            self.io_loop.stop()
        self.io_loop.add_callback(_stop)

    def shutdown(self):
        """Stop all kernels in the pool."""
        self.personality.shutdown()

    def _signal_stop(self, sig, frame):
        # Signal handler installed for SIGTERM in start().
        self.log.info("Received signal to terminate.")
        self.io_loop.stop()
class ZMQInteractiveShell(InteractiveShell):
    """A subclass of InteractiveShell for ZMQ."""

    displayhook_class = Type(ZMQShellDisplayHook)
    display_pub_class = Type(ZMQDisplayPublisher)
    data_pub_class = Type('ipykernel.datapub.ZMQDataPublisher')
    kernel = Any()
    parent_header = Any()

    def _banner1_default(self):
        # Dynamic default for the banner1 traitlet.
        return default_banner

    # Override the traitlet in the parent class, because there's no point using
    # readline for the kernel. Can be removed when the readline code is moved
    # to the terminal frontend.
    colors_force = CBool(True)
    readline_use = CBool(False)
    # autoindent has no meaning in a zmqshell, and attempting to enable it
    # will print a warning in the absence of readline.
    autoindent = CBool(False)

    exiter = Instance(ZMQExitAutocall)

    def _exiter_default(self):
        return ZMQExitAutocall(self)

    def _exit_now_changed(self, name, old, new):
        """stop eventloop when exit_now fires"""
        if new:
            loop = ioloop.IOLoop.instance()
            # Short delay gives the reply message a chance to flush first.
            loop.add_timeout(time.time()+0.1, loop.stop)

    keepkernel_on_exit = None

    # Over ZeroMQ, GUI control isn't done with PyOS_InputHook as there is no
    # interactive input being read; we provide event loop support in ipkernel
    @staticmethod
    def enable_gui(gui):
        from .eventloops import enable_gui as real_enable_gui
        try:
            real_enable_gui(gui)
        except ValueError as e:
            # Surface unsupported GUI names as a user-level error.
            raise UsageError("%s" % e)

    def init_environment(self):
        """Configure the user's environment."""
        env = os.environ
        # These two ensure 'ls' produces nice coloring on BSD-derived systems
        env['TERM'] = 'xterm-color'
        env['CLICOLOR'] = '1'
        # Since normal pagers don't work at all (over pexpect we don't have
        # single-key control of the subprocess), try to disable paging in
        # subprocesses as much as possible.
        env['PAGER'] = 'cat'
        env['GIT_PAGER'] = 'cat'

    def init_hooks(self):
        super(ZMQInteractiveShell, self).init_hooks()
        # Route pager output through the payload mechanism (priority 99).
        self.set_hook('show_in_pager', page.as_hook(payloadpage.page), 99)

    def init_data_pub(self):
        """Delay datapub init until request, for deprecation warnings"""
        pass

    @property
    def data_pub(self):
        # Lazily construct the deprecated data publisher on first access.
        if not hasattr(self, '_data_pub'):
            warnings.warn("InteractiveShell.data_pub is deprecated outside IPython parallel.",
                          DeprecationWarning, stacklevel=2)
            self._data_pub = self.data_pub_class(parent=self)
            self._data_pub.session = self.display_pub.session
            self._data_pub.pub_socket = self.display_pub.pub_socket
        return self._data_pub

    @data_pub.setter
    def data_pub(self, pub):
        self._data_pub = pub

    def ask_exit(self):
        """Engage the exit actions."""
        self.exit_now = (not self.keepkernel_on_exit)
        payload = dict(
            source='ask_exit',
            keepkernel=self.keepkernel_on_exit,
        )
        self.payload_manager.write_payload(payload)

    def _showtraceback(self, etype, evalue, stb):
        # try to preserve ordering of tracebacks and print statements
        sys.stdout.flush()
        sys.stderr.flush()

        exc_content = {
            u'traceback': stb,
            u'ename': unicode_type(etype.__name__),
            u'evalue': py3compat.safe_unicode(evalue),
        }

        dh = self.displayhook
        # Send exception info over pub socket for other clients than the caller
        # to pick up
        topic = None
        if dh.topic:
            topic = dh.topic.replace(b'execute_result', b'error')

        exc_msg = dh.session.send(dh.pub_socket, u'error',
                                  json_clean(exc_content),
                                  dh.parent_header, ident=topic)

        # FIXME - Hack: store exception info in shell object.  Right now, the
        # caller is reading this info after the fact, we need to fix this logic
        # to remove this hack.  Even uglier, we need to store the error status
        # here, because in the main loop, the logic that sets it is being
        # skipped because runlines swallows the exceptions.
        exc_content[u'status'] = u'error'
        self._reply_content = exc_content
        # /FIXME

        return exc_content

    def set_next_input(self, text, replace=False):
        """Send the specified text to the frontend to be presented at the next
        input cell."""
        payload = dict(
            source='set_next_input',
            text=text,
            replace=replace,
        )
        self.payload_manager.write_payload(payload)

    def set_parent(self, parent):
        """Set the parent header for associating output with its triggering input"""
        self.parent_header = parent
        self.displayhook.set_parent(parent)
        self.display_pub.set_parent(parent)
        # Only touch data_pub if it was already created, to avoid triggering
        # the deprecated lazy construction in the property above.
        if hasattr(self, '_data_pub'):
            self.data_pub.set_parent(parent)
        # stdout/stderr may be plain streams without set_parent; ignore those.
        try:
            sys.stdout.set_parent(parent)
        except AttributeError:
            pass
        try:
            sys.stderr.set_parent(parent)
        except AttributeError:
            pass

    def get_parent(self):
        return self.parent_header

    #-------------------------------------------------------------------------
    # Things related to magics
    #-------------------------------------------------------------------------

    def init_magics(self):
        super(ZMQInteractiveShell, self).init_magics()
        self.register_magics(KernelMagics)
        self.magics_manager.register_alias('ed', 'edit')
class InteractiveShellEmbed(TerminalInteractiveShell):
    """A terminal shell that can be embedded at an arbitrary call site and
    activated/deactivated per call location."""

    dummy_mode = Bool(False)
    exit_msg = Unicode('')
    embedded = CBool(True)
    should_raise = CBool(False)
    # Like the base class display_banner is not configurable, but here it
    # is True by default.
    display_banner = CBool(True)
    exit_msg = Unicode()

    # Class-level registry of call locations ("file:line") where embedding
    # has been switched off; shared by all instances.
    _inactive_locations = set()

    @property
    def embedded_active(self):
        return self._call_location_id not in InteractiveShellEmbed._inactive_locations

    @embedded_active.setter
    def embedded_active(self, value):
        if value:
            # Re-activate: drop this location from the inactive registry.
            if self._call_location_id in InteractiveShellEmbed._inactive_locations:
                InteractiveShellEmbed._inactive_locations.remove(
                    self._call_location_id)
        else:
            InteractiveShellEmbed._inactive_locations.add(
                self._call_location_id)

    def __init__(self, **kw):
        if kw.get('user_global_ns', None) is not None:
            raise DeprecationWarning(
                "Key word argument `user_global_ns` has been replaced by `user_module` since IPython 4.0.")

        self._call_location_id = kw.pop('_call_location_id', None)

        super(InteractiveShellEmbed, self).__init__(**kw)

        if not self._call_location_id:
            # Identify the embedding call site as "filename:lineno".
            frame = sys._getframe(1)
            self._call_location_id = '%s:%s' % (frame.f_code.co_filename,
                                                frame.f_lineno)
        # don't use the ipython crash handler so that user exceptions aren't
        # trapped
        sys.excepthook = ultratb.FormattedTB(color_scheme=self.colors,
                                             mode=self.xmode,
                                             call_pdb=self.pdb)

    def init_sys_modules(self):
        # Embedded shells must not touch sys.modules the way a full app does.
        pass

    def init_magics(self):
        super(InteractiveShellEmbed, self).init_magics()
        self.register_magics(EmbeddedMagics)

    def __call__(self, header='', local_ns=None, module=None, dummy=None,
                 stack_depth=1, global_ns=None, compile_flags=None):
        """Activate the interactive interpreter.

        __call__(self,header='',local_ns=None,module=None,dummy=None) -> Start
        the interpreter shell with the given local and global namespaces, and
        optionally print a header string at startup.

        The shell can be globally activated/deactivated using the
        dummy_mode attribute. This allows you to turn off a shell used
        for debugging globally.

        However, *each* time you call the shell you can override the current
        state of dummy_mode with the optional keyword parameter 'dummy'. For
        example, if you set dummy mode on with IPShell.dummy_mode = True, you
        can still have a specific call work by making it as IPShell(dummy=False).
        """
        # If the user has turned it off, go away
        if not self.embedded_active:
            return

        # Normal exits from interactive mode set this flag, so the shell can't
        # re-enter (it checks this variable at the start of interactive mode).
        self.exit_now = False

        # Allow the dummy parameter to override the global __dummy_mode
        if dummy or (dummy != 0 and self.dummy_mode):
            return

        if self.has_readline:
            self.set_readline_completer()

        # self.banner is auto computed
        if header:
            self.old_banner2 = self.banner2
            self.banner2 = self.banner2 + '\n' + header + '\n'
        else:
            self.old_banner2 = ''

        if self.display_banner:
            self.show_banner()

        # Call the embedding code with a stack depth of 1 so it can skip over
        # our call and get the original caller's namespaces.
        self.mainloop(local_ns, module, stack_depth=stack_depth,
                      global_ns=global_ns, compile_flags=compile_flags)

        self.banner2 = self.old_banner2

        if self.exit_msg is not None:
            print(self.exit_msg)

        if self.should_raise:
            raise KillEmbeded('Embedded IPython raising error, as user requested.')

    def mainloop(self, local_ns=None, module=None, stack_depth=0,
                 display_banner=None, global_ns=None, compile_flags=None):
        """Embeds IPython into a running python program.

        Parameters
        ----------

        local_ns, module
          Working local namespace (a dict) and module (a module or similar
          object). If given as None, they are automatically taken from the scope
          where the shell was called, so that program variables become visible.

        stack_depth : int
          How many levels in the stack to go to looking for namespaces (when
          local_ns or module is None). This allows an intermediate caller to
          make sure that this function gets the namespace from the intended
          level in the stack. By default (0) it will get its locals and globals
          from the immediate caller.

        compile_flags
          A bit field identifying the __future__ features
          that are enabled, as passed to the builtin :func:`compile` function.
          If given as None, they are automatically taken from the scope where
          the shell was called.

        """

        if (global_ns is not None) and (module is None):
            raise DeprecationWarning(
                "'global_ns' keyword argument is deprecated, and has been removed in IPython 5.0 use `module` keyword argument instead.")

        if (display_banner is not None):
            warnings.warn(
                "The display_banner parameter is deprecated since IPython 4.0",
                DeprecationWarning)

        # Get locals and globals from caller
        if ((local_ns is None or module is None or compile_flags is None)
                and self.default_user_namespaces):
            call_frame = sys._getframe(stack_depth).f_back

            if local_ns is None:
                local_ns = call_frame.f_locals
            if module is None:
                global_ns = call_frame.f_globals
                try:
                    module = sys.modules[global_ns['__name__']]
                except KeyError:
                    warnings.warn("Failed to get module %s" % \
                        global_ns.get('__name__', 'unknown module')
                    )
                    # Fall back to a stand-in module wrapping the caller's globals.
                    module = DummyMod()
                    module.__dict__ = global_ns
            if compile_flags is None:
                compile_flags = (call_frame.f_code.co_flags &
                                 compilerop.PyCF_MASK)

        # Save original namespace and module so we can restore them after
        # embedding; otherwise the shell doesn't shut down correctly.
        orig_user_module = self.user_module
        orig_user_ns = self.user_ns
        orig_compile_flags = self.compile.flags

        # Update namespaces and fire up interpreter

        # The global one is easy, we can just throw it in
        if module is not None:
            self.user_module = module

        # But the user/local one is tricky: ipython needs it to store internal
        # data, but we also need the locals. We'll throw our hidden variables
        # like _ih and get_ipython() into the local namespace, but delete them
        # later.
        if local_ns is not None:
            reentrant_local_ns = {
                k: v
                for (k, v) in local_ns.items()
                if k not in self.user_ns_hidden.keys()
            }
            self.user_ns = reentrant_local_ns
            self.init_user_ns()

        # Compiler flags
        if compile_flags is not None:
            self.compile.flags = compile_flags

        # make sure the tab-completer has the correct frame information, so it
        # actually completes using the frame's locals/globals
        self.set_completer_frame()

        with self.builtin_trap, self.display_trap:
            self.interact()

        # now, purge out the local namespace of IPython's hidden variables.
        if local_ns is not None:
            local_ns.update({
                k: v
                for (k, v) in self.user_ns.items()
                if k not in self.user_ns_hidden.keys()
            })

        # Restore original namespace so shell can shut down when we exit.
        self.user_module = orig_user_module
        self.user_ns = orig_user_ns
        self.compile.flags = orig_compile_flags
class PyDevTerminalInteractiveShell(TerminalInteractiveShell):
    """IPython terminal shell adapted to run inside the PyDev (Eclipse)
    console, where the IDE's Java side owns readline-style editing,
    completion display, and process control."""

    banner1 = Unicode(default_pydev_banner, config=True,
        help="""The part of the banner to be printed before the profile"""
    )

    # TODO term_title: (can PyDev's title be changed???, see terminal.py for where
    # to inject code, in particular set_term_title as used by %cd)
    # for now, just disable term_title
    term_title = CBool(False)

    # Note in version 0.11 there is no guard in the IPython code about displaying a
    # warning, so with 0.11 you get:
    #  WARNING: Readline services not available or not loaded.
    #  WARNING: The auto-indent feature requires the readline library
    # Disable readline, readline type code is all handled by PyDev (on Java side)
    readline_use = CBool(False)
    # autoindent has no meaning in PyDev (PyDev always handles that on the Java side),
    # and attempting to enable it will print a warning in the absence of readline.
    autoindent = CBool(False)
    # Force console to not give warning about color scheme choice and default to
    # NoColor.
    # TODO It would be nice to enable colors in PyDev but:
    # - The PyDev Console (Eclipse Console) does not support the full range of colors, so the
    #   effect isn't as nice anyway at the command line
    # - If done, the color scheme should default to LightBG, but actually be dependent on
    #   any settings the user has (such as if a dark theme is in use, then Linux is probably
    #   a better theme).
    colors_force = CBool(True)
    colors = Unicode("NoColor")
    # Since IPython 5 the terminal interface is not compatible with Emacs `inferior-shell` and
    # the `simple_prompt` flag is needed
    simple_prompt = CBool(True)

    # In the PyDev Console, GUI control is done via hookable XML-RPC server
    @staticmethod
    def enable_gui(gui=None, app=None):
        """Switch amongst GUI input hooks by name.
        """
        # Deferred import
        from pydev_ipython.inputhook import enable_gui as real_enable_gui
        try:
            return real_enable_gui(gui, app)
        except ValueError as e:
            # Report unsupported GUI toolkits as a user-level error.
            raise UsageError("%s" % e)

    #-------------------------------------------------------------------------
    # Things related to hooks
    #-------------------------------------------------------------------------

    def init_hooks(self):
        super(PyDevTerminalInteractiveShell, self).init_hooks()
        self.set_hook('show_in_pager', show_in_pager)

    #-------------------------------------------------------------------------
    # Things related to exceptions
    #-------------------------------------------------------------------------

    def showtraceback(self, exc_tuple=None, *args, **kwargs):
        # IPython does a lot of clever stuff with Exceptions. However mostly
        # it is related to IPython running in a terminal instead of an IDE.
        # (e.g. it prints out snippets of code around the stack trace)
        # PyDev does a lot of clever stuff too, so leave exception handling
        # with default print_exc that PyDev can parse and do its clever stuff
        # with (e.g. it puts links back to the original source code)
        try:
            if exc_tuple is None:
                etype, value, tb = sys.exc_info()
            else:
                etype, value, tb = exc_tuple
        except ValueError:
            return

        if tb is not None:
            traceback.print_exception(etype, value, tb)

    #-------------------------------------------------------------------------
    # Things related to text completion
    #-------------------------------------------------------------------------

    # The way to construct an IPCompleter changed in most versions,
    # so we have a custom, per version implementation of the construction

    def _new_completer_100(self):
        completer = PyDevIPCompleter(shell=self,
                                     namespace=self.user_ns,
                                     global_namespace=self.user_global_ns,
                                     alias_table=self.alias_manager.alias_table,
                                     use_readline=self.has_readline,
                                     parent=self,
                                     )
        return completer

    def _new_completer_234(self):
        # correct for IPython versions 2.x, 3.x, 4.x
        completer = PyDevIPCompleter(shell=self,
                                     namespace=self.user_ns,
                                     global_namespace=self.user_global_ns,
                                     use_readline=self.has_readline,
                                     parent=self,
                                     )
        return completer

    def _new_completer_500(self):
        completer = PyDevIPCompleter(shell=self,
                                     namespace=self.user_ns,
                                     global_namespace=self.user_global_ns,
                                     use_readline=False,
                                     parent=self
                                     )
        return completer

    def _new_completer_600(self):
        completer = PyDevIPCompleter6(shell=self,
                                      namespace=self.user_ns,
                                      global_namespace=self.user_global_ns,
                                      use_readline=False,
                                      parent=self
                                      )
        return completer

    def add_completer_hooks(self):
        from IPython.core.completerlib import module_completer, magic_run_completer, cd_completer
        try:
            from IPython.core.completerlib import reset_completer
        except ImportError:
            # reset_completer was added for rel-0.13
            reset_completer = None
        self.configurables.append(self.Completer)

        # Add custom completers to the basic ones built into IPCompleter
        sdisp = self.strdispatchers.get('complete_command', StrDispatch())
        self.strdispatchers['complete_command'] = sdisp
        self.Completer.custom_completers = sdisp

        self.set_hook('complete_command', module_completer, str_key='import')
        self.set_hook('complete_command', module_completer, str_key='from')
        self.set_hook('complete_command', magic_run_completer, str_key='%run')
        self.set_hook('complete_command', cd_completer, str_key='%cd')
        if reset_completer:
            self.set_hook('complete_command', reset_completer, str_key='%reset')

    def init_completer(self):
        """Initialize the completion machinery.

        This creates a completer that provides the completions that are
        IPython specific. We use this to supplement PyDev's core code
        completions.
        """
        # PyDev uses its own completer and custom hooks so that it uses
        # most completions from PyDev's core completer which provides
        # extra information.
        # See getCompletions for where the two sets of results are merged

        # Dispatch on the running IPython's major version (newest first).
        if IPythonRelease._version_major >= 6:
            self.Completer = self._new_completer_600()
        elif IPythonRelease._version_major >= 5:
            self.Completer = self._new_completer_500()
        elif IPythonRelease._version_major >= 2:
            self.Completer = self._new_completer_234()
        elif IPythonRelease._version_major >= 1:
            self.Completer = self._new_completer_100()

        if hasattr(self.Completer, 'use_jedi'):
            # Jedi-based completion is handled by PyDev itself.
            self.Completer.use_jedi = False

        self.add_completer_hooks()

        if IPythonRelease._version_major <= 3:
            # Only configure readline if we truly are using readline.  IPython can
            # do tab-completion over the network, in GUIs, etc, where readline
            # itself may be absent
            if self.has_readline:
                self.set_readline_completer()

    #-------------------------------------------------------------------------
    # Things related to aliases
    #-------------------------------------------------------------------------

    def init_alias(self):
        # InteractiveShell defines alias's we want, but TerminalInteractiveShell defines
        # ones we don't. So don't use super and instead go right to InteractiveShell
        InteractiveShell.init_alias(self)

    #-------------------------------------------------------------------------
    # Things related to exiting
    #-------------------------------------------------------------------------

    def ask_exit(self):
        """ Ask the shell to exit. Can be overiden and used as a callback. """
        # TODO PyDev's console does not have support from the Python side to exit
        # the console. If user forces the exit (with sys.exit()) then the console
        # simply reports errors. e.g.:
        # >>> import sys
        # >>> sys.exit()
        # Failed to create input stream: Connection refused
        # >>>
        # Console already exited with value: 0 while waiting for an answer.
        # Error stream:
        # Output stream:
        # >>>
        #
        # Alternatively if you use the non-IPython shell this is what happens
        # >>> exit()
        # <type 'exceptions.SystemExit'>:None
        # >>>
        # <type 'exceptions.SystemExit'>:None
        # >>>
        #
        # super(PyDevTerminalInteractiveShell, self).ask_exit()
        print('To exit the PyDev Console, terminate the console within IDE.')

    #-------------------------------------------------------------------------
    # Things related to magics
    #-------------------------------------------------------------------------

    def init_magics(self):
        super(PyDevTerminalInteractiveShell, self).init_magics()
class RepresentationViewer(DOMWidget):
    """Jupyter widget that displays 3D scenes through WebGL."""

    # Front-end view/model binding.
    _view_name = Unicode('ChemviewView').tag(sync=True)
    _model_name = Unicode('ChemviewModel').tag(sync=True)
    _view_module = Unicode('jupyter-widget-chemview').tag(sync=True)
    _model_module = Unicode('jupyter-widget-chemview').tag(sync=True)

    width = CInt(sync=True)
    height = CInt(sync=True)
    background = CInt(sync=True)

    # Update Camera Hack
    camera_str = CUnicode(sync=True)
    static_moving = CBool(sync=True)

    # Helper: flipped to True by the JS side once it has finished loading.
    loaded = CBool(False, sync=True)

    def __init__(self, width=500, height=500):
        '''RepresentationViewer is an IPython notebook widget useful to display
        3d scenes through webgl.

        Example:

        .. code::

            from IPython.display import display

            rv = RepresentationViewer()
            rv.add_representation('point', {'coordinates': coordinates,
                                            'colors': colors,
                                            'sizes': sizes})
            display(rv)

        .. py:attribute: width
            Width in pixels of the IPython widget
        .. py:attribute: height
            Height in pixels of the IPython widget
        .. py:attribute: camera_str
            A string-representation of camera position and orientation
        .. py:attribute: static_moving
            Set to True to make the camera lose the "bouncy" rotation.
        '''
        super(RepresentationViewer, self).__init__()
        self.displayed = False
        self.width = width
        self.height = height

        # Store the events sent from the javascript side
        self._event_handlers = defaultdict(list)

        # What to do when we export
        def callback(content):
            display(Image(url=content.get('dataUrl')))

        self._connect_event('displayImg', callback)

        # A record of the representations added so far, keyed by rep_id.
        self.representations = {}

        # Things to be called when the js part is done loading
        self._displayed_callbacks = []

        def on_loaded(name, old, new):
            for cb in self._displayed_callbacks:
                cb(self)

        self.on_trait_change(on_loaded, "loaded")

    def add_representation(self, rep_type, options, rep_id=None):
        '''Add a 3D representation to the viewer.  See User Guide for a
        complete description of the representations available.

        :param str rep_type: the kind of representation (e.g. ``'point'``).
        :param dict options: representation parameters; validated/normalized
            by the matching entry in ``checkers`` when one exists.
        :param str rep_id: optional identifier to use; a fresh uuid4 hex
            string is generated when omitted.
        :return: An unique hexadecimal identifier for the representation.
        :rtype: str
        '''
        # Add our unique id to be able to refer to the representation
        if rep_id is None:
            rep_id = uuid4().hex

        if rep_type in checkers:
            options = checkers[rep_type](options)

        self.representations[rep_id] = {'rep_type': rep_type,
                                        'options': options.copy()}
        self._remote_call('addRepresentation', type=rep_type, repId=rep_id,
                          options=options)
        return rep_id

    def remove_representation(self, rep_id):
        '''Remove a representation from the viewer

        :param str rep_id: the unique identifier generated by
                           RepresentationViewer.add_representation
        '''
        self._remote_call('removeRepresentation', repId=rep_id)
        del self.representations[rep_id]

    def update_representation(self, rep_id, options):
        '''Update a representation with new data.

        :param str rep_id: the unique identifier returned by
                           RepresentationViewer.add_representation
        :param dict options: dictionary containing the updated data.
        '''
        self.representations[rep_id]['options'].update(options)
        rep_type = self.representations[rep_id]["rep_type"]

        if rep_type in checkers:
            options = checkers[rep_type](options)

        self._remote_call('updateRepresentation', repId=rep_id,
                          options=options)

    def _connect_event(self, event_name, callback):
        '''Respond to an event sent by the Javascript side.

        Events available:

            - displayImg
            - serialize
            - fullscreen
        '''
        self._event_handlers[event_name].append(callback)

    def _remote_call(self, method_name, **kwargs):
        '''Call a method remotely on the javascript side'''
        msg = {}
        msg['type'] = 'callMethod'
        msg['methodName'] = method_name
        msg['args'] = serialize_to_dict(kwargs)

        if self.displayed is True:
            self.send(msg)  # This will be received with View.on_msg
        else:
            # We should prepare a callback to be
            # called when widget is displayed
            def callback(widget, msg=msg):
                widget.send(msg)

            self._displayed_callbacks.append(callback)

    def _handle_custom_msg(self, content, buffers=None):
        # Handle custom messages sent by the javascript counterpart
        event = content.get('event', '')
        for cb in self._event_handlers[event]:
            cb(content)

    def _ipython_display_(self, **kwargs):
        super(RepresentationViewer, self)._ipython_display_(**kwargs)
        self.displayed = True

    def get_scene(self):
        '''Return a dictionary that uniquely identifies the scene displayed'''
        scene = {}

        # Camera: parse the JSON camera state pushed by the JS side.
        camspec = json.loads(self.camera_str)
        location = np.array([camspec['position']['x'],
                             camspec['position']['y'],
                             camspec['position']['z']], 'float')
        quaternion = np.array([camspec['quaternion']['_x'],
                               camspec['quaternion']['_y'],
                               camspec['quaternion']['_z'],
                               camspec['quaternion']['_w']], 'float')
        target = np.array([camspec['target']['x'],
                           camspec['target']['y'],
                           camspec['target']['z']], 'float')

        scene['camera'] = dict(location=location, quaternion=quaternion,
                               target=target, vfov=camspec['fov'],
                               aspect=camspec['aspect'])
        # Lights: TODO
        scene['lights'] = [
            {'position': np.array([2, 4, -3]) * 1000, 'color': 0xffffff},
            {'position': np.array([-1, 2, 3]) * 1000, 'color': 0xffffff}
        ]

        # Objects.
        # BUGFIX: the previous code used a dict comprehension that never
        # bound the key (`for v in ...items()`), collected the None return
        # values of dict.update(), and finally referenced an undefined name
        # `item`.  Build self-describing copies instead, tagging each entry
        # with its identifier under 'rep_id' so that from_scene() can
        # round-trip the result.
        representations = []
        for rep_id, rep in self.representations.items():
            entry = rep.copy()
            entry['options'] = rep['options'].copy()
            entry['rep_id'] = rep_id
            representations.append(entry)
        scene['representations'] = representations

        scene['background'] = self.background

        return scene

    @classmethod
    def from_scene(cls, scenedict):
        """Build a representation from scenedict"""
        self = cls()
        for rep in scenedict["representations"]:
            self.add_representation(rep["rep_type"], rep["options"],
                                    rep['rep_id'])
        return self

    def autozoom(self, coordinates):
        """Automatically zoom the scene to enclose *coordinates*.

        :param coordinates: array-like of shape (N, 3)
        """
        coordinates = np.array(coordinates, dtype='float32')
        self._remote_call('zoomInto', coordinates=coordinates)
class Viewer(ViewerParent):
    """Viewer widget class."""
    _view_name = Unicode('ViewerView').tag(sync=True)
    _model_name = Unicode('ViewerModel').tag(sync=True)
    _view_module = Unicode('itkwidgets').tag(sync=True)
    _model_module = Unicode('itkwidgets').tag(sync=True)
    _view_module_version = Unicode('^0.31.4').tag(sync=True)
    _model_module_version = Unicode('^0.31.4').tag(sync=True)
    image = ITKImage(
        default_value=None,
        allow_none=True,
        help="Image to visualize.").tag(
        sync=False, **itkimage_serialization)
    rendered_image = ITKImage(
        default_value=None,
        allow_none=True).tag(
        sync=True, **itkimage_serialization)
    _rendering_image = CBool(
        default_value=False,
        help="We are currently volume rendering the image.").tag(sync=True)
    label_image = ITKImage(
        default_value=None,
        allow_none=True,
        help="Label map for the image.").tag(
        sync=False, **itkimage_serialization)
    rendered_label_image = ITKImage(
        default_value=None,
        allow_none=True).tag(
        sync=True, **itkimage_serialization)
    label_image_names = List(
        allow_none=True,
        default_value=None,
        help="Names for labels in the label map.").tag(
        trait=Tuple(), sync=True)
    label_image_blend = CFloat(
        default_value=0.5,
        help="Blend of the label map with the intensity image.").tag(sync=True)
    label_image_weights = NDArray(dtype=np.float32, default_value=None,
                                  allow_none=True,
                                  help="Weights, from 0.0 to 1.0, for every label in the label map.")\
        .tag(sync=True, **array_serialization)\
        .valid(shape_constraints(None,))
    interpolation = CBool(
        default_value=True,
        help="Use linear interpolation in slicing planes.").tag(sync=True)
    cmap = List(
        default_value=None,
        allow_none=True,
    ).tag(trait=Colormap('Viridis (matplotlib)', allow_none=True), sync=True)
    # BUGFIX: _custom_cmap was declared twice with identical definitions;
    # only one declaration is kept.
    _custom_cmap = NDArray(dtype=np.float32, default_value=None,
                           allow_none=True,
                           help="RGB triples from 0.0 to 1.0 that define a custom linear, sequential colormap")\
        .tag(sync=True, **array_serialization)\
        .valid(shape_constraints(None, 3))
    lut = LookupTable('glasbey',
                      help='Lookup table for the label map.').tag(sync=True)
    shadow = CBool(
        default_value=True,
        help="Use shadowing in the volume rendering.").tag(sync=True)
    slicing_planes = CBool(
        default_value=False,
        help="Display the slicing planes in volume rendering view mode.").tag(
        sync=True)
    x_slice = CFloat(
        default_value=None,
        allow_none=True,
        help="World-space position of the X slicing plane.").tag(sync=True)
    y_slice = CFloat(
        default_value=None,
        allow_none=True,
        help="World-space position of the Y slicing plane.").tag(sync=True)
    z_slice = CFloat(
        default_value=None,
        allow_none=True,
        help="World-space position of the Z slicing plane.").tag(sync=True)
    clicked_slice_point = ImagePointTrait(
        default_value=None,
        allow_none=True,
        help="Data for the point clicked on an image slice.").tag(
        sync=True, **image_point_serialization)
    gradient_opacity = CFloat(
        default_value=0.2,
        help="Volume rendering gradient opacity, from (0.0, 1.0]").tag(sync=True)
    opacity_gaussians = List(
        default_value=None,
        allow_none=True,
        help="Volume opacity transfer function Gaussians parameters.").tag(sync=True)
    channels = List(
        default_value=None,
        allow_none=True,
        help="Components or channels enabled in a multi-component image.").tag(
        trait=CBool(), sync=True)
    blend_mode = CaselessStrEnum(
        ('composite', 'max', 'min', 'average'),
        default_value='composite',
        help="Volume rendering blend mode").tag(sync=True)
    roi = NDArray(dtype=np.float64,
                  default_value=np.zeros((2, 3), dtype=np.float64),
                  help="Region of interest: [[lower_x, lower_y, lower_z), (upper_x, upper_y, upper_z]]")\
        .tag(sync=True, **array_serialization)\
        .valid(shape_constraints(2, 3))
    vmin = List(
        default_value=None,
        allow_none=True,
        help="Value that maps to the minimum of image colormap.").tag(
        trait=CFloat(), sync=True)
    vmax = List(
        default_value=None,
        allow_none=True,
        help="Value that maps to the maximum of image colormap.").tag(
        trait=CFloat(), sync=True)
    _largest_roi = NDArray(dtype=np.float64,
                           default_value=np.zeros((2, 3), dtype=np.float64),
                           help="Largest possible region of interest: "
                           "[[lower_x, lower_y, lower_z), (upper_x, upper_y, upper_z]]")\
        .tag(sync=True, **array_serialization)\
        .valid(shape_constraints(2, 3))
    select_roi = CBool(
        default_value=False,
        help="Enable an interactive region of interest widget for the image.").tag(
        sync=True)
    # Size limits are not synced: they only drive Python-side downsampling.
    size_limit_2d = NDArray(dtype=np.int64,
                            default_value=np.array([1024, 1024], dtype=np.int64),
                            help="Size limit for 2D image visualization.").tag(sync=False)
    size_limit_3d = NDArray(dtype=np.int64,
                            default_value=np.array([192, 192, 192], dtype=np.int64),
                            help="Size limit for 3D image visualization.").tag(sync=False)
    sample_distance = CFloat(
        default_value=0.25,
        help="Normalized volume rendering sample distance.").tag(sync=True)
    _scale_factors = NDArray(dtype=np.uint8,
                             default_value=np.array([1, 1, 1], dtype=np.uint8),
                             help="Image downscaling factors.").tag(
        sync=True, **array_serialization)
    _downsampling = CBool(
        default_value=False,
        help="We are downsampling the image to meet the size limits.").tag(sync=True)
    _reset_crop_requested = CBool(
        default_value=False,
        help="The user requested a reset of the roi.").tag(sync=True)
    units = Unicode(
        '',
        help="Units to display in the scale bar.").tag(
        sync=True)
    # BUGFIX: point_set_representations was declared twice with identical
    # definitions; only one declaration is kept.
    point_set_representations = List(
        default_value=[],
        help="Point set representation").tag(
        trait=Unicode(), sync=True)
    point_sets = PointSetList(
        default_value=None,
        allow_none=True,
        help="Point sets to visualize").tag(
        sync=True, **polydata_list_serialization)
    point_set_colors = NDArray(dtype=np.float32,
                               default_value=np.zeros((0, 3), dtype=np.float32),
                               help="RGB colors for the points sets")\
        .tag(sync=True, **array_serialization)\
        .valid(shape_constraints(None, 3))
    point_set_opacities = NDArray(dtype=np.float32,
                                  default_value=np.zeros((0,), dtype=np.float32),
                                  help="Opacities for the points sets")\
        .tag(sync=True, **array_serialization)\
        .valid(shape_constraints(None,))
    point_set_sizes = NDArray(dtype=np.uint8,
                              default_value=np.zeros((0,), dtype=np.uint8),
                              help="Sizes for the points sets")\
        .tag(sync=True, **array_serialization)\
        .valid(shape_constraints(None,))
    geometries = PolyDataList(
        default_value=None,
        allow_none=True,
        help="Geometries to visualize").tag(
        sync=True, **polydata_list_serialization)
    geometry_colors = NDArray(dtype=np.float32,
                              default_value=np.zeros((0, 3), dtype=np.float32),
                              help="RGB colors for the geometries")\
        .tag(sync=True, **array_serialization)\
        .valid(shape_constraints(None, 3))
    geometry_opacities = NDArray(dtype=np.float32,
                                 default_value=np.zeros((0,), dtype=np.float32),
                                 help="Opacities for the geometries")\
        .tag(sync=True, **array_serialization)\
        .valid(shape_constraints(None,))
    ui_collapsed = CBool(
        default_value=False,
        help="Collapse the built in user interface.").tag(
        sync=True)
    rotate = CBool(
        default_value=False,
        help="Rotate the camera around the scene.").tag(
        sync=True)
    annotations = CBool(
        default_value=True,
        help="Show annotations.").tag(
        sync=True)
    mode = CaselessStrEnum(
        ('x', 'y', 'z', 'v'),
        default_value='v',
        help="View mode: x: x plane, y: y plane, z: z plane, v: volume rendering").tag(
        sync=True)
    camera = NDArray(dtype=np.float32,
                     default_value=np.zeros((3, 3), dtype=np.float32),
                     help="Camera parameters: [[position_x, position_y, position_z], "
                     "[focal_point_x, focal_point_y, focal_point_z], "
                     "[view_up_x, view_up_y, view_up_z]]")\
        .tag(sync=True, **array_serialization)\
        .valid(shape_constraints(3, 3))
    background = Tuple(
        allow_none=True,
        default_value=(),
        help="Background color.").tag(trait=CFloat(), sync=True)

    def __init__(self, **kwargs):  # noqa: C901
        # Normalize user-supplied point-set / geometry styling through the
        # same validators that guard trait assignment, so kwargs accept the
        # friendlier forms (scalars, color names, single values).
        if 'point_set_colors' in kwargs:
            proposal = {'value': kwargs['point_set_colors']}
            color_array = self._validate_point_set_colors(proposal)
            kwargs['point_set_colors'] = color_array
        if 'point_set_opacities' in kwargs:
            proposal = {'value': kwargs['point_set_opacities']}
            opacities_array = self._validate_point_set_opacities(proposal)
            kwargs['point_set_opacities'] = opacities_array
        if 'point_set_sizes' in kwargs:
            proposal = {'value': kwargs['point_set_sizes']}
            sizes_array = self._validate_point_set_sizes(proposal)
            kwargs['point_set_sizes'] = sizes_array
        if 'point_set_representations' in kwargs:
            proposal = {'value': kwargs['point_set_representations']}
            representations_list = self._validate_point_set_representations(
                proposal)
            kwargs['point_set_representations'] = representations_list
        self.observe(self._on_point_sets_changed, ['point_sets'])
        if 'geometry_colors' in kwargs:
            proposal = {'value': kwargs['geometry_colors']}
            color_array = self._validate_geometry_colors(proposal)
            kwargs['geometry_colors'] = color_array
        if 'geometry_opacities' in kwargs:
            proposal = {'value': kwargs['geometry_opacities']}
            opacities_array = self._validate_geometry_opacities(proposal)
            kwargs['geometry_opacities'] = opacities_array
        if 'cmap' in kwargs and kwargs['cmap'] is not None:
            proposal = {'value': kwargs['cmap']}
            cmap_list = self._validate_cmap(proposal)
            kwargs['cmap'] = cmap_list
        if 'vmin' in kwargs and kwargs['vmin'] is not None:
            proposal = {'value': kwargs['vmin']}
            vmin_list = self._validate_vmin(proposal)
            kwargs['vmin'] = vmin_list
        if 'vmax' in kwargs and kwargs['vmax'] is not None:
            proposal = {'value': kwargs['vmax']}
            vmax_list = self._validate_vmax(proposal)
            kwargs['vmax'] = vmax_list
        self.observe(self._on_geometries_changed, ['geometries'])

        have_label_image = 'label_image' in kwargs and \
            kwargs['label_image'] is not None
        if have_label_image:
            # Interpolation is not currently supported with label maps
            kwargs['interpolation'] = False

        super(Viewer, self).__init__(**kwargs)

        if not self.image and not self.label_image:
            return
        if self.image:
            image = self.image
        else:
            image = self.label_image
        dimension = image.GetImageDimension()
        largest_region = image.GetLargestPossibleRegion()
        size = largest_region.GetSize()

        # Cache this so we do not need to recompute on it when resetting the
        # roi
        self._largest_roi_rendered_image = None
        self._largest_roi_rendered_label_image = None
        self._largest_roi = np.zeros((2, 3), dtype=np.float64)
        if not np.any(self.roi):
            # Initialize the roi to the full physical extent of the image.
            largest_index = largest_region.GetIndex()
            self.roi[0][:dimension] = np.array(
                image.TransformIndexToPhysicalPoint(largest_index))
            largest_index_upper = largest_index + size
            self.roi[1][:dimension] = np.array(
                image.TransformIndexToPhysicalPoint(largest_index_upper))
            self._largest_roi = self.roi.copy()

        # Decide whether the image exceeds the per-mode size limits and must
        # be downsampled before being sent to the front end.
        if dimension == 2:
            for dim in range(dimension):
                if size[dim] > self.size_limit_2d[dim]:
                    self._downsampling = True
        else:
            for dim in range(dimension):
                if size[dim] > self.size_limit_3d[dim]:
                    self._downsampling = True
        self._update_rendered_image()
        if self._downsampling:
            self.observe(self._on_roi_changed, ['roi'])
            self.observe(self._on_reset_crop_requested,
                         ['_reset_crop_requested'])
        # BUGFIX: this observer was registered twice, which made the
        # (debounced) re-render handler fire twice per change; register once.
        self.observe(self.update_rendered_image, ['image', 'label_image'])

    def _on_roi_changed(self, change=None):
        """Re-render when the region of interest moves (downsampled mode)."""
        if self._downsampling:
            self._update_rendered_image()

    def _on_reset_crop_requested(self, change=None):
        """Restore the roi to the full image extent on front-end request."""
        if change.new is True and self._downsampling:
            if self.image:
                image = self.image
            else:
                image = self.label_image
            dimension = image.GetImageDimension()
            largest_region = image.GetLargestPossibleRegion()
            size = largest_region.GetSize()
            largest_index = largest_region.GetIndex()
            new_roi = self.roi.copy()
            new_roi[0][:dimension] = np.array(
                image.TransformIndexToPhysicalPoint(largest_index))
            largest_index_upper = largest_index + size
            new_roi[1][:dimension] = np.array(
                image.TransformIndexToPhysicalPoint(largest_index_upper))
            self._largest_roi = new_roi.copy()
            self.roi = new_roi
        if change.new is True:
            # Acknowledge the request so the front end can issue another.
            self._reset_crop_requested = False
@debounced(delay_seconds=0.2, method=True) def update_rendered_image(self, change=None): self._largest_roi_rendered_image = None self._largest_roi_rendered_label_image = None self._largest_roi = np.zeros((2, 3), dtype=np.float64) self._update_rendered_image() @staticmethod def _find_scale_factors(limit, dimension, size): scale_factors = [1, ] * 3 for dim in range(dimension): while(int(np.floor(float(size[dim]) / scale_factors[dim])) > limit[dim]): scale_factors[dim] += 1 return scale_factors def _update_rendered_image(self): if self.image is None and self.label_image is None: return if self._rendering_image: @yield_for_change(self, '_rendering_image') def f(): x = yield assert(x is False) f() self._rendering_image = True if self._downsampling: if self.image: image = self.image else: image = self.label_image dimension = image.GetImageDimension() index = image.TransformPhysicalPointToIndex( self.roi[0][:dimension]) upper_index = image.TransformPhysicalPointToIndex( self.roi[1][:dimension]) size = upper_index - index if dimension == 2: scale_factors = self._find_scale_factors( self.size_limit_2d, dimension, size) else: scale_factors = self._find_scale_factors( self.size_limit_3d, dimension, size) self._scale_factors = np.array(scale_factors, dtype=np.uint8) if self.image: self.extractor = itk.ExtractImageFilter.New(self.image) self.shrinker = itk.ShrinkImageFilter.New(self.extractor) self.shrinker.SetShrinkFactors(scale_factors[:dimension]) if self.label_image: self.label_image_extractor = itk.ExtractImageFilter.New(self.label_image) self.label_image_shrinker = itk.ShrinkImageFilter.New(self.label_image_extractor) self.label_image_shrinker.SetShrinkFactors(scale_factors[:dimension]) region = itk.ImageRegion[dimension]() region.SetIndex(index) region.SetSize(tuple(size)) # Account for rounding # truncation issues region.PadByRadius(1) region.Crop(image.GetLargestPossibleRegion()) if self.image: self.extractor.SetInput(self.image) 
self.extractor.SetExtractionRegion(region) if self.label_image: self.label_image_extractor.SetInput(self.label_image) self.label_image_extractor.SetExtractionRegion(region) size = region.GetSize() is_largest = False if np.any(self._largest_roi) and np.all( self._largest_roi == self.roi): is_largest = True if self._largest_roi_rendered_image is not None or self._largest_roi_rendered_label_image is not None: if self.image: self.rendered_image = self._largest_roi_rendered_image if self.label_image: self.rendered_label_image = self._largest_roi_rendered_label_image return if self.image: self.shrinker.UpdateLargestPossibleRegion() if self.label_image: self.label_image_shrinker.UpdateLargestPossibleRegion() if is_largest: if self.image: self._largest_roi_rendered_image = self.shrinker.GetOutput() self._largest_roi_rendered_image.DisconnectPipeline() self._largest_roi_rendered_image.SetOrigin( self.roi[0][:dimension]) self.rendered_image = self._largest_roi_rendered_image if self.label_image: self._largest_roi_rendered_label_image = self.label_image_shrinker.GetOutput() self._largest_roi_rendered_label_image.DisconnectPipeline() self._largest_roi_rendered_label_image.SetOrigin( self.roi[0][:dimension]) self.rendered_label_image = self._largest_roi_rendered_label_image return if self.image: shrunk = self.shrinker.GetOutput() shrunk.DisconnectPipeline() shrunk.SetOrigin(self.roi[0][:dimension]) self.rendered_image = shrunk if self.label_image: shrunk = self.label_image_shrinker.GetOutput() shrunk.DisconnectPipeline() shrunk.SetOrigin(self.roi[0][:dimension]) self.rendered_label_image = shrunk else: if self.image: self.rendered_image = self.image if self.label_image: self.rendered_label_image = self.label_image @validate('label_image_weights') def _validate_label_image_weights(self, proposal): """Check the number of weights equals the number of labels.""" value = proposal['value'] value = np.array(value, dtype=np.float32) if self.rendered_label_image: labels = 
len(np.unique(itk.array_view_from_image(self.rendered_label_image))) if labels != len(value): raise TraitError('Number of labels, {0}, does not equal number of label weights, {1}'.format(labels, len(value))) return value @validate('label_image_blend') def _validate_label_image_blend(self, proposal): """Enforce 0 <= value <= 1.0.""" value = proposal['value'] if value < 0.0: return 0.0 if value > 1.0: return 1.0 return value @validate('gradient_opacity') def _validate_gradient_opacity(self, proposal): """Enforce 0 < value <= 1.0.""" value = proposal['value'] if value <= 0.0: return 0.01 if value > 1.0: return 1.0 return value @validate('label_image_blend') def _validate_label_image_blend(self, proposal): """Enforce 0 <= value <= 1.0.""" value = proposal['value'] if value < 0.0: return 0.0 if value > 1.0: return 1.0 return value @validate('cmap') def _validate_cmap(self, proposal): value = proposal['value'] if value is None: return None elif isinstance(value, list): return value else: return [value] @validate('vmin') def _validate_vmin(self, proposal): value = proposal['value'] if value is None: return None elif isinstance(value, list): return value else: return [value] @validate('vmax') def _validate_vmax(self, proposal): value = proposal['value'] if value is None: return None elif isinstance(value, list): return value else: return [value] @validate('point_set_colors') def _validate_point_set_colors(self, proposal): value = proposal['value'] n_colors = len(value) if self.point_sets: n_colors = len(self.point_sets) result = np.zeros((n_colors, 3), dtype=np.float32) for index, color in enumerate(value): result[index, :] = matplotlib.colors.to_rgb(color) if len(value) < n_colors: for index in range(len(value), n_colors): color = colorcet.glasbey[index % len(colorcet.glasbey)] result[index, :] = matplotlib.colors.to_rgb(color) return result @validate('point_set_opacities') def _validate_point_set_opacities(self, proposal): value = proposal['value'] n_values = 0 if 
isinstance(value, float): n_values = 1 else: n_values = len(value) n_opacities = n_values if self.point_sets: n_opacities = len(self.point_sets) result = np.ones((n_opacities,), dtype=np.float32) result[:n_values] = value return result @validate('point_set_sizes') def _validate_point_set_sizes(self, proposal): value = proposal['value'] n_values = 0 if isinstance(value, float): n_values = 1 else: n_values = len(value) n_sizes = n_values if self.point_sets: n_sizes = len(self.point_sets) result = 3 * np.ones((n_sizes,), dtype=np.uint8) result[:n_values] = value return result @validate('point_set_representations') def _validate_point_set_representations(self, proposal): value = proposal['value'] n_values = 0 if isinstance(value, str): n_values = 1 else: n_values = len(value) n_representations = n_values if self.point_sets: n_representations = len(self.point_sets) result = ['points'] * n_representations result[:n_values] = value return result def _on_point_sets_changed(self, change=None): # Make sure we have a sufficient number of colors old_colors = self.point_set_colors self.point_set_colors = old_colors[:len(self.point_sets)] # Make sure we have a sufficient number of opacities old_opacities = self.point_set_opacities self.point_set_opacities = old_opacities[:len(self.point_sets)] # Make sure we have a sufficient number of sizes old_sizes = self.point_set_sizes self.point_set_sizes = old_sizes[:len(self.point_sets)] # Make sure we have a sufficient number of representations old_representations = self.point_set_representations self.point_set_representations = old_representations[:len( self.point_sets)] @validate('geometry_colors') def _validate_geometry_colors(self, proposal): value = proposal['value'] n_colors = len(value) if self.geometries: n_colors = len(self.geometries) result = np.zeros((n_colors, 3), dtype=np.float32) for index, color in enumerate(value): result[index, :] = matplotlib.colors.to_rgb(color) if len(value) < n_colors: for index in 
range(len(value), n_colors): color = colorcet.glasbey[index % len(colorcet.glasbey)] result[index, :] = matplotlib.colors.to_rgb(color) return result @validate('geometry_opacities') def _validate_geometry_opacities(self, proposal): value = proposal['value'] n_values = 0 if isinstance(value, float): n_values = 1 else: n_values = len(value) n_opacities = n_values if self.geometries: n_opacities = len(self.geometries) result = np.ones((n_opacities,), dtype=np.float32) result[:n_values] = value return result def _on_geometries_changed(self, change=None): # Make sure we have a sufficient number of colors old_colors = self.geometry_colors self.geometry_colors = old_colors[:len(self.geometries)] # Make sure we have a sufficient number of opacities old_opacities = self.geometry_opacities self.geometry_opacities = old_opacities[:len(self.geometries)] def roi_region(self): """Return the itk.ImageRegion corresponding to the roi.""" if self.image: image = self.image else: image = self.label_image dimension = image.GetImageDimension() index = image.TransformPhysicalPointToIndex( tuple(self.roi[0][:dimension])) upper_index = image.TransformPhysicalPointToIndex( tuple(self.roi[1][:dimension])) size = upper_index - index for dim in range(dimension): size[dim] += 1 region = itk.ImageRegion[dimension]() region.SetIndex(index) region.SetSize(tuple(size)) region.Crop(image.GetLargestPossibleRegion()) return region def roi_slice(self): """Return the numpy array slice corresponding to the roi.""" if self.image: image = self.image else: image = self.label_image dimension = image.GetImageDimension() region = self.roi_region() index = region.GetIndex() upper_index = np.array(index) + np.array(region.GetSize()) slices = [] for dim in range(dimension): slices.insert(0, slice(index[dim], upper_index[dim] + 1)) return tuple(slices)
class ZMQInteractiveShell(InteractiveShell):
    """A subclass of InteractiveShell for ZMQ."""

    displayhook_class = Type(ZMQShellDisplayHook)
    display_pub_class = Type(ZMQDisplayPublisher)
    data_pub_class = Any()  # deprecated; see the data_pub property below
    kernel = Any()
    parent_header = Any()

    @default('banner1')
    def _default_banner1(self):
        return default_banner

    # Override the traitlet in the parent class, because there's no point using
    # readline for the kernel. Can be removed when the readline code is moved
    # to the terminal frontend.
    readline_use = CBool(False)
    # autoindent has no meaning in a zmqshell, and attempting to enable it
    # will print a warning in the absence of readline.
    autoindent = CBool(False)

    exiter = Instance(ZMQExitAutocall)

    @default('exiter')
    def _default_exiter(self):
        return ZMQExitAutocall(self)

    @observe('exit_now')
    def _update_exit_now(self, change):
        """stop eventloop when exit_now fires"""
        if change['new']:
            if hasattr(self.kernel, 'io_loop'):
                loop = self.kernel.io_loop
                # Delay the stop slightly so the exit reply can go out first.
                loop.call_later(0.1, loop.stop)
            if self.kernel.eventloop:
                exit_hook = getattr(self.kernel.eventloop, 'exit_hook', None)
                if exit_hook:
                    exit_hook(self.kernel)

    keepkernel_on_exit = None

    # Over ZeroMQ, GUI control isn't done with PyOS_InputHook as there is no
    # interactive input being read; we provide event loop support in ipkernel
    def enable_gui(self, gui):
        from .eventloops import enable_gui as real_enable_gui
        try:
            real_enable_gui(gui)
            self.active_eventloop = gui
        except ValueError as e:
            raise UsageError("%s" % e) from e

    def init_environment(self):
        """Configure the user's environment."""
        env = os.environ
        # These two ensure 'ls' produces nice coloring on BSD-derived systems
        env['TERM'] = 'xterm-color'
        env['CLICOLOR'] = '1'
        # Since normal pagers don't work at all (over pexpect we don't have
        # single-key control of the subprocess), try to disable paging in
        # subprocesses as much as possible.
        env['PAGER'] = 'cat'
        env['GIT_PAGER'] = 'cat'

    def init_hooks(self):
        super(ZMQInteractiveShell, self).init_hooks()
        # Route pager output through the payload system (priority 99).
        self.set_hook('show_in_pager', page.as_hook(payloadpage.page), 99)

    def init_data_pub(self):
        """Delay datapub init until request, for deprecation warnings"""
        pass

    @property
    def data_pub(self):
        # Lazily construct the deprecated data publisher on first access so
        # the deprecation warning only fires for actual users.
        if not hasattr(self, '_data_pub'):
            warnings.warn(
                "InteractiveShell.data_pub is deprecated outside IPython parallel.",
                DeprecationWarning,
                stacklevel=2)

            self._data_pub = self.data_pub_class(parent=self)
            self._data_pub.session = self.display_pub.session
            self._data_pub.pub_socket = self.display_pub.pub_socket
        return self._data_pub

    @data_pub.setter
    def data_pub(self, pub):
        self._data_pub = pub

    def ask_exit(self):
        """Engage the exit actions."""
        self.exit_now = (not self.keepkernel_on_exit)
        payload = dict(
            source='ask_exit',
            keepkernel=self.keepkernel_on_exit,
        )
        self.payload_manager.write_payload(payload)

    def run_cell(self, *args, **kwargs):
        # Clear any traceback left over from the previous cell.
        self._last_traceback = None
        return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)

    def _showtraceback(self, etype, evalue, stb):
        # try to preserve ordering of tracebacks and print statements
        sys.stdout.flush()
        sys.stderr.flush()

        exc_content = {
            'traceback': stb,
            'ename': str(etype.__name__),
            'evalue': str(evalue),
        }

        dh = self.displayhook
        # Send exception info over pub socket for other clients than the caller
        # to pick up
        topic = None
        if dh.topic:
            topic = dh.topic.replace(b'execute_result', b'error')

        dh.session.send(
            dh.pub_socket,
            "error",
            json_clean(exc_content),
            dh.parent_header,
            ident=topic,
        )

        # FIXME - Once we rely on Python 3, the traceback is stored on the
        # exception object, so we shouldn't need to store it here.
        self._last_traceback = stb

    def set_next_input(self, text, replace=False):
        """Send the specified text to the frontend to be presented at the next
        input cell."""
        payload = dict(
            source='set_next_input',
            text=text,
            replace=replace,
        )
        self.payload_manager.write_payload(payload)

    def set_parent(self, parent):
        """Set the parent header for associating output with its triggering input"""
        self.parent_header = parent
        self.displayhook.set_parent(parent)
        self.display_pub.set_parent(parent)
        if hasattr(self, '_data_pub'):
            self.data_pub.set_parent(parent)
        # OutStream objects expose set_parent; plain streams do not, so
        # tolerate its absence.
        try:
            sys.stdout.set_parent(parent)
        except AttributeError:
            pass
        try:
            sys.stderr.set_parent(parent)
        except AttributeError:
            pass

    def get_parent(self):
        return self.parent_header

    def init_magics(self):
        super(ZMQInteractiveShell, self).init_magics()
        self.register_magics(KernelMagics)
        self.magics_manager.register_alias('ed', 'edit')

    def init_virtualenv(self):
        # Overridden not to do virtualenv detection, because it's probably
        # not appropriate in a kernel. To use a kernel in a virtualenv, install
        # it inside the virtualenv.
        # https://ipython.readthedocs.io/en/latest/install/kernel_install.html
        pass

    def system_piped(self, cmd):
        """Call the given cmd in a subprocess, piping stdout/err

        Parameters
        ----------
        cmd : str
            Command to execute (can not end in '&', as background processes are
            not supported.  Should not be a command that expects input
            other than simple text.
        """
        if cmd.rstrip().endswith('&'):
            # this is *far* from a rigorous test
            # We do not support backgrounding processes because we either use
            # pexpect or pipes to read from.  Users can always just call
            # os.system() or use ip.system=ip.system_raw
            # if they really want a background process.
            raise OSError("Background processes not supported.")

        # we explicitly do NOT return the subprocess status code, because
        # a non-None value would trigger :func:`sys.displayhook` calls.
        # Instead, we store the exit_code in user_ns.
        # Also, protect system call from UNC paths on Windows here too
        # as is done in InteractiveShell.system_raw
        if sys.platform == 'win32':
            cmd = self.var_expand(cmd, depth=1)
            from IPython.utils._process_win32 import AvoidUNCPath
            with AvoidUNCPath() as path:
                if path is not None:
                    cmd = 'pushd %s &&%s' % (path, cmd)
                self.user_ns['_exit_code'] = system(cmd)
        else:
            self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))

    # Ensure new system_piped implementation is used
    system = system_piped
class Icon(ReactWidget):
    """Icon widget whose traits are synced with an 'IconModel' front end."""
    # Front-end model class this widget binds to.
    _model_name = Unicode('IconModel').tag(sync=True)
    # Icon type identifier (interpreted by the front end).
    type = Unicode('', help='type').tag(sync=True)
    # Whether the icon is rendered with a spinning animation.
    spin = CBool(False, help="spin").tag(sync=True)
    # Rotation to apply to the icon; None means no rotation.
    rotate = CFloat(None, allow_none=True, help="rotate").tag(sync=True)
class LineProfiler(Viewer):
    """LineProfiler widget class.

    Samples pixel intensities along the line segment connecting two points
    (in physical space) of a 2D or 3D image.  The two endpoints are synced
    traits so the front end can move them interactively.
    """

    _view_name = Unicode('LineProfilerView').tag(sync=True)
    _model_name = Unicode('LineProfilerModel').tag(sync=True)
    _view_module = Unicode('itkwidgets').tag(sync=True)
    _model_module = Unicode('itkwidgets').tag(sync=True)
    _view_module_version = Unicode('^0.32.0').tag(sync=True)
    _model_module_version = Unicode('^0.32.0').tag(sync=True)
    point1 = NDArray(dtype=np.float64,
                     default_value=np.zeros((3,), dtype=np.float64),
                     help="First point in physical space that defines the line profile")\
        .tag(sync=True, **array_serialization)\
        .valid(shape_constraints(3,))
    # Bug fix: help text previously said "First point" — copy-pasted from
    # point1.
    point2 = NDArray(dtype=np.float64,
                     default_value=np.ones((3,), dtype=np.float64),
                     help="Second point in physical space that defines the line profile")\
        .tag(sync=True, **array_serialization)\
        .valid(shape_constraints(3,))
    # Set when the caller did not supply both endpoints; the front end then
    # lets the user pick them interactively.
    _select_initial_points = CBool(
        default_value=False,
        help="We will select the initial points for the line profile.").tag(
            sync=True)

    def __init__(self, image, order, **kwargs):
        """Create a line profiler for *image*.

        Parameters
        ----------
        image : array_like, itk.Image, or vtk.vtkImageData
            The 2D or 3D image to profile.
        order : int
            Spline interpolation order used by :meth:`get_profile` (0-5).
        """
        self.image = image
        self.order = order
        if 'point1' not in kwargs or 'point2' not in kwargs:
            self._select_initial_points = True
            # Default to z-plane mode instead of the 3D volume if we need to
            # select points
            if 'mode' not in kwargs:
                kwargs['mode'] = 'z'
            if 'ui_collapsed' not in kwargs:
                kwargs['ui_collapsed'] = True
        super(LineProfiler, self).__init__(**kwargs)

    def get_profile(self, image_or_array=None, point1=None, point2=None,
                    order=None):
        """Calculate the line profile.

        Calculate the pixel intensity values along the line that connects
        the given two points.  The image can be 2D or 3D.  If any/all of the
        parameters are None, default values are assigned.

        Parameters
        ----------
        image_or_array : array_like, itk.Image, or vtk.vtkImageData
            The 2D or 3D image to visualize.
        point1 : list of float
            List elements represent the 2D/3D coordinate of the point1.
        point2 : list of float
            List elements represent the 2D/3D coordinate of the point2.
        order : int, optional
            Spline order for line profile interpolation.  The order has to be
            in the range 0-5.

        Returns
        -------
        tuple of ndarray
            Physical distances along the line and the interpolated
            intensities sampled at those distances.

        Raises
        ------
        RuntimeError
            If the scipy or itk package is not available.
        """
        if not have_scipy:
            raise RuntimeError(
                'The scipy package is necessary for the line_profiler widget.')
        if not have_itk:
            raise RuntimeError(
                'The itk package is necessary for the line_profiler widget.')
        if image_or_array is None:
            image_or_array = self.image
        if point1 is None:
            point1 = self.point1
        if point2 is None:
            point2 = self.point2
        if order is None:
            order = self.order
        image = to_itk_image(image_or_array)
        image_array = itk.array_view_from_image(image)
        dimension = image.GetImageDimension()
        # Euclidean length of the profile line in physical space.
        distance = np.sqrt(
            sum([(point1[ii] - point2[ii])**2 for ii in range(dimension)]))
        index1 = tuple(
            image.TransformPhysicalPointToIndex(tuple(point1[:dimension])))
        index2 = tuple(
            image.TransformPhysicalPointToIndex(tuple(point2[:dimension])))
        # Oversample (~2.1 samples per voxel along the line) so the spline
        # interpolation is well resolved.
        num_points = int(
            np.round(
                np.sqrt(
                    sum([(index1[ii] - index2[ii])**2
                         for ii in range(dimension)])) * 2.1))
        coords = [
            np.linspace(index1[ii], index2[ii], num_points)
            for ii in range(dimension)
        ]
        # The NumPy view of an ITK image is indexed in reverse (z, y, x)
        # order, hence the reversed coordinate stack.
        mapped = scipy.ndimage.map_coordinates(image_array,
                                               np.vstack(coords[::-1]),
                                               order=order,
                                               mode='nearest')
        return np.linspace(0.0, distance, num_points), mapped
class Input(ReactWidget, ValueMixin):
    """Text-input widget synced with an 'InputModel' front end."""
    # Front-end model class this widget binds to.
    _model_name = Unicode('InputModel').tag(sync=True)
    # Initial value shown before the user edits the field.
    default_value = Unicode('', allow_none=True,
                            help="default_value").tag(sync=True)
    # NOTE: help text says "selected or not", but the trait disables the
    # input field.
    disabled = CBool(False, help="selected or not").tag(sync=True)
    # HTML-style input type (e.g. 'text').
    type = Unicode('text', help="type").tag(sync=True)
    size = Unicode('default', help="size of the widget").tag(sync=True)
class NXConsoleApp(JupyterApp, JupyterConsoleApp):
    """Qt application that embeds a Jupyter console inside the NeXpy GUI."""

    name = 'nexpy-console'
    version = __version__
    description = """
        The NeXpy Console.

        This launches a Console-style application using Qt.

        The console is embedded in a GUI that contains a tree view of all
        NXroot groups and a matplotlib plotting pane. It also has all the
        added benefits of a Jupyter Qt Console with multiline editing,
        autocompletion, tooltips, command line histories and the ability to
        save your session as HTML or print the output.
    """
    examples = _examples

    classes = [JupyterWidget] + JupyterConsoleApp.classes
    flags = Dict(flags)
    aliases = Dict(aliases)
    frontend_flags = Any(qt_flags)
    frontend_aliases = Any(qt_aliases)

    stylesheet = Unicode('', config=True,
        help="path to a custom CSS stylesheet")

    hide_menubar = CBool(False, config=True,
        help="Start the console window with the menu bar hidden.")

    plain = CBool(False, config=True,
        help="Use a plaintext widget instead of rich text.")

    display_banner = CBool(True, config=True,
        help="Whether to display a banner upon starting the QtConsole."
    )

    def _plain_changed(self, name, old, new):
        # Trait-change handler: switch the widget factory between plain- and
        # rich-text console widgets when 'plain' changes.
        kind = 'plain' if new else 'rich'
        self.config.ConsoleWidget.kind = kind
        if new:
            self.widget_factory = JupyterWidget
        else:
            self.widget_factory = RichJupyterWidget

    # the factory for creating a widget
    widget_factory = Any(RichJupyterWidget)

    def parse_command_line(self, argv=None):
        super(NXConsoleApp, self).parse_command_line(argv)
        self.build_kernel_argv(argv)

    def init_dir(self):
        """Initialize NeXpy home directory"""
        home_dir = os.path.abspath(os.path.expanduser('~'))
        nexpy_dir = os.path.join(home_dir, '.nexpy')
        if not os.path.exists(nexpy_dir):
            parent = os.path.dirname(nexpy_dir)
            # Fall back to a temporary directory when the home directory is
            # not writable.
            if not os.access(parent, os.W_OK):
                nexpy_dir = tempfile.mkdtemp()
            else:
                os.mkdir(nexpy_dir)
        # Ensure the standard NeXpy sub-directories exist.
        for subdirectory in ['backups', 'functions', 'plugins', 'readers',
                             'scripts']:
            directory = os.path.join(nexpy_dir, subdirectory)
            if not os.path.exists(directory):
                os.mkdir(directory)
        global _nexpy_dir
        self.nexpy_dir = _nexpy_dir = nexpy_dir
        self.backup_dir = os.path.join(self.nexpy_dir, 'backups')
        self.plugin_dir = os.path.join(self.nexpy_dir, 'plugins')
        self.reader_dir = os.path.join(self.nexpy_dir, 'readers')
        self.script_dir = os.path.join(self.nexpy_dir, 'scripts')
        self.function_dir = os.path.join(self.nexpy_dir, 'functions')
        # Make user-defined functions importable from the console.
        sys.path.append(self.function_dir)
        self.scratch_file = os.path.join(self.nexpy_dir, 'w0.nxs')
        if not os.path.exists(self.scratch_file):
            NXroot().save(self.scratch_file)

    def init_settings(self):
        """Initialize access to the NeXpy settings file."""
        self.settings_file = os.path.join(self.nexpy_dir, 'settings.ini')
        self.settings = NXConfigParser(self.settings_file)

        def backup_age(backup):
            # Age is derived from the timestamp encoded in the backup's
            # parent directory name; unparsable names count as age 0.
            try:
                return timestamp_age(os.path.basename(os.path.dirname(backup)))
            except ValueError:
                return 0

        # Prune stale backup entries: drop records whose files are missing or
        # live outside the backup directory, and delete backups older than
        # the age threshold (presumably days — TODO confirm timestamp_age
        # units).
        backups = self.settings.options('backups')
        for backup in backups:
            if not (os.path.exists(backup) and
                    os.path.realpath(backup).startswith(self.backup_dir)):
                self.settings.remove_option('backups', backup)
            elif backup_age(backup) > 5:
                shutil.rmtree(os.path.dirname(os.path.realpath(backup)))
                self.settings.remove_option('backups', backup)
        self.settings.save()

    def init_log(self):
        """Initialize the NeXpy logger."""
        log_file = os.path.join(self.nexpy_dir, 'nexpy.log')
        handler = logging.handlers.RotatingFileHandler(log_file,
                                                       maxBytes=50000,
                                                       backupCount=5)
        fmt = '%(asctime)s - %(levelname)s - %(message)s'
        formatter = logging.Formatter(fmt, None)
        handler.setFormatter(formatter)
        # Replace any pre-existing root handlers so NeXpy's rotating file is
        # the sole destination.
        try:
            if logging.root.hasHandlers():
                for h in logging.root.handlers:
                    logging.root.removeHandler(h)
        except Exception:
            pass
        logging.root.addHandler(handler)
        levels = {'CRITICAL':logging.CRITICAL, 'ERROR':logging.ERROR,
                  'WARNING':logging.WARNING, 'INFO':logging.INFO,
                  'DEBUG':logging.DEBUG}
        # Log level may be overridden via the NEXPY_LOG environment variable;
        # anything unrecognized falls back to INFO.
        level = os.getenv("NEXPY_LOG")
        if level is None or level.upper() not in levels:
            level = 'INFO'
        else:
            level = level.upper()
        logging.root.setLevel(levels[level])
        logging.info('NeXpy launched')
        logging.info('Log level is ' + level)
        logging.info('Python ' + sys.version.split()[0] + ': ' +
                     sys.executable)
        logging.info('IPython v' + ipython_version)
        logging.info('Matplotlib v' + mpl_version)
        logging.info('NeXpy v' + nexpy_version)
        logging.info('nexusformat v' + nxversion)
        # Redirect stdout/stderr into the NeXpy log.
        sys.stdout = sys.stderr = NXLogger()

    def init_tree(self):
        """Initialize the NeXus tree used in the tree view."""
        global _tree
        self.tree = NXtree()
        _tree = self.tree

    def init_config(self):
        # Console-specific configuration tweaks applied before the GUI
        # starts.
        self.config.ConsoleWidget.input_sep = ''
        self.config.Completer.use_jedi = False

    def init_gui(self):
        """Initialize the GUI."""
        # Reuse an existing QApplication if one is already running.
        self.app = QtWidgets.QApplication.instance()
        if self.app is None:
            self.app = QtWidgets.QApplication(['nexpy'])
        self.app.setApplicationName('nexpy')
        sys.excepthook = report_exception
        # Window icon: prefer the SVG when the Qt build can read it.
        try:
            if 'svg' in QtGui.QImageReader.supportedImageFormats():
                self.app.icon = QtGui.QIcon(
                    pkg_resources.resource_filename('nexpy.gui',
                                                    'resources/icon/NeXpy.svg'))
            else:
                self.app.icon = QtGui.QIcon(
                    pkg_resources.resource_filename('nexpy.gui',
                                                    'resources/icon/NeXpy.png'))
            QtWidgets.QApplication.setWindowIcon(self.app.icon)
            self.icon_pixmap = QtGui.QPixmap(
                self.app.icon.pixmap(QtCore.QSize(64,64)))
        except Exception:
            self.icon_pixmap = None
        self.window = MainWindow(self, self.tree, self.settings, self.config)
        self.window.log = self.log
        global _mainwindow
        _mainwindow = self.window

    def init_shell(self, filename):
        """Initialize imports in the shell."""
        global _shell
        _shell = self.window.user_ns
        # Seed the console namespace with the NeXus API.
        s = ("import nexusformat.nexus as nx\n"
             "from nexusformat.nexus import NXgroup, NXfield, NXattr, NXlink\n"
             "from nexusformat.nexus import *\n"
             "import nexpy\n"
             "from nexpy.gui.plotview import NXPlotView")
        six.exec_(s, self.window.user_ns)
        # Alias every NeXus class name (e.g. NXentry = nx.NXentry).
        s = ""
        for _class in nxclasses:
            s = "%s=nx.%s\n" % (_class,_class) + s
        six.exec_(s, self.window.user_ns)
        # On first run write a default config.py; otherwise execute the
        # user's existing one in the console namespace.
        config_file = os.path.join(self.nexpy_dir, 'config.py')
        if not os.path.exists(config_file):
            s = ["import sys\n",
                 "import os\n",
                 "import h5py as h5\n",
                 "import numpy as np\n",
                 "import numpy.ma as ma\n",
                 "import scipy as sp\n",
                 "import matplotlib as mpl\n",
                 "from matplotlib import pylab, mlab, pyplot\n",
                 "plt = pyplot\n",
                 "os.chdir(os.path.expanduser('~'))\n"]
            with open(config_file, 'w') as f:
                f.writelines(s)
        else:
            with open(config_file) as f:
                s = f.readlines()
            six.exec_('\n'.join(s), self.window.user_ns)
        # Optionally load and plot a NeXus file passed on the command line;
        # failures are deliberately ignored so startup always completes.
        if filename is not None:
            try:
                fname = os.path.expanduser(filename)
                name = self.window.treeview.tree.get_name(fname)
                self.window.treeview.tree[name] = self.window.user_ns[name] \
                    = nxload(fname)
                self.window.treeview.select_node(
                    self.window.treeview.tree[name])
                logging.info("NeXus file '%s' opened as workspace '%s'"
                             % (fname, name))
                self.window.user_ns[name].plot()
            except Exception:
                pass

    def init_colors(self):
        """Configure the coloring of the widget"""
        self.window.console.set_default_style()

    def init_signal(self):
        """allow clean shutdown on sigint"""
        signal.signal(signal.SIGINT, lambda sig, frame: self.exit(-2))
        # need a timer, so that QApplication doesn't block until a real
        # Qt event fires (can require mouse movement)
        # timer trick from http://stackoverflow.com/q/4938723/938949
        timer = QtCore.QTimer()
        # Let the interpreter run each 200 ms:
        timer.timeout.connect(lambda: None)
        timer.start(200)
        # hold onto ref, so the timer doesn't get cleaned up
        self._sigint_timer = timer

    @catch_config_error
    def initialize(self, filename=None, argv=None):
        # Full startup sequence; order matters (settings need the home
        # directory, the GUI needs the tree, the shell needs the GUI window).
        super(NXConsoleApp, self).initialize(argv)
        self.init_dir()
        self.init_settings()
        self.init_log()
        self.init_tree()
        self.init_config()
        self.init_gui()
        self.init_shell(filename)
        self.init_colors()
        self.init_signal()

    def start(self):
        super(NXConsoleApp, self).start()

        # draw the window
        self.window.show()
        self.window.raise_()

        # Start the application main loop.
        self.app.exec_()
class Password(ReactWidget, ValueMixin):
    """Password-input widget synced with a 'PasswordModel' front end."""
    # Front-end model class this widget binds to.
    _model_name = Unicode('PasswordModel').tag(sync=True)
    # NOTE: help text says "selected or not", but the trait controls whether
    # the show/hide-password toggle is available.
    visibility_toggle = CBool(True, allow_none=True,
                              help="selected or not").tag(sync=True)
    size = Unicode('default', help="size of the widget").tag(sync=True)
class JupyterConsoleApp(ConnectionFileMixin):
    """Mixin providing kernel connection/launch machinery to console apps."""

    name = "jupyter-console-mixin"

    description = """
        The Jupyter Console Mixin.

        This class contains the common portions of console client (QtConsole,
        ZMQ-based terminal console, etc).  It is not a full console, in that
        launched terminal subprocesses will not be able to accept input.

        The Console using this mixing supports various extra features beyond
        the single-process Terminal IPython shell, such as connecting to
        existing kernel, via:

            jupyter console <appname> --existing

        as well as tunnel via SSH

    """

    classes = classes
    flags = Dict(flags)
    aliases = Dict(aliases)
    kernel_manager_class = Type(
        default_value=KernelManager,
        config=True,
        help="The kernel manager class to use.",
    )
    kernel_client_class = BlockingKernelClient

    # argv forwarded to the kernel subprocess (filled by build_kernel_argv).
    kernel_argv = List(Unicode())

    # connection info:

    sshserver = Unicode(
        "", config=True,
        help="""The SSH server to use to connect to the kernel.""")
    sshkey = Unicode(
        "",
        config=True,
        help="""Path to the ssh key to use for logging in to the ssh server.""",
    )

    def _connection_file_default(self) -> str:
        # Default connection file is keyed on this process's pid.
        return "kernel-%i.json" % os.getpid()

    existing = CUnicode("", config=True,
        help="""Connect to an already running kernel""")

    kernel_name = Unicode("python", config=True,
        help="""The name of the default kernel to start.""")

    confirm_exit = CBool(
        True,
        config=True,
        help="""
        Set to display confirmation dialog on exit. You can always use 'exit' or
        'quit', to force a direct exit without any confirmation.""",
    )

    def build_kernel_argv(self, argv: object = None) -> None:
        """build argv to be passed to kernel subprocess

        Override in subclasses if any args should be passed to the kernel
        """
        self.kernel_argv = self.extra_args

    def init_connection_file(self) -> None:
        """find the connection file, and load the info if found.

        The current working directory and the current profile's security
        directory will be searched for the file if it is not given by
        absolute path.

        When attempting to connect to an existing kernel and the `--existing`
        argument does not match an existing file, it will be interpreted as a
        fileglob, and the matching file in the current profile's security dir
        with the latest access time will be used.

        After this method is called, self.connection_file contains the *full
        path* to the connection file, never just its name.
        """
        if self.existing:
            try:
                cf = find_connection_file(self.existing,
                                          [".", self.runtime_dir])
            except Exception:
                self.log.critical(
                    "Could not find existing kernel connection file %s",
                    self.existing)
                self.exit(1)
            self.log.debug("Connecting to existing kernel: %s" % cf)
            self.connection_file = cf
        else:
            # not existing, check if we are going to write the file
            # and ensure that self.connection_file is a full path, not just the shortname
            try:
                cf = find_connection_file(self.connection_file,
                                          [self.runtime_dir])
            except Exception:
                # file might not exist
                if self.connection_file == os.path.basename(
                        self.connection_file):
                    # just shortname, put it in security dir
                    cf = os.path.join(self.runtime_dir, self.connection_file)
                else:
                    cf = self.connection_file
            self.connection_file = cf

        # Resolve to a full path; if the file does not exist yet there is
        # nothing to load, so bail out quietly.
        try:
            self.connection_file = _filefind(self.connection_file,
                                             [".", self.runtime_dir])
        except IOError:
            self.log.debug("Connection File not found: %s",
                           self.connection_file)
            return

        # should load_connection_file only be used for existing?
        # as it is now, this allows reusing ports if an existing
        # file is requested
        try:
            self.load_connection_file()
        except Exception:
            self.log.error(
                "Failed to load connection file: %r",
                self.connection_file,
                exc_info=True,
            )
            self.exit(1)

    def init_ssh(self) -> None:
        """set up ssh tunnels, if needed."""
        if not self.existing or (not self.sshserver and not self.sshkey):
            return
        self.load_connection_file()

        transport = self.transport
        ip = self.ip

        if transport != "tcp":
            self.log.error("Can only use ssh tunnels with TCP sockets, not %s",
                           transport)
            sys.exit(-1)

        if self.sshkey and not self.sshserver:
            # specifying just the key implies that we are connecting directly
            self.sshserver = ip
            ip = localhost()

        # build connection dict for tunnels:
        info = dict(
            ip=ip,
            shell_port=self.shell_port,
            iopub_port=self.iopub_port,
            stdin_port=self.stdin_port,
            hb_port=self.hb_port,
            control_port=self.control_port,
        )

        self.log.info("Forwarding connections to %s via %s" %
                      (ip, self.sshserver))

        # tunnels return a new set of ports, which will be on localhost:
        self.ip = localhost()
        try:
            newports = tunnel_to_kernel(info, self.sshserver, self.sshkey)
        except:  # noqa
            # even catch KeyboardInterrupt
            self.log.error("Could not setup tunnels", exc_info=True)
            self.exit(1)

        (
            self.shell_port,
            self.iopub_port,
            self.stdin_port,
            self.hb_port,
            self.control_port,
        ) = newports

        # Record the tunneled ports in a sibling "-ssh" connection file so
        # additional clients can reuse this tunnel.
        cf = self.connection_file
        root, ext = os.path.splitext(cf)
        self.connection_file = root + "-ssh" + ext
        self.write_connection_file()  # write the new connection file

        self.log.info("To connect another client via this tunnel, use:")
        self.log.info("--existing %s" %
                      os.path.basename(self.connection_file))

    def _new_connection_file(self) -> str:
        # Generate a fresh, collision-free connection-file path in the
        # runtime directory.
        cf = ""
        while not cf:
            # we don't need a 128b id to distinguish kernels, use more readable
            # 48b node segment (12 hex chars). Users running more than 32k simultaneous
            # kernels can subclass.
            ident = str(uuid.uuid4()).split("-")[-1]
            cf = os.path.join(self.runtime_dir, "kernel-%s.json" % ident)
            # only keep if it's actually new.  Protect against unlikely collision
            # in 48b random search space
            cf = cf if not os.path.exists(cf) else ""
        return cf

    def init_kernel_manager(self) -> None:
        # Don't let Qt or ZMQ swallow KeyboardInterupts.
        if self.existing:
            # Attaching to an existing kernel: nothing to manage locally.
            self.kernel_manager = None
            return
        signal.signal(signal.SIGINT, signal.SIG_DFL)

        # Create a KernelManager and start a kernel.
        try:
            self.kernel_manager = self.kernel_manager_class(
                ip=self.ip,
                session=self.session,
                transport=self.transport,
                shell_port=self.shell_port,
                iopub_port=self.iopub_port,
                stdin_port=self.stdin_port,
                hb_port=self.hb_port,
                control_port=self.control_port,
                connection_file=self.connection_file,
                kernel_name=self.kernel_name,
                parent=self,
                data_dir=self.data_dir,
            )
        except NoSuchKernel:
            self.log.critical("Could not find kernel %s", self.kernel_name)
            self.exit(1)

        self.kernel_manager = cast(KernelManager, self.kernel_manager)
        self.kernel_manager.client_factory = self.kernel_client_class
        kwargs = {}
        kwargs["extra_arguments"] = self.kernel_argv
        self.kernel_manager.start_kernel(**kwargs)
        atexit.register(self.kernel_manager.cleanup_ipc_files)

        if self.sshserver:
            # ssh, write new connection file
            self.kernel_manager.write_connection_file()

        # in case KM defaults / ssh writing changes things:
        km = self.kernel_manager
        self.shell_port = km.shell_port
        self.iopub_port = km.iopub_port
        self.stdin_port = km.stdin_port
        self.hb_port = km.hb_port
        self.control_port = km.control_port
        self.connection_file = km.connection_file

        atexit.register(self.kernel_manager.cleanup_connection_file)

    def init_kernel_client(self) -> None:
        # When we launched the kernel ourselves the manager supplies the
        # client; otherwise build one directly from the connection info.
        if self.kernel_manager is not None:
            self.kernel_client = self.kernel_manager.client()
        else:
            self.kernel_client = self.kernel_client_class(
                session=self.session,
                ip=self.ip,
                transport=self.transport,
                shell_port=self.shell_port,
                iopub_port=self.iopub_port,
                stdin_port=self.stdin_port,
                hb_port=self.hb_port,
                control_port=self.control_port,
                connection_file=self.connection_file,
                parent=self,
            )

        self.kernel_client.start_channels()

    def initialize(self, argv: object = None) -> None:
        """
        Classes which mix this class in should call:
               JupyterConsoleApp.initialize(self,argv)
        """
        if self._dispatching:
            return
        self.init_connection_file()
        self.init_ssh()
        self.init_kernel_manager()
        self.init_kernel_client()
class Checkbox(ReactWidget):
    """Checkbox widget synced with a 'CheckboxModel' front end."""
    # Front-end model class this widget binds to.
    _model_name = Unicode('CheckboxModel').tag(sync=True)
    # Label shown next to the checkbox.
    description = Unicode(help="Menu item").tag(sync=True)
    selected = CBool(help="selected or not").tag(sync=True)
    checked = CBool(help="checked or not").tag(sync=True)
    size = Unicode('default', help="size of the widget").tag(sync=True)
class JupyterQtConsoleApp(JupyterApp, JupyterConsoleApp):
    """Qt application wrapping one or more Jupyter console widgets."""

    name = 'jupyter-qtconsole'
    version = __version__
    description = """
        The Jupyter QtConsole.

        This launches a Console-style application using Qt.  It is not a full
        console, in that launched terminal subprocesses will not be able to
        accept input.

    """
    examples = _examples

    classes = [JupyterWidget] + JupyterConsoleApp.classes
    flags = Dict(flags)
    aliases = Dict(aliases)
    frontend_flags = Any(qt_flags)
    frontend_aliases = Any(qt_aliases)
    kernel_client_class = QtKernelClient
    kernel_manager_class = QtKernelManager

    stylesheet = Unicode('', config=True,
        help="path to a custom CSS stylesheet")

    hide_menubar = CBool(False, config=True,
        help="Start the console window with the menu bar hidden.")

    maximize = CBool(False, config=True,
        help="Start the console window maximized.")

    plain = CBool(False, config=True,
        help="Use a plaintext widget instead of rich text (plain can't print/save).")

    display_banner = CBool(True, config=True,
        help="Whether to display a banner upon starting the QtConsole."
    )

    def _plain_changed(self, name, old, new):
        # Trait-change handler: switch the widget factory between plain- and
        # rich-text console widgets when 'plain' changes.
        kind = 'plain' if new else 'rich'
        self.config.ConsoleWidget.kind = kind
        if new:
            self.widget_factory = JupyterWidget
        else:
            self.widget_factory = RichJupyterWidget

    # the factory for creating a widget
    widget_factory = Any(RichJupyterWidget)

    def parse_command_line(self, argv=None):
        super(JupyterQtConsoleApp, self).parse_command_line(argv)
        self.build_kernel_argv(self.extra_args)

    def new_frontend_master(self):
        """ Create and return new frontend attached to new kernel, launched on
            localhost.
        """
        kernel_manager = self.kernel_manager_class(
            connection_file=self._new_connection_file(),
            parent=self,
            autorestart=True,
        )
        # start the kernel
        kwargs = {}
        # FIXME: remove special treatment of IPython kernels
        # Bug fix: query the freshly created local manager rather than
        # self.kernel_manager — the latter is None when this app was started
        # with --existing (see JupyterConsoleApp.init_kernel_manager), which
        # made opening a new master frontend raise AttributeError.
        if kernel_manager.ipykernel:
            kwargs['extra_arguments'] = self.kernel_argv
        kernel_manager.start_kernel(**kwargs)
        kernel_manager.client_factory = self.kernel_client_class
        kernel_client = kernel_manager.client()
        kernel_client.start_channels(shell=True, iopub=True)
        widget = self.widget_factory(config=self.config, local_kernel=True)
        self.init_colors(widget)
        widget.kernel_manager = kernel_manager
        widget.kernel_client = kernel_client
        widget._existing = False
        widget._may_close = True
        widget._confirm_exit = self.confirm_exit
        widget._display_banner = self.display_banner
        return widget

    def new_frontend_connection(self, connection_file):
        """Create and return a new frontend attached to an existing kernel.

        Parameters
        ----------
        connection_file : str
            The connection_file path this frontend is to connect to
        """
        kernel_client = self.kernel_client_class(
            connection_file=connection_file,
            config=self.config,
        )
        kernel_client.load_connection_file()
        kernel_client.start_channels()
        widget = self.widget_factory(config=self.config, local_kernel=False)
        self.init_colors(widget)
        # Frontends attached to kernels they do not own may not close or
        # prompt on exit.
        widget._existing = True
        widget._may_close = False
        widget._confirm_exit = False
        widget._display_banner = self.display_banner
        widget.kernel_client = kernel_client
        widget.kernel_manager = None
        return widget

    def new_frontend_slave(self, current_widget):
        """Create and return a new frontend attached to an existing kernel.

        Parameters
        ----------
        current_widget : JupyterWidget
            The JupyterWidget whose kernel this frontend is to share
        """
        kernel_client = self.kernel_client_class(
            connection_file=current_widget.kernel_client.connection_file,
            config=self.config,
        )
        kernel_client.load_connection_file()
        kernel_client.start_channels()
        widget = self.widget_factory(config=self.config, local_kernel=False)
        self.init_colors(widget)
        widget._existing = True
        widget._may_close = False
        widget._confirm_exit = False
        widget._display_banner = self.display_banner
        widget.kernel_client = kernel_client
        widget.kernel_manager = current_widget.kernel_manager
        return widget

    def init_qt_app(self):
        # separate from qt_elements, because it must run first
        self.app = QtGui.QApplication(['jupyter-qtconsole'])
        self.app.setApplicationName('jupyter-qtconsole')

    def init_qt_elements(self):
        """Create the main widget and window and wire them together."""
        # Create the widget.
        base_path = os.path.abspath(os.path.dirname(__file__))
        icon_path = os.path.join(base_path, 'resources', 'icon',
                                 'JupyterConsole.svg')
        self.app.icon = QtGui.QIcon(icon_path)
        QtGui.QApplication.setWindowIcon(self.app.icon)

        ip = self.ip
        local_kernel = (not self.existing) or is_local_ip(ip)
        self.widget = self.widget_factory(config=self.config,
                                          local_kernel=local_kernel)
        self.init_colors(self.widget)
        self.widget._existing = self.existing
        self.widget._may_close = not self.existing
        self.widget._confirm_exit = self.confirm_exit
        self.widget._display_banner = self.display_banner

        self.widget.kernel_manager = self.kernel_manager
        self.widget.kernel_client = self.kernel_client
        self.window = MainWindow(self.app,
                                 confirm_exit=self.confirm_exit,
                                 new_frontend_factory=self.new_frontend_master,
                                 slave_frontend_factory=self.new_frontend_slave,
                                 connection_frontend_factory=self.new_frontend_connection,
                                 )
        self.window.log = self.log
        self.window.add_tab_with_frontend(self.widget)
        self.window.init_menu_bar()

        # Ignore on OSX, where there is always a menu bar
        if sys.platform != 'darwin' and self.hide_menubar:
            self.window.menuBar().setVisible(False)

        self.window.setWindowTitle('Jupyter QtConsole')

    def init_colors(self, widget):
        """Configure the coloring of the widget"""
        # Note: This will be dramatically simplified when colors
        # are removed from the backend.

        # parse the colors arg down to current known labels
        cfg = self.config
        colors = cfg.ZMQInteractiveShell.colors \
            if 'ZMQInteractiveShell.colors' in cfg else None
        style = cfg.JupyterWidget.syntax_style \
            if 'JupyterWidget.syntax_style' in cfg else None
        sheet = cfg.JupyterWidget.style_sheet \
            if 'JupyterWidget.style_sheet' in cfg else None

        # find the value for colors:
        if colors:
            colors = colors.lower()
            if colors in ('lightbg', 'light'):
                colors = 'lightbg'
            elif colors in ('dark', 'linux'):
                colors = 'linux'
            else:
                colors = 'nocolor'
        elif style:
            # Derive a color scheme from the requested syntax style.
            if style == 'bw':
                colors = 'nocolor'
            elif styles.dark_style(style):
                colors = 'linux'
            else:
                colors = 'lightbg'
        else:
            colors = None

        # Configure the style
        if style:
            widget.style_sheet = styles.sheet_from_template(style, colors)
            widget.syntax_style = style
            widget._syntax_style_changed()
            widget._style_sheet_changed()
        elif colors:
            # use a default dark/light/bw style
            widget.set_default_style(colors=colors)

        if self.stylesheet:
            # we got an explicit stylesheet
            if os.path.isfile(self.stylesheet):
                with open(self.stylesheet) as f:
                    sheet = f.read()
            else:
                raise IOError("Stylesheet %r not found." % self.stylesheet)
        if sheet:
            widget.style_sheet = sheet
            widget._style_sheet_changed()

    def init_signal(self):
        """allow clean shutdown on sigint"""
        signal.signal(signal.SIGINT, lambda sig, frame: self.exit(-2))

        # need a timer, so that QApplication doesn't block until a real
        # Qt event fires (can require mouse movement)
        # timer trick from http://stackoverflow.com/q/4938723/938949
        timer = QtCore.QTimer()
        # Let the interpreter run each 200 ms:
        timer.timeout.connect(lambda: None)
        timer.start(200)
        # hold onto ref, so the timer doesn't get cleaned up
        self._sigint_timer = timer

    def _deprecate_config(self, cfg, old_name, new_name):
        """Warn about deprecated config"""
        if old_name in cfg:
            self.log.warn(
                "Use %s in config, not %s. Outdated config:\n    %s",
                new_name, old_name,
                '\n    '.join(
                    '{name}.{key} = {value!r}'.format(key=key, value=value,
                                                      name=old_name)
                    for key, value in self.config[old_name].items()
                )
            )
            cfg = cfg.copy()
            cfg[new_name].merge(cfg[old_name])
            return cfg

    @catch_config_error
    def initialize(self, argv=None):
        # The Qt application must exist before any widgets are created.
        self.init_qt_app()
        super(JupyterQtConsoleApp, self).initialize(argv)
        if self._dispatching:
            return
        # handle deprecated renames
        for old_name, new_name in [
            ('IPythonQtConsoleApp', 'JupyterQtConsole'),
            ('IPythonWidget', 'JupyterWidget'),
            ('RichIPythonWidget', 'RichJupyterWidget'),
        ]:
            cfg = self._deprecate_config(self.config, old_name, new_name)
            if cfg:
                self.update_config(cfg)
        JupyterConsoleApp.initialize(self, argv)
        self.init_qt_elements()
        self.init_signal()

    def start(self):
        super(JupyterQtConsoleApp, self).start()

        # draw the window
        if self.maximize:
            self.window.showMaximized()
        else:
            self.window.show()
        self.window.raise_()

        # Start the application main loop.
        self.app.exec_()
class SelectOption(ReactWidget, ValueMixin, Selectable):
    """Single option entry for a select widget."""
    # Front-end model class this widget binds to.
    _model_name = Unicode('SelectOptionModel').tag(sync=True)
    description = Unicode(help="SelectOption").tag(sync=True)
    # Key identifying this option within its parent select.
    key = Unicode(help="SelectOption key").tag(sync=True)
    selected = CBool(help="selected or not").tag(sync=True)  # if removed, traitlets goes to inf recursion
class Completer(Configurable):
    """Base tab-completer over a local and an optional global namespace."""

    greedy = CBool(False, config=True,
        help="""Activate greedy completion

        This will enable completion on elements of lists, results of
        function calls, etc., but can be unsafe because the code is
        actually evaluated on TAB.
        """)

    def __init__(self, namespace=None, global_namespace=None, **kwargs):
        """Create a new completer for the command line.

        Completer(namespace=ns, global_namespace=ns2) -> completer instance.

        If unspecified, the default namespace where completions are performed
        is __main__ (technically, __main__.__dict__). Namespaces should be
        given as dictionaries.

        An optional second namespace can be given.  This allows the completer
        to handle cases where both the local and global scopes need to be
        distinguished.

        Completer instances should be used as the completion mechanism of
        readline via the set_completer() call:

        readline.set_completer(Completer(my_namespace).complete)
        """

        # Don't bind to namespace quite yet, but flag whether the user wants a
        # specific namespace or to use __main__.__dict__. This will allow us
        # to bind to __main__.__dict__ at completion time, not now.
        # (Cleanup: use real booleans instead of the original 1/0 ints.)
        if namespace is None:
            self.use_main_ns = True
        else:
            self.use_main_ns = False
            self.namespace = namespace

        # The global namespace, if given, can be bound directly
        if global_namespace is None:
            self.global_namespace = {}
        else:
            self.global_namespace = global_namespace

        super(Completer, self).__init__(**kwargs)

    def complete(self, text, state):
        """Return the next possible completion for 'text'.

        This is called successively with state == 0, 1, 2, ... until it
        returns None.  The completion should begin with 'text'.
        """
        if self.use_main_ns:
            self.namespace = __main__.__dict__

        # Matches are computed once (state == 0) and then served one at a
        # time on subsequent calls, per the readline protocol.
        if state == 0:
            if "." in text:
                self.matches = self.attr_matches(text)
            else:
                self.matches = self.global_matches(text)
        try:
            return self.matches[state]
        except IndexError:
            return None

    def global_matches(self, text):
        """Compute matches when text is a simple name.

        Return a list of all keywords, built-in functions and names currently
        defined in self.namespace or self.global_namespace that match.
        """
        #print 'Completer->global_matches, txt=%r' % text # dbg
        matches = []
        match_append = matches.append
        for lst in [
                keyword.kwlist,
                builtin_mod.__dict__.keys(),
                self.namespace.keys(),
                self.global_namespace.keys()
        ]:
            for word in lst:
                # startswith is the idiomatic (and equivalent) form of the
                # original word[:len(text)] == text prefix test.
                if word.startswith(text) and word != "__builtins__":
                    match_append(word)
        return [cast_unicode_py2(m) for m in matches]

    def attr_matches(self, text):
        """Compute matches when text contains a dot.

        Assuming the text is of the form NAME.NAME....[NAME], and is
        evaluatable in self.namespace or self.global_namespace, it will be
        evaluated and its attributes (as revealed by dir()) are used as
        possible completions.  (For class instances, class members are
        are also considered.)

        WARNING: this can still invoke arbitrary C code, if an object
        with a __getattr__ hook is evaluated.
        """
        #io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
        # Another option, seems to work great. Catches things like ''.<tab>
        m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)

        if m:
            expr, attr = m.group(1, 3)
        elif self.greedy:
            m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
            if not m2:
                return []
            expr, attr = m2.group(1, 2)
        else:
            return []

        # Bug fix: the original bare `except:` clauses also swallowed
        # KeyboardInterrupt/SystemExit raised while eval'ing user
        # expressions; catch only Exception.
        try:
            obj = eval(expr, self.namespace)
        except Exception:
            try:
                obj = eval(expr, self.global_namespace)
            except Exception:
                return []

        if self.limit_to__all__ and hasattr(obj, '__all__'):
            words = get__all__entries(obj)
        else:
            words = dir2(obj)

        try:
            words = generics.complete_object(obj, words)
        except TryNext:
            pass
        except Exception:
            # Silence errors from completion function
            #raise # dbg
            pass
        # Build match list to return
        n = len(attr)
        return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr]
class Upload(ReactWidget):
    """File-upload widget synced with an 'UploadModel' front end."""
    # Front-end model class this widget binds to.
    _model_name = Unicode('UploadModel').tag(sync=True)
    # Field name under which uploaded files are submitted.
    name = Unicode('file', help="name").tag(sync=True)
    # Whether multiple files may be selected at once.
    multiple = CBool(False, help="multiple").tag(sync=True)
    # Presentation style of the uploaded-file list (e.g. 'text').
    list_type = Unicode('text', help="list_type").tag(sync=True)
class IPCompleter(Completer):
    """Extension of the completer class with IPython-specific features"""

    def _greedy_changed(self, name, old, new):
        """update the splitter and readline delims when greedy is changed"""
        if new:
            self.splitter.delims = GREEDY_DELIMS
        else:
            self.splitter.delims = DELIMS

        if self.readline:
            self.readline.set_completer_delims(self.splitter.delims)

    merge_completions = CBool(
        True,
        config=True,
        help="""Whether to merge completion results into a single list

        If False, only the completion results from the first non-empty
        completer will be returned.
        """)
    omit__names = Enum(
        (0, 1, 2),
        default_value=2,
        config=True,
        help="""Instruct the completer to omit private method names

        Specifically, when completing on ``object.<tab>``.

        When 2 [default]: all names that start with '_' will be excluded.

        When 1: all 'magic' names (``__foo__``) will be excluded.

        When 0: nothing will be excluded.
        """)
    limit_to__all__ = CBool(
        default_value=False,
        config=True,
        help="""Instruct the completer to use __all__ for the completion

        Specifically, when completing on ``object.<tab>``.

        When True: only those names in obj.__all__ will be included.

        When False [default]: the __all__ attribute is ignored
        """)

    def __init__(self, shell=None, namespace=None, global_namespace=None,
                 use_readline=True, config=None, **kwargs):
        """IPCompleter() -> completer

        Return a completer object suitable for use by the readline library
        via readline.set_completer().

        Inputs:

        - shell: a pointer to the ipython shell itself.  This is needed
          because this completer knows about magic functions, and those can
          only be accessed via the ipython instance.

        - namespace: an optional dict where completions are performed.

        - global_namespace: secondary optional dict for completions, to
          handle cases (such as IPython embedded inside functions) where
          both Python scopes are visible.

        use_readline : bool, optional
          If true, use the readline library.  This completer can still
          function without readline, though in that case callers must
          provide some extra information on each call about the current
          line."""

        self.magic_escape = ESC_MAGIC
        self.splitter = CompletionSplitter()

        # Readline configuration, only used by the rlcompleter method.
        if use_readline:
            # We store the right version of readline so that later code
            import IPython.utils.rlineimpl as readline
            self.readline = readline
        else:
            self.readline = None

        # _greedy_changed() depends on splitter and readline being defined:
        Completer.__init__(self, namespace=namespace,
                           global_namespace=global_namespace,
                           config=config, **kwargs)

        # List where completion matches will be stored
        self.matches = []
        self.shell = shell
        # Regexp to split filenames with spaces in them
        self.space_name_re = re.compile(r'([^\\] )')
        # Hold a local ref. to glob.glob for speed
        self.glob = glob.glob

        # Determine if we are running on 'dumb' terminals, like (X)Emacs
        # buffers, to avoid completion problems.
        term = os.environ.get('TERM', 'xterm')
        self.dumb_terminal = term in ['dumb', 'emacs']

        # Special handling of backslashes needed in win32 platforms
        if sys.platform == "win32":
            self.clean_glob = self._clean_glob_win32
        else:
            self.clean_glob = self._clean_glob

        #regexp to parse docstring for function signature
        self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
        self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
        #use this if positional argument name is also needed
        #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')

        # All active matcher routines for completion
        self.matchers = [
            self.python_matches,
            self.file_matches,
            self.magic_matches,
            self.python_func_kw_matches,
            self.dict_key_matches,
        ]

    def all_completions(self, text):
        """
        Wrapper around the complete method for the benefit of emacs
        and pydb.
        """
        return self.complete(text)[1]

    def _clean_glob(self, text):
        # Glob for names starting with `text` (POSIX variant).
        return self.glob("%s*" % text)

    def _clean_glob_win32(self, text):
        # Windows variant: normalize backslashes to forward slashes.
        return [f.replace("\\", "/")
                for f in self.glob("%s*" % text)]

    def file_matches(self, text):
        """Match filenames, expanding ~USER type strings.

        Most of the seemingly convoluted logic in this completer is an
        attempt to handle filenames with spaces in them.  And yet it's not
        quite perfect, because Python's readline doesn't expose all of the
        GNU readline details needed for this to be done correctly.

        For a filename with a space in it, the printed completions will be
        only the parts after what's already been typed (instead of the
        full completions, as is normally done).  I don't think with the
        current (as of Python 2.3) Python readline it's possible to do
        better."""

        #io.rprint('Completer->file_matches: <%r>' % text) # dbg

        # chars that require escaping with backslash - i.e. chars
        # that readline treats incorrectly as delimiters, but we
        # don't want to treat as delimiters in filename matching
        # when escaped with backslash
        if text.startswith('!'):
            # Shell-escape prefix: strip it for matching, restore it in
            # every returned completion.
            text = text[1:]
            text_prefix = u'!'
        else:
            text_prefix = u''

        text_until_cursor = self.text_until_cursor
        # track strings with open quotes
        open_quotes = has_open_quotes(text_until_cursor)

        if '(' in text_until_cursor or '[' in text_until_cursor:
            lsplit = text
        else:
            try:
                # arg_split ~ shlex.split, but with unicode bugs fixed by us
                lsplit = arg_split(text_until_cursor)[-1]
            except ValueError:
                # typically an unmatched ", or backslash without escaped char.
                if open_quotes:
                    lsplit = text_until_cursor.split(open_quotes)[-1]
                else:
                    return []
            except IndexError:
                # tab pressed on empty line
                lsplit = ""

        if not open_quotes and lsplit != protect_filename(lsplit):
            # if protectables are found, do matching on the whole escaped name
            has_protectables = True
            text0, text = text, lsplit
        else:
            has_protectables = False
            text = os.path.expanduser(text)

        if text == "":
            # Empty input: offer everything in the current directory.
            return [text_prefix + cast_unicode_py2(protect_filename(f))
                    for f in self.glob("*")]

        # Compute the matches from the filesystem
        m0 = self.clean_glob(text.replace('\\', ''))

        if has_protectables:
            # If we had protectables, we need to revert our changes to the
            # beginning of filename so that we don't double-write the part
            # of the filename we have so far
            len_lsplit = len(lsplit)
            matches = [text_prefix + text0 +
                       protect_filename(f[len_lsplit:]) for f in m0]
        else:
            if open_quotes:
                # if we have a string with an open quote, we don't need to
                # protect the names at all (and we _shouldn't_, as it
                # would cause bugs when the filesystem call is made).
                matches = m0
            else:
                matches = [text_prefix +
                           protect_filename(f) for f in m0]

        # Mark directories in input list by appending '/' to their names.
        return [cast_unicode_py2(x + '/') if os.path.isdir(x) else x
                for x in matches]

    def magic_matches(self, text):
        """Match magics"""
        #print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
        # Get all shell magics now rather than statically, so magics loaded at
        # runtime show up too.
        lsm = self.shell.magics_manager.lsmagic()
        line_magics = lsm['line']
        cell_magics = lsm['cell']
        pre = self.magic_escape
        pre2 = pre + pre

        # Completion logic:
        # - user gives %%: only do cell magics
        # - user gives %: do both line and cell magics
        # - no prefix: do both
        # In other words, line magics are skipped if the user gives %% explicitly
        bare_text = text.lstrip(pre)
        comp = [pre2 + m for m in cell_magics if m.startswith(bare_text)]
        if not text.startswith(pre2):
            comp += [pre + m for m in line_magics if m.startswith(bare_text)]
        return [cast_unicode_py2(c) for c in comp]

    def python_matches(self, text):
        """Match attributes or global python names"""
        #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
        if "." in text:
            try:
                matches = self.attr_matches(text)
                # Apply the omit__names policy only right after a dot.
                if text.endswith('.') and self.omit__names:
                    if self.omit__names == 1:
                        # true if txt is _not_ a __ name, false otherwise:
                        no__name = (lambda txt:
                                    re.match(r'.*\.__.*?__', txt) is None)
                    else:
                        # true if txt is _not_ a _ name, false otherwise:
                        no__name = (lambda txt:
                                    re.match(r'\._.*?',
                                             txt[txt.rindex('.'):]) is None)
                    matches = filter(no__name, matches)
            except NameError:
                # catches <undefined attributes>.<tab>
                matches = []
        else:
            matches = self.global_matches(text)
        return matches

    def _default_arguments_from_docstring(self, doc):
        """Parse the first line of docstring for call signature.

        Docstring should be of the form 'min(iterable[, key=func])\n'.
        It can also parse cython docstring of the form
        'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
        """
        if doc is None:
            return []

        #care only the firstline
        line = doc.lstrip().splitlines()[0]

        #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
        #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
        sig = self.docstring_sig_re.search(line)
        if sig is None:
            return []
        # iterable[, key=func]' -> ['iterable[' ,' key=func]']
        sig = sig.groups()[0].split(',')
        ret = []
        for s in sig:
            #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
            ret += self.docstring_kwd_re.findall(s)
        return ret

    def _default_arguments(self, obj):
        """Return the list of default arguments of obj if it is callable,
        or empty list otherwise."""
        call_obj = obj
        ret = []
        if inspect.isbuiltin(obj):
            pass
        elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
            if inspect.isclass(obj):
                #for cython embededsignature=True the constructor docstring
                #belongs to the object itself not __init__
                ret += self._default_arguments_from_docstring(
                    getattr(obj, '__doc__', ''))
                # for classes, check for __init__,__new__
                call_obj = (getattr(obj, '__init__', None) or
                            getattr(obj, '__new__', None))
            # for all others, check if they are __call__able
            elif hasattr(obj, '__call__'):
                call_obj = obj.__call__
        ret += self._default_arguments_from_docstring(
            getattr(call_obj, '__doc__', ''))

        # Prefer introspection over docstring parsing when a signature is
        # available; keyword-capable parameters only.
        if PY3:
            _keeps = (inspect.Parameter.KEYWORD_ONLY,
                      inspect.Parameter.POSITIONAL_OR_KEYWORD)
            signature = inspect.signature
        else:
            import IPython.utils.signatures
            _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
                      IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
            signature = IPython.utils.signatures.signature
        try:
            sig = signature(call_obj)
            ret.extend(k for k, v in sig.parameters.items() if
                       v.kind in _keeps)
        except ValueError:
            pass

        return list(set(ret))

    def python_func_kw_matches(self, text):
        """Match named parameters (kwargs) of the last open function"""

        if "." in text:  # a parameter cannot be dotted
            return []
        try:
            regexp = self.__funcParamsRegex
        except AttributeError:
            # Lazily build and cache the tokenizing regex.
            regexp = self.__funcParamsRegex = re.compile(r'''
                '.*?(?<!\\)' |    # single quoted strings or
                ".*?(?<!\\)" |    # double quoted strings or
                \w+          |    # identifier
                \S                # other characters
                ''', re.VERBOSE | re.DOTALL)
        # 1. find the nearest identifier that comes before an unclosed
        # parenthesis before the cursor
        # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
        tokens = regexp.findall(self.text_until_cursor)
        tokens.reverse()
        iterTokens = iter(tokens)
        openPar = 0

        for token in iterTokens:
            if token == ')':
                openPar -= 1
            elif token == '(':
                openPar += 1
                if openPar > 0:
                    # found the last unclosed parenthesis
                    break
        else:
            return []
        # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
        ids = []
        isId = re.compile(r'\w+$').match

        while True:
            try:
                ids.append(next(iterTokens))
                if not isId(ids[-1]):
                    ids.pop()
                    break
                if not next(iterTokens) == '.':
                    break
            except StopIteration:
                break
        # lookup the candidate callable matches either using global_matches
        # or attr_matches for dotted names
        if len(ids) == 1:
            callableMatches = self.global_matches(ids[0])
        else:
            callableMatches = self.attr_matches('.'.join(ids[::-1]))
        argMatches = []
        for callableMatch in callableMatches:
            # Bare except is deliberate: eval of a candidate can fail in
            # arbitrary ways; skip such candidates silently.
            try:
                namedArgs = self._default_arguments(
                    eval(callableMatch, self.namespace))
            except:
                continue

            for namedArg in namedArgs:
                if namedArg.startswith(text):
                    argMatches.append(u"%s=" % namedArg)
        return argMatches

    def dict_key_matches(self, text):
        "Match string keys in a dictionary, after e.g. 'foo[' "

        def get_keys(obj):
            # Objects can define their own completions by defining an
            # _ipy_key_completions_() method.
            method = get_real_method(obj, '_ipython_key_completions_')
            if method is not None:
                return method()

            # Special case some common in-memory dict-like types
            if isinstance(obj, dict) or\
               _safe_isinstance(obj, 'pandas', 'DataFrame'):
                try:
                    return list(obj.keys())
                except Exception:
                    return []
            elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
                 _safe_isinstance(obj, 'numpy', 'void'):
                return obj.dtype.names or []
            return []

        try:
            regexps = self.__dict_key_regexps
        except AttributeError:
            # Build and cache the two (greedy / non-greedy) key-matching
            # regexes on first use.  (?x) verbose mode: whitespace in the
            # pattern is insignificant.
            dict_key_re_fmt = r'''(?x)
(  # match dict-referring expression wrt greedy setting
    %s
)
\[   # open bracket
\s*  # and optional whitespace
([uUbB]?  # string prefix (r not handled)
    (?:   # unclosed string
        '(?:[^']|(?<!\\)\\')*
    |
        "(?:[^"]|(?<!\\)\\")*
    )
)?
$
'''
            regexps = self.__dict_key_regexps = {
                False: re.compile(dict_key_re_fmt % '''
                                  # identifiers separated by .
                                  (?!\d)\w+
                                  (?:\.(?!\d)\w+)*
                                  '''),
                True: re.compile(dict_key_re_fmt % '''
                                 .+
                                 ''')
            }

        match = regexps[self.greedy].search(self.text_until_cursor)
        if match is None:
            return []

        expr, prefix = match.groups()
        try:
            obj = eval(expr, self.namespace)
        except Exception:
            try:
                obj = eval(expr, self.global_namespace)
            except Exception:
                return []

        keys = get_keys(obj)
        if not keys:
            return keys
        closing_quote, token_offset, matches = match_dict_keys(
            keys, prefix, self.splitter.delims)
        if not matches:
            return matches

        # get the cursor position of
        # - the text being completed
        # - the start of the key text
        # - the start of the completion
        text_start = len(self.text_until_cursor) - len(text)
        if prefix:
            key_start = match.start(2)
            completion_start = key_start + token_offset
        else:
            key_start = completion_start = match.end()

        # grab the leading prefix, to make sure all completions start with `text`
        if text_start > key_start:
            leading = ''
        else:
            leading = text[text_start:completion_start]

        # the index of the `[` character
        bracket_idx = match.end(1)

        # append closing quote and bracket as appropriate
        # this is *not* appropriate if the opening quote or bracket is outside
        # the text given to this method
        suf = ''
        continuation = self.line_buffer[len(self.text_until_cursor):]
        if key_start > text_start and closing_quote:
            # quotes were opened inside text, maybe close them
            if continuation.startswith(closing_quote):
                continuation = continuation[len(closing_quote):]
            else:
                suf += closing_quote
        if bracket_idx > text_start:
            # brackets were opened inside text, maybe close them
            if not continuation.startswith(']'):
                suf += ']'

        return [leading + k + suf for k in matches]

    def unicode_name_matches(self, text):
        u"""Match Latex-like syntax for unicode characters base
        on the name of the character.

        This does  \\GREEK SMALL LETTER ETA -> η

        Works only on valid python 3 identifier, or on combining characters
        that will combine to form a valid identifier.

        Used on Python 3 only.
        """
        slashpos = text.rfind('\\')
        if slashpos > -1:
            s = text[slashpos + 1:]
            try:
                unic = unicodedata.lookup(s)
                # allow combining chars
                if ('a' + unic).isidentifier():
                    return '\\' + s, [unic]
            except KeyError as e:
                pass
        return u'', []

    def latex_matches(self, text):
        u"""Match Latex syntax for unicode characters.

        This does both \\alp -> \\alpha and \\alpha -> α

        Used on Python 3 only.
        """
        slashpos = text.rfind('\\')
        if slashpos > -1:
            s = text[slashpos:]
            if s in latex_symbols:
                # Try to complete a full latex symbol to unicode
                # \\alpha -> α
                return s, [latex_symbols[s]]
            else:
                # If a user has partially typed a latex symbol, give them
                # a full list of options \al -> [\aleph, \alpha]
                matches = [k for k in latex_symbols if k.startswith(s)]
                return s, matches
        return u'', []

    def dispatch_custom_completer(self, text):
        # Give user-registered completers first shot at the current line.
        line = self.line_buffer
        if not line.strip():
            return None

        # Create a little structure to pass all the relevant information about
        # the current completion to any custom completer.
        event = Bunch()
        event.line = line
        event.symbol = text
        cmd = line.split(None, 1)[0]
        event.command = cmd
        event.text_until_cursor = self.text_until_cursor

        #print "\ncustom:{%s]\n" % event # dbg

        # for foo etc, try also to find completer for %foo
        if not cmd.startswith(self.magic_escape):
            try_magic = self.custom_completers.s_matches(
                self.magic_escape + cmd)
        else:
            try_magic = []

        for c in itertools.chain(
                self.custom_completers.s_matches(cmd),
                try_magic,
                self.custom_completers.flat_matches(self.text_until_cursor)):
            try:
                res = c(event)
                if res:
                    # first, try case sensitive match
                    withcase = [cast_unicode_py2(r)
                                for r in res if r.startswith(text)]
                    if withcase:
                        return withcase
                    # if none, then case insensitive ones are ok too
                    text_low = text.lower()
                    return [cast_unicode_py2(r)
                            for r in res if r.lower().startswith(text_low)]
            except TryNext:
                pass

        return None

    def complete(self, text=None, line_buffer=None, cursor_pos=None):
        """Find completions for the given text and line context.

        Note that both the text and the line_buffer are optional, but at least
        one of them must be given.

        Parameters
        ----------
          text : string, optional
            Text to perform the completion on.  If not given, the line buffer
            is split using the instance's CompletionSplitter object.

          line_buffer : string, optional
            If not given, the completer attempts to obtain the current line
            buffer via readline.  This keyword allows clients which are
            requesting for text completions in non-readline contexts to inform
            the completer of the entire text.

          cursor_pos : int, optional
            Index of the cursor in the full line buffer.  Should be provided by
            remote frontends where kernel has no access to frontend state.

        Returns
        -------
        text : str
          Text that was actually used in the completion.

        matches : list
          A list of completion matches.
        """
        # io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg

        # if the cursor position isn't given, the only sane assumption we can
        # make is that it's at the end of the line (the common case)
        if cursor_pos is None:
            cursor_pos = len(line_buffer) if text is None else len(text)

        if PY3:
            # Unicode/latex-style completions short-circuit the rest.
            base_text = text if not line_buffer else line_buffer[:cursor_pos]
            latex_text, latex_matches = self.latex_matches(base_text)
            if latex_matches:
                return latex_text, latex_matches
            name_text = ''
            name_matches = []
            for meth in (self.unicode_name_matches,
                         back_latex_name_matches,
                         back_unicode_name_matches):
                name_text, name_matches = meth(base_text)
                if name_text:
                    return name_text, name_matches

        # if text is either None or an empty string, rely on the line buffer
        if not text:
            text = self.splitter.split_line(line_buffer, cursor_pos)

        # If no line buffer is given, assume the input text is all there was
        if line_buffer is None:
            line_buffer = text

        self.line_buffer = line_buffer
        self.text_until_cursor = self.line_buffer[:cursor_pos]
        # io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg

        # Start with a clean slate of completions
        self.matches[:] = []
        custom_res = self.dispatch_custom_completer(text)
        if custom_res is not None:
            # did custom completers produce something?
            self.matches = custom_res
        else:
            # Extend the list of completions with the results of each
            # matcher, so we return results to the user from all
            # namespaces.
            if self.merge_completions:
                self.matches = []
                for matcher in self.matchers:
                    try:
                        self.matches.extend(matcher(text))
                    except:
                        # Show the ugly traceback if the matcher causes an
                        # exception, but do NOT crash the kernel!
                        sys.excepthook(*sys.exc_info())
            else:
                for matcher in self.matchers:
                    self.matches = matcher(text)
                    if self.matches:
                        break
        # FIXME: we should extend our api to return a dict with completions for
        # different types of objects.  The rlcomplete() method could then
        # simply collapse the dict into a list for readline, but we'd have
        # richer completion semantics in other evironments.
        self.matches = sorted(set(self.matches), key=completions_sorting_key)

        #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
        return text, self.matches

    def rlcomplete(self, text, state):
        """Return the state-th possible completion for 'text'.

        This is called successively with state == 0, 1, 2, ... until it
        returns None.  The completion should begin with 'text'.

        Parameters
        ----------
          text : string
            Text to perform the completion on.

          state : int
            Counter used by readline.
        """
        if state == 0:

            self.line_buffer = line_buffer = self.readline.get_line_buffer()
            cursor_pos = self.readline.get_endidx()

            #io.rprint("\nRLCOMPLETE: %r %r %r" %
            #          (text, line_buffer, cursor_pos) ) # dbg

            # if there is only a tab on a line with only whitespace, instead of
            # the mostly useless 'do you want to see all million completions'
            # message, just do the right thing and give the user his tab!
            # Incidentally, this enables pasting of tabbed text from an editor
            # (as long as autoindent is off).

            # It should be noted that at least pyreadline still shows file
            # completions - is there a way around it?

            # don't apply this on 'dumb' terminals, such as emacs buffers, so
            # we don't interfere with their own tab-completion mechanism.
            if not (self.dumb_terminal or line_buffer.strip()):
                self.readline.insert_text('\t')
                sys.stdout.flush()
                return None

            # Note: debugging exceptions that may occur in completion is very
            # tricky, because readline unconditionally silences them.  So if
            # during development you suspect a bug in the completion code, turn
            # this flag on temporarily by uncommenting the second form (don't
            # flip the value in the first line, as the '# dbg' marker can be
            # automatically detected and is used elsewhere).
            DEBUG = False
            #DEBUG = True # dbg
            if DEBUG:
                try:
                    self.complete(text, line_buffer, cursor_pos)
                except:
                    import traceback
                    traceback.print_exc()
            else:
                # The normal production version is here

                # This method computes the self.matches array
                self.complete(text, line_buffer, cursor_pos)

        try:
            return self.matches[state]
        except IndexError:
            return None
class Table(ReactWidget):
    """Declarative model for a table widget.

    Every trait is tagged ``sync=True`` so its value is mirrored between
    the Python side and the frontend model named by ``_model_name``.
    """
    # Frontend model class that renders this widget.
    _model_name = Unicode('TableModel').tag(sync=True)
    # Rows to display — presumably a list of record dicts consumed by the
    # frontend; TODO confirm expected element shape against frontend code.
    data_source = List(help="data_source").tag(sync=True)
    # Column definitions consumed by the frontend table component.
    columns = List(help="columns").tag(sync=True)
    # Whether the table is drawn with borders.
    bordered = CBool(False, help="bordered").tag(sync=True)
    # Visual density of the table (default 'small').
    size = Unicode('small', help="size").tag(sync=True)
class TerminalInteractiveShell(InteractiveShell):
    """Readline-based terminal shell (class continues past this chunk)."""

    autoedit_syntax = CBool(
        False,
        config=True,
        help="auto editing of files with syntax errors.")
    confirm_exit = CBool(
        True,
        config=True,
        help="""
        Set to confirm when you try to exit IPython with an EOF (Control-D
        in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
        you can force a direct exit without any confirmation.""",
    )
    # This display_banner only controls whether or not self.show_banner()
    # is called when mainloop/interact are called.  The default is False
    # because for the terminal based application, the banner behavior
    # is controlled by the application.
    display_banner = CBool(False)  # This isn't configurable!
    embedded = CBool(False)
    embedded_active = CBool(False)
    editor = Unicode(
        get_default_editor(),
        config=True,
        help="Set the editor used by IPython (default to $EDITOR/vi/notepad).")
    pager = Unicode('less',
                    config=True,
                    help="The shell program to be used for paging.")
    screen_length = Integer(
        0,
        config=True,
        help="""Number of lines of your screen, used to control printing of
        very long strings.  Strings longer than this number of lines will be
        sent through a pager instead of directly printed.  The default value
        for this is 0, which means IPython will auto-detect your screen size
        every time it needs to print certain potentially long strings (this
        doesn't change the behavior of the 'print' keyword, it's only
        triggered internally). If for some reason this isn't working well (it
        needs curses support), specify it yourself. Otherwise don't change
        the default.""",
    )

    _term_reset = "\033[0m"
    # This is ugly because not only do we have a bunch of ansi escape
    # sequences, but we also have to wrap each escape code in \001 and \002
    # for readline to be able to insert history matches properly.
    # prompt_in1 = "\033[32mIn [\033[32;1m{}\033[0;32m]: " + _term_reset
    prompt_in1 = "\001\033[32m\002In [\001\033[32;1m\002{}\001\033[0;32m\002]: \001" + _term_reset + "\002"
    prompt_in2 = "\001\033[32m\002 ...: \001" + _term_reset + "\002"

    @default('displayhook_class')
    def _displayhook_class_default(self):
        # Build an ANSI-colored Out[n] prompt hook bound to this shell's
        # reset sequence.
        from IPython.core.displayhook import DisplayHook

        class OldSchoolPrompt(DisplayHook):
            out = "\033[31mOut[\033[31;1m{}\033[0;31m]: "
            reset = self._term_reset

            def write_output_prompt(self):
                out = self.out.format(self.prompt_count)
                sys.stdout.write(out + self.reset)

        return OldSchoolPrompt

    term_title = CBool(False,
                       config=True,
                       help="Enable auto setting the terminal title.")
    usage = Unicode(interactive_usage)

    # This `using_paste_magics` is used to detect whether the code is being
    # executed via paste magics functions
    using_paste_magics = CBool(False)

    # ignore prompt_toolkit specific settings, see this github issue for
    # more context: https://github.com/ipython/rlipython/issues/13
    editing_mode = Unicode("ignored").tag(config=True)
    display_completions = Unicode("ignored").tag(config=True)

    # In the terminal, GUI control is done via PyOS_InputHook
    @staticmethod
    def enable_gui(gui=None, app=None):
        """Switch amongst GUI input hooks by name.
        """
        # Deferred import
        from IPython.lib.inputhook import enable_gui as real_enable_gui
        try:
            return real_enable_gui(gui, app)
        except ValueError as e:
            raise UsageError("%s" % e)

    system = InteractiveShell.system_raw

    #-------------------------------------------------------------------------
    # Overrides of init stages
    #-------------------------------------------------------------------------

    def init_display_formatter(self):
        super(TerminalInteractiveShell, self).init_display_formatter()
        # terminal only supports plaintext
        self.display_formatter.active_types = ['text/plain']

    #-------------------------------------------------------------------------
    # Things related to readline
    #-------------------------------------------------------------------------

    def init_readline(self):
        """Command history completion/saving/reloading."""

        self.readline_use = True
        self._custom_readline_config = False
        self.readline_parse_and_bind = [
            'tab: complete',
            '"\C-l": clear-screen',
            'set show-all-if-ambiguous on',
            '"\C-o": tab-insert',
            '"\C-r": reverse-search-history',
            '"\C-s": forward-search-history',
            '"\C-p": history-search-backward',
            '"\C-n": history-search-forward',
            '"\e[A": history-search-backward',
            '"\e[B": history-search-forward',
            '"\C-k": kill-line',
            '"\C-u": unix-line-discard'
        ]
        self.readline_remove_delims = '-/~'
        self.multiline_history = False

        if self.readline_use:
            from . import rlineimpl as readline

        self.rl_next_input = None
        self.rl_do_indent = False

        if not self.readline_use or not readline.have_readline:
            self.readline = None
            # Set a number of methods that depend on readline to be no-op
            self.readline_no_record = NoOpContext()
            self.set_readline_completer = no_op
            self.set_custom_completer = no_op
            if self.readline_use:
                warn('Readline services not available or not loaded.')
        else:
            self.has_readline = True
            self.readline = readline
            sys.modules['readline'] = readline

            # Platform-specific configuration
            if os.name == 'nt':
                # FIXME - check with Frederick to see if we can harmonize
                # naming conventions with pyreadline to avoid this
                # platform-dependent check
                self.readline_startup_hook = readline.set_pre_input_hook
            else:
                self.readline_startup_hook = readline.set_startup_hook

            # Readline config order:
            # - IPython config (default value)
            # - custom inputrc
            # - IPython config (user customized)

            # load IPython config before inputrc if default
            # skip if libedit because parse_and_bind syntax is different
            if not self._custom_readline_config and not readline.uses_libedit:
                for rlcommand in self.readline_parse_and_bind:
                    readline.parse_and_bind(rlcommand)

            # Load user's initrc file (readline config)
            # Or if libedit is used, load editrc.
            inputrc_name = os.environ.get('INPUTRC')
            if inputrc_name is None:
                inputrc_name = '.inputrc'
                if readline.uses_libedit:
                    inputrc_name = '.editrc'
                inputrc_name = os.path.join(self.home_dir, inputrc_name)
            if os.path.isfile(inputrc_name):
                try:
                    readline.read_init_file(inputrc_name)
                except:
                    warn('Problems reading readline initialization file <%s>'
                         % inputrc_name)

            # load IPython config after inputrc if user has customized
            if self._custom_readline_config:
                for rlcommand in self.readline_parse_and_bind:
                    readline.parse_and_bind(rlcommand)

            # Remove some chars from the delimiters list.  If we encounter
            # unicode chars, discard them.
            delims = readline.get_completer_delims()
            if not py3compat.PY3:
                delims = delims.encode("ascii", "ignore")
            for d in self.readline_remove_delims:
                delims = delims.replace(d, "")
            delims = delims.replace(ESC_MAGIC, '')
            readline.set_completer_delims(delims)
            # Store these so we can restore them if something like rpy2 modifies
            # them.
            self.readline_delims = delims
            # otherwise we end up with a monster history after a while:
            readline.set_history_length(self.history_length)

            self.refill_readline_hist()
            self.readline_no_record = ReadlineNoRecord(self)

        # Configure auto-indent for all platforms
        self.set_autoindent(self.autoindent)

    def init_completer(self):
        """Initialize the completion machinery.

        This creates completion machinery that can be used by client code,
        either interactively in-process (typically triggered by the readline
        library), programmatically (such as in test suites) or out-of-process
        (typically over the network by remote frontends).
        """
        from .completer import RLCompleter
        from IPython.core.completerlib import (module_completer,
                                               magic_run_completer,
                                               cd_completer,
                                               reset_completer)

        self.Completer = RLCompleter(
            shell=self,
            namespace=self.user_ns,
            global_namespace=self.user_global_ns,
            parent=self,
        )
        self.configurables.append(self.Completer)

        # Add custom completers to the basic ones built into IPCompleter
        sdisp = self.strdispatchers.get('complete_command', StrDispatch())
        self.strdispatchers['complete_command'] = sdisp
        self.Completer.custom_completers = sdisp

        self.set_hook('complete_command', module_completer, str_key='import')
        self.set_hook('complete_command', module_completer, str_key='from')
        self.set_hook('complete_command', module_completer, str_key='%aimport')
        self.set_hook('complete_command', magic_run_completer, str_key='%run')
        self.set_hook('complete_command', cd_completer, str_key='%cd')
        self.set_hook('complete_command', reset_completer, str_key='%reset')

        self.init_readline()
        if self.has_readline:
            self.set_readline_completer()

    def set_readline_completer(self):
"""Reset readline's completer to be our own.""" self.Completer.readline = self.readline self.readline.set_completer(self.Completer.rlcomplete) def pre_readline(self): """readline hook to be used at the start of each line. It handles auto-indent and text from set_next_input.""" if self.rl_do_indent: self.readline.insert_text(self._indent_current_str()) if self.rl_next_input is not None: self.readline.insert_text(self.rl_next_input) self.rl_next_input = None def refill_readline_hist(self): # Load the last 1000 lines from history self.readline.clear_history() stdin_encoding = sys.stdin.encoding or "utf-8" last_cell = u"" for _, _, cell in self.history_manager.get_tail( self.history_load_length, include_latest=True): # Ignore blank lines and consecutive duplicates cell = cell.rstrip() if cell and (cell != last_cell): try: if self.multiline_history: self.readline.add_history( py3compat.unicode_to_str(cell, stdin_encoding)) else: for line in cell.splitlines(): self.readline.add_history( py3compat.unicode_to_str(line, stdin_encoding)) last_cell = cell except (TypeError, ValueError) as e: # The history DB can get corrupted so it returns strings # containing null bytes, which readline objects to. warn(("Failed to add string to readline history.\n" "Error: {}\n" "Cell: {!r}").format(e, cell)) #------------------------------------------------------------------------- # Things related to the terminal #------------------------------------------------------------------------- @property def usable_screen_length(self): if self.screen_length == 0: return 0 else: num_lines_bot = self.separate_in.count('\n') + 1 return self.screen_length - num_lines_bot def _term_title_changed(self, name, new_value): self.init_term_title() def init_term_title(self): # Enable or disable the terminal title. 
if self.term_title: toggle_set_term_title(True) set_term_title('IPython: ' + abbrev_cwd()) else: toggle_set_term_title(False) #------------------------------------------------------------------------- # Things related to aliases #------------------------------------------------------------------------- def init_alias(self): # The parent class defines aliases that can be safely used with any # frontend. super(TerminalInteractiveShell, self).init_alias() # Now define aliases that only make sense on the terminal, because they # need direct access to the console in a way that we can't emulate in # GUI or web frontend if os.name == 'posix': aliases = [('clear', 'clear'), ('more', 'more'), ('less', 'less'), ('man', 'man')] else: aliases = [] for name, cmd in aliases: self.alias_manager.soft_define_alias(name, cmd) #------------------------------------------------------------------------- # Mainloop and code execution logic #------------------------------------------------------------------------- def mainloop(self, display_banner=None): """Start the mainloop. If an optional banner argument is given, it will override the internally created default banner. """ with self.builtin_trap, self.display_trap: while 1: try: self.interact(display_banner=display_banner) #self.interact_with_readline() # XXX for testing of a readline-decoupled repl loop, call # interact_with_readline above break except KeyboardInterrupt: # this should not be necessary, but KeyboardInterrupt # handling seems rather unpredictable... 
self.write("\nKeyboardInterrupt in interact()\n") def _replace_rlhist_multiline(self, source_raw, hlen_before_cell): """Store multiple lines as a single entry in history""" # do nothing without readline or disabled multiline if not self.has_readline or not self.multiline_history: return hlen_before_cell # windows rl has no remove_history_item if not hasattr(self.readline, "remove_history_item"): return hlen_before_cell # skip empty cells if not source_raw.rstrip(): return hlen_before_cell # nothing changed do nothing, e.g. when rl removes consecutive dups hlen = self.readline.get_current_history_length() if hlen == hlen_before_cell: return hlen_before_cell for i in range(hlen - hlen_before_cell): self.readline.remove_history_item(hlen - i - 1) stdin_encoding = get_stream_enc(sys.stdin, 'utf-8') self.readline.add_history( py3compat.unicode_to_str(source_raw.rstrip(), stdin_encoding)) return self.readline.get_current_history_length() def interact(self, display_banner=None): """Closely emulate the interactive Python console.""" # batch run -> do not interact if self.exit_now: return if display_banner is None: display_banner = self.display_banner if isinstance(display_banner, py3compat.string_types): self.show_banner(display_banner) elif display_banner: self.show_banner() more = False if self.has_readline: self.readline_startup_hook(self.pre_readline) hlen_b4_cell = self.readline.get_current_history_length() else: hlen_b4_cell = 0 # exit_now is set by a call to %Exit or %Quit, through the # ask_exit callback. while not self.exit_now: self.hooks.pre_prompt_hook() if more: # Our default continuation prompt has the right length at the # beginning (when execution count is in the single digits). 
# We add padding to accomodate multi-digit execution counts try: pad = " " * (len(str(self.execution_count)) - 1) prompt = pad + self.prompt_in2 except: self.showtraceback() if self.autoindent: self.rl_do_indent = True else: try: prompt = self.separate_in + self.prompt_in1.format( self.execution_count) except: self.showtraceback() try: line = self.raw_input(prompt) if self.exit_now: # quick exit on sys.std[in|out] close break if self.autoindent: self.rl_do_indent = False except KeyboardInterrupt: #double-guard against keyboardinterrupts during kbdint handling try: self.write('\n' + self.get_exception_only()) source_raw = self.input_splitter.raw_reset() hlen_b4_cell = \ self._replace_rlhist_multiline(source_raw, hlen_b4_cell) more = False except KeyboardInterrupt: pass except EOFError: if self.autoindent: self.rl_do_indent = False if self.has_readline: self.readline_startup_hook(None) self.write('\n') self.exit() except bdb.BdbQuit: warn( 'The Python debugger has exited with a BdbQuit exception.\n' 'Because of how pdb handles the stack, it is impossible\n' 'for IPython to properly format this particular exception.\n' 'IPython will resume normal operation.') except: # exceptions here are VERY RARE, but they can be triggered # asynchronously by signal handlers, for example. self.showtraceback() else: try: self.input_splitter.push(line) more = self.input_splitter.push_accepts_more() except SyntaxError: # Run the code directly - run_cell takes care of displaying # the exception. more = False if (self.SyntaxTB.last_syntax_error and self.autoedit_syntax): self.edit_syntax_error() if not more: source_raw = self.input_splitter.raw_reset() self.run_cell(source_raw, store_history=True) hlen_b4_cell = \ self._replace_rlhist_multiline(source_raw, hlen_b4_cell) # Turn off the exit flag, so the mainloop can be restarted if desired self.exit_now = False def raw_input(self, prompt=''): """Write a prompt and read a line. The returned line does not include the trailing newline. 
When the user enters the EOF key sequence, EOFError is raised. Parameters ---------- prompt : str, optional A string to be printed to prompt the user. """ # raw_input expects str, but we pass it unicode sometimes prompt = py3compat.cast_bytes_py2(prompt) try: line = py3compat.cast_unicode_py2(self.raw_input_original(prompt)) except ValueError: warn("\n********\nYou or a %run:ed script called sys.stdin.close()" " or sys.stdout.close()!\nExiting IPython!\n") self.ask_exit() return "" # Try to be reasonably smart about not re-indenting pasted input more # than necessary. We do this by trimming out the auto-indent initial # spaces, if the user's actual input started itself with whitespace. if self.autoindent: if num_ini_spaces(line) > self.indent_current_nsp: line = line[self.indent_current_nsp:] self.indent_current_nsp = 0 return line #------------------------------------------------------------------------- # Methods to support auto-editing of SyntaxErrors. #------------------------------------------------------------------------- def edit_syntax_error(self): """The bottom half of the syntax error handler called in the main loop. Loop until syntax error is fixed or user cancels. """ while self.SyntaxTB.last_syntax_error: # copy and clear last_syntax_error err = self.SyntaxTB.clear_err_state() if not self._should_recompile(err): return try: # may set last_syntax_error again if a SyntaxError is raised self.safe_execfile(err.filename, self.user_ns) except: self.showtraceback() else: try: f = open(err.filename) try: # This should be inside a display_trap block and I # think it is. sys.displayhook(f.read()) finally: f.close() except: self.showtraceback() def _should_recompile(self, e): """Utility routine for edit_syntax_error""" if e.filename in ('<ipython console>', '<input>', '<string>', '<console>', '<BackgroundJob compilation>', None): return False try: if (self.autoedit_syntax and not self.ask_yes_no( 'Return to editor to correct syntax error? 
' '[Y/n] ', 'y')): return False except EOFError: return False def int0(x): try: return int(x) except TypeError: return 0 # always pass integer line and offset values to editor hook try: self.hooks.fix_error_editor(e.filename, int0(e.lineno), int0(e.offset), e.msg) except TryNext: warn('Could not open editor') return False return True #------------------------------------------------------------------------- # Things related to exiting #------------------------------------------------------------------------- def ask_exit(self): """ Ask the shell to exit. Can be overiden and used as a callback. """ self.exit_now = True def exit(self): """Handle interactive exit. This method calls the ask_exit callback.""" if self.confirm_exit: if self.ask_yes_no('Do you really want to exit ([y]/n)?', 'y', 'n'): self.ask_exit() else: self.ask_exit() #------------------------------------------------------------------------- # Things related to magics #------------------------------------------------------------------------- def init_magics(self): super(TerminalInteractiveShell, self).init_magics() self.register_magics(TerminalMagics) def showindentationerror(self): super(TerminalInteractiveShell, self).showindentationerror() if not self.using_paste_magics: print("If you want to paste code into IPython, try the " "%paste and %cpaste magic functions.")