Example 1
class _Selection(LabeledWidget, ValueWidget, CoreWidget):
    """Base class for Selection widgets

    ``options`` can be specified as a list of values, list of (label, value)
    tuples, or a dict of {label: value}. The labels are the strings that will be
    displayed in the UI, representing the actual Python choices, and should be
    unique. If labels are not specified, they are generated from the values.

    When programmatically setting the value, a reverse lookup is performed
    among the options to check that the value is valid. The reverse lookup uses
    the equality operator by default, but another predicate may be provided via
    the ``equals`` keyword argument. For example, when dealing with numpy arrays,
    one may set equals=np.array_equal.

    Only labels are synced (values are converted to/from labels), so the labels should
    be unique.
    """

    value = Any(help="Selected value").tag(sync=True,
                                           to_json=_value_to_label,
                                           from_json=_label_to_value)

    options = Union([List(), Dict()],
    help="""List of values, or (label, value) tuples, or a dict of {label: value} pairs that the user can select.

    The labels are the strings that will be displayed in the UI, representing the
    actual Python choices, and should be unique. If labels are not specified, they
    are generated from the values.

    The keys are also available as _options_labels.
    """)
    _options_dict = Dict(read_only=True)
    _options_labels = Tuple(read_only=True).tag(sync=True)
    _options_values = Tuple(read_only=True)

    _model_module = Unicode('jupyter-js-widgets').tag(sync=True)
    _view_module = Unicode('jupyter-js-widgets').tag(sync=True)

    disabled = Bool(help="Enable or disable user changes").tag(sync=True)

    def __init__(self, *args, **kwargs):
        self.equals = kwargs.pop('equals', lambda x, y: x == y)
        super(_Selection, self).__init__(*args, **kwargs)

    def _make_options(self, x):
        """Standardize the options list format.

        The returned list should be in the format [('label', value), ('label', value), ...].

        The input can be
        * a Mapping of labels to values
        * an iterable of values (of which at least one is not a list or tuple of length 2)
        * an iterable with entries that are lists or tuples of the form ('label', value)
        """
        # Return a list of key-value pairs where the keys are strings
        # If x is a dict, convert it to list format.
        if isinstance(x, Mapping):
            return [(unicode_type(k), v) for k, v in x.items()]

        # If any entry of x is not a list or tuple of length 2, convert
        # the entries to unicode for the labels.
        for y in x:
            if not (isinstance(y, (list, tuple)) and len(y) == 2):
                return [(unicode_type(i), i) for i in x]

        # x is already in the correct format: a list of 2-tuples.
        # The first element of each tuple should be unicode, this might
        # not yet be the case.
        return [(unicode_type(k), v) for k, v in x]

    @validate('options')
    def _validate_options(self, proposal):
        """Handles when the options tuple has been changed.

        Setting options with a dict implies setting option labels from the keys of the dict.
        """
        new = proposal['value']
        options = self._make_options(new)
        self.set_trait('_options_dict', dict(options))
        self.set_trait('_options_labels', [i[0] for i in options])
        self.set_trait('_options_values', [i[1] for i in options])
        return new

    @observe('options')
    def _value_in_options(self, change):
        "Ensure the value is an option; if not, set to the first value"
        # ensure that the chosen value is still one of the options
        if len(self.options) == 0:
            self.value = None
        else:
            try:
                _value_to_label(self.value, self)
            except KeyError:
                self.value = self._options_values[0]

    @validate('value')
    def _validate_value(self, proposal):
        value = proposal['value']
        if len(self.options) == 0:
            if value is None:
                return value
            else:
                raise TraitError('Invalid selection: empty options list')
        else:
            try:
                _value_to_label(value, self)
                return value
            except KeyError:
                raise TraitError('Invalid selection')
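
For illustration only, here is a minimal usage sketch of a concrete _Selection subclass; it assumes the standard ipywidgets Dropdown widget and numpy are importable, and is not part of the class above.

# Hypothetical usage sketch for a _Selection subclass (Dropdown).
import numpy as np
from ipywidgets import Dropdown

# options as a dict of {label: value}; labels are what the UI displays
dropdown = Dropdown(options={'one': 1, 'two': 2, 'three': 3}, value=2)

# options as plain values; labels are generated from the values
dropdown.options = [1, 2, 3]
dropdown.value = 3

# a custom equality predicate for the reverse value lookup, e.g. numpy arrays
arrays = [np.zeros(3), np.ones(3)]
array_dropdown = Dropdown(options=[('zeros', arrays[0]), ('ones', arrays[1])],
                          equals=np.array_equal)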
Example 2
class Button(DOMWidget, CoreWidget):
    """Button widget.

    This widget has an `on_click` method that allows you to listen for the
    user clicking on the button.  The click event itself is stateless.

    Parameters
    ----------
    description: str
       description displayed next to the button
    tooltip: str
       tooltip caption of the button
    icon: str
       font-awesome icon name
    """
    _model_module = Unicode('jupyter-js-widgets').tag(sync=True)
    _view_module = Unicode('jupyter-js-widgets').tag(sync=True)
    _view_name = Unicode('ButtonView').tag(sync=True)
    _model_name = Unicode('ButtonModel').tag(sync=True)

    description = Unicode('Button', help="Button label.").tag(sync=True)
    tooltip = Unicode(help="Tooltip caption of the button.").tag(sync=True)
    disabled = Bool(False,
                    help="Enable or disable user changes.").tag(sync=True)
    icon = Unicode(
        '', help="Font-awesome icon name, without the 'fa-' prefix.").tag(
            sync=True)

    button_style = CaselessStrEnum(
        values=['primary', 'success', 'info', 'warning', 'danger', ''],
        default_value='',
        help="""Use a predefined styling for the button.""").tag(sync=True)

    style = Instance(ButtonStyle).tag(sync=True, **widget_serialization)

    @default('style')
    def _default_style(self):
        return ButtonStyle()

    def __init__(self, **kwargs):
        super(Button, self).__init__(**kwargs)
        self._click_handlers = CallbackDispatcher()
        self.on_msg(self._handle_button_msg)

    @validate('icon')
    def _validate_icon(self, proposal):
        """Strip 'fa-' if necessary'"""
        value = proposal['value']
        if value.startswith('fa-'):
            warnings.warn(
                "icons names no longer start with 'fa-', "
                "just use the class name itself (for example, 'check' instead of 'fa-check')",
                DeprecationWarning)
            value = value[3:]
        return value

    def on_click(self, callback, remove=False):
        """Register a callback to execute when the button is clicked.

        The callback will be called with one argument, the clicked button
        widget instance.

        Parameters
        ----------
        remove: bool (optional)
            Set to true to remove the callback from the list of callbacks.
        """
        self._click_handlers.register_callback(callback, remove=remove)

    def _handle_button_msg(self, _, content, buffers):
        """Handle a msg from the front-end.

        Parameters
        ----------
        content: dict
            Content of the msg.
        """
        if content.get('event', '') == 'click':
            self._click_handlers(self)
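
A short usage sketch for the Button above, assuming it is exposed as ipywidgets.Button; the handler name is a placeholder.

# Hypothetical usage sketch for Button.on_click.
from ipywidgets import Button

button = Button(description='Run', tooltip='Execute the analysis', icon='check')

def on_button_clicked(b):
    # `b` is the Button instance that was clicked
    print('Button {!r} clicked'.format(b.description))

button.on_click(on_button_clicked)
# Pass remove=True with the same callback to unregister it:
# button.on_click(on_button_clicked, remove=True)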
Example 3
class OAuthenticator(Authenticator):
    """Base class for OAuthenticators

    Subclasses must override:

    login_service (string identifying the service provider)
    login_handler (likely a subclass of OAuthLoginHandler)
    authenticate (method takes one arg - the request handler handling the oauth callback)
    """

    scope = List(Unicode(), config=True,
        help="""The OAuth scopes to request.
        See the OAuth documentation of your OAuth provider for options.
        For GitHub in particular, you can see github_scopes.md in this repo.
        """
    )

    login_service = 'override in subclass'
    oauth_callback_url = Unicode(
        os.getenv('OAUTH_CALLBACK_URL', ''),
        config=True,
        help="""Callback URL to use.
        Typically `https://{host}/hub/oauth_callback`"""
    )

    client_id_env = ''
    client_id = Unicode(config=True)
    def _client_id_default(self):
        if self.client_id_env:
            client_id = os.getenv(self.client_id_env, '')
            if client_id:
                return client_id
        return os.getenv('OAUTH_CLIENT_ID', '')

    client_secret_env = ''
    client_secret = Unicode(config=True)
    def _client_secret_default(self):
        if self.client_secret_env:
            client_secret = os.getenv(self.client_secret_env, '')
            if client_secret:
                return client_secret
        return os.getenv('OAUTH_CLIENT_SECRET', '')

    validate_server_cert_env = 'OAUTH_TLS_VERIFY'
    validate_server_cert = Bool(config=True)
    def _validate_server_cert_default(self):
        env_value = os.getenv(self.validate_server_cert_env, '')
        if env_value == '0':
            return False
        else:
            return True

    def login_url(self, base_url):
        return url_path_join(base_url, 'oauth_login')

    login_handler = "Specify login handler class in subclass"
    callback_handler = OAuthCallbackHandler
    
    def get_callback_url(self, handler=None):
        """Get my OAuth redirect URL
        
        Either from config or guess based on the current request.
        """
        if self.oauth_callback_url:
            return self.oauth_callback_url
        elif handler:
            return guess_callback_uri(
                handler.request.protocol,
                handler.request.host,
                handler.hub.server.base_url
            )
        else:
            raise ValueError("Specify callback oauth_callback_url or give me a handler to guess with")

    def get_handlers(self, app):
        return [
            (r'/oauth_login', self.login_handler),
            (r'/oauth_callback', self.callback_handler),
        ]

    async def authenticate(self, handler, data=None):
        raise NotImplementedError()
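
As the docstring above says, subclasses override login_service, login_handler and authenticate. A minimal, hypothetical sketch of such a subclass follows; the provider name and the fetch_user_from_provider helper are placeholders, not a real integration.

# Hypothetical OAuthenticator subclass sketch.
class ExampleOAuthenticator(OAuthenticator):
    login_service = 'Example Provider'
    login_handler = OAuthLoginHandler  # typically a provider-specific subclass

    async def authenticate(self, handler, data=None):
        code = handler.get_argument('code')
        # Exchange `code` for an access token and resolve the username here;
        # fetch_user_from_provider is a placeholder for provider-specific code.
        username = await fetch_user_from_provider(code)
        return username or None
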
class LDAPAuthenticator(Authenticator):
    server_address = Unicode(config=True,
                             help='Address of LDAP server to contact')
    server_port = Int(
        config=True,
        help='Port on which to contact LDAP server',
    )

    def _server_port_default(self):
        if self.use_ssl:
            return 636  # default SSL port for LDAP
        else:
            return 389  # default plaintext port for LDAP

    use_ssl = Bool(True,
                   config=True,
                   help='Use SSL to encrypt connection to LDAP server')

    bind_dn_template = Unicode(config=True,
                               help="""
        Template from which to construct the full dn
        when authenticating to LDAP. {username} is replaced
        with the actual username.

        Example:

            uid={username},ou=people,dc=wikimedia,dc=org
        """)

    allowed_groups = List(
        config=True,
        help="List of LDAP Group DNs whose members are allowed access")

    valid_username_regex = Unicode(
        r'^[a-z][.a-z0-9_-]*$',
        config=True,
        help="""Regex to use to validate usernames before sending to LDAP

        Also acts as a security measure to prevent LDAP injection. If you
        are customizing this, be careful to ensure that attempts to do LDAP
        injection are rejected by your customization
        """)

    lookup_dn = Bool(False,
                     config=True,
                     help='Look up the user\'s DN based on an attribute')

    user_search_base = Unicode(
        config=True,
        help="""Base for looking up user accounts in the directory.

        Example:

            ou=people,dc=wikimedia,dc=org
        """)

    user_attribute = Unicode(
        config=True,
        help="""LDAP attribute that stores the user's username.

        For most LDAP servers, this is uid.  For Active Directory, it is
        sAMAccountName.
        """)

    @gen.coroutine
    def authenticate(self, handler, data):
        username = data['username']
        password = data['password']

        # Protect against invalid usernames as well as LDAP injection attacks
        if not re.match(self.valid_username_regex, username):
            self.log.warning('Invalid username')
            return None

        # No empty passwords!
        if password is None or password.strip() == '':
            self.log.warning('Empty password')
            return None

        userdn = self.bind_dn_template.format(username=username)

        server = ldap3.Server(self.server_address,
                              port=self.server_port,
                              use_ssl=self.use_ssl)
        conn = ldap3.Connection(server, user=userdn, password=password)

        if conn.bind():
            if self.allowed_groups:
                if self.lookup_dn:
                    # In some cases, like AD, we don't bind with the DN, and need to discover it.
                    conn.search(search_base=self.user_search_base,
                                search_scope=ldap3.SUBTREE,
                                search_filter='({userattr}={username})'.format(
                                    userattr=self.user_attribute,
                                    username=username),
                                attributes=[self.user_attribute])

                    if len(conn.response) == 0:
                        self.log.warning(
                            'User with {userattr}={username} not found in directory'
                            .format(userattr=self.user_attribute,
                                    username=username))
                        return None
                    userdn = conn.response[0]['dn']

                for group in self.allowed_groups:
                    groupfilter = ('(|'
                                   '(member={userdn})'
                                   '(uniqueMember={userdn})'
                                   '(memberUid={uid})'
                                   ')').format(userdn=userdn, uid=username)
                    groupattributes = ['member', 'uniqueMember', 'memberUid']
                    if conn.search(group,
                                   search_scope=ldap3.BASE,
                                   search_filter=groupfilter,
                                   attributes=groupattributes):
                        return username
                # If we reach here, then none of the groups matched
                self.log.warning(
                    'User {username} not in any of the allowed groups'.format(
                        username=userdn))
                return None
            else:
                return username
        else:
            self.log.warning('Invalid password for user {username}'.format(
                username=userdn))
            return None
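
A hypothetical jupyterhub_config.py snippet wiring up the LDAPAuthenticator above; host names and DNs are placeholders, and the import path assumes the class ships as the ldapauthenticator package.

# Hypothetical jupyterhub_config.py snippet for LDAPAuthenticator.
c.JupyterHub.authenticator_class = 'ldapauthenticator.LDAPAuthenticator'
c.LDAPAuthenticator.server_address = 'ldap.example.org'
c.LDAPAuthenticator.use_ssl = True  # server_port then defaults to 636
c.LDAPAuthenticator.bind_dn_template = 'uid={username},ou=people,dc=example,dc=org'
c.LDAPAuthenticator.allowed_groups = [
    'cn=researchers,ou=groups,dc=example,dc=org',
]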
Example 5
class Voila(Application):
    name = 'voila'
    version = __version__
    examples = 'voila example.ipynb --port 8888'

    flags = {
        'debug': ({'Voila': {'log_level': logging.DEBUG}}, _("Set the log level to logging.DEBUG")),
        'no-browser': ({'Voila': {'open_browser': False}}, _('Don\'t open the notebook in a browser after startup.'))
    }

    description = Unicode(
        """voila [OPTIONS] NOTEBOOK_FILENAME

        This launches a stand-alone server for read-only notebooks.
        """
    )
    option_description = Unicode(
        """
        notebook_path:
            File name of the Jupyter notebook to display.
        """
    )
    notebook_filename = Unicode()
    port = Integer(
        8866,
        config=True,
        help=_(
            'Port of the voila server. Default 8866.'
        )
    )
    autoreload = Bool(
        False,
        config=True,
        help=_(
            'Will autoreload the server and the page when a template, js file or Python code changes'
        )
    )
    root_dir = Unicode(config=True, help=_('The directory to use for notebooks.'))
    static_root = Unicode(
        STATIC_ROOT,
        config=True,
        help=_(
            'Directory holding static assets (HTML, JS and CSS files).'
        )
    )
    aliases = {
        'port': 'Voila.port',
        'static': 'Voila.static_root',
        'strip_sources': 'VoilaConfiguration.strip_sources',
        'autoreload': 'Voila.autoreload',
        'template': 'VoilaConfiguration.template',
        'theme': 'VoilaConfiguration.theme',
        'base_url': 'Voila.base_url',
        'server_url': 'Voila.server_url',
        'enable_nbextensions': 'VoilaConfiguration.enable_nbextensions'
    }
    classes = [
        VoilaConfiguration,
        VoilaExecutor,
        VoilaExporter,
        VoilaCSSPreprocessor
    ]
    connection_dir_root = Unicode(
        config=True,
        help=_(
            'Location of temporary connection files. Defaults '
            'to system `tempfile.gettempdir()` value.'
        )
    )
    connection_dir = Unicode()

    base_url = Unicode(
        '/',
        config=True,
        help=_(
            'Path for voila API calls. If server_url is unset, this will be \
            used for both the base route of the server and the client. \
            If server_url is set, the server will serve the routes prefixed \
            by server_url, while the client will prefix by base_url (this is \
            useful in reverse proxies).'
        )
    )

    server_url = Unicode(
        None,
        config=True,
        allow_none=True,
        help=_(
            'Path to prefix to voila API handlers. Leave unset to default to base_url'
        )
    )

    notebook_path = Unicode(
        None,
        config=True,
        allow_none=True,
        help=_(
            'path to notebook to serve with voila'
        )
    )

    nbconvert_template_paths = List(
        [],
        config=True,
        help=_(
            'path to nbconvert templates'
        )
    )

    template_paths = List(
        [],
        allow_none=True,
        config=True,
        help=_(
            'paths to jinja2 templates used for rendering'
        )
    )

    static_paths = List(
        [STATIC_ROOT],
        config=True,
        help=_(
            'paths to static assets'
        )
    )

    port_retries = Integer(50, config=True,
                           help=_("The number of additional ports to try if the specified port is not available.")
                           )

    ip = Unicode('localhost', config=True,
                 help=_("The IP address the notebook server will listen on."))

    open_browser = Bool(True, config=True,
                        help=_("""Whether to open in a browser after starting.
                        The specific browser used is platform dependent and
                        determined by the python standard library `webbrowser`
                        module, unless it is overridden using the --browser
                        (NotebookApp.browser) configuration option.
                        """))

    browser = Unicode(u'', config=True,
                      help="""Specify what command to use to invoke a web
                      browser when opening the notebook. If not specified, the
                      default browser will be determined by the `webbrowser`
                      standard library module, which allows setting of the
                      BROWSER environment variable to override it.
                      """)

    webbrowser_open_new = Integer(2, config=True,
                                  help=_("""Specify Where to open the notebook on startup. This is the
                                  `new` argument passed to the standard library method `webbrowser.open`.
                                  The behaviour is not guaranteed, but depends on browser support. Valid
                                  values are:
                                  - 2 opens a new tab,
                                  - 1 opens a new window,
                                  - 0 opens in an existing window.
                                  See the `webbrowser.open` documentation for details.
                                  """))

    custom_display_url = Unicode(u'', config=True,
                                 help=_("""Override URL shown to users.
                                 Replace the actual URL, including protocol, address, port and base URL,
                                 with the given value when displaying the URL to users. Do not change
                                 the actual connection URL. If authentication token is enabled, the
                                 token is added to the custom URL automatically.
                                 This option is intended to be used when the URL to display to the user
                                 cannot be determined reliably by the Jupyter notebook server (proxified
                                 or containerized setups for example)."""))

    @property
    def display_url(self):
        if self.custom_display_url:
            url = self.custom_display_url
            if not url.endswith('/'):
                url += '/'
        else:
            if self.ip in ('', '0.0.0.0'):
                ip = "%s" % socket.gethostname()
            else:
                ip = self.ip
            url = self._url(ip)
        # TODO: do we want to have the token?
        # if self.token:
        #     # Don't log full token if it came from config
        #     token = self.token if self._token_generated else '...'
        #     url = (url_concat(url, {'token': token})
        #           + '\n or '
        #           + url_concat(self._url('127.0.0.1'), {'token': token}))
        return url

    @property
    def connection_url(self):
        ip = self.ip if self.ip else 'localhost'
        return self._url(ip)

    def _url(self, ip):
        # TODO: https / certfile
        # proto = 'https' if self.certfile else 'http'
        proto = 'http'
        return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)

    config_file_paths = List(
        Unicode(),
        config=True,
        help=_(
            'Paths to search for voila.(py|json)'
        )
    )

    tornado_settings = Dict(
        {},
        config=True,
        help=_(
            'Extra settings to apply to tornado application, e.g. headers, ssl, etc'
        )
    )

    @default('config_file_paths')
    def _config_file_paths_default(self):
        return [os.getcwd()] + jupyter_config_path()

    @default('connection_dir_root')
    def _default_connection_dir(self):
        connection_dir = tempfile.gettempdir()
        self.log.info('Using %s to store connection files' % connection_dir)
        return connection_dir

    @default('log_level')
    def _default_log_level(self):
        return logging.INFO

    # similar to NotebookApp, except no extra path
    @property
    def nbextensions_path(self):
        """The path to look for Javascript notebook extensions"""
        path = jupyter_path('nbextensions')
        # FIXME: remove IPython nbextensions path after a migration period
        try:
            from IPython.paths import get_ipython_dir
        except ImportError:
            pass
        else:
            path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
        return path

    @default('root_dir')
    def _default_root_dir(self):
        if self.notebook_path:
            return os.path.dirname(os.path.abspath(self.notebook_path))
        else:
            return getcwd()

    def initialize(self, argv=None):
        self.log.debug("Searching path %s for config files", self.config_file_paths)
        # to make config_file_paths settable via cmd line, we first need to parse it
        super(Voila, self).initialize(argv)
        if len(self.extra_args) == 1:
            arg = self.extra_args[0]
            # I am not sure why we need to check if self.notebook_path is set, can we get rid of this?
            if not self.notebook_path:
                if os.path.isdir(arg):
                    self.root_dir = arg
                elif os.path.isfile(arg):
                    self.notebook_path = arg
                else:
                    raise ValueError('argument is neither a file nor a directory: %r' % arg)
        elif len(self.extra_args) != 0:
            raise ValueError('provided more than 1 argument: %r' % self.extra_args)

        # then we load the config
        self.load_config_file('voila', path=self.config_file_paths)
        # common configuration options between the server extension and the application
        self.voila_configuration = VoilaConfiguration(parent=self)
        self.setup_template_dirs()
        signal.signal(signal.SIGTERM, self._handle_signal_stop)

    def setup_template_dirs(self):
        if self.voila_configuration.template:
            collect_template_paths(
                self.nbconvert_template_paths,
                self.static_paths,
                self.template_paths,
                self.voila_configuration.template)
            # look for possible template-related config files
            template_conf_dir = [os.path.join(k, '..') for k in self.nbconvert_template_paths]
            conf_paths = [os.path.join(d, 'conf.json') for d in template_conf_dir]
            for p in conf_paths:
                # see if config file exists
                if os.path.exists(p):
                    # load the template-related config
                    with open(p) as json_file:
                        conf = json.load(json_file)
                    # update the overall config with it, preserving CLI config priority
                    if 'traitlet_configuration' in conf:
                        recursive_update(conf['traitlet_configuration'], self.voila_configuration.config.VoilaConfiguration)
                        # pass merged config to overall voila config
                        self.voila_configuration.config.VoilaConfiguration = Config(conf['traitlet_configuration'])
        self.log.debug('using template: %s', self.voila_configuration.template)
        self.log.debug('nbconvert template paths:\n\t%s', '\n\t'.join(self.nbconvert_template_paths))
        self.log.debug('template paths:\n\t%s', '\n\t'.join(self.template_paths))
        self.log.debug('static paths:\n\t%s', '\n\t'.join(self.static_paths))
        if self.notebook_path and not os.path.exists(self.notebook_path):
            raise ValueError('Notebook not found: %s' % self.notebook_path)

    def _handle_signal_stop(self, sig, frame):
        self.log.info('Handling signal %s.' % sig)
        self.ioloop.add_callback_from_signal(self.ioloop.stop)

    def start(self):
        self.connection_dir = tempfile.mkdtemp(
            prefix='voila_',
            dir=self.connection_dir_root
        )
        self.log.info('Storing connection files in %s.' % self.connection_dir)
        self.log.info('Serving static files from %s.' % self.static_root)

        self.kernel_spec_manager = KernelSpecManager(
            parent=self
        )

        self.kernel_manager = AsyncMappingKernelManager(
            parent=self,
            connection_dir=self.connection_dir,
            kernel_spec_manager=self.kernel_spec_manager,
            allowed_message_types=[
                'comm_open',
                'comm_close',
                'comm_msg',
                'comm_info_request',
                'kernel_info_request',
                'shutdown_request'
            ]
        )

        jenv_opt = {"autoescape": True}  # we might want extra options via cmd line like notebook server
        env = jinja2.Environment(loader=jinja2.FileSystemLoader(self.template_paths), extensions=['jinja2.ext.i18n'], **jenv_opt)
        nbui = gettext.translation('nbui', localedir=os.path.join(ROOT, 'i18n'), fallback=True)
        env.install_gettext_translations(nbui, newstyle=False)
        self.contents_manager = LargeFileManager(parent=self)

        # we create a config manager that loads both the server config and nbconfig (classic notebook)
        read_config_path = [os.path.join(p, 'serverconfig') for p in jupyter_config_path()]
        read_config_path += [os.path.join(p, 'nbconfig') for p in jupyter_config_path()]
        self.config_manager = ConfigManager(parent=self, read_config_path=read_config_path)

        # default server_url to base_url
        self.server_url = self.server_url or self.base_url

        self.app = tornado.web.Application(
            base_url=self.base_url,
            server_url=self.server_url or self.base_url,
            kernel_manager=self.kernel_manager,
            kernel_spec_manager=self.kernel_spec_manager,
            allow_remote_access=True,
            autoreload=self.autoreload,
            voila_jinja2_env=env,
            jinja2_env=env,
            static_path='/',
            server_root_dir='/',
            contents_manager=self.contents_manager,
            config_manager=self.config_manager
        )

        self.app.settings.update(self.tornado_settings)

        handlers = []

        handlers.extend([
            (url_path_join(self.server_url, r'/api/kernels/%s' % _kernel_id_regex), KernelHandler),
            (url_path_join(self.server_url, r'/api/kernels/%s/channels' % _kernel_id_regex), ZMQChannelsHandler),
            (
                url_path_join(self.server_url, r'/voila/static/(.*)'),
                MultiStaticFileHandler,
                {
                    'paths': self.static_paths,
                    'default_filename': 'index.html'
                }
            )
        ])

        # Serving notebook extensions
        if self.voila_configuration.enable_nbextensions:
            handlers.append(
                (
                    url_path_join(self.server_url, r'/voila/nbextensions/(.*)'),
                    FileFindHandler,
                    {
                        'path': self.nbextensions_path,
                        'no_cache_paths': ['/'],  # don't cache anything in nbextensions
                    },
                )
            )
        handlers.append(
            (
                url_path_join(self.server_url, r'/voila/files/(.*)'),
                WhiteListFileHandler,
                {
                    'whitelist': self.voila_configuration.file_whitelist,
                    'blacklist': self.voila_configuration.file_blacklist,
                    'path': self.root_dir,
                },
            )
        )

        tree_handler_conf = {
            'voila_configuration': self.voila_configuration
        }
        if self.notebook_path:
            handlers.append((
                url_path_join(self.server_url, r'/(.*)'),
                VoilaHandler,
                {
                    'notebook_path': os.path.relpath(self.notebook_path, self.root_dir),
                    'nbconvert_template_paths': self.nbconvert_template_paths,
                    'config': self.config,
                    'voila_configuration': self.voila_configuration
                }
            ))
        else:
            self.log.debug('serving directory: %r', self.root_dir)
            handlers.extend([
                (self.server_url, VoilaTreeHandler, tree_handler_conf),
                (url_path_join(self.server_url, r'/voila/tree' + path_regex),
                 VoilaTreeHandler, tree_handler_conf),
                (url_path_join(self.server_url, r'/voila/render/(.*)'),
                 VoilaHandler,
                 {
                     'nbconvert_template_paths': self.nbconvert_template_paths,
                     'config': self.config,
                     'voila_configuration': self.voila_configuration
                 }),
            ])

        self.app.add_handlers('.*$', handlers)
        self.listen()

    def stop(self):
        shutil.rmtree(self.connection_dir)
        run_sync(self.kernel_manager.shutdown_all())

    def random_ports(self, port, n):
        """Generate a list of n random ports near the given port.

        The first 5 ports will be sequential, and the remaining n-5 will be
        randomly selected in the range [port-2*n, port+2*n].
        """
        for i in range(min(5, n)):
            yield port + i
        for i in range(n-5):
            yield max(1, port + random.randint(-2*n, 2*n))

    def listen(self):
        success = False
        for port in self.random_ports(self.port, self.port_retries+1):
            try:
                self.app.listen(port)
                self.port = port
                self.log.info('Voila is running at:\n%s' % self.display_url)
            except socket.error as e:
                if e.errno == errno.EADDRINUSE:
                    self.log.info(_('The port %i is already in use, trying another port.') % port)
                    continue
                elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
                    self.log.warning(_("Permission to listen on port %i denied") % port)
                    continue
                else:
                    raise
            else:
                self.port = port
                success = True
                break

        if not success:
            self.log.critical(_('ERROR: the voila server could not be started because '
                              'no available port could be found.'))
            self.exit(1)

        if self.open_browser:
            self.launch_browser()

        self.ioloop = tornado.ioloop.IOLoop.current()
        try:
            self.ioloop.start()
        except KeyboardInterrupt:
            self.log.info('Stopping...')
        finally:
            self.stop()

    def launch_browser(self):
        try:
            browser = webbrowser.get(self.browser or None)
        except webbrowser.Error as e:
            self.log.warning(_('No web browser found: %s.') % e)
            browser = None

        if not browser:
            return

        uri = self.base_url
        fd, open_file = tempfile.mkstemp(suffix='.html')
        # Write a temporary file to open in the browser
        with io.open(fd, 'w', encoding='utf-8') as fh:
            # TODO: do we want to have the token?
            # if self.token:
            #     url = url_concat(url, {'token': self.token})
            url = url_path_join(self.connection_url, uri)

            jinja2_env = self.app.settings['jinja2_env']
            template = jinja2_env.get_template('browser-open.html')
            fh.write(template.render(open_url=url, base_url=url))

        def target():
            return browser.open(urljoin('file:', pathname2url(open_file)), new=self.webbrowser_open_new)
        threading.Thread(target=target).start()
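
Based on the flags and aliases defined above, the application can also be launched programmatically; this is a sketch only, and example.ipynb is a placeholder notebook name.

# Hypothetical programmatic launch, roughly equivalent to:
#   voila example.ipynb --port=8866 --no-browser
Voila.launch_instance(argv=[
    'example.ipynb',
    '--port=8866',
    '--no-browser',
])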
Example 6
class Authenticator(LoggingConfigurable):
    """Base class for implementing an authentication provider for JupyterHub"""

    db = Any()

    enable_auth_state = Bool(
        False,
        config=True,
        help="""Enable persisting auth_state (if available).

        auth_state will be encrypted and stored in the Hub's database.
        This can include things like authentication tokens, etc.
        to be passed to Spawners as environment variables.

        Encrypting auth_state requires the cryptography package.

        Additionally, the JUPYTERHUB_CRYPTO_KEY environment variable must
        contain one (or more, separated by ;) 32B encryption keys.
        These can be either base64 or hex-encoded.

        If encryption is unavailable, auth_state cannot be persisted.

        New in JupyterHub 0.8
        """,
    )

    admin_users = Set(help="""
        Set of users that will have admin rights on this JupyterHub.

        Admin users have extra privileges:
         - Use the admin panel to see list of users logged in
         - Add / remove users in some authenticators
         - Restart / halt the hub
         - Start / stop users' single-user servers
         - Can access each individual users' single-user server (if configured)

        Admin access should be treated the same way root access is.

        Defaults to an empty set, in which case no user has admin access.
        """).tag(config=True)

    whitelist = Set(help="""
        Whitelist of usernames that are allowed to log in.

        Use this with supported authenticators to restrict which users can log in. This is an
        additional whitelist that further restricts users, beyond whatever restrictions the
        authenticator has in place.

        If empty, does not perform any additional restriction.
        """).tag(config=True)

    @observe('whitelist')
    def _check_whitelist(self, change):
        short_names = [name for name in change['new'] if len(name) <= 1]
        if short_names:
            sorted_names = sorted(short_names)
            single = ''.join(sorted_names)
            string_set_typo = "set('%s')" % single
            self.log.warning(
                "whitelist contains single-character names: %s; did you mean set([%r]) instead of %s?",
                sorted_names[:8],
                single,
                string_set_typo,
            )

    custom_html = Unicode(help="""
        HTML form to be overridden by authenticators if they want a custom authentication form.

        Defaults to an empty string, which shows the default username/password form.
        """)

    login_service = Unicode(help="""
        Name of the login service this authenticator uses to authenticate users.

        Example: GitHub, MediaWiki, Google, etc.

        Setting this value replaces the login form with a "Login with <login_service>" button.

        Any authenticator that redirects to an external service (e.g. using OAuth) should set this.
        """)

    username_pattern = Unicode(help="""
        Regular expression pattern that all valid usernames must match.

        If a username does not match the pattern specified here, authentication will not be attempted.

        If not set, allow any username.
        """).tag(config=True)

    @observe('username_pattern')
    def _username_pattern_changed(self, change):
        if not change['new']:
            self.username_regex = None
            return
        self.username_regex = re.compile(change['new'])

    username_regex = Any(help="""
        Compiled regex kept in sync with `username_pattern`
        """)

    def validate_username(self, username):
        """Validate a normalized username

        Return True if username is valid, False otherwise.
        """
        if not self.username_regex:
            return True
        return bool(self.username_regex.match(username))

    username_map = Dict(
        help="""Dictionary mapping authenticator usernames to JupyterHub users.

        Primarily used to normalize OAuth user names to local users.
        """).tag(config=True)

    delete_invalid_users = Bool(
        False,
        help="""Delete any users from the database that do not pass validation

        When JupyterHub starts, `.add_user` will be called
        on each user in the database to verify that all users are still valid.

        If `delete_invalid_users` is True,
        any users that do not pass validation will be deleted from the database.
        Use this if users might be deleted from an external system,
        such as local user accounts.

        If False (default), invalid users remain in the Hub's database
        and a warning will be issued.
        This is the default to avoid data loss due to config changes.
        """)

    def normalize_username(self, username):
        """Normalize the given username and return it

        Override in subclasses if usernames need different normalization rules.

        The default attempts to lowercase the username and apply `username_map` if it is
        set.
        """
        username = username.lower()
        username = self.username_map.get(username, username)
        return username

    def check_whitelist(self, username):
        """Check if a username is allowed to authenticate based on whitelist configuration

        Return True if username is allowed, False otherwise.
        No whitelist means any username is allowed.

        Names are normalized *before* being checked against the whitelist.
        """
        if not self.whitelist:
            # No whitelist means any name is allowed
            return True
        return username in self.whitelist

    @gen.coroutine
    def get_authenticated_user(self, handler, data):
        """Authenticate the user who is attempting to log in

        Returns user dict if successful, None otherwise.

        This calls `authenticate`, which should be overridden in subclasses,
        normalizes the username if any normalization should be done,
        and then validates the name in the whitelist.

        This is the outer API for authenticating a user.
        Subclasses should not override this method.

        The various stages can be overridden separately:
         - `authenticate` turns formdata into a username
         - `normalize_username` normalizes the username
         - `check_whitelist` checks against the user whitelist
        
        .. versionchanged:: 0.8
            return dict instead of username
        """
        authenticated = yield self.authenticate(handler, data)
        if authenticated is None:
            return
        if isinstance(authenticated, dict):
            if 'name' not in authenticated:
                raise ValueError("user missing a name: %r" % authenticated)
        else:
            authenticated = {
                'name': authenticated,
            }
        authenticated.setdefault('auth_state', None)

        # normalize the username
        authenticated['name'] = username = self.normalize_username(
            authenticated['name'])
        if not self.validate_username(username):
            self.log.warning("Disallowing invalid username %r.", username)
            return

        whitelist_pass = yield gen.maybe_future(self.check_whitelist(username))
        if whitelist_pass:
            return authenticated
        else:
            self.log.warning("User %r not in whitelist.", username)
            return

    @gen.coroutine
    def authenticate(self, handler, data):
        """Authenticate a user with login form data

        This must be a tornado gen.coroutine.
        It must return the username on successful authentication,
        and return None on failed authentication.

        Checking the whitelist is handled separately by the caller.

        .. versionchanged:: 0.8
            Allow `authenticate` to return a dict containing auth_state.

        Args:
            handler (tornado.web.RequestHandler): the current request handler
            data (dict): The formdata of the login form.
                         The default form has 'username' and 'password' fields.
        Returns:
            user (str or dict or None): The username of the authenticated user,
                or None if Authentication failed.
                If the Authenticator has state associated with the user,
                it can return a dict with the keys 'name' and 'auth_state',
                where 'name' is the username and 'auth_state' is a dictionary
                of auth state that will be persisted.
        """

    def pre_spawn_start(self, user, spawner):
        """Hook called before spawning a user's server

        Can be used to do auth-related startup, e.g. opening PAM sessions.
        """

    def post_spawn_stop(self, user, spawner):
        """Hook called after stopping a user container

        Can be used to do auth-related cleanup, e.g. closing PAM sessions.
        """

    def add_user(self, user):
        """Hook called when a user is added to JupyterHub

        This is called:
         - When a user first authenticates
         - When the hub restarts, for all users.

        This method may be a coroutine.

        By default, this just adds the user to the whitelist.

        Subclasses may do more extensive things, such as adding actual unix users,
        but they should call super to ensure the whitelist is updated.

        Note that this should be idempotent, since it is called whenever the hub restarts
        for all users.

        Args:
            user (User): The User wrapper object
        """
        if not self.validate_username(user.name):
            raise ValueError("Invalid username: %s" % user.name)
        if self.whitelist:
            self.whitelist.add(user.name)

    def delete_user(self, user):
        """Hook called when a user is deleted

        Removes the user from the whitelist.
        Subclasses should call super to ensure the whitelist is updated.

        Args:
            user (User): The User wrapper object
        """
        self.whitelist.discard(user.name)

    auto_login = Bool(False,
                      config=True,
                      help="""Automatically begin the login process

        rather than starting with a "Login with..." link at `/hub/login`

        To work, `.login_url()` must give a URL other than the default `/hub/login`,
        such as an oauth handler or another automatic login handler,
        registered with `.get_handlers()`.

        .. versionadded:: 0.8
        """)

    def login_url(self, base_url):
        """Override this when registering a custom login handler

        Generally used by authenticators that do not use simple form-based authentication.

        The subclass overriding this is responsible for making sure there is a handler
        available to handle the URL returned from this method, using the `get_handlers`
        method.

        Args:
            base_url (str): the base URL of the Hub (e.g. /hub/)

        Returns:
            str: The login URL, e.g. '/hub/login'
        """
        return url_path_join(base_url, 'login')

    def logout_url(self, base_url):
        """Override when registering a custom logout handler

        The subclass overriding this is responsible for making sure there is a handler
        available to handle the URL returned from this method, using the `get_handlers`
        method.

        Args:
            base_url (str): the base URL of the Hub (e.g. /hub/)

        Returns:
            str: The logout URL, e.g. '/hub/logout'
        """
        return url_path_join(base_url, 'logout')

    def get_handlers(self, app):
        """Return any custom handlers the authenticator needs to register

        Used in conjunction with `login_url` and `logout_url`.

        Args:
            app (JupyterHub Application):
                the application object, in case it needs to be accessed for info.
        Returns:
            handlers (list):
                list of ``('/url', Handler)`` tuples passed to tornado.
                The Hub prefix is added to any URLs.
        """
        return [
            ('/login', LoginHandler),
        ]
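
For illustration, a minimal hypothetical Authenticator subclass that checks the login form data against an in-memory dict, following the authenticate contract documented above (Dict and gen are assumed to be the traitlets and tornado imports already used in this module); it is a toy, not production code.

# Hypothetical toy authenticator; the password dict is a placeholder.
class DictionaryAuthenticator(Authenticator):

    passwords = Dict(config=True,
        help="""dict of username: password for authentication""")

    @gen.coroutine
    def authenticate(self, handler, data):
        if self.passwords.get(data['username']) == data['password']:
            return data['username']
        # falling through returns None, i.e. failed authentication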
Example 7
class PAMAuthenticator(LocalAuthenticator):
    """Authenticate local UNIX users with PAM"""

    encoding = Unicode('utf8',
                       help="""
        The text encoding to use when communicating with PAM
        """).tag(config=True)

    service = Unicode('login',
                      help="""
        The name of the PAM service to use for authentication
        """).tag(config=True)

    open_sessions = Bool(True,
                         help="""
        Whether to open a new PAM session when spawners are started.

        This may trigger things like mounting shared filsystems,
        loading credentials, etc. depending on system configuration,
        but it does not always work.

        If any errors are encountered when opening/closing PAM sessions,
        this is automatically set to False.
        """).tag(config=True)

    def __init__(self, **kwargs):
        if pamela is None:
            raise _pamela_error from None
        super().__init__(**kwargs)

    @gen.coroutine
    def authenticate(self, handler, data):
        """Authenticate with PAM, and return the username if login is successful.

        Return None otherwise.
        """
        username = data['username']
        try:
            pamela.authenticate(username,
                                data['password'],
                                service=self.service)
        except pamela.PAMError as e:
            if handler is not None:
                self.log.warning("PAM Authentication failed (%s@%s): %s",
                                 username, handler.request.remote_ip, e)
            else:
                self.log.warning("PAM Authentication failed: %s", e)
        else:
            return username

    def pre_spawn_start(self, user, spawner):
        """Open PAM session for user if so configured"""
        if not self.open_sessions:
            return
        try:
            pamela.open_session(user.name, service=self.service)
        except pamela.PAMError as e:
            self.log.warning("Failed to open PAM session for %s: %s",
                             user.name, e)
            self.log.warning("Disabling PAM sessions from now on.")
            self.open_sessions = False

    def post_spawn_stop(self, user, spawner):
        """Close PAM session for user if we were configured to opened one"""
        if not self.open_sessions:
            return
        try:
            pamela.close_session(user.name, service=self.service)
        except pamela.PAMError as e:
            self.log.warning("Failed to close PAM session for %s: %s",
                             user.name, e)
            self.log.warning("Disabling PAM sessions from now on.")
            self.open_sessions = False
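
A hypothetical jupyterhub_config.py snippet for the PAM authenticator above, showing its three configurable traits; the values shown are illustrative.

# Hypothetical jupyterhub_config.py snippet for PAMAuthenticator.
c.JupyterHub.authenticator_class = 'jupyterhub.auth.PAMAuthenticator'
c.PAMAuthenticator.service = 'login'      # PAM service name
c.PAMAuthenticator.encoding = 'utf8'
c.PAMAuthenticator.open_sessions = False  # skip opening PAM sessions on spawn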
Example 8
class InspectReply(Reply, MimeBundle):
    found = Bool()
Example 9
class GenericOAuthenticator(OAuthenticator):

    login_service = Unicode("OAuth 2.0", config=True)

    extra_params = Dict(help="Extra parameters for first POST request").tag(config=True)

    username_key = Union(
        [Unicode(os.environ.get('OAUTH2_USERNAME_KEY', 'username')), Callable()],
        config=True,
        help="""
        Userdata username key from returned json for USERDATA_URL.

        Can be a string key name or a callable that accepts the returned
        json (as a dict) and returns the username.  The callable is useful
        e.g. for extracting the username from a nested object in the
        response.
        """,
    )

    userdata_params = Dict(
        help="Userdata params to get user data login information"
    ).tag(config=True)

    userdata_method = Unicode(
        os.environ.get('OAUTH2_USERDATA_METHOD', 'GET'),
        config=True,
        help="Userdata method to get user data login information",
    )
    userdata_token_method = Unicode(
        os.environ.get('OAUTH2_USERDATA_REQUEST_TYPE', 'header'),
        config=True,
        help="Method for sending access token in userdata request. Supported methods: header, url. Default: header",
    )

    tls_verify = Bool(
        os.environ.get('OAUTH2_TLS_VERIFY', 'True').lower() in {'true', '1'},
        config=True,
        help="Disable TLS verification on http request",
    )

    basic_auth = Bool(
        os.environ.get('OAUTH2_BASIC_AUTH', 'True').lower() in {'true', '1'},
        config=True,
        help="Disable basic authentication for access token request",
    )

    def http_client(self):
        return AsyncHTTPClient(force_instance=True, defaults=dict(validate_cert=self.tls_verify))


    async def authenticate(self, handler, data=None):
        code = handler.get_argument("code")
        # TODO: Configure the curl_httpclient for tornado
        http_client = self.http_client()

        params = dict(
            redirect_uri=self.get_callback_url(handler),
            code=code,
            grant_type='authorization_code',
        )
        params.update(self.extra_params)

        if self.token_url:
            url = self.token_url
        else:
            raise ValueError("Please set the $OAUTH2_TOKEN_URL environment variable")

        headers = {"Accept": "application/json", "User-Agent": "JupyterHub"}

        if self.basic_auth:
            b64key = base64.b64encode(
                bytes("{}:{}".format(self.client_id, self.client_secret), "utf8")
            )
            headers.update({"Authorization": "Basic {}".format(b64key.decode("utf8"))})

        req = HTTPRequest(
            url,
            method="POST",
            headers=headers,
            body=urllib.parse.urlencode(params),
        )

        resp = await http_client.fetch(req)

        resp_json = json.loads(resp.body.decode('utf8', 'replace'))

        access_token = resp_json['access_token']
        refresh_token = resp_json.get('refresh_token', None)
        token_type = resp_json['token_type']
        scope = resp_json.get('scope', '')
        if isinstance(scope, str):
            scope = scope.split(' ')

        # Determine who the logged in user is
        headers = {
            "Accept": "application/json",
            "User-Agent": "JupyterHub",
            "Authorization": "{} {}".format(token_type, access_token),
        }
        if self.userdata_url:
            url = url_concat(self.userdata_url, self.userdata_params)
        else:
            raise ValueError("Please set the OAUTH2_USERDATA_URL environment variable")

        if self.userdata_token_method == "url":
            url = url_concat(self.userdata_url, dict(access_token=access_token))

        req = HTTPRequest(
            url,
            method=self.userdata_method,
            headers=headers,
            allow_nonstandard_methods=True,
        )
        resp = await http_client.fetch(req)
        resp_json = json.loads(resp.body.decode('utf8', 'replace'))

        if callable(self.username_key):
            name = self.username_key(resp_json)
        else:
            name = resp_json.get(self.username_key)
            if not name:
                self.log.error(
                    "OAuth user contains no key %s: %s", self.username_key, resp_json
                )
                return

        return {
            'name': name,
            'auth_state': {
                'access_token': access_token,
                'refresh_token': refresh_token,
                'oauth_user': resp_json,
                'scope': scope,
            },
        }
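
A hypothetical configuration sketch for the GenericOAuthenticator above; the URLs, client credentials and the nested username key are placeholders, and token_url/userdata_url are assumed to be traits defined on the base class (they are referenced in authenticate above).

# Hypothetical jupyterhub_config.py snippet for GenericOAuthenticator.
c.JupyterHub.authenticator_class = 'oauthenticator.generic.GenericOAuthenticator'
c.GenericOAuthenticator.client_id = 'my-client-id'
c.GenericOAuthenticator.client_secret = 'my-client-secret'
c.GenericOAuthenticator.token_url = 'https://provider.example.org/oauth/token'
c.GenericOAuthenticator.userdata_url = 'https://provider.example.org/oauth/userinfo'
# username_key may also be a callable that extracts the name from nested JSON:
c.GenericOAuthenticator.username_key = lambda user_json: user_json['user']['login']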
Example 10
class AxisAtController(Manipulator):
    _StatusMap = {
        'I': 'axis is initialized',
        'O': 'axis is disabled',
        'R': 'axis initialised and ready',
        'T': 'axis is positioning in trapezoidal profile',
        'S': 'axis is positioning in S-curve profile',
        'V': 'axis is operating in velocity mode',
        'P': 'reference motion is in progress',
        'F': 'axis is releasing a limit switch',
        'J': 'axis is operating in joystick mode',
        'L': 'axis has been disabled after approaching a hardware limit switch'
        ' (MINSTOP, MAXSTOP)',
        'B': 'axis has been stopped after approaching a brake switch (MINDEC, '
        'MAXDEC)',
        'A': 'axis has been disabled after limit switch error',
        'M': 'axis has been disabled after motion controller error',
        'Z': 'axis has been disabled after timeout error',
        'H': 'phase initialization active (step motor axis)',
        'U': 'axis is not released.',
        'E': 'axis has been disabled after motion error',
        'W': 'axis is positioning in trapezoidal profile with WMS',
        'X': 'axis is positioning in S-curve profile with WMS',
        'Y': 'axis is operating in velocity mode with WMS',
        'C': 'axis is operating in velocity mode with continuous-path control',
        '?': 'error, unknown status of axis',
    }

    _movingStates = ['T', 'S', 'V', 'P', 'W', 'X', 'Y', 'C']

    statusMessage = Unicode(read_only=True).tag(name="Status")
    limitSwitchActive = Bool(read_only=True).tag(name="Limit switch active")
    motorPowerStageError = Bool(read_only=True).tag(
        name="Motor power stage error")

    def __init__(self,
                 connection=None,
                 axis=1,
                 pitch=Q_(1),
                 objectName=None,
                 loop=None):
        """ Axis `axis` at connection `connection`. Has pitch `pitch` in units
        of 'full step count/length'."""
        super().__init__(objectName=objectName, loop=loop)
        self.connection = connection
        self.axis = axis
        self._status = '?'
        self.set_trait('statusMessage', self._StatusMap[self._status])
        self._isMovingFuture = asyncio.Future()
        self._isMovingFuture.set_result(None)

        self.prefPosUnit = (ureg.count / pitch).units
        self.prefVelocUnit = (ureg.count / ureg.s / pitch).units
        self.setPreferredUnits(self.prefPosUnit, self.prefVelocUnit)

        self._microstepResolution = 50
        self._pitch = pitch
        self.define_context()

        if not pitch.dimensionless:
            self.velocity = Q_(1, 'mm/s')
        else:
            self.velocity = Q_(2000, 'count/s')

    def define_context(self):
        self.contextName = 'Owis-{}'.format(uuid.uuid4())
        context = Context(self.contextName)

        if not self._pitch.dimensionless:
            context.add_transformation(
                '', '[length]', lambda ureg, x: x /
                (self._pitch * self._microstepResolution))
            context.add_transformation(
                '[length]', '[]',
                lambda ureg, x: x * self._pitch * self._microstepResolution)
            context.add_transformation(
                '1/[time]', '[length]/[time]', lambda ureg, x: x /
                (self._pitch * self._microstepResolution))
            context.add_transformation(
                '[length]/[time]', '1/[time]',
                lambda ureg, x: x * self._pitch * self._microstepResolution)

        ureg.add_context(context)

    async def __aenter__(self):
        await super().__aenter__()

        await self.send("absol" + str(self.axis))
        self._microstepResolution = await self.queryAxisVariable('mcstp')
        self._updateFuture = ensure_weakly_binding_future(self.updateStatus)

        return self

    async def __aexit__(self, *args):
        await super().__aexit__(*args)
        self._updateFuture.cancel()

    def handleError(self, msg):
        errorCode = int(msg)
        if errorCode == 0:  # no error
            return

    async def updateStatus(self):
        while True:
            await asyncio.sleep(0.2)

            if (self.connection is None):
                continue

            try:
                await self.singleUpdate()
            except Exception:
                logging.error(traceback.format_exc())

    async def singleUpdate(self):
        movFut = self._isMovingFuture

        self._status = (await self.send("?astat"))[self.axis - 1]
        self.set_trait('statusMessage', self._StatusMap[self._status])

        cnt = await self.queryAxisVariable('cnt') * ureg.count
        with ureg.context(self.contextName):
            cnt = cnt.to(self.prefPosUnit)
        self.set_trait('value', cnt)

        if self._status not in self._movingStates:
            self.set_trait('status', self.Status.Idle)
            if not movFut.done():
                movFut.set_result(None)
        else:
            self.set_trait('status', self.Status.Moving)

        errCode = await self.send("?err")
        if (errCode != 0):
            logging.error("OWIS Axis {} Error: {}".format(self.axis, errCode))

        estat = await self.queryAxisVariable('estat')
        self.set_trait('limitSwitchActive', bool(estat & 0xF))
        self.set_trait('motorPowerStageError', bool(estat & (1 << 4)))

    async def setAxisVariable(self, var, value):
        cmd = '{}{}={}'.format(var, int(self.axis), value)
        await self.send(cmd)

    async def queryAxisVariable(self, var):
        cmd = '?{}{}'.format(var, int(self.axis))
        return await self.send(cmd)

    async def send(self, command):
        """ Send a command to the controller. If the command is a request,
        the reply will be parsed (if possible) and returned. A error message
        is automatically sent to check for communcation errors.

        Parameters
        ----------
        command (convertible to bytearray) : The command to be sent.
        """
        if self.connection is None:
            return None

        ret = await self.connection.send(command)

        if ret is not None:
            try:
                ret = int(ret)
            except ValueError:
                pass

        errorCode = int(await self.connection.send('?msg'))
        if errorCode != 0:
            raise Exception("Message error code %d on OWIS Axis %d." %
                            (errorCode, self.axis))

        return ret

    async def moveTo(self, val: float, velocity=None):
        if velocity is None:
            velocity = self.velocity

        with ureg.context(self.contextName):
            velocity = velocity.to('count/s').magnitude

            # From the manual. 256µs is the sampling period of the encoder
            velocity *= 256e-6 * 65536

            val = val.to('count').magnitude

        await self.halt()

        await self.singleUpdate()

        self.set_trait('status', self.Status.Moving)

        await self.setAxisVariable("pvel", int(velocity))
        await self.setAxisVariable("pset", int(val))
        await self.send("pgo" + str(self.axis))

        if self._isMovingFuture.done():
            self._isMovingFuture = asyncio.Future()

        return await self._isMovingFuture

    @action("Halt", priority=0)
    async def halt(self):
        if not self._isMovingFuture.done():
            self._isMovingFuture.cancel()

        await self.send('stop' + str(self.axis))

    @action("Set Position to zero", priority=1)
    async def resetCounter(self):
        await self.send('cres' + str(self.axis))

    @action("Free from limit switch", priority=2)
    async def efree(self):
        await self.send('efree' + str(self.axis))

    @action("Home to min. lim. switch", priority=3)
    async def homeMinLim(self):
        await self.setAxisVariable('ref', 4)

    @action("Initialize", priority=4)
    async def initialize(self):
        await self.setAxisVariable('axis', 1)
        await self.send('init' + str(self.axis))

    @action("Release axis", priority=5)
    async def releaseAxis(self):
        await self.setAxisVariable('axis', 0)

    @action("Unrelease axis", priority=6)
    async def unreleaseAxis(self):
        await self.setAxisVariable('axis', 1)

    def stop(self):
        self._loop.create_task(self.halt())

    async def reference(self):
        pass
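A minimal usage sketch for the axis class above. `OwisConnection`, the serial port, and the pitch value are placeholders for illustration; the only real requirement is an object exposing an awaitable `send()` coroutine.

import asyncio

async def main():
    connection = OwisConnection(port='/dev/ttyUSB0')   # hypothetical transport
    axis = AxisAtController(connection=connection, axis=1,
                            pitch=Q_(200, 'count/mm'))
    async with axis:                      # __aenter__ starts the status poller
        await axis.moveTo(Q_(5, 'mm'))    # resolves once the axis is idle again
        print(axis.value)                 # current position in preferred units
        await axis.resetCounter()

asyncio.get_event_loop().run_until_complete(main())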
Example n. 11
class YAPInteractiveApp(Configurable):
    """A Mixin for applications that start YAPInteractive instances.

    Provides configurables for loading extensions and executing files
    as part of configuring a Shell environment.

    The following methods should be called by the :meth:`initialize` method
    of the subclass:

      - :meth:`init_path`
      - :meth:`init_shell` (to be implemented by the subclass)
      - :meth:`init_gui_pylab`
      - :meth:`init_extensions`
      - :meth:`init_code`
    """
    extensions = List(
        Unicode(),
        help="A list of dotted module names of IPython extensions to load."
    ).tag(config=True)
    extra_extension = Unicode(
        '', help="dotted module name of an IPython extension to load.").tag(
            config=True)

    reraise_ipython_extension_failures = Bool(
        False,
        help="Reraise exceptions encountered loading IPython extensions?",
    ).tag(config=True)

    # Extensions that are always loaded (not configurable)
    default_extensions = List(Unicode(), [u'storemagic']).tag(config=False)

    hide_initial_ns = Bool(
        True,
        help=
        """Should variables loaded at startup (by startup files, exec_lines, etc.)
        be hidden from tools like %who?""").tag(config=True)

    exec_files = List(
        Unicode(),
        help="""List of files to run at IPython startup.""").tag(config=True)
    exec_PYTHONSTARTUP = Bool(
        True,
        help="""Run the file referenced by the PYTHONSTARTUP environment
        variable at IPython startup.""").tag(config=True)
    file_to_run = Unicode('', help="""A file to be run""").tag(config=True)

    exec_lines = List(
        Unicode(),
        help="""lines of code to run at IPython startup.""").tag(config=True)
    code_to_run = Unicode(
        '', help="Execute the given command string.").tag(config=True)
    module_to_run = Unicode(
        '', help="Run the module as a script.").tag(config=True)
    gui = CaselessStrEnum(
        gui_keys,
        allow_none=True,
        help="Enable GUI event loop integration with any of {0}.".format(
            gui_keys)).tag(config=True)
    matplotlib = CaselessStrEnum(
        backend_keys,
        allow_none=True,
        help="""Configure matplotlib for interactive use with
        the default matplotlib backend.""").tag(config=True)
    pylab = CaselessStrEnum(
        backend_keys,
        allow_none=True,
        help="""Pre-load matplotlib and numpy for interactive use,
        selecting a particular matplotlib backend and loop integration.
        """).tag(config=True)
    pylab_import_all = Bool(
        True,
        help=
        """If true, IPython will populate the user namespace with numpy, pylab, etc.
        and an ``import *`` is done from numpy and pylab, when using pylab mode.

        When False, pylab mode should not import any names into the user namespace.
        """).tag(config=True)
    shell = Instance('yap_ipython.core.interactiveshell.YAPInteractiveABC',
                     allow_none=True)
    # whether interact-loop should start
    interact = Bool(True)

    user_ns = Instance(dict, args=None, allow_none=True)

    @observe('user_ns')
    def _user_ns_changed(self, change):
        if self.shell is not None:
            self.shell.user_ns = change['new']
            self.shell.init_user_ns()

    def init_path(self):
        """Add current working directory, '', to sys.path"""
        if sys.path[0] != '':
            sys.path.insert(0, '')

    def init_shell(self):
        raise NotImplementedError("Override in subclasses")

    def init_gui_pylab(self):
        """Enable GUI event loop integration, taking pylab into account."""
        enable = False
        shell = self.shell
        if self.pylab:
            enable = lambda key: shell.enable_pylab(
                key, import_all=self.pylab_import_all)
            key = self.pylab
        elif self.matplotlib:
            enable = shell.enable_matplotlib
            key = self.matplotlib
        elif self.gui:
            enable = shell.enable_gui
            key = self.gui

        if not enable:
            return

        try:
            r = enable(key)
        except ImportError:
            self.log.warning(
                "Eventloop or matplotlib integration failed. Is matplotlib installed?"
            )
            self.shell.showtraceback()
            return
        except Exception:
            self.log.warning("GUI event loop or pylab initialization failed")
            self.shell.showtraceback()
            return

        if isinstance(r, tuple):
            gui, backend = r[:2]
            self.log.info(
                "Enabling GUI event loop integration, "
                "eventloop=%s, matplotlib=%s", gui, backend)
            if key == "auto":
                print("Using matplotlib backend: %s" % backend)
        else:
            gui = r
            self.log.info(
                "Enabling GUI event loop integration, "
                "eventloop=%s", gui)

    def init_extensions(self):
        """Load all IPython extensions in IPythonApp.extensions.

        This uses the :meth:`ExtensionManager.load_extensions` to load all
        the extensions listed in ``self.extensions``.
        """
        try:
            self.log.debug("Loading IPython extensions...")
            extensions = self.default_extensions + self.extensions
            if self.extra_extension:
                extensions.append(self.extra_extension)
            for ext in extensions:
                try:
                    self.log.info("Loading IPython extension: %s" % ext)
                    self.shell.extension_manager.load_extension(ext)
                except:
                    if self.reraise_ipython_extension_failures:
                        raise
                    msg = ("Error in loading extension: {ext}\n"
                           "Check your config files in {location}".format(
                               ext=ext, location=self.profile_dir.location))
                    self.log.warning(msg, exc_info=True)
        except:
            if self.reraise_ipython_extension_failures:
                raise
            self.log.warning("Unknown error in loading extensions:",
                             exc_info=True)

    def init_code(self):
        """run the pre-flight code, specified via exec_lines"""
        self._run_startup_files()
        self._run_exec_lines()
        self._run_exec_files()

        # Hide variables defined here from %who etc.
        if self.hide_initial_ns:
            self.shell.user_ns_hidden.update(self.shell.user_ns)

        # command-line execution (ipython -i script.py, ipython -m module)
        # should *not* be excluded from %whos
        self._run_cmd_line_code()
        self._run_module()

        # flush output, so it won't be attached to the first cell
        sys.stdout.flush()
        sys.stderr.flush()

    def _run_exec_lines(self):
        """Run lines of code in IPythonApp.exec_lines in the user's namespace."""
        if not self.exec_lines:
            return
        try:
            self.log.debug("Running code from IPythonApp.exec_lines...")
            for line in self.exec_lines:
                try:
                    self.log.info("Running code in user namespace: %s" % line)
                    self.shell.run_cell(line, store_history=False)
                except:
                    self.log.warning("Error in executing line in user "
                                     "namespace: %s" % line)
                    self.shell.showtraceback()
        except:
            self.log.warning(
                "Unknown error in handling IPythonApp.exec_lines:")
            self.shell.showtraceback()

    def _exec_file(self, fname, shell_futures=False):
        try:
            full_filename = filefind(fname, [u'.', self.ipython_dir])
        except IOError:
            self.log.warning("File not found: %r" % fname)
            return
        # Make sure that the running script gets a proper sys.argv as if it
        # were run from a system shell.
        save_argv = sys.argv
        sys.argv = [full_filename] + self.extra_args[1:]
        try:
            if os.path.isfile(full_filename):
                self.log.info("Running file in user namespace: %s" %
                              full_filename)
                # Ensure that __file__ is always defined to match Python
                # behavior.
                with preserve_keys(self.shell.user_ns, '__file__'):
                    self.shell.user_ns['__file__'] = fname
                    if full_filename.endswith('.ipy'):
                        self.shell.safe_execfile_ipy(
                            full_filename, shell_futures=shell_futures)
                    else:
                        # default to python, even without extension
                        self.shell.safe_execfile(full_filename,
                                                 self.shell.user_ns,
                                                 shell_futures=shell_futures,
                                                 raise_exceptions=True)
        finally:
            sys.argv = save_argv

    def _run_startup_files(self):
        """Run files from profile startup directory"""
        startup_dirs = [self.profile_dir.startup_dir] + [
            os.path.join(p, 'startup')
            for p in chain(ENV_CONFIG_DIRS, SYSTEM_CONFIG_DIRS)
        ]
        startup_files = []

        if self.exec_PYTHONSTARTUP and os.environ.get('PYTHONSTARTUP', False) and \
                not (self.file_to_run or self.code_to_run or self.module_to_run):
            python_startup = os.environ['PYTHONSTARTUP']
            self.log.debug("Running PYTHONSTARTUP file %s...", python_startup)
            try:
                self._exec_file(python_startup)
            except:
                self.log.warning(
                    "Unknown error in handling PYTHONSTARTUP file %s:",
                    python_startup)
                self.shell.showtraceback()
        for startup_dir in startup_dirs[::-1]:
            startup_files += glob.glob(os.path.join(startup_dir, '*.py'))
            startup_files += glob.glob(os.path.join(startup_dir, '*.ipy'))
        if not startup_files:
            return

        self.log.debug("Running startup files from %s...", startup_dir)
        try:
            for fname in sorted(startup_files):
                self._exec_file(fname)
        except:
            self.log.warning("Unknown error in handling startup files:")
            self.shell.showtraceback()

    def _run_exec_files(self):
        """Run files from IPythonApp.exec_files"""
        if not self.exec_files:
            return

        self.log.debug("Running files in IPythonApp.exec_files...")
        try:
            for fname in self.exec_files:
                self._exec_file(fname)
        except:
            self.log.warning(
                "Unknown error in handling IPythonApp.exec_files:")
            self.shell.showtraceback()

    def _run_cmd_line_code(self):
        """Run code or file specified at the command-line"""
        if self.code_to_run:
            line = self.code_to_run
            try:
                self.log.info("Running code given at command line (c=): %s" %
                              line)
                self.shell.run_cell(line, store_history=False)
            except:
                self.log.warning(
                    "Error in executing line in user namespace: %s" % line)
                self.shell.showtraceback()
                if not self.interact:
                    self.exit(1)

        # Like Python itself, ignore the second if the first of these is present
        elif self.file_to_run:
            fname = self.file_to_run
            if os.path.isdir(fname):
                fname = os.path.join(fname, "__main__.py")
            try:
                self._exec_file(fname, shell_futures=True)
            except:
                self.shell.showtraceback(tb_offset=4)
                if not self.interact:
                    self.exit(1)

    def _run_module(self):
        """Run module specified at the command-line."""
        if self.module_to_run:
            # Make sure that the module gets a proper sys.argv as if it were
            # run using `python -m`.
            save_argv = sys.argv
            sys.argv = [sys.executable] + self.extra_args
            try:
                self.shell.safe_run_module(self.module_to_run,
                                           self.shell.user_ns)
            finally:
                sys.argv = save_argv
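A minimal sketch of a concrete application built on this mixin, calling the init_* helpers in the order given in the class docstring. The base application class and the `YAPInteractive.instance()` shell factory used here are assumptions for illustration.

class TerminalYAPApp(BaseYAPApplication, YAPInteractiveApp):  # hypothetical base class

    def initialize(self, argv=None):
        super().initialize(argv)
        self.init_path()
        self.init_shell()        # provided by this subclass, see below
        self.init_gui_pylab()
        self.init_extensions()
        self.init_code()

    def init_shell(self):
        # Create the shell instance that the other init_* helpers operate on.
        self.shell = YAPInteractive.instance(
            parent=self, config=self.config, user_ns=self.user_ns)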
Example n. 12
class OSMagics(Magics):
    """Magics to interact with the underlying OS (shell-type functionality).
    """

    cd_force_quiet = Bool(False,
        help="Force %cd magic to be quiet even if -q is not passed."
    ).tag(config=True)

    def __init__(self, shell=None, **kwargs):

        # Now define isexec in a cross platform manner.
        self.is_posix = False
        self.execre = None
        if os.name == 'posix':
            self.is_posix = True
        else:
            try:
                winext = os.environ['pathext'].replace(';','|').replace('.','')
            except KeyError:
                winext = 'exe|com|bat|py'
            
            self.execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE)

        # call up the chain
        super().__init__(shell=shell, **kwargs)


    @skip_doctest
    def _isexec_POSIX(self, file):
        """
            Test for executable on a POSIX system
        """
        if os.access(file.path, os.X_OK):
            # will fail on macOS if access is not X_OK
            return file.is_file()
        return False


    
    @skip_doctest
    def _isexec_WIN(self, file):
        """
            Test for executable file on a non-POSIX (Windows) system
        """
        return file.is_file() and self.execre.match(file.name) is not None

    @skip_doctest
    def isexec(self, file):
        """
            Test for an executable file, dispatching to the POSIX or Windows check
        """
        if self.is_posix:
            return self._isexec_POSIX(file)
        else:
            return self._isexec_WIN(file)


    @skip_doctest
    @line_magic
    def alias(self, parameter_s=''):
        """Define an alias for a system command.

        '%alias alias_name cmd' defines 'alias_name' as an alias for 'cmd'

        Then, typing 'alias_name params' will execute the system command 'cmd
        params' (from your underlying operating system).

        Aliases have lower precedence than magic functions and Python normal
        variables, so if 'foo' is both a Python variable and an alias, the
        alias can not be executed until 'del foo' removes the Python variable.

        You can use the %l specifier in an alias definition to represent the
        whole line when the alias is called.  For example::

          In [2]: alias bracket echo "Input in brackets: <%l>"
          In [3]: bracket hello world
          Input in brackets: <hello world>

        You can also define aliases with parameters using %s specifiers (one
        per parameter)::

          In [1]: alias parts echo first %s second %s
          In [2]: %parts A B
          first A second B
          In [3]: %parts A
          Incorrect number of arguments: 2 expected.
          parts is an alias to: 'echo first %s second %s'

        Note that %l and %s are mutually exclusive.  You can only use one or
        the other in your aliases.

        Aliases expand Python variables just like system calls using ! or !!
        do: all expressions prefixed with '$' get expanded.  For details of
        the semantic rules, see PEP-215:
        http://www.python.org/peps/pep-0215.html.  This is the library used by
        IPython for variable expansion.  If you want to access a true shell
        variable, an extra $ is necessary to prevent its expansion by
        IPython::

          In [6]: alias show echo
          In [7]: PATH='A Python string'
          In [8]: show $PATH
          A Python string
          In [9]: show $$PATH
          /usr/local/lf9560/bin:/usr/local/intel/compiler70/ia32/bin:...

        You can use the alias facility to access all of $PATH.  See the %rehashx
        function, which automatically creates aliases for the contents of your
        $PATH.

        If called with no parameters, %alias prints the current alias table
        for your system.  For posix systems, the default aliases are 'cat',
        'cp', 'mv', 'rm', 'rmdir', and 'mkdir', and other platform-specific
        aliases are added.  For windows-based systems, the default aliases are
        'copy', 'ddir', 'echo', 'ls', 'ldir', 'mkdir', 'ren', and 'rmdir'.

        You can see the definition of alias by adding a question mark in the
        end::

          In [1]: cat?
          Repr: <alias cat for 'cat'>"""

        par = parameter_s.strip()
        if not par:
            aliases = sorted(self.shell.alias_manager.aliases)
            # stored = self.shell.db.get('stored_aliases', {} )
            # for k, v in stored:
            #     atab.append(k, v[0])

            print("Total number of aliases:", len(aliases))
            sys.stdout.flush()
            return aliases

        # Now try to define a new one
        try:
            alias,cmd = par.split(None, 1)
        except ValueError:
            print(oinspect.getdoc(self.alias))
            return
        
        try:
            self.shell.alias_manager.define_alias(alias, cmd)
        except AliasError as e:
            print(e)
    # end magic_alias

    @line_magic
    def unalias(self, parameter_s=''):
        """Remove an alias"""

        aname = parameter_s.strip()
        try:
            self.shell.alias_manager.undefine_alias(aname)
        except ValueError as e:
            print(e)
            return
        
        stored = self.shell.db.get('stored_aliases', {} )
        if aname in stored:
            print("Removing %stored alias",aname)
            del stored[aname]
            self.shell.db['stored_aliases'] = stored

    @line_magic
    def rehashx(self, parameter_s=''):
        """Update the alias table with all executable files in $PATH.

        rehashx explicitly checks that every entry in $PATH is a file
        with execute access (os.X_OK).

        Under Windows, it checks executability as a match against a
        '|'-separated string of extensions, stored in the IPython config
        variable win_exec_ext.  This defaults to 'exe|com|bat'.

        This function also resets the root module cache of module completer,
        used on slow filesystems.
        """
        from IPython.core.alias import InvalidAliasError

        # for the benefit of module completer in ipy_completers.py
        del self.shell.db['rootmodules_cache']

        path = [os.path.abspath(os.path.expanduser(p)) for p in
            os.environ.get('PATH','').split(os.pathsep)]

        syscmdlist = []
        savedir = os.getcwd()

        # Now walk the paths looking for executables to alias.
        try:
            # write the whole loop for posix/Windows so we don't have an if in
            # the innermost part
            if self.is_posix:
                for pdir in path:
                    try:
                        os.chdir(pdir)
                    except OSError:
                        continue

                    # for python 3.6+ rewrite to: with os.scandir(pdir) as dirlist:
                    dirlist = os.scandir(path=pdir)
                    for ff in dirlist:
                        if self.isexec(ff):
                            fname = ff.name
                            try:
                                # Removes dots from the name since ipython
                                # will assume names with dots to be python.
                                if not self.shell.alias_manager.is_alias(fname):
                                    self.shell.alias_manager.define_alias(
                                        fname.replace('.',''), fname)
                            except InvalidAliasError:
                                pass
                            else:
                                syscmdlist.append(fname)
            else:
                no_alias = Alias.blacklist
                for pdir in path:
                    try:
                        os.chdir(pdir)
                    except OSError:
                        continue

                    # for python 3.6+ rewrite to: with os.scandir(pdir) as dirlist:
                    dirlist = os.scandir(pdir)
                    for ff in dirlist:
                        fname = ff.name
                        base, ext = os.path.splitext(fname)
                        if self.isexec(ff) and base.lower() not in no_alias:
                            if ext.lower() == '.exe':
                                fname = base
                                try:
                                    # Removes dots from the name since ipython
                                    # will assume names with dots to be python.
                                    self.shell.alias_manager.define_alias(
                                        base.lower().replace('.',''), fname)
                                except InvalidAliasError:
                                    pass
                                syscmdlist.append(fname)

            self.shell.db['syscmdlist'] = syscmdlist
        finally:
            os.chdir(savedir)

    @skip_doctest
    @line_magic
    def pwd(self, parameter_s=''):
        """Return the current working directory path.

        Examples
        --------
        ::

          In [9]: pwd
          Out[9]: '/home/tsuser/sprint/ipython'
        """
        try:
            return os.getcwd()
        except FileNotFoundError:
            raise UsageError("CWD no longer exists - please use %cd to change directory.")

    @skip_doctest
    @line_magic
    def cd(self, parameter_s=''):
        """Change the current working directory.

        This command automatically maintains an internal list of directories
        you visit during your IPython session, in the variable _dh. The
        command %dhist shows this history nicely formatted. You can also
        do 'cd -<tab>' to see directory history conveniently.

        Usage:

          cd 'dir': changes to directory 'dir'.

          cd -: changes to the last visited directory.

          cd -<n>: changes to the n-th directory in the directory history.

          cd --foo: change to directory that matches 'foo' in history

          cd -b <bookmark_name>: jump to a bookmark set by %bookmark
             (note: cd <bookmark_name> is enough if there is no
              directory <bookmark_name>, but a bookmark with the name exists.)
              'cd -b <tab>' allows you to tab-complete bookmark names.

        Options:

        -q: quiet.  Do not print the working directory after the cd command is
        executed.  By default IPython's cd command does print this directory,
        since the default prompts do not display path information.

        Note that !cd doesn't work for this purpose because the shell where
        !command runs is immediately discarded after executing 'command'.

        Examples
        --------
        ::

          In [10]: cd parent/child
          /home/tsuser/parent/child
        """

        try:
            oldcwd = os.getcwd()
        except FileNotFoundError:
            # Happens if the CWD has been deleted.
            oldcwd = None

        numcd = re.match(r'(-)(\d+)$',parameter_s)
        # jump in directory history by number
        if numcd:
            nn = int(numcd.group(2))
            try:
                ps = self.shell.user_ns['_dh'][nn]
            except IndexError:
                print('The requested directory does not exist in history.')
                return
            else:
                opts = {}
        elif parameter_s.startswith('--'):
            ps = None
            fallback = None
            pat = parameter_s[2:]
            dh = self.shell.user_ns['_dh']
            # first search only by basename (last component)
            for ent in reversed(dh):
                if pat in os.path.basename(ent) and os.path.isdir(ent):
                    ps = ent
                    break

                if fallback is None and pat in ent and os.path.isdir(ent):
                    fallback = ent

            # if we have no last part match, pick the first full path match
            if ps is None:
                ps = fallback

            if ps is None:
                print("No matching entry in directory history")
                return
            else:
                opts = {}


        else:
            opts, ps = self.parse_options(parameter_s, 'qb', mode='string')
        # jump to previous
        if ps == '-':
            try:
                ps = self.shell.user_ns['_dh'][-2]
            except IndexError:
                raise UsageError('%cd -: No previous directory to change to.')
        # jump to bookmark if needed
        else:
            if not os.path.isdir(ps) or 'b' in opts:
                bkms = self.shell.db.get('bookmarks', {})

                if ps in bkms:
                    target = bkms[ps]
                    print('(bookmark:%s) -> %s' % (ps, target))
                    ps = target
                else:
                    if 'b' in opts:
                        raise UsageError("Bookmark '%s' not found.  "
                              "Use '%%bookmark -l' to see your bookmarks." % ps)

        # at this point ps should point to the target dir
        if ps:
            try:
                os.chdir(os.path.expanduser(ps))
                if hasattr(self.shell, 'term_title') and self.shell.term_title:
                    set_term_title(self.shell.term_title_format.format(cwd=abbrev_cwd()))
            except OSError:
                print(sys.exc_info()[1])
            else:
                cwd = os.getcwd()
                dhist = self.shell.user_ns['_dh']
                if oldcwd != cwd:
                    dhist.append(cwd)
                    self.shell.db['dhist'] = compress_dhist(dhist)[-100:]

        else:
            os.chdir(self.shell.home_dir)
            if hasattr(self.shell, 'term_title') and self.shell.term_title:
                set_term_title(self.shell.term_title_format.format(cwd="~"))
            cwd = os.getcwd()
            dhist = self.shell.user_ns['_dh']

            if oldcwd != cwd:
                dhist.append(cwd)
                self.shell.db['dhist'] = compress_dhist(dhist)[-100:]
        if 'q' not in opts and not self.cd_force_quiet and self.shell.user_ns['_dh']:
            print(self.shell.user_ns['_dh'][-1])

    @line_magic
    def env(self, parameter_s=''):
        """Get, set, or list environment variables.

        Usage:\\

          %env: lists all environment variables/values
          %env var: get value for var
          %env var val: set value for var
          %env var=val: set value for var
          %env var=$val: set value for var, using python expansion if possible
        """
        if parameter_s.strip():
            split = '=' if '=' in parameter_s else ' '
            bits = parameter_s.split(split)
            if len(bits) == 1:
                key = parameter_s.strip()
                if key in os.environ:
                    return os.environ[key]
                else:
                    err = "Environment does not have key: {0}".format(key)
                    raise UsageError(err)
            if len(bits) > 1:
                return self.set_env(parameter_s)
        env = dict(os.environ)
        # hide likely secrets when printing the whole environment
        for key in list(env):
            if any(s in key.lower() for s in ('key', 'token', 'secret')):
                env[key] = '<hidden>'

        return env

    @line_magic
    def set_env(self, parameter_s):
        """Set environment variables.  Assumptions are that either "val" is a
        name in the user namespace, or val is something that evaluates to a
        string.

        Usage:\\
          %set_env var val: set value for var
          %set_env var=val: set value for var
          %set_env var=$val: set value for var, using python expansion if possible
        """
        split = '=' if '=' in parameter_s else ' '
        bits = parameter_s.split(split, 1)
        if not parameter_s.strip() or len(bits)<2:
            raise UsageError("usage is 'set_env var=val'")
        var = bits[0].strip()
        val = bits[1].strip()
        if re.match(r'.*\s.*', var):
            # an environment variable with whitespace is almost certainly
            # not what the user intended.  what's more likely is the wrong
            # split was chosen, ie for "set_env cmd_args A=B", we chose
            # '=' for the split and should have chosen ' '.  to get around
            # this, users should just assign directly to os.environ or use
            # standard magic {var} expansion.
            err = "refusing to set env var with whitespace: '{0}'"
            err = err.format(var)
            raise UsageError(err)
        os.environ[var] = val
        print('env: {0}={1}'.format(var,val))

    @line_magic
    def pushd(self, parameter_s=''):
        """Place the current dir on stack and change directory.

        Usage:\\
          %pushd ['dirname']
        """

        dir_s = self.shell.dir_stack
        tgt = os.path.expanduser(parameter_s)
        cwd = os.getcwd().replace(self.shell.home_dir,'~')
        if tgt:
            self.cd(parameter_s)
        dir_s.insert(0,cwd)
        return self.shell.magic('dirs')

    @line_magic
    def popd(self, parameter_s=''):
        """Change to directory popped off the top of the stack.
        """
        if not self.shell.dir_stack:
            raise UsageError("%popd on empty stack")
        top = self.shell.dir_stack.pop(0)
        self.cd(top)
        print("popd ->",top)

    @line_magic
    def dirs(self, parameter_s=''):
        """Return the current directory stack."""

        return self.shell.dir_stack

    @line_magic
    def dhist(self, parameter_s=''):
        """Print your history of visited directories.

        %dhist       -> print full history\\
        %dhist n     -> print last n entries only\\
        %dhist n1 n2 -> print entries between n1 and n2 (n2 not included)\\

        This history is automatically maintained by the %cd command, and
        always available as the global list variable _dh. You can use %cd -<n>
        to go to directory number <n>.

        Note that most of time, you should view directory history by entering
        cd -<TAB>.

        """

        dh = self.shell.user_ns['_dh']
        if parameter_s:
            try:
                args = list(map(int, parameter_s.split()))
            except:
                self.arg_err(self.dhist)
                return
            if len(args) == 1:
                ini,fin = max(len(dh)-(args[0]),0),len(dh)
            elif len(args) == 2:
                ini,fin = args
                fin = min(fin, len(dh))
            else:
                self.arg_err(self.dhist)
                return
        else:
            ini,fin = 0,len(dh)
        print('Directory history (kept in _dh)')
        for i in range(ini, fin):
            print("%d: %s" % (i, dh[i]))

    @skip_doctest
    @line_magic
    def sc(self, parameter_s=''):
        """Shell capture - run shell command and capture output (DEPRECATED use !).

        DEPRECATED. Suboptimal, retained for backwards compatibility.

        You should use the form 'var = !command' instead. Example:

         "%sc -l myfiles = ls ~" should now be written as

         "myfiles = !ls ~"

        myfiles.s, myfiles.l and myfiles.n still apply as documented
        below.

        --
        %sc [options] varname=command

        IPython will run the given command using commands.getoutput(), and
        will then update the user's interactive namespace with a variable
        called varname, containing the value of the call.  Your command can
        contain shell wildcards, pipes, etc.

        The '=' sign in the syntax is mandatory, and the variable name you
        supply must follow Python's standard conventions for valid names.

        (A special format without variable name exists for internal use)

        Options:

          -l: list output.  Split the output on newlines into a list before
          assigning it to the given variable.  By default the output is stored
          as a single string.

          -v: verbose.  Print the contents of the variable.

        In most cases you should not need to split as a list, because the
        returned value is a special type of string which can automatically
        provide its contents either as a list (split on newlines) or as a
        space-separated string.  These are convenient, respectively, either
        for sequential processing or to be passed to a shell command.

        For example::

            # Capture into variable a
            In [1]: sc a=ls *py

            # a is a string with embedded newlines
            In [2]: a
            Out[2]: 'setup.py\\nwin32_manual_post_install.py'

            # which can be seen as a list:
            In [3]: a.l
            Out[3]: ['setup.py', 'win32_manual_post_install.py']

            # or as a whitespace-separated string:
            In [4]: a.s
            Out[4]: 'setup.py win32_manual_post_install.py'

            # a.s is useful to pass as a single command line:
            In [5]: !wc -l $a.s
              146 setup.py
              130 win32_manual_post_install.py
              276 total

            # while the list form is useful to loop over:
            In [6]: for f in a.l:
              ...:      !wc -l $f
              ...:
            146 setup.py
            130 win32_manual_post_install.py

        Similarly, the lists returned by the -l option are also special, in
        the sense that you can equally invoke the .s attribute on them to
        automatically get a whitespace-separated string from their contents::

            In [7]: sc -l b=ls *py

            In [8]: b
            Out[8]: ['setup.py', 'win32_manual_post_install.py']

            In [9]: b.s
            Out[9]: 'setup.py win32_manual_post_install.py'

        In summary, both the lists and strings used for output capture have
        the following special attributes::

            .l (or .list) : value as list.
            .n (or .nlstr): value as newline-separated string.
            .s (or .spstr): value as space-separated string.
        """

        opts,args = self.parse_options(parameter_s, 'lv')
        # Try to get a variable name and command to run
        try:
            # the variable name must be obtained from the parse_options
            # output, which uses shlex.split to strip options out.
            var,_ = args.split('=', 1)
            var = var.strip()
            # But the command has to be extracted from the original input
            # parameter_s, not on what parse_options returns, to avoid the
            # quote stripping which shlex.split performs on it.
            _,cmd = parameter_s.split('=', 1)
        except ValueError:
            var,cmd = '',''
        # If all looks ok, proceed
        split = 'l' in opts
        out = self.shell.getoutput(cmd, split=split)
        if 'v' in opts:
            print('%s ==\n%s' % (var, pformat(out)))
        if var:
            self.shell.user_ns.update({var:out})
        else:
            return out

    @line_cell_magic
    def sx(self, line='', cell=None):
        """Shell execute - run shell command and capture output (!! is short-hand).

        %sx command

        IPython will run the given command using commands.getoutput(), and
        return the result formatted as a list (split on '\\n').  Since the
        output is _returned_, it will be stored in ipython's regular output
        cache Out[N] and in the '_N' automatic variables.

        Notes:

        1) If an input line begins with '!!', then %sx is automatically
        invoked.  That is, while::

          !ls

        causes ipython to simply issue system('ls'), typing::

          !!ls

        is a shorthand equivalent to::

          %sx ls

        2) %sx differs from %sc in that %sx automatically splits into a list,
        like '%sc -l'.  The reason for this is to make it as easy as possible
        to process line-oriented shell output via further python commands.
        %sc is meant to provide much finer control, but requires more
        typing.

        3) Just like %sc -l, this is a list with special attributes:
        ::

          .l (or .list) : value as list.
          .n (or .nlstr): value as newline-separated string.
          .s (or .spstr): value as whitespace-separated string.

        This is very useful when trying to use such lists as arguments to
        system commands."""
        
        if cell is None:
            # line magic
            return self.shell.getoutput(line)
        else:
            opts,args = self.parse_options(line, '', 'out=')
            output = self.shell.getoutput(cell)
            out_name = opts.get('out', opts.get('o'))
            if out_name:
                self.shell.user_ns[out_name] = output
            else:
                return output

    system = line_cell_magic('system')(sx)
    bang = cell_magic('!')(sx)

    @line_magic
    def bookmark(self, parameter_s=''):
        """Manage IPython's bookmark system.

        %bookmark <name>       - set bookmark to current dir
        %bookmark <name> <dir> - set bookmark to <dir>
        %bookmark -l           - list all bookmarks
        %bookmark -d <name>    - remove bookmark
        %bookmark -r           - remove all bookmarks

        You can later on access a bookmarked folder with::

          %cd -b <name>

        or simply '%cd <name>' if there is no directory called <name> AND
        there is such a bookmark defined.

        Your bookmarks persist through IPython sessions, but they are
        associated with each profile."""

        opts,args = self.parse_options(parameter_s,'drl',mode='list')
        if len(args) > 2:
            raise UsageError("%bookmark: too many arguments")

        bkms = self.shell.db.get('bookmarks',{})

        if 'd' in opts:
            try:
                todel = args[0]
            except IndexError:
                raise UsageError(
                    "%bookmark -d: must provide a bookmark to delete")
            else:
                try:
                    del bkms[todel]
                except KeyError:
                    raise UsageError(
                        "%%bookmark -d: Can't delete bookmark '%s'" % todel)

        elif 'r' in opts:
            bkms = {}
        elif 'l' in opts:
            bks = sorted(bkms)
            if bks:
                size = max(map(len, bks))
            else:
                size = 0
            fmt = '%-'+str(size)+'s -> %s'
            print('Current bookmarks:')
            for bk in bks:
                print(fmt % (bk, bkms[bk]))
        else:
            if not args:
                raise UsageError("%bookmark: You must specify the bookmark name")
            elif len(args)==1:
                bkms[args[0]] = os.getcwd()
            elif len(args)==2:
                bkms[args[0]] = args[1]
        self.shell.db['bookmarks'] = bkms

    @line_magic
    def pycat(self, parameter_s=''):
        """Show a syntax-highlighted file through a pager.

        This magic is similar to the cat utility, but it will assume the file
        to be Python source and will show it with syntax highlighting.

        This magic command can either take a local filename, an url,
        an history range (see %history) or a macro as argument ::

        %pycat myscript.py
        %pycat 7-27
        %pycat myMacro
        %pycat http://www.example.com/myscript.py
        """
        if not parameter_s:
            raise UsageError('Missing filename, URL, input history range, '
                             'or macro.')

        try :
            cont = self.shell.find_user_code(parameter_s, skip_encoding_cookie=False)
        except (ValueError, IOError):
            print("Error: no such file, variable, URL, history range or macro")
            return

        page.page(self.shell.pycolorize(source_to_unicode(cont)))

    @magic_arguments.magic_arguments()
    @magic_arguments.argument(
        '-a', '--append', action='store_true', default=False,
        help='Append contents of the cell to an existing file. '
             'The file will be created if it does not exist.'
    )
    @magic_arguments.argument(
        'filename', type=str,
        help='file to write'
    )
    @cell_magic
    def writefile(self, line, cell):
        """Write the contents of the cell to a file.
        
        The file will be overwritten unless the -a (--append) flag is specified.
        """
        args = magic_arguments.parse_argstring(self.writefile, line)
        if re.match(r'^(\'.*\'|".*")$', args.filename):
            filename = os.path.expanduser(args.filename[1:-1])
        else:
            filename = os.path.expanduser(args.filename)
            
        if os.path.exists(filename):
            if args.append:
                print("Appending to %s" % filename)
            else:
                print("Overwriting %s" % filename)
        else:
            print("Writing %s" % filename)
        
        mode = 'a' if args.append else 'w'
        with io.open(filename, mode, encoding='utf-8') as f:
            f.write(cell)
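A short, hypothetical interactive session exercising a few of the magics defined above; the paths and outputs are illustrative only.

In [1]: %bookmark proj ~/src/myproject

In [2]: %cd -b proj
(bookmark:proj) -> ~/src/myproject
/home/user/src/myproject

In [3]: %env EDITOR=vim
env: EDITOR=vim

In [4]: files = %sx ls *.py

In [5]: files.s          # whitespace-separated form, handy for shell commands
Out[5]: 'setup.py utils.py'

In [6]: %%writefile notes.txt
   ...: written from a notebook cell
   ...:
Writing notes.txt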
Example n. 13
class FileContentsManager(FileManagerMixin, ContentsManager):

    root_dir = Unicode(config=True)

    @default('root_dir')
    def _default_root_dir(self):
        try:
            return self.parent.notebook_dir
        except AttributeError:
            return getcwd()

    save_script = Bool(
        False,
        config=True,
        help='DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0')

    @observe('save_script')
    def _update_save_script(self, change):
        self.log.warning("""
        `--script` is deprecated and will be removed in notebook 5.0.

        You can trigger nbconvert via pre- or post-save hooks:

            ContentsManager.pre_save_hook
            FileContentsManager.post_save_hook

        A post-save hook has been registered that calls:

            jupyter nbconvert --to script [notebook]

        which behaves similarly to `--script`.
        """)

        self.post_save_hook = _post_save_script

    post_save_hook = Any(None,
                         config=True,
                         help="""Python callable or importstring thereof

        to be called on the path of a file just saved.

        This can be used to process the file on disk,
        such as converting the notebook to a script or HTML via nbconvert.

        It will be called as (all arguments passed by keyword)::

            hook(os_path=os_path, model=model, contents_manager=instance)

        - os_path: the filesystem path to the file just written
        - model: the model representing the file
        - contents_manager: this ContentsManager instance
        """)

    @validate('post_save_hook')
    def _validate_post_save_hook(self, proposal):
        value = proposal['value']
        if isinstance(value, string_types):
            value = import_item(value)
        if not callable(value):
            raise TraitError("post_save_hook must be callable")
        return value

    def run_post_save_hook(self, model, os_path):
        """Run the post-save hook if defined, and log errors"""
        if self.post_save_hook:
            try:
                self.log.debug("Running post-save hook on %s", os_path)
                self.post_save_hook(os_path=os_path,
                                    model=model,
                                    contents_manager=self)
            except Exception as e:
                self.log.error("Post-save hook failed o-n %s",
                               os_path,
                               exc_info=True)
                raise web.HTTPError(
                    500,
                    u'Unexpected error while running post-save hook: %s' % e)
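
    # A post-save hook is usually supplied via configuration rather than by
    # subclassing.  A minimal sketch (the config file name and the nbconvert
    # call are illustrative, echoing the deprecation notice above):
    #
    #   # jupyter_notebook_config.py
    #   import subprocess
    #
    #   def convert_to_script(os_path, model, contents_manager, **kwargs):
    #       if model['type'] == 'notebook':
    #           subprocess.check_call(
    #               ['jupyter', 'nbconvert', '--to', 'script', os_path])
    #
    #   c.FileContentsManager.post_save_hook = convert_to_script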

    @validate('root_dir')
    def _validate_root_dir(self, proposal):
        """Do a bit of validation of the root_dir."""
        value = proposal['value']
        if not os.path.isabs(value):
            # If we receive a non-absolute path, make it absolute.
            value = os.path.abspath(value)
        if not os.path.isdir(value):
            raise TraitError("%r is not a directory" % value)
        return value

    def _checkpoints_class_default(self):
        return FileCheckpoints

    def is_hidden(self, path):
        """Does the API style path correspond to a hidden directory or file?

        Parameters
        ----------
        path : string
            The path to check. This is an API path (`/` separated,
            relative to root_dir).

        Returns
        -------
        hidden : bool
            Whether the path exists and is hidden.
        """
        path = path.strip('/')
        os_path = self._get_os_path(path=path)
        return is_hidden(os_path, self.root_dir)

    def file_exists(self, path):
        """Returns True if the file exists, else returns False.

        API-style wrapper for os.path.isfile

        Parameters
        ----------
        path : string
            The relative path to the file (with '/' as separator)

        Returns
        -------
        exists : bool
            Whether the file exists.
        """
        path = path.strip('/')
        os_path = self._get_os_path(path)
        return os.path.isfile(os_path)

    def dir_exists(self, path):
        """Does the API-style path refer to an extant directory?

        API-style wrapper for os.path.isdir

        Parameters
        ----------
        path : string
            The path to check. This is an API path (`/` separated,
            relative to root_dir).

        Returns
        -------
        exists : bool
            Whether the path is indeed a directory.
        """
        path = path.strip('/')
        os_path = self._get_os_path(path=path)
        return os.path.isdir(os_path)

    def exists(self, path):
        """Returns True if the path exists, else returns False.

        API-style wrapper for os.path.exists

        Parameters
        ----------
        path : string
            The API path to the file (with '/' as separator)

        Returns
        -------
        exists : bool
            Whether the target exists.
        """
        path = path.strip('/')
        os_path = self._get_os_path(path=path)
        return os.path.exists(os_path)

    def _base_model(self, path):
        """Build the common base of a contents model"""
        os_path = self._get_os_path(path)
        info = os.stat(os_path)
        last_modified = tz.utcfromtimestamp(info.st_mtime)
        created = tz.utcfromtimestamp(info.st_ctime)
        # Create the base model.
        model = {}
        model['name'] = path.rsplit('/', 1)[-1]
        model['path'] = path
        model['last_modified'] = last_modified
        model['created'] = created
        model['content'] = None
        model['format'] = None
        model['mimetype'] = None
        try:
            model['writable'] = os.access(os_path, os.W_OK)
        except OSError:
            self.log.error("Failed to check write permissions on %s", os_path)
            model['writable'] = False
        return model

    def _dir_model(self, path, content=True):
        """Build a model for a directory

        if content is requested, will include a listing of the directory
        """
        os_path = self._get_os_path(path)

        four_o_four = u'directory does not exist: %r' % path

        if not os.path.isdir(os_path):
            raise web.HTTPError(404, four_o_four)
        elif is_hidden(os_path, self.root_dir):
            self.log.info(
                "Refusing to serve hidden directory %r, via 404 Error",
                os_path)
            raise web.HTTPError(404, four_o_four)

        model = self._base_model(path)
        model['type'] = 'directory'
        if content:
            model['content'] = contents = []
            os_dir = self._get_os_path(path)
            for name in os.listdir(os_dir):
                try:
                    os_path = os.path.join(os_dir, name)
                except UnicodeDecodeError as e:
                    self.log.warning("failed to decode filename '%s': %s",
                                     name, e)
                    continue
                # skip over broken symlinks in listing
                if not os.path.exists(os_path):
                    self.log.warning("%s doesn't exist", os_path)
                    continue
                elif not os.path.isfile(os_path) and not os.path.isdir(
                        os_path):
                    self.log.debug("%s not a regular file", os_path)
                    continue
                if self.should_list(name) and not is_hidden(
                        os_path, self.root_dir):
                    contents.append(
                        self.get(path='%s/%s' % (path, name), content=False))

            model['format'] = 'json'

        return model

    def _file_model(self, path, content=True, format=None):
        """Build a model for a file

        if content is requested, include the file contents.

        format:
          If 'text', the contents will be decoded as UTF-8.
          If 'base64', the raw bytes contents will be encoded as base64.
          If not specified, try to decode as UTF-8, and fall back to base64
        """
        model = self._base_model(path)
        model['type'] = 'file'

        os_path = self._get_os_path(path)
        model['mimetype'] = mimetypes.guess_type(os_path)[0]

        if content:
            content, format = self._read_file(os_path, format)
            if model['mimetype'] is None:
                default_mime = {
                    'text': 'text/plain',
                    'base64': 'application/octet-stream'
                }[format]
                model['mimetype'] = default_mime

            model.update(
                content=content,
                format=format,
            )

        return model

    def _notebook_model(self, path, content=True):
        """Build a notebook model

        if content is requested, the notebook content will be populated
        as a JSON structure (not double-serialized)
        """
        model = self._base_model(path)
        model['type'] = 'notebook'
        if content:
            os_path = self._get_os_path(path)
            nb = self._read_notebook(os_path, as_version=4)
            self.mark_trusted_cells(nb, path)
            model['content'] = nb
            model['format'] = 'json'
            self.validate_notebook_model(model)
        return model

    def get(self, path, content=True, type=None, format=None):
        """ Takes a path for an entity and returns its model

        Parameters
        ----------
        path : str
            the API path that describes the relative path for the target
        content : bool
            Whether to include the contents in the reply
        type : str, optional
            The requested type - 'file', 'notebook', or 'directory'.
            Will raise HTTPError 400 if the content doesn't match.
        format : str, optional
            The requested format for file contents. 'text' or 'base64'.
            Ignored if this returns a notebook or directory model.

        Returns
        -------
        model : dict
            the contents model. If content=True, returns the contents
            of the file or directory as well.
        """
        path = path.strip('/')

        if not self.exists(path):
            raise web.HTTPError(404, u'No such file or directory: %s' % path)

        os_path = self._get_os_path(path)
        if os.path.isdir(os_path):
            if type not in (None, 'directory'):
                raise web.HTTPError(400,
                                    u'%s is a directory, not a %s' %
                                    (path, type),
                                    reason='bad type')
            model = self._dir_model(path, content=content)
        elif type == 'notebook' or (type is None and path.endswith('.ipynb')):
            model = self._notebook_model(path, content=content)
        else:
            if type == 'directory':
                raise web.HTTPError(400,
                                    u'%s is not a directory' % path,
                                    reason='bad type')
            model = self._file_model(path, content=content, format=format)
        return model

    def _save_directory(self, os_path, model, path=''):
        """create a directory"""
        if is_hidden(os_path, self.root_dir):
            raise web.HTTPError(400,
                                u'Cannot create hidden directory %r' % os_path)
        if not os.path.exists(os_path):
            with self.perm_to_403():
                os.mkdir(os_path)
        elif not os.path.isdir(os_path):
            raise web.HTTPError(400, u'Not a directory: %s' % (os_path))
        else:
            self.log.debug("Directory %r already exists", os_path)

    def save(self, model, path=''):
        """Save the file model and return the model with no content."""
        path = path.strip('/')

        if 'type' not in model:
            raise web.HTTPError(400, u'No file type provided')
        if 'content' not in model and model['type'] != 'directory':
            raise web.HTTPError(400, u'No file content provided')

        os_path = self._get_os_path(path)
        self.log.debug("Saving %s", os_path)

        self.run_pre_save_hook(model=model, path=path)

        try:
            if model['type'] == 'notebook':
                nb = nbformat.from_dict(model['content'])
                self.check_and_sign(nb, path)
                self._save_notebook(os_path, nb)
                # One checkpoint should always exist for notebooks.
                if not self.checkpoints.list_checkpoints(path):
                    self.create_checkpoint(path)
            elif model['type'] == 'file':
                # Missing format will be handled internally by _save_file.
                self._save_file(os_path, model['content'], model.get('format'))
            elif model['type'] == 'directory':
                self._save_directory(os_path, model, path)
            else:
                raise web.HTTPError(
                    400, "Unhandled contents type: %s" % model['type'])
        except web.HTTPError:
            raise
        except Exception as e:
            self.log.error(u'Error while saving file: %s %s',
                           path,
                           e,
                           exc_info=True)
            raise web.HTTPError(
                500, u'Unexpected error while saving file: %s %s' % (path, e))

        validation_message = None
        if model['type'] == 'notebook':
            self.validate_notebook_model(model)
            validation_message = model.get('message', None)

        model = self.get(path, content=False)
        if validation_message:
            model['message'] = validation_message

        self.run_post_save_hook(model=model, os_path=os_path)

        return model

    def delete_file(self, path):
        """Delete file at path."""
        path = path.strip('/')
        os_path = self._get_os_path(path)
        rm = os.unlink
        if os.path.isdir(os_path):
            listing = os.listdir(os_path)
            # Don't delete non-empty directories.
            # A directory containing only leftover checkpoints is
            # considered empty.
            cp_dir = getattr(self.checkpoints, 'checkpoint_dir', None)
            for entry in listing:
                if entry != cp_dir:
                    raise web.HTTPError(400,
                                        u'Directory %s not empty' % os_path)
        elif not os.path.isfile(os_path):
            raise web.HTTPError(404, u'File does not exist: %s' % os_path)

        if os.path.isdir(os_path):
            self.log.debug("Removing directory %s", os_path)
            with self.perm_to_403():
                shutil.rmtree(os_path)
        else:
            self.log.debug("Unlinking file %s", os_path)
            with self.perm_to_403():
                rm(os_path)

    def rename_file(self, old_path, new_path):
        """Rename a file."""
        old_path = old_path.strip('/')
        new_path = new_path.strip('/')
        if new_path == old_path:
            return

        new_os_path = self._get_os_path(new_path)
        old_os_path = self._get_os_path(old_path)

        # Should we proceed with the move?
        if os.path.exists(new_os_path) and not samefile(
                old_os_path, new_os_path):
            raise web.HTTPError(409, u'File already exists: %s' % new_path)

        # Move the file
        try:
            with self.perm_to_403():
                shutil.move(old_os_path, new_os_path)
        except web.HTTPError:
            raise
        except Exception as e:
            raise web.HTTPError(
                500, u'Unknown error renaming file: %s %s' % (old_path, e))

    def info_string(self):
        return "Serving notebooks from local directory: %s" % self.root_dir

    def get_kernel_path(self, path, model=None):
        """Return the initial API path of  a kernel associated with a given notebook"""
        if self.dir_exists(path):
            return path
        if '/' in path:
            parent_dir = path.rsplit('/', 1)[0]
        else:
            parent_dir = ''
        return parent_dir
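
A minimal usage sketch of the contents API shown above, assuming this class is the FileContentsManager from the notebook package and that the paths under root_dir already exist (the root_dir value and file names here are illustrative):

from notebook.services.contents.filemanager import FileContentsManager

cm = FileContentsManager(root_dir='/tmp/contents-demo')

# A directory model: content=True adds a 'content' listing and format 'json'.
dir_model = cm.get('work', content=True, type='directory')

# A file model: format 'text' decodes as UTF-8, 'base64' returns encoded raw bytes.
file_model = cm.get('work/notes.txt', content=True, format='text')

# save() needs at least 'type', plus 'content' for anything except directories;
# the model it returns never carries content.
saved = cm.save({'type': 'file', 'format': 'text', 'content': u'hello'},
                path='work/hello.txt')
assert saved['content'] is None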
Example n. 14
class BaseIPythonApplication(Application):

    name = Unicode(u'ipython')
    description = Unicode(u'IPython: an enhanced interactive Python shell.')
    version = Unicode(release.version)

    aliases = Dict(base_aliases)
    flags = Dict(base_flags)
    classes = List([ProfileDir])

    # enable `load_subconfig('cfg.py', profile='name')`
    python_config_loader_class = ProfileAwareConfigLoader

    # Track whether the config_file has changed,
    # because some logic happens only if we aren't using the default.
    config_file_specified = Set()

    config_file_name = Unicode()

    def _config_file_name_default(self):
        return self.name.replace('-', '_') + u'_config.py'

    def _config_file_name_changed(self, name, old, new):
        if new != old:
            self.config_file_specified.add(new)

    # The directory that contains IPython's builtin profiles.
    builtin_profile_dir = Unicode(
        os.path.join(get_ipython_package_dir(), u'config', u'profile',
                     u'default'))

    config_file_paths = List(Unicode())

    def _config_file_paths_default(self):
        return [py3compat.getcwd()]

    extra_config_file = Unicode(config=True,
                                help="""Path to an extra config file to load.
    
    If specified, load this config file in addition to any other IPython config.
    """)

    def _extra_config_file_changed(self, name, old, new):
        try:
            self.config_files.remove(old)
        except ValueError:
            pass
        self.config_file_specified.add(new)
        self.config_files.append(new)

    profile = Unicode(u'default',
                      config=True,
                      help="""The IPython profile to use.""")

    def _profile_changed(self, name, old, new):
        self.builtin_profile_dir = os.path.join(get_ipython_package_dir(),
                                                u'config', u'profile', new)

    ipython_dir = Unicode(config=True,
                          help="""
        The name of the IPython directory. This directory is used for logging
        configuration (through profiles), history storage, etc. The default
        is usually $HOME/.ipython. This option can also be specified through
        the environment variable IPYTHONDIR.
        """)

    def _ipython_dir_default(self):
        d = get_ipython_dir()
        self._ipython_dir_changed('ipython_dir', d, d)
        return d

    _in_init_profile_dir = False
    profile_dir = Instance(ProfileDir, allow_none=True)

    def _profile_dir_default(self):
        # avoid recursion
        if self._in_init_profile_dir:
            return
        # profile_dir requested early, force initialization
        self.init_profile_dir()
        return self.profile_dir

    overwrite = Bool(
        False,
        config=True,
        help="""Whether to overwrite existing config files when copying""")
    auto_create = Bool(
        False,
        config=True,
        help="""Whether to create profile dir if it doesn't exist""")

    config_files = List(Unicode())

    def _config_files_default(self):
        return [self.config_file_name]

    copy_config_files = Bool(
        False,
        config=True,
        help="""Whether to install the default config files into the profile dir.
        If a new profile is being created, and IPython contains config files for that
        profile, then they will be staged into the new directory.  Otherwise,
        default config files will be automatically generated.
        """)

    verbose_crash = Bool(
        False,
        config=True,
        help=
        """Create a massive crash report when IPython encounters what may be an
        internal error.  The default is to append a short message to the
        usual traceback""")

    # The class to use as the crash handler.
    crash_handler_class = Type(crashhandler.CrashHandler)

    @catch_config_error
    def __init__(self, **kwargs):
        super(BaseIPythonApplication, self).__init__(**kwargs)
        # ensure current working directory exists
        try:
            py3compat.getcwd()
        except:
            # exit if cwd doesn't exist
            self.log.error("Current working directory doesn't exist.")
            self.exit(1)

    #-------------------------------------------------------------------------
    # Various stages of Application creation
    #-------------------------------------------------------------------------

    def initialize_subcommand(self, subc, argv=None):
        if subc in self.deprecated_subcommands:
            import time
            self.log.warning(
                "Subcommand `ipython {sub}` is deprecated and will be removed "
                "in future versions.".format(sub=subc))
            self.log.warning(
                "You likely want to use `jupyter {sub}`... continue "
                "in 5 sec. Press Ctrl-C to quit now.".format(sub=subc))
            try:
                time.sleep(5)
            except KeyboardInterrupt:
                sys.exit(1)
        return super(BaseIPythonApplication,
                     self).initialize_subcommand(subc, argv)

    def init_crash_handler(self):
        """Create a crash handler, typically setting sys.excepthook to it."""
        self.crash_handler = self.crash_handler_class(self)
        sys.excepthook = self.excepthook

        def unset_crashhandler():
            sys.excepthook = sys.__excepthook__

        atexit.register(unset_crashhandler)

    def excepthook(self, etype, evalue, tb):
        """this is sys.excepthook after init_crashhandler
        
        set self.verbose_crash=True to use our full crashhandler, instead of
        a regular traceback with a short message (crash_handler_lite)
        """

        if self.verbose_crash:
            return self.crash_handler(etype, evalue, tb)
        else:
            return crashhandler.crash_handler_lite(etype, evalue, tb)

    def _ipython_dir_changed(self, name, old, new):
        if old is not Undefined:
            str_old = py3compat.cast_bytes_py2(os.path.abspath(old),
                                               sys.getfilesystemencoding())
            if str_old in sys.path:
                sys.path.remove(str_old)
        str_path = py3compat.cast_bytes_py2(os.path.abspath(new),
                                            sys.getfilesystemencoding())
        sys.path.append(str_path)
        ensure_dir_exists(new)
        readme = os.path.join(new, 'README')
        readme_src = os.path.join(get_ipython_package_dir(), u'config',
                                  u'profile', 'README')
        if not os.path.exists(readme) and os.path.exists(readme_src):
            shutil.copy(readme_src, readme)
        for d in ('extensions', 'nbextensions'):
            path = os.path.join(new, d)
            try:
                ensure_dir_exists(path)
            except OSError as e:
                # this will not be EEXIST
                self.log.error("couldn't create path %s: %s", path, e)
        self.log.debug("IPYTHONDIR set to: %s" % new)

    def load_config_file(self, suppress_errors=True):
        """Load the config file.

        By default, errors in loading config are handled, and a warning
        printed on screen. For testing, the suppress_errors option is set
        to False, so errors will make tests fail.
        """
        self.log.debug("Searching path %s for config files",
                       self.config_file_paths)
        base_config = 'ipython_config.py'
        self.log.debug("Attempting to load config file: %s" % base_config)
        try:
            Application.load_config_file(self,
                                         base_config,
                                         path=self.config_file_paths)
        except ConfigFileNotFound:
            # ignore errors loading parent
            self.log.debug("Config file %s not found", base_config)
            pass

        for config_file_name in self.config_files:
            if not config_file_name or config_file_name == base_config:
                continue
            self.log.debug("Attempting to load config file: %s" %
                           self.config_file_name)
            try:
                Application.load_config_file(self,
                                             config_file_name,
                                             path=self.config_file_paths)
            except ConfigFileNotFound:
                # Only warn if the default config file was NOT being used.
                if config_file_name in self.config_file_specified:
                    msg = self.log.warning
                else:
                    msg = self.log.debug
                msg("Config file not found, skipping: %s", config_file_name)
            except Exception:
                # For testing purposes.
                if not suppress_errors:
                    raise
                self.log.warning("Error loading config file: %s" %
                                 self.config_file_name,
                                 exc_info=True)

    def init_profile_dir(self):
        """initialize the profile dir"""
        self._in_init_profile_dir = True
        if self.profile_dir is not None:
            # already ran
            return
        if 'ProfileDir.location' not in self.config:
            # location not specified, find by profile name
            try:
                p = ProfileDir.find_profile_dir_by_name(
                    self.ipython_dir, self.profile, self.config)
            except ProfileDirError:
                # not found, maybe create it (always create default profile)
                if self.auto_create or self.profile == 'default':
                    try:
                        p = ProfileDir.create_profile_dir_by_name(
                            self.ipython_dir, self.profile, self.config)
                    except ProfileDirError:
                        self.log.fatal("Could not create profile: %r" %
                                       self.profile)
                        self.exit(1)
                    else:
                        self.log.info("Created profile dir: %r" % p.location)
                else:
                    self.log.fatal("Profile %r not found." % self.profile)
                    self.exit(1)
            else:
                self.log.debug("Using existing profile dir: %r" % p.location)
        else:
            location = self.config.ProfileDir.location
            # location is fully specified
            try:
                p = ProfileDir.find_profile_dir(location, self.config)
            except ProfileDirError:
                # not found, maybe create it
                if self.auto_create:
                    try:
                        p = ProfileDir.create_profile_dir(
                            location, self.config)
                    except ProfileDirError:
                        self.log.fatal(
                            "Could not create profile directory: %r" %
                            location)
                        self.exit(1)
                    else:
                        self.log.debug("Creating new profile dir: %r" %
                                       location)
                else:
                    self.log.fatal("Profile directory %r not found." %
                                   location)
                    self.exit(1)
            else:
                self.log.info("Using existing profile dir: %r" % location)
            # if profile_dir is specified explicitly, set profile name
            dir_name = os.path.basename(p.location)
            if dir_name.startswith('profile_'):
                self.profile = dir_name[8:]

        self.profile_dir = p
        self.config_file_paths.append(p.location)
        self._in_init_profile_dir = False

    def init_config_files(self):
        """[optionally] copy default config files into profile dir."""
        self.config_file_paths.extend(SYSTEM_CONFIG_DIRS)
        # copy config files
        path = self.builtin_profile_dir
        if self.copy_config_files:
            src = self.profile

            cfg = self.config_file_name
            if path and os.path.exists(os.path.join(path, cfg)):
                self.log.warning(
                    "Staging %r from %s into %r [overwrite=%s]" %
                    (cfg, src, self.profile_dir.location, self.overwrite))
                self.profile_dir.copy_config_file(cfg,
                                                  path=path,
                                                  overwrite=self.overwrite)
            else:
                self.stage_default_config_file()
        else:
            # Still stage *bundled* config files, but not generated ones
            # This is necessary for `ipython profile=sympy` to load the profile
            # on the first go
            files = glob.glob(os.path.join(path, '*.py'))
            for fullpath in files:
                cfg = os.path.basename(fullpath)
                if self.profile_dir.copy_config_file(cfg,
                                                     path=path,
                                                     overwrite=False):
                    # file was copied
                    self.log.warning(
                        "Staging bundled %s from %s into %r" %
                        (cfg, self.profile, self.profile_dir.location))

    def stage_default_config_file(self):
        """auto generate default config file, and stage it into the profile."""
        s = self.generate_config_file()
        fname = os.path.join(self.profile_dir.location, self.config_file_name)
        if self.overwrite or not os.path.exists(fname):
            self.log.warning("Generating default config file: %r" % (fname))
            with open(fname, 'w') as f:
                f.write(s)

    @catch_config_error
    def initialize(self, argv=None):
        # don't hook up crash handler before parsing command-line
        self.parse_command_line(argv)
        self.init_crash_handler()
        if self.subapp is not None:
            # stop here if subapp is taking over
            return
        cl_config = self.config
        self.init_profile_dir()
        self.init_config_files()
        self.load_config_file()
        # enforce cl-opts override configfile opts:
        self.update_config(cl_config)
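
A hedged sketch of how the profile and config-file machinery above is usually driven by a subclass; the application name and profile here are illustrative:

class MyApp(BaseIPythonApplication):
    name = Unicode(u'my-app')   # default config file becomes 'my_app_config.py'

app = MyApp(auto_create=True)   # allow creation of a non-default profile dir
app.initialize(argv=['--profile=dev'])
# initialize() parses the command line, installs the crash handler, resolves
# (and here creates) the 'dev' profile dir, loads ipython_config.py and then
# my_app_config.py from config_file_paths, and finally re-applies the
# command-line options so they override values from the config files.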
Example n. 15
class Innotater(VBox):
    _view_name = Unicode('InnotaterView').tag(sync=True)
    _model_name = Unicode('InnotaterModel').tag(sync=True)
    _view_module = Unicode('jupyter-innotater').tag(sync=True)
    _model_module = Unicode('jupyter-innotater').tag(sync=True)
    _view_module_version = Unicode(semver_range).tag(sync=True)
    _model_module_version = Unicode(semver_range).tag(sync=True)

    index = Int().tag(sync=True)
    keyboard_shortcuts = Bool(False).tag(sync=True)
    is_dirty = Bool(False).tag(sync=True)

    def __init__(self,
                 inputs,
                 targets,
                 indexes=None,
                 keyboard_shortcuts=True,
                 save_hook=None,
                 vertical=False):

        self.path = ''

        self.dirty_uindexes = set()

        self.save_hook = save_hook

        self.datamanager = DataManager(inputs, targets, indexes)

        slider = IntSlider(min=0, max=0)

        self.slider = slider

        self.prevbtn = Button(description='< Previous')
        self.nextbtn = Button(description='Next >')

        self.input_widgets = [
            dw.get_widget() for dw in self.datamanager.get_inputs()
        ]
        self.target_widgets = [
            dw.get_widget() for dw in self.datamanager.get_targets()
        ]

        self.add_class('innotater-base')

        cbar_widgets = [self.prevbtn, slider, self.nextbtn]
        if self.save_hook:
            self.savebtn = Button(description='Save', disabled=True)
            cbar_widgets.append(self.savebtn)

        controlbar_widget = HBox(cbar_widgets)
        controlbar_widget.add_class('innotater-controlbar')

        InnotaterBox = HBox
        if vertical:
            InnotaterBox = VBox
            self.add_class('innotater-base-vertical')

        super().__init__([
            InnotaterBox([VBox(self.input_widgets),
                          VBox(self.target_widgets)]), controlbar_widget
        ])

        widgets.jslink((slider, 'value'), (self, 'index'))

        self._observe_targets(self.datamanager.get_targets())

        for dw in list(self.datamanager.get_all()):
            dw.post_widget_create(self.datamanager)

        self.prevbtn.on_click(lambda c: self.move_slider(-1))
        self.nextbtn.on_click(lambda c: self.move_slider(1))

        if self.save_hook:
            self.savebtn.on_click(lambda c: self.save_hook_fire())

        self.slider.max = self.datamanager.get_data_len() - 1

        self.index = 0
        self.keyboard_shortcuts = keyboard_shortcuts

        self.on_msg(self.handle_message)

        self.suspend_observed_changes = False
        self.update_ui()

    @observe('index')
    def slider_changed(self, change):
        self.update_ui()

    def move_slider(self, change):
        if change < 0 < self.index:
            self.index -= 1
        elif change > 0 and self.index < self.datamanager.get_data_len() - 1:
            self.index += 1

    def handle_message(self, _, content, buffers):
        if content['event'] == 'keydown':
            code = content['code']
            self.handle_keypress(code)

    def handle_keypress(self, code):
        if self.suspend_observed_changes:
            return
        if code == 78:  # n was 110
            self.move_slider(1)
        elif code == 80:  # p was 112
            self.move_slider(-1)
        elif code == 83:  # s
            self.save_hook_fire()

    def update_ui(self):
        uindex = self.datamanager.get_underlying_index(self.index)

        self.suspend_observed_changes = True

        for dw in self.datamanager.get_all():
            dw.update_ui(uindex)

        self.suspend_observed_changes = False

        self.prevbtn.disabled = self.index <= 0
        self.nextbtn.disabled = (
            self.index >= self.datamanager.get_data_len() - 1)

    def update_data(self, change):
        if self.suspend_observed_changes:
            return

        uindex = self.datamanager.get_underlying_index(self.index)
        # Find the Innotation that contains the widget that observed the change
        widg = change['owner']
        for dw in self.datamanager.get_targets():
            if dw.contains_widget(widg):
                dw.update_data(uindex)

        self._make_dirty(uindex)

    def _make_dirty(self, uindex):
        self.dirty_uindexes.add(uindex)

        if self.save_hook and not self.is_dirty:
            self.is_dirty = True
            self.savebtn.disabled = not self.is_dirty

    def save_hook_fire(self):
        if self.save_hook:
            self.savebtn.disabled = True  # Disable during save
            has_saved = self.save_hook(list(self.dirty_uindexes))
            if has_saved:
                self.is_dirty = False
                self.dirty_uindexes.clear()
            self.savebtn.disabled = not self.is_dirty

    def add_innotations(self, inputs, targets):
        self.datamanager.dynamic_add_innotations(inputs, targets)
        self._observe_targets(targets)
        for dw in inputs + targets:
            dw.post_widget_create(self.datamanager)

    def _observe_targets(self, targets):
        for dw in targets:
            dw.widget_observe(self.update_data, names='value')
            if isinstance(dw, ChildrenChangeNotifierMixin):
                dw.on_children_changed(self.new_children_handler)
            if isinstance(dw, DataChangeNotifierMixin):
                dw.on_data_changed(self.updated_data_handler)

    def new_children_handler(self, parent, newchildren):
        self.add_innotations([], newchildren)  # Assume always targets
        self.update_ui()

    def updated_data_handler(self, widget):
        uindex = self.datamanager.get_underlying_index(self.index)
        self._make_dirty(uindex)
        self.update_ui()
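
A small usage sketch for the widget above, assuming the ImageInnotation and BinaryClassInnotation helpers that jupyter-innotater ships alongside Innotater (the file names and save logic are illustrative):

import numpy as np
from jupyter_innotater import Innotater, ImageInnotation, BinaryClassInnotation

filenames = ['cat1.jpg', 'dog1.jpg', 'dog2.jpg']   # hypothetical image paths
targets = np.zeros(len(filenames), dtype='int')    # one binary label per image

def save_labels(dirty_uindexes):
    # Persist only the rows the user actually changed; returning True
    # clears the widget's dirty flag and re-disables the Save button.
    np.save('targets.npy', targets)
    return True

Innotater(ImageInnotation(filenames),
          BinaryClassInnotation(targets, name='Is Dog'),
          save_hook=save_labels,
          vertical=True)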
Example n. 16
class DetailsView(RegulusDOMWidget):
    """"""
    _model_name = Unicode('DetailsModel').tag(sync=True)
    _view_name = Unicode('DetailsView').tag(sync=True)

    title = Unicode('title').tag(sync=True)

    data = Instance(klass=DataWidget).tag(sync=True, **widget_serialization)
    measure = Unicode(None, allow_none=True).tag(sync=True)
    tree_model = Instance(klass=TreeWidget,
                          allow_none=True).tag(sync=True,
                                               **widget_serialization)
    show = List().tag(sync=True)
    highlight = Int(-2).tag(sync=True)
    show_inverse = Bool(True).tag(sync=True)
    inverse = Dict(allow_none=True).tag(sync=True)
    cmap = Unicode('RdYlBu').tag(sync=True)

    def __init__(self, **kwargs):
        self._inverse_cache = set()
        if 'data' in kwargs:
            data = kwargs['data']
            if not isinstance(data, DataWidget):
                data = DataWidget(data=data)
                kwargs['data'] = data
            if 'measure' not in kwargs:
                kwargs['measure'] = data.data.measure
        super().__init__(**kwargs)

    def reset_inverse(self):
        self._inverse_cache.clear()
        self._show({'new': self.show})

    def _send_msg(self, pid, data):
        self.inverse = {pid: data}
        self.inverse = None

    @observe('show')
    def _show(self, change):
        self._update_inverse()

    @observe('show_inverse')
    def _show_inverse(self, change):
        self._update_inverse()

    def _update_inverse(self):
        # Check that data is present before dereferencing it.
        if self.show_inverse and self.data is not None:
            r = self.data.data
            if not r.attr.has('inverse_regression'):
                r.add_attr(default_inverse_regression,
                           name='inverse_regression')
            pids = filter(lambda pid: pid not in self._inverse_cache,
                          self.show)
            msg = {}
            t0 = time()
            for node in r.find_nodes(pids):
                line = r.attr['inverse_regression'][node]
                msg[node.id] = convert(line)
                self._inverse_cache.add(node.id)
                if time() - t0 > 1:
                    self.inverse = msg
                    self.inverse = None
                    msg = {}
                    t0 = time()
            if len(msg) > 0:
                self.inverse = msg
                self.inverse = None
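
The repeated `inverse = msg` / `inverse = None` assignments above push partial results to the client through the synced trait and then reset it so the next batch re-triggers a change event. A hedged, generic sketch of that flush-roughly-every-second idea (the helper and its arguments are not part of the source):

from time import time

def flush_in_batches(widget, keys, compute, budget=1.0):
    """Send partial results via a synced Dict trait about once per `budget` seconds."""
    msg, t0 = {}, time()
    for key in keys:
        msg[key] = compute(key)
        if time() - t0 > budget:
            widget.inverse = msg    # sync this batch to the front end
            widget.inverse = None   # reset so the next assignment fires a change
            msg, t0 = {}, time()
    if msg:
        widget.inverse = msg
        widget.inverse = None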
Example n. 17
class NBExtensionApp(JupyterApp):
    """Entry point for installing notebook extensions"""

    description = """Install Jupyter notebook extensions
    
    Usage
    
        jupyter install-nbextension path/url
    
    This copies a file or a folder into the Jupyter nbextensions directory.
    If a URL is given, it will be downloaded.
    If an archive is given, it will be extracted into nbextensions.
    If the requested files are already up to date, no action is taken
    unless --overwrite is specified.
    """

    examples = """
    jupyter install-nbextension /path/to/myextension
    """
    aliases = aliases
    flags = flags

    overwrite = Bool(False,
                     config=True,
                     help="Force overwrite of existing files")
    symlink = Bool(False,
                   config=True,
                   help="Create symlinks instead of copying files")
    user = Bool(False, config=True, help="Whether to do a user install")
    prefix = Unicode('', config=True, help="Installation prefix")
    nbextensions_dir = Unicode(
        '',
        config=True,
        help="Full path to nbextensions dir (probably use prefix or user)")
    destination = Unicode('',
                          config=True,
                          help="Destination for the copy or symlink")
    verbose = Enum((0, 1, 2),
                   default_value=1,
                   config=True,
                   help="Verbosity level")

    def install_extensions(self):
        if len(self.extra_args) > 1:
            raise ValueError(
                "only one nbextension allowed at a time.  Call multiple times to install multiple extensions."
            )
        install_nbextension(
            self.extra_args[0],
            overwrite=self.overwrite,
            symlink=self.symlink,
            verbose=self.verbose,
            user=self.user,
            prefix=self.prefix,
            destination=self.destination,
            nbextensions_dir=self.nbextensions_dir,
        )

    def start(self):
        if not self.extra_args:
            for nbext in [pjoin(self.data_dir, u'nbextensions')
                          ] + SYSTEM_NBEXTENSIONS_DIRS:
                if os.path.exists(nbext):
                    print("Notebook extensions in %s:" % nbext)
                    for ext in os.listdir(nbext):
                        print(u"    %s" % ext)
        else:
            try:
                self.install_extensions()
            except ArgumentConflict as e:
                print(str(e), file=sys.stderr)
                self.exit(1)
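
The same installation can be done from Python; a hedged sketch around the install_nbextension call wrapped by install_extensions above (the import path is assumed to be notebook.nbextensions, and the extension path is illustrative):

from notebook.nbextensions import install_nbextension

install_nbextension('/path/to/myextension',
                    user=True,        # per-user nbextensions dir instead of prefix
                    overwrite=False,  # leave files that are already up to date
                    symlink=False,
                    destination='myextension')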
Example n. 18
class IPythonWidget(FrontendWidget):
    """ A FrontendWidget for an IPython kernel.
    """

    # If set, the 'custom_edit_requested(str, int)' signal will be emitted when
    # an editor is needed for a file. This overrides 'editor' and 'editor_line'
    # settings.
    custom_edit = Bool(False)
    custom_edit_requested = QtCore.Signal(object, object)

    editor = Unicode(default_editor,
                     config=True,
                     help="""
        A command for invoking a system text editor. If the string contains a
        {filename} format specifier, it will be used. Otherwise, the filename
        will be appended to the end of the command.
        """)

    editor_line = Unicode(config=True,
                          help="""
        The editor command to use when a specific line number is requested. The
        string should contain two format specifiers: {line} and {filename}. If
        this parameter is not specified, the line number option to the %edit
        magic will be ignored.
        """)

    style_sheet = Unicode(config=True,
                          help="""
        A CSS stylesheet. The stylesheet can contain classes for:
            1. Qt: QPlainTextEdit, QFrame, QWidget, etc
            2. Pygments: .c, .k, .o, etc. (see PygmentsHighlighter)
            3. IPython: .error, .in-prompt, .out-prompt, etc
        """)

    syntax_style = Unicode(config=True,
                           help="""
        If not empty, use this Pygments style for syntax highlighting.
        Otherwise, the style sheet is queried for Pygments style
        information.
        """)

    # Prompts.
    in_prompt = Unicode(default_in_prompt, config=True)
    out_prompt = Unicode(default_out_prompt, config=True)
    input_sep = Unicode(default_input_sep, config=True)
    output_sep = Unicode(default_output_sep, config=True)
    output_sep2 = Unicode(default_output_sep2, config=True)

    # FrontendWidget protected class variables.
    _input_splitter_class = IPythonInputSplitter
    _prompt_transformer = IPythonInputSplitter(
        physical_line_transforms=[ipy_prompt()],
        logical_line_transforms=[],
        python_line_transforms=[],
    )

    # IPythonWidget protected class variables.
    _PromptBlock = namedtuple('_PromptBlock', ['block', 'length', 'number'])
    _payload_source_edit = 'edit_magic'
    _payload_source_exit = 'ask_exit'
    _payload_source_next_input = 'set_next_input'
    _payload_source_page = 'page'
    _retrying_history_request = False
    _starting = False

    #---------------------------------------------------------------------------
    # 'object' interface
    #---------------------------------------------------------------------------

    def __init__(self, *args, **kw):
        super(IPythonWidget, self).__init__(*args, **kw)

        # IPythonWidget protected variables.
        self._payload_handlers = {
            self._payload_source_edit: self._handle_payload_edit,
            self._payload_source_exit: self._handle_payload_exit,
            self._payload_source_page: self._handle_payload_page,
            self._payload_source_next_input: self._handle_payload_next_input
        }
        self._previous_prompt_obj = None
        self._keep_kernel_on_exit = None

        # Initialize widget styling.
        if self.style_sheet:
            self._style_sheet_changed()
            self._syntax_style_changed()
        else:
            self.set_default_style()

        self._guiref_loaded = False

    #---------------------------------------------------------------------------
    # 'BaseFrontendMixin' abstract interface
    #---------------------------------------------------------------------------
    def _handle_complete_reply(self, rep):
        """ Reimplemented to support IPython's improved completion machinery.
        """
        self.log.debug("complete: %s", rep.get('content', ''))
        cursor = self._get_cursor()
        info = self._request_info.get('complete')
        if info and info.id == rep['parent_header']['msg_id'] and \
                info.pos == cursor.position():
            content = rep['content']
            matches = content['matches']
            start = content['cursor_start']
            end = content['cursor_end']

            start = max(start, 0)
            end = max(end, start)

            # Move the control's cursor to the desired end point
            cursor_pos = self._get_input_buffer_cursor_pos()
            if end < cursor_pos:
                cursor.movePosition(QtGui.QTextCursor.Left,
                                    n=(cursor_pos - end))
            elif end > cursor_pos:
                cursor.movePosition(QtGui.QTextCursor.Right,
                                    n=(end - cursor_pos))
            # This line actually applies the move to control's cursor
            self._control.setTextCursor(cursor)

            offset = end - start
            # Move the local cursor object to the start of the match and
            # complete.
            cursor.movePosition(QtGui.QTextCursor.Left, n=offset)
            self._complete_with_items(cursor, matches)

    def _handle_execute_reply(self, msg):
        """ Reimplemented to support prompt requests.
        """
        msg_id = msg['parent_header'].get('msg_id')
        info = self._request_info['execute'].get(msg_id)
        if info and info.kind == 'prompt':
            content = msg['content']
            if content['status'] == 'aborted':
                self._show_interpreter_prompt()
            else:
                number = content['execution_count'] + 1
                self._show_interpreter_prompt(number)
            self._request_info['execute'].pop(msg_id)
        else:
            super(IPythonWidget, self)._handle_execute_reply(msg)

    def _handle_history_reply(self, msg):
        """ Implemented to handle history tail replies, which are only supported
            by the IPython kernel.
        """
        content = msg['content']
        if 'history' not in content:
            self.log.error("History request failed: %r" % content)
            if content.get('status', '') == 'aborted' and \
                                            not self._retrying_history_request:
                # a *different* action caused this request to be aborted, so
                # we should try again.
                self.log.error("Retrying aborted history request")
                # prevent multiple retries of aborted requests:
                self._retrying_history_request = True
                # wait out the kernel's queue flush, which is currently timed at 0.1s
                time.sleep(0.25)
                self.kernel_client.history(hist_access_type='tail', n=1000)
            else:
                self._retrying_history_request = False
            return
        # reset retry flag
        self._retrying_history_request = False
        history_items = content['history']
        self.log.debug("Received history reply with %i entries",
                       len(history_items))
        items = []
        last_cell = u""
        for _, _, cell in history_items:
            cell = cell.rstrip()
            if cell != last_cell:
                items.append(cell)
                last_cell = cell
        self._set_history(items)

    def _insert_other_input(self, cursor, content):
        """Insert function for input from other frontends"""
        cursor.beginEditBlock()
        start = cursor.position()
        n = content.get('execution_count', 0)
        cursor.insertText('\n')
        self._insert_html(cursor, self._make_in_prompt(n))
        cursor.insertText(content['code'])
        self._highlighter.rehighlightBlock(cursor.block())
        cursor.endEditBlock()

    def _handle_execute_input(self, msg):
        """Handle an execute_input message"""
        self.log.debug("execute_input: %s", msg.get('content', ''))
        if self.include_output(msg):
            self._append_custom(self._insert_other_input,
                                msg['content'],
                                before_prompt=True)

    def _handle_execute_result(self, msg):
        """Reimplemented for IPython-style "display hook"."""
        if self.include_output(msg):
            self.flush_clearoutput()
            content = msg['content']
            prompt_number = content.get('execution_count', 0)
            data = content['data']
            if 'text/plain' in data:
                self._append_plain_text(self.output_sep, True)
                self._append_html(self._make_out_prompt(prompt_number), True)
                text = data['text/plain']
                # If the repr is multiline, make sure we start on a new line,
                # so that its lines are aligned.
                if "\n" in text and not self.output_sep.endswith("\n"):
                    self._append_plain_text('\n', True)
                self._append_plain_text(text + self.output_sep2, True)

    def _handle_display_data(self, msg):
        """The base handler for the ``display_data`` message."""
        # For now, we don't display data from other frontends, but we
        # eventually will as this allows all frontends to monitor the display
        # data. But we need to figure out how to handle this in the GUI.
        if self.include_output(msg):
            self.flush_clearoutput()
            data = msg['content']['data']
            metadata = msg['content']['metadata']
            # In the regular IPythonWidget, we simply print the plain text
            # representation.
            if 'text/plain' in data:
                text = data['text/plain']
                self._append_plain_text(text, True)
            # This newline seems to be needed for text and html output.
            self._append_plain_text(u'\n', True)

    def _handle_kernel_info_reply(self, rep):
        """Handle kernel info replies."""
        content = rep['content']
        if not self._guiref_loaded:
            if content.get('implementation') == 'ipython':
                self._load_guiref_magic()
            self._guiref_loaded = True

        self.kernel_banner = content.get('banner', '')
        if self._starting:
            # finish handling started channels
            self._starting = False
            super(IPythonWidget, self)._started_channels()

    def _started_channels(self):
        """Reimplemented to make a history request and load %guiref."""
        self._starting = True
        # The reply will trigger %guiref load provided language=='python'
        self.kernel_client.kernel_info()

        self.kernel_client.history(hist_access_type='tail', n=1000)

    def _load_guiref_magic(self):
        """Load %guiref magic."""
        self.kernel_client.execute('\n'.join([
            "try:",
            "    _usage",
            "except:",
            "    from IPython.core import usage as _usage",
            "    get_ipython().register_magic_function(_usage.page_guiref, 'line', 'guiref')",
            "    del _usage",
        ]),
                                   silent=True)

    #---------------------------------------------------------------------------
    # 'ConsoleWidget' public interface
    #---------------------------------------------------------------------------

    #---------------------------------------------------------------------------
    # 'FrontendWidget' public interface
    #---------------------------------------------------------------------------

    def execute_file(self, path, hidden=False):
        """ Reimplemented to use the 'run' magic.
        """
        # Use forward slashes on Windows to avoid escaping each separator.
        if sys.platform == 'win32':
            path = os.path.normpath(path).replace('\\', '/')

        # Perhaps we should not be using %run directly, but while we
        # are, it is necessary to quote or escape filenames containing spaces
        # or quotes.

        # In earlier code here, to minimize escaping, we sometimes quoted the
        # filename with single quotes. But to do this, this code must be
        # platform-aware, because run uses shlex rather than python string
        # parsing, so that:
        # * In Win: single quotes can be used in the filename without quoting,
        #   and we cannot use single quotes to quote the filename.
        # * In *nix: we can escape double quotes in a double quoted filename,
        #   but can't escape single quotes in a single quoted filename.

        # So to keep this code non-platform-specific and simple, we now only
        # use double quotes to quote filenames, and escape when needed:
        if ' ' in path or "'" in path or '"' in path:
            path = '"%s"' % path.replace('"', '\\"')
        self.execute('%%run %s' % path, hidden=hidden)

    #---------------------------------------------------------------------------
    # 'FrontendWidget' protected interface
    #---------------------------------------------------------------------------

    def _process_execute_error(self, msg):
        """ Reimplemented for IPython-style traceback formatting.
        """
        content = msg['content']
        traceback = '\n'.join(content['traceback']) + '\n'
        if False:
            # FIXME: For now, tracebacks come as plain text, so we can't use
            # the html renderer yet.  Once we refactor ultratb to produce
            # properly styled tracebacks, this branch should be the default
            traceback = traceback.replace(' ', '&nbsp;')
            traceback = traceback.replace('\n', '<br/>')

            ename = content['ename']
            ename_styled = '<span class="error">%s</span>' % ename
            traceback = traceback.replace(ename, ename_styled)

            self._append_html(traceback)
        else:
            # This is the fallback for now, using plain text with ansi escapes
            self._append_plain_text(traceback)

    def _process_execute_payload(self, item):
        """ Reimplemented to dispatch payloads to handler methods.
        """
        handler = self._payload_handlers.get(item['source'])
        if handler is None:
            # We have no handler for this type of payload, simply ignore it
            return False
        else:
            handler(item)
            return True

    def _show_interpreter_prompt(self, number=None):
        """ Reimplemented for IPython-style prompts.
        """
        # If a number was not specified, make a prompt number request.
        if number is None:
            msg_id = self.kernel_client.execute('', silent=True)
            info = self._ExecutionRequest(msg_id, 'prompt')
            self._request_info['execute'][msg_id] = info
            return

        # Show a new prompt and save information about it so that it can be
        # updated later if the prompt number turns out to be wrong.
        self._prompt_sep = self.input_sep
        self._show_prompt(self._make_in_prompt(number), html=True)
        block = self._control.document().lastBlock()
        length = len(self._prompt)
        self._previous_prompt_obj = self._PromptBlock(block, length, number)

        # Update continuation prompt to reflect (possibly) new prompt length.
        self._set_continuation_prompt(self._make_continuation_prompt(
            self._prompt),
                                      html=True)

    def _show_interpreter_prompt_for_reply(self, msg):
        """ Reimplemented for IPython-style prompts.
        """
        # Update the old prompt number if necessary.
        content = msg['content']
        # abort replies do not have any keys:
        if content['status'] == 'aborted':
            if self._previous_prompt_obj:
                previous_prompt_number = self._previous_prompt_obj.number
            else:
                previous_prompt_number = 0
        else:
            previous_prompt_number = content['execution_count']
        if self._previous_prompt_obj and \
                self._previous_prompt_obj.number != previous_prompt_number:
            block = self._previous_prompt_obj.block

            # Make sure the prompt block has not been erased.
            if block.isValid() and block.text():

                # Remove the old prompt and insert a new prompt.
                cursor = QtGui.QTextCursor(block)
                cursor.movePosition(QtGui.QTextCursor.Right,
                                    QtGui.QTextCursor.KeepAnchor,
                                    self._previous_prompt_obj.length)
                prompt = self._make_in_prompt(previous_prompt_number)
                self._prompt = self._insert_html_fetching_plain_text(
                    cursor, prompt)

                # When the HTML is inserted, Qt blows away the syntax
                # highlighting for the line, so we need to rehighlight it.
                self._highlighter.rehighlightBlock(cursor.block())

            self._previous_prompt_obj = None

        # Show a new prompt with the kernel's estimated prompt number.
        self._show_interpreter_prompt(previous_prompt_number + 1)

    #---------------------------------------------------------------------------
    # 'IPythonWidget' interface
    #---------------------------------------------------------------------------

    def set_default_style(self, colors='lightbg'):
        """ Sets the widget style to the class defaults.

        Parameters
        ----------
        colors : str, optional (default lightbg)
            Whether to use the default IPython light background or dark
            background or B&W style.
        """
        colors = colors.lower()
        if colors == 'lightbg':
            self.style_sheet = styles.default_light_style_sheet
            self.syntax_style = styles.default_light_syntax_style
        elif colors == 'linux':
            self.style_sheet = styles.default_dark_style_sheet
            self.syntax_style = styles.default_dark_syntax_style
        elif colors == 'nocolor':
            self.style_sheet = styles.default_bw_style_sheet
            self.syntax_style = styles.default_bw_syntax_style
        else:
            raise KeyError("No such color scheme: %s" % colors)

    #---------------------------------------------------------------------------
    # 'IPythonWidget' protected interface
    #---------------------------------------------------------------------------

    def _edit(self, filename, line=None):
        """ Opens a Python script for editing.

        Parameters
        ----------
        filename : str
            A path to a local system file.

        line : int, optional
            A line of interest in the file.
        """
        if self.custom_edit:
            self.custom_edit_requested.emit(filename, line)
        elif not self.editor:
            self._append_plain_text(
                'No default editor available.\n'
                'Specify a GUI text editor in the `IPythonWidget.editor` '
                'configurable to enable the %edit magic')
        else:
            try:
                filename = '"%s"' % filename
                if line and self.editor_line:
                    command = self.editor_line.format(filename=filename,
                                                      line=line)
                else:
                    try:
                        command = self.editor.format()
                    except KeyError:
                        command = self.editor.format(filename=filename)
                    else:
                        command += ' ' + filename
            except KeyError:
                self._append_plain_text('Invalid editor command.\n')
            else:
                try:
                    Popen(command, shell=True)
                except OSError:
                    msg = 'Opening editor with command "%s" failed.\n'
                    self._append_plain_text(msg % command)

    def _make_in_prompt(self, number):
        """ Given a prompt number, returns an HTML In prompt.
        """
        try:
            body = self.in_prompt % number
        except TypeError:
            # allow in_prompt to leave out number, e.g. '>>> '
            from xml.sax.saxutils import escape
            body = escape(self.in_prompt)
        return '<span class="in-prompt">%s</span>' % body

    def _make_continuation_prompt(self, prompt):
        """ Given a plain text version of an In prompt, returns an HTML
            continuation prompt.
        """
        end_chars = '...: '
        space_count = len(prompt.lstrip('\n')) - len(end_chars)
        body = '&nbsp;' * space_count + end_chars
        return '<span class="in-prompt">%s</span>' % body

    def _make_out_prompt(self, number):
        """ Given a prompt number, returns an HTML Out prompt.
        """
        try:
            body = self.out_prompt % number
        except TypeError:
            # allow out_prompt to leave out number, e.g. '<<< '
            from xml.sax.saxutils import escape
            body = escape(self.out_prompt)
        return '<span class="out-prompt">%s</span>' % body

    #------ Payload handlers --------------------------------------------------

    # Payload handlers with a generic interface: each takes the opaque payload
    # dict, unpacks it and calls the underlying functions with the necessary
    # arguments.

    def _handle_payload_edit(self, item):
        self._edit(item['filename'], item['line_number'])

    def _handle_payload_exit(self, item):
        self._keep_kernel_on_exit = item['keepkernel']
        self.exit_requested.emit(self)

    def _handle_payload_next_input(self, item):
        self.input_buffer = item['text']

    def _handle_payload_page(self, item):
        # Since the plain text widget supports only a very small subset of HTML
        # and we have no control over the HTML source, we only page HTML
        # payloads in the rich text widget.
        data = item['data']
        if 'text/html' in data and self.kind == 'rich':
            self._page(data['text/html'], html=True)
        else:
            self._page(data['text/plain'], html=False)

    #------ Trait change handlers --------------------------------------------

    def _style_sheet_changed(self):
        """ Set the style sheets of the underlying widgets.
        """
        self.setStyleSheet(self.style_sheet)
        if self._control is not None:
            self._control.document().setDefaultStyleSheet(self.style_sheet)
            bg_color = self._control.palette().window().color()
            self._ansi_processor.set_background_color(bg_color)

        if self._page_control is not None:
            self._page_control.document().setDefaultStyleSheet(
                self.style_sheet)

    def _syntax_style_changed(self):
        """ Set the style for the syntax highlighter.
        """
        if self._highlighter is None:
            # ignore premature calls
            return
        if self.syntax_style:
            self._highlighter.set_style(self.syntax_style)
        else:
            self._highlighter.set_style_sheet(self.style_sheet)

    #------ Trait default initializers -----------------------------------------

    def _banner_default(self):
        return "Jupyter QtConsole {version}\n".format(version=__version__)
Example n. 19
class LocalAuthenticator(Authenticator):
    """Base class for Authenticators that work with local Linux/UNIX users

    Checks for local users, and can attempt to create them if they don't exist.
    """

    create_system_users = Bool(False,
                               help="""
        If set to True, will attempt to create local system users if they do not exist already.

        Supports Linux and BSD variants only.
        """).tag(config=True)

    add_user_cmd = Command(help="""
        The command to use for creating users as a list of strings

        For each element in the list, the string USERNAME will be replaced with
        the user's username. The username will also be appended as the final argument.

        For Linux, the default value is:

            ['adduser', '-q', '--gecos', '""', '--disabled-password']

        To specify a custom home directory, set this to:

            ['adduser', '-q', '--gecos', '""', '--home', '/customhome/USERNAME', '--disabled-password']

        This will run the command:

            adduser -q --gecos "" --home /customhome/river --disabled-password river

        when the user 'river' is created.
        """).tag(config=True)

    @default('add_user_cmd')
    def _add_user_cmd_default(self):
        """Guess the most likely-to-work adduser command for each platform"""
        if sys.platform == 'darwin':
            raise ValueError("I don't know how to create users on OS X")
        elif which('pw'):
            # Probably BSD
            return ['pw', 'useradd', '-m']
        else:
            # This appears to be the Linux non-interactive adduser command:
            return ['adduser', '-q', '--gecos', '""', '--disabled-password']

    group_whitelist = Set(help="""
        Whitelist all users from this UNIX group.

        This makes the username whitelist ineffective.
        """).tag(config=True)

    @observe('group_whitelist')
    def _group_whitelist_changed(self, change):
        """
        Log a warning if both group_whitelist and user whitelist are set.
        """
        if self.whitelist:
            self.log.warning(
                "Ignoring username whitelist because group whitelist supplied!"
            )

    def check_whitelist(self, username):
        if self.group_whitelist:
            return self.check_group_whitelist(username)
        else:
            return super().check_whitelist(username)

    def check_group_whitelist(self, username):
        """
        If group_whitelist is configured, check if authenticating user is part of group.
        """
        if not self.group_whitelist:
            return False
        for grnam in self.group_whitelist:
            try:
                group = getgrnam(grnam)
            except KeyError:
                self.log.error('No such group: [%s]' % grnam)
                continue
            if username in group.gr_mem:
                return True
        return False

    @gen.coroutine
    def add_user(self, user):
        """Hook called whenever a new user is added

        If self.create_system_users is True, the user will be created if it
        doesn't already exist.
        """
        user_exists = yield gen.maybe_future(self.system_user_exists(user))
        if not user_exists:
            if self.create_system_users:
                yield gen.maybe_future(self.add_system_user(user))
            else:
                raise KeyError("User %s does not exist." % user.name)

        yield gen.maybe_future(super().add_user(user))

    @staticmethod
    def system_user_exists(user):
        """Check if the user exists on the system"""
        import pwd
        try:
            pwd.getpwnam(user.name)
        except KeyError:
            return False
        else:
            return True

    def add_system_user(self, user):
        """Create a new local UNIX user on the system.

        Tested to work on FreeBSD and Linux, at least.
        """
        name = user.name
        cmd = [arg.replace('USERNAME', name)
               for arg in self.add_user_cmd] + [name]
        self.log.info("Creating user: %s", ' '.join(map(pipes.quote, cmd)))
        p = Popen(cmd, stdout=PIPE, stderr=STDOUT)
        p.wait()
        if p.returncode:
            err = p.stdout.read().decode('utf8', 'replace')
            raise RuntimeError("Failed to create system user %s: %s" %
                               (name, err))
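
Because create_system_users, add_user_cmd and group_whitelist are all tagged
config=True, they are normally set from jupyterhub_config.py rather than in code.
A minimal configuration sketch follows; the values are illustrative, not defaults,
and PAMAuthenticator is used here simply as a common LocalAuthenticator subclass.

# jupyterhub_config.py -- illustrative values only
c = get_config()  # noqa

c.JupyterHub.authenticator_class = 'jupyterhub.auth.PAMAuthenticator'

# Create missing system accounts when a new user is added (Linux/BSD only).
c.LocalAuthenticator.create_system_users = True

# Custom home directories; USERNAME is replaced with the user's name and the
# username is appended as the final argument, as described above.
c.LocalAuthenticator.add_user_cmd = [
    'adduser', '-q', '--gecos', '""',
    '--home', '/customhome/USERNAME', '--disabled-password',
]

# Allow anyone in this UNIX group (this overrides the username whitelist).
c.LocalAuthenticator.group_whitelist = {'jupyterhub-users'}
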
Example no. 20
class FrontendWidget(HistoryConsoleWidget, BaseFrontendMixin):
    """ A Qt frontend for a generic Python kernel.
    """

    # The text to show when the kernel is (re)started.
    banner = Unicode(config=True)
    kernel_banner = Unicode()
    # Whether to show the banner
    _display_banner = Bool(False)

    # An option and corresponding signal for overriding the default kernel
    # interrupt behavior.
    custom_interrupt = Bool(False)
    custom_interrupt_requested = QtCore.Signal()

    # An option and corresponding signals for overriding the default kernel
    # restart behavior.
    custom_restart = Bool(False)
    custom_restart_kernel_died = QtCore.Signal(float)
    custom_restart_requested = QtCore.Signal()

    # Whether to automatically show calltips on open-parentheses.
    enable_calltips = Bool(True, config=True,
        help="Whether to draw information calltips on open-parentheses.")

    clear_on_kernel_restart = Bool(True, config=True,
        help="Whether to clear the console when the kernel is restarted")

    confirm_restart = Bool(True, config=True,
        help="Whether to ask for user confirmation when restarting kernel")

    lexer_class = DottedObjectName(config=True,
        help="The pygments lexer class to use."
    )
    def _lexer_class_changed(self, name, old, new):
        lexer_class = import_item(new)
        self.lexer = lexer_class()

    def _lexer_class_default(self):
        if py3compat.PY3:
            return 'pygments.lexers.Python3Lexer'
        else:
            return 'pygments.lexers.PythonLexer'

    lexer = Any()
    def _lexer_default(self):
        lexer_class = import_item(self.lexer_class)
        return lexer_class()

    # Emitted when a user visible 'execute_request' has been submitted to the
    # kernel from the FrontendWidget. Contains the code to be executed.
    executing = QtCore.Signal(object)

    # Emitted when a user-visible 'execute_reply' has been received from the
    # kernel and processed by the FrontendWidget. Contains the response message.
    executed = QtCore.Signal(object)

    # Emitted when an exit request has been received from the kernel.
    exit_requested = QtCore.Signal(object)

    _CallTipRequest = namedtuple('_CallTipRequest', ['id', 'pos'])
    _CompletionRequest = namedtuple('_CompletionRequest', ['id', 'pos'])
    _ExecutionRequest = namedtuple('_ExecutionRequest', ['id', 'kind'])
    _local_kernel = False
    _highlighter = Instance(FrontendHighlighter, allow_none=True)

    #---------------------------------------------------------------------------
    # 'object' interface
    #---------------------------------------------------------------------------

    def __init__(self, *args, **kw):
        super(FrontendWidget, self).__init__(*args, **kw)
        # FIXME: remove this when PySide min version is updated past 1.0.7
        # forcefully disable calltips if PySide is < 1.0.7, because they crash
        if qt.QT_API == qt.QT_API_PYSIDE:
            import PySide
            if PySide.__version_info__ < (1,0,7):
                self.log.warn("PySide %s < 1.0.7 detected, disabling calltips" % PySide.__version__)
                self.enable_calltips = False

        # FrontendWidget protected variables.
        self._bracket_matcher = BracketMatcher(self._control)
        self._call_tip_widget = CallTipWidget(self._control)
        self._copy_raw_action = QtGui.QAction('Copy (Raw Text)', None)
        self._hidden = False
        self._highlighter = FrontendHighlighter(self, lexer=self.lexer)
        self._kernel_manager = None
        self._kernel_client = None
        self._request_info = {}
        self._request_info['execute'] = {}
        self._callback_dict = {}
        self._display_banner = True

        # Configure the ConsoleWidget.
        self.tab_width = 4
        self._set_continuation_prompt('... ')

        # Configure the CallTipWidget.
        self._call_tip_widget.setFont(self.font)
        self.font_changed.connect(self._call_tip_widget.setFont)

        # Configure actions.
        action = self._copy_raw_action
        key = QtCore.Qt.CTRL | QtCore.Qt.SHIFT | QtCore.Qt.Key_C
        action.setEnabled(False)
        action.setShortcut(QtGui.QKeySequence(key))
        action.setShortcutContext(QtCore.Qt.WidgetWithChildrenShortcut)
        action.triggered.connect(self.copy_raw)
        self.copy_available.connect(action.setEnabled)
        self.addAction(action)

        # Connect signal handlers.
        document = self._control.document()
        document.contentsChange.connect(self._document_contents_change)

        # Set flag for whether we are connected via localhost.
        self._local_kernel = kw.get('local_kernel',
                                    FrontendWidget._local_kernel)

        # Whether or not a clear_output call is pending new output.
        self._pending_clearoutput = False

    #---------------------------------------------------------------------------
    # 'ConsoleWidget' public interface
    #---------------------------------------------------------------------------

    def copy(self):
        """ Copy the currently selected text to the clipboard, removing prompts.
        """
        if self._page_control is not None and self._page_control.hasFocus():
            self._page_control.copy()
        elif self._control.hasFocus():
            text = self._control.textCursor().selection().toPlainText()
            if text:
                # Remove prompts.
                lines = text.splitlines()
                lines = map(self._highlighter.transform_classic_prompt, lines)
                lines = map(self._highlighter.transform_ipy_prompt, lines)
                text = '\n'.join(lines)
                # Needed to prevent errors when copying the prompt.
                # See issue 264
                try:
                    was_newline = text[-1] == '\n'
                except IndexError:
                    was_newline = False
                if was_newline:  # user doesn't need newline
                    text = text[:-1]
                QtGui.QApplication.clipboard().setText(text)
        else:
            self.log.debug("frontend widget : unknown copy target")

    #---------------------------------------------------------------------------
    # 'ConsoleWidget' abstract interface
    #---------------------------------------------------------------------------

    def _execute(self, source, hidden):
        """ Execute 'source'. If 'hidden', do not show any output.

        See parent class :meth:`execute` docstring for full details.
        """
        msg_id = self.kernel_client.execute(source, hidden)
        self._request_info['execute'][msg_id] = self._ExecutionRequest(msg_id, 'user')
        self._hidden = hidden
        if not hidden:
            self.executing.emit(source)

    def _prompt_started_hook(self):
        """ Called immediately after a new prompt is displayed.
        """
        if not self._reading:
            self._highlighter.highlighting_on = True

    def _prompt_finished_hook(self):
        """ Called immediately after a prompt is finished, i.e. when some input
            will be processed and a new prompt displayed.
        """
        if not self._reading:
            self._highlighter.highlighting_on = False

    def _tab_pressed(self):
        """ Called when the tab key is pressed. Returns whether to continue
            processing the event.
        """
        # Perform tab completion if:
        # 1) The cursor is in the input buffer.
        # 2) There is a non-whitespace character before the cursor.
        # 3) There is no active selection.
        text = self._get_input_buffer_cursor_line()
        if text is None:
            return False
        non_ws_before = bool(text[:self._get_input_buffer_cursor_column()].strip())
        complete = non_ws_before and self._get_cursor().selectedText() == ''
        if complete:
            self._complete()
        return not complete

    #---------------------------------------------------------------------------
    # 'ConsoleWidget' protected interface
    #---------------------------------------------------------------------------

    def _context_menu_make(self, pos):
        """ Reimplemented to add an action for raw copy.
        """
        menu = super(FrontendWidget, self)._context_menu_make(pos)
        for before_action in menu.actions():
            if before_action.shortcut().matches(QtGui.QKeySequence.Paste) == \
                    QtGui.QKeySequence.ExactMatch:
                menu.insertAction(before_action, self._copy_raw_action)
                break
        return menu

    def request_interrupt_kernel(self):
        if self._executing:
            self.interrupt_kernel()

    def request_restart_kernel(self):
        message = 'Are you sure you want to restart the kernel?'
        self.restart_kernel(message, now=False)

    def _event_filter_console_keypress(self, event):
        """ Reimplemented for execution interruption and smart backspace.
        """
        key = event.key()
        if self._control_key_down(event.modifiers(), include_command=False):

            if key == QtCore.Qt.Key_C and self._executing:
                self.request_interrupt_kernel()
                return True

            elif key == QtCore.Qt.Key_Period:
                self.request_restart_kernel()
                return True

        elif not event.modifiers() & QtCore.Qt.AltModifier:

            # Smart backspace: remove four characters in one backspace if:
            # 1) everything left of the cursor is whitespace
            # 2) the four characters immediately left of the cursor are spaces
            if key == QtCore.Qt.Key_Backspace:
                col = self._get_input_buffer_cursor_column()
                cursor = self._control.textCursor()
                if col > 3 and not cursor.hasSelection():
                    text = self._get_input_buffer_cursor_line()[:col]
                    if text.endswith('    ') and not text.strip():
                        cursor.movePosition(QtGui.QTextCursor.Left,
                                            QtGui.QTextCursor.KeepAnchor, 4)
                        cursor.removeSelectedText()
                        return True

        return super(FrontendWidget, self)._event_filter_console_keypress(event)

    #---------------------------------------------------------------------------
    # 'BaseFrontendMixin' abstract interface
    #---------------------------------------------------------------------------
    def _handle_clear_output(self, msg):
        """Handle clear output messages."""
        if self.include_output(msg):
            wait = msg['content'].get('wait', True)
            if wait:
                self._pending_clearoutput = True
            else:
                self.clear_output()

    def _silent_exec_callback(self, expr, callback):
        """Silently execute `expr` in the kernel and call `callback` with reply

        The `expr` is evaluated silently in the kernel (without output in
        the frontend). Call `callback` with the
        `repr <http://docs.python.org/library/functions.html#repr>`_ as first argument.

        Parameters
        ----------
        expr : string
            valid string to be executed by the kernel.
        callback : function
            function accepting one argument, as a string. The string will be
            the `repr` of the result of evaluating `expr`

        The `callback` is called with the `repr()` of the result of `expr` as
        first argument. To get the object, do `eval()` on the passed value.

        See Also
        --------
        _handle_exec_callback : private method, deal with calling callback with reply

        """

        # Generate a uuid used to tell whether a reply corresponds to a request
        # that originated from here (the msg id could perhaps be used instead).
        local_uuid = str(uuid.uuid1())
        msg_id = self.kernel_client.execute('',
            silent=True, user_expressions={ local_uuid:expr })
        self._callback_dict[local_uuid] = callback
        self._request_info['execute'][msg_id] = self._ExecutionRequest(msg_id, 'silent_exec_callback')

    def _handle_exec_callback(self, msg):
        """Execute `callback` corresponding to `msg` reply, after ``_silent_exec_callback``

        Parameters
        ----------
        msg : raw message sent by the kernel, containing `user_expressions`
                and having a 'silent_exec_callback' kind.

        Notes
        -----
        This function will look for a `callback` associated with the
        corresponding message id. Association has been made by
        `_silent_exec_callback`. `callback` is then called with the `repr()`
        of the value of corresponding `user_expressions` as argument.
        `callback` is then removed from the known list so that any message
        coming again with the same id won't trigger it.
        """
        user_exp = msg['content'].get('user_expressions')
        if not user_exp:
            return
        for expression in user_exp:
            if expression in self._callback_dict:
                self._callback_dict.pop(expression)(user_exp[expression])

    def _handle_execute_reply(self, msg):
        """ Handles replies for code execution.
        """
        self.log.debug("execute_reply: %s", msg.get('content', ''))
        msg_id = msg['parent_header']['msg_id']
        info = self._request_info['execute'].get(msg_id)
        # unset reading flag, because if execute finished, raw_input can't
        # still be pending.
        self._reading = False
        # Note: if info is None, this request is ignored.
        if info and info.kind == 'user' and not self._hidden:
            # Make sure that all output from the SUB channel has been processed
            # before writing a new prompt.
            self.kernel_client.iopub_channel.flush()

            # Reset the ANSI style information to prevent bad text in stdout
            # from messing up our colors. We're not a true terminal so we're
            # allowed to do this.
            if self.ansi_codes:
                self._ansi_processor.reset_sgr()

            content = msg['content']
            status = content['status']
            if status == 'ok':
                self._process_execute_ok(msg)
            elif status == 'aborted':
                self._process_execute_abort(msg)

            self._show_interpreter_prompt_for_reply(msg)
            self.executed.emit(msg)
            self._request_info['execute'].pop(msg_id)
        elif info and info.kind == 'silent_exec_callback' and not self._hidden:
            self._handle_exec_callback(msg)
            self._request_info['execute'].pop(msg_id)
        elif info and not self._hidden:
            raise RuntimeError("Unknown handler for %s" % info.kind)

    def _handle_error(self, msg):
        """ Handle error messages.
        """
        self._process_execute_error(msg)

    def _handle_input_request(self, msg):
        """ Handle requests for raw_input.
        """
        self.log.debug("input: %s", msg.get('content', ''))
        if self._hidden:
            raise RuntimeError('Request for raw input during hidden execution.')

        # Make sure that all output from the SUB channel has been processed
        # before entering readline mode.
        self.kernel_client.iopub_channel.flush()

        def callback(line):
            self.kernel_client.input(line)
        if self._reading:
            self.log.debug("Got second input request, assuming first was interrupted.")
            self._reading = False
        self._readline(msg['content']['prompt'], callback=callback, password=msg['content']['password'])

    def _kernel_restarted_message(self, died=True):
        msg = "Kernel died, restarting" if died else "Kernel restarting"
        self._append_html("<br>%s<hr><br>" % msg,
            before_prompt=False
        )

    def _handle_kernel_died(self, since_last_heartbeat):
        """Handle the kernel's death (if we do not own the kernel).
        """
        self.log.warn("kernel died: %s", since_last_heartbeat)
        if self.custom_restart:
            self.custom_restart_kernel_died.emit(since_last_heartbeat)
        else:
            self._kernel_restarted_message(died=True)
            self.reset()

    def _handle_kernel_restarted(self, died=True):
        """Notice that the autorestarter restarted the kernel.

        There's nothing to do but show a message.
        """
        self.log.warn("kernel restarted")
        self._kernel_restarted_message(died=died)
        self.reset()

    def _handle_inspect_reply(self, rep):
        """Handle replies for call tips."""
        self.log.debug("oinfo: %s", rep.get('content', ''))
        cursor = self._get_cursor()
        info = self._request_info.get('call_tip')
        if info and info.id == rep['parent_header']['msg_id'] and \
                info.pos == cursor.position():
            content = rep['content']
            if content.get('status') == 'ok' and content.get('found', False):
                self._call_tip_widget.show_inspect_data(content)

    def _handle_execute_result(self, msg):
        """ Handle display hook output.
        """
        self.log.debug("execute_result: %s", msg.get('content', ''))
        if self.include_output(msg):
            self.flush_clearoutput()
            text = msg['content']['data']
            self._append_plain_text(text + '\n', before_prompt=True)

    def _handle_stream(self, msg):
        """ Handle stdout, stderr, and stdin.
        """
        self.log.debug("stream: %s", msg.get('content', ''))
        if self.include_output(msg):
            self.flush_clearoutput()
            self.append_stream(msg['content']['text'])

    def _handle_shutdown_reply(self, msg):
        """ Handle shutdown signal, only if from other console.
        """
        self.log.debug("shutdown: %s", msg.get('content', ''))
        restart = msg.get('content', {}).get('restart', False)
        if not self._hidden and not self.from_here(msg):
            # got shutdown reply, request came from session other than ours
            if restart:
                # someone restarted the kernel, handle it
                self._handle_kernel_restarted(died=False)
            else:
                # kernel was shutdown permanently
                # this triggers exit_requested if the kernel was local,
                # and a dialog if the kernel was remote,
                # so we don't suddenly clear the qtconsole without asking.
                if self._local_kernel:
                    self.exit_requested.emit(self)
                else:
                    title = self.window().windowTitle()
                    reply = QtGui.QMessageBox.question(self, title,
                        "Kernel has been shutdown permanently. "
                        "Close the Console?",
                        QtGui.QMessageBox.Yes,QtGui.QMessageBox.No)
                    if reply == QtGui.QMessageBox.Yes:
                        self.exit_requested.emit(self)

    def _handle_status(self, msg):
        """Handle status message"""
        # This is where a busy/idle indicator would be triggered,
        # when we make one.
        state = msg['content'].get('execution_state', '')
        if state == 'starting':
            # kernel started while we were running
            if self._executing:
                self._handle_kernel_restarted(died=True)
        elif state == 'idle':
            pass
        elif state == 'busy':
            pass

    def _started_channels(self):
        """ Called when the KernelManager channels have started listening or
            when the frontend is assigned an already listening KernelManager.
        """
        self.reset(clear=True)

    #---------------------------------------------------------------------------
    # 'FrontendWidget' public interface
    #---------------------------------------------------------------------------

    def copy_raw(self):
        """ Copy the currently selected text to the clipboard without attempting
            to remove prompts or otherwise alter the text.
        """
        self._control.copy()

    def interrupt_kernel(self):
        """ Attempts to interrupt the running kernel.
        
        Also unsets _reading flag, to avoid runtime errors
        if raw_input is called again.
        """
        if self.custom_interrupt:
            self._reading = False
            self.custom_interrupt_requested.emit()
        elif self.kernel_manager:
            self._reading = False
            self.kernel_manager.interrupt_kernel()
        else:
            self._append_plain_text('Cannot interrupt a kernel I did not start.\n')

    def reset(self, clear=False):
        """ Resets the widget to its initial state if ``clear`` parameter
        is True, otherwise
        prints a visual indication of the fact that the kernel restarted, but
        does not clear the traces from previous usage of the kernel before it
        was restarted.  With ``clear=True``, it is similar to ``%clear``, but
        also re-writes the banner and aborts execution if necessary.
        """
        if self._executing:
            self._executing = False
            self._request_info['execute'] = {}
        self._reading = False
        self._highlighter.highlighting_on = False

        if clear:
            self._control.clear()
            if self._display_banner:
                self._append_plain_text(self.banner)
                if self.kernel_banner:
                    self._append_plain_text(self.kernel_banner)

        # update output marker for stdout/stderr, so that startup
        # messages appear after banner:
        self._show_interpreter_prompt()

    def restart_kernel(self, message, now=False):
        """ Attempts to restart the running kernel.
        """
        # FIXME: now should be configurable via a checkbox in the dialog.  Right
        # now at least the heartbeat path sets it to True and the manual restart
        # to False.  But those should just be the pre-selected states of a
        # checkbox that the user could override if so desired.  But I don't know
        # enough Qt to go implementing the checkbox now.

        if self.custom_restart:
            self.custom_restart_requested.emit()
            return

        if self.kernel_manager:
            # Pause the heart beat channel to prevent further warnings.
            self.kernel_client.hb_channel.pause()

            # Prompt the user to restart the kernel. Un-pause the heartbeat if
            # they decline. (If they accept, the heartbeat will be un-paused
            # automatically when the kernel is restarted.)
            if self.confirm_restart:
                buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No
                result = QtGui.QMessageBox.question(self, 'Restart kernel?',
                                                    message, buttons)
                do_restart = result == QtGui.QMessageBox.Yes
            else:
                # confirm_restart is False, so we don't need to ask user
                # anything, just do the restart
                do_restart = True
            if do_restart:
                try:
                    self.kernel_manager.restart_kernel(now=now)
                except RuntimeError as e:
                    self._append_plain_text(
                        'Error restarting kernel: %s\n' % e,
                        before_prompt=True
                    )
                else:
                    self._append_html("<br>Restarting kernel...\n<hr><br>",
                        before_prompt=True,
                    )
            else:
                self.kernel_client.hb_channel.unpause()

        else:
            self._append_plain_text(
                'Cannot restart a Kernel I did not start\n',
                before_prompt=True
            )

    def append_stream(self, text):
        """Appends text to the output stream."""
        # Most consoles treat tabs as being 8 space characters. Convert tabs
        # to spaces so that output looks as expected regardless of this
        # widget's tab width.
        text = text.expandtabs(8)
        self._append_plain_text(text, before_prompt=True)
        self._control.moveCursor(QtGui.QTextCursor.End)

    def flush_clearoutput(self):
        """If a clearoutput is pending, execute it."""
        if self._pending_clearoutput:
            self._pending_clearoutput = False
            self.clear_output()

    def clear_output(self):
        """Clears the current line of output."""
        cursor = self._control.textCursor()
        cursor.beginEditBlock()
        cursor.movePosition(cursor.StartOfLine, cursor.KeepAnchor)
        cursor.insertText('')
        cursor.endEditBlock()

    #---------------------------------------------------------------------------
    # 'FrontendWidget' protected interface
    #---------------------------------------------------------------------------

    def _auto_call_tip(self):
        """Trigger call tip automatically on open parenthesis
        
        Call tips can be requested explicitly with `_call_tip`.
        """
        cursor = self._get_cursor()
        cursor.movePosition(QtGui.QTextCursor.Left)
        if cursor.document().characterAt(cursor.position()) == '(':
            # trigger auto call tip on open paren
            self._call_tip()

    def _call_tip(self):
        """Shows a call tip, if appropriate, at the current cursor location."""
        # Decide if it makes sense to show a call tip
        if not self.enable_calltips or not self.kernel_client.shell_channel.is_alive():
            return False
        cursor_pos = self._get_input_buffer_cursor_pos()
        code = self.input_buffer
        # Send the metadata request to the kernel
        msg_id = self.kernel_client.inspect(code, cursor_pos)
        pos = self._get_cursor().position()
        self._request_info['call_tip'] = self._CallTipRequest(msg_id, pos)
        return True

    def _complete(self):
        """ Performs completion at the current cursor location.
        """
        # Send the completion request to the kernel
        msg_id = self.kernel_client.complete(
            code=self.input_buffer,
            cursor_pos=self._get_input_buffer_cursor_pos(),
        )
        pos = self._get_cursor().position()
        info = self._CompletionRequest(msg_id, pos)
        self._request_info['complete'] = info

    def _process_execute_abort(self, msg):
        """ Process a reply for an aborted execution request.
        """
        self._append_plain_text("ERROR: execution aborted\n")

    def _process_execute_error(self, msg):
        """ Process a reply for an execution request that resulted in an error.
        """
        content = msg['content']
        # If a SystemExit is passed along, this means exit() was called.  This
        # also covers IPython's %exit magic, where '-k' is used to keep
        # the kernel running.
        if content['ename']=='SystemExit':
            keepkernel = content['evalue']=='-k' or content['evalue']=='True'
            self._keep_kernel_on_exit = keepkernel
            self.exit_requested.emit(self)
        else:
            traceback = ''.join(content['traceback'])
            self._append_plain_text(traceback)

    def _process_execute_ok(self, msg):
        """ Process a reply for a successful execution request.
        """
        payload = msg['content'].get('payload', [])
        for item in payload:
            if not self._process_execute_payload(item):
                warning = 'Warning: received unknown payload of type %s'
                print(warning % repr(item['source']))

    def _process_execute_payload(self, item):
        """ Process a single payload item from the list of payload items in an
            execution reply. Returns whether the payload was handled.
        """
        # The basic FrontendWidget doesn't handle payloads, as they are a
        # mechanism for going beyond the standard Python interpreter model.
        return False

    def _show_interpreter_prompt(self):
        """ Shows a prompt for the interpreter.
        """
        self._show_prompt('>>> ')

    def _show_interpreter_prompt_for_reply(self, msg):
        """ Shows a prompt for the interpreter given an 'execute_reply' message.
        """
        self._show_interpreter_prompt()

    #------ Signal handlers ----------------------------------------------------

    def _document_contents_change(self, position, removed, added):
        """ Called whenever the document's content changes. Display a call tip
            if appropriate.
        """
        # Calculate where the cursor should be *after* the change:
        position += added

        document = self._control.document()
        if position == self._get_cursor().position():
            self._auto_call_tip()

    #------ Trait default initializers -----------------------------------------

    def _banner_default(self):
        """ Returns the standard Python banner.
        """
        banner = 'Python %s on %s\nType "help", "copyright", "credits" or ' \
            '"license" for more information.'
        return banner % (sys.version, sys.platform)
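
A FrontendWidget subclass only does something useful once it is wired to a kernel
manager and client. The embedding sketch below follows the usual qtconsole recipe;
the module paths assume the standalone qtconsole package (older IPython releases
shipped the same classes under IPython.qt.console), so treat the imports as
assumptions to adapt to the installed version.

# Minimal embedding sketch. RichJupyterWidget derives from FrontendWidget, so
# the execute/interrupt/restart machinery shown above is what drives it.
from qtpy import QtWidgets
from qtconsole.rich_jupyter_widget import RichJupyterWidget
from qtconsole.manager import QtKernelManager

app = QtWidgets.QApplication([])

kernel_manager = QtKernelManager(kernel_name='python3')
kernel_manager.start_kernel()

kernel_client = kernel_manager.client()
kernel_client.start_channels()

widget = RichJupyterWidget()
widget.kernel_manager = kernel_manager
widget.kernel_client = kernel_client
widget.show()

app.exec_()
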
Example no. 21
class PromptManager(Configurable):
    """This is the primary interface for producing IPython's prompts."""
    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
                     allow_none=True)

    color_scheme_table = Instance(coloransi.ColorSchemeTable, allow_none=True)
    color_scheme = Unicode('Linux', config=True)

    def _color_scheme_changed(self, name, new_value):
        self.color_scheme_table.set_active_scheme(new_value)
        for pname in ['in', 'in2', 'out', 'rewrite']:
            # We need to recalculate the number of invisible characters
            self.update_prompt(pname)

    lazy_evaluate_fields = Dict(help="""
        This maps field names used in the prompt templates to functions which
        will be called when the prompt is rendered. This allows us to include
        things like the current time in the prompts. Functions are only called
        if they are used in the prompt.
        """)

    def _lazy_evaluate_fields_default(self):
        return lazily_evaluate.copy()

    in_template = Unicode(
        'In [\\#]: ',
        config=True,
        help="Input prompt.  '\\#' will be transformed to the prompt number")
    in2_template = Unicode('   .\\D.: ',
                           config=True,
                           help="Continuation prompt.")
    out_template = Unicode(
        'Out[\\#]: ',
        config=True,
        help="Output prompt. '\\#' will be transformed to the prompt number")

    justify = Bool(True,
                   config=True,
                   help="""
        If True (default), each prompt will be right-aligned with the
        preceding one.
        """)

    # We actually store the expanded templates here:
    templates = Dict()

    # The number of characters in the last prompt rendered, not including
    # colour characters.
    width = Int()
    txtwidth = Int()  # Not including right-justification

    # The number of characters in each prompt which don't contribute to width
    invisible_chars = Dict()

    def _invisible_chars_default(self):
        return {'in': 0, 'in2': 0, 'out': 0, 'rewrite': 0}

    def __init__(self, shell, **kwargs):
        super(PromptManager, self).__init__(shell=shell, **kwargs)

        # Prepare colour scheme table
        self.color_scheme_table = coloransi.ColorSchemeTable(
            [PColNoColors, PColLinux, PColLightBG], self.color_scheme)

        self._formatter = UserNSFormatter(shell)
        # Prepare templates & numbers of invisible characters
        self.update_prompt('in', self.in_template)
        self.update_prompt('in2', self.in2_template)
        self.update_prompt('out', self.out_template)
        self.update_prompt('rewrite')
        self.on_trait_change(self._update_prompt_trait,
                             ['in_template', 'in2_template', 'out_template'])

    def update_prompt(self, name, new_template=None):
        """This is called when a prompt template is updated. It processes
        abbreviations used in the prompt template (like \#) and calculates how
        many invisible characters (ANSI colour escapes) the resulting prompt
        contains.
        
        It is also called for each prompt on changing the colour scheme. In both
        cases, traitlets should take care of calling this automatically.
        """
        if new_template is not None:
            self.templates[name] = multiple_replace(prompt_abbreviations,
                                                    new_template)
        # We count invisible characters (colour escapes) on the last line of the
        # prompt, to calculate the width for lining up subsequent prompts.
        invis_chars = _invisible_characters(self._render(name, color=True))
        self.invisible_chars[name] = invis_chars

    def _update_prompt_trait(self, traitname, new_template):
        name = traitname[:-9]  # Cut off '_template'
        self.update_prompt(name, new_template)

    def _render(self, name, color=True, **kwargs):
        """Render but don't justify, or update the width or txtwidth attributes.
        """
        if name == 'rewrite':
            return self._render_rewrite(color=color)

        if color:
            scheme = self.color_scheme_table.active_colors
            if name == 'out':
                colors = color_lists['normal']
                colors.number, colors.prompt, colors.normal = \
                        scheme.out_number, scheme.out_prompt, scheme.normal
            else:
                colors = color_lists['inp']
                colors.number, colors.prompt, colors.normal = \
                        scheme.in_number, scheme.in_prompt, scheme.in_normal
                if name == 'in2':
                    colors.prompt = scheme.in_prompt2
        else:
            # No color
            colors = color_lists['nocolor']
            colors.number, colors.prompt, colors.normal = '', '', ''

        count = self.shell.execution_count  # Shorthand
        # Build the dictionary to be passed to string formatting
        fmtargs = dict(color=colors,
                       count=count,
                       dots="." * len(str(count)),
                       width=self.width,
                       txtwidth=self.txtwidth)
        fmtargs.update(self.lazy_evaluate_fields)
        fmtargs.update(kwargs)

        # Prepare the prompt
        prompt = colors.prompt + self.templates[name] + colors.normal

        # Fill in required fields
        return self._formatter.format(prompt, **fmtargs)

    def _render_rewrite(self, color=True):
        """Render the ---> rewrite prompt."""
        if color:
            scheme = self.color_scheme_table.active_colors
            # We need a non-input version of these escapes
            color_prompt = scheme.in_prompt.replace("\001",
                                                    "").replace("\002", "")
            color_normal = scheme.normal
        else:
            color_prompt, color_normal = '', ''

        return color_prompt + "-> ".rjust(self.txtwidth, "-") + color_normal

    def render(self, name, color=True, just=None, **kwargs):
        """
        Render the selected prompt.
        
        Parameters
        ----------
        name : str
          Which prompt to render. One of 'in', 'in2', 'out', 'rewrite'
        color : bool
          If True (default), include ANSI escape sequences for a coloured prompt.
        just : bool
          If True, justify the prompt to the width of the last prompt. The
          default is stored in self.justify.
        **kwargs :
          Additional arguments will be passed to the string formatting operation,
          so they can override the values that would otherwise fill in the
          template.
        
        Returns
        -------
        A string containing the rendered prompt.
        """
        res = self._render(name, color=color, **kwargs)

        # Handle justification of prompt
        invis_chars = self.invisible_chars[name] if color else 0
        self.txtwidth = _lenlastline(res) - invis_chars
        just = self.justify if (just is None) else just
        # If the prompt spans more than one line, don't try to justify it:
        if just and name != 'in' and ('\n' not in res) and ('\r' not in res):
            res = res.rjust(self.width + invis_chars)
        self.width = _lenlastline(res) - invis_chars
        return res
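
Since in_template, in2_template, out_template and justify are configurable traits,
PromptManager is normally driven through IPython's configuration files. A small
sketch is below; it only uses the '\#' and '\D' abbreviations documented in the
trait help strings above, and it applies to the older IPython releases that still
ship PromptManager.

# ipython_config.py -- illustrative prompt templates for the old PromptManager
c = get_config()

c.PromptManager.in_template = 'In [\\#] >>> '   # '\#' becomes the prompt number
c.PromptManager.in2_template = '   .\\D. ... '  # '\D' becomes matching dots
c.PromptManager.out_template = 'Out[\\#] <<< '
c.PromptManager.justify = False                 # don't right-align prompts
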
Example no. 22
class Repo2Docker(Application):
    """An application for converting git repositories to docker images"""

    name = "jupyter-repo2docker"
    version = __version__
    description = __doc__

    @default("log_level")
    def _default_log_level(self):
        """The application's default log level"""
        return logging.INFO

    git_workdir = Unicode(
        None,
        config=True,
        allow_none=True,
        help="""
        Working directory to use for check out of git repositories.

        The default is to use the system's temporary directory. Should be
        somewhere ephemeral, such as /tmp.
        """,
    )

    subdir = Unicode(
        "",
        config=True,
        help="""
        Subdirectory of the git repository to examine.

        Defaults to ''.
        """,
    )

    cache_from = List(
        [],
        config=True,
        help="""
        List of images to try & re-use cached image layers from.

        Docker only tries to re-use image layers from images built locally,
        not pulled from a registry. We can ask it to explicitly re-use layers
        from non-locally built images through the 'cache_from' parameter.
        """,
    )

    buildpacks = List(
        [
            LegacyBinderDockerBuildPack,
            DockerBuildPack,
            JuliaProjectTomlBuildPack,
            JuliaRequireBuildPack,
            NixBuildPack,
            RBuildPack,
            CondaBuildPack,
            PipfileBuildPack,
            PythonBuildPack,
        ],
        config=True,
        help="""
        Ordered list of BuildPacks to try when building a git repository.
        """,
    )

    extra_build_kwargs = Dict(
        {},
        help="""
        Extra kwargs to pass to docker when building the image, e.g. to limit
        the CPU quota. A dictionary that allows the user to set the desired
        runtime flags configuring how much access to CPU resources the
        container has.
        Reference https://docs.docker.com/config/containers/resource_constraints/#cpu
        """,
        config=True,
    )

    extra_run_kwargs = Dict(
        {},
        help="""
        Extra kwargs to pass to docker when running the image, e.g. to limit
        the CPU quota. A dictionary that allows the user to set the desired
        runtime flags configuring how much access to CPU resources the
        container has.
        Reference https://docs.docker.com/config/containers/resource_constraints/#cpu
        """,
        config=True,
    )

    extra_build_args = Dict(
        {},
        help="""
        Regular Docker build-time arguments that will be passed as
        --build-arg key=value.
        Reference https://docs.docker.com/engine/reference/commandline/build/
        """,
        config=True,
    )

    default_buildpack = Any(
        PythonBuildPack,
        config=True,
        help="""
        The default build pack to use when no other buildpacks are found.
        """,
    )

    # Git is our content provider of last resort. This is to maintain the
    # old behaviour when git and local directories were the only supported
    # content providers. We can detect local directories from the path, but
    # detecting if something will successfully `git clone` is very hard if all
    # you can do is look at the path/URL to it.
    content_providers = List(
        [
            contentproviders.Local,
            contentproviders.Zenodo,
            contentproviders.Figshare,
            contentproviders.Dataverse,
            contentproviders.Hydroshare,
            contentproviders.Mercurial,
            contentproviders.Git,
        ],
        config=True,
        help="""
        Ordered list by priority of ContentProviders to try in turn to fetch
        the contents specified by the user.
        """,
    )

    build_memory_limit = ByteSpecification(
        0,
        help="""
        Total memory that can be used by the docker image building process.

        Set to 0 for no limits.
        """,
        config=True,
    )

    volumes = Dict(
        {},
        help="""
        Volumes to mount when running the container.

        Only used when running, not during build process!

        Use a key-value pair, with the key being the volume source &
        value being the destination volume.

        Both source and destination can be relative. Source is resolved
        relative to the current working directory on the host, and
        destination is resolved relative to the working directory of the
        image - ($HOME by default)
        """,
        config=True,
    )

    user_id = Int(
        help="""
        UID of the user to create inside the built image.

        Should be a uid that is not currently used by anything in the image.
        Defaults to uid of currently running user, since that is the most
        common case when running r2d manually.

        Might not affect Dockerfile builds.
        """,
        config=True,
    )

    @default("user_id")
    def _user_id_default(self):
        """
        Default user_id to current running user.
        """
        return os.geteuid()

    user_name = Unicode(
        "jovyan",
        help="""
        Username of the user to create inside the built image.

        Should be a username that is not currently used by anything in the
        image, and should conform to the restrictions on user names for Linux.

        Defaults to username of currently running user, since that is the most
        common case when running repo2docker manually.
        """,
        config=True,
    )

    @default("user_name")
    def _user_name_default(self):
        """
        Default user_name to current running user.
        """
        return getpass.getuser()

    appendix = Unicode(
        config=True,
        help="""
        Appendix of Dockerfile commands to run at the end of the build.

        Can be used to customize the resulting image after all
        standard build steps finish.
        """,
    )

    json_logs = Bool(
        False,
        help="""
        Log output in structured JSON format.

        Useful when stdout is consumed by other tools
        """,
        config=True,
    )

    repo = Unicode(
        ".",
        help="""
        Specification of repository to build image for.

        Could be local path or git URL.
        """,
        config=True,
    )

    ref = Unicode(
        None,
        help="""
        Git ref that should be built.

        If repo is a git repository, this ref is checked out
        in a local clone before repository is built.
        """,
        config=True,
        allow_none=True,
    )

    cleanup_checkout = Bool(
        False,
        help="""
        Delete source repository after building is done.

        Useful when repo2docker is doing the git cloning
        """,
        config=True,
    )

    output_image_spec = Unicode(
        "",
        help="""
        Docker Image name:tag to tag the built image with.

        Required parameter.
        """,
        config=True,
    )

    push = Bool(
        False,
        help="""
        Set to true to push docker image after building
        """,
        config=True,
    )

    run = Bool(
        False,
        help="""
        Run docker image after building
        """,
        config=True,
    )

    # FIXME: Refactor class to be able to do --no-build without needing
    #        deep support for it inside other code
    dry_run = Bool(
        False,
        help="""
        Do not actually build the docker image, just simulate it.
        """,
        config=True,
    )

    # FIXME: Refactor classes to separate build & run steps
    run_cmd = List(
        [],
        help="""
        Command to run when running the container

        When left empty, a jupyter notebook is run.
        """,
        config=True,
    )

    all_ports = Bool(
        False,
        help="""
        Publish all declared ports from container while running.

        Equivalent to -P option to docker run
        """,
        config=True,
    )

    ports = Dict(
        {},
        help="""
        Port mappings to establish when running the container.

        Equivalent to -p {key}:{value} options to docker run.
        {key} refers to port inside container, and {value}
        refers to port / host:port in the host
        """,
        config=True,
    )

    environment = List(
        [],
        help="""
        Environment variables to set when running the built image.

        Each item must be a string formatted as KEY=VALUE
        """,
        config=True,
    )

    target_repo_dir = Unicode(
        "",
        help="""
        Path inside the image where contents of the repositories are copied to,
        and where all the build operations (such as postBuild) happen.

        Defaults to ${HOME} if not set
        """,
        config=True,
    )

    template = Unicode(
        "",
        help="""
        Jinja template used to render the Dockerfile.
        """,
        config=True,
    )

    entrypoint_file = Unicode(
        "",
        help="""
        Path to a file that will be used as an entry point in the Docker image.
        """,
        config=True,
    )

    def fetch(self, url, ref, checkout_path):
        """Fetch the contents of `url` and place it in `checkout_path`.

        The `ref` parameter specifies what "version" of the contents should be
        fetched. In the case of a git repository `ref` is the SHA-1 of a commit.

        Iterate through possible content providers until a valid provider,
        based on URL, is found.
        """
        picked_content_provider = None
        for ContentProvider in self.content_providers:
            cp = ContentProvider()
            spec = cp.detect(url, ref=ref)
            if spec is not None:
                picked_content_provider = cp
                self.log.info(
                    "Picked {cp} content "
                    "provider.\n".format(cp=cp.__class__.__name__)
                )
                break

        if picked_content_provider is None:
            self.log.error(
                "No matching content provider found for " "{url}.".format(url=url)
            )

        for log_line in picked_content_provider.fetch(
            spec, checkout_path, yield_output=self.json_logs
        ):
            self.log.info(log_line, extra=dict(phase="fetching"))

        if not self.output_image_spec:
            self.output_image_spec = (
                "r2d" + escapism.escape(self.repo, escape_char="-").lower()
            )
            # if we are building from a subdirectory include that in the
            # image name so we can tell builds from different sub-directories
            # apart.
            if self.subdir:
                self.output_image_spec += escapism.escape(
                    self.subdir, escape_char="-"
                ).lower()
            if picked_content_provider.content_id is not None:
                self.output_image_spec += picked_content_provider.content_id
            else:
                self.output_image_spec += str(int(time.time()))

    def json_excepthook(self, etype, evalue, traceback):
        """Called on an uncaught exception when using json logging

        Avoids non-JSON output on errors when using --json-logs
        """
        self.log.error(
            "Error during build: %s",
            evalue,
            exc_info=(etype, evalue, traceback),
            extra=dict(phase="failed"),
        )

    def initialize(self):
        """Init repo2docker configuration before start"""
        # FIXME: Remove this function, move it to setters / traitlet reactors
        if self.json_logs:
            # register JSON excepthook to avoid non-JSON output on errors
            sys.excepthook = self.json_excepthook
            # Need to reset existing handlers, or we repeat messages
            logHandler = logging.StreamHandler()
            formatter = jsonlogger.JsonFormatter()
            logHandler.setFormatter(formatter)
            self.log = logging.getLogger("repo2docker")
            self.log.handlers = []
            self.log.addHandler(logHandler)
            self.log.setLevel(self.log_level)
        else:
            # due to json logger stuff above,
            # our log messages include carriage returns, newlines, etc.
            # remove the additional newline from the stream handler
            self.log.handlers[0].terminator = ""
            # We don't want a [Repo2Docker] on all messages
            self.log.handlers[0].formatter = logging.Formatter(fmt="%(message)s")

        if self.dry_run and (self.run or self.push):
            raise ValueError("Cannot push or run image if we are not building it")

        if self.volumes and not self.run:
            raise ValueError("Cannot mount volumes if container is not run")

    def push_image(self):
        """Push docker image to registry"""
        client = docker.APIClient(version="auto", **kwargs_from_env())
        # Build a progress setup for each layer, and only emit per-layer
        # info every 1.5s
        progress_layers = {}
        layers = {}
        last_emit_time = time.time()
        for chunk in client.push(self.output_image_spec, stream=True):
            # each chunk can be one or more lines of json events
            # split lines here in case multiple are delivered at once
            for line in chunk.splitlines():
                line = line.decode("utf-8", errors="replace")
                try:
                    progress = json.loads(line)
                except Exception as e:
                    self.log.warning("Not a JSON progress line: %r", line)
                    continue
                if "error" in progress:
                    self.log.error(progress["error"], extra=dict(phase="failed"))
                    raise docker.errors.ImageLoadError(progress["error"])
                if "id" not in progress:
                    continue
                # deprecated truncated-progress data
                if "progressDetail" in progress and progress["progressDetail"]:
                    progress_layers[progress["id"]] = progress["progressDetail"]
                else:
                    progress_layers[progress["id"]] = progress["status"]
                # include full progress data for each layer in 'layers' data
                layers[progress["id"]] = progress
                if time.time() - last_emit_time > 1.5:
                    self.log.info(
                        "Pushing image\n",
                        extra=dict(
                            progress=progress_layers, layers=layers, phase="pushing"
                        ),
                    )
                    last_emit_time = time.time()
        self.log.info(
            "Successfully pushed {}".format(self.output_image_spec),
            extra=dict(phase="pushing"),
        )

    def run_image(self):
        """Run docker container from built image

        and wait for it to finish.
        """
        container = self.start_container()
        self.wait_for_container(container)

    def start_container(self):
        """Start docker container from built image

        Returns running container
        """
        client = docker.from_env(version="auto")

        docker_host = os.environ.get("DOCKER_HOST")
        if docker_host:
            host_name = urlparse(docker_host).hostname
        else:
            host_name = "127.0.0.1"
        self.hostname = host_name

        if not self.run_cmd:
            port = str(self._get_free_port())
            self.port = port
            # To use the option --NotebookApp.custom_display_url
            # make sure the base-notebook image is updated:
            # docker pull jupyter/base-notebook
            run_cmd = [
                "jupyter",
                "notebook",
                "--ip",
                "0.0.0.0",
                "--port",
                port,
                "--NotebookApp.custom_display_url=http://{}:{}".format(host_name, port),
            ]
            ports = {"%s/tcp" % port: port}
        else:
            # run_cmd given by user, if port is also given then pass it on
            run_cmd = self.run_cmd
            if self.ports:
                ports = self.ports
            else:
                ports = {}
        # store ports on self so they can be retrieved in tests
        self.ports = ports

        container_volumes = {}
        if self.volumes:
            api_client = docker.APIClient(
                version="auto", **docker.utils.kwargs_from_env()
            )
            image = api_client.inspect_image(self.output_image_spec)
            # Buildkit uses Config/WorkingDir not ContainerConfig/WorkingDir
            image_workdir = image["Config"]["WorkingDir"]

            for k, v in self.volumes.items():
                container_volumes[os.path.abspath(k)] = {
                    "bind": v if v.startswith("/") else os.path.join(image_workdir, v),
                    "mode": "rw",
                }

        run_kwargs = dict(
            publish_all_ports=self.all_ports,
            ports=ports,
            detach=True,
            command=run_cmd,
            volumes=container_volumes,
            environment=self.environment,
        )

        run_kwargs.update(self.extra_run_kwargs)

        container = client.containers.run(self.output_image_spec, **run_kwargs)

        while container.status == "created":
            time.sleep(0.5)
            container.reload()

        return container

    def wait_for_container(self, container):
        """Wait for a container to finish

        Displaying logs while it's running
        """

        try:
            for line in container.logs(stream=True):
                self.log.info(line.decode("utf-8"), extra=dict(phase="running"))
        finally:
            container.reload()
            if container.status == "running":
                self.log.info("Stopping container...\n", extra=dict(phase="running"))
                container.kill()
            exit_code = container.attrs["State"]["ExitCode"]
            container.remove()
            if exit_code:
                sys.exit(exit_code)

    def _get_free_port(self):
        """
        Hacky method to get a free random port on local host
        """
        import socket

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(("", 0))
        port = s.getsockname()[1]
        s.close()
        return port

    def find_image(self):
        # If this is a dry run it is OK for dockerd to be unreachable, so we
        # always return False for dry runs.
        if self.dry_run:
            return False
        # check if we already have an image for this content
        client = docker.APIClient(version="auto", **kwargs_from_env())
        for image in client.images():
            if image["RepoTags"] is not None:
                for tag in image["RepoTags"]:
                    if tag == self.output_image_spec + ":latest":
                        return True
        return False

    def build(self):
        """
        Build docker image
        """
        # Check if r2d can connect to docker daemon
        if not self.dry_run:
            try:
                docker_client = DockerCLI()
            except DockerException as e:
                self.log.error(
                    "\nDocker client initialization error: %s.\nCheck if docker is running on the host.\n",
                    e,
                )
                self.exit(1)

        # If the source to be executed is a directory, continue using the
        # directory. In the case of a local directory, it is used as both the
        # source and target. Reusing a local directory seems better than
        # making a copy of it as it might contain large files that would be
        # expensive to copy.
        if os.path.isdir(self.repo):
            checkout_path = self.repo
        else:
            if self.git_workdir is None:
                checkout_path = tempfile.mkdtemp(prefix="repo2docker")
            else:
                checkout_path = self.git_workdir

        try:
            self.fetch(self.repo, self.ref, checkout_path)

            if self.find_image():
                self.log.info(
                    "Reusing existing image ({}), not "
                    "building.".format(self.output_image_spec)
                )
                # no need to build, so skip to the end by `return`ing here
                # this will still execute the finally clause and lets us
                # avoid having to indent the build code by an extra level
                return

            if self.subdir:
                checkout_path = os.path.join(checkout_path, self.subdir)
                if not os.path.isdir(checkout_path):
                    self.log.error(
                        "Subdirectory %s does not exist",
                        self.subdir,
                        extra=dict(phase="failure"),
                    )
                    raise FileNotFoundError("Could not find {}".format(checkout_path))

            with chdir(checkout_path):
                for BP in self.buildpacks:
                    bp = BP()
                    if bp.detect():
                        picked_buildpack = bp
                        break
                else:
                    picked_buildpack = self.default_buildpack()

                picked_buildpack.appendix = self.appendix
                if self.template:
                    picked_buildpack.template = self.template
                if self.entrypoint_file:
                    picked_buildpack.entrypoint_file = self.entrypoint_file
                # Add metadata labels
                picked_buildpack.labels["repo2docker.version"] = self.version
                repo_label = "local" if os.path.isdir(self.repo) else self.repo
                picked_buildpack.labels["repo2docker.repo"] = repo_label
                picked_buildpack.labels["repo2docker.ref"] = self.ref

                if self.dry_run:
                    print(picked_buildpack.render())
                else:
                    self.log.debug(
                        picked_buildpack.render(), extra=dict(phase="building")
                    )
                    if self.user_id == 0:
                        raise ValueError(
                            "Root as the primary user in the image is not permitted."
                        )

                    build_args = {
                        "NB_USER": self.user_name,
                        "NB_UID": str(self.user_id),
                    }
                    if self.target_repo_dir:
                        build_args["REPO_DIR"] = self.target_repo_dir
                    self.log.info(
                        "Using %s builder\n",
                        picked_buildpack.__class__.__name__,
                        extra=dict(phase="building"),
                    )

                    build_args.update(self.extra_build_args)

                    for l in picked_buildpack.build(
                        docker_client,
                        self.output_image_spec,
                        self.build_memory_limit,
                        build_args,
                        self.cache_from,
                        self.extra_build_kwargs,
                    ):
                        if "stream" in l:
                            self.log.info(l["stream"], extra=dict(phase="building"))
                        elif "error" in l:
                            self.log.info(l["error"], extra=dict(phase="failure"))
                            raise docker.errors.BuildError(l["error"], build_log="")
                        elif "status" in l:
                            self.log.info(
                                "Fetching base image...\r", extra=dict(phase="building")
                            )
                        else:
                            self.log.info(json.dumps(l), extra=dict(phase="building"))

        finally:
            # Cleanup checkout if necessary
            if self.cleanup_checkout:
                shutil.rmtree(checkout_path, ignore_errors=True)

    def start(self):
        self.build()

        if self.push:
            self.push_image()

        if self.run:
            self.run_image()
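
# Usage sketch (assumption: the methods above belong to repo2docker's
# Repo2Docker application class; the import path below is not shown in this
# excerpt). start() drives build(), then push_image() / run_image() depending
# on the push and run flags checked above.
from repo2docker.app import Repo2Docker

r2d = Repo2Docker()
r2d.repo = 'https://github.com/binder-examples/requirements'
r2d.output_image_spec = 'binder-example:latest'
r2d.push = False     # skip push_image()
r2d.run = True       # launch a container via run_image() after the build
r2d.start()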
Esempio n. 23
0
class Turtle(widgets.DOMWidget):
    """"""
    _view_name = Unicode('TurtleView').tag(sync=True)
    _model_name = Unicode('TurtleModel').tag(sync=True)
    _view_module = Unicode('ipython-turtle-widget').tag(sync=True)
    _model_module = Unicode('ipython-turtle-widget').tag(sync=True)

    _canvas_fixed = Bool(True).tag(sync=True)
    _canvas_width = Int(320).tag(sync=True)
    _canvas_height = Int(320).tag(sync=True)
    _turtle_on = Bool(True).tag(sync=True)
    _pen_on = True

    _turtle_height = Int(20).tag(sync=True)
    _turtle_width = Int(10).tag(sync=True)
    _turtle_location_x = Float(0.0).tag(sync=True)
    _turtle_location_y = Float(0.0).tag(sync=True)
    _turtle_heading = Int(90).tag(sync=True)

    _turtle_heading_x = Float(0).tag(sync=True)
    _turtle_heading_y = Float(1).tag(sync=True)

    _line = Unicode('').tag(sync=True)

    def __init__(self, width=320, height=320, fixed=True):
        widgets.DOMWidget.__init__(self)
        self._canvas_width = width
        self._canvas_height = height
        self._canvas_fixed = fixed
        self._reset()

    def _reset(self):
        self._turtle_on = True
        self._pen_on = True
        self._turtle_location_x = 0
        self._turtle_location_y = 0
        self._turtle_heading = 90
        self._turtle_heading_x = 0.0
        self._turtle_heading_y = 1.0

    def position(self):
        return (self._turtle_location_x, self._turtle_location_y)

    def forward(self, length):
        start = "{} {}".format(self._turtle_location_x,
                               self._turtle_location_y)
        self._turtle_location_x += length * self._turtle_heading_x
        self._turtle_location_y += length * self._turtle_heading_y
        end = " {} {}".format(self._turtle_location_x, self._turtle_location_y)
        if self._pen_on:
            self._line = start + end

    def back(self, length):
        self.forward(-length)

    def heading(self):
        return self._turtle_heading

    def left(self, degree):
        self._turtle_heading += degree

        hx = math.cos(math.radians(self._turtle_heading))
        hy = math.sin(math.radians(self._turtle_heading))

        self._turtle_heading_x = hx
        self._turtle_heading_y = hy

    def right(self, degree):
        self.left(-degree)

    def penup(self):
        self._pen_on = False

    def pendown(self):
        self._pen_on = True

    def isdown(self):
        return self._pen_on

    def hideturtle(self):
        self._turtle_on = False

    def showturtle(self):
        self._turtle_on = True

    def isvisible(self):
        return self._turtle_on

    def reset(self):
        self._reset()
        self._line = 'clear'
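
# Usage sketch (assumption: executed in a Jupyter notebook with the
# ipython-turtle-widget front-end installed, so displaying the widget shows
# the canvas). Only methods defined above are used.
from IPython.display import display

t = Turtle(width=400, height=400)
display(t)            # render the canvas

for _ in range(4):    # trace a 100-pixel square
    t.forward(100)
    t.left(90)

t.penup()
t.forward(50)         # reposition without drawing
t.pendown()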
Esempio n. 24
0
class Axis(BaseAxis):

    """A line axis.

    A line axis is the visual representation of a numerical or date scale.

    Attributes
    ----------
    icon: string (class-level attribute)
        The font-awesome icon name for this object.
    axis_types: dict (class-level attribute)
        A registry of existing axis types.
    orientation: {'horizontal', 'vertical'}
        The orientation of the axis, either vertical or horizontal
    side: {'bottom', 'top', 'left', 'right'} or None (default: None)
        The side of the axis, either bottom, top, left or right.
    label: string (default: '')
        The axis label
    tick_format: string or None (default: '')
        The tick format for the axis, for dates use d3 string formatting.
    scale: Scale
        The scale represented by the axis
    num_ticks: int or None (default: None)
        If tick_values is None, number of ticks
    tick_values: numpy.ndarray or None (default: None)
        Tick values for the axis
    offset: dict (default: {})
        Contains a scale and a value {'scale': scale or None,
        'value': value of the offset}
        If offset['scale'] is None, the corresponding figure scale is used
        instead.
    label_location: {'middle', 'start', 'end'}
        The location of the label along the axis, one of 'start', 'end' or
        'middle'
    label_color: Color or None (default: None)
        The color of the axis label
    grid_lines: {'none', 'solid', 'dashed'}
        The display of the grid lines
    grid_color: Color or None (default: None)
        The color of the grid lines
    color: Color or None (default: None)
        The color of the line
    label_offset: string or None (default: None)
        Label displacement from the axis line. Units allowed are 'em', 'px'
        and 'ex'. Positive values are away from the figure and negative
        values are towards the figure with respect to the axis line.
    visible: bool (default: True)
        A visibility toggle for the axis
    tick_style: Dict (default: {})
        Dictionary containing the CSS-style of the text for the ticks.
        For example: font-size of the text can be changed by passing
        `{'font-size': 14}`
    tick_rotate: int (default: 0)
        Degrees to rotate tick labels by.
    """
    icon = 'fa-arrows'
    orientation = Enum(['horizontal', 'vertical'], default_value='horizontal')\
        .tag(sync=True)
    side = Enum(['bottom', 'top', 'left', 'right'],
                allow_none=True, default_value=None).tag(sync=True)
    label = Unicode().tag(sync=True)
    grid_lines = Enum(['none', 'solid', 'dashed'], default_value='solid')\
        .tag(sync=True)
    tick_format = Unicode(None, allow_none=True).tag(sync=True)
    scale = Instance(Scale).tag(sync=True, **widget_serialization)
    num_ticks = Int(default_value=None, allow_none=True).tag(sync=True)
    tick_values = Array(None, allow_none=True)\
        .tag(sync=True, **array_serialization)\
        .valid(array_dimension_bounds(1, 1))
    offset = Dict().tag(sync=True, **widget_serialization)
    label_location = Enum(['middle', 'start', 'end'],
                          default_value='middle').tag(sync=True)
    label_color = Color(None, allow_none=True).tag(sync=True)
    grid_color = Color(None, allow_none=True).tag(sync=True)
    color = Color(None, allow_none=True).tag(sync=True)
    label_offset = Unicode(default_value=None, allow_none=True).tag(sync=True)

    visible = Bool(True).tag(sync=True)
    tick_style = Dict().tag(sync=True)
    tick_rotate = Int(0).tag(sync=True)

    _view_name = Unicode('Axis').tag(sync=True)
    _model_name = Unicode('AxisModel').tag(sync=True)
    _ipython_display_ = None  # We cannot display an axis outside of a figure.
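
# Construction sketch (assumption: this is bqplot's Axis, so LinearScale,
# Lines and Figure come from the same package). Because _ipython_display_ is
# None, an Axis only renders as part of a Figure.
from bqplot import Figure, LinearScale, Lines

xs, ys = LinearScale(), LinearScale()
ax_x = Axis(scale=xs, label='x', grid_lines='dashed')
ax_y = Axis(scale=ys, label='y', orientation='vertical', side='left')
line = Lines(x=[0, 1, 2], y=[1, 4, 9], scales={'x': xs, 'y': ys})
fig = Figure(marks=[line], axes=[ax_x, ax_y])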
Esempio n. 25
0
class NotebookHTTPPersonality(LoggingConfigurable):
    """Personality for notebook-http support, creating REST endpoints
    based on the notebook's annotated cells
    """
    cell_parser_env = 'KG_CELL_PARSER'
    cell_parser = Unicode(
        'kernel_gateway.notebook_http.cell.parser',
        config=True,
        help=
        """Determines which module is used to parse the notebook for endpoints and
            documentation. Valid module names include 'kernel_gateway.notebook_http.cell.parser'
            and 'kernel_gateway.notebook_http.swagger.parser'. (KG_CELL_PARSER env var)
            """)

    @default('cell_parser')
    def cell_parser_default(self):
        return os.getenv(self.cell_parser_env,
                         'kernel_gateway.notebook_http.cell.parser')

    # Intentionally not defining an env var option for a dict type
    comment_prefix = Dict({
        'scala': '//',
        None: '#'
    },
                          config=True,
                          help='Maps kernel language to code comment syntax')

    allow_notebook_download_env = 'KG_ALLOW_NOTEBOOK_DOWNLOAD'
    allow_notebook_download = Bool(
        config=True,
        help=
        "Optional API to download the notebook source code in notebook-http mode, defaults to not allow"
    )

    @default('allow_notebook_download')
    def allow_notebook_download_default(self):
        return os.getenv(self.allow_notebook_download_env, 'False') == 'True'

    static_path_env = 'KG_STATIC_PATH'
    static_path = Unicode(
        None,
        config=True,
        allow_none=True,
        help=
        "Serve static files on disk in the given path as /public, defaults to not serve"
    )

    @default('static_path')
    def static_path_default(self):
        return os.getenv(self.static_path_env)

    def __init__(self, *args, **kwargs):
        super(NotebookHTTPPersonality, self).__init__(*args, **kwargs)
        # Import the module to use for cell endpoint parsing
        cell_parser_module = importlib.import_module(self.cell_parser)
        # Build the parser using the comment syntax for the notebook language
        func = getattr(cell_parser_module, 'create_parser')
        try:
            lang = self.parent.seed_source['metadata']['language_info']['name']
        except (AttributeError, KeyError):
            lang = None
        prefix = self.comment_prefix[lang]
        self.api_parser = func(parent=self,
                               log=self.log,
                               comment_prefix=prefix,
                               notebook_cells=self.parent.seed_notebook.cells)

    def init_configurables(self):
        """Create a managed kernel pool"""
        self.kernel_pool = ManagedKernelPool(self.parent.prespawn_count,
                                             self.parent.kernel_manager)

    def create_request_handlers(self):
        """Create handlers and redefine them off of the base_url path. Assumes
        init_configurables() has already been called, and that the seed source
        was available there.
        """
        handlers = []
        # Register the NotebookDownloadHandler if configuration allows
        if self.allow_notebook_download:
            path = url_path_join('/', self.parent.base_url, r'/_api/source')
            self.log.info(
                'Registering resource: {}, methods: (GET)'.format(path))
            handlers.append((path, NotebookDownloadHandler, {
                'path': self.parent.seed_uri
            }))

        # Register a static path handler if configuration allows
        if self.static_path is not None:
            path = url_path_join('/', self.parent.base_url, r'/public/(.*)')
            self.log.info(
                'Registering resource: {}, methods: (GET)'.format(path))
            handlers.append((path, tornado.web.StaticFileHandler, {
                'path': self.static_path
            }))

        # Discover the notebook endpoints and their implementations
        endpoints = self.api_parser.endpoints(
            self.parent.kernel_manager.seed_source)
        response_sources = self.api_parser.endpoint_responses(
            self.parent.kernel_manager.seed_source)
        if len(endpoints) == 0:
            raise RuntimeError(
                'No endpoints were discovered. Check your notebook to make sure your cells are annotated correctly.'
            )

        # Cycle through the (endpoint_path, source) tuples and register their handlers
        for endpoint_path, verb_source_map in endpoints:
            parameterized_path = parameterize_path(endpoint_path)
            parameterized_path = url_path_join('/', self.parent.base_url,
                                               parameterized_path)
            self.log.info('Registering resource: {}, methods: ({})'.format(
                parameterized_path, list(verb_source_map.keys())))
            response_source_map = response_sources[
                endpoint_path] if endpoint_path in response_sources else {}
            handler_args = {
                'sources': verb_source_map,
                'response_sources': response_source_map,
                'kernel_pool': self.kernel_pool,
                'kernel_name': self.parent.kernel_manager.seed_kernelspec
            }
            handlers.append(
                (parameterized_path, NotebookAPIHandler, handler_args))

        # Register the swagger API spec handler
        path = url_path_join('/', self.parent.base_url,
                             r'/_api/spec/swagger.json')
        handlers.append((path, SwaggerSpecHandler, {
            'notebook_path': self.parent.seed_uri,
            'source_cells': self.parent.seed_notebook.cells,
            'cell_parser': self.api_parser
        }))
        self.log.info('Registering resource: {}, methods: (GET)'.format(path))

        # Add the 404 catch-all last
        handlers.append(default_base_handlers[-1])
        return handlers

    def should_seed_cell(self, code):
        """Determines whether the given code cell source should be executed when
        seeding a new kernel."""
        # seed cells that are uninvolved with the presented API
        return (not self.api_parser.is_api_cell(code)
                and not self.api_parser.is_api_response_cell(code))

    def shutdown(self):
        """Stop all kernels in the pool."""
        self.kernel_pool.shutdown()
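
# Endpoint-cell sketch (following kernel gateway's notebook-http conventions,
# not taken from this source): a cell whose first comment names an HTTP verb
# and path is discovered by the cell parser and registered by
# create_request_handlers() above. For each request the gateway sets a JSON
# string named REQUEST in the kernel; whatever the cell prints becomes the
# response body.
#
#     # GET /hello/:name
#     import json
#     req = json.loads(REQUEST)
#     name = req.get('path', {}).get('name', 'world')
#     print(json.dumps({'message': 'hello, %s' % name}))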
Esempio n. 26
0
class RasterLayer(Layer):
    _view_name = Unicode('LeafletRasterLayerView').tag(sync=True)
    _model_name = Unicode('LeafletRasterLayerModel').tag(sync=True)

    opacity = Float(1.0, min=0.0, max=1.0).tag(sync=True)
    visible = Bool(True).tag(sync=True)
Esempio n. 27
0
class OAuthenticator(Authenticator):
    """Base class for OAuthenticators

    Subclasses must override:

    login_service (string identifying the service provider)
    authenticate (method takes one arg - the request handler handling the oauth callback)
    """

    login_handler = OAuthLoginHandler
    callback_handler = OAuthCallbackHandler

    authorize_url = Unicode(
        config=True, help="""The authenticate url for initiating oauth""")

    @default("authorize_url")
    def _authorize_url_default(self):
        return os.environ.get("OAUTH2_AUTHORIZE_URL", "")

    token_url = Unicode(
        config=True,
        help=
        """The url retrieving an access token at the completion of oauth""",
    )

    @default("token_url")
    def _token_url_default(self):
        return os.environ.get("OAUTH2_TOKEN_URL", "")

    userdata_url = Unicode(
        config=True,
        help=
        """The url for retrieving user data with a completed access token""",
    )

    @default("userdata_url")
    def _userdata_url_default(self):
        return os.environ.get("OAUTH2_USERDATA_URL", "")

    scope = List(
        Unicode(),
        config=True,
        help="""The OAuth scopes to request.
        See the OAuth documentation of your OAuth provider for options.
        For GitHub in particular, you can see github_scopes.md in this repo.
        """,
    )

    extra_authorize_params = Dict(
        config=True,
        help="""Extra GET params to send along with the initial OAuth request
        to the OAuth provider.""",
    )

    login_service = 'override in subclass'
    oauth_callback_url = Unicode(
        os.getenv('OAUTH_CALLBACK_URL', ''),
        config=True,
        help="""Callback URL to use.
        Typically `https://{host}/hub/oauth_callback`""",
    )

    client_id_env = ''
    client_id = Unicode(config=True)

    def _client_id_default(self):
        if self.client_id_env:
            client_id = os.getenv(self.client_id_env, '')
            if client_id:
                return client_id
        return os.getenv('OAUTH_CLIENT_ID', '')

    client_secret_env = ''
    client_secret = Unicode(config=True)

    def _client_secret_default(self):
        if self.client_secret_env:
            client_secret = os.getenv(self.client_secret_env, '')
            if client_secret:
                return client_secret
        return os.getenv('OAUTH_CLIENT_SECRET', '')

    validate_server_cert_env = 'OAUTH_TLS_VERIFY'
    validate_server_cert = Bool(config=True)

    def _validate_server_cert_default(self):
        env_value = os.getenv(self.validate_server_cert_env, '')
        if env_value == '0':
            return False
        else:
            return True

    def login_url(self, base_url):
        return url_path_join(base_url, 'oauth_login')

    def get_callback_url(self, handler=None):
        """Get my OAuth redirect URL
        
        Either from config or guess based on the current request.
        """
        if self.oauth_callback_url:
            return self.oauth_callback_url
        elif handler:
            return guess_callback_uri(
                handler.request.protocol,
                handler.request.host,
                handler.hub.server.base_url,
            )
        else:
            raise ValueError(
                "Specify callback oauth_callback_url or give me a handler to guess with"
            )

    def get_handlers(self, app):
        return [
            (r'/oauth_login', self.login_handler),
            (r'/oauth_callback', self.callback_handler),
        ]

    async def authenticate(self, handler, data=None):
        raise NotImplementedError()
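
# Subclass sketch (an assumption, not taken from the source): per the class
# docstring, a concrete authenticator overrides login_service and implements
# authenticate(). The token exchange below is schematic only; a real provider
# needs HTTP calls against token_url / userdata_url.
class MyProviderOAuthenticator(OAuthenticator):
    login_service = 'My Provider'

    async def authenticate(self, handler, data=None):
        code = handler.get_argument('code')       # OAuth2 authorization code
        # ... exchange `code` at self.token_url for an access token,
        # then look the user up at self.userdata_url ...
        username = 'resolved-from-userdata-url'   # placeholder
        return {'name': username}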
Esempio n. 28
0
class Map(DOMWidget, InteractMixin):
    _view_name = Unicode('LeafletMapView').tag(sync=True)
    _model_name = Unicode('LeafletMapModel').tag(sync=True)
    _view_module = Unicode('jupyter-leaflet').tag(sync=True)
    _model_module = Unicode('jupyter-leaflet').tag(sync=True)

    _view_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)
    _model_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)

    # Map options
    center = List(def_loc).tag(sync=True, o=True)
    zoom_start = Int(12).tag(sync=True, o=True)
    zoom = Int(12).tag(sync=True, o=True)
    max_zoom = Int(18).tag(sync=True, o=True)
    min_zoom = Int(1).tag(sync=True, o=True)
    interpolation = Unicode('bilinear').tag(sync=True, o=True)

    # Specification of the basemap
    basemap = Dict(default_value=dict(
        url='https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
        max_zoom=19,
        attribution=
        'Map data (c) <a href="https://openstreetmap.org">OpenStreetMap</a> contributors'
    )).tag(sync=True, o=True)
    modisdate = Unicode('yesterday').tag(sync=True)

    # Interaction options
    dragging = Bool(True).tag(sync=True, o=True)
    touch_zoom = Bool(True).tag(sync=True, o=True)
    scroll_wheel_zoom = Bool(False).tag(sync=True, o=True)
    double_click_zoom = Bool(True).tag(sync=True, o=True)
    box_zoom = Bool(True).tag(sync=True, o=True)
    tap = Bool(True).tag(sync=True, o=True)
    tap_tolerance = Int(15).tag(sync=True, o=True)
    world_copy_jump = Bool(False).tag(sync=True, o=True)
    close_popup_on_click = Bool(True).tag(sync=True, o=True)
    bounce_at_zoom_limits = Bool(True).tag(sync=True, o=True)
    keyboard = Bool(True).tag(sync=True, o=True)
    keyboard_pan_offset = Int(80).tag(sync=True, o=True)
    keyboard_zoom_offset = Int(1).tag(sync=True, o=True)
    inertia = Bool(True).tag(sync=True, o=True)
    inertia_deceleration = Int(3000).tag(sync=True, o=True)
    inertia_max_speed = Int(1500).tag(sync=True, o=True)
    # inertia_threshold = Int(?, o=True).tag(sync=True)
    zoom_control = Bool(True).tag(sync=True, o=True)
    attribution_control = Bool(True).tag(sync=True, o=True)
    # fade_animation = Bool(?).tag(sync=True, o=True)
    # zoom_animation = Bool(?).tag(sync=True, o=True)
    zoom_animation_threshold = Int(4).tag(sync=True, o=True)
    # marker_zoom_animation = Bool(?).tag(sync=True, o=True)

    options = List(trait=Unicode).tag(sync=True)

    @default('options')
    def _default_options(self):
        return [name for name in self.traits(o=True)]

    south = Float(def_loc[0], read_only=True).tag(sync=True)
    north = Float(def_loc[0], read_only=True).tag(sync=True)
    east = Float(def_loc[1], read_only=True).tag(sync=True)
    west = Float(def_loc[1], read_only=True).tag(sync=True)

    layers = Tuple(trait=Instance(Layer)).tag(sync=True,
                                              **widget_serialization)

    @default('layers')
    def _default_layers(self):
        return (basemap_to_tiles(self.basemap, self.modisdate, base=True), )

    bounds = Tuple(read_only=True)
    bounds_polygon = Tuple(read_only=True)

    @observe('south', 'north', 'east', 'west')
    def _observe_bounds(self, change):
        self.set_trait('bounds',
                       ((self.south, self.west), (self.north, self.east)))
        self.set_trait('bounds_polygon',
                       ((self.north, self.west), (self.north, self.east),
                        (self.south, self.east), (self.south, self.west)))

    def __init__(self, **kwargs):
        super(Map, self).__init__(**kwargs)
        self.on_displayed(self._fire_children_displayed)
        self.on_msg(self._handle_leaflet_event)

    def _fire_children_displayed(self, widget, **kwargs):
        for layer in self.layers:
            layer._handle_displayed(**kwargs)
        for control in self.controls:
            control._handle_displayed(**kwargs)

    _layer_ids = List()

    @validate('layers')
    def _validate_layers(self, proposal):
        '''Validate layers list.

        Makes sure only one instance of any given layer can exist in the
        layers list.
        '''
        self._layer_ids = [l.model_id for l in proposal.value]
        if len(set(self._layer_ids)) != len(self._layer_ids):
            raise LayerException(
                'duplicate layer detected, only use each layer once')
        return proposal.value

    def add_layer(self, layer):
        if isinstance(layer, dict):
            layer = basemap_to_tiles(layer)
        if layer.model_id in self._layer_ids:
            raise LayerException('layer already on map: %r' % layer)
        self.layers = tuple([l for l in self.layers] + [layer])

    def remove_layer(self, layer):
        if layer.model_id not in self._layer_ids:
            raise LayerException('layer not on map: %r' % layer)
        self.layers = tuple(
            [l for l in self.layers if l.model_id != layer.model_id])

    def substitute_layer(self, old, new):
        if isinstance(new, dict):
            new = basemap_to_tiles(new)
        if old.model_id not in self._layer_ids:
            raise LayerException(
                'Could not substitute layer: layer not on map.')
        self.layers = tuple(
            [new if l.model_id == old.model_id else l for l in self.layers])

    def clear_layers(self):
        self.layers = ()

    controls = Tuple(trait=Instance(Control)).tag(sync=True,
                                                  **widget_serialization)
    _control_ids = List()

    @validate('controls')
    def _validate_controls(self, proposal):
        '''Validate controls list.

        Makes sure only one instance of any given layer can exist in the
        controls list.
        '''
        self._control_ids = [c.model_id for c in proposal.value]
        if len(set(self._control_ids)) != len(self._control_ids):
            raise ControlException(
                'duplicate control detected, only use each control once')
        return proposal.value

    def add_control(self, control):
        if control.model_id in self._control_ids:
            raise ControlException('control already on map: %r' % control)
        self.controls = tuple([c for c in self.controls] + [control])

    def remove_control(self, control):
        if control.model_id not in self._control_ids:
            raise ControlException('control not on map: %r' % control)
        self.controls = tuple(
            [c for c in self.controls if c.model_id != control.model_id])

    def clear_controls(self):
        self.controls = ()

    def __iadd__(self, item):
        if isinstance(item, Layer):
            self.add_layer(item)
        elif isinstance(item, Control):
            self.add_control(item)
        return self

    def __isub__(self, item):
        if isinstance(item, Layer):
            self.remove_layer(item)
        elif isinstance(item, Control):
            self.remove_control(item)
        return self

    def __add__(self, item):
        if isinstance(item, Layer):
            self.add_layer(item)
        elif isinstance(item, Control):
            self.add_control(item)
        return self

    # Event handling
    _interaction_callbacks = Instance(CallbackDispatcher, ())

    def _handle_leaflet_event(self, _, content, buffers):
        if content.get('event', '') == 'interaction':
            self._interaction_callbacks(**content)

    def on_interaction(self, callback, remove=False):
        self._interaction_callbacks.register_callback(callback, remove=remove)
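
# Usage sketch (assumption: a Jupyter notebook with the jupyter-leaflet
# front-end installed; event field names such as 'type' and 'coordinates' are
# whatever the front-end reports). Only methods defined above are used.
m = Map(center=[52.0, 5.0], zoom=8, scroll_wheel_zoom=True)

def log_interaction(**kwargs):
    # invoked for every 'interaction' custom message relayed by
    # _handle_leaflet_event above
    print(kwargs.get('type'), kwargs.get('coordinates'))

m.on_interaction(log_interaction)

osm = basemap_to_tiles(dict(
    url='https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
    max_zoom=19,
    attribution='Map data (c) OpenStreetMap contributors'))
m += osm      # __iadd__ dispatches to add_layer()
m -= osm      # __isub__ dispatches to remove_layer()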
Esempio n. 29
0
class ChargeResolutionGenerator(Tool):
    name = "ChargeResolutionGenerator"
    description = "Generate the a pickle file of ChargeResolutionFile for " \
                  "either MC or data files."

    telescopes = Int(1,
                     help='Telescopes to include from the event file. '
                     'Default = 1').tag(config=True)
    output_name = Unicode('charge_resolution',
                          help='Name of the output charge resolution hdf5 '
                          'file').tag(config=True)
    input_path = Unicode(help='Path to directory containing data').tag(
        config=True)

    max_events = Int(1,
                     help='Maximum number of events to use').tag(config=True)

    plot_cam = Bool(False,
                    help='Enable plotting of the individual camera').tag(
                        config=True)

    use_true_pe = Bool(False, help='Use true MC p.e.').tag(config=True)

    calibrator = Unicode(
        'HESSIOR1Calibrator',
        help='which calibrator to use, default = HESSIOR1Calibrator').tag(
            config=True)

    aliases = Dict(
        dict(input_path='ChargeResolutionGenerator.input_path',
             calibrator='ChargeResolutionGenerator.calibrator',
             max_events='ChargeResolutionGenerator.max_events',
             extractor='ChargeExtractorFactory.product',
             window_width='ChargeExtractorFactory.window_width',
             t0='ChargeExtractorFactory.t0',
             window_shift='ChargeExtractorFactory.window_shift',
             sig_amp_cut_HG='ChargeExtractorFactory.sig_amp_cut_HG',
             sig_amp_cut_LG='ChargeExtractorFactory.sig_amp_cut_LG',
             lwt='ChargeExtractorFactory.lwt',
             clip_amplitude='CameraDL1Calibrator.clip_amplitude',
             radius='CameraDL1Calibrator.radius',
             max_pe='ChargeResolutionCalculator.max_pe',
             T='ChargeResolutionGenerator.telescopes',
             o='ChargeResolutionGenerator.output_name',
             plot_cam='ChargeResolutionGenerator.plot_cam',
             use_true_pe='ChargeResolutionGenerator.use_true_pe'))
    classes = List([
        EventSourceFactory, HESSIOEventSource, TargetIOEventSource,
        ChargeExtractorFactory, CameraDL1Calibrator,
        ChargeResolutionCalculator, CameraCalibrator
    ])

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.eventsource = None
        self.r1 = None
        self.dl0 = None
        self.dl1 = None
        self.calculator = None
        self.cal = None

    def setup(self):
        kwargs = dict(config=self.config, tool=self)
        self.dl0 = CameraDL0Reducer(**kwargs)

        self.dl1 = CameraDL1Calibrator(**kwargs)

        self.cal = CameraCalibrator(r1_product=self.calibrator)

        self.calculator = ChargeResolutionCalculator(**kwargs)

    def start(self):
        run_list = np.loadtxt('%s/runlist.txt' % self.input_path, unpack=True)
        debug = False  # toggle for the pedestal-vs-NSB diagnostic plots below
        plot_delay = 0.5
        disp = None

        if debug:
            fig = plt.figure(1)
            ax = fig.add_subplot(111)
        for n, run in enumerate(run_list[0]):
            # TODO remove need for hardcoded file name
            if self.calibrator == "TargetIOR1Calibrator":
                file_name = "%s/Run%05d_r1.tio" % (self.input_path, int(run))
                print(file_name)
            elif self.calibrator == "HESSIOR1Calibrator":
                file_name = "%s/Run%05d_mc.simtel.gz" % (self.input_path,
                                                         int(run))
                print(file_name)
            else:
                raise ValueError('Unknown calibrator: %s' % self.calibrator)

            try:
                source = EventSourceFactory.produce(input_url=file_name,
                                                    max_events=self.max_events)
                true_pe = []
                # lab_pe = []
                peds_all = []
                for event in tqdm(source):
                    self.cal.calibrate(event)
                    self.dl0.reduce(event)
                    self.dl1.calibrate(event)
                    input_pe = run_list[2][n]
                    try:
                        input_nsb = run_list[5][n]
                    except IndexError:
                        print('File has no column for NSB, setting to 0')
                        input_nsb = 0
                    if self.plot_cam == True:
                        if disp is None:
                            geom = event.inst.subarray.tel[
                                self.telescopes].camera
                            disp = CameraDisplay(geom)
                            disp.add_colorbar()
                            plt.show(block=False)
                        im = event.dl1.tel[self.telescopes].image[0]
                        disp.image = im
                        plt.pause(plot_delay)

                    teldata = event.r0.tel[self.telescopes].waveform[0]
                    peds = teldata[:, 0:10].mean(axis=1)
                    peds2 = teldata[:, 0:10].std(axis=1)
                    peds_all.append(teldata[:, 0:90])
                    # plt.hist(peds,bins=50, alpha=0.4)
                    # plt.show()
                    # print(teldata)
                    # plt.plot(range(len(teldata[100])), teldata[100])
                    # plt.show()
                    # exit()
                # print(np.mean(peds_all), np.std(peds_all))
                # exit()
                # true_charge_mc = event.mc.tel[self.telescopes].photo_electron_image
                # measured_charge = event.dl1.tel[self.telescopes].image[0]
                # true_charge_lab = np.asarray([input_pe]*len(measured_charge))
                # true_pe.append(true_charge_mc)
                # if self.use_true_pe:
                #     true_charge=true_charge_mc
                # else:
                #     true_charge=true_charge_lab.astype(int)
                #
                # self.calculator.add_charges(true_charge, measured_charge)

                if debug:
                    plt.errorbar(input_nsb,
                                 np.mean(peds_all),
                                 np.std(peds_all),
                                 marker='x',
                                 color='k')
                    # plt.scatter(input_nsb, np.std(peds_all), marker ='x',color='k')
                    plt.xlabel('Non pulsed background light [GHz]')
                    plt.ylabel('Pedestal mean')
            except FileNotFoundError:
                stop = 0
                print('file_not_found')
        plt.show()
        # if debug:
        #     plt.xscale('log')
        #     plt.yscale('log')
        #     plt.plot([0,1000],[0,1000], 'k:')
        #     plt.xlabel('Input p.e.')
        #     plt.ylabel('True mc p.e.')
        #     plt.show()

    def finish(self):
        out_file = '%s/charge_resolution_test.h5' % self.input_path  # unused
        self.calculator.save(self.output_name)
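
# Entry-point sketch (assumption: ctapipe's Tool provides run(), which parses
# the command line using the aliases dict above, e.g. --input_path, -T, -o).
def main():
    exe = ChargeResolutionGenerator()
    exe.run()

if __name__ == '__main__':
    main()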
Esempio n. 30
0
class HDFSContentsManager(ContentsManager):
    """A ContentsManager implementation that persists to HDFS."""

    root_dir = Unicode(
        help="""
        The root directory to serve from.

        By default this is populated by ``root_dir_template``.
        """,
        config=True
    )

    @default('root_dir')
    def _default_root_dir(self):
        return self.root_dir_template.format(
            username=getuser()
        )

    root_dir_template = Unicode(
        default_value="/user/{username}/notebooks",
        config=True,
        help="""
        A template string to populate ``root_dir`` from.

        Receives the following format parameters:

        - username
        """
    )

    create_root_dir_on_startup = Bool(
        default_value=True,
        config=True,
        help="Create ``root_dir`` on startup if it doesn't already exist"
    )

    hdfs_host = Unicode(
        default_value="default",
        config=True,
        help="""
        The hostname of the HDFS namenode.

        By default this will be inferred from the HDFS configuration files.
        """
    )

    hdfs_port = Integer(
        default_value=0,
        config=True,
        help="""
        The port for the HDFS namenode.

        By default this will be inferred from the HDFS configuration files.
        """
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.log.debug("Connecting to HDFS at %s:%d",
                       self.hdfs_host, self.hdfs_port)
        self.fs = hdfs.connect(host=self.hdfs_host, port=self.hdfs_port)
        if self.create_root_dir_on_startup:
            self.ensure_root_directory()

    def ensure_root_directory(self):
        self.log.debug("Creating root notebooks directory: %s", self.root_dir)
        self.fs.mkdir(self.root_dir)

    def _checkpoints_class_default(self):
        return HDFSCheckpoints

    def info_string(self):
        return "Serving notebooks from HDFS directory: %s" % self.root_dir

    def infer_type(self, path):
        if path.endswith(".ipynb"):
            return "notebook"
        elif self.fs.isdir(path):
            return "directory"
        else:
            return "file"

    def is_hidden(self, path):
        hdfs_path = to_fs_path(path, self.root_dir)
        return is_hidden(hdfs_path, self.root_dir)

    def file_exists(self, path):
        hdfs_path = to_fs_path(path, self.root_dir)
        return self.fs.isfile(hdfs_path)

    def dir_exists(self, path):
        hdfs_path = to_fs_path(path, self.root_dir)
        return self.fs.isdir(hdfs_path)

    def exists(self, path):
        hdfs_path = to_fs_path(path, self.root_dir)
        return self.fs.exists(hdfs_path)

    def _info_and_check_kind(self, path, hdfs_path, kind):
        try:
            with perm_to_403(path):
                info = self.fs.info(hdfs_path)
        except ArrowIOError:
            raise HTTPError(404, "%s does not exist: %s"
                            % (kind.capitalize(), path))

        if info['kind'] != kind:
            raise HTTPError(400, "%s is not a %s" % (path, kind))
        return info

    def _model_from_info(self, info, type=None):
        if 'name' in info:
            hdfs_path = info['name']
            timestamp = info['last_modified_time']
        else:
            # info from `ls` is different for some reason
            hdfs_path = urlsplit(info['path']).path
            timestamp = info['last_modified']

        path = to_api_path(hdfs_path, self.root_dir)
        name = path.rsplit('/', 1)[-1]

        if type is None:
            if info['kind'] == 'directory':
                type = 'directory'
            elif path.endswith('.ipynb'):
                type = 'notebook'
            else:
                type = 'file'

        mimetype = mimetypes.guess_type(path)[0] if type == 'file' else None
        size = info['size'] if type != 'directory' else None
        timestamp = utcfromtimestamp(timestamp)
        model = {'name': name,
                 'path': path,
                 'last_modified': timestamp,
                 'created': timestamp,
                 'type': type,
                 'size': size,
                 'mimetype': mimetype,
                 'content': None,
                 'format': None,
                 'writable': True}

        return model

    def _dir_model(self, path, hdfs_path, content):
        info = self._info_and_check_kind(path, hdfs_path, 'directory')
        model = self._model_from_info(info, 'directory')
        if content:
            with perm_to_403(path):
                records = self.fs.ls(hdfs_path, True)
            contents = [self._model_from_info(i) for i in records]
            # Filter out hidden files/directories
            # These are rare, so do this after generating contents, not before
            model['content'] = [c for c in contents
                                if self.should_list(c['name']) and not
                                c['name'].startswith('.')]
            model['format'] = 'json'
        return model

    def _file_model(self, path, hdfs_path, content, format):
        info = self._info_and_check_kind(path, hdfs_path, 'file')
        model = self._model_from_info(info, 'file')

        if content:
            content, format = self._read_file(path, hdfs_path, format)
            if model['mimetype'] is None:
                model['mimetype'] = {
                    'text': 'text/plain',
                    'base64': 'application/octet-stream'
                }[format]

            model.update(
                content=content,
                format=format,
            )

        return model

    def _notebook_model(self, path, hdfs_path, content=True):
        info = self._info_and_check_kind(path, hdfs_path, 'file')
        model = self._model_from_info(info, 'notebook')

        if content:
            contents = self._read_notebook(path, hdfs_path)
            self.mark_trusted_cells(contents, path)
            model['content'] = contents
            model['format'] = 'json'
            self.validate_notebook_model(model)

        return model

    def _read_file(self, path, hdfs_path, format):
        if not self.fs.isfile(hdfs_path):
            raise HTTPError(400, "Cannot read non-file %s" % path)

        with perm_to_403(path):
            with self.fs.open(hdfs_path, 'rb') as f:
                bcontent = f.read()

        if format is None:
            try:
                return bcontent.decode('utf8'), 'text'
            except UnicodeError:
                return encodebytes(bcontent).decode('ascii'), 'base64'
        elif format == 'text':
            try:
                return bcontent.decode('utf8'), 'text'
            except UnicodeError:
                raise HTTPError(400, "%s is not UTF-8 encoded" % path,
                                reason='bad format')
        else:
            return encodebytes(bcontent).decode('ascii'), 'base64'

    def _read_notebook(self, path, hdfs_path):
        with perm_to_403(path):
            with self.fs.open(hdfs_path, 'rb') as f:
                content = f.read()
        try:
            return nbformat.reads(content.decode('utf8'), as_version=4)
        except Exception as e:
            raise HTTPError(400, "Unreadable Notebook: %s\n%r" % (path, e))

    def get(self, path, content=True, type=None, format=None):
        hdfs_path = to_fs_path(path, self.root_dir)

        if not self.fs.exists(hdfs_path):
            raise HTTPError(404, 'No such file or directory: %s' % path)
        elif not self.allow_hidden and is_hidden(hdfs_path, self.root_dir):
            self.log.debug("Refusing to serve hidden directory %r", hdfs_path)
            raise HTTPError(404, 'No such file or directory: %s' % path)

        if type is None:
            type = self.infer_type(hdfs_path)

        if type == 'directory':
            model = self._dir_model(path, hdfs_path, content)
        elif type == 'notebook':
            model = self._notebook_model(path, hdfs_path, content)
        else:
            model = self._file_model(path, hdfs_path, content, format)
        return model

    def _save_directory(self, path, hdfs_path, model):
        if not self.allow_hidden and is_hidden(hdfs_path, self.root_dir):
            raise HTTPError(400, 'Cannot create hidden directory %r' % path)

        if not self.fs.exists(hdfs_path):
            self.log.debug("Creating directory at %s", hdfs_path)
            with perm_to_403(path):
                self.fs.mkdir(hdfs_path)
        elif not self.fs.isdir(hdfs_path):
            raise HTTPError(400, 'Not a directory: %s' % path)

    def _save_file(self, path, hdfs_path, model):
        format = model['format']
        content = model['content']

        if format not in {'text', 'base64'}:
            raise HTTPError(
                400,
                "Must specify format of file contents as 'text' or 'base64'",
            )
        try:
            if format == 'text':
                bcontent = content.encode('utf8')
            else:
                b64_bytes = content.encode('ascii')
                bcontent = decodebytes(b64_bytes)
        except Exception as e:
            raise HTTPError(400, 'Encoding error saving %s: %s' % (path, e))

        self.log.debug("Saving file to %s", hdfs_path)
        with perm_to_403(path):
            with self.fs.open(hdfs_path, 'wb') as f:
                f.write(bcontent)

    def _save_notebook(self, path, hdfs_path, model):
        nb = nbformat.from_dict(model['content'])
        self.check_and_sign(nb, path)
        content = nbformat.writes(nb, version=nbformat.NO_CONVERT)
        bcontent = content.encode('utf8')
        self.log.debug("Saving notebook to %s", hdfs_path)
        with perm_to_403(path):
            with self.fs.open(hdfs_path, 'wb') as f:
                f.write(bcontent)
        self.validate_notebook_model(model)
        return model.get('message')

    def save(self, model, path):
        if 'type' not in model:
            raise HTTPError(400, 'No file type provided')

        typ = model['type']

        if 'content' not in model and typ != 'directory':
            raise HTTPError(400, 'No file content provided')

        hdfs_path = to_fs_path(path, self.root_dir)

        message = None
        if typ == 'notebook':
            message = self._save_notebook(path, hdfs_path, model)
        elif typ == 'file':
            self._save_file(path, hdfs_path, model)
        elif typ == 'directory':
            self._save_directory(path, hdfs_path, model)
        else:
            raise HTTPError(400, "Unhandled contents type: %s" % typ)

        model = self.get(path, type=model["type"], content=False)
        if message is not None:
            model['message'] = message

        return model

    def _is_dir_empty(self, path, hdfs_path):
        with perm_to_403(path):
            files = self.fs.ls(hdfs_path)
        if not files:
            return True
        cp_dir = getattr(self.checkpoints, 'checkpoint_dir', None)
        files = {f.rsplit('/', 1)[-1] for f in files} - {cp_dir}
        return not files

    def delete_file(self, path):
        hdfs_path = to_fs_path(path, self.root_dir)

        if not self.fs.exists(hdfs_path):
            raise HTTPError(
                404, 'File or directory does not exist: %s' % path
            )

        if self.fs.isdir(hdfs_path):
            if not self._is_dir_empty(path, hdfs_path):
                raise HTTPError(400, 'Directory %s not empty' % path)
            self.log.debug("Deleting directory at %s", hdfs_path)
            with perm_to_403(path):
                self.fs.delete(hdfs_path, recursive=True)
        else:
            self.log.debug("Deleting file at %s", hdfs_path)
            with perm_to_403(path):
                self.fs.delete(hdfs_path)

    def rename_file(self, old_path, new_path):
        if old_path == new_path:
            return

        old_hdfs_path = to_fs_path(old_path, self.root_dir)
        new_hdfs_path = to_fs_path(new_path, self.root_dir)

        if self.fs.exists(new_hdfs_path):
            raise HTTPError(409, 'File already exists: %s' % new_path)

        # Move the file
        self.log.debug("Renaming %s -> %s", old_hdfs_path, new_hdfs_path)
        try:
            with perm_to_403(old_path):
                self.fs.rename(old_hdfs_path, new_hdfs_path)
        except HTTPError:
            raise
        except Exception as e:
            raise HTTPError(
                500, 'Unknown error renaming file: %s\n%s' % (old_path, e)
            )
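
# Deployment sketch (assumptions: the manager is importable as
# 'hdfscm.HDFSContentsManager', and `c` is the config object provided in a
# jupyter_notebook_config.py). Every trait referenced is declared config=True
# above.
c.NotebookApp.contents_manager_class = 'hdfscm.HDFSContentsManager'
c.HDFSContentsManager.root_dir_template = '/user/{username}/notebooks'
c.HDFSContentsManager.hdfs_host = 'namenode.example.com'   # hypothetical host
c.HDFSContentsManager.hdfs_port = 8020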