Example #1
    def __init__(self, url_or_resource, io_loop=None, http_client=None):
        if io_loop is None:
            io_loop = IOLoop.current()
        self.io_loop = io_loop
        if http_client is None:
            http_client = AsyncHTTPClient()
        self.http_client = http_client

        loader = Loader(
                http_client=HTTPClient(defaults=self.http_client.defaults),
                processors=[WebsocketProcessor(), ClientProcessor()]
        )

        if isinstance(url_or_resource, str):
            log.debug('Loading from {0}'.format(url_or_resource))
            self.api_docs = loader.load_resource_listing(url_or_resource)
        else:
            log.debug('Loading from {0}'.format(
                    url_or_resource.get('basePath')))
            self.api_docs = url_or_resource
            loader.process_resource_listing(self.api_docs)

        self.resources = {
            resource['name']: Resource(resource, self.http_client)
            for resource in self.api_docs['apis']}
Example #2
 def get_request_type(self, query_args):
     app_log.debug('Query args: %s', query_args)
     if not query_args:
         return CONFIG_TYPE_RUNNING
     else:
         # wrap the default in a list: query arguments map names to value
         # lists, so indexing a bare string would return its first character
         typearg = query_args.get("type", [CONFIG_TYPE_RUNNING])
         return typearg[0]
Example #3
 def check_config_type(self, request_type):
     app_log.debug('Requested config type: %s', request_type)
     if request_type not in [CONFIG_TYPE_RUNNING, CONFIG_TYPE_STARTUP]:
         error = "Invalid configuration type. Configuration "\
                 "types allowed: %s, %s" %\
                 (CONFIG_TYPE_RUNNING, CONFIG_TYPE_STARTUP)
         raise DataValidationFailed(error)
Example #4
def main():
    parse_command_line()
    settings = dict(
        pipeline_file=options.pipeline
    )
    app = remotecontrol.app.Application(settings)
    options.logging = 'DEBUG'
    enable_pretty_logging(options)
    app_log.setLevel(logging.DEBUG)

    server = HTTPServer(app)
    server.listen(options.port, options.host)
    app_log.info("Version: %s from: %s" % (remotecontrol.VERSION, remotecontrol.VERSION_DATE))
    app_log.info("Listen on http://%s:%d/" % (
        options.host if options.host != "" else "localhost",
        options.port)
    )
    # app.processor.start()
    second_tick = None
    try:
        tornado.autoreload.add_reload_hook(app.manager.stop)
        second_tick = tornado.ioloop.PeriodicCallback(lambda: app.second_tick(), 1000)
        second_tick.start()
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        second_tick.stop()
        app_log.info("stop second tick")
        app.manager.stop()
        tornado.ioloop.IOLoop.instance().stop()
    app_log.debug("Server shutdown.")
Example #5
def verify_index(resource, parent, index_values, schema, idl):
    '''
        Verify if a resource exists in the DB Table using the index.
    '''

    if resource.table not in idl.tables:
        return None

    # check if we are dealing with key/value type of forward reference
    kv_type = False
    if parent is not None and parent.relation == OVSDB_SCHEMA_CHILD:
        if schema.ovs_tables[parent.table].references[parent.column].kv_type:
            kv_type = True

    if kv_type:
        # check in parent table that the index exists
        app_log.debug('verifying key/value type reference')
        row = utils.kv_index_to_row(index_values, parent, idl)
    else:
        dbtable = idl.tables[resource.table]
        table_schema = schema.ovs_tables[resource.table]

        row = utils.index_to_row(index_values, table_schema, dbtable)

    return row
Example #6
    def get(self, path=None):
        '''Spawns a brand new server'''
        if path is None:
            # no path, use random prefix
            prefix = "user-" + sample_with_replacement(string.ascii_letters +
                                                       string.digits)
        else:
            prefix = path.lstrip('/').split('/', 1)[0]

        self.write("Initializing {}".format(prefix))

        container_id, ip, port = yield self.spawner.create_notebook_server(prefix,
                image=self.image, ipython_executable=self.ipython_executable,
                mem_limit=self.mem_limit, cpu_shares=self.cpu_shares,
                container_ip=self.container_ip,
                container_port=self.container_port)

        app_log.debug("%s:%s prefix=%s container=%s", ip, port, prefix, container_id)
        yield self.proxy(ip, port, prefix, container_id)

        # Wait for the notebook server to come up.
        yield self.wait_for_server(ip, port, prefix)

        if path is None:
            # Redirect the user to the configured redirect location
            
            # Take out a leading slash
            redirect_uri = self.redirect_uri.lstrip("/")
            url = "/".join(("/{}".format(prefix), redirect_uri))
        else:
            url = path
            if not url.startswith('/'):
                url = '/' + url
        app_log.debug("redirecting %s -> %s", self.request.path, url)
        self.redirect(url, permanent=False)
Example #7
    def prepare(self):

        app_log.debug("Incoming request from %s: %s",
                      self.request.remote_ip,
                      self.request)

        if settings['auth_enabled'] and self.request.method != "OPTIONS":
            is_authenticated = userauth.is_user_authenticated(self)
        else:
            is_authenticated = True

        if not is_authenticated:
            self.set_status(httplib.UNAUTHORIZED)
            self.set_header("Link", "/login")
            self.finish()
        else:
            self.resource_path = parse_url_path(self.request.path,
                                                self.schema,
                                                self.idl,
                                                self.request.method)

            if self.resource_path is None:
                self.set_status(httplib.NOT_FOUND)
                self.finish()
            else:
                # If-Match support
                match = self.process_if_match()
                if not match:
                    self.finish()
Example #8
def main():
    """ entry """
    try:
        conf = __import__('conf')
    except ImportError as e:
        app_log.critical("Unable to load site config. ({})".format(e))
        raise SystemExit()
    parse_command_line()

    if options.debug:
        app_log.setLevel(logging.DEBUG)

    if not options.debug:
        fork_processes(None)
    options.port += task_id() or 0

    if not os.path.isdir(conf.app_path):
        app_log.critical("{p} isn't accessible, maybe "
                         "create it?".format(p=conf.app_path))
        raise SystemExit()
    app_log.debug("Starting {name} on port {port}".format(name=conf.name,
                                                          port=options.port))
    # initialize the application
    tornado.httpserver.HTTPServer(Application(options,
                                              conf)).listen(options.port,
                                                            '0.0.0.0')
    ioloop = tornado.ioloop.IOLoop.instance()
    if options.debug:
        tornado.autoreload.start(ioloop)
    # enter the Tornado IO loop
    ioloop.start()
Example #9
 def load_by_name(self, name):
     try:
         return load_by_name('app.system.cmf.%s' % (name.lower(),),
                             name.capitalize())()
     except ImportError as e:
         app_log.debug(e)
         return False
Example #10
    def get(self, path=None):
        '''Spawns a brand new server'''
        if self.allow_origin:
            self.set_header("Access-Control-Allow-Origin", self.allow_origin)
        try:
            if path is None:
                # No path. Assign a prelaunched container from the pool and redirect to it.
                # Append self.redirect_uri to the redirect target.
                container_path = self.pool.acquire().path
                app_log.info("Allocated [%s] from the pool.", container_path)

                url = "/{}/{}".format(container_path, self.redirect_uri)
            else:
                # Split /user/{some_user}/long/url/path and acquire {some_user}
                path_parts = path.lstrip('/').split('/', 2)
                app_log.info("path parts: %s", path_parts)
                user = path_parts[1]

                # Scrap a container from the pool and replace it with an ad-hoc replacement.
                # This takes longer, but is necessary to support ad-hoc containers
                yield self.pool.adhoc(user, path_parts[-1])

                url = "/" + "/".join(path_parts[:2])
                app_log.info("new url: %s", url)

            app_log.debug("Redirecting [%s] -> [%s].", self.request.path, url)
            self.redirect(url, permanent=False)
        except spawnpool.EmptyPoolError:
            app_log.warning("The container pool is empty!")
            self.render("full.html", cull_period=self.cull_period)
Example #11
    def __init__(self, commons):
        self.commons = commons
        app_log.debug("Application path (%s)" % (self.commons['script_location'],))

        urls = [
            (r"/(.*)", tornado.web.StaticFileHandler,
             {"path" : os.path.join(self.commons['script_location'],
                                    'app',
                                    'index.html')}),
            (r"/partials/(.*)", tornado.web.StaticFileHandler,
             {"path" : os.path.join(self.commons['script_location'],
                                    'app',
                                    'partials')}),

        ]

        settings = dict(
            template_path=None,
            static_path=os.path.join(self.commons['script_location'],
                                     'app'),
            xsrf_cookies=False if options.debug else True,
            cookie_secret='i love my dog!',
            debug=options.debug,
            )
        tornado.web.Application.__init__(self, urls, **settings)
Example #12
    def cache_and_finish(self, content=""):
        """finish a request and cache the result
        
        does not actually call finish - if used in @web.asynchronous,
        finish must be called separately. But we never use @web.asynchronous,
        because we are using gen.coroutine for async.
        
        currently only works if:
        
        - result is not written in multiple chunks
        - custom headers are not used
        """
        self.write(content)
        short_url = self.truncate(self.request.path)
        cache_data = pickle.dumps({"headers": self.cache_headers, "body": content}, pickle.HIGHEST_PROTOCOL)
        request_time = self.request.request_time()
        # set cache expiry to 120x request time
        # bounded by cache_expiry_min,max
        # a 30 second render will be cached for an hour
        expiry = max(min(120 * request_time, self.cache_expiry_max), self.cache_expiry_min)

        if self.request.uri in self.max_cache_uris:
            # if it's a link from the front page, cache for a long time
            expiry = self.cache_expiry_max

        log = app_log.info if expiry > self.cache_expiry_min else app_log.debug
        log("caching (expiry=%is) %s", expiry, short_url)
        try:
            with self.time_block("cache set %s" % short_url):
                yield self.cache.set(self.cache_key, cache_data, int(time.time() + expiry))
        except Exception:
            app_log.error("cache set for %s failed", short_url, exc_info=True)
        else:
            app_log.debug("cache set finished %s", short_url)
Example #13
    def __init__(self, commons):
        self.commons = commons
        self.cfg = cfg
        self.cage_path = self.commons['script_location']
        app_log.debug("Application path (%s)" % (self.cage_path,))
        # Set application path in config
        self.cfg.app_path = self.cage_path

        urls = [
            (r"/", "app.controllers.views.IndexHandler"),
            (r"/entity/([A-Za-z0-9-]+$)",
             "app.controllers.views.EntityHandler"),
            (r"/entity/search/(.*)/?",
             "app.controllers.views.EntitySearchHandler"),
            (r"/entity/new/(.*)/?",
             "app.controllers.views.EntityNewHandler"),
            (r"/entity/modify/(.*)/(.*)/?", 
             "app.controllers.views.EntityModifyHandler"),
            ]

        ui_modules_map = {}
        settings = dict(
            template_path=None,
            static_path=None,
            xsrf_cookies=False if options.debug else True,
            cookie_secret=self.cfg.cookie_secret,
            debug=options.debug,
            ui_modules=ui_modules_map,
            )
        tornado.web.Application.__init__(self, urls, **settings)
Example #14
    def _check_hub_authorization(self, url, cache_key=None, use_cache=True):
        """Identify a user with the Hub
        
        Args:
            url (str): The API URL to check the Hub for authorization
                       (e.g. http://127.0.0.1:8081/hub/api/authorizations/token/abc-def)
            cache_key (str): The key for checking the cache
            use_cache (bool): Specify use_cache=False to skip cached cookie values (default: True)

        Returns:
            user_model (dict): The user model, if a user is identified, None if authentication fails.

        Raises an HTTPError if the request failed for a reason other than no such user.
        """
        if use_cache:
            if cache_key is None:
                raise ValueError("cache_key is required when using cache")
            # check for a cached reply, so we don't check with the Hub if we don't have to
            try:
                return self.cache[cache_key]
            except KeyError:
                app_log.debug("HubAuth cache miss: %s", cache_key)

        data = self._api_request('GET', url, allow_404=True)
        if data is None:
            app_log.warning("No Hub user identified for request")
        else:
            app_log.debug("Received request from Hub user %s", data)
        if use_cache:
            # cache result
            self.cache[cache_key] = data
        return data
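
A hypothetical wrapper showing how this method might be called; the endpoint shape comes from the docstring above, while identify_user and the token-based cache-key scheme are illustrative assumptions:

def identify_user(hub_auth, token):
    # hub_auth: a HubAuth-like object exposing _check_hub_authorization
    url = 'http://127.0.0.1:8081/hub/api/authorizations/token/' + token
    return hub_auth._check_hub_authorization(
        url, cache_key='token:' + token, use_cache=True)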
Example #15
    def check(self, handler):
        """Check the rate limit for a handler.
        
        Identifies the source by ip and user-agent.
        
        If the rate limit is exceeded, raise HTTPError(429)
        """
        if not self.limit:
            return
        key = self.key_for_handler(handler)
        added = yield self.cache.add(key, 1, self.interval)
        if not added:
            # it's been seen before, use incr
            try:
                count = yield self.cache.incr(key)
            except Exception as e:
                app_log.warning("Failed to increment rate limit for %s", key)
                return

            app_log.debug("Rate limit remaining for %r: %s/%s", key, self.limit - count, self.limit)

            if count and count >= self.limit:
                minutes = self.interval // 60
                raise HTTPError(429,
                    "Rate limit exceeded for {ip} ({limit} req / {minutes} min)."
                    " Try again later.".format(
                        ip=handler.request.remote_ip,
                        limit=self.limit,
                        minutes=minutes,
                    )
                )
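
key_for_handler is not shown in this snippet; a minimal sketch, assuming it hashes the client IP and User-Agent together as the docstring describes (the name and key format are hypothetical):

import hashlib

def key_for_handler(self, handler):
    # derive a stable cache key from ip + user-agent
    agent = handler.request.headers.get('User-Agent', '')
    return 'rate-limit:' + hashlib.sha256(
        (handler.request.remote_ip + agent).encode('utf8')).hexdigest()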
Example #16
    def _on_pika_message(self, channel, method, props, body):
        log.debug('PikaClient: Message received, delivery tag #%i : %r' % (method.delivery_tag, len(body)))

        correlation_id = getattr(props, 'correlation_id', None)
        if correlation_id not in self.callbacks_hash and method.exchange != 'DLX':
            log.info('Got result for task "{0}", but no callback is registered'.format(correlation_id))
            return

        cb = self.callbacks_hash.pop(correlation_id)
        content_type = getattr(props, 'content_type', 'text/plain')

        if method.exchange == 'DLX':
            dl = props.headers['x-death'][0]
            body = ExpirationError("Dead letter received. Reason: {0}".format(dl.get('reason')))
            body.reason = dl.get('reason')
            body.time = dl.get('time')
            body.expiration = int(dl.get('original-expiration')) / 1000
        else:
            if props.content_encoding == 'gzip':
                body = zlib.decompress(body)

            if 'application/json' in content_type:
                body = json.loads(body)
            elif 'application/python-pickle' in content_type:
                body = pickle.loads(body)

        if isinstance(cb, Future):
            if isinstance(body, Exception):
                cb.set_exception(body)
            else:
                cb.set_result(body)
        else:
            out = cb(body, headers=props.headers)
            return out
Example #17
    def get_user(self, handler):
        """Get the Hub user for a given tornado handler.

        Checks cookie with the Hub to identify the current user.

        Args:
            handler (tornado.web.RequestHandler): the current request handler

        Returns:
            user_model (dict): The user model, if a user is identified, None if authentication fails.

            The 'name' field contains the user's name.
        """

        # only allow this to be called once per handler
        # avoids issues if an error is raised,
        # since this may be called again when trying to render the error page
        if hasattr(handler, '_cached_hub_user'):
            return handler._cached_hub_user

        handler._cached_hub_user = None
        encrypted_cookie = handler.get_cookie(self.cookie_name)
        if encrypted_cookie:
            user_model = self.user_for_cookie(encrypted_cookie)
            handler._cached_hub_user = user_model
            return user_model
        else:
            app_log.debug("No token cookie")
            return None
Example #18
    def get_app_status(self, webOTT):
        params = {
            'status':      "new",
            'statusCode':  0,
            'userId':      "",
            'redirectURL': "",
            'authOTT': ""
        }

        I = self.storage.find(stage="auth", webOTT=webOTT)
        if not I:
            log.debug("Cannot find webOTT: {0}".format(webOTT))
            params['status'] = 'expired'
            return params

        if I.mobile_status:
            params['status'] = I.mobile_status

        if I.mobile_status == 'user' and I.userId:
            params['userId'] = I.userId

        authOTT = I.authOTT
        if authOTT and (str(I.status) == "200"):
            params['status'] = 'authenticate'
            params['authOTT'] = authOTT

        return params
Example #19
    def release(self, container, replace_if_room=True):
        '''Shut down a container and delete its proxy entry.

        Destroy the container in an orderly fashion. If requested and capacity is remaining, create
        a new one to take its place.'''

        try:
            app_log.info("Releasing container [%s].", container)
            yield [
                self.spawner.shutdown_notebook_server(container.id),
                self._proxy_remove(container.path)
            ]
            app_log.debug("Container [%s] has been released.", container)
        except Exception as e:
            app_log.error("Unable to release container [%s]: %s", container, e)
            return

        if replace_if_room:
            running = yield self.spawner.list_notebook_servers(self.container_config,
                                                                    all=False)
            if len(running) + 1 <= self.capacity:
                app_log.debug("Launching a replacement container.")
                yield self._launch_container()
            else:
                app_log.info("Declining to launch a new container because [%i] containers are" +
                             " already running, and the capacity is [%i].",
                             len(running), self.capacity)
Example #20
    def get(self):
        code = self.get_argument("code", False)
        if not code:
            raise HTTPError(400, "oauth callback made without a token")

        # validate OAuth state
        arg_state = self.get_argument("state", None)
        cookie_state = self.get_secure_cookie(self.hub_auth.state_cookie_name)
        next_url = None
        if arg_state or cookie_state:
            # clear cookie state now that we've consumed it
            self.clear_cookie(self.hub_auth.state_cookie_name)
            if isinstance(cookie_state, bytes):
                cookie_state = cookie_state.decode('ascii', 'replace')
            # check that state matches
            if arg_state != cookie_state:
                app_log.debug("oauth state %r != %r", arg_state, cookie_state)
                raise HTTPError(403, "oauth state does not match")
            next_url = self.hub_auth.get_next_url(cookie_state)
        # TODO: make async (in a Thread?)
        token = self.hub_auth.token_for_code(code)
        user_model = self.hub_auth.user_for_token(token)
        if user_model is None:
            raise HTTPError(500, "oauth callback failed to identify a user")
        app_log.info("Logged-in user %s", user_model)
        self.hub_auth.set_cookie(self, token)
        self.redirect(next_url or self.hub_auth.base_url)
Example #21
    def _get_certivox_server_secret_share_dta(self, expires):
        path = 'serverSecret'
        url_params = url_concat('{0}{1}'.format(Keys.certivoxServer(), path), {
            'app_id': self.app_id,
            'expires': expires,
            'signature': signMessage('{0}{1}{2}'.format(path, self.app_id, expires), self.app_key)
        })
        log.debug('MIRACL server secret request: {0}'.format(url_params))
        httpclient = tornado.httpclient.HTTPClient()
        try:
            response = httpclient.fetch(url_params, **fetchConfig(url_params))
        except tornado.httpclient.HTTPError as e:
            log.error(e)
            raise SecretsError('Unable to get Server Secret from the MIRACL TA server')
        httpclient.close()

        try:
            data = json.loads(response.body)
        except ValueError as e:
            log.error(e)
            raise SecretsError('Invalid response from TA server')

        if 'serverSecret' not in data:
            raise SecretsError('serverSecret not in response from TA server')

        return data["serverSecret"]
Example #22
 def finish_notebook(self, nbjson, download_url, home_url=None, msg=None, breadcrumbs=None):
     """render a notebook from its JSON body.
     
     download_url is required, home_url is not.
     
     msg is extra information for the log message when rendering fails.
     """
     if msg is None:
         msg = download_url
     try:
         app_log.debug("Requesting render of %s", download_url)
         with self.time_block("Rendered %s" % download_url):
             nbhtml, config = yield self.pool.submit(
                 render_notebook, self.exporter, nbjson, download_url,
                 config=self.config,
             )
     except NbFormatError as e:
         app_log.error("Invalid notebook %s: %s", msg, e)
         raise web.HTTPError(400, str(e))
     except Exception as e:
         app_log.error("Failed to render %s", msg, exc_info=True)
         raise web.HTTPError(400, str(e))
     else:
         app_log.debug("Finished render of %s", download_url)
     
     html = self.render_template('notebook.html',
         body=nbhtml,
         download_url=download_url,
         home_url=home_url,
         date=datetime.utcnow().strftime(date_fmt),
         breadcrumbs=breadcrumbs,
         **config)
     yield self.cache_and_finish(html)
Example #23
def cull_idle(url, api_token, timeout):
    """cull idle single-user servers"""
    auth_header = {
        'Authorization': 'token %s' % api_token,
    }
    req = HTTPRequest(url=url + '/api/users',
        headers=auth_header,
    )
    now = datetime.datetime.utcnow()
    cull_limit = now - datetime.timedelta(seconds=timeout)
    client = AsyncHTTPClient()
    resp = yield client.fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []
    for user in users:
        last_activity = parse_date(user['last_activity'])
        if user['server'] and last_activity < cull_limit:
            app_log.info("Culling %s (inactive since %s)", user['name'], last_activity)
            req = HTTPRequest(url=url + '/api/users/%s/server' % user['name'],
                method='DELETE',
                headers=auth_header,
            )
            futures.append((user['name'], client.fetch(req)))
        elif user['server'] and last_activity > cull_limit:
            app_log.debug("Not culling %s (active since %s)", user['name'], last_activity)

    for (name, f) in futures:
        yield f
        app_log.debug("Finished culling %s", name)
Example #24
def delete_all_references(resource, schema, idl):
    """
    Delete all occurrences of reference for resource
    Parameters:
        resource - resource whose references are to be
                   deleted from the entire DB
        schema - ovsdb schema object
        idl - ovs.db.idl.Idl instance
    """
    row = get_row_from_resource(resource, idl)
    # Get the tables that reference the table of the row to delete
    tables_reference = schema.references_table_map[resource.table]
    # Get each referencing table name and the list of columns holding the reference
    for table_name, columns_list in tables_reference.iteritems():
        app_log.debug("Table %s" % table_name)
        app_log.debug("Column list %s" % columns_list)
        # Iterate over each row to see which tuple holds the reference
        for uuid, row_ref in idl.tables[table_name].rows.iteritems():
            # Iterate over each reference column and check whether it holds the reference
            for column_name in columns_list:
                # get the referenced values
                reflist = get_column_data_from_row(row_ref, column_name)
                if reflist is not None:
                    # delete the reference in that row and column
                    delete_row_reference(reflist, row, row_ref, column_name)
Example #25
def wait_for_http_server(url, timeout=10):
    """Wait for an HTTP Server to respond at url
    
    Any non-5XX response code will do, even 404.
    """
    loop = ioloop.IOLoop.current()
    tic = loop.time()
    client = AsyncHTTPClient()
    while loop.time() - tic < timeout:
        try:
            r = yield client.fetch(url, follow_redirects=False)
        except HTTPError as e:
            if e.code >= 500:
                # failed to respond properly, wait and try again
                if e.code != 599:
                    # we expect 599 for no connection,
                    # but 502 or other proxy error is conceivable
                    app_log.warn("Server at %s responded with error: %s", url, e.code)
                yield gen.Task(loop.add_timeout, loop.time() + 0.25)
            else:
                app_log.debug("Server at %s responded with %s", url, e.code)
                return
        except (OSError, socket.error) as e:
            if e.errno not in {errno.ECONNABORTED, errno.ECONNREFUSED, errno.ECONNRESET}:
                app_log.warn("Failed to connect to %s (%s)", url, e)
            yield gen.Task(loop.add_timeout, loop.time() + 0.25)
        else:
            return
    
    raise TimeoutError("Server at {url} didn't respond in {timeout} seconds".format(
        **locals()
    ))
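
wait_for_http_server is likewise a coroutine, so it must be yielded or run on an IOLoop; a minimal usage sketch under that assumption:

from tornado import gen, ioloop

@gen.coroutine
def main():
    # resolves once the server answers with a non-5XX code,
    # or raises TimeoutError after 10 seconds
    yield wait_for_http_server('http://127.0.0.1:8888', timeout=10)

ioloop.IOLoop.current().run_sync(main)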
Example #26
 def remove_client(cls, email):
     """
     Remove a client.
     @param email:
     """
     app_log.debug("remove client[{0}]".format(email))
     del cls._CLIENTS_MAP[email]
Example #27
 def _data_load(self, data_string):
     try:
         return json.loads(data_string)
     except Exception as e:
         log.debug(Lazy(lambda: traceback.format_exc()))
         log.error(Lazy(lambda: 'Parsing message error: {0}'.format(repr(e))))
         raise e
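
Lazy is not defined in this snippet; it evidently defers the formatting work until the logger actually renders the record. A minimal sketch of such a helper, under that assumption:

class Lazy(object):
    """Defer an expensive format call until the log record is rendered."""
    def __init__(self, func):
        self.func = func

    def __str__(self):
        # invoked only if the record passes the logger's level check
        return str(self.func())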
Example #28
    def prepare(self):
        self._boundary = None
        self._boundary_length = None
        self._boundary_padding = 2
        self._sep = b'\r\n\r\n'
        self._disp_header = None
        self._disp_params = None
        self._disp_name = None
        self._disp_buffer = None
        self._buffer = None

        content_type = self.request.headers.get('content-type', '')
        if not content_type.startswith('multipart/form-data'):
            raise HTTPError(400)

        fields = content_type.split(';')
        for field in fields:
            k, sep, v = field.strip().partition('=')
            if k == 'boundary' and v:
                if v.startswith('"') and v.endswith('"'):
                    v = v[1:-1]
                self._boundary = b'--' + utf8(v)
                self._boundary_length = len(self._boundary) + self._boundary_padding
                break

        if self._boundary is None:
            raise HTTPError(400)

        app_log.debug('boundary: %s', self._boundary)
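
The boundary scan above can be exercised on its own; a small self-contained sketch of the same parsing, for illustration:

from tornado.escape import utf8

def extract_boundary(content_type):
    # mirror of the loop in prepare(): find boundary=..., strip quotes
    for field in content_type.split(';'):
        k, _, v = field.strip().partition('=')
        if k == 'boundary' and v:
            if v.startswith('"') and v.endswith('"'):
                v = v[1:-1]
            return b'--' + utf8(v)
    return None

assert extract_boundary('multipart/form-data; boundary="frontier"') == b'--frontier'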
Example #29
    def get(self, path=None):
        '''Spawns a brand new server'''

        try:
            if path is None:
                # No path. Assign a prelaunched container from the pool and redirect to it.
                # Append self.redirect_uri to the redirect target.
                container_path = self.pool.acquire().path
                app_log.info("Allocated [%s] from the pool.", container_path)

                url = "/{}/{}".format(container_path, self.redirect_uri)
            else:
                path_parts = path.lstrip('/').split('/', 1)
                container_path = path_parts[0]

                # Scrap a container from the pool and replace it with an ad-hoc replacement.
                # This takes longer, but is necessary to support ad-hoc containers
                yield self.pool.adhoc(container_path)

                app_log.info("Allocated ad-hoc container at [%s].", container_path)
                url = path

            app_log.debug("Redirecting [%s] -> [%s].", self.request.path, url)
            self.redirect(url, permanent=False)
        except spawnpool.EmptyPoolError:
            app_log.warning("The container pool is empty!")
            self.render("full.html", cull_period=self.cull_period)
Example #30
def update_stats(stats):
    """Get updated stats for each host
    
    If a host fails to reply,
    assume it is down and assign it zero availability and capacity
    """

    http_client = AsyncHTTPClient()
    futures = {}
    for host in stats.keys():
        app_log.debug("Checking stats on %s" % host)
        req = HTTPRequest(host + '/stats')
        futures[host] = http_client.fetch(req)
    
    for host, f in futures.items():
        try:
            reply = yield f
            data = json.loads(reply.body.decode('utf8'))
        except Exception as e:
            app_log.error("Failed to get stats for %s: %s", host, e)
            if host in stats:
                stats[host] = {'available': 0, 'capacity': 0, 'down': True}
        else:
            app_log.debug("Got stats from %s: %s", host, data)
            if host in stats:
                stats[host] = data
Example #31
def cull_idle(url,
              api_token,
              inactive_limit,
              cull_users=False,
              remove_named_servers=False,
              max_age=0,
              concurrency=10):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {'Authorization': 'token %s' % api_token}
    req = HTTPRequest(url=url + '/users', headers=auth_header)
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()

    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    @coroutine
    def handle_server(user, server_name, server, max_age, inactive_limit):
        """Handle (maybe) culling a single server

        "server" is the entire server model from the API.

        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get('ready', bool(server['url'])):
            app_log.warning("Not culling not-ready not-pending server %s: %s",
                            log_name, server)
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server['last_activity']:
            inactive = now - parse_date(server['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        # CUSTOM CULLING TEST CODE HERE
        # Add in additional server tests here.  Return False to mean "don't
        # cull", True means "cull immediately", or, for example, update some
        # other variables like inactive_limit.
        #
        # Here, server['state'] is the result of the get_state method
        # on the spawner.  This does *not* contain the below by
        # default, you may have to modify your spawner to make this
        # work.  The `user` variable is the user model from the API.
        #
        # if server['state']['profile_name'] == 'unlimited'
        #     return False
        # inactive_limit = server['state']['culltime']
        state = server['state']
        # Support getting state from wrapspawer child's conf.
        if 'child_conf' in state:
            state = state['child_conf']
        if 'cull_max_age' in state:
            max_age = max(max_age, state['cull_max_age'])
        if 'cull_inactive_limit' in state:
            inactive_limit = max(inactive_limit, state['cull_inactive_limit'])
        app_log.info(
            f"CULL IDLE: {user['name']}/{server_name}: {max_age} inactive={inactive} inactive_limit={inactive_limit} age={age} last_activity={server['last_activity']}"
        )

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling server %s (age: %s, inactive for %s)",
                    log_name,
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling server %s (age: %s, inactive for %s)",
                log_name,
                format_td(age),
                format_td(inactive),
            )
            return False

        body = None
        if server_name:
            # culling a named server
            # A named server can be stopped and kept available to the user
            # for starting again or stopped and removed. To remove the named
            # server we have to pass an additional option in the body of our
            # DELETE request.
            delete_url = url + "/users/%s/servers/%s" % (
                quote(user['name']),
                quote(server['name']),
            )
            if remove_named_servers:
                body = json.dumps({"remove": True})
        else:
            delete_url = url + '/users/%s/server' % quote(user['name'])

        req = HTTPRequest(url=delete_url,
                          method='DELETE',
                          headers=auth_header,
                          body=body,
                          allow_nonstandard_methods=True)
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning("Server %s is slow to stop", log_name)
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.

        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if 'servers' in user:
            servers = user['servers']
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user['server']:
                servers[''] = {
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                    'url': user['server'],
                }
        server_futures = [
            handle_server(user, server_name, server, max_age, inactive_limit)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug(
                "Not culling user %s with %i servers still alive",
                user['name'],
                still_alive,
            )
            return False

        should_cull = False
        if user.get('created'):
            age = now - parse_date(user['created'])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user['last_activity']:
            inactive = now - parse_date(user['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user['name'],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling user %s (age: %s, inactive for %s)",
                    user['name'],
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling user %s (created: %s, last active: %s)",
                user['name'],
                format_td(age),
                format_td(inactive),
            )
            return False

        req = HTTPRequest(url=url + '/users/%s' % user['name'],
                          method='DELETE',
                          headers=auth_header)
        yield fetch(req)
        return True

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #34
def cull_idle(url,
              api_token,
              inactive_limit,
              cull_users=False,
              max_age=0,
              concurrency=10):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {
        'Authorization': 'token %s' % api_token,
    }
    req = HTTPRequest(
        url=url + '/users',
        headers=auth_header,
    )
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()
    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    @coroutine
    def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server

        Returns True if server was culled,
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None  # None (not a string) lets the checks below guard on it

        # check last activity
        # last_activity can be None in 0.9
        if server['last_activity']:
            inactive = now - parse_date(server['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info("Culling server %s (age: %s, inactive for %s)",
                             log_name, format_td(age), format_td(inactive))
                should_cull = True

        if not should_cull:
            app_log.debug("Not culling server %s (age: %s, inactive for %s)",
                          log_name, format_td(age), format_td(inactive))
            return False

        req = HTTPRequest(
            url=url + '/users/%s/server' % quote(user['name']),
            method='DELETE',
            headers=auth_header,
        )
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning(
                "Server %s is slow to stop",
                log_name,
            )
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user"""
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        servers = user.get(
            'servers', {
                '': {
                    'started': user.get('started'),
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                }
            })
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug("Not culling user %s with %i servers still alive",
                          user['name'], still_alive)
            return False

        should_cull = False
        if user.get('created'):
            age = now - parse_date(user['created'])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None  # None (not a string) lets the checks below guard on it

        # check last activity
        # last_activity can be None in 0.9
        if user['last_activity']:
            inactive = now - parse_date(user['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user['name'],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info("Culling user %s (age: %s, inactive for %s)",
                             user['name'], format_td(age), format_td(inactive))
                should_cull = True

        if not should_cull:
            app_log.debug("Not culling user %s (created: %s, last active: %s)",
                          user['name'], format_td(age), format_td(inactive))
            return False

        req = HTTPRequest(
            url=url + '/users/%s' % user['name'],
            method='DELETE',
            headers=auth_header,
        )
        yield fetch(req)
        return True

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #36
0
    def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server

        Returns True if server was culled,
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server['last_activity']:
            inactive = now - parse_date(server['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info("Culling server %s (age: %s, inactive for %s)",
                             log_name, format_td(age), format_td(inactive))
                should_cull = True

        if not should_cull:
            app_log.debug("Not culling server %s (age: %s, inactive for %s)",
                          log_name, format_td(age), format_td(inactive))
            return False

        req = HTTPRequest(
            url=url + '/users/%s/server' % quote(user['name']),
            method='DELETE',
            headers=auth_header,
        )
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning(
                "Server %s is slow to stop",
                log_name,
            )
            # return False to prevent culling user with pending shutdowns
            return False
        return True
Example #37
0
        def decorators(self, *args, **kwargs):

            # build the cache field name
            _field = generate_field(field, self, fun, args, kwargs)

            # refresh the cache from a background thread
            def cache_data_in_background_thread():
                """
                Data has expired: return the stale data and refresh the
                cache in the background.
                :return:
                """
                def _start():
                    # print 'refresh cache: {0} --- clean_old'.format(fun.__name__)
                    _data_from_db = fun(self, *args, **kwargs)
                    _data_to_cache = {
                        'data': _data_from_db,
                        'last_query_time': str(time.time()),  # refresh time
                        'last_use_date': datetime.datetime.now().strftime(
                            '%Y-%m-%d'),  # date of most recent use
                        'life': life  # lifetime, counted from most recent use
                    }
                    rsdb.hset(key, _field, json_encode(_data_to_cache))

                _thread_pool.submit(_start)

            # fetch data from the cache
            data_from_cache = rsdb.hget(key, _field)
            # cache hit
            if data_from_cache:
                data_dict = json_decode(data_from_cache)
                # refresh immediately
                if immediately_refresh is True:
                    # refresh the data in the background
                    cache_data_in_background_thread()
                    # return the stale data
                    data_to_return = data_dict['data']
                # no immediate refresh
                else:
                    # never expires
                    if timeout == 0:
                        # update the timestamps stored in the cache
                        data_to_cache = {
                            'data': data_dict['data'],
                            'last_query_time': str(time.time()),
                            'last_use_date':
                            datetime.datetime.now().strftime('%Y-%m-%d'),
                            'life': life
                        }
                        rsdb.hset(key, _field, json_encode(data_to_cache))
                        # data to return
                        data_to_return = data_dict['data']
                    # can expire
                    else:
                        last_query_time = float(data_dict['last_query_time'])
                        now_time = time.time()
                        # expired
                        if now_time - last_query_time > timeout:
                            # data expired: update the cached timestamp first
                            # so concurrent calls don't all spawn refresh
                            # threads, start a refresh thread, and return
                            # the stale data
                            data_to_cache = {
                                'data': data_dict['data'],
                                'last_query_time': str(time.time()),
                                'last_use_date':
                                datetime.datetime.now().strftime('%Y-%m-%d'),
                                'life': life
                            }
                            rsdb.hset(key, _field, json_encode(data_to_cache))
                            # start the refresh thread
                            cache_data_in_background_thread()
                            # return the stale data
                            data_to_return = data_dict['data']
                        # not expired
                        else:
                            # update the last-use date
                            data_to_cache = {
                                'data': data_dict['data'],
                                'last_query_time':
                                data_dict['last_query_time'],
                                'last_use_date':
                                datetime.datetime.now().strftime('%Y-%m-%d'),
                                'life': life
                            }
                            rsdb.hset(key, _field, json_encode(data_to_cache))
                            # data to return
                            data_to_return = data_dict['data']
            # cache miss: fetch the data from the database and cache it
            else:
                app_log.debug(
                    '<---------> get data from db <---------> {0} <--------->'.
                    format(fun.__name__))
                data_from_db = fun(self, *args, **kwargs)
                data_to_cache = {
                    'data': data_from_db,
                    'last_query_time': str(time.time()),
                    'last_use_date':
                    datetime.datetime.now().strftime('%Y-%m-%d'),
                    'life': life
                }
                rsdb.hset(key, _field, json_encode(data_to_cache))
                data_to_return = data_from_db

            # purge expired cache entries
            clean_old_cached_data(key=LCIC_MAIN_TEMP_FIXED_INFO_KEY)
            # return the data
            return data_to_return
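Example #37 shows only the inner decorators wrapper; the factory that supplies key, field, fun, timeout, life and immediately_refresh is not part of the excerpt. A minimal sketch of what such a factory could look like, assuming the helpers from the snippet (rsdb, generate_field, _thread_pool, clean_old_cached_data) exist; the name redis_hash_cache and its defaults are hypothetical:

import functools

def redis_hash_cache(key, field, timeout=0, life=7, immediately_refresh=False):
    # hypothetical factory: binds the cache settings, then wraps the
    # decorated method with the logic shown in Example #37
    def outer(fun):
        @functools.wraps(fun)
        def decorators(self, *args, **kwargs):
            ...  # body as in Example #37
        return decorators
    return outer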
Example #38
0
def cull_idle(url,
              api_token,
              inactive_limit,
              cull_users=False,
              max_age=0,
              concurrency=10):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {
        "Authorization": "token %s" % api_token,
    }
    req = HTTPRequest(
        url=url + "/users",
        headers=auth_header,
    )
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()

    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode("utf8", "replace"))
    futures = []

    @coroutine
    def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server

        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user["name"]
        if server_name:
            log_name = "%s/%s" % (user["name"], server_name)
        if server.get("pending"):
            app_log.warning(
                "Not culling server %s with pending %s",
                log_name,
                server["pending"],
            )
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get("ready", bool(server["url"])):
            app_log.warning(
                "Not culling not-ready not-pending server %s: %s",
                log_name,
                server,
            )
            return False

        if server.get("started"):
            age = now - parse_date(server["started"])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server["last_activity"]:
            inactive = now - parse_date(server["last_activity"])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info(
                "Culling server %s (inactive for %s)",
                log_name,
                format_td(inactive),
            )

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling server %s (age: %s, inactive for %s)",
                    log_name,
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling server %s (age: %s, inactive for %s)",
                log_name,
                format_td(age),
                format_td(inactive),
            )
            return False

        req = HTTPRequest(
            url=url + "/users/%s/server" % quote(user["name"]),
            method="DELETE",
            headers=auth_header,
        )
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning(
                "Server %s is slow to stop",
                log_name,
            )
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.

        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if "servers" in user:
            servers = user["servers"]
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user["server"]:
                servers[""] = {
                    "last_activity": user["last_activity"],
                    "pending": user["pending"],
                    "url": user["server"],
                }
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug(
                "Not culling user %s with %i servers still alive",
                user["name"],
                still_alive,
            )
            return False

        should_cull = False
        if user.get("created"):
            age = now - parse_date(user["created"])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user["last_activity"]:
            inactive = now - parse_date(user["last_activity"])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user["name"],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling user %s (age: %s, inactive for %s)",
                    user["name"],
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling user %s (created: %s, last active: %s)",
                user["name"],
                format_td(age),
                format_td(inactive),
            )
            return False

        req = HTTPRequest(
            url=url + "/users/%s" % user["name"],
            method="DELETE",
            headers=auth_header,
        )
        yield fetch(req)
        return True

    for user in users:
        futures.append((user["name"], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #39
0
def check_db_revision(engine):
    """Check the JupyterHub database revision

    After calling this function, an alembic tag is guaranteed to be stored in the db.

    - Checks the alembic tag and raises a ValueError if it's not the current revision
    - If no tag is stored (Bug in Hub prior to 0.8),
      guess revision based on db contents and tag the revision.
    - Empty databases are tagged with the current revision
    """
    # Check database schema version
    current_table_names = set(engine.table_names())
    my_table_names = set(Base.metadata.tables.keys())

    from .dbutil import _temp_alembic_ini

    with _temp_alembic_ini(engine.url) as ini:
        cfg = alembic.config.Config(ini)
        scripts = ScriptDirectory.from_config(cfg)
        head = scripts.get_heads()[0]
        base = scripts.get_base()

        if not my_table_names.intersection(current_table_names):
            # no tables have been created, stamp with current revision
            app_log.debug("Stamping empty database with alembic revision %s", head)
            alembic.command.stamp(cfg, head)
            return

        if 'alembic_version' not in current_table_names:
            # Has not been tagged or upgraded before.
            # we didn't start tagging revisions correctly except during `upgrade-db`
            # until 0.8
            # This should only occur for databases created prior to JupyterHub 0.8
            msg_t = "Database schema version not found, guessing that JupyterHub %s created this database."
            if 'spawners' in current_table_names:
                # 0.8
                app_log.warning(msg_t, '0.8.dev')
                rev = head
            elif 'services' in current_table_names:
                # services is present, tag for 0.7
                app_log.warning(msg_t, '0.7.x')
                rev = 'af4cbdb2d13c'
            else:
                # it's old, mark as first revision
                app_log.warning(msg_t, '0.6 or earlier')
                rev = base
            app_log.debug("Stamping database schema version %s", rev)
            alembic.command.stamp(cfg, rev)

    # check database schema version
    # it should always be defined at this point
    alembic_revision = engine.execute(
        'SELECT version_num FROM alembic_version').first()[0]
    if alembic_revision == head:
        app_log.debug("database schema version found: %s", alembic_revision)
    else:
        raise DatabaseSchemaMismatch(
            "Found database schema version {found} != {head}. "
            "Backup your database and run `jupyterhub upgrade-db`"
            " to upgrade to the latest schema.".format(
                found=alembic_revision,
                head=head,
            ))
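A minimal usage sketch for check_db_revision, assuming a SQLAlchemy engine; the sqlite URL is a placeholder:

from sqlalchemy import create_engine

engine = create_engine('sqlite:///jupyterhub.sqlite')  # placeholder URL
# stamps an empty or untagged database, or raises DatabaseSchemaMismatch
check_db_revision(engine)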
Example #40
0
    def add_table_monitor(self, table):
        app_log.debug("Adding monitoring for table %s" % table)

        if table not in self.tables_monitored:
            self.tables_monitored.add(table)
            self.restart_monitoring()
Example #41
0
 def save_bearer_token(self, token, request, *args, **kwargs):
     """Persist the Bearer token.
     The Bearer token should at minimum be associated with:
         - a client and it's client_id, if available
         - a resource owner / user (request.user)
         - authorized scopes (request.scopes)
         - an expiration time
         - a refresh token, if issued
         - a claims document, if present in request.claims
     The Bearer token dict may hold a number of items::
         {
             'token_type': 'Bearer',
             'access_token': 'askfjh234as9sd8',
             'expires_in': 3600,
             'scope': 'string of space separated authorized scopes',
             'refresh_token': '23sdf876234',  # if issued
             'state': 'given_by_client',  # if supplied by client
         }
     Note that while "scope" is a string-separated list of authorized scopes,
     the original list is still available in request.scopes.
     The token dict is passed as a reference so any changes made to the dictionary
     will go back to the user.  If additional information must return to the client
     user, and it is only possible to get this information after writing the token
     to storage, it should be added to the token dictionary.  If the token
     dictionary must be modified but the changes should not go back to the user,
     a copy of the dictionary must be made before making the changes.
     Also note that if an Authorization Code grant request included a valid claims
     parameter (for OpenID Connect) then the request.claims property will contain
     the claims dict, which should be saved for later use when generating the
     id_token and/or UserInfo response content.
     :param token: A Bearer token dict
     :param request: The HTTP Request (oauthlib.common.Request)
     :rtype: The default redirect URI for the client
     Method is used by all core grant types issuing Bearer tokens:
         - Authorization Code Grant
         - Implicit Grant
         - Resource Owner Password Credentials Grant (might not associate a client)
         - Client Credentials grant
     """
     log_token = {}
     log_token.update(token)
     scopes = token['scope'].split(' ')
     # TODO:
     if scopes != ['identify']:
         raise ValueError("Only 'identify' scope is supported")
     # redact sensitive keys in log
     for key in ('access_token', 'refresh_token', 'state'):
         if key in token:
             value = token[key]
             if isinstance(value, str):
                 log_token[key] = 'REDACTED'
     app_log.debug("Saving bearer token %s", log_token)
     if request.user is None:
         raise ValueError("No user for access token: %s" % request.user)
     client = (
         self.db.query(orm.OAuthClient)
         .filter_by(identifier=request.client.client_id)
         .first()
     )
     orm_access_token = orm.OAuthAccessToken(
         client=client,
         grant_type=orm.GrantType.authorization_code,
         expires_at=datetime.utcnow().timestamp() + token['expires_in'],
         refresh_token=token['refresh_token'],
         # TODO: save scopes,
         # scopes=scopes,
         token=token['access_token'],
         session_id=request.session_id,
         user=request.user,
     )
     self.db.add(orm_access_token)
     self.db.commit()
     return client.redirect_uri
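For reference, a Bearer token dict shaped as the docstring above describes, carrying the only scope this implementation accepts (all values are placeholders):

token = {
    'token_type': 'Bearer',
    'access_token': 'askfjh234as9sd8',  # placeholder
    'expires_in': 3600,
    'scope': 'identify',  # the only scope save_bearer_token accepts
    'refresh_token': '23sdf876234',  # placeholder
}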
Example #42
0
 def on_close(self):
     if hasattr(self, 'sub'):
         self.sub.close()
     app_log.debug('connection close')
Example #43
0
    def finish_notebook(self,
                        json_notebook,
                        download_url,
                        provider_url=None,
                        provider_icon=None,
                        provider_label=None,
                        msg=None,
                        breadcrumbs=None,
                        public=False,
                        format=None,
                        request=None):
        """render a notebook from its JSON body.

        download_url is required, provider_url is not.

        msg is extra information for the log message when rendering fails.
        """

        if msg is None:
            msg = download_url

        try:
            nb = reads(json_notebook, current_nbformat)
        except ValueError:
            app_log.error("Failed to render %s", msg, exc_info=True)
            raise web.HTTPError(400, "Error reading JSON notebook")

        try:
            app_log.debug("Requesting render of %s", download_url)
            with time_block("Rendered %s" % download_url):
                app_log.info("rendering %d B notebook from %s",
                             len(json_notebook), download_url)
                nbhtml, config = yield self.pool.submit(
                    render_notebook,
                    self.formats[format],
                    nb,
                    download_url,
                    config=self.config,
                )
        except NbFormatError as e:
            app_log.error("Invalid notebook %s: %s", msg, e)
            raise web.HTTPError(400, str(e))
        except Exception as e:
            app_log.error("Failed to render %s", msg, exc_info=True)
            raise web.HTTPError(400, str(e))
        else:
            app_log.debug("Finished render of %s", download_url)

        html = self.render_template(
            "formats/%s.html" % format,
            body=nbhtml,
            nb=nb,
            download_url=download_url,
            provider_url=provider_url,
            provider_label=provider_label,
            provider_icon=provider_icon,
            format=self.format,
            default_format=self.default_format,
            format_prefix=format_prefix,
            formats=dict(self.filter_formats(nb, json_notebook)),
            format_base=self.request.uri.replace(self.format_prefix, ""),
            date=datetime.utcnow().strftime(date_fmt),
            breadcrumbs=breadcrumbs,
            **config)

        yield self.cache_and_finish(html)

        # Index notebook
        self.index.index_notebook(download_url, nb, public)
Example #44
0
def cull_idle(url, api_token, timeout):
    # last valid activity timestamp
    cull_limit = datetime.datetime.utcnow() - datetime.timedelta(
        seconds=timeout)

    # get user list
    hub_api_authorization_header = {'Authorization': 'token %s' % api_token}
    users_request = HTTPRequest(url=url + '/users',
                                headers=hub_api_authorization_header)

    # run the request asynchronously via tornado; extract the user list
    # (contains more information)
    resp = yield AsyncHTTPClient().fetch(users_request)
    all_users = json.loads(resp.body.decode('utf8', 'replace'))

    # build a bunch of (asynchronous) HTTP request futures...
    stop_notebook_futures = []
    servers_to_check = []
    dont_cull_these = set()
    for user in all_users:

        # extract the last activity time and decide whether the server
        # should be culled
        last_activity = parse_date(user['last_activity'])
        should_cull = last_activity.replace(tzinfo=None) < cull_limit.replace(
            tzinfo=None)
        user_name = user['name']
        app_log.debug("checking %s, last activity: %s, server: %s" %
                      (user_name, last_activity, user['server']))

        if not should_cull:
            dont_cull_these.add(user_name)

        # server should be culled:
        if user['server'] and should_cull:
            app_log.info("Culling %s (inactive since %s)", user_name,
                         last_activity)
            stop_user_request = HTTPRequest(
                url=url + '/users/%s/server' % user_name,
                method='DELETE',
                headers=hub_api_authorization_header)
            stop_notebook_futures.append(
                (user_name, AsyncHTTPClient().fetch(stop_user_request)))

        # server status is None, which means the actual status needs to
        # be checked
        if not user['server'] and should_cull:
            servers_to_check.append(user_name)

        # server should not be culled; just log it
        if user['server'] and not should_cull:
            app_log.info("Not culling %s (active since %s)", user['name'],
                         last_activity)

    # Cull notebooks using the normal API.
    for (user_name, cull_request) in stop_notebook_futures:
        try:
            yield cull_request  # this call actually runs the API request that stops a server
        except HTTPError:
            # the DELETE can fail due to a bug in JupyterHub;
            # fall back to killing the server manually
            app_log.error(
                "Something went wrong culling %s, will be manually killing it.",
                user_name)
            servers_to_check.append(user_name)
            continue
        app_log.info("Finished culling %s", user_name)

    for user_name in servers_to_check:
        if user_name not in dont_cull_these:
            yield manually_kill_server(user_name)
Example #45
0
    def get(self, user, gist_id, filename=''):
        with self.catch_client_error():
            response = yield self.github_client.get_gist(gist_id)

        gist = json.loads(response_text(response))
        gist_id = gist['id']
        if user is None:
            # redirect to /gist/user/gist_id if no user given
            owner_dict = gist.get('owner', {})
            if owner_dict:
                user = owner_dict['login']
            else:
                user = '******'
            new_url = u"{format}/gist/{user}/{gist_id}".format(
                format=self.format_prefix, user=user, gist_id=gist_id)
            if filename:
                new_url = new_url + "/" + filename
            self.redirect(new_url)
            return

        files = gist['files']
        many_files_gist = (len(files) > 1)

        if not many_files_gist and not filename:
            filename = list(files.keys())[0]

        if filename and filename in files:
            file = files[filename]
            if file['truncated']:
                app_log.debug("Gist %s/%s truncated, fetching %s", gist_id,
                              filename, file['raw_url'])
                response = yield self.fetch(file['raw_url'])
                content = response_text(response)
            else:
                content = file['content']

            if not many_files_gist or filename.endswith('.ipynb'):
                yield self.finish_notebook(content,
                                           file['raw_url'],
                                           home_url=gist['html_url'],
                                           msg="gist: %s" % gist_id,
                                           public=gist['public'],
                                           format=self.format,
                                           request=self.request)
            else:
                # cannot redirect because of X-Frame-Options
                self.finish(content)
                return

        elif filename:
            raise web.HTTPError(404, "No such file in gist: %s (%s)", filename,
                                list(files.keys()))
        else:
            entries = []
            ipynbs = []
            others = []

            for file in files.values():
                e = {}
                e['name'] = file['filename']
                if file['filename'].endswith('.ipynb'):
                    e['url'] = quote('/%s/%s' % (gist_id, file['filename']))
                    e['class'] = 'fa-book'
                    ipynbs.append(e)
                else:
                    github_url = u"https://gist.github.com/{user}/{gist_id}#file-{clean_name}".format(
                        user=user,
                        gist_id=gist_id,
                        clean_name=clean_filename(file['filename']),
                    )
                    e['url'] = github_url
                    e['class'] = 'fa-share'
                    others.append(e)

            entries.extend(ipynbs)
            entries.extend(others)

            html = self.render_template(
                'treelist.html',
                entries=entries,
                tree_type='gist',
                user=user.rstrip('/'),
                github_url=gist['html_url'],
            )
            yield self.cache_and_finish(html)
Example #47
0
 def install_rbac(self):
     rbac_dict = utils.load_rbacfile()
     context["rbac"] = rbac_dict
     app_log.debug("RBAC module register ok")
Example #48
0
 def shutdown_message_backend():
     context.get("message_backend").close()
     del context["message_backend"]
     app_log.debug("message backend closed!")
Example #49
0
 def set_database_server_status(status):
     app_log.debug("set database server status to %s" % status)
     context["database_server_status"] = status
Example #50
0
 def set_cache_server_status(status):
     app_log.debug("set cache server status to %s" % status)
     context["cache_server_status"] = status
Example #51
0
 def on_close(self):
     app_log.debug('Stream client WebSocket closed: ' + self.client.id)
     if self.client.stream_id in StreamSubscriptions:
         StreamSubscriptions[self.client.stream_id].remove(self.client)
     self.client = None
Example #52
0
 def open(self, id):
     if id in StreamSubscriptions:
         app_log.debug('Stream client WebSocket opened')
         self.client = StreamClient(id)
         StreamSubscriptions[id].append(self.client)
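Examples #51 and #52 assume a module-level StreamSubscriptions registry and a StreamClient record, neither of which is shown. A hedged sketch consistent with how the handlers use them; the fields are inferred, not the original definitions:

import uuid

# stream id -> list of connected StreamClient instances
StreamSubscriptions = {}

class StreamClient(object):
    def __init__(self, stream_id):
        self.stream_id = stream_id  # key back into StreamSubscriptions
        self.id = uuid.uuid4().hex  # used in the on_close log message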
Example #53
0
 def handle_set_parameters_request(self, message):
     app_log.debug('Handling SetParametersRequest')
     self.manager.set_parameters(message.parameters)
     response = messages.SetParametersResponse.from_request(message)
     yield self.manager.message_sender.send_message_ignore_response(
         response)
Example #54
0
 def handle_global_stats_reset(self, message):
     app_log.debug("Handling:{message}".format(message=message.to_json()))
     yield self.manager.reset_engine_global_stats()
Example #55
0
    def remove_table_monitor(self, table):
        app_log.debug("Removing monitoring for table %s" % table)

        if table in self.tables_monitored:
            self.tables_monitored.discard(table)
            self.restart_monitoring()
Example #56
0
        def decorators(self, *args, **kwargs):
            # build the cache field name
            _field = generate_field(field, self, fun, args, kwargs)

            # refresh the cache from a background thread
            def cache_data_in_background_thread():
                """
                Data has expired: return the stale data and refresh the
                cache in the background.
                :return:
                """
                def _start():
                    app_log.debug('---> refresh cache: {0}'.format(
                        fun.__name__))
                    _data_from_db = fun(self, *args, **kwargs)
                    _data_to_cache = {
                        'data': _data_from_db,
                        'last_query_time': str(time.time())
                    }
                    rsdb.hset(key, _field, json_encode(_data_to_cache))

                _thread_pool.submit(_start)

            # fetch data from the cache
            data_from_cache = rsdb.hget(key, _field)
            # inspect the cached data
            if data_from_cache:
                # data exists; act according to the expiry settings
                data_dict = json_decode(data_from_cache)
                # refresh immediately
                if immediately_refresh is True:
                    # start the background refresh thread
                    cache_data_in_background_thread()
                    # return the data
                    return data_dict['data']
                # no immediate refresh
                else:
                    # never expires
                    if timeout == 0:
                        return data_dict['data']
                    # can expire
                    else:
                        last_query_time = float(data_dict['last_query_time'])
                        now_time = time.time()
                        if now_time - last_query_time > timeout:
                            # data expired: update the cached timestamp first
                            # so concurrent calls don't all spawn refresh
                            # threads, start a refresh thread, and return
                            # the stale data
                            data_to_cache = {
                                'data': data_dict['data'],
                                'last_query_time': str(time.time())
                            }
                            rsdb.hset(key, _field, json_encode(data_to_cache))
                            # start the refresh thread
                            cache_data_in_background_thread()
                            # return the stale data
                            return data_dict['data']
                        else:
                            return data_dict['data']
            else:
                app_log.debug(
                    '<---------> get data from db <---------> {0} <--------->'.
                    format(fun.__name__))
                # cache miss: fetch the data from the database and cache it
                data_from_db = fun(self, *args, **kwargs)
                data_to_cache = {
                    'data': data_from_db,
                    'last_query_time': str(time.time())
                }
                rsdb.hset(key, _field, json_encode(data_to_cache))
                return data_from_db
Example #57
0
 def new_monitor_started_callback(self, manager, idl):
     app_log.debug("New monitor/manager started.")
     self.established = True
Example #58
0
 def index_notebook(self, notebook_url, notebook_contents, *args, **kwargs):
     app_log.debug('Totally not indexing "{}"'.format(notebook_url))
Example #59
0
 def prepare(self):
     """
     Retrieve session for current user
     """
     self.session = SessionManager.instance().get_session(self)
     app_log.debug("session %s", self.session)
Example #60
0
def check_db_revision(engine):
    """Check the JupyterHub database revision

    After calling this function, an alembic tag is guaranteed to be stored in the db.

    - Checks the alembic tag and raises a ValueError if it's not the current revision
    - If no tag is stored (Bug in Hub prior to 0.8),
      guess revision based on db contents and tag the revision.
    - Empty databases are tagged with the current revision
    """
    # Check database schema version
    current_table_names = set(engine.table_names())
    # tables defined by this package (needed for the emptiness check below)
    my_table_names = set(Base.metadata.tables.keys())

    from .dbutil import _temp_alembic_ini

    with _temp_alembic_ini(engine.url) as ini:
        cfg = alembic.config.Config(ini)
        scripts = ScriptDirectory.from_config(cfg)
        head = scripts.get_heads()[0]
        base = scripts.get_base()

        if not my_table_names.intersection(current_table_names):
            # no tables have been created, stamp with current revision
            app_log.debug(
                "Stamping empty dashboards database with alembic revision %s",
                head)
            alembic.command.stamp(cfg, head)
            return

        if 'cds_alembic_version' not in current_table_names:
            # Has not been tagged or upgraded before.
            # This should only occur for databases created on cdsdashboards 0.0.11 or earlier

            # Need to identify if this is really an old version or not
            rev = head
            if 'dashboards' in current_table_names:
                inspector = inspect(engine)
                cols = inspector.get_columns('dashboards')
                colnames = [c.get('name', '') for c in cols]
                if 'presentation_type' not in colnames:
                    # presentation_type was added in v0.0.13, so the absence
                    # of cds_alembic_version means the db was created before
                    # db versioning existed
                    rev = base
                # If we DO have dashboards.presentation_type but no
                # cds_alembic_version, this is simply a new installation:
                # the dashboards table was created before this first check.

            app_log.debug("Stamping dashboards database schema version %s",
                          rev)
            alembic.command.stamp(cfg, rev)

        else:
            alembic_revision = engine.execute(
                'SELECT version_num FROM cds_alembic_version').first()[0]

            if alembic_revision == base:
                if 'dashboards' in current_table_names:
                    inspector = inspect(engine)
                    cols = inspector.get_columns('dashboards')
                    colnames = [c.get('name', '') for c in cols]
                    if 'presentation_type' in colnames:
                        # For people who got stuck in the broken upgrade before - actually they are NOT on base...
                        rev = '260ac5c1a9e0'
                        app_log.debug(
                            "Stamping dashboards database schema version %s",
                            rev)
                        alembic.command.stamp(cfg, rev)

    # check database schema version
    # it should always be defined at this point
    alembic_revision = engine.execute(
        'SELECT version_num FROM cds_alembic_version').first()[0]

    if alembic_revision == head:
        app_log.debug("database dashboards schema version found: %s",
                      alembic_revision)
    else:
        raise DatabaseSchemaMismatch(
            "Found database schema version {found} != {head}. "
            "Backup your database and run `jupyterhub upgrade-db`"
            " to upgrade to the latest schema.".format(found=alembic_revision,
                                                       head=head))