def get(self, *args, **kwargs):
    # pre_get can be a coroutine in subclasses
    # assign and yield in two steps to avoid tornado 3 issues
    res = self.pre_get()
    yield gen.maybe_future(res)
    res = super(AuthenticatedZMQStreamHandler, self).get(*args, **kwargs)
    yield gen.maybe_future(res)
def post(self):
    username = self.get_argument('username', None)
    password = self.get_argument('password', None)
    email = self.get_argument('email', None)
    avatar = self.default_avatar_url
    if username is None or password is None or email is None:
        raise exceptions.EmptyFields()
    else:
        user = yield gen.maybe_future(User.get_by_name(username))
        if user is not None:
            raise exceptions.UsernameAlreadyExists()
        user = yield gen.maybe_future(User.get_by_email(email))
        if user is not None:
            raise exceptions.EmailAlreadyExists()
        password = encrypt_password(password)
        user = yield gen.maybe_future(
            User.create(username=username, password=password,
                        email=email, avatar=avatar))
        # Update the permission after `seconds` seconds.
        seconds = Level['time'][Roles.Comment]
        wait = datetime.now(get_localzone()) + timedelta(seconds=seconds)
        update_permission.apply_async((user, Roles.Comment), eta=wait)
        # Registration succeeded; log the user in.
        token = set_cookie_session(self, username, 1)
        raise gen.Return({'username': username, 'token': token})
def post(self, topic_id):
    name = self.get_argument('name', None)
    description = self.get_argument('description', None)
    rules = self.get_argument('rules', None)
    avatar = self.get_argument('avatar', None)
    why = self.get_argument('why', None)
    if not all([name, description, rules, why]):
        raise exceptions.EmptyFields()
    else:
        exists = yield gen.maybe_future(Topic.get_by_name(name))
        if exists:
            raise exceptions.TopicNameAlreadyExists()
        else:
            created_user = self.current_user
            topic = yield gen.maybe_future(
                Topic.create(name, created_user, avatar,
                             description, rules, why))
            # Update gold.
            update_gold.apply_async(('new_proposal', created_user))
            # Update proposal state.
            seconds = Level['time']['proposal']
            wait = datetime.now(get_localzone()) + timedelta(seconds=seconds)
            check_proposal.apply_async((topic.id,), eta=wait)
def delete(self, path):
    # Deletes the session with the given path.
    sm = self.session_manager
    sessions = yield gen.maybe_future(sm.list_sessions())
    key_path = ["notebook", "path"]
    nb_name = path.strip('/')
    session_id = None
    for session in sessions:
        nb_path = self._get_from_dict(session, key_path)
        if nb_path == nb_name:
            session_id = session["id"]
    if session_id is not None:
        try:
            yield gen.maybe_future(sm.delete_session(session_id))
            exists = yield gen.maybe_future(
                self.contents_manager.file_exists(path))
            if exists:
                cm = self.contents_manager
                self.log.warning('Deleting %s', path)
                yield gen.maybe_future(cm.delete(path))
            else:
                self.log.warning('Path %s does not exist', path)
        except KeyError:
            # the kernel was deleted but the session wasn't!
            raise web.HTTPError(410, "Kernel deleted before session")
    self.set_status(204)
    self.finish()
def post(self, topic_id):
    title = self.get_argument('title', None)
    keywords = self.get_argument('keywords', None)
    content = self.get_argument('content', '')
    keep_silent = int(self.get_argument('keep_silent', 0))
    is_draft = int(self.get_argument('is_draft', 0))
    if not all([title, keywords]):
        raise exceptions.EmptyFields()
    else:
        can_post = yield gen.maybe_future(Topic.can_post(topic_id))
        if not can_post:
            raise exceptions.TopicIsNotAccepted()
        exists = yield gen.maybe_future(Post.get_by_title(title))
        if exists:
            raise exceptions.PostTitleAlreadyExists()
        else:
            username = self.current_user
            yield gen.maybe_future(
                Post.create(username, topic_id, title, keywords, content,
                            keep_silent=keep_silent, is_draft=is_draft))
            # Update gold.
            update_gold.apply_async(('new_post', username))
def check_image(self, kw):
    if self.context.config.MAX_ID_LENGTH > 0:
        # Check if an image with a uuid exists in storage
        exists = yield gen.maybe_future(
            self.context.modules.storage.exists(
                kw["image"][:self.context.config.MAX_ID_LENGTH]))
        if exists:
            kw["image"] = kw["image"][:self.context.config.MAX_ID_LENGTH]

    url = self.request.uri

    if not self.validate(kw["image"]):
        self._error(400, "No original image was specified in the given URL")
        return

    kw["request"] = self.request
    kw["image"] = quote(kw["image"].encode("utf-8"))
    kw["config"] = self.context.config
    self.context.request = RequestParameters(**kw)

    has_none = not self.context.request.unsafe and not self.context.request.hash
    has_both = self.context.request.unsafe and self.context.request.hash

    if has_none or has_both:
        self._error(400, "URL does not have hash or unsafe, or has both: %s" % url)
        return

    if self.context.request.unsafe and not self.context.config.ALLOW_UNSAFE_URL:
        self._error(400, "URL has unsafe but unsafe is not allowed by the config: %s" % url)
        return

    if self.context.config.USE_BLACKLIST:
        blacklist = yield self.get_blacklist_contents()
        if self.context.request.image_url in blacklist:
            self._error(400, "Source image url has been blacklisted: %s"
                        % self.context.request.image_url)
            return

    url_signature = self.context.request.hash
    if url_signature:
        signer = self.context.modules.url_signer(self.context.server.security_key)

        url_to_validate = Url.encode_url(url).replace(
            "/%s/" % self.context.request.hash, "")
        valid = signer.validate(url_signature, url_to_validate)

        if not valid and self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
            # Retrieve the security key for this image if it has been seen before
            security_key = yield gen.maybe_future(
                self.context.modules.storage.get_crypto(
                    self.context.request.image_url))
            if security_key is not None:
                signer = self.context.modules.url_signer(security_key)
                valid = signer.validate(url_signature, url_to_validate)

        if not valid:
            self._error(400, "Malformed URL: %s" % url)
            return

    self.execute_image_operations()
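# Context for the signature check above: thumbor-style URL signatures are an
# HMAC over the URL path, keyed with the server's security key. This is a
# simplified illustration of what a url_signer validates, not thumbor's
# actual implementation (which lives in its url_signers module):
import base64
import hashlib
import hmac


def sign(security_key, url):
    # HMAC-SHA1 over the URL, urlsafe-base64 encoded
    digest = hmac.new(security_key.encode(), url.encode(), hashlib.sha1).digest()
    return base64.urlsafe_b64encode(digest)


def validate(security_key, signature, url):
    # constant-time comparison against the signature carried in the URL
    return hmac.compare_digest(sign(security_key, url), signature)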
def get(self):
    username = self.current_user
    subs = yield gen.maybe_future(Subscription.list_by_user(username))
    result = {
        'total': len(subs),
        'topics': [(yield gen.maybe_future(s.to_dict())) for s in subs],
    }
    raise gen.Return(result)
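# Note on the pattern above: `yield` inside a list comprehension only works
# on Python 2 (Python 3 rejects it). A Python-3-safe sketch of the same
# handler, assuming the same Subscription model, gathers results in an
# explicit loop instead:
@gen.coroutine
def get(self):
    username = self.current_user
    subs = yield gen.maybe_future(Subscription.list_by_user(username))
    topics = []
    for s in subs:
        topic = yield gen.maybe_future(s.to_dict())
        topics.append(topic)
    raise gen.Return({'total': len(subs), 'topics': topics})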
def delete(self, topic_id):
    username = self.current_user
    s = yield gen.maybe_future(
        Subscription.get_by_user_topic(username, topic_id))
    if s:
        yield gen.maybe_future(s.delete())
    else:
        raise exceptions.TopicHasNotBeenSubscribed()
def get(self, path='', commit='HEAD', path2=None):
    """runs nbdiff for a given notebook"""
    cm = self.contents_manager
    if path2:
        diff = yield gen.maybe_future(cm.file_diff(path, path2))
    else:
        diff = yield gen.maybe_future(cm.git_diff(path, commit))
    self.finish(diff)
def post(self, topic_id):
    username = self.current_user
    s = yield gen.maybe_future(
        Subscription.get_by_user_topic(username, topic_id))
    if s:
        raise exceptions.TopicAlreadySubscribed()
    else:
        yield gen.maybe_future(Subscription.create(username, topic_id))
def get_blacklist_contents(self):
    filename = 'blacklist.txt'
    exists = yield gen.maybe_future(
        self.context.modules.storage.exists(filename))
    if exists:
        blacklist = yield gen.maybe_future(
            self.context.modules.storage.get(filename))
        raise tornado.gen.Return(blacklist)
    else:
        raise tornado.gen.Return("")
def get(self):
    count = int(self.get_argument('count', 4))
    ans = yield gen.maybe_future(Announcement.list_by_count(count))
    raise gen.Return({
        'total': len(ans),
        'announcements': [(yield gen.maybe_future(an.to_dict())) for an in ans],
    })
def trigger_reactions(self):
    if self.cbs:
        cb = self.cbs.pop()
        if self.react_data is None:
            drew_cards = yield gen.maybe_future(cb())
            yield self.reacted(drew_cards)
        else:
            drew_cards = yield gen.maybe_future(cb(self.react_data))
            yield self.reacted(drew_cards)
def data_received(self, chunk):
    if self._decompressor:
        compressed_data = chunk
        while compressed_data:
            decompressed = self._decompressor.decompress(
                compressed_data, self._chunk_size)
            if decompressed:
                yield gen.maybe_future(
                    self._delegate.data_received(decompressed))
            compressed_data = self._decompressor.unconsumed_tail
    else:
        yield gen.maybe_future(self._delegate.data_received(chunk))
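# The branch above follows zlib's streaming API: decompress() with a
# max_length bound can leave unread input in `unconsumed_tail`, which must
# be fed back in until it is empty. A standalone sketch of the same pattern:
import zlib


def decompress_bounded(data, chunk_size=64 * 1024):
    """Yield decompressed chunks of at most chunk_size bytes."""
    decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)  # accept gzip framing
    while data:
        out = decompressor.decompress(data, chunk_size)
        if out:
            yield out
        data = decompressor.unconsumed_tail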
def post(self):
    # Creates a new session
    # (unless a session already exists for the named session)
    sm = self.session_manager

    model = self.get_json_body()
    if model is None:
        raise web.HTTPError(400, "No JSON data provided")

    if 'notebook' in model and 'path' in model['notebook']:
        self.log.warning('Sessions API changed, see updated swagger docs')
        model['path'] = model['notebook']['path']
        model['type'] = 'notebook'

    try:
        path = model['path']
    except KeyError:
        raise web.HTTPError(400, "Missing field in JSON data: path")

    try:
        mtype = model['type']
    except KeyError:
        raise web.HTTPError(400, "Missing field in JSON data: type")

    name = model.get('name', None)
    kernel = model.get('kernel', {})
    kernel_name = kernel.get('name', None)
    kernel_id = kernel.get('id', None)

    if not kernel_id and not kernel_name:
        self.log.debug("No kernel specified, using default kernel")
        kernel_name = None

    exists = yield gen.maybe_future(sm.session_exists(path=path))
    if exists:
        model = yield gen.maybe_future(sm.get_session(path=path))
    else:
        try:
            model = yield gen.maybe_future(
                sm.create_session(path=path, kernel_name=kernel_name,
                                  kernel_id=kernel_id, name=name,
                                  type=mtype))
        except NoSuchKernel:
            msg = ("The '%s' kernel is not available. Please pick another "
                   "suitable kernel instead, or install that kernel." % kernel_name)
            status_msg = '%s not found' % kernel_name
            self.log.warning('Kernel not found: %s' % kernel_name)
            self.set_status(501)
            self.finish(json.dumps(dict(message=msg, short_message=status_msg)))
            return

    location = url_path_join(self.base_url, 'api', 'sessions', model['id'])
    self.set_header('Location', location)
    self.set_status(201)
    self.finish(json.dumps(model, default=date_default))
def post(self):
    username = self.current_user
    robbed = gold.get(username, random_gold)
    if robbed == -1:
        raise exceptions.CanNotRobAgain()
    elif robbed == 0:
        raise exceptions.NoMoreGoldToday()
    user = yield gen.maybe_future(User.get_by_name(username))
    yield gen.maybe_future(user.update(gold=robbed))
    raise gen.Return({'gold': robbed})
def delete(self, post_id):
    username = self.current_user
    f = yield gen.maybe_future(Favorite.get_by_user_post(username, post_id))
    if f:
        yield gen.maybe_future(f.delete())
        count = yield gen.maybe_future(Favorite.count_by_post(post_id))
        # Update gold.
        update_gold.apply_async(("cancel_post_be_favorite", post_id))
        raise gen.Return({"count": count})
    else:
        raise exceptions.PostHasNotBeenFavorited()
def post(self, post_id):
    username = self.current_user
    f = yield gen.maybe_future(Favorite.get_by_user_post(username, post_id))
    if f:
        raise exceptions.PostAlreadyFavorited()
    else:
        yield gen.maybe_future(Favorite.create(username, post_id))
        count = yield gen.maybe_future(Favorite.count_by_post(post_id))
        # Update gold.
        update_gold.apply_async(("post_be_favorite", post_id))
        raise gen.Return({"count": count})
def get(self):
    users = yield gen.maybe_future(User.count())
    topics = yield gen.maybe_future(Topic.count())
    posts = yield gen.maybe_future(Post.count())
    comments = yield gen.maybe_future(Comment.count())
    raise gen.Return({
        'users_count': users,
        'topics_count': topics,
        'posts_count': posts,
        'comments_count': comments,
    })
def gain_helper(self, card_obj, from_supply=True, announcement=None):
    if announcement is not None and announcement != "":
        self.game.announce(announcement)
    self.discard_pile.append(card_obj)
    self.update_discard_size()
    yield gen.maybe_future(card_obj.on_gain())
    yield gen.maybe_future(self.hand.do_reactions("Gain", card_obj))
    for i in self.get_opponents():
        yield gen.maybe_future(i.hand.do_reactions("OpponentGain", card_obj))
    if card_obj in self.all_cards():
        yield gen.maybe_future(self.resolve_on_gain_effects(card_obj))
def patch(self, topic_id):
    fields = dict()
    for key in ('description', 'rules', 'avatar', 'state'):
        value = self.get_argument(key, None)
        if value is not None:
            fields[key] = value
    if not fields:
        raise exceptions.EmptyFields()
    else:
        topic = yield gen.maybe_future(Topic.get(topic_id))
        yield gen.maybe_future(topic.update(**fields))
def get(self):
    username = self.current_user
    users = yield gen.maybe_future(Following.list_following(username))
    followings = list()
    if users:
        ids = [user.following_id for user in users]
        users = yield gen.maybe_future(User.get_multi(*ids))
        followings = [user.information() for user in users]
    raise gen.Return({
        'followings': followings,
        'total': len(followings),
    })
def get(self):
    username = self.current_user
    users = yield gen.maybe_future(Blocked.list_blocked(username))
    blockeds = list()
    if users:
        ids = [user.blocked_id for user in users]
        users = yield gen.maybe_future(User.get_multi(*ids))
        blockeds = [user.information() for user in users]
    raise gen.Return({
        'blockeds': blockeds,
        'total': len(blockeds),
    })
def patch(self, post_id):
    keywords = self.get_argument('keywords', None)
    content = self.get_argument('content', None)
    keep_silent = int(self.get_argument('keep_silent', 0))
    is_draft = int(self.get_argument('is_draft', 0))
    # `keep_silent` and `is_draft` default to 0 and can never be None,
    # so only the text fields can signal an empty request.
    if keywords is None and content is None:
        raise exceptions.EmptyFields()
    else:
        post = yield gen.maybe_future(Post.get(post_id))
        yield gen.maybe_future(
            post.update(keywords, content, keep_silent, is_draft))
def patch(self):
    fields = dict()
    for key in self._fields.split():
        value = self.get_argument(key, None)
        if value:
            fields[key] = value
    if not fields:
        raise exceptions.EmptyFields()
    else:
        username = self.current_user
        user = yield gen.maybe_future(User.get_by_name(username))
        yield gen.maybe_future(user.update(**fields))
def add_user(self, user): """Hook called whenever a new user is added If self.create_system_users, the user will attempt to be created if it doesn't exist. """ user_exists = yield gen.maybe_future(self.system_user_exists(user)) if not user_exists: if self.create_system_users: yield gen.maybe_future(self.add_system_user(user)) else: raise KeyError("User %s does not exist." % user.name) yield gen.maybe_future(super().add_user(user))
def create_session(self, path=None, kernel_name=None):
    """Creates a session and returns its model"""
    session_id = self.new_session_id()
    # allow the contents manager to specify the kernel's cwd
    kernel_path = self.contents_manager.get_kernel_path(path=path)
    kernel_id = yield gen.maybe_future(
        self.kernel_manager.start_kernel(path=kernel_path,
                                         kernel_name=kernel_name))
    result = yield gen.maybe_future(
        self.save_session(session_id, path=path, kernel_id=kernel_id))
    # py2-compat
    raise gen.Return(result)
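# `raise gen.Return(result)` is the Python 2 spelling for returning a value
# from a @gen.coroutine generator (generators could not `return` a value
# before Python 3.3). On Python 3 the two forms below are equivalent:
@gen.coroutine
def py2_style():
    raise gen.Return(42)


@gen.coroutine
def py3_style():
    return 42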
def add_user(self, user): """Add a new user If self.create_system_users, the user will attempt to be created. """ user_exists = yield gen.maybe_future(self.system_user_exists(user)) if not user_exists: if self.create_system_users: yield gen.maybe_future(self.add_system_user(user)) else: raise KeyError("User %s does not exist." % user.name) yield gen.maybe_future(super().add_user(user))
def get(self):
    num = int(self.get_argument('num', 30))
    hots = yield gen.maybe_future(Post.hot_list(num))
    posts = list()
    for post in hots:
        info = yield gen.maybe_future(_post_info(post))
        posts.append(info)
    result = {
        'total': len(posts),
        'posts': posts,
    }
    raise gen.Return(result)
def buy_card(self, card_title):
    if (self.buys > 0 and self.game.supply.get_count(card_title) > 0
            and card_title not in self.banned):
        new_card = self.gen_new_card(card_title)
        self.game.announce("<b>" + self.name + "</b> buys " + new_card.log_string())
        self.buys -= 1
        self.balance -= new_card.get_price()
        self.has_bought_cards = True
        self.bought_cards.append(new_card)
        self.game.remove_from_supply(card_title)
        yield gen.maybe_future(new_card.on_buy())
        yield gen.maybe_future(self.resolve_on_buy_effects(new_card))
        yield self.gain_helper(new_card, False, None)
        self.update_resources(True)
def splash_fetch(self, url, task):
    '''Fetch with splash'''
    start_time = time.time()
    self.on_fetch('splash', task)
    handle_error = lambda x: self.handle_error('splash', url, task,
                                               start_time, x)

    # check that the splash endpoint is configured
    if not self.splash_endpoint:
        result = {
            "orig_url": url,
            "content": "splash is not enabled.",
            "headers": {},
            "status_code": 501,
            "url": url,
            "time": time.time() - start_time,
            "cookies": {},
            "save": task.get('fetch', {}).get('save')
        }
        logger.warning("[501] %s:%s %s 0s", task.get('project'),
                       task.get('taskid'), url)
        raise gen.Return(result)

    # setup request parameters
    fetch = self.pack_tornado_request_parameters(url, task)
    task_fetch = task.get('fetch', {})
    for each in task_fetch:
        if each not in fetch:
            fetch[each] = task_fetch[each]

    # robots.txt
    if task_fetch.get('robots_txt', False):
        user_agent = fetch['headers']['User-Agent']
        can_fetch = yield self.can_fetch(user_agent, url)
        if not can_fetch:
            error = tornado.httpclient.HTTPError(403, 'Disallowed by robots.txt')
            raise gen.Return(handle_error(error))

    request_conf = {
        'follow_redirects': False,
        'headers': {
            'Content-Type': 'application/json',
        }
    }
    request_conf['connect_timeout'] = fetch.get('connect_timeout', 20)
    request_conf['request_timeout'] = fetch.get('request_timeout', 120) + 1

    session = cookies.RequestsCookieJar()
    if 'Cookie' in fetch['headers']:
        c = http_cookies.SimpleCookie()
        try:
            c.load(fetch['headers']['Cookie'])
        except AttributeError:
            c.load(utils.utf8(fetch['headers']['Cookie']))
        for key in c:
            session.set(key, c[key])
        del fetch['headers']['Cookie']
    if 'cookies' in fetch:
        session.update(fetch['cookies'])
        del fetch['cookies']

    request = tornado.httpclient.HTTPRequest(url=fetch['url'])
    cookie_header = cookies.get_cookie_header(session, request)
    if cookie_header:
        fetch['headers']['Cookie'] = cookie_header

    # making requests
    fetch['lua_source'] = self.splash_lua_source
    fetch['headers'] = dict(fetch['headers'])
    try:
        request = tornado.httpclient.HTTPRequest(
            url=self.splash_endpoint, method="POST",
            body=json.dumps(fetch), **request_conf)
    except Exception as e:
        raise gen.Return(handle_error(e))

    try:
        response = yield gen.maybe_future(self.http_client.fetch(request))
    except tornado.httpclient.HTTPError as e:
        if e.response:
            response = e.response
        else:
            raise gen.Return(handle_error(e))

    if not response.body:
        raise gen.Return(handle_error(Exception('no response from splash')))

    result = {}
    try:
        result = json.loads(utils.text(response.body))
        assert 'status_code' in result, result
    except ValueError as e:
        logger.error("result is not json: %r", response.body[:500])
        raise gen.Return(handle_error(e))
    except Exception as e:
        if response.error:
            result['error'] = utils.text(response.error)
        raise gen.Return(handle_error(e))

    if result.get('status_code', 200):
        logger.info("[%d] %s:%s %s %.2fs", result['status_code'],
                    task.get('project'), task.get('taskid'), url,
                    result['time'])
    else:
        logger.error("[%d] %s:%s %s, %r %.2fs", result['status_code'],
                     task.get('project'), task.get('taskid'), url,
                     result['content'], result['time'])

    raise gen.Return(result)
def process_emitBatch(self, seqid, iprot, oprot):
    args = emitBatch_args()
    args.read(iprot)
    iprot.readMessageEnd()
    yield gen.maybe_future(self._handler.emitBatch(args.batch))
def post(self, path, checkpoint_id):
    """post restores a file from a checkpoint"""
    cm = self.contents_manager
    yield gen.maybe_future(cm.restore_checkpoint(checkpoint_id, path))
    self.set_status(204)
    self.finish()
def init_users(self): """Load users into and from the database""" db = self.db if self.admin_users and not self.authenticator.admin_users: self.log.warning( "\nJupyterHub.admin_users is deprecated." "\nUse Authenticator.admin_users instead." ) self.authenticator.admin_users = self.admin_users admin_users = [ self.authenticator.normalize_username(name) for name in self.authenticator.admin_users ] self.authenticator.admin_users = set(admin_users) # force normalization for username in admin_users: if not self.authenticator.validate_username(username): raise ValueError("username %r is not valid" % username) if not admin_users: self.log.warning("No admin users, admin interface will be unavailable.") self.log.warning("Add any administrative users to `c.Authenticator.admin_users` in config.") new_users = [] for name in admin_users: # ensure anyone specified as admin in config is admin in db user = orm.User.find(db, name) if user is None: user = orm.User(name=name, admin=True) new_users.append(user) db.add(user) else: user.admin = True # the admin_users config variable will never be used after this point. # only the database values will be referenced. whitelist = [ self.authenticator.normalize_username(name) for name in self.authenticator.whitelist ] self.authenticator.whitelist = set(whitelist) # force normalization for username in whitelist: if not self.authenticator.validate_username(username): raise ValueError("username %r is not valid" % username) if not whitelist: self.log.info("Not using whitelist. Any authenticated user will be allowed.") # add whitelisted users to the db for name in whitelist: user = orm.User.find(db, name) if user is None: user = orm.User(name=name) new_users.append(user) db.add(user) db.commit() # Notify authenticator of all users. # This ensures Auth whitelist is up-to-date with the database. # This lets whitelist be used to set up initial list, # but changes to the whitelist can occur in the database, # and persist across sessions. for user in db.query(orm.User): yield gen.maybe_future(self.authenticator.add_user(user)) db.commit() # can add_user touch the db?
def authenticate(self, data):
    return gen.maybe_future(
        self.authenticator.get_authenticated_user(self, data))
def delete_session(self, session_id):
    """Deletes the row in the session database with given session_id"""
    session = self.get_session(session_id=session_id)
    yield gen.maybe_future(
        self.kernel_manager.shutdown_kernel(session['kernel']['id']))
    self.cursor.execute("DELETE FROM session WHERE session_id=?", (session_id,))
def spawn(self, options=None): """Start the user's spawner depending from the value of JupyterHub.allow_named_servers if False: JupyterHub expects only one single-server per user url of the server will be /user/:name if True: JupyterHub expects more than one single-server per user url of the server will be /user/:name/:server_name """ db = self.db if self.allow_named_servers: if options is not None and 'server_name' in options: server_name = options['server_name'] else: server_name = default_server_name(self) base_url = url_path_join(self.base_url, server_name) else: server_name = '' base_url = self.base_url orm_server = orm.Server( name=server_name, base_url=base_url, ) self.servers.append(orm_server) api_token = self.new_api_token() db.commit() server = Server(orm_server=orm_server) spawner = self.spawner # Passing server_name to the spawner spawner.server_name = server_name spawner.user_options = options or {} # we are starting a new server, make sure it doesn't restore state spawner.clear_state() # create API and OAuth tokens spawner.api_token = api_token spawner.admin_access = self.settings.get('admin_access', False) client_id = 'user-%s' % self.escaped_name if server_name: client_id = '%s-%s' % (client_id, server_name) spawner.oauth_client_id = client_id oauth_provider = self.settings.get('oauth_provider') if oauth_provider: client_store = oauth_provider.client_authenticator.client_store try: oauth_client = client_store.fetch_by_client_id(client_id) except ClientNotFoundError: oauth_client = None # create a new OAuth client + secret on every launch, # except for resuming containers. if oauth_client is None or not spawner.will_resume: client_store.add_client( client_id, api_token, url_path_join(self.url, 'oauth_callback'), ) db.commit() # trigger pre-spawn hook on authenticator authenticator = self.authenticator if (authenticator): yield gen.maybe_future(authenticator.pre_spawn_start( self, spawner)) self.spawn_pending = True # wait for spawner.start to return try: f = spawner.start() # commit any changes in spawner.start (always commit db changes before yield) db.commit() ip_port = yield gen.with_timeout( timedelta(seconds=spawner.start_timeout), f) if ip_port: # get ip, port info from return value of start() server.ip, server.port = ip_port else: # prior to 0.7, spawners had to store this info in user.server themselves. # Handle < 0.7 behavior with a warning, assuming info was stored in db by the Spawner. self.log.warning( "DEPRECATION: Spawner.start should return (ip, port) in JupyterHub >= 0.7" ) if spawner.api_token != api_token: # Spawner re-used an API token, discard the unused api_token orm_token = orm.APIToken.find(self.db, api_token) if orm_token is not None: self.db.delete(orm_token) self.db.commit() except Exception as e: if isinstance(e, gen.TimeoutError): self.log.warning( "{user}'s server failed to start in {s} seconds, giving up" .format( user=self.name, s=spawner.start_timeout, )) e.reason = 'timeout' else: self.log.error( "Unhandled error starting {user}'s server: {error}".format( user=self.name, error=e, )) e.reason = 'error' try: yield self.stop() except Exception: self.log.error( "Failed to cleanup {user}'s server that failed to start". 
format(user=self.name, ), exc_info=True) # raise original exception raise e spawner.start_polling() # store state self.state = spawner.get_state() self.last_activity = datetime.utcnow() db.commit() self.waiting_for_response = True try: yield server.wait_up(http=True, timeout=spawner.http_timeout) except Exception as e: if isinstance(e, TimeoutError): self.log.warning( "{user}'s server never showed up at {url} " "after {http_timeout} seconds. Giving up".format( user=self.name, url=server.url, http_timeout=spawner.http_timeout, )) e.reason = 'timeout' else: e.reason = 'error' self.log.error( "Unhandled error waiting for {user}'s server to show up at {url}: {error}" .format( user=self.name, url=server.url, error=e, )) try: yield self.stop() except Exception: self.log.error( "Failed to cleanup {user}'s server that failed to start". format(user=self.name, ), exc_info=True) # raise original TimeoutError raise e finally: self.waiting_for_response = False self.spawn_pending = False return self
def get(self, *args, **kwargs):
    # pre_get can be a coroutine in subclasses
    # assign and yield in two steps to avoid tornado 3 issues
    res = self.pre_get()
    yield gen.maybe_future(res)
    super(AuthenticatedZMQStreamHandler, self).get(*args, **kwargs)
def post(self, path=''):
    cm = self.contents_manager
    yield gen.maybe_future(cm.trust_notebook(path))
    self.set_status(201)
    self.finish()
def test_get_info(self):
    host = zk.Host('localhost', 2181)
    host.srvr = MagicMock(return_value=gen.maybe_future('Some result'))

    res = yield host.get_info()

    host.srvr.assert_called_once()
    self.assertIsInstance(res, dict)
def spawn(self, options=None): """Start the user's spawner""" db = self.db self.server = orm.Server( cookie_name=self.cookie_name, base_url=self.base_url, ) db.add(self.server) db.commit() api_token = self.new_api_token() db.commit() spawner = self.spawner spawner.user_options = options or {} # we are starting a new server, make sure it doesn't restore state spawner.clear_state() spawner.api_token = api_token # trigger pre-spawn hook on authenticator authenticator = self.authenticator if (authenticator): yield gen.maybe_future(authenticator.pre_spawn_start( self, spawner)) self.spawn_pending = True # wait for spawner.start to return try: f = spawner.start() # commit any changes in spawner.start (always commit db changes before yield) db.commit() ip_port = yield gen.with_timeout( timedelta(seconds=spawner.start_timeout), f) if ip_port: # get ip, port info from return value of start() self.server.ip, self.server.port = ip_port else: # prior to 0.7, spawners had to store this info in user.server themselves. # Handle < 0.7 behavior with a warning, assuming info was stored in db by the Spawner. self.log.warning( "DEPRECATION: Spawner.start should return (ip, port) in JupyterHub >= 0.7" ) except Exception as e: if isinstance(e, gen.TimeoutError): self.log.warning( "{user}'s server failed to start in {s} seconds, giving up" .format( user=self.name, s=spawner.start_timeout, )) e.reason = 'timeout' else: self.log.error( "Unhandled error starting {user}'s server: {error}".format( user=self.name, error=e, )) e.reason = 'error' try: yield self.stop() except Exception: self.log.error( "Failed to cleanup {user}'s server that failed to start". format(user=self.name, ), exc_info=True) # raise original exception raise e spawner.start_polling() # store state self.state = spawner.get_state() self.last_activity = datetime.utcnow() db.commit() self.spawn_pending = False try: yield self.server.wait_up(http=True, timeout=spawner.http_timeout) except Exception as e: if isinstance(e, TimeoutError): self.log.warning( "{user}'s server never showed up at {url} " "after {http_timeout} seconds. Giving up".format( user=self.name, url=self.server.url, http_timeout=spawner.http_timeout, )) e.reason = 'timeout' else: e.reason = 'error' self.log.error( "Unhandled error waiting for {user}'s server to show up at {url}: {error}" .format( user=self.name, url=self.server.url, error=e, )) try: yield self.stop() except Exception: self.log.error( "Failed to cleanup {user}'s server that failed to start". format(user=self.name, ), exc_info=True) # raise original TimeoutError raise e return self
def get(self, session_id):
    # Returns the JSON model for a single session
    sm = self.session_manager
    model = yield gen.maybe_future(sm.get_session(session_id=session_id))
    self.finish(json.dumps(model, default=date_default))
def make_coro():
    coro = gen.maybe_future(func(*args, **kwargs))
    if timeout is None:
        return coro
    else:
        return gen.with_timeout(timedelta(seconds=timeout), coro)
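# What makes make_coro work for both sync and async callables is
# gen.maybe_future's contract: a plain return value is wrapped in an
# already-resolved Future, while a Future is passed through unchanged, so
# the result can always be yielded. A minimal, self-contained demo
# (function names here are illustrative, not from the snippet above):
from tornado import gen, ioloop


def sync_answer():
    return 42  # plain value


@gen.coroutine
def async_answer():
    raise gen.Return(42)  # resolves to a Future


@gen.coroutine
def demo():
    a = yield gen.maybe_future(sync_answer())   # 42 wrapped in a Future
    b = yield gen.maybe_future(async_answer())  # Future passed through
    raise gen.Return((a, b))


if __name__ == '__main__':
    print(ioloop.IOLoop.current().run_sync(demo))  # (42, 42)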
def _save(self, model, path):
    """Save an existing file."""
    self.log.info(u"Saving file at %s", path)
    model = yield gen.maybe_future(self.contents_manager.save(model, path))
    validate_model(model, expect_content=False)
    self._finish_model(model)
def _fetch(self, url): """ :param url: :type url: :return: :rtype: """ fetch_result = FetchResult() loader_result = None storage = self.context.modules.storage yield self.acquire_url_lock(url) try: fetch_result.buffer = yield gen.maybe_future(storage.get(url)) if fetch_result.buffer is not None: self.release_url_lock(url) fetch_result.successful = True self.context.metrics.incr('storage.hit') else: self.context.metrics.incr('storage.miss') loader_result = yield self.context.modules.loader.load(self.context, url) finally: self.release_url_lock(url) if loader_result is not None: if isinstance(loader_result, LoaderResult): # TODO _fetch should probably return a result object vs a list to # to allow returning metadata if not loader_result.successful: fetch_result.buffer = None fetch_result.loader_error = loader_result.error raise gen.Return(fetch_result) fetch_result.buffer = loader_result.buffer else: # Handle old loaders fetch_result.buffer = loader_result if fetch_result.buffer is None: raise gen.Return(fetch_result) fetch_result.successful = True mime = BaseEngine.get_mimetype(fetch_result.buffer) self.context.request.extension = extension = EXTENSION.get(mime, '.jpg') try: if mime == 'image/gif' and self.context.config.USE_GIFSICLE_ENGINE: self.context.request.engine = self.context.modules.gif_engine else: self.context.request.engine = self.context.modules.engine self.context.request.engine.load(fetch_result.buffer, extension) if self.context.request.engine.image is None: fetch_result.successful = False fetch_result.buffer = None fetch_result.engine = self.context.request.engine fetch_result.engine_error = EngineResult.COULD_NOT_LOAD_IMAGE raise gen.Return(fetch_result) fetch_result.normalized = self.context.request.engine.normalize() # Allows engine or loader to override storage on the fly for the purpose of # marking a specific file as unstoreable storage = self.context.modules.storage is_no_storage = isinstance(storage, NoStorage) is_mixed_storage = isinstance(storage, MixedStorage) is_mixed_no_file_storage = is_mixed_storage and isinstance(storage.file_storage, NoStorage) if not (is_no_storage or is_mixed_no_file_storage): storage.put(url, fetch_result.buffer) storage.put_crypto(url) except Exception: fetch_result.successful = False finally: if not fetch_result.successful: raise fetch_result.buffer = None fetch_result.engine = self.context.request.engine raise gen.Return(fetch_result)
def exists(self, path):
    self._init_file_storage()
    result = yield gen.maybe_future(self.file_storage.exists(path))
    raise gen.Return(result)
def get(self, path=''): """get lists checkpoints for a file""" cm = self.contents_manager checkpoints = yield gen.maybe_future(cm.list_checkpoints(path)) data = json.dumps(checkpoints, default=date_default) self.finish(data)
def exponential_backoff(pass_func,
                        fail_message,
                        start_wait=0.2,
                        scale_factor=2,
                        max_wait=5,
                        timeout=10,
                        timeout_tolerance=0.1,
                        *args, **kwargs):
    """
    Exponentially backoff until `pass_func` is true.

    The `pass_func` function will wait with **exponential backoff** and
    **random jitter** for as many needed iterations of the Tornado loop,
    until reaching maximum `timeout` or truthiness. If `pass_func` is still
    returning false at `timeout`, a `TimeoutError` will be raised.

    The first iteration will begin with a wait time of `start_wait` seconds.
    Each subsequent iteration's wait time will scale up by continuously
    multiplying itself by `scale_factor`. This continues for each iteration
    until `pass_func` returns true or an iteration's wait time has reached
    the `max_wait` seconds per iteration.

    `pass_func` may be a future, although that is not entirely recommended.

    Parameters
    ----------
    pass_func
        function that is to be run
    fail_message : str
        message for a `TimeoutError`
    start_wait : optional
        initial wait time for the first iteration in seconds
    scale_factor : optional
        a multiplier to increase the wait time for each iteration
    max_wait : optional
        maximum wait time per iteration in seconds
    timeout : optional
        maximum time of total wait in seconds
    timeout_tolerance : optional
        a small multiplier used to add jitter to `timeout`'s deadline
    *args, **kwargs
        passed to `pass_func(*args, **kwargs)`

    Returns
    -------
    value of `pass_func(*args, **kwargs)`

    Raises
    ------
    TimeoutError
        If `pass_func` is still false at the end of the `timeout` period.

    Notes
    -----
    See https://www.awsarchitectureblog.com/2015/03/backoff.html
    for information about the algorithm and examples. We're using their
    full Jitter implementation equivalent.
    """
    loop = ioloop.IOLoop.current()
    deadline = loop.time() + timeout
    # add jitter to the deadline itself to prevent re-align of a bunch of
    # timing out calls once the deadline is reached.
    if timeout_tolerance:
        tol = timeout_tolerance * timeout
        deadline = random.uniform(deadline - tol, deadline + tol)
    scale = 1
    while True:
        ret = yield gen.maybe_future(pass_func(*args, **kwargs))
        # Truthy!
        if ret:
            return ret
        remaining = deadline - loop.time()
        if remaining < 0:
            # timeout exceeded
            break
        # add some random jitter to improve performance
        # this prevents overloading any single tornado loop iteration with
        # too many things
        dt = min(max_wait, remaining, random.uniform(0, start_wait * scale))
        scale *= scale_factor
        yield gen.sleep(dt)
    raise TimeoutError(fail_message)
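# A usage sketch for exponential_backoff, assuming it is decorated with
# @gen.coroutine as in its source module; `server_is_up` is a hypothetical
# readiness probe, not part of the function above:
import socket

from tornado import gen, ioloop


def server_is_up(host='127.0.0.1', port=8080):
    # True once the port accepts TCP connections; a plain bool is fine,
    # since exponential_backoff wraps the result in gen.maybe_future.
    try:
        socket.create_connection((host, port), timeout=1).close()
        return True
    except OSError:
        return False


@gen.coroutine
def wait_for_server():
    # Polls with jittered exponential backoff until the probe passes or
    # 30 seconds elapse, then raises TimeoutError('server never came up').
    yield exponential_backoff(
        server_is_up,
        'server never came up',
        start_wait=0.5,
        max_wait=4,
        timeout=30,
    )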
def delete(self, path, checkpoint_id):
    """delete clears a checkpoint for a given file"""
    cm = self.contents_manager
    yield gen.maybe_future(cm.delete_checkpoint(checkpoint_id, path))
    self.set_status(204)
    self.finish()
def fetch(self, url, method='GET', body=None, headers=None, fail=True,
          freeze=False, follow_redirects=True, max_redirects=5, **kwargs):
    if not headers:
        headers = {}

    default_headers = copy(self._headers)
    default_headers.update(headers)
    headers = default_headers

    if "Content-Type" not in headers:
        headers['Content-Type'] = 'application/json'

    if (method in self.METHODS_WITH_BODY
            and headers['Content-Type'] == 'application/json'):
        body = yield maybe_future(self._make_json(body))

    params = copy(self._default_args)
    params.update(kwargs)

    last_exc = RuntimeError("Something wrong")

    for _ in range(max_redirects + 1):
        request = HTTPRequest(b(url), method=method, body=body,
                              headers=HTTPHeaders(headers), **params)
        request.headers['Cookie'] = "; ".join(
            "{0.key}={0.value}".format(cookie)
            for cookie in self._cookies.values())

        need_redirect = False

        try:
            response = yield self._client.fetch(request, follow_redirects=False)
            response.fail = False
        except HTTPError as e:
            last_exc = e
            response = e.response

            if e.code == 599:
                response = e

            if e.code in (301, 302, 303, 307) and follow_redirects:
                need_redirect = True
            else:
                response.fail = True
                if e.response:
                    content_type = e.response.headers.get('Content-Type', '')
                    e.response._body = self._decode_body(
                        content_type, response.body)
                    if (e.response.body
                            and 'application/json' in content_type.lower()):
                        e.response._body = self._parse_json(e.response.body)

        if not need_redirect:
            break

        if not freeze:
            for cookie in response.headers.get_list('Set-Cookie'):
                self._cookies.load(cookie)
    else:
        response.fail = True

    if fail and response.fail:
        raise last_exc

    content_type = response.headers.get("Content-Type", '')
    response._body = self._decode_body(content_type, response.body)

    if response.body and 'json' in response.headers.get("Content-Type", ""):
        new_body = self._parse_json(response.body)
        response._body = _freeze_response(new_body)

    if not freeze:
        for cookie in response.headers.get_list('Set-Cookie'):
            self._cookies.load(cookie)

    raise Return(response)
def http_fetch(self, url, task):
    '''HTTP fetcher'''
    start_time = time.time()
    self.on_fetch('http', task)
    handle_error = lambda x: self.handle_error('http', url, task,
                                               start_time, x)

    # setup request parameters
    fetch = self.pack_tornado_request_parameters(url, task)
    task_fetch = task.get('fetch', {})

    session = cookies.RequestsCookieJar()
    # fix for tornado request obj
    if 'Cookie' in fetch['headers']:
        c = http_cookies.SimpleCookie()
        try:
            c.load(fetch['headers']['Cookie'])
        except AttributeError:
            c.load(utils.utf8(fetch['headers']['Cookie']))
        for key in c:
            session.set(key, c[key])
        del fetch['headers']['Cookie']
    if 'cookies' in fetch:
        session.update(fetch['cookies'])
        del fetch['cookies']

    max_redirects = task_fetch.get('max_redirects', 5)
    # we will handle redirects by hand to capture cookies
    fetch['follow_redirects'] = False

    # making requests
    while True:
        # robots.txt
        if task_fetch.get('robots_txt', False):
            can_fetch = yield self.can_fetch(
                fetch['headers']['User-Agent'], fetch['url'])
            if not can_fetch:
                error = tornado.httpclient.HTTPError(403, 'Disallowed by robots.txt')
                raise gen.Return(handle_error(error))

        try:
            request = tornado.httpclient.HTTPRequest(**fetch)
            # if cookie already in header, get_cookie_header wouldn't work
            old_cookie_header = request.headers.get('Cookie')
            if old_cookie_header:
                del request.headers['Cookie']
            cookie_header = cookies.get_cookie_header(session, request)
            if cookie_header:
                request.headers['Cookie'] = cookie_header
            elif old_cookie_header:
                request.headers['Cookie'] = old_cookie_header
        except Exception as e:
            logger.exception(fetch)
            raise gen.Return(handle_error(e))

        try:
            response = yield gen.maybe_future(self.http_client.fetch(request))
        except tornado.httpclient.HTTPError as e:
            if e.response:
                response = e.response
            else:
                raise gen.Return(handle_error(e))

        extract_cookies_to_jar(session, response.request, response.headers)
        if (response.code in (301, 302, 303, 307)
                and response.headers.get('Location')
                and task_fetch.get('allow_redirects', True)):
            if max_redirects <= 0:
                error = tornado.httpclient.HTTPError(
                    599,
                    'Maximum (%d) redirects followed'
                    % task_fetch.get('max_redirects', 5),
                    response)
                raise gen.Return(handle_error(error))
            if response.code in (302, 303):
                fetch['method'] = 'GET'
                if 'body' in fetch:
                    del fetch['body']
            fetch['url'] = quote_chinese(
                urljoin(fetch['url'], response.headers['Location']))
            fetch['request_timeout'] -= time.time() - start_time
            if fetch['request_timeout'] < 0:
                fetch['request_timeout'] = 0.1
            max_redirects -= 1
            continue

        result = {}
        result['orig_url'] = url
        result['content'] = response.body or ''
        result['headers'] = dict(response.headers)
        result['status_code'] = response.code
        result['url'] = response.effective_url or url
        result['time'] = time.time() - start_time
        result['cookies'] = session.get_dict()
        result['save'] = task_fetch.get('save')
        if response.error:
            result['error'] = utils.text(response.error)
        if 200 <= response.code < 300:
            logger.info("[%d] %s:%s %s %.2fs", response.code,
                        task.get('project'), task.get('taskid'),
                        url, result['time'])
        else:
            logger.warning("[%d] %s:%s %s %.2fs", response.code,
                           task.get('project'), task.get('taskid'),
                           url, result['time'])

        raise gen.Return(result)
def spawn(self, spawner_class, base_url='/', hub=None, authenticator=None, config=None):
    """Start the user's spawner"""
    db = inspect(self).session
    if hub is None:
        hub = db.query(Hub).first()

    self.server = Server(
        cookie_name='%s-%s' % (hub.server.cookie_name, quote(self.name, safe='')),
        base_url=url_path_join(base_url, 'user', self.escaped_name),
    )
    db.add(self.server)
    db.commit()

    api_token = self.new_api_token()
    db.commit()

    spawner = self.spawner = spawner_class(
        config=config,
        user=self,
        hub=hub,
        db=db,
        authenticator=authenticator,
    )
    # we are starting a new server, make sure it doesn't restore state
    spawner.clear_state()
    spawner.api_token = api_token

    # trigger pre-spawn hook on authenticator
    if authenticator:
        yield gen.maybe_future(authenticator.pre_spawn_start(self, spawner))

    self.spawn_pending = True
    # wait for spawner.start to return
    try:
        f = spawner.start()
        yield gen.with_timeout(timedelta(seconds=spawner.start_timeout), f)
    except Exception as e:
        if isinstance(e, gen.TimeoutError):
            self.log.warning(
                "{user}'s server failed to start in {s} seconds, giving up".format(
                    user=self.name,
                    s=spawner.start_timeout,
                ))
            e.reason = 'timeout'
        else:
            self.log.error(
                "Unhandled error starting {user}'s server: {error}".format(
                    user=self.name,
                    error=e,
                ))
            e.reason = 'error'
        try:
            yield self.stop()
        except Exception:
            self.log.error(
                "Failed to cleanup {user}'s server that failed to start".format(
                    user=self.name,
                ),
                exc_info=True)
        # raise original exception
        raise e

    spawner.start_polling()

    # store state
    self.state = spawner.get_state()
    self.last_activity = datetime.utcnow()
    db.commit()

    try:
        yield self.server.wait_up(http=True, timeout=spawner.http_timeout)
    except Exception as e:
        if isinstance(e, TimeoutError):
            self.log.warning(
                "{user}'s server never showed up at {url} "
                "after {http_timeout} seconds. Giving up".format(
                    user=self.name,
                    url=self.server.url,
                    http_timeout=spawner.http_timeout,
                ))
            e.reason = 'timeout'
        else:
            e.reason = 'error'
            self.log.error(
                "Unhandled error waiting for {user}'s server "
                "to show up at {url}: {error}".format(
                    user=self.name,
                    url=self.server.url,
                    error=e,
                ))
        try:
            yield self.stop()
        except Exception:
            self.log.error(
                "Failed to cleanup {user}'s server that failed to start".format(
                    user=self.name,
                ),
                exc_info=True)
        # raise original TimeoutError
        raise e

    self.spawn_pending = False
    return self
def handle_call(self, request, connection):
    # Read arg_1 so that handle_call is able to get the endpoint name and
    # find the endpoint handler.
    # The arg_1 value will be stored in the request.endpoint field.
    #
    # NOTE: after here, the correct way to access the value of arg_1 is
    # through request.endpoint. The original argstreams[0] is no longer
    # valid. If the user still tries to read from it, it will return empty.
    chunk = yield request.argstreams[0].read()
    response = None
    while chunk:
        request.endpoint += chunk
        chunk = yield request.argstreams[0].read()

    log.debug('Received a call to %s.', request.endpoint)

    tchannel = connection.tchannel

    # event: receive_request
    request.tracing.name = request.endpoint
    tchannel.event_emitter.fire(EventType.before_receive_request, request)

    handler = self.handlers.get(request.endpoint)
    if handler is None:
        handler = self.handlers[self.FALLBACK]

    if request.headers.get('as', None) != handler.req_serializer.name:
        connection.send_error(
            ErrorCode.bad_request,
            "Invalid arg scheme in request header",
            request.id,
        )
        raise gen.Return(None)

    request.serializer = handler.req_serializer
    response = DeprecatedResponse(
        id=request.id,
        checksum=request.checksum,
        tracing=request.tracing,
        connection=connection,
        headers={'as': request.headers.get('as', 'raw')},
        serializer=handler.resp_serializer,
    )

    connection.post_response(response)

    try:
        # New impl - the handler takes a request and returns a response
        if self._handler_returns_response:
            # convert deprecated req to new top-level req
            b = yield request.get_body()
            he = yield request.get_header()
            t = request.headers
            t = transport.to_kwargs(t)
            t = TransportHeaders(**t)
            new_req = Request(
                body=b,
                headers=he,
                transport=t,
                endpoint=request.endpoint,
            )

            # Not safe to have coroutine yield statements within a stack
            # context. The right way to do it is:
            #     with request_context(..):
            #         future = f()
            #     yield future
            with request_context(request.tracing):
                f = handler.endpoint(new_req)
            new_resp = yield gen.maybe_future(f)

            # instantiate a tchannel.Response
            new_resp = response_from_mixed(new_resp)

            # assign resp values to dep response
            response.write_header(new_resp.headers)
            if new_resp.body is not None:
                response.write_body(new_resp.body)

        # Dep impl - the handler is provided with a req & resp writer
        else:
            with request_context(request.tracing):
                f = handler.endpoint(request, response)
            yield gen.maybe_future(f)

        response.flush()
    except TChannelError as e:
        connection.send_error(
            e.code,
            e.message,
            request.id,
        )
    except Exception as e:
        msg = "An unexpected error has occurred from the handler"
        log.exception(msg)

        response.set_exception(TChannelError(e.message))
        connection.request_message_factory.remove_buffer(response.id)

        connection.send_error(ErrorCode.unexpected, msg, response.id)
        tchannel.event_emitter.fire(EventType.on_exception, request, e)

    raise gen.Return(response)
def get_pod_manifest(self):
    """
    Make a pod manifest that will spawn the current user's notebook pod.
    """
    if callable(self.singleuser_uid):
        singleuser_uid = yield gen.maybe_future(self.singleuser_uid(self))
    else:
        singleuser_uid = self.singleuser_uid

    if callable(self.singleuser_fs_gid):
        singleuser_fs_gid = yield gen.maybe_future(self.singleuser_fs_gid(self))
    else:
        singleuser_fs_gid = self.singleuser_fs_gid

    if callable(self.singleuser_supplemental_gids):
        singleuser_supplemental_gids = yield gen.maybe_future(
            self.singleuser_supplemental_gids(self))
    else:
        singleuser_supplemental_gids = self.singleuser_supplemental_gids

    if self.cmd:
        real_cmd = self.cmd + self.get_args()
    else:
        real_cmd = None

    labels = self._build_pod_labels(
        self._expand_all(self.singleuser_extra_labels))
    annotations = self._build_common_annotations(
        self._expand_all(self.singleuser_extra_annotations))

    target_node_selector = self._get_node_selector()
    self.log.info("User %s will be scheduled to node with label %s.",
                  self.user.name, str(target_node_selector))

    return make_pod(
        name=self.pod_name,
        cmd=real_cmd,
        port=self.port,
        image_spec=self.singleuser_image_spec,
        image_pull_policy=self.singleuser_image_pull_policy,
        image_pull_secret=self.singleuser_image_pull_secrets,
        node_selector=self.singleuser_node_selector,
        run_as_uid=singleuser_uid,
        fs_gid=singleuser_fs_gid,
        supplemental_gids=singleuser_supplemental_gids,
        run_privileged=self.singleuser_privileged,
        env=self.get_env(),
        volumes=self._expand_all(self.volumes),
        volume_mounts=self._expand_all(self.volume_mounts),
        working_dir=self.singleuser_working_dir,
        labels=labels,
        annotations=annotations,
        cpu_limit=self.cpu_limit,
        cpu_guarantee=self.cpu_guarantee,
        mem_limit=self.mem_limit,
        mem_guarantee=self.mem_guarantee,
        extra_resource_limits=self.extra_resource_limits,
        extra_resource_guarantees=self.extra_resource_guarantees,
        lifecycle_hooks=self.singleuser_lifecycle_hooks,
        init_containers=self.singleuser_init_containers,
        service_account=self.singleuser_service_account,
        extra_container_config=self.singleuser_extra_container_config,
        extra_pod_config=self.singleuser_extra_pod_config,
        extra_containers=self.singleuser_extra_containers,
    )
def start_kernel(self, *args, **kwargs):
    self.log.debug("RemoteMappingKernelManager.start_kernel: {}".format(
        kwargs['kernel_name']))
    kernel_id = yield gen.maybe_future(
        super(RemoteMappingKernelManager, self).start_kernel(*args, **kwargs))
    self.parent.kernel_session_manager.create_session(kernel_id, **kwargs)
    raise gen.Return(kernel_id)
def get(self, page_num=0, page_size=10):
    payload = yield gen.maybe_future(
        SingletonChartStorage.instance().get_charts(
            int(page_num), int(page_size)))
    self.write(payload)
    self.finish()
def get_crypto(self, path):
    self._init_crypto_storage()
    result = yield gen.maybe_future(self.crypto_storage.get_crypto(path))
    raise gen.Return(result)
def get(self):
    # Return a list of running sessions
    sm = self.session_manager
    sessions = yield gen.maybe_future(sm.list_sessions())
    self.finish(json.dumps(sessions, default=date_default))
def start(self):
    if self.user_storage_pvc_ensure:
        pvc = self.get_pvc_manifest()
        try:
            yield self.asynchronize(
                self.api.create_namespaced_persistent_volume_claim,
                namespace=self.namespace,
                body=pvc)
        except ApiException as e:
            if e.status == 409:
                self.log.info("PVC " + self.pvc_name +
                              " already exists, so did not create new pvc.")
            else:
                raise

    main_loop = IOLoop.current()

    def on_reflector_failure():
        self.log.critical("Events reflector failed, halting Hub.")
        main_loop.stop()

    # events are selected based on pod name, which will include previous launch/stop
    self.events = EventReflector(
        parent=self,
        namespace=self.namespace,
        fields={
            'involvedObject.kind': 'Pod',
            'involvedObject.name': self.pod_name
        },
        on_failure=on_reflector_failure)

    # If we run into a 409 Conflict error, it means a pod with the
    # same name already exists. We stop it, wait for it to stop, and
    # try again. We try 4 times, and if it still fails we give up.
    # FIXME: Have better / cleaner retry logic!
    retry_times = 4
    pod = yield self.get_pod_manifest()
    if self.modify_pod_hook:
        pod = yield gen.maybe_future(self.modify_pod_hook(self, pod))
    for i in range(retry_times):
        try:
            yield self.asynchronize(
                self.api.create_namespaced_pod, self.namespace, pod)
            break
        except ApiException as e:
            if e.status != 409:
                # We only want to handle 409 conflict errors
                self.log.exception("Failed for %s", pod.to_str())
                raise
            self.log.info('Found existing pod %s, attempting to kill',
                          self.pod_name)
            yield self.stop(True)
            self.log.info('Killed pod %s, will try starting singleuser pod again',
                          self.pod_name)
    else:
        raise Exception(
            'Can not create user pod %s: it already exists and could not be deleted'
            % self.pod_name)

    # Note: The self.start_timeout here is kinda superfluous, since
    # there is already a timeout on how long start can run for in
    # jupyterhub itself.
    yield exponential_backoff(
        lambda: self.is_pod_running(
            self.pod_reflector.pods.get(self.pod_name, None)),
        'pod/%s did not start in %s seconds!' % (self.pod_name, self.start_timeout),
        timeout=self.start_timeout)

    pod = self.pod_reflector.pods[self.pod_name]
    self.log.debug('pod %s events before launch: %s',
                   self.pod_name, self.events.events)
    # Note: we stop the event watcher once launch is successful, but the reflector
    # will only stop when the next event comes in, likely when it is stopped.
    self.events.stop()
    return (pod.status.pod_ip, self.port)
def get_detector_data(self, path):
    self._init_detector_storage()
    result = yield gen.maybe_future(
        self.detector_storage.get_detector_data(path))
    raise gen.Return(result)