def get_book(self, book_id):
    """Look up a single book by its id, raising 404 when it does not exist."""
    matches = self.get_books(ids=[int(book_id)])
    if matches:
        return matches[0]
    raise web.HTTPError(404, reason=_(u"抱歉,這本書不存在"))
async def open(self, cluster_id, proxied_path=""):
    """
    Called when a client opens a websocket connection.

    We establish a websocket connection to the proxied backend
    (the Dask cluster dashboard) and set up callbacks to relay
    messages and pings back to the frontend.
    """
    # Get the cluster by ID. If it is not found, raise an error.
    cluster_model = manager.get_cluster(cluster_id)
    if not cluster_model:
        raise web.HTTPError(404, f"Dask cluster {cluster_id} not found")

    # Construct the proper websocket proxy link from the cluster dashboard.
    dashboard_link = cluster_model["dashboard_link"]
    dashboard_link = _normalize_dashboard_link(dashboard_link, self.request)
    # Convert to a websocket protocol: "http..." -> "ws...", "https..." -> "wss...".
    ws_link = "ws" + dashboard_link[4:]
    if not proxied_path.startswith("/"):
        proxied_path = "/" + proxied_path
    client_uri = "{ws_link}{path}".format(ws_link=ws_link, path=proxied_path)
    # NOTE(review): logging the target at warning level looks like leftover
    # debugging; kept as-is but switched off the deprecated `warn` alias
    # (removed in Python 3.13) to `warning`.
    self.log.warning(ws_link)
    if self.request.query:
        client_uri += "?" + self.request.query
    headers = self.request.headers

    def message_cb(message):
        """
        Callback when the backend sends messages to us.

        We just pass it back to the frontend.
        """
        # Websockets support both string (utf-8) and binary data, so let's
        # make sure we signal that appropriately when proxying.
        self._record_activity()
        if message is None:
            # Backend closed the connection; mirror that to the client.
            self.close()
        else:
            self.write_message(message, binary=isinstance(message, bytes))

    def ping_cb(data):
        """
        Callback when the backend sends pings to us.

        We just pass it back to the frontend.
        """
        self._record_activity()
        self.ping(data)

    async def start_websocket_connection():
        self.log.info(
            "Trying to establish websocket connection to {}".format(client_uri)
        )
        self._record_activity()
        request = httpclient.HTTPRequest(url=client_uri, headers=headers)
        self.ws = await pingable_ws_connect(
            request=request,
            on_message_callback=message_cb,
            on_ping_callback=ping_cb,
        )
        self._record_activity()
        self.log.info("Websocket connection established to {}".format(client_uri))

    # Schedule the connection on the IOLoop so open() itself returns promptly.
    ioloop.IOLoop.current().add_callback(start_websocket_connection)
async def my_pre_launch_hook(launcher, *args):
    """Pre-launch hook that always rejects the launch with HTTP 400."""
    joined = ",".join(args)
    raise web.HTTPError(
        400, "Launch is not possible with parameters: " + joined)
def get(self, status_code):
    """Raise an HTTPError with the requested status and optional ?reason=."""
    reason = self.get_query_argument('reason', None)
    raise web.HTTPError(int(status_code), reason=reason)
def get(self, *args, **kwargs):
    """Reject unauthenticated websocket GETs with 403, otherwise delegate."""
    if self.get_current_user():
        return super(TermSocket, self).get(*args, **kwargs)
    raise web.HTTPError(403)
def admin_only(self):
    """Decorator body restricting access to admin users (403 otherwise)."""
    user = self.get_current_user()
    if user is not None and user.admin:
        return
    raise web.HTTPError(403)
def delete_file(self, path):
    """File deletion is not supported by this manager; always 400."""
    message = "Unsupported: delete_file {}".format(path)
    raise web.HTTPError(400, message)
def metrics_authentication(self):
    """Restrict access to the metrics endpoint when authentication is enabled."""
    if self.current_user is not None:
        return
    if self.authenticate_prometheus:
        raise web.HTTPError(403)
def _dump_exception(self, exc_info): pickled_exc = pickle.dumps(exc_info) self.write(json.dumps(dict(exc_info=base64.b64encode(pickled_exc), ))) raise web.HTTPError(500, 'Internal server error')
def on_first_message(self, msg):
    """Handle the first websocket message: authenticate, then switch handlers.

    The first message carries the auth cookie; inject it, verify the user
    (403 on failure), and only then treat subsequent messages as payload.
    """
    self._inject_cookie_message(msg)
    if self.get_current_user() is None:
        # `Logger.warn` is a deprecated alias (removed in Python 3.13);
        # use `warning` instead. Same level, same output.
        self.log.warning("Couldn't authenticate WebSocket connection")
        raise web.HTTPError(403)
    # All further messages go straight to the real handler.
    self.on_message = self.save_on_message
def on_postgres_error(_metric_name: str, exc: Exception) -> None:
    """Translate any postgres failure into a generic 500 for the client."""
    raise web.HTTPError(500, 'System error')
def delete(self, name):
    """Stop the named user's single-user server, replying 204 on success.

    Responds 400 when the server is not running.
    """
    user = self.find_user(name)
    if user.spawner is not None:
        yield self.stop_single_user(user)
        self.set_status(204)
        return
    raise web.HTTPError(400, "%s's server is not running" % name)
def check_profile(self, profile):
    """Ensure *profile* is a known profile name; 404 otherwise."""
    if profile in self.profiles:
        return
    raise web.HTTPError(404, 'profile not found')
def post(self, action):
    """Dev-box-only test hook dispatcher.

    Supported actions:
      'copy'   -- promote the current capture image to the baseline.
      'delete' -- remove the current capture image.
      'token'  -- look up an identity and return its access token.
      'image'  -- pixel-diff the baseline against the current capture.

    Any other action (and any call outside a dev box) fails with 400/403.
    Generator-style coroutine: the ``yield`` points hand off to async tasks.
    """
    # Hard gate: these hooks must never run outside a developer box.
    if not ServerEnvironment.IsDevBox():
        raise web.HTTPError(403, _TEST_HOOKS_NOT_SUPPORTED)
    # PIL is imported lazily so production paths never need it installed.
    from PIL import Image, ImageChops
    if action == 'copy':
        logging.info('Updating baseline image')
        urls = {}
        body = json.loads(self.request.body)
        testname = body['testname']
        imagename = body['imagename']
        scheme = body['scheme']
        # Local shadows of the results directories, keyed by scheme.
        _FULL_RESULTS_BASELINE = '%s/results/baseline/%s' % (options.options.testing_path, scheme)
        _FULL_RESULTS_CURRENT = '%s/results/current/%s' % (options.options.testing_path, scheme)
        # Overwrite the 'baseline' image for the test with the 'current' image.
        baseline_image = r'%s/%s/%s' % (_FULL_RESULTS_BASELINE, testname, imagename)
        current_image = r'%s/%s/Run 1/%s' % (_FULL_RESULTS_CURRENT, testname, imagename)
        yield self._UpdateImageMaskConfig(testname, imagename, scheme)
        if os.path.exists(current_image):
            shutil.copy(current_image, baseline_image)
            logging.info('Updated baseline image for %s' % testname)
        # Web-visible URLs for the client to display both images.
        baseline_web_image = r'%s/%s/%s/%s' % (_STATIC_RESULTS_BASELINE, scheme, testname, imagename)
        current_web_image = r'%s/%s/%s/Run 1/%s' % (_STATIC_RESULTS_CURRENT, scheme, testname, imagename)
        urls['baseline'] = baseline_web_image
        urls['current'] = current_web_image
        # Return JSON result.
        self.write(urls)
        self.finish()
        return
    if action == 'delete':
        body = json.loads(self.request.body)
        testname = body['testname']
        imagename = body['imagename']
        # NOTE(review): _FULL_RESULTS_CURRENT is only assigned in the 'copy'
        # branch above, which cannot have run when action == 'delete'; since
        # the assignment makes the name function-local, reaching this line
        # looks like an UnboundLocalError unless a module-level constant of
        # the same name was intended -- verify against the module globals.
        current_image = r'%s/%s/Run 1/%s' % (_FULL_RESULTS_CURRENT, testname, imagename)
        if os.path.exists(current_image) is True:
            os.remove(current_image)
            logging.info('Deleted current capture image for %s' % testname)
        self.finish()
        return
    if action == 'token':
        body = json.loads(self.request.body)
        identity_key = body['auth_info']['identity'];
        # Async identity lookup; must_exist=False so a miss yields None.
        identity = yield gen.Task(Identity.Query, self._client, identity_key, None, must_exist=False)
        if identity is None:
            raise web.HTTPError(400, 'Identity does not exist.')
        self.write(identity.access_token)
        self.finish()
        return
    if action == 'image':
        body = json.loads(self.request.body)
        test_name = body['testname']
        image_name = body['imagename']
        scheme = body['scheme']
        _FULL_RESULTS_BASELINE = '%s/results/baseline/%s' % (options.options.testing_path, scheme)
        _FULL_RESULTS_CURRENT = '%s/results/current/%s' % (options.options.testing_path, scheme)
        # get image base name
        # Current captures are named "<base>|<run>.png"; the baseline keeps
        # only "<base>.png", so strip the "|<run>" suffix here.
        tmp = image_name[:-4]
        base, num = tmp.split('|', 1)
        image_base_name = '%s.png' % base
        image1 = r'%s/%s/%s' % (_FULL_RESULTS_BASELINE, test_name, image_base_name)
        image2 = r'%s/%s/Run 1/%s' % (_FULL_RESULTS_CURRENT, test_name, image_name)
        if os.path.exists(image1) and os.path.exists(image2):
            self.set_header('Content-Type', 'application/json; charset=UTF-8')
            im1 = Image.open(image1)
            im2 = Image.open(image2)
            # getbbox() is None exactly when the two images are pixel-identical.
            diff = ImageChops.difference(im2, im1)
            result = diff.getbbox() is None
            response = {
                'response': result,
                'bbox': diff.getbbox()
            }
            self.write(response)
            self.finish()
            return
    # NOTE(review): in the 'image' branch, falling through when either file is
    # missing also ends up here with a 400 -- presumably intentional; confirm.
    raise web.HTTPError(400, _TEST_ACTION_NOT_SUPPORTED)
async def get(self, for_user=None, server_name=''):
    """GET renders form for spawning with user-specified options
    or triggers spawn via redirect if there is no form.

    Parameters:
        for_user: optional username to spawn on behalf of (admins only).
        server_name: named-server name; '' selects the default server.
    Raises:
        web.HTTPError: 403 for non-admins spawning for others, 404 for
            unknown users.
    """
    user = current_user = self.current_user
    # Spawning on behalf of someone else requires admin rights.
    if for_user is not None and for_user != user.name:
        if not user.admin:
            raise web.HTTPError(
                403, "Only admins can spawn on behalf of other users"
            )
        user = self.find_user(for_user)
        if user is None:
            raise web.HTTPError(404, "No such user: %s" % for_user)
    # Without named servers, a user with a running server has nothing to
    # spawn -- send them to their server (or ?next=) instead.
    if not self.allow_named_servers and user.running:
        url = self.get_next_url(user, default=user.server_url(server_name))
        self.log.info("User is running: %s", user.name)
        self.redirect(url)
        return
    if server_name is None:
        server_name = ''
    spawner = user.spawners[server_name]
    # resolve `?next=...`, falling back on the spawn-pending url
    # must not be /user/server for named servers,
    # which may get handled by the default server if they aren't ready yet
    pending_url = url_path_join(
        self.hub.base_url, "spawn-pending", user.escaped_name, server_name
    )
    if self.get_argument('next', None):
        # preserve `?next=...` through spawn-pending
        pending_url = url_concat(pending_url, {'next': self.get_argument('next')})
    # spawner is active, redirect back to get progress, etc.
    if spawner.ready:
        self.log.info("Server %s is already running", spawner._log_name)
        next_url = self.get_next_url(user, default=user.server_url(server_name))
        self.redirect(next_url)
        return
    elif spawner.active:
        self.log.info("Server %s is already active", spawner._log_name)
        self.redirect(pending_url)
        return
    # Add handler to spawner here so you can access query params in form rendering.
    spawner.handler = self
    spawner_options_form = await spawner.get_options_form()
    if spawner_options_form:
        self.log.debug("Serving options form for %s", spawner._log_name)
        form = self._render_form(
            for_user=user, spawner_options_form=spawner_options_form
        )
        self.finish(form)
    else:
        self.log.debug(
            "Triggering spawn with default options for %s", spawner._log_name
        )
        # Explicit spawn request: clear _spawn_future
        # which may have been saved to prevent implicit spawns
        # after a failure.
        if spawner._spawn_future and spawner._spawn_future.done():
            spawner._spawn_future = None
        # not running, no form. Trigger spawn and redirect back to /user/:name
        # Wait at most 1s so fast spawns land on a ready server; slower
        # spawns continue in the background while we show the pending page.
        f = asyncio.ensure_future(self.spawn_single_user(user, server_name))
        await asyncio.wait([f], timeout=1)
        self.redirect(pending_url)
async def get(self, test_id):
    """Fetch one record by id and return it as the response; 404 when absent."""
    result = await self.postgres_execute(self.GET_SQL, {'id': test_id})
    if result.row_count:
        await self.finish(self.cast_data(result.row))
        return
    raise web.HTTPError(404, 'Not Found')
async def get(self, for_user, server_name=''):
    """Render the spawn-pending page (or redirect) for a user's server.

    Handles the full lifecycle view: redirect when ready, error page for a
    failed previous spawn, pending page while starting/stopping, and a
    "not running" page otherwise.

    Raises:
        web.HTTPError: 403 for non-admins acting on other users, 404 for
            unknown users or unknown named servers.
    """
    user = current_user = self.current_user
    # Viewing another user's server status requires admin rights.
    if for_user is not None and for_user != current_user.name:
        if not current_user.admin:
            raise web.HTTPError(
                403, "Only admins can spawn on behalf of other users"
            )
        user = self.find_user(for_user)
        if user is None:
            raise web.HTTPError(404, "No such user: %s" % for_user)
    if server_name and server_name not in user.spawners:
        raise web.HTTPError(
            404, "%s has no such server %s" % (user.name, server_name)
        )
    spawner = user.spawners[server_name]
    if spawner.ready:
        # spawner is ready and waiting. Redirect to it.
        next_url = self.get_next_url(default=user.server_url(server_name))
        self.redirect(next_url)
        return
    # if spawning fails for any reason, point users to /hub/home to retry
    self.extra_error_html = self.spawn_home_error
    # First, check for previous failure.
    if (
        not spawner.active
        and spawner._spawn_future
        and spawner._spawn_future.done()
        and spawner._spawn_future.exception()
    ):
        # Condition: spawner not active and _spawn_future exists and contains an Exception
        # Implicit spawn on /user/:name is not allowed if the user's last spawn failed.
        # We should point the user to Home if the most recent spawn failed.
        exc = spawner._spawn_future.exception()
        self.log.error("Previous spawn for %s failed: %s", spawner._log_name, exc)
        spawn_url = url_path_join(self.hub.base_url, "spawn", user.escaped_name)
        self.set_status(500)
        html = self.render_template(
            "not_running.html",
            user=user,
            server_name=server_name,
            spawn_url=spawn_url,
            failed=True,
            failed_message=getattr(exc, 'jupyterhub_message', ''),
            exception=exc,
        )
        self.finish(html)
        return
    # Check for pending events. This should usually be the case
    # when we are on this page.
    # page could be pending spawn *or* stop
    if spawner.pending:
        self.log.info("%s is pending %s", spawner._log_name, spawner.pending)
        # spawn has started, but not finished
        # (removed an unused `url_parts = []` left over from an earlier revision)
        if spawner.pending == "stop":
            page = "stop_pending.html"
        else:
            page = "spawn_pending.html"
        html = self.render_template(
            page, user=user, spawner=spawner, progress_url=spawner._progress_url
        )
        self.finish(html)
        return
    # spawn is supposedly ready, check on the status
    if spawner.ready:
        poll_start_time = time.perf_counter()
        status = await spawner.poll()
        SERVER_POLL_DURATION_SECONDS.labels(
            status=ServerPollStatus.from_status(status)
        ).observe(time.perf_counter() - poll_start_time)
    else:
        status = 0
    # server is not running, render "not running" page
    # further, set status to 404 because this is not
    # serving the expected page
    if status is not None:
        spawn_url = url_path_join(self.hub.base_url, "spawn", user.escaped_name)
        html = self.render_template(
            "not_running.html",
            user=user,
            server_name=server_name,
            spawn_url=spawn_url,
        )
        self.finish(html)
        return
    # we got here, server appears to be ready and running,
    # no longer pending.
    # redirect to the running server.
    next_url = self.get_next_url(default=user.server_url(server_name))
    self.redirect(next_url)
async def get(self):
    """Execute the query and then always raise; exercises error handling."""
    query = self.GET_SQL
    await self.postgres_execute(query)
    raise web.HTTPError(500, 'This should have failed')
def prepare(self):
    """Reject cross-origin requests with 404; otherwise defer to the base class."""
    if self.check_origin():
        return super(APIHandler, self).prepare()
    raise web.HTTPError(404)
async def get(self):
    """Run the query, mapping data errors to 422; otherwise always raise 418."""
    try:
        await self.postgres_execute(self.GET_SQL)
    except psycopg2.DataError:
        raise web.HTTPError(422)
    raise web.HTTPError(418, 'This should have failed')
def rename_file(self, old_path, path):
    """File renaming is not supported by this manager; always 400."""
    message = "Unsupported: rename_file {} {}".format(old_path, path)
    raise web.HTTPError(400, message)
async def get(self):
    """Verify on_postgres_error hands the exception back unchanged (204)."""
    exc = self.on_postgres_error('test', RuntimeError())
    if not isinstance(exc, RuntimeError):
        raise web.HTTPError(500, 'Did not pass through')
    self.set_status(204)
def get(self, grade_id):
    """Serve a single comment as JSON; unknown ids produce a 404."""
    try:
        comment = self.gradebook.find_comment_by_id(grade_id)
    except MissingEntry:
        raise web.HTTPError(404)
    else:
        self.write(json.dumps(comment.to_dict()))
def write_notebook_object(self, nb, notebook_id=None):
    """Save an existing notebook object by notebook_id.

    Writes the notebook JSON (and optionally a .py script), then -- if the
    notebook was renamed -- updates the id<->name mappings, removes the old
    files, and moves checkpoints to follow the new name.

    Returns the notebook_id. Raises web.HTTPError 400/404 on bad input,
    unknown ids, or write failures.
    """
    try:
        # NFC-normalize so visually identical names map to one file.
        new_name = normalize('NFC', nb.metadata.name)
    except AttributeError:
        raise web.HTTPError(400, u'Missing notebook name')
    if notebook_id is None:
        notebook_id = self.new_notebook_id(new_name)
    if notebook_id not in self.mapping:
        raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
    old_name = self.mapping[notebook_id]
    # Snapshot checkpoints before the write so renames below can move them.
    old_checkpoints = self.list_checkpoints(notebook_id)
    path = self.get_path_by_name(new_name)
    try:
        self.log.debug("Autosaving notebook %s", path)
        # NOTE(review): opened without an explicit encoding, unlike the
        # utf-8 io.open used for the script below -- confirm whether the
        # locale default is relied upon here.
        with open(path, 'w') as f:
            current.write(nb, f, u'json')
    except Exception as e:
        raise web.HTTPError(
            400, u'Unexpected error while autosaving notebook: %s' % e)
    # save .py script as well
    if self.save_script:
        pypath = os.path.splitext(path)[0] + '.py'
        self.log.debug("Writing script %s", pypath)
        try:
            with io.open(pypath, 'w', encoding='utf-8') as f:
                current.write(nb, f, u'py')
        except Exception as e:
            raise web.HTTPError(
                400, u'Unexpected error while saving notebook as script: %s' % e)
    # remove old files if the name changed
    if old_name != new_name:
        # update mapping
        self.mapping[notebook_id] = new_name
        self.rev_mapping[new_name] = notebook_id
        del self.rev_mapping[old_name]
        # remove renamed original, if it exists
        old_path = self.get_path_by_name(old_name)
        if os.path.isfile(old_path):
            self.log.debug("unlinking notebook %s", old_path)
            os.unlink(old_path)
        # cleanup old script, if it exists
        if self.save_script:
            old_pypath = os.path.splitext(old_path)[0] + '.py'
            if os.path.isfile(old_pypath):
                self.log.debug("unlinking script %s", old_pypath)
                os.unlink(old_pypath)
        # rename checkpoints to follow file
        for cp in old_checkpoints:
            checkpoint_id = cp['checkpoint_id']
            old_cp_path = self.get_checkpoint_path_by_name(
                old_name, checkpoint_id)
            new_cp_path = self.get_checkpoint_path_by_name(
                new_name, checkpoint_id)
            if os.path.isfile(old_cp_path):
                self.log.debug("renaming checkpoint %s -> %s", old_cp_path,
                               new_cp_path)
                os.rename(old_cp_path, new_cp_path)
    return notebook_id
def _check_kernel_id(self, kernel_id): """Check a that a kernel_id exists and raise 404 if not.""" if kernel_id not in self: raise web.HTTPError(404, u'Kernel does not exist: %s' % kernel_id)
def check_error(self):
    """Fail with 400 when the OAuth callback carries an error argument."""
    error = self.get_argument("error", False)
    if not error:
        return
    message = self.get_argument("error_description", error)
    raise web.HTTPError(400, "OAuth error: %s" % message)
async def proxy(self, cluster_id, proxied_path):
    """
    While self.request.uri is
        (hub)    /user/username/proxy/([0-9]+)/something.
        (single) /proxy/([0-9]+)/something
    This serverextension is given {port}/{everything/after}.

    Proxies the HTTP request to the cluster's dashboard and relays the
    response (status, headers, body) back to the client.
    """
    # Hop-by-hop header; must not be forwarded to the backend.
    if "Proxy-Connection" in self.request.headers:
        del self.request.headers["Proxy-Connection"]
    self._record_activity()
    if self.request.headers.get("Upgrade", "").lower() == "websocket":
        # We wanna websocket!
        # jupyterhub/nbserverproxy@36b3214
        self.log.info(
            "we wanna websocket, but we don't define WebSocketProxyHandler"
        )
        self.set_status(500)
        # NOTE(review): no return here, so the request still falls through
        # to the normal proxying below after setting 500 -- confirm intended.
    body = self.request.body
    if not body:
        # Tornado requires an (empty) body for POST but rejects one on GET.
        if self.request.method == "POST":
            body = b""
        else:
            body = None
    # Get the cluster by ID. If it is not found,
    # raise an error.
    cluster_model = manager.get_cluster(cluster_id)
    if not cluster_model:
        raise web.HTTPError(404, f"Dask cluster {cluster_id} not found")
    # Construct the proper proxy link from the cluster dashboard
    dashboard_link = cluster_model["dashboard_link"]
    dashboard_link = _normalize_dashboard_link(dashboard_link, self.request)
    # If a path is not provided, default to the individual plots listing.
    proxied_path = proxied_path or "individual-plots.json"
    client_uri = "{dashboard_link}/{path}".format(
        dashboard_link=dashboard_link, path=proxied_path
    )
    if self.request.query:
        client_uri += "?" + self.request.query
    client = httpclient.AsyncHTTPClient()
    # follow_redirects=False: pass redirects through to the browser untouched.
    req = httpclient.HTTPRequest(
        client_uri,
        method=self.request.method,
        body=body,
        headers=self.request.headers,
        follow_redirects=False,
    )
    # raise_error=False so HTTP-level errors are relayed rather than thrown.
    response = await client.fetch(req, raise_error=False)
    # record activity at start and end of requests
    self._record_activity()
    # For all non http errors... (exact type check: HTTPError subclasses,
    # i.e. real HTTP responses, are relayed below instead)
    if response.error and type(response.error) is not httpclient.HTTPError:
        self.set_status(500)
        self.write(str(response.error))
    else:
        self.set_status(response.code, response.reason)
        # clear tornado default header
        self._headers = httputil.HTTPHeaders()
        for header, v in response.headers.get_all():
            if header not in (
                "Content-Length",
                "Transfer-Encoding",
                "Content-Encoding",
                "Connection",
            ):
                # some header appear multiple times, eg 'Set-Cookie'
                self.add_header(header, v)
        if response.body:
            self.write(response.body)
def check_code(self):
    """Fail with 400 when the OAuth callback lacks a ``code`` argument."""
    code = self.get_argument("code", False)
    if not code:
        raise web.HTTPError(400, "OAuth callback made without a code")
def prepare(self):
    """Unconditionally respond 404; this handler exposes nothing."""
    raise web.HTTPError(404)
def get(self, name, user_path):
    """Route /user/:name/:user_path for the current login state.

    Three cases, by who is logged in:
      * the named user  -- wait on / trigger their spawn, then redirect to
        the running server with exponential-backoff redirect-loop guarding;
      * a different user -- redirect to that user's own server URL;
      * nobody          -- clear cookies and bounce to the login page.

    Generator-style coroutine: ``yield`` points hand off to async work.
    """
    if not user_path:
        user_path = '/'
    current_user = self.get_current_user()
    if current_user and current_user.name == name:
        # if spawning fails for any reason, point users to /hub/home to retry
        self.extra_error_html = self.spawn_home_error
        # If people visit /user/:name directly on the Hub,
        # the redirects will just loop, because the proxy is bypassed.
        # Try to check for that and warn,
        # though the user-facing behavior is unchanged
        host_info = urlparse(self.request.full_url())
        port = host_info.port
        if not port:
            # No explicit port: infer the scheme default.
            port = 443 if host_info.scheme == 'https' else 80
        if port != Server.from_url(
            self.proxy.public_url
        ).connect_port and port == self.hub.connect_port:
            self.log.warning(
                """
                Detected possible direct connection to Hub's private ip: %s, bypassing proxy.
                This will result in a redirect loop.
                Make sure to connect to the proxied public URL %s
                """,
                self.request.full_url(),
                self.proxy.public_url,
            )
        # logged in as correct user, check for pending spawn
        spawner = current_user.spawner
        # First, check for previous failure.
        if (not spawner.active and spawner._spawn_future
                and spawner._spawn_future.done()
                and spawner._spawn_future.exception()):
            # Condition: spawner not active and _spawn_future exists and contains an Exception
            # Implicit spawn on /user/:name is not allowed if the user's last spawn failed.
            # We should point the user to Home if the most recent spawn failed.
            exc = spawner._spawn_future.exception()
            self.log.error(
                "Preventing implicit spawn for %s because last spawn failed: %s",
                spawner._log_name, exc)
            # raise a copy because each time an Exception object is re-raised, its traceback grows
            raise copy.copy(exc).with_traceback(exc.__traceback__)
        # check for pending spawn
        if spawner.pending and spawner._spawn_future:
            # wait on the pending spawn
            self.log.debug("Waiting for %s pending %s", spawner._log_name,
                           spawner.pending)
            try:
                yield gen.with_timeout(
                    timedelta(seconds=self.slow_spawn_timeout),
                    spawner._spawn_future)
            except gen.TimeoutError:
                # Spawn is slow; fall through and show the pending page.
                self.log.info(
                    "Pending spawn for %s didn't finish in %.1f seconds",
                    spawner._log_name, self.slow_spawn_timeout)
                pass
        # we may have waited above, check pending again:
        if spawner.pending:
            self.log.info("%s is pending %s", spawner._log_name,
                          spawner.pending)
            # spawn has started, but not finished
            self.statsd.incr('redirects.user_spawn_pending', 1)
            html = self.render_template("spawn_pending.html",
                                        user=current_user)
            self.finish(html)
            return
        # spawn has supposedly finished, check on the status
        if spawner.ready:
            status = yield spawner.poll()
        else:
            status = 0
        # server is not running, trigger spawn
        if status is not None:
            if spawner.options_form:
                # A form is required -- send the user to the spawn page.
                self.redirect(
                    url_concat(url_path_join(self.hub.base_url, 'spawn'),
                               {'next': self.request.uri}))
                return
            else:
                yield self.spawn_single_user(current_user)
        # spawn didn't finish, show pending page
        if spawner.pending:
            self.log.info("%s is pending %s", spawner._log_name,
                          spawner.pending)
            # spawn has started, but not finished
            self.statsd.incr('redirects.user_spawn_pending', 1)
            html = self.render_template("spawn_pending.html",
                                        user=current_user)
            self.finish(html)
            return
        # We do exponential backoff here - since otherwise we can get stuck in a redirect loop!
        # This is important in many distributed proxy implementations - those are often eventually
        # consistent and can take upto a couple of seconds to actually apply throughout the cluster.
        try:
            redirects = int(self.get_argument('redirects', 0))
        except ValueError:
            self.log.warning("Invalid redirects argument %r",
                             self.get_argument('redirects'))
            redirects = 0
        # check redirect limit to prevent browser-enforced limits.
        # In case of version mismatch, raise on only two redirects.
        if redirects >= self.settings.get('user_redirect_limit', 4) or (
                redirects >= 2
                and spawner._jupyterhub_version != __version__):
            # We stop if we've been redirected too many times.
            msg = "Redirect loop detected."
            if spawner._jupyterhub_version != __version__:
                msg += (
                    " Notebook has jupyterhub version {singleuser}, but the Hub expects {hub}."
                    " Try installing jupyterhub=={hub} in the user environment"
                    " if you continue to have problems.").format(
                        singleuser=spawner._jupyterhub_version or 'unknown (likely < 0.8)',
                        hub=__version__,
                    )
            raise web.HTTPError(500, msg)
        # set login cookie anew
        self.set_login_cookie(current_user)
        without_prefix = self.request.uri[len(self.hub.base_url):]
        target = url_path_join(self.base_url, without_prefix)
        if self.subdomain_host:
            target = current_user.host + target
        # record redirect count in query parameter
        if redirects:
            self.log.warning("Redirect loop detected on %s", self.request.uri)
            # add capped exponential backoff where cap is 10s
            yield gen.sleep(min(1 * (2**redirects), 10))
            # rewrite target url with new `redirects` query value
            url_parts = urlparse(target)
            query_parts = parse_qs(url_parts.query)
            query_parts['redirects'] = redirects + 1
            url_parts = url_parts._replace(query=urlencode(query_parts))
            target = urlunparse(url_parts)
        else:
            target = url_concat(target, {'redirects': 1})
        self.redirect(target)
        self.statsd.incr('redirects.user_after_login')
    elif current_user:
        # logged in as a different user, redirect
        self.statsd.incr('redirects.user_to_user', 1)
        target = url_path_join(current_user.url, user_path or '')
        self.redirect(target)
    else:
        # not logged in, clear any cookies and reload
        self.statsd.incr('redirects.user_to_login', 1)
        self.clear_login_cookie()
        self.redirect(
            url_concat(
                self.settings['login_url'],
                {'next': self.request.uri},
            ))