def _api_request(self, method, url, **kwargs):
    """Make an API request"""
    allow_404 = kwargs.pop('allow_404', False)
    headers = kwargs.setdefault('headers', {})
    headers.setdefault('Authorization', 'token %s' % self.api_token)
    try:
        r = requests.request(method, url, **kwargs)
    except requests.ConnectionError as e:
        app_log.error("Error connecting to %s: %s", self.api_url, e)
        msg = "Failed to connect to Hub API at %r." % self.api_url
        msg += " Is the Hub accessible at this URL (from host: %s)?" % socket.gethostname()
        if '127.0.0.1' in self.api_url:
            msg += (" Make sure to set c.JupyterHub.hub_ip to an IP accessible to"
                    " single-user servers if the servers are not on the same host as the Hub.")
        raise HTTPError(500, msg)

    data = None
    if r.status_code == 404 and allow_404:
        pass
    elif r.status_code == 403:
        app_log.error(
            "I don't have permission to check authorization with JupyterHub,"
            " my auth token may have expired: [%i] %s",
            r.status_code, r.reason)
        app_log.error(r.text)
        raise HTTPError(500, "Permission failure checking authorization, I may need a new token")
    elif r.status_code >= 500:
        app_log.error("Upstream failure verifying auth token: [%i] %s",
                      r.status_code, r.reason)
        app_log.error(r.text)
        raise HTTPError(502, "Failed to check authorization (upstream problem)")
    elif r.status_code >= 400:
        app_log.warning("Failed to check authorization: [%i] %s",
                        r.status_code, r.reason)
        app_log.warning(r.text)
        raise HTTPError(500, "Failed to check authorization")
    else:
        data = r.json()
    return data

def check(self, handler):
    """Check the rate limit for a handler.

    Identifies the source by ip and user-agent.

    If the rate limit is exceeded, raise HTTPError(429)
    """
    if not self.limit:
        return
    key = self.key_for_handler(handler)
    added = yield self.cache.add(key, 1, self.interval)
    if not added:
        # it's been seen before, use incr
        try:
            count = yield self.cache.incr(key)
        except Exception as e:
            # include the error itself in the log, not just the key
            app_log.warning("Failed to increment rate limit for %s: %s", key, e)
            return
        app_log.debug("Rate limit remaining for %r: %s/%s",
                      key, self.limit - count, self.limit)
        if count and count >= self.limit:
            minutes = self.interval // 60
            raise HTTPError(
                429,
                "Rate limit exceeded for {ip} ({limit} req / {minutes} min)."
                " Try again later.".format(
                    ip=handler.request.remote_ip,
                    limit=self.limit,
                    minutes=minutes,
                ),
            )

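# A minimal sketch of the key_for_handler helper that check() above relies on.
# Hypothetical: the real implementation isn't shown in this collection; this
# simply derives a cache key from the request's ip and user-agent, which is
# what the docstring describes.
def key_for_handler(self, handler):
    """Derive a rate-limit cache key from the request's ip and user-agent."""
    agent = handler.request.headers.get('User-Agent', '')
    return 'rate-limit:{}:{}'.format(handler.request.remote_ip, agent)
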
def get(self, path=None):
    '''Spawns a brand new server'''
    try:
        if path is None:
            # No path. Assign a prelaunched container from the pool and redirect to it.
            # Append self.redirect_uri to the redirect target.
            container_path = self.pool.acquire().path
            app_log.info("Allocated [%s] from the pool.", container_path)
            url = "/{}/{}".format(container_path, self.redirect_uri)
        else:
            path_parts = path.lstrip('/').split('/', 1)
            container_path = path_parts[0]

            # Scrap a container from the pool and replace it with an ad-hoc replacement.
            # This takes longer, but is necessary to support ad-hoc containers.
            yield self.pool.adhoc(container_path)

            app_log.info("Allocated ad-hoc container at [%s].", container_path)
            url = path

        app_log.debug("Redirecting [%s] -> [%s].", self.request.path, url)
        self.redirect(url, permanent=False)
    except spawnpool.EmptyPoolError:
        app_log.warning("The container pool is empty!")
        self.render("full.html", cull_period=self.cull_period)

def get(self, path=None):
    '''Spawns a brand new server'''
    if self.allow_origin:
        self.set_header("Access-Control-Allow-Origin", self.allow_origin)

    try:
        if path is None:
            # No path. Assign a prelaunched container from the pool and redirect to it.
            # Append self.redirect_uri to the redirect target.
            container_path = self.pool.acquire().path
            app_log.info("Allocated [%s] from the pool.", container_path)
            url = "/{}/{}".format(container_path, self.redirect_uri)
        else:
            # Split /user/{some_user}/long/url/path and acquire {some_user}
            path_parts = path.lstrip('/').split('/', 2)
            app_log.info("path parts: %s", path_parts)
            user = path_parts[1]

            # Scrap a container from the pool and replace it with an ad-hoc replacement.
            # This takes longer, but is necessary to support ad-hoc containers.
            yield self.pool.adhoc(user, path_parts[-1])
            url = "/" + "/".join(path_parts[:2])
            app_log.info("new url: %s", url)

        app_log.debug("Redirecting [%s] -> [%s].", self.request.path, url)
        self.redirect(url, permanent=False)
    except spawnpool.EmptyPoolError:
        app_log.warning("The container pool is empty!")
        self.render("full.html", cull_period=self.cull_period)

def wait_for_http_server(url, timeout=10):
    """Wait for an HTTP Server to respond at url.

    Any non-5XX response code will do, even 404.
    """
    loop = ioloop.IOLoop.current()
    tic = loop.time()
    client = AsyncHTTPClient()
    while loop.time() - tic < timeout:
        try:
            r = yield client.fetch(url, follow_redirects=False)
        except HTTPError as e:
            if e.code >= 500:
                # failed to respond properly, wait and try again
                if e.code != 599:
                    # we expect 599 for no connection,
                    # but 502 or other proxy error is conceivable
                    app_log.warning(
                        "Server at %s responded with error: %s", url, e.code)
                yield gen.sleep(0.1)
            else:
                app_log.debug("Server at %s responded with %s", url, e.code)
                return
        except (OSError, socket.error) as e:
            if e.errno not in {errno.ECONNABORTED, errno.ECONNREFUSED, errno.ECONNRESET}:
                app_log.warning("Failed to connect to %s (%s)", url, e)
            yield gen.sleep(0.1)
        else:
            return
    raise TimeoutError(
        "Server at {url} didn't respond in {timeout} seconds".format(**locals())
    )

def check_hub_user(self, user_model):
    """Check whether Hub-authenticated user should be allowed.

    Returns the input if the user should be allowed, None otherwise.

    Override if you want to check anything other than the username's presence
    in the hub_users list.

    Args:
        user_model (dict): the user model returned from :class:`HubAuth`
    Returns:
        user_model (dict): The user model if the user should be allowed, None otherwise.
    """
    if self.hub_users is None and self.hub_groups is None:
        # no whitelist specified, allow any authenticated Hub user
        return user_model
    name = user_model['name']
    if self.hub_users and name in self.hub_users:
        # user in whitelist
        return user_model
    elif self.hub_groups and set(user_model['groups']).intersection(self.hub_groups):
        # group in whitelist
        # (intersection, not union: the user must actually belong to at least
        # one allowed group; union would be truthy for any non-empty hub_groups)
        return user_model
    else:
        app_log.warning("Not allowing Hub user %s" % name)
        return None

def get(self):
    error = self.get_argument("error", False)
    if error:
        msg = self.get_argument("error_description", error)
        raise HTTPError(400, "Error in oauth: %s" % msg)

    code = self.get_argument("code", False)
    if not code:
        raise HTTPError(400, "oauth callback made without a token")

    # validate OAuth state
    arg_state = self.get_argument("state", None)
    if arg_state is None:
        # HTTPError takes a status code first; the original passed only the message
        raise HTTPError(400, "oauth state is missing. Try logging in again.")
    cookie_name = self.hub_auth.get_state_cookie_name(arg_state)
    cookie_state = self.get_secure_cookie(cookie_name)
    # clear cookie state now that we've consumed it
    self.clear_cookie(cookie_name, path=self.hub_auth.base_url)
    if isinstance(cookie_state, bytes):
        cookie_state = cookie_state.decode('ascii', 'replace')
    # check that state matches
    if arg_state != cookie_state:
        app_log.warning("oauth state %r != %r", arg_state, cookie_state)
        raise HTTPError(403, "oauth state does not match. Try logging in again.")
    next_url = self.hub_auth.get_next_url(cookie_state)
    # TODO: make async (in a Thread?)
    token = self.hub_auth.token_for_code(code)
    session_id = self.hub_auth.get_session_id(self)
    user_model = self.hub_auth.user_for_token(token, session_id=session_id)
    if user_model is None:
        raise HTTPError(500, "oauth callback failed to identify a user")
    app_log.info("Logged-in user %s", user_model)
    self.hub_auth.set_cookie(self, token)
    self.redirect(next_url or self.hub_auth.base_url)

def _check_hub_authorization(self, url, cache_key=None, use_cache=True):
    """Identify a user with the Hub

    Args:
        url (str): The API URL to check the Hub for authorization
                   (e.g. http://127.0.0.1:8081/hub/api/authorizations/token/abc-def)
        cache_key (str): The key for checking the cache
        use_cache (bool): Specify use_cache=False to skip cached cookie values (default: True)

    Returns:
        user_model (dict): The user model, if a user is identified, None if authentication fails.

        Raises an HTTPError if the request failed for a reason other than no such user.
    """
    if use_cache:
        if cache_key is None:
            raise ValueError("cache_key is required when using cache")
        # check for a cached reply, so we don't check with the Hub if we don't have to
        try:
            return self.cache[cache_key]
        except KeyError:
            app_log.debug("HubAuth cache miss: %s", cache_key)

    data = self._api_request('GET', url, allow_404=True)
    if data is None:
        app_log.warning("No Hub user identified for request")
    else:
        app_log.debug("Received request from Hub user %s", data)
    if use_cache:
        # cache result
        self.cache[cache_key] = data
    return data

def add_error_message_to_slug(self, error_string, args_to_delete=None, msg_categ="error"):
    """ add an "error" arg to url slug """
    # avoid a mutable default argument
    if args_to_delete is None:
        args_to_delete = []

    slug_ = self.request.arguments
    app_log.info("... add_error_message_to_slug / slug_ : \n %s ", pformat(slug_))

    # create a complete clean slug if no slug
    if slug_ == {}:
        error_slug = u"?" + u"error=" + tornado.escape.url_escape(error_string)

    # add error arg to existing slug
    else:
        slug_without_error = deepcopy(slug_)

        # clean existing slug from existing error arg if any
        slug_without_error = self.clean_slug(
            slug_without_error, args_to_delete + DEFAULT_ERROR_ARGS_TO_DELETE)
        app_log.warning("... add_error_message_to_slug / slug_without_error : \n %s ",
                        pformat(slug_without_error))

        # recreate slug
        error_dict = {"error": error_string}
        error_dict.update(slug_without_error)
        error_slug = u"?" + urllib.urlencode(error_dict, doseq=True)

    app_log.info("... add_error_message_to_slug / error_slug : \n %s ", pformat(error_slug))
    return error_slug

def get_company_exports_summary(self, org_id):
    db_ce = self.application.mysql_db_name("company-exports")
    if not db_ce:
        app_log.warning(
            "Cannot add '%s' data. Database `%s` not online.",
            "influence", db_ce)
        return

    data = {}

    sql = """
select
  {db_ce}.company.company_id,
  count(distinct({db_ce}.action.destination_id))
from {db_ce}.action
join {db_ce}.company using (company_id)
where {db_ce}.company.caat_id = {caat_id}
;
""".format(**{
        "db_ce": db_ce,
        "caat_id": org_id,
    })

    try:
        result = self.orm.execute(sql).fetchone()
    except InternalError as e:
        print(e)
        return

    if result:
        (company_id, n_dest, ) = result
        data["companyExportsId"] = company_id
        data["destinationCount"] = n_dest

    sql = """
select count(distinct({db_ce}.rating.description))
from {db_ce}.action
join {db_ce}.company using (company_id)
join {db_ce}.rating using (rating_id)
where {db_ce}.company.caat_id = {caat_id}
  and {db_ce}.rating.description is not null
;
""".format(**{
        "db_ce": db_ce,
        "caat_id": org_id,
    })

    try:
        result = self.orm.execute(sql).fetchone()
    except InternalError as e:
        print(e)
        return

    if result:
        (n_rating, ) = result
        data["ratingCount"] = n_rating

    return data

def _get_user_cookie(self, handler):
    token = handler.get_secure_cookie(self.cookie_name)
    if token:
        user_model = self.user_for_token(token)
        if user_model is None:
            app_log.warning("Token stored in cookie may have expired")
            handler.clear_cookie(self.cookie_name)
        return user_model

def observe(self):
    '''Collect Ground Truth of what's actually running from Docker and the proxy.'''
    results = yield {
        "docker": self.spawner.list_notebook_servers(self.name_pattern, all=True),
        "proxy": self._proxy_routes(),
    }

    self.container_ids = set()
    self.living_container_ids = []
    self.stopped_container_ids = []
    self.zombie_container_ids = []

    self.routes = set()
    self.live_routes = []
    self.stale_routes = []
    self.zombie_routes = []

    # Sort Docker results into living and dead containers.
    for container in results["docker"]:
        id = container['Id']
        self.container_ids.add(id)
        if container['Status'].startswith('Up'):
            self.living_container_ids.append(id)
        else:
            self.stopped_container_ids.append(id)

    now = datetime.utcnow()
    idle_cutoff = now - self.cull_idle
    started_cutoff = now - self.cull_max_age

    # Sort proxy routes into living, stale, and zombie routes.
    living_set = set(self.living_container_ids)
    for path, route in results["proxy"].items():
        last_activity_s = route.get('last_activity', None)
        container_id = route.get('container_id', None)
        if container_id:
            result = (path, container_id)
            if container_id in living_set:
                try:
                    last_activity = datetime.strptime(last_activity_s, _date_fmt)
                    started = self.started.get(container_id, None)
                    self.routes.add(result)
                    if started and last_activity < idle_cutoff:
                        app_log.info("Culling %s, idle since %s", path, last_activity)
                        self.stale_routes.append(result)
                    elif started and started < started_cutoff:
                        app_log.info("Culling %s, up since %s", path, started)
                        self.stale_routes.append(result)
                    else:
                        app_log.debug("Container %s up since %s, idle since %s",
                                      path, started, last_activity)
                        self.live_routes.append(result)
                except ValueError as e:
                    app_log.warning("Ignoring a proxy route with an unparsable activity date: %s", e)
            else:
                # The route doesn't correspond to a living container.
                self.zombie_routes.append(result)

def _parse_json_arguments(self):
    content_type = self.content_type
    if content_type and content_type.find('application/json') >= 0:
        try:
            self.json_arguments = json_decode(self.request.body)
        except Exception as error:
            app_log.warning('Invalid application/json body: %s', error)

def catch_error_message(self):
    """ get and log error message if any """
    try:
        self.error_msg = self.get_argument("error")
        app_log.warning("\n... catch_error_message / self.error_msg : %s ", self.error_msg)
    except Exception:
        self.error_msg = ""

def post(self):
    '''Spawns a brand new server programmatically'''
    try:
        url = self.pool.acquire().path
        app_log.info("Allocated [%s] from the pool.", url)
        app_log.debug("Responding with container url [%s].", url)
        self.write({'url': url})
    except spawnpool.EmptyPoolError:
        app_log.warning("The container pool is empty!")
        self.write({'status': 'full'})

def _get_user_cookie(self, handler):
    token = handler.get_secure_cookie(self.cookie_name)
    session_id = self.get_session_id(handler)
    if token:
        token = token.decode('ascii', 'replace')
        user_model = self.user_for_token(token, session_id=session_id)
        if user_model is None:
            app_log.warning("Token stored in cookie may have expired")
            handler.clear_cookie(self.cookie_name)
        return user_model

def find(cls, db, token):
    orm_token = super().find(db, token)
    if orm_token and not orm_token.client_id:
        app_log.warning(
            "Deleting stale oauth token for %s with no client",
            orm_token.user and orm_token.user.name,
        )
        db.delete(orm_token)
        db.commit()
        return
    return orm_token

def _register_handlers(self, spec, handlers):
    """Append a handler spec, tracking named handlers by name."""
    handlers.append(spec)
    if spec.name:
        if spec.name in self.named_handlers:
            app_log.warning(
                "Multiple handlers named %s; replacing previous value",
                spec.name)
        self.named_handlers[spec.name] = spec

def process_rule(self, rule: "Rule") -> "Rule":
    rule = super(ReversibleRuleRouter, self).process_rule(rule)

    if rule.name:
        if rule.name in self.named_rules:
            app_log.warning(
                "Multiple handlers named %s; replacing previous value", rule.name
            )
        self.named_rules[rule.name] = rule

    return rule

async def emit(self, data):
    if type(data) is not str:
        serialized_data = json.dumps(data)
    else:
        serialized_data = data
    try:
        self.write('data: {}\n\n'.format(serialized_data))
        await self.flush()
    except StreamClosedError:
        app_log.warning("Stream closed while handling %s", self.request.uri)
        # raise Finish to halt the handler
        raise web.Finish()

def _json_default(self, obj):
    """encode non-jsonable objects as JSON

    Currently only bytes are supported
    """
    if not isinstance(obj, bytes):
        app_log.warning(
            "Non-jsonable data in user_options: %r; will persist None.",
            type(obj))
        return None
    return {"__jupyterhub_bytes__": True, "data": encodebytes(obj).decode('ascii')}

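# Usage sketch: _json_default is meant to be passed as the `default` hook to
# json.dumps, so bytes values in user_options survive serialization
# (assumption: a matching object_hook reverses the "__jupyterhub_bytes__"
# wrapper when the value is loaded again).
#
#     serialized = json.dumps(user_options, default=self._json_default)
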
def post(self):
    self.content_type = 'application/json'
    try:
        data = json.loads(self.request.body)
        identity = data["mpinId"]
        userid = data["userId"]
        expireTime = data["expireTime"]
        activateKey = data["activateKey"]
        mobile = data["mobile"]
    except (ValueError, KeyError):
        # ValueError: body is not JSON; KeyError: a required field is missing
        # (the original caught only ValueError, so a missing field raised a 500)
        log.error("Cannot decode body as JSON.")
        log.debug(self.request.body)
        self.set_status(400, reason="BAD REQUEST. INVALID JSON")
        self.finish()
        return

    userId = data.get("userId")
    if not userId:
        log.error("Missing userId")
        log.debug(self.request.body)
        self.set_status(400, reason="BAD REQUEST. INVALID USER ID")
        self.finish()
        return

    if options.verifyIdentityURL.startswith("/"):
        # relative path
        base_url = "{0}/{1}".format(
            self.request.headers.get("RPS-BASE-URL").rstrip("/"),
            options.verifyIdentityURL.lstrip("/")
        )
    else:
        base_url = options.verifyIdentityURL

    validateURL = self._generateValidationURL(base_url, identity, activateKey, expireTime)
    log.info("Sending activation email for user {0}: {1}".format(
        userid.encode("utf-8"), validateURL))

    deviceName = mobile and "Mobile" or "PC"

    if options.forceActivate:
        log.warning("forceActivate option set! User activated without verification!")
    else:
        mailer.sendActivationEmail(
            userid.encode("utf-8"), options.emailSubject, deviceName,
            validateURL, options.smtpUser, options.smtpPassword)
        log.warning("Sending Mail!")

    responseData = {
        "forceActivate": options.forceActivate
    }
    self.write(json.dumps(responseData))
    self.set_status(200)
    self.finish()

def fetch(self):
    q = list()
    on_message = self.filter(self.container)

    while len(self.__buff) > 8:
        header, self.__buff = self.__buff[:8], self.__buff[8:]
        stream, length = self.parse_header(header)

        if len(self.__buff) < length:
            continue

        if not length:
            continue

        message = self.split(length)
        ts, message = message.split(b(' '), 1)

        result = on_message.send(message)

        if result is False:
            continue
        else:
            msg = {
                'stream': self.STREAMS[stream],
                'timestamp': ts.lstrip(b('[')).rstrip(b(']')),
            }
            msg.update({
                'container': self.container.name,
                'image': self.container.image,
            })

            if result is None:
                log.warning('Message "%s" not parsed by regular expression', message)
                msg.update({'message': message})
            else:
                msg.update(result)

            log.debug("Storing message %s", LasyJSON(msg))
            q.append((
                dumps({
                    'index': {
                        '_index': self.get_index_name(),
                        '_type': 'logs',
                    }
                }),
                dumps(msg),
            ))

    return q

def reset_is_running_on_all_spider(coll_model):
    """Reset is_running on all spiders, to avoid errors if the app
    shut down while a spider was still running."""
    print()
    app_log.warning('>>> reset_is_running_on_all_spider ... ')

    # find if any spider was running
    running_spiders = coll_model.find({"scraper_log.is_running": True})
    app_log.info(">>> running_spiders : \n %s", list(running_spiders))

    coll_model.update_many(
        {'scraper_log.is_running': True},
        {"$set": {'scraper_log.is_running': False}})

def check_hub_user(self, model):
    """Check whether Hub-authenticated user or service should be allowed.

    Returns the input if the user should be allowed, None otherwise.

    Override if you want to check anything other than the username's presence
    in the hub_users list.

    Args:
        model (dict): the user or service model returned from :class:`HubAuth`
    Returns:
        user_model (dict): The user model if the user should be allowed, None otherwise.
    """
    name = model['name']
    kind = model.setdefault('kind', 'user')
    if self.allow_all:
        app_log.debug(
            "Allowing Hub %s %s (all Hub users and services allowed)", kind, name
        )
        return model

    if self.allow_admin and model.get('admin', False):
        app_log.debug("Allowing Hub admin %s", name)
        return model

    if kind == 'service':
        # it's a service, check hub_services
        if self.hub_services and name in self.hub_services:
            app_log.debug("Allowing whitelisted Hub service %s", name)
            return model
        else:
            app_log.warning("Not allowing Hub service %s", name)
            raise UserNotAllowed(model)

    if self.hub_users and name in self.hub_users:
        # user in whitelist
        app_log.debug("Allowing whitelisted Hub user %s", name)
        return model
    elif self.hub_groups and set(model['groups']).intersection(self.hub_groups):
        allowed_groups = set(model['groups']).intersection(self.hub_groups)
        app_log.debug(
            "Allowing Hub user %s in group(s) %s",
            name,
            ','.join(sorted(allowed_groups)),
        )
        # group in whitelist
        return model
    else:
        app_log.warning("Not allowing Hub user %s", name)
        raise UserNotAllowed(model)

def backup_mongo_collection(coll, filepath):
    """ dumps all documents in collection in _backups_collections """
    app_log.warning('>>> backup_mongo_collection ... ')
    cursor = coll.find({})
    # use a context manager so the file is flushed and closed, and join the
    # documents with commas so the output is valid JSON (the original wrote a
    # trailing comma before ']' and never closed the file)
    with open(filepath, "w") as backup_file:
        backup_file.write('[')
        backup_file.write(',\n'.join(
            json.dumps(document, indent=4, default=json_util.default)
            for document in cursor))
        backup_file.write(']')

def observe(self):
    '''Collect Ground Truth of what's actually running from Docker and the proxy.'''
    results = yield {
        "docker": self.spawner.list_notebook_servers(self.container_config, all=True),
        "proxy": self._proxy_routes(),
    }

    self.container_ids = set()
    self.living_container_ids = []
    self.stopped_container_ids = []
    self.zombie_container_ids = []

    self.routes = set()
    self.live_routes = []
    self.stale_routes = []
    self.zombie_routes = []

    # Sort Docker results into living and dead containers.
    for container in results["docker"]:
        id = container['Id']
        self.container_ids.add(id)
        if container['Status'].startswith('Up'):
            self.living_container_ids.append(id)
        else:
            self.stopped_container_ids.append(id)

    cutoff = datetime.utcnow() - self.cull_time

    # Sort proxy routes into living, stale, and zombie routes.
    living_set = set(self.living_container_ids)
    for path, route in results["proxy"].items():
        last_activity_s = route.get('last_activity', None)
        container_id = route.get('container_id', None)
        if container_id:
            result = (path, container_id)
            if container_id in living_set:
                try:
                    last_activity = datetime.strptime(last_activity_s, '%Y-%m-%dT%H:%M:%S.%fZ')
                    self.routes.add(result)
                    if last_activity >= cutoff:
                        self.live_routes.append(result)
                    else:
                        self.stale_routes.append(result)
                except ValueError as e:
                    app_log.warning("Ignoring a proxy route with an unparsable activity date: %s", e)
            else:
                # The route doesn't correspond to a living container.
                self.zombie_routes.append(result)

def post(self):
    '''Spawns a brand new server programmatically'''
    try:
        container = self.pool.acquire()
        url = container.path
        if container.token:
            url = url_concat(url, {'token': container.token})
        app_log.info("Allocated [%s] from the pool.", url)
        app_log.debug("Responding with container url [%s].", url)
        self.write({'url': url})
    except spawnpool.EmptyPoolError:
        app_log.warning("The container pool is empty!")
        self.set_status(429)
        self.write({'status': 'full'})

def user_for_cookie(self, encrypted_cookie, use_cache=True):
    """Ask the Hub to identify the user for a given cookie.

    Args:
        encrypted_cookie (str): the cookie value (not decrypted, the Hub will do that)
        use_cache (bool): Specify use_cache=False to skip cached cookie values (default: True)

    Returns:
        user_model (dict): The user model, if a user is identified, None if authentication fails.

        The 'name' field contains the user's name.
    """
    if use_cache:
        cached = self.cookie_cache.get(encrypted_cookie)
        if cached is not None:
            return cached
    try:
        r = requests.get(
            url_path_join(
                self.api_url,
                "authorizations/cookie",
                self.cookie_name,
                quote(encrypted_cookie, safe=''),
            ),
            headers={
                'Authorization': 'token %s' % self.api_token,
            },
        )
    except requests.ConnectionError:
        msg = "Failed to connect to Hub API at %r." % self.api_url
        msg += " Is the Hub accessible at this URL (from host: %s)?" % socket.gethostname()
        if '127.0.0.1' in self.api_url:
            msg += (" Make sure to set c.JupyterHub.hub_ip to an IP accessible to"
                    " single-user servers if the servers are not on the same host as the Hub.")
        raise HTTPError(500, msg)

    if r.status_code == 404:
        data = None
    elif r.status_code == 403:
        app_log.error(
            "I don't have permission to verify cookies,"
            " my auth token may have expired: [%i] %s",
            r.status_code, r.reason)
        raise HTTPError(500, "Permission failure checking authorization, I may need a new token")
    elif r.status_code >= 500:
        app_log.error("Upstream failure verifying auth token: [%i] %s",
                      r.status_code, r.reason)
        raise HTTPError(502, "Failed to check authorization (upstream problem)")
    elif r.status_code >= 400:
        app_log.warning("Failed to check authorization: [%i] %s",
                        r.status_code, r.reason)
        raise HTTPError(500, "Failed to check authorization")
    else:
        data = r.json()

    self.cookie_cache[encrypted_cookie] = data
    return data

def __run(self, ws):
    """Receives messages from a WebSocket and sends them to the client's
    listeners.
    """
    while True:
        msg_str = yield ws.read_message()
        if msg_str is None:
            break

        try:
            msg_json = json.loads(msg_str)
        except (TypeError, ValueError):
            log.error('Invalid event: {0}'.format(msg_str))
            continue

        if not isinstance(msg_json, dict) or 'type' not in msg_json:
            log.error('Invalid event: {0}'.format(msg_str))
            continue

        log.debug('Process ARI event: {0}'.format(msg_json))
        event_type = msg_json['type']
        listeners = self.event_listeners.get(event_type)
        if listeners:
            log.debug('Listeners of event type "{0}": {1}'.format(
                event_type, len(listeners)))

            # Extract objects from the event
            event_model = self.event_models.get(event_type)
            if not event_model:
                log.warning('Cannot find model "{0}" for received event. '
                            'Pass raw event.'.format(event_type))
                event = msg_json
            else:
                event = dict()
                properties = event_model['properties']
                for field, value in msg_json.items():
                    if field in properties:
                        type_ = properties[field]['type']
                        if type_ in CLASS_MAP:
                            value = CLASS_MAP[type_](self, value)
                        event[field] = value

            # Set a result of pending futures.
            # Iterate over a copy: removing entries from the list being
            # iterated over (as the original did) silently skips listeners.
            for listener in list(listeners):
                future, event_filter = listener
                if future.done():
                    listeners.remove(listener)
                    continue
                if event_filter is None or event_filter(event):
                    future.set_result(event)
                    listeners.remove(listener)

def get_json_body(self, allow_empty=False):
    """Retrieve and validate a JSON request body"""
    try:
        return self._json_body
    except AttributeError:
        pass
    if allow_empty and not self.request.body:
        body = self._json_body = {}
        return body
    try:
        body = self._json_body = json.loads(
            self.request.body.decode("utf8", "replace"))
        if not isinstance(body, dict):
            raise ValueError("json body is not a dict")
    except ValueError:
        raise web.HTTPError(
            400,
            f"Request body must be a json dict of the form {make_sample_schema(self.schema)}",
        )
    schema = self.schema
    for field, field_type in schema.items():
        if isinstance(field_type, tuple):
            field_type, required = field_type
        else:
            required = True
        value = body.get(field)
        if value and field_type is datetime.datetime:
            try:
                value = body[field] = parse_date(body[field])
            except Exception as e:
                # include the parse error itself in the log
                app_log.warning(f"Error parsing date {body[field]}: {e}")
        if (field in body and not isinstance(value, field_type)) or (
            required and field not in body
        ):
            sample_schema = make_sample_schema(schema)
            raise web.HTTPError(
                400,
                f"Field {field} must be {type_map.get(field_type, field_type.__name__)}."
                f" A request body should look like: {sample_schema}",
            )
    return body

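# Usage sketch (hypothetical schema): self.schema is assumed to map field
# names to a type, or to a (type, required) tuple; datetime fields arrive as
# ISO-8601 strings and are converted by parse_date above.
#
#     schema = {
#         "name": str,                                  # required
#         "last_activity": (datetime.datetime, False),  # optional
#     }
#
# A body like {"name": "x", "last_activity": "2024-01-01T00:00:00Z"} passes;
# {"name": 5} fails with a 400 naming the offending field.
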
def _cleanup(cls):
    def timeout_waiter(socket, timeout):
        flags = []

        def _ping_waiter(data):
            flags.append(True)
            log.debug('Socket "%r" ping OK', socket)

        def _timeout():
            if not flags:
                try:
                    log.warning('Socket "%r" must be closed', socket)
                    socket.close()
                except Exception as e:
                    log.debug(Lazy(lambda: traceback.format_exc()))
                    log.error("%r", e)

            if hasattr(socket, 'on_pong'):
                delattr(socket, 'on_pong')

        tornado.ioloop.IOLoop.instance().call_later(timeout, _timeout)
        return _ping_waiter

    def do_ping(socket):
        if isinstance(socket.ws_connection, tornado.websocket.WebSocketProtocol13):
            socket.ping("\0" * 8)
            socket.on_pong = timeout_waiter(socket, cls._CLIENT_TIMEOUT)
        else:
            socket.call('ping', data="ping",
                        callback=timeout_waiter(socket, cls._CLIENT_TIMEOUT))

    sleep(cls._CLIENT_TIMEOUT)

    for sid, socket in cls._CLIENTS.iteritems():
        try:
            do_ping(socket)
        except tornado.websocket.WebSocketClosedError:
            socket.close()
            log.warning('Auto close dead socket: %r', socket)
        except Exception as e:
            log.debug(Lazy(lambda: traceback.format_exc()))
            log.error('%r', e)

    log.debug('Cleanup loop is OK.')

def fetch_impl(self, request, response_callback):
    urlinfo = urlparse(request.url)
    host = urlinfo.hostname
    if host not in self.hosts:
        app_log.warning("Not mocking request to %s", request.url)
        return super().fetch_impl(request, response_callback)
    paths = self.hosts[host]
    response = None
    for path_spec, handler in paths:
        if isinstance(path_spec, str):
            if path_spec == urlinfo.path:
                response = handler(request)
                break
        else:
            if path_spec.match(urlinfo.path):
                response = handler(request)
                break

    if response is None:
        response = HTTPResponse(request=request, code=404, reason=request.url)
    elif isinstance(response, int):
        response = HTTPResponse(request=request, code=response)
    elif isinstance(response, bytes):
        response = HTTPResponse(
            request=request,
            code=200,
            buffer=BytesIO(response),
        )
    elif isinstance(response, str):
        response = HTTPResponse(
            request=request,
            code=200,
            buffer=BytesIO(response.encode('utf8')),
        )
    elif isinstance(response, (dict, list)):
        response = HTTPResponse(
            request=request,
            code=200,
            buffer=BytesIO(json.dumps(response).encode('utf8')),
            headers={'Content-Type': 'application/json'},
        )
    response_callback(response)

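# Usage sketch (hypothetical hosts table): self.hosts is assumed to map a
# hostname to a list of (path or compiled regex, handler) pairs; a handler may
# return an int status, bytes/str body, or a dict/list to be JSON-encoded,
# exactly the cases dispatched above.
#
#     self.hosts = {
#         "hub.example.org": [
#             ("/api/user", lambda req: {"name": "alice"}),
#             (re.compile(r"^/api/.*"), lambda req: 404),
#         ],
#     }
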
async def get_group(device_id, select=None):
    """Get a single device group"""
    if select is None:
        attr_names = ",".join(
            extension_attr_name(attr["name"])
            for attr in custom_attributes
            if "Group" in attr["targetObjects"])
        select = f"id,displayName,createdDateTime,{attr_names}"
    groups = await graph_request(
        "/groups",
        params={
            "$filter": f"displayName eq '{device_id}'",
            "$select": select,
        },
    )
    if not groups:
        app_log.warning(f"No group for device {device_id}")
        return
    return groups[0]

def _readNotebooks(self):
    app_log.debug("Reading notebooks from notebook_dir %s", self.notebook_dir)
    if self.notebook_dir is None:
        app_log.warning("No notebooks to load")
        return
    for path in os.listdir(self.notebook_dir):
        if path.endswith(".ipynb"):
            full_path = os.path.join(self.notebook_dir, path)
            nb_contents = self.loader.load(full_path)
            if nb_contents is not None:
                with nb_contents:
                    app_log.debug("loading Notebook: %s", path)
                    notebook = nbformat.read(nb_contents, as_version=4)
                    # Load the pixieapp definition, if any
                    pixieapp_def = self.read_pixieapp_def(notebook)
                    if pixieapp_def is not None and pixieapp_def.is_valid:
                        pixieapp_def.location = full_path
                        self.pixieapps[pixieapp_def.name] = pixieapp_def

def check_request_ip(self):
    """Check network block list, if any"""
    ban_networks = self.settings.get("ban_networks")
    if self.skip_check_request_ip or not ban_networks:
        return
    request_ip = self.request.remote_ip
    match = ip_in_networks(
        request_ip,
        ban_networks,
        min_prefix_len=self.settings["ban_networks_min_prefix_len"],
    )
    if match:
        network, message = match
        app_log.warning(
            f"Blocking request from {request_ip} matching banned network {network}: {message}"
        )
        raise web.HTTPError(403, f"Requests from {message} are not allowed")

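# A minimal sketch of the ip_in_networks helper assumed above, treating
# `networks` as a mapping of ipaddress network objects to ban messages.
# Hypothetical: the real helper may index networks by prefix for speed,
# which is presumably why a min_prefix_len argument exists.
import ipaddress

def ip_in_networks(ip, networks, min_prefix_len=1):
    """Return (network, message) for the first network containing ip, else None."""
    address = ipaddress.ip_address(ip)
    for network, message in networks.items():
        if network.prefixlen >= min_prefix_len and address in network:
            return network, message
    return None
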
def on_connection_closed(self, connection, reply_code, reply_text):
    """This method is invoked by pika when the connection to RabbitMQ is
    closed unexpectedly. Since it is unexpected, we will reconnect to
    RabbitMQ if it disconnects.

    :param pika.connection.Connection connection: The closed connection obj
    :param int reply_code: The server provided reply_code if given
    :param str reply_text: The server provided reply_text if given
    """
    self._channel = None
    if self._closing:
        self._connection.ioloop.stop()
    else:
        app_log.warning(
            'Connection closed, reopening in 5 seconds: (%s) %s',
            reply_code, reply_text)
        self._connection.add_timeout(5, self.reconnect)

def send_mail(self, subject='[系统邮件]', message='', to=None):  # default subject: "[system mail]"
    # avoid a shared mutable default for the recipient list
    if to is None:
        to = []
    smtp_client, res = yield self._smtp_client()
    if smtp_client is not None:
        msg = MIMEText(message, 'html', 'utf-8')
        msg['Subject'] = subject
        msg['Sender'] = self.smtp_user
        msg['To'] = ', '.join(to)
        # display name translates to "automated test system mail <...>"
        msg['From'] = formataddr(parseaddr('自动化测试系统邮件<{}>'.format(self.mail_from)))
        try:
            yield smtp_client.send_message(msg)
            yield smtp_client.quit()
            return True, ''
        except Exception as e:
            log.warning(e)
            yield smtp_client.quit()
            return False, e
    else:
        return False, res

def on_reply(msg):
    if 'msg_id' in msg['parent_header'] and msg['parent_header']['msg_id'] == parent_header:
        if not future.done():
            if "channel" not in msg:
                msg["channel"] = "iopub"
            result_accumulator.append(msg)
            # Complete the future on idle status
            if msg['header']['msg_type'] == 'status' and msg['content']['execution_state'] == 'idle':
                future.set_result(result_extractor(result_accumulator))
            elif msg['header']['msg_type'] == 'error':
                error_name = msg['content']['ename']
                error_value = msg['content']['evalue']
                trace = sanitize_traceback(msg['content']['traceback'])
                future.set_exception(
                    CodeExecutionError(error_name, error_value, trace, code)
                )
    else:
        app_log.warning("Got an orphan message %s", msg['parent_header'])

def result_extractor(self, result_accumulator):
    res = []
    for msg in result_accumulator:
        if msg['header']['msg_type'] == 'stream':
            res.append(msg['content']['text'])
        elif msg['header']['msg_type'] == 'display_data':
            if "data" in msg['content'] and "text/html" in msg['content']['data']:
                res.append(msg['content']['data']['text/html'])
            else:
                app_log.warning("display_data msg not processed: %s", msg)
        elif msg['header']['msg_type'] == 'error':
            error_name = msg['content']['ename']
            error_value = msg['content']['evalue']
            trace = sanitize_traceback(msg['content']['traceback'])
            return 'Error {}: {}\n{}\n'.format(error_name, error_value, trace)
        else:
            app_log.warning("Message type not processed: %s", msg['header']['msg_type'])
    return ''.join(res)

def parse_remote(self, response):
    if self._finished:
        app_log.info('connection canceled')
        return

    try:
        if response.code != 200:
            app_log.warning(
                'Error while getting remote %s '
                'Errors details: (%s: %s) %s',
                response.effective_url, response.code,
                response.reason, response.body)
            return

        base_url = response.effective_url
        base = urlsplit(base_url)

        content_type = response.headers.get('content-type', '')
        if content_type in ('application/x-gzip', ):
            # in this case the URL was a redirection to download
            # a package. For example, sourceforge.
            self.add_version(basename(base.path), '', base_url, base_url)
            return

        if not response.body:
            return

        app_log.debug('parse %s', base_url)
        soup = BeautifulSoup(response.body)
        for anchor in soup.find_all('a'):
            href = anchor.get('href')
            if not href:
                continue
            current_url = urljoin(base_url, href)
            current = urlsplit(current_url)
            if self.is_archive(current.path):
                self.add_version(basename(current.path), '', current_url, href)
            else:
                self.add_link(current_url, current, base, href)
    finally:
        self.fetch_next()

def set_state_cookie(self, handler, next_url=None):
    """Generate an OAuth state and store it in a cookie

    Parameters
    ----------
    handler : RequestHandler
        A tornado RequestHandler
    next_url : str
        The page to redirect to on successful login

    Returns
    -------
    state : str
        The OAuth state that has been stored in the cookie
        (url safe, base64-encoded)
    """
    extra_state = {}
    if handler.get_cookie(self.state_cookie_name):
        # oauth state cookie is already set
        # use a randomized cookie suffix to avoid collisions
        # in case of concurrent logins
        app_log.warning("Detected unused OAuth state cookies")
        cookie_suffix = ''.join(
            random.choice(string.ascii_letters) for i in range(8)
        )
        cookie_name = '{}-{}'.format(self.state_cookie_name, cookie_suffix)
        extra_state['cookie_name'] = cookie_name
    else:
        cookie_name = self.state_cookie_name
    b64_state = self.generate_state(next_url, **extra_state)
    kwargs = {
        'path': self.base_url,
        'httponly': True,
        # Expire oauth state cookie in ten minutes.
        # Usually this will be cleared by completed login
        # in less than a few seconds.
        # OAuth that doesn't complete shouldn't linger too long.
        'max_age': 600,
    }
    if handler.request.protocol == 'https':
        kwargs['secure'] = True
    # load user cookie overrides
    kwargs.update(self.cookie_options)
    handler.set_secure_cookie(cookie_name, b64_state, **kwargs)
    return b64_state

async def proxy(self):
    client = AsyncHTTPClient()
    try:
        app_log.info('Proxying {} {}'.format(self.request.method, self.request.path))
        resp = await client.fetch(
            urljoin(NPM_REGISTRY_URL, self.request.path),
            method=self.request.method,
            body=self.request.body if self.request.method in ['PUT', 'POST'] else None,
            headers={
                name: value
                for name, value in self.request.headers.get_all()
                if name.lower() not in ['host', 'connection', 'user-agent']
            },
            follow_redirects=False,
            raise_error=True)
    except HTTPClientError as err:
        self.set_status(err.code)
        self.finish(err.response.body)
        return
    except Exception as err:
        # exc_info=True logs the current exception; the original passed the
        # sys.exc_info function itself, which logged nothing useful
        app_log.warning('Error connecting to npm', exc_info=True)
        self.send_error(503)
        return
    # TODO: stream instead of buffering
    self.finish(resp.body)

def access_token(request):
    """Handler for access token endpoint

    Checks code and allocates a new token.

    Replies with JSON model for the token.
    """
    assert request.method == 'POST'
    if token_request_style == 'json':
        body = request.body.decode('utf8')
        try:
            body = json.loads(body)
        except ValueError:
            return HTTPResponse(
                request=request,
                code=400,
                reason="Body not JSON: %r" % body,
            )
        else:
            code = body['code']
    else:
        query = urlparse(request.url).query
        if not query:
            query = request.body.decode('utf8')
        query = parse_qs(query)
        if 'code' not in query:
            return HTTPResponse(
                request=request,
                code=400,
                reason="No code in access token request: url=%s, body=%s" % (
                    request.url, request.body,
                ),
            )
        code = query['code'][0]
    if code not in oauth_codes:
        # log the rejected code (the original called app_log.warning() with
        # no arguments, which raises a TypeError)
        app_log.warning("No such code: %s", code)
        return HTTPResponse(
            request=request,
            code=403,
            reason="No such code: %s" % code,
        )
    # consume code, allocate token
    token = uuid.uuid4().hex
    user = oauth_codes.pop(code)
    access_tokens[token] = user
    return {
        'access_token': token,
        'token_type': token_type,
    }

async def is_reachable():
    try:
        r = await client.fetch(url, follow_redirects=False)
        return r
    except HTTPError as e:
        if e.code >= 500:
            # failed to respond properly, wait and try again
            if e.code != 599:
                # we expect 599 for no connection,
                # but 502 or other proxy error is conceivable
                app_log.warning(
                    "Server at %s responded with error: %s", url, e.code)
        else:
            app_log.debug("Server at %s responded with %s", url, e.code)
            return e.response
    except (OSError, socket.error) as e:
        if e.errno not in {errno.ECONNABORTED, errno.ECONNREFUSED, errno.ECONNRESET}:
            app_log.warning("Failed to connect to %s (%s)", url, e)
    return False

def authenticate_client_id(self, client_id, request, *args, **kwargs):
    """Ensure client_id belongs to a non-confidential client.

    A non-confidential client is one that is not required to authenticate
    through other means, such as using HTTP Basic.

    Note, while not strictly necessary it can often be very convenient to set
    request.client to the client object associated with the given client_id.

    :param request: oauthlib.common.Request
    :rtype: True or False

    Method is used by:
        - Authorization Code Grant
    """
    orm_client = (
        self.db.query(orm.OAuthClient)
        .filter_by(identifier=client_id)
        .first()
    )
    if orm_client is None:
        app_log.warning("No such oauth client %s", client_id)
        return False
    request.client = orm_client
    return True

def prepare(self):
    # copy hosts config in case it changes while we are iterating over it
    hosts = dict(self.settings["hosts"])
    if not hosts:
        # no healthy hosts, allow routing to unhealthy 'prime' host only
        hosts = {
            key: host
            for key, host in CONFIG["hosts"].items()
            if host.get("prime")
        }
        app_log.warning(
            f"Using unhealthy prime host(s) {list(hosts)} because zero hosts are healthy"
        )
    self.host_names = [c["url"] for c in hosts.values() if c["weight"] > 0]
    self.host_weights = [c["weight"] for c in hosts.values() if c["weight"] > 0]
    # Combine hostnames and weights into one list
    self.names_and_weights = list(zip(self.host_names, self.host_weights))

def get(self, op='reports', page=1, limit=10):
    if not isinstance(limit, int):
        limit = int(limit)
    else:
        limit = limit if self.limit == '' else int(self.limit)
    try:
        page = int(page)
    except Exception as e:
        log.warning(e)
        page = 1
    page = 1 if int(page) <= 0 else int(page)

    lists = []
    total_page = 1
    if op == 'reports':
        res, total = yield self.setting.get_settings_list(s_type='page_report')
        total_page = int(math.ceil(total / limit))
        for row in res:
            report = json.loads(row.value)
            report['sid'] = row.id
            lists.append(munchify(report))
    elif op not in ['checklinks', 'checkpages']:
        self.redirect('/admin/page-test')
        return

    hosts = ''
    if platform.system().lower() == 'windows':
        host_path = 'C:\\Windows\\System32\\drivers\\etc\\hosts'
    else:
        host_path = '/etc/hosts'
    if os.path.exists(host_path):
        with open(host_path, 'r', encoding='utf8') as fp:
            hosts = fp.read()
        log.info('读取Hosts {} 配置成功'.format(host_path))  # "read hosts config {} successfully"

    argv = dict(title='页面监控',  # "page monitoring"
                op=op,
                lists=lists,
                hosts=hosts,
                total_page=total_page,
                page=page,
                limit=limit)
    argv = dict(self.argv, **argv)
    self.render('admin/page.html', **argv)

def check_origin(self, origin_to_satisfy_tornado=""):
    """Check Origin for cross-site API requests, including websockets

    Copied from WebSocket with changes:

    - allow unspecified host/origin (e.g. scripts)
    - allow token-authenticated requests
    """
    if self.allow_origin == '*':
        return True

    host = self.request.headers.get("Host")
    origin = self.request.headers.get("Origin")

    # If no header is provided, assume it comes from a script/curl.
    # We are only concerned with cross-site browser stuff here.
    if origin is None or host is None:
        return True

    origin = origin.lower()
    origin_host = urlparse(origin).netloc

    # OK if origin matches host
    if origin_host == host:
        return True

    # Check CORS headers
    if self.allow_origin:
        allow = self.allow_origin == origin
    elif self.allow_origin_pat:
        allow = bool(self.allow_origin_pat.match(origin))
    else:
        # No CORS headers deny the request
        allow = False
    if not allow:
        app_log.warning(
            'Blocking Cross Origin API request for %s. Origin: %s, Host: %s',
            self.request.path, origin, host,
        )
    return allow

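# Usage sketch: allow_origin is assumed to be an exact-origin string ('*'
# allows everything), while allow_origin_pat is assumed to be a compiled
# regex, e.g.:
#
#     self.allow_origin_pat = re.compile(r'^https://(.*\.)?example\.org$')
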
def phantom_parsing_call(self, url):
    app_log.info('phantom process start %s' % url)
    # todo proxy servers
    # todo logs path
    js_parser_path = os.path.sep.join([self.application.app_path, 'js_parser'])
    phantom_parser_cmd = "%s --ignore-ssl-errors='true' --load-images='false' %s %swebfont_preview.html" % (
        os.path.sep.join([js_parser_path, 'phantomjs-1.9.8', 'phantomjs']),
        os.path.sep.join([js_parser_path, 'parser2.js']),
        url)
    app_log.info('command: <<%s>>' % phantom_parser_cmd)
    try:
        result, error = yield self.call_subprocess(phantom_parser_cmd)
    except Exception as e:
        app_log.warning('phantom crash: <<%s>>' % e)
        raise gen.Return(False)
    app_log.info('phantom process end: <<%d>> <<%d>>' % (len(result), len(error)))
    self.phantom_logs_save(url, result, error)
    raise gen.Return(self.phantom_result_parse(result))

def token(self, token):
    """Store the hashed value and prefix for a token"""
    self.prefix = token[:self.prefix_length]
    if len(token) >= 32:
        # Tokens are generally UUIDs, which have sufficient entropy on their own
        # and don't need salt & hash rounds.
        # ref: https://security.stackexchange.com/a/151262/155114
        rounds = 1
        salt_bytes = b''
    else:
        # users can still specify API tokens in a few ways,
        # so trigger salt & hash rounds if they provide a short token
        app_log.warning("Applying salt & hash rounds to %sB token" % len(token))
        rounds = self.rounds
        salt_bytes = self.salt_bytes
    self.hashed = hash_token(token, rounds=rounds, salt=salt_bytes,
                             algorithm=self.algorithm)

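# A minimal sketch of what the hash_token helper is assumed to do
# (hypothetical body; only the keyword signature is taken from the call
# above). The stored string self-describes algorithm/rounds/salt so the
# token can be verified later without extra state.
import hashlib

def hash_token(token, rounds=1, salt=b'', algorithm='sha512'):
    """Hash a token, embedding algorithm, rounds, and salt in the result."""
    digest = hashlib.new(algorithm, salt + token.encode('utf8')).hexdigest()
    for _ in range(rounds - 1):
        digest = hashlib.new(algorithm, digest.encode('ascii')).hexdigest()
    return '{}:{}:{}:{}'.format(algorithm, rounds, salt.decode('ascii'), digest)
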
def __init__(self, **kwargs):
    print
    app_log.warning("== UserClass ... ")

    app_log.warning("== UserClass / fields from USER_CORE_FIELDS ... ")
    for user_field in USER_CORE_FIELDS:
        app_log.warning("== UserClass / user_field : %s", user_field)
        self.__dict__[user_field] = ""

    app_log.warning("== UserClass / fields from **kwargs ...")
    for k, v in kwargs.items():
        # print "{} : {}".format(k,v)
        app_log.warning("== UserClass / %s : %s ...", k, v)
        try:
            self.__dict__[k] = v
        except:
            pass
    print

def sig_handler(server, shutdown_waittime, sig, frame):
    io_loop = tornado.ioloop.IOLoop.instance()

    def stop_loop(deadline):
        now = time.time()
        if now < deadline and (io_loop._callbacks or io_loop._timeouts):
            app_log.info('Waiting for next tick')
            io_loop.add_timeout(now + 1, stop_loop, deadline)
        else:
            io_loop.stop()
            app_log.info('Shutdown finally')

    def shutdown():
        app_log.info('Stopping http server')
        server.stop()
        app_log.info('Will shutdown in %s seconds ...', shutdown_waittime)
        stop_loop(time.time() + shutdown_waittime)

    app_log.warning('Caught signal: %s', sig)
    io_loop.add_callback_from_signal(shutdown)

def _check_hub_authorization(self, url, api_token, cache_key=None, use_cache=True):
    """Identify a user with the Hub

    Args:
        url (str): The API URL to check the Hub for authorization
                   (e.g. http://127.0.0.1:8081/hub/api/user)
        cache_key (str): The key for checking the cache
        use_cache (bool): Specify use_cache=False to skip cached cookie values (default: True)

    Returns:
        user_model (dict): The user model, if a user is identified, None if authentication fails.

        Raises an HTTPError if the request failed for a reason other than no such user.
    """
    if use_cache:
        if cache_key is None:
            raise ValueError("cache_key is required when using cache")
        # check for a cached reply, so we don't check with the Hub if we don't have to
        try:
            return self.cache[cache_key]
        except KeyError:
            app_log.debug("HubAuth cache miss: %s", cache_key)

    data = self._api_request(
        'GET',
        url,
        headers={"Authorization": "token " + api_token},
        allow_403=True,
    )
    if data is None:
        app_log.warning("No Hub user identified for request")
    else:
        app_log.debug("Received request from Hub user %s", data)
    if use_cache:
        # cache result
        self.cache[cache_key] = data
    return data

def post(self):
    uri, http_method, body, headers = self.extract_oauth_params()
    referer = self.request.headers.get('Referer', 'no referer')
    full_url = self.request.full_url()
    # trim protocol, which cannot be trusted with multiple layers of proxies anyway
    # Referer is set by browser, but full_url can be modified by proxy layers to appear as http
    # when it is actually https
    referer_proto, _, stripped_referer = referer.partition("://")
    referer_proto = referer_proto.lower()
    req_proto, _, stripped_full_url = full_url.partition("://")
    req_proto = req_proto.lower()
    if referer_proto != req_proto:
        app_log.warning("Protocol mismatch: %s != %s", referer, full_url)
        if req_proto == "https":
            # insecure origin to secure target is not allowed
            raise web.HTTPError(
                403, "Not allowing authorization form submitted from insecure page"
            )
    if stripped_referer != stripped_full_url:
        # OAuth post must be made to the URL it came from
        app_log.error("Original OAuth POST from %s != %s", referer, full_url)
        app_log.error("Stripped OAuth POST from %s != %s",
                      stripped_referer, stripped_full_url)
        raise web.HTTPError(
            403, "Authorization form must be sent from authorization page")

    # The scopes the user actually authorized, i.e. checkboxes
    # that were selected.
    scopes = self.get_arguments('scopes')
    # credentials we need in the validator
    credentials = self.add_credentials()

    try:
        headers, body, status = self.oauth_provider.create_authorization_response(
            uri, http_method, body, headers, scopes, credentials)
    except oauth2.FatalClientError as e:
        raise web.HTTPError(e.status_code, e.description)
    else:
        self.send_oauth_response(headers, body, status)

async def find_groups_to_delete():
    filter = f"{to_delete} eq true"
    if BACKLOG_DATE:
        filter = f"{filter} and {to_delete_date} le {BACKLOG_DATE}"
    count = 0
    async for group in graph.list_groups(filter=filter):
        if not group.get(to_delete):
            raise RuntimeError(
                f"Group {group['displayName']} does not have toDelete set!"
            )
        if not group.get(to_delete_date):
            app_log.warning(
                f"Group {group['displayName']} marked toDelete, but no date! Saving for later."
            )
            await graph.mark_for_deletion(group)
            continue
        count += 1
        if count % 100 == 0:
            app_log.info(f"Found {count} devices to delete")
        yield group
    app_log.info(f"Found {count} total devices to delete")

def get_browser_protocol(request):
    """Get the _protocol_ seen by the browser

    Like tornado's _apply_xheaders, but in the case of multiple proxy hops,
    use the outermost value (what the browser likely sees)
    instead of the innermost value, which is the most trustworthy.

    We care about what the browser sees, not where the request actually
    came from, so trusting possible spoofs is the right thing to do.
    """
    headers = request.headers
    # first choice: Forwarded header
    forwarded_header = headers.get("Forwarded")
    if forwarded_header:
        first_forwarded = forwarded_header.split(",", 1)[0].strip()
        fields = {}
        for field in first_forwarded.split(";"):
            key, _, value = field.partition("=")
            fields[key.strip().lower()] = value.strip()
        if "proto" in fields and fields["proto"].lower() in {"http", "https"}:
            return fields["proto"].lower()
        else:
            app_log.warning(
                f"Forwarded header present without protocol: {forwarded_header}"
            )

    # second choice: X-Scheme or X-Forwarded-Proto
    proto_header = headers.get("X-Scheme", headers.get("X-Forwarded-Proto", None))
    if proto_header:
        proto_header = proto_header.split(",")[0].strip().lower()
        if proto_header in {"http", "https"}:
            return proto_header

    # no forwarded headers
    return request.protocol

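# Example: with multiple proxy hops, the outermost Forwarded entry wins.
# FakeRequest is a hypothetical stand-in for a tornado request object:
#
#     class FakeRequest:
#         protocol = "http"
#         headers = {"Forwarded": "proto=https;host=hub.example.org, proto=http"}
#
#     get_browser_protocol(FakeRequest())  # -> "https"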