def __check_request_limit(self, in_middleware=True):
    endpoint = request.endpoint or ""
    view_func = current_app.view_functions.get(endpoint, None)
    name = (
        "%s.%s" % (view_func.__module__, view_func.__name__)
        if view_func else ""
    )
    # Skip rate limiting for static files, exempt routes/blueprints, filtered
    # requests, or requests where rate limiting has already run.
    if (
        not request.endpoint
        or not self.enabled
        or view_func == current_app.send_static_file
        or name in self._exempt_routes
        or request.blueprint in self._blueprint_exempt
        or any(fn() for fn in self._request_filters)
        or g.get("_rate_limiting_complete")
    ):
        return
    limits, dynamic_limits = [], []

    # this is to ensure backward compatibility with behavior that
    # existed accidentally, i.e::
    #
    #     @limiter.limit(...)
    #     @app.route('...')
    #     def func(...):
    #
    # The above setup would work in pre 1.0 versions because the decorator
    # was not acting immediately and instead simply registering the rate
    # limiting. The correct way to use the decorator is to wrap
    # the limiter with the route, i.e::
    #
    #     @app.route(...)
    #     @limiter.limit(...)
    #     def func(...):

    implicit_decorator = view_func in self.__marked_for_limiting.get(
        name, []
    )

    # Route-specific limits are only collected when not running as the
    # before-request middleware, or when the view was registered via the
    # implicit (pre-1.0) decorator ordering described above.
    if not in_middleware or implicit_decorator:
        limits = (
            name in self._route_limits and self._route_limits[name] or []
        )
        dynamic_limits = []
        if name in self._dynamic_route_limits:
            for lim in self._dynamic_route_limits[name]:
                try:
                    dynamic_limits.extend(list(lim))
                except ValueError as e:
                    self.logger.error(
                        "failed to load ratelimit for view function %s (%s)",
                        name, e
                    )

    if request.blueprint:
        if (
            request.blueprint in self._blueprint_dynamic_limits
            and not dynamic_limits
        ):
            for limit_group in self._blueprint_dynamic_limits[
                request.blueprint
            ]:
                try:
                    dynamic_limits.extend(
                        [
                            Limit(
                                limit.limit, limit.key_func, limit.scope,
                                limit.per_method, limit.methods,
                                limit.error_message, limit.exempt_when
                            )
                            for limit in limit_group
                        ]
                    )
                except ValueError as e:
                    self.logger.error(
                        "failed to load ratelimit for blueprint %s (%s)",
                        request.blueprint, e
                    )
        if request.blueprint in self._blueprint_limits and not limits:
            limits.extend(self._blueprint_limits[request.blueprint])

    try:
        all_limits = []
        # If the storage backend is marked dead, either probe it for recovery
        # or fall back to the in-memory limits.
        if self._storage_dead and self._fallback_limiter:
            if in_middleware and name in self.__marked_for_limiting:
                pass
            else:
                if self.__should_check_backend() and self._storage.check():
                    self.logger.info("Rate limit storage recovered")
                    self._storage_dead = False
                    self.__check_backend_count = 0
                else:
                    all_limits = list(
                        itertools.chain(*self._in_memory_fallback)
                    )
        if not all_limits:
            route_limits = limits + dynamic_limits
            all_limits = (
                list(itertools.chain(*self._application_limits))
                if in_middleware else []
            )
            all_limits += route_limits
            if (
                not route_limits
                and not (in_middleware and name in self.__marked_for_limiting)
                or implicit_decorator
            ):
                all_limits += list(itertools.chain(*self._default_limits))
        self.__evaluate_limits(endpoint, all_limits)
    except Exception as e:  # noqa
        if isinstance(e, RateLimitExceeded):
            six.reraise(*sys.exc_info())
        # Switch to in-memory storage once, then retry the whole check.
        if self._in_memory_fallback and not self._storage_dead:
            self.logger.warning(
                "Rate limit storage unreachable - falling back to"
                " in-memory storage"
            )
            self._storage_dead = True
            self.__check_request_limit(in_middleware)
        else:
            if self._swallow_errors:
                self.logger.exception(
                    "Failed to rate limit. Swallowing error"
                )
            else:
                six.reraise(*sys.exc_info())
# Variant without the in_middleware flag: limits are evaluated inline here
# rather than being delegated to __evaluate_limits.
def __check_request_limit(self):
    endpoint = request.endpoint or ""
    view_func = current_app.view_functions.get(endpoint, None)
    name = (
        "%s.%s" % (view_func.__module__, view_func.__name__)
        if view_func else ""
    )
    if (
        not request.endpoint
        or not self.enabled
        or view_func == current_app.send_static_file
        or name in self._exempt_routes
        or request.blueprint in self._blueprint_exempt
        or any(fn() for fn in self._request_filters)
    ):
        return
    limits = (
        name in self._route_limits and self._route_limits[name] or []
    )
    dynamic_limits = []
    if name in self._dynamic_route_limits:
        for lim in self._dynamic_route_limits[name]:
            try:
                dynamic_limits.extend(list(lim))
            except ValueError as e:
                self.logger.error(
                    "failed to load ratelimit for view function %s (%s)",
                    name, e
                )
    if request.blueprint:
        if (
            request.blueprint in self._blueprint_dynamic_limits
            and not dynamic_limits
        ):
            for limit_group in self._blueprint_dynamic_limits[
                request.blueprint
            ]:
                try:
                    dynamic_limits.extend([
                        Limit(
                            limit.limit, limit.key_func, limit.scope,
                            limit.per_method, limit.methods,
                            limit.error_message, limit.exempt_when
                        )
                        for limit in limit_group
                    ])
                except ValueError as e:
                    self.logger.error(
                        "failed to load ratelimit for blueprint %s (%s)",
                        request.blueprint, e
                    )
        if request.blueprint in self._blueprint_limits and not limits:
            limits.extend(self._blueprint_limits[request.blueprint])

    failed_limit = None
    limit_for_header = None
    try:
        all_limits = []
        if self._storage_dead and self._fallback_limiter:
            if self.__should_check_backend() and self._storage.check():
                self.logger.info("Rate limit storage recovered")
                self._storage_dead = False
                self.__check_backend_count = 0
            else:
                all_limits = list(
                    itertools.chain(*self._in_memory_fallback)
                )
        if not all_limits:
            all_limits = itertools.chain(
                itertools.chain(*self._application_limits),
                (limits + dynamic_limits)
                or itertools.chain(*self._default_limits)
            )
        # Hit each applicable limit; stop at the first one that is exceeded
        # and keep track of the most restrictive limit for response headers.
        for lim in all_limits:
            limit_scope = lim.scope or endpoint
            if lim.is_exempt:
                return
            if (lim.methods is not None
                    and request.method.lower() not in lim.methods):
                return
            if lim.per_method:
                limit_scope += ":%s" % request.method
            limit_key = lim.key_func()
            args = [limit_key, limit_scope]
            if all(args):
                if self._key_prefix:
                    args = [self._key_prefix] + args
                if not limit_for_header or lim.limit < limit_for_header[0]:
                    limit_for_header = [lim.limit] + args
                if not self.limiter.hit(lim.limit, *args):
                    self.logger.warning(
                        "ratelimit %s (%s) exceeded at endpoint: %s",
                        lim.limit, limit_key, limit_scope
                    )
                    failed_limit = lim
                    limit_for_header = [lim.limit] + args
                    break
            else:
                self.logger.error(
                    "Skipping limit: %s. Empty value found in parameters.",
                    lim.limit
                )
                continue
        g.view_rate_limit = limit_for_header
        if failed_limit:
            if failed_limit.error_message:
                exc_description = (
                    failed_limit.error_message
                    if not callable(failed_limit.error_message)
                    else failed_limit.error_message()
                )
            else:
                exc_description = six.text_type(failed_limit.limit)
            raise RateLimitExceeded(exc_description)
    except Exception as e:  # noqa
        if isinstance(e, RateLimitExceeded):
            six.reraise(*sys.exc_info())
        if self._in_memory_fallback and not self._storage_dead:
            self.logger.warning(
                "Rate limit storage unreachable - falling back to"
                " in-memory storage"
            )
            self._storage_dead = True
            self.__check_request_limit()
        else:
            if self._swallow_errors:
                self.logger.exception(
                    "Failed to rate limit. Swallowing error"
                )
            else:
                six.reraise(*sys.exc_info())
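# --- Usage sketch (not part of the Limiter class above) ---
# The backward-compatibility comment in the first variant concerns decorator
# ordering. Below is a minimal, hedged illustration of the supported ordering,
# using the public Flask-Limiter API (Limiter(app, key_func=...) and
# limiter.limit(...)); the app and route names are made up for the example.

from flask import Flask
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address

app = Flask(__name__)
limiter = Limiter(app, key_func=get_remote_address)


@app.route("/ping")           # route decorator outermost
@limiter.limit("10/minute")   # limiter decorator wraps the view function
def ping():
    return "pong"

# With this ordering the limiter-wrapped function is what Flask registers as
# the view, so per-route limits are picked up directly instead of relying on
# the implicit (pre-1.0) registration path handled above.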