def load_from_cache(self, key, eventclass):
    """Restore cached state from 'anon/<key>', rebuilding events via eventclass."""
    result = cache.get('anon/' + key)
    if result:
        for attr in result:  # Renamed from `key`, which shadowed the parameter
            if attr != 'events':
                setattr(self, attr, result[attr])
            else:
                self.events = [eventclass(**kwargs) for kwargs in result[attr]]

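# For context, a hedged sketch of the cache shape load_from_cache expects under
# 'anon/<key>': a flat dict of attributes, with 'events' stored as a list of
# kwargs dicts. This companion writer is an assumption for illustration; the
# `user_agent` attribute and `to_dict()` method are hypothetical names.
#
#   def save_to_cache(self, key):
#       cache.set('anon/' + key, {
#           'user_agent': self.user_agent,
#           'events': [event.to_dict() for event in self.events],
#       }, timeout=3600)
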
def _user_flags(self):
    cache_key = 'user/flags/' + str(self.id)
    flags = cache.get(cache_key)
    if flags is None:  # `not flags` would needlessly recompute a cached empty dict
        flags = {}
        for key, func in UserFlags.__dict__.items():
            if isinstance(func, UserFlag):
                flags[key] = func.for_user(self)
        cache.set(cache_key, flags, timeout=3600)  # Cache for one hour
    return flags

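# A minimal sketch of the descriptor pattern _user_flags iterates over: flags
# are UserFlag instances collected from UserFlags.__dict__. The real classes
# are defined elsewhere; this constructor, the `title` field, and the
# `created_recently()` call are assumptions for illustration only.
#
#   class UserFlag:
#       def __init__(self, title, func):
#           self.title = title
#           self.func = func  # Callable taking a user, returning a bool
#
#       def for_user(self, user):
#           return self.func(user)
#
#   class UserFlags:
#       is_new = UserFlag("New user", lambda user: user.created_recently())
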
def _user_has_boards(self):
    # Cached version of User.boards()
    cache_key = 'user/board/count/' + str(self.id)
    count = cache.get(cache_key)
    if count is None:  # `not count` would re-run the query whenever zero is cached
        count = Board.query.filter(
            Board.userid.in_(self.user_organizations_owned_ids())
        ).options(defer(Board.description)).count()
        cache.set(cache_key, count, timeout=300)
    return bool(count)

def get_job_filters():
    basequery = getposts(showall=True, order=False, limit=False)
    filters = g.get('event_data', {}).get('filters', {})
    cache_key = (
        'jobfilters/'
        + (g.board.name + '/' if g.board else '')
        # repr() returns str; hashlib.sha1 needs bytes, so encode first
        + hashlib.sha1(repr(filters).encode()).hexdigest()
    )
    result = cache.get(cache_key)
    if not result:
        result = dict(
            job_location_filters=filter_locations(g.board, filters),
            job_type_filters=filter_types(basequery, board=g.board, filters=filters),
            job_category_filters=filter_categories(
                basequery, board=g.board, filters=filters
            ),
        )
        cache.set(cache_key, result, timeout=3600)
    return result

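# Caveat: repr() of a dict depends on insertion order, so logically equal
# filter dicts can produce different cache keys. A sketch of a stable key,
# assuming `filters` is JSON-serializable (an alternative, not what
# get_job_filters does above):
#
#   import json
#   digest = hashlib.sha1(
#       json.dumps(filters, sort_keys=True).encode()
#   ).hexdigest()
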
def shortlink(url):
    """Return a short link suitable for SMS."""
    cache_key = 'shortlink/' + blake2b(url.encode(), digest_size=16).hexdigest()
    shorty = cache.get(cache_key)
    if shorty:
        return shorty
    try:
        shorty = pyshorteners.Shortener().isgd.short(url)
        cache.set(cache_key, shorty, timeout=86400)  # Cache for a day
    except pyshorteners.exceptions.ShorteningErrorException as e:
        current_app.logger.error("Shortlink exception %s", str(e))
        shorty = url  # Fall back to the original URL
    return shorty

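# Usage sketch: shortlink() returns the cached short URL on a hit and falls
# back to the original URL when is.gd errors out, so the result can be used
# unconditionally. The URL and message below are illustrative.
#
#   sms_body = "Confirm here: " + shortlink("https://example.com/c?token=...")
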
def make_cached_token(payload, timeout=24 * 60 * 60, reserved=None):
    """
    Make a short text token that caches data with a timeout period.

    :param dict payload: Data to save against the token
    :param int timeout: Timeout period for token in seconds (default 24 hours)
    :param set reserved: Reserved words that should not be used as token
    """
    while True:
        token = urlsafe_b64encode(urandom(TOKEN_BYTES_LEN)).decode().rstrip('=')
        if reserved and token in reserved:
            continue  # Reserved word, try again
        existing = cache.get(text_token_prefix + token)
        if existing:
            continue  # Token in use, try again
        break
    cache.set(text_token_prefix + token, payload, timeout=timeout)
    return token

def retrieve_cached_token(token):
    return cache.get(text_token_prefix + token)

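# Roundtrip sketch for the two token helpers above; the payload keys and the
# one-hour timeout are illustrative assumptions.
#
#   token = make_cached_token({'userid': 42, 'action': 'verify'}, timeout=3600)
#   # ... deliver the token to the user, e.g. in an SMS link ...
#   payload = retrieve_cached_token(token)  # None once the hour has passed
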
def validate_rate_limit(
    resource, identifier, attempts, timeout, token=None, validator=None
):
    """
    Confirm the rate limit has not been reached for the given string identifier,
    number of attempts, and timeout period. Uses a simple limiter: once the number
    of attempts is reached, no further attempts can be made for timeout seconds.

    Aborts with HTTP 429 in case the limit has been reached.

    :param str resource: Resource being rate limited
    :param str identifier: Identifier for entity being rate limited
    :param int attempts: Number of attempts allowed
    :param int timeout: Duration in seconds to block after attempts are exhausted
    :param str token: For advanced use, a token to check against for future calls
    :param validator: A validator that receives token and previous token, and
        returns two bools ``(count_this, retain_previous_token)``

    For an example of how the token and validator are used, see
    :func:`progressive_rate_limit_validator` and its users.
    """
    statsd.set(
        'rate_limit',
        blake2b(identifier.encode(), digest_size=32).hexdigest(),
        rate=1,
        tags={'resource': resource},
    )
    cache_key = 'rate_limit/v1/%s/%s' % (resource, identifier)
    cache_value = cache.get(cache_key)
    if cache_value is None:
        count, cache_token = None, None
        statsd.incr('rate_limit', tags={'resource': resource, 'status_code': 201})
    else:
        count, cache_token = cache_value
    if not count or not isinstance(count, int):
        count = 0
    if count >= attempts:
        statsd.incr('rate_limit', tags={'resource': resource, 'status_code': 429})
        abort(429)
    if validator is not None:
        do_increment, retain_token = validator(token, cache_token)
        if retain_token:
            token = cache_token
        if do_increment:
            current_app.logger.debug(
                "Rate limit +1 (validated with %s, retain %r) for %s/%s",
                cache_token, retain_token, resource, identifier,
            )
            count += 1
            statsd.incr('rate_limit', tags={'resource': resource, 'status_code': 200})
        else:
            current_app.logger.debug(
                "Rate limit +0 (validated with %s, retain %r) for %s/%s",
                cache_token, retain_token, resource, identifier,
            )
    else:
        current_app.logger.debug("Rate limit +1 for %s/%s", resource, identifier)
        count += 1
        statsd.incr('rate_limit', tags={'resource': resource, 'status_code': 200})
    # Always set count, regardless of validator output
    current_app.logger.debug(
        "Setting rate limit usage for %s/%s to %s with token %s",
        resource, identifier, count, token,
    )
    cache.set(cache_key, (count, token), timeout=timeout)
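
# A sketch of a validator compatible with validate_rate_limit's contract: it
# receives (token, previous_token) and returns (count_this,
# retain_previous_token). This illustrates the interface only; the actual
# progressive_rate_limit_validator referenced in the docstring may differ.

def sample_validator(token, cache_token):
    # Retrying with the same token (e.g. resubmitting one form) is free
    if token is not None and token == cache_token:
        return (False, True)  # Don't count; keep the stored token
    # A new token is a fresh attempt: count it and store the new token
    return (True, False)

# Hypothetical call site, limiting OTP attempts per phone number:
#
#   validate_rate_limit(
#       'login/otp', phone_number, attempts=3, timeout=600,
#       token=session_token, validator=sample_validator,
#   )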