def query(self, path):
    """Fetch JSON from the API at *path*, caching the decoded response.

    Args:
        path: URL path appended to ``self.url``.

    Returns:
        The decoded JSON response (served from cache when available).
    """
    url = '%s%s' % (self.url, path)
    resp = cache.get(url)
    # Compare against None explicitly: a cached falsy payload (empty dict
    # or list) is still a valid cache hit and must not trigger a network
    # re-fetch on every call.
    if resp is None:
        resp = json.load(urllib2.urlopen(url))
        cache.set(url, resp, timeout=60)
    return resp
def get_feed(self):
    """Fetch and parse the feature feed for this class.

    The parsed feed is cached under its URL; a cache hit is returned
    without hitting the network.
    """
    feed_url = self._base_url.format(self)
    cached_feed = cache.get(feed_url)
    if cached_feed:
        return cached_feed
    parsed = feedparser.parse(urllib2.urlopen(feed_url))
    cache.set(feed_url, parsed, timeout=CACHE_EXPIRATION)
    return parsed
def cached_func(*args, **kwargs):
    """Memoizing wrapper: return the cached result for these arguments,
    computing and caching it on a miss.

    Returns:
        The (possibly cached) result of ``func(*args, **kwargs)``.
    """
    # Include keyword *names* as well as values: str(kwargs.values())
    # would give f(a=1) and f(b=1) the same cache key whenever the values
    # match.  Sorting the items also makes the key independent of dict
    # iteration order.
    keys = [func.__name__, str(args), str(sorted(kwargs.items()))]
    cache_key = ';'.join(keys)
    cached = cache.get(cache_key)
    # 'is not None' so legitimately falsy results ('', 0, [], False) are
    # served from the cache instead of being recomputed on every call.
    if cached is not None:
        return cached
    result = func(*args, **kwargs)
    cache.set(cache_key, result, timeout=timeout)
    return result
def get_all(self):
    """Return every enabled sample, cached under the 'samples' key."""
    samples = cache.get('samples')
    if samples is not None:
        return samples
    samples = self.query.filter_by(enabled=True).all()
    cache.set('samples', samples,
              timeout=app.config['CACHE_MINUTES'] * 60)
    return samples
def get_entries():
    """Return entry dicts sorted newest-first, rebuilding the cached list
    when new entry files appear or nothing is cached yet.
    """
    # find_new_entries() is always evaluated first, exactly as in the
    # original short-circuit condition.
    cached = cache.get("entries")
    if not find_new_entries() and cached:
        return cached
    entries = [entry_dict(path) for path in glob.glob("entries/*.entry")]
    entries.sort(key=lambda entry: entry["mtime"], reverse=True)
    cache.set("entries", entries)
    return entries
def get_query(self):
    """Execute this query against the API and return the decoded JSON.

    Responses are cached per full URL (base URL plus encoded query
    parameters).

    Returns:
        The decoded JSON response (served from cache when available).
    """
    formatted_url = self._base_url.format(self)
    params = urlencode(self.query_params)
    url = '%s?%s' % (formatted_url, params)
    resp = cache.get(url)
    # 'is None': an empty (falsy) JSON payload is still a cache hit and
    # must not cause a network re-fetch on every call.
    if resp is None:
        resp = json.load(urllib2.urlopen(url))
        cache.set(url, resp, timeout=CACHE_EXPIRATION)
    return resp
def get_data(self):
    """Return parsed members of the configured wiki category, sorted
    newest-first by categorization timestamp.
    """
    self.lang = self.fields['lang']
    self.category = self.fields['category']
    self.wiki = '%s.wikipedia.org' % self.fields['lang']
    # NOTE(review): self.limit is part of the cache key but is never
    # passed to get_category_members -- confirm whether the fetch itself
    # should be limited.
    cache_name = 'cat-%s-%s-%s' % (self.category, self.lang, self.limit)
    res = cache.get(cache_name)
    # 'is None': an empty member list is a valid cached result and must
    # not trigger a re-fetch on every request.
    if res is None:
        res = get_category_members(self.category, lang=self.lang)
        cache.set(cache_name, res, timeout=CACHE_EXPIRATION)
    # Sorting outside the cache branch keeps cached results ordered too.
    res.sort(key=lambda rev: rev['cl_timestamp'], reverse=True)
    return map(self.parse_result, res)
def _get_ngram_model(bigrams):
    """Build (or fetch from cache) an NLTK n-gram model over all samples.

    Args:
        bigrams: truthy -> train a bigram model (order 2); falsy -> trigram
            model (order 3).

    Returns:
        The cached or freshly trained ``nltk.NgramModel``; presumably None
        when no samples exist yet -- confirm callers handle that.
    """
    #NLTK produces a LOT of warnings - don't mess with my error log
    warnings.simplefilter("ignore")
    cached = cache.get('ngram_model')
    if cached is None:
        samples = Sample.get_all()
        if samples:
            # Join every sample into one text and tokenize on word/punct
            # boundaries before training.
            text = [unicode(s) for s in samples]
            tokenizer = nltk.tokenize.WordPunctTokenizer()
            tokenized = tokenizer.tokenize(' '.join(text))
            # 3 - int(bool(bigrams)): order 2 when bigrams is truthy,
            # otherwise order 3.
            cached = nltk.NgramModel(3-int(bool(bigrams)), tokenized)
            # Only cache when a model was actually trained, so an empty
            # sample set is retried on the next call.
            cache.set('ngram_model', cached,
                      timeout=app.config['CACHE_MINUTES'] * 60)
    return cached
def get_data(self):
    """Return parsed revisions of members of the configured wiki category,
    sorted newest-first by recent-change timestamp.
    """
    self.lang = self.fields['lang']
    self.category = self.fields['category']
    self.wiki = '%s.wikipedia.org' % self.fields['lang']
    # NOTE(review): self.limit is part of the cache key but is never
    # passed to get_category_member_revisions -- confirm whether the
    # fetch itself should be limited.
    cache_name = 'cat-revs-%s-%s-%s' % (self.category, self.lang, self.limit)
    res = cache.get(cache_name)
    # 'is None': an empty revision list is a valid cached result and must
    # not trigger a re-fetch on every request.
    if res is None:
        res = get_category_member_revisions(self.category, lang=self.lang)
        cache.set(cache_name, res, timeout=CACHE_EXPIRATION)
    # Sorting outside the cache branch keeps cached results ordered too.
    res.sort(key=lambda rev: rev['rc_timestamp'], reverse=True)
    return map(self.parse_result, res)
def open_session(self, app, request):
    # type: (flask.Flask, flask.wrappers.Request) -> PhpBB3Session
    """Open a phpBB3-backed session for this request.

    Resolves the phpBB3 session id from the 'sid' query parameter or the
    session cookie, loads the matching phpBB3 user (falling back to the
    anonymous user, id 1), seeds the session with the user's fields as
    read-only properties, and finally overlays any extra data stored in
    the local cache backend under 'sessions_<session_id>'.

    Raises:
        ValueError: when the app has no ``phpbb3`` attribute configured.
    """
    cookie_name = app.config.get('PHPBB3_COOKIE_NAME', 'phpbb3_')
    if not hasattr(app, 'phpbb3'):
        raise ValueError('App not properly configured, phpbb3 is missing!')
    phpbb3 = app.phpbb3  # type: flask_phpbb3.PhpBB3
    # Query parameter takes precedence over the cookie.
    session_id = request.args.get('sid', type=str)\
        or request.cookies.get(cookie_name + 'sid', None)
    if not session_id:
        # Normalize falsy values ('' from an empty cookie) to None.
        session_id = None
    user = None
    if session_id:
        # Try to fetch session
        user = phpbb3.get_session(session_id=session_id)
        if user and 'username' in user:
            # Username comes back as bytes; decode leniently to unicode.
            user['username'] = user['username'].decode('utf-8', 'ignore')
    if not user:
        # Use anonymous user
        user = phpbb3.get_user(
            user_id=1,
            cache=True,
            cache_ttl=ANONYMOUS_CACHE_TTL
        )
    # Create session
    session = self.session_class()
    # Set session data
    if isinstance(user, dict) and user:
        # Fields sourced from phpBB3 must not be overwritten by app code.
        session._read_only_properties = set(user.keys())
        session.update(user)
    # Read from local storage backend
    if 'session_id' in session:
        cache = self._cache(app)
        data = cache.get('sessions_' + session['session_id'])
        try:
            # 'or ""' makes a cache miss (None) raise ValueError below.
            data = json.loads(data or '')
        except ValueError:
            data = None
        if not isinstance(data, dict):
            data = {}
    else:
        data = {}
    session.update(data)
    return session
def index(path):
    """Serve the feed for *path*, regenerating it once the adapter's
    cache timeout has elapsed.

    Returns:
        The feed body, or a ('No matching adapter found', 404) tuple when
        no adapter is registered for *path*.
    """
    if path not in adapters:
        return 'No matching adapter found', 404
    arguments, keyword_arguments = _split_request_args(flask.request.args)
    instance = adapters[path](*arguments, **keyword_arguments)
    # Using a cache key consisting of the class object and the request arguments
    # allows caching of different routes leading to the same adapter, for
    # example the primary adapter.
    cache_key = (adapters[path], flask.request.args)
    if cache_key in cached_at:
        if time.time() - cached_at[cache_key] < instance.cache_timeout:
            cached_feed = cache.get(cache_key)
            # The backend may have evicted the entry even though cached_at
            # still marks it fresh; only serve a real hit, otherwise fall
            # through and regenerate instead of returning None to Flask.
            if cached_feed is not None:
                return cached_feed
    feed = instance.to_feed()
    cache.set(cache_key, feed)
    cached_at[cache_key] = time.time()
    return feed
def caching_preprocessor(**kwargs):
    """Short-circuit the request with a cached JSON response when one
    exists; otherwise let normal processing continue.
    """
    hit = cache.get(cache_key())
    if not hit:
        return
    raise flask_restless.ProcessingException(
        response=json.loads(hit), code=200)
import flask import flask_sqlalchemy import flask_restless import werkzeug.contrib.cache import requests app = flask.Flask(__name__) CACHE_TIMEOUT = 60 try: cache = werkzeug.contrib.cache.MemcachedCache( ['127.0.0.1:11211'], default_timeout=CACHE_TIMEOUT, key_prefix='snh48live-filter') cache.get('test') # Test connection except Exception: cache = werkzeug.contrib.cache.SimpleCache(default_timeout=CACHE_TIMEOUT) # Monkeypatch flask_restless.ProcessingException and flask_restless.views.catch_processing_exceptions. # This serves two purposes: # 1. Stop dumping tracebacks to logs; # 2. Allow a bolted-on caching mechanism through early exit with ProcessingException. # Kudos to https://gist.github.com/mmautner/cd60fdd45934e5aa494d for the hack. # https://github.com/jfinkels/flask-restless/blob/0.17.0/flask_restless/views.py#L85-L103 import werkzeug.exceptions class MonkeypatchProcessingException(werkzeug.exceptions.HTTPException): def __init__(self,
def download():
    """Return the cached SQL records as an .xls download response."""
    # NOTE(review): assumes 'sql_data' was populated earlier in the
    # request flow -- a cache miss would pass None through here; confirm.
    records = cache.get('sql_data')
    return excel.make_response_from_records(records, 'xls')