def query(self, path):
    """Fetch JSON from the service at *path*, caching responses by URL.

    :param path: URL path appended to ``self.url``.
    :returns: the decoded JSON payload (cached for 60 seconds).
    """
    url = '%s%s' % (self.url, path)
    resp = cache.get(url)
    # BUG FIX: was `if not resp` -- a legitimately falsy payload such as
    # {} or [] was never treated as a cache hit and got re-fetched on
    # every call. `is None` distinguishes "missing" from "falsy".
    if resp is None:
        resp = json.load(urllib2.urlopen(url))
        cache.set(url, resp, timeout=60)
    return resp
def get_entries():
    """Return all blog entries, newest first, backed by the cache.

    The entry list is rebuilt from ``entries/*.entry`` files whenever new
    entries are detected on disk or the cache is empty/expired.
    """
    entries = cache.get("entries")
    # find_new_entries() is always evaluated first, exactly as before,
    # so new files on disk force a rebuild even on a cache hit.
    needs_rebuild = find_new_entries() or not entries
    if needs_rebuild:
        entries = sorted(
            (entry_dict(path) for path in glob.glob("entries/*.entry")),
            key=lambda entry: entry["mtime"],
            reverse=True,
        )
        cache.set("entries", entries)
    return entries
def cached_func(*args, **kwargs):
    """Memoizing wrapper: return the cached result for (func, args, kwargs).

    Cache-key fixes versus the original:

    - kwargs are keyed by their sorted ``(name, value)`` pairs, so calls
      like ``f(a=1)`` and ``f(b=1)`` no longer collide (the old key used
      only ``kwargs.values()``, dropping the names, and depended on
      dict iteration order).
    - a cached falsy result (``0``, ``''``, ``{}`` ...) is now returned
      instead of being silently recomputed on every call.

    NOTE: the key format change invalidates previously stored entries
    once; thereafter behavior is strictly better.
    """
    keys = [func.__name__, str(args), str(sorted(kwargs.items()))]
    cache_key = ';'.join(keys)
    cached = cache.get(cache_key)
    if cached is not None:
        return cached
    result = func(*args, **kwargs)
    cache.set(cache_key, result, timeout=timeout)
    return result
def get_all(self):
    """Return every enabled sample, caching the query result.

    The database is hit only on a cache miss; results live in the cache
    for ``CACHE_MINUTES`` minutes.
    """
    samples = cache.get('samples')
    if samples is not None:
        return samples
    samples = self.query.filter_by(enabled=True).all()
    ttl = app.config['CACHE_MINUTES'] * 60
    cache.set('samples', samples, timeout=ttl)
    return samples
def get_feed(self):
    """Fetch and parse the feature feed for this class.

    The parsed feed is cached under its URL for ``CACHE_EXPIRATION``
    seconds; a cache hit returns immediately without a network round trip.
    """
    url = self._base_url.format(self)
    feed = cache.get(url)
    if feed:
        return feed
    feed = feedparser.parse(urllib2.urlopen(url))
    cache.set(url, feed, timeout=CACHE_EXPIRATION)
    return feed
def get_query(self):
    """Fetch JSON for this query from the remote API, cached by full URL.

    :returns: the decoded JSON payload (cached for ``CACHE_EXPIRATION``
        seconds, keyed by the fully-qualified request URL).
    """
    formatted_url = self._base_url.format(self)
    params = urlencode(self.query_params)
    url = '%s?%s' % (formatted_url, params)
    resp = cache.get(url)
    # BUG FIX: was `if not resp` -- a falsy JSON payload ({} or []) was
    # re-fetched on every call instead of being served from the cache.
    if resp is None:
        resp = json.load(urllib2.urlopen(url))
        cache.set(url, resp, timeout=CACHE_EXPIRATION)
    return resp
def get_data(self):
    """Return parsed members of the configured category, newest first.

    Results are cached per (category, lang, limit) triple for
    ``CACHE_EXPIRATION`` seconds and re-sorted on every call.
    """
    fields = self.fields
    self.lang = fields['lang']
    self.category = fields['category']
    self.wiki = '%s.wikipedia.org' % fields['lang']
    key = 'cat-%s-%s-%s' % (self.category, self.lang, self.limit)
    members = cache.get(key)
    if not members:
        members = get_category_members(self.category, lang=self.lang)
        cache.set(key, members, timeout=CACHE_EXPIRATION)
    members.sort(key=lambda member: member['cl_timestamp'], reverse=True)
    return map(self.parse_result, members)
def _get_ngram_model(bigrams):
    """Build (or fetch from the cache) an NLTK n-gram model over all samples.

    :param bigrams: truthy selects an order-2 (bigram) model, falsy an
        order-3 (trigram) model.
    :returns: the trained ``nltk.NgramModel``, or ``None`` when there are
        no samples to train on.
    """
    cached = cache.get('ngram_model')
    if cached is None:
        samples = Sample.get_all()
        if samples:
            text = [unicode(s) for s in samples]
            tokenizer = nltk.tokenize.WordPunctTokenizer()
            tokenized = tokenizer.tokenize(' '.join(text))
            order = 3 - int(bool(bigrams))  # 2 for bigrams, 3 otherwise
            # NLTK produces a LOT of warnings while training. FIX: scope
            # the "ignore" filter to this call -- the original
            # simplefilter("ignore") permanently mutated the process-wide
            # warning filters, silencing every later warning in the app.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                cached = nltk.NgramModel(order, tokenized)
            cache.set('ngram_model', cached,
                      timeout=app.config['CACHE_MINUTES'] * 60)
    return cached
def get_data(self):
    """Return parsed revisions of the configured category's members,
    newest first.

    Results are cached per (category, lang, limit) triple for
    ``CACHE_EXPIRATION`` seconds and re-sorted on every call.
    """
    fields = self.fields
    self.lang = fields['lang']
    self.category = fields['category']
    self.wiki = '%s.wikipedia.org' % fields['lang']
    key = 'cat-revs-%s-%s-%s' % (self.category, self.lang, self.limit)
    revisions = cache.get(key)
    if not revisions:
        revisions = get_category_member_revisions(self.category,
                                                  lang=self.lang)
        cache.set(key, revisions, timeout=CACHE_EXPIRATION)
    revisions.sort(key=lambda rev: rev['rc_timestamp'], reverse=True)
    return map(self.parse_result, revisions)
def result():
    """Render the activity report for the filters POSTed from the form.

    Reads the filter fields from the submitted form, executes the report
    query, caches the row dicts under 'sql_data' for 5 minutes, and
    renders result.html with the rows.
    """
    subscriber = request.form['subscriber']
    serviceNumber = request.form['serviceNumber']
    timestamp_start = request.form['timestamp_start']
    timestamp_finish = request.form['timestamp_finish']
    status = request.form['status']
    # NOTE(review): actQuery builds the SQL statement from raw,
    # user-controlled form values. If it uses string interpolation rather
    # than bound parameters this is SQL-injectable -- confirm actQuery
    # parameterizes/escapes its arguments.
    query = actQuery(subscriber, serviceNumber, timestamp_start, timestamp_finish, status)
    cur = db.engine.execute(query)
    # Map positional row columns into named dicts for the template.
    results = [dict(id=row[0], subscriber=row[1], serviceNumber=row[2], reporttime=row[3], timestamp=row[4], status=row[5]) for row in cur.fetchall()]
    cache.set('sql_data', results, timeout=5 * 60)
    return render_template('result.html', results=results)
def index(path):
    """Serve the feed for *path*, with a time-bounded per-adapter cache.

    :param path: route segment identifying a registered adapter.
    :returns: the feed body, or a ``(message, 404)`` tuple when no
        adapter matches *path*.
    """
    if path not in adapters:  # idiom fix: was `not path in adapters`
        return 'No matching adapter found', 404
    arguments, keyword_arguments = _split_request_args(flask.request.args)
    adapter_class = adapters[path]
    instance = adapter_class(*arguments, **keyword_arguments)
    # Using a cache key consisting of the class object and the request
    # arguments allows caching of different routes leading to the same
    # adapter, for example the primary adapter.
    cache_key = (adapter_class, flask.request.args)
    if cache_key in cached_at:
        if time.time() - cached_at[cache_key] < instance.cache_timeout:
            cached = cache.get(cache_key)
            # ROBUSTNESS FIX: the backing cache may have evicted the
            # entry even though `cached_at` still says it is fresh; the
            # original returned that None straight to the client.
            # Fall through and regenerate the feed instead.
            if cached is not None:
                return cached
    feed = instance.to_feed()
    cache.set(cache_key, feed)
    cached_at[cache_key] = time.time()
    return feed
def caching_postprocessor(result, **kwargs):
    """Persist the JSON-serialized *result* in the cache.

    Extra keyword arguments from the postprocessor hook are accepted and
    ignored.
    """
    key = cache_key()
    serialized = json.dumps(result)
    cache.set(key, serialized)