def __iter__(self):
    """Iterate the query, serving results from Beaker when caching
    attributes have been configured on this query; otherwise fall
    through to the plain Query iterator.
    """
    if not hasattr(self, '_cache_parameters'):
        return Query.__iter__(self)
    # Populate the cache on miss by materializing the base iterator.
    return self.get_value(createfunc=lambda: list(Query.__iter__(self)))
def __iter__(self):
    """Iterate the query, pulling results from Beaker when caching
    attributes have been configured; cached (detached) objects are
    merged back into the current session before being returned.
    """
    if not hasattr(self, '_cache_parameters'):
        return Query.__iter__(self)
    region, key = _get_cache_parameters(self)
    cached = region.get_value(
        key, createfunc=lambda: list(Query.__iter__(self)))
    # Merge the cached instances into this session without re-loading.
    return self.merge_result(cached, load=False)
def __iter__(self):
    """Iterate the query; when ``self.cachekey`` is set, serve detached
    results from the module-level ``_cache`` and merge them back into
    the current session on the way out.
    """
    if not hasattr(self, 'cachekey'):
        return Query.__iter__(self)
    try:
        cached = _cache[self.cachekey]
    except KeyError:
        cached = list(Query.__iter__(self))
        # Detach each instance so the cached copies are independent of
        # this session's lifecycle.
        for item in cached:
            self.session.expunge(item)
        _cache[self.cachekey] = cached
    # Lazily re-associate each cached instance with the current session.
    return iter(self.session.merge(item, dont_load=True) for item in cached)
def __iter__(self):
    """Iterate the query, memoizing results on the owning session under
    ``self.cachekey`` when that attribute is present.
    """
    if not hasattr(self, 'cachekey'):
        return Query.__iter__(self)
    try:
        session_cache = self.session._cache
    except AttributeError:
        # First use on this session: lazily attach the cache dict.
        session_cache = {}
        self.session._cache = session_cache
    try:
        results = session_cache[self.cachekey]
    except KeyError:
        results = list(Query.__iter__(self))
        session_cache[self.cachekey] = results
    return iter(results)
def __iter__(self):
    """Iterate the query, serving results from dogpile when a cache
    region has been configured on this query.

    Note: this does *not* detach the loaded objects from the current
    session.  With an in-process backend (such as "memory") that lives
    beyond the scope of the current session's transaction, the cached
    objects may be expired; to keep independent copies in the cache,
    expunge() each loaded item from the session before returning.
    """
    if not hasattr(self, '_cache_region'):
        return Query.__iter__(self)
    # Populate the region on miss by materializing the base iterator.
    return self.get_value(createfunc=lambda: list(Query.__iter__(self)))
def __iter__(self):
    """Log the query text, its EXPLAIN plan, and its execution time,
    then return the base Query iterator's result.
    """
    log.info('Query:\n\t%s' % unicode(self).replace('\n', '\n\t'))
    # Run EXPLAIN against the same statement and flatten the plan rows.
    plan_rows = self.session.execute(Explain(self)).fetchall()
    plan_text = '\n\t'.join(
        '|'.join(str(col) for col in row) for row in plan_rows)
    started = time()
    rows = Query.__iter__(self)
    log.info('Query Time: %0.3f Explain Query Plan:\n\t%s'
             % (time() - started, plan_text))
    return rows
def __iter__(self):
    # Log the rendered SQL, tab-indented for readability.
    # NOTE(review): uses the ``unicode`` builtin — Python 2 only;
    # confirm the target runtime before porting.
    log.info("Query:\n\t%s" % unicode(self).replace("\n", "\n\t"))
    # Run EXPLAIN on the same statement and flatten the plan rows
    # into a pipe-separated, tab-indented text block.
    explain = self.session.execute(Explain(self)).fetchall()
    text = "\n\t".join("|".join(str(x) for x in line) for line in explain)
    before = time()
    result = Query.__iter__(self)
    # Report wall-clock execution time alongside the captured plan.
    log.info("Query Time: %0.3f Explain Query Plan:\n\t%s" % (time() - before, text))
    return result
def __iter__(self):
    # Log the rendered SQL, then its EXPLAIN plan and execution time.
    # NOTE(review): brace-style placeholders with positional args only
    # interpolate when ``logger`` is a loguru/structlog-style logger;
    # the stdlib ``logging`` module would emit the braces literally —
    # confirm which logger this is.
    logger.info('Query:\n\t{}', str(self).replace('\n', '\n\t'))
    explain = self.session.execute(Explain(self)).fetchall()
    text = '\n\t'.join('|'.join(str(x) for x in line) for line in explain)
    before = time()
    result = Query.__iter__(self)
    logger.info('Query Time: {:0.3f} Explain Query Plan: {}', time() - before, text)
    return result
def __iter__(self):
    """Iterate the query, retrying up to ``self._retry`` times on
    SQLAlchemy errors.

    On each failure with retries remaining, the session is rolled back
    before the query is attempted again; once retries are exhausted the
    original exception propagates unchanged.
    """
    tries = self._retry
    while True:
        try:
            results = list(Query.__iter__(self))
            break
        except SQLAlchemyError:
            if tries:
                # Reset the failed transaction before retrying.
                self.session.rollback()
                tries -= 1
                continue
            # Bare ``raise`` re-raises the active exception with its
            # traceback intact (``raise e`` needlessly rebinds and, on
            # Python 2, truncates the traceback).
            raise
    return iter(results)
def __iter__(self):
    """Iterate the query, memoizing results on the owning session keyed
    by the compiled statement text plus its bound parameters.
    """
    try:
        cache = self.session._cache
    except AttributeError:
        # First use on this session: lazily attach the cache dict.
        self.session._cache = cache = {}
    stmt = self.statement.compile()
    # Copy before merging so the compiled statement's own params
    # mapping is never mutated in place.
    params = dict(stmt.params)
    params.update(self._params)
    # Sort the items so the key is deterministic regardless of dict
    # insertion/iteration order (plain ``str(params)`` is not).
    cachekey = str(stmt) + str(sorted(params.items()))
    try:
        ret = cache[cachekey]
    except KeyError:
        ret = list(Query.__iter__(self))
        cache[cachekey] = ret
    return iter(ret)
def __iter__(self):
    """Apply the custom filter, then delegate to the base iterator."""
    filtered = self.custom_filter()
    return Query.__iter__(filtered)
def __iter__(self):
    """Narrow to the typed variant, then delegate to the base iterator."""
    narrowed = self.typed()
    return Query.__iter__(narrowed)
def __iter__(self):
    """Filter out deleted rows, then delegate to the base iterator."""
    screened = self.check_deleted()
    return Query.__iter__(screened)
def __iter__(self):
    """Restrict to private items, then delegate to the base iterator."""
    restricted = self.private()
    return Query.__iter__(restricted)
def __iter__(self):
    """Delegate straight to the base Query iterator (no extra behavior)."""
    base_iterator = Query.__iter__(self)
    return base_iterator
def _log_and_query(self):
    """Log the pretty-printed SQL for this query, then iterate it via
    the base Query iterator.
    """
    rendered = prettyprintable_sql(self, reindent=self.format_log_sql)
    SLOG.info("execute-query", sql=rendered)
    return Query.__iter__(self)