import pickle

# The helpers below are django-cacheops internals; exact import paths vary
# between cacheops versions (assumption):
# from cacheops.conf import redis_client, model_profile
# from cacheops.utils import non_proxy, conj_scheme
# from cacheops.invalidation import cache_schemes, conj_cache_key


def cache_thing(model, cache_key, data, cond_dnf=[[]], timeout=None):
    """
    Writes data to cache and creates appropriate invalidators.
    """
    model = non_proxy(model)

    if timeout is None:
        profile = model_profile(model)
        timeout = profile['timeout']

    # Ensure that all schemes of the current query are "known"
    schemes = map(conj_scheme, cond_dnf)
    cache_schemes.ensure_known(model, schemes)

    txn = redis_client.pipeline()

    # Write data to cache
    pickled_data = pickle.dumps(data, -1)
    if timeout is not None:
        # redis-py >= 2.7 argument order: setex(name, time, value)
        txn.setex(cache_key, timeout, pickled_data)
    else:
        txn.set(cache_key, pickled_data)

    # Add the new cache_key to the set of dependencies for every
    # conjunction in the DNF
    for conj in cond_dnf:
        conj_key = conj_cache_key(model, conj)
        txn.sadd(conj_key, cache_key)
        if timeout is not None:
            # The invalidator's timeout should be larger than the timeout of
            # any key it references, so we take the timeout from the profile,
            # which is our upper limit, and add a few extra seconds to be safe.
            txn.expire(conj_key, model._cacheprofile['timeout'] + 10)

    txn.execute()
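
# A minimal usage sketch (everything here is illustrative: the `Post` model,
# the key name, and the filter field are assumptions, not part of cacheops).
# It caches a queryset's results and registers one conjunction, so the key
# gets invalidated whenever a Post with category_id=5 is saved or deleted.
def _cache_posts_sketch():
    from myapp.models import Post  # hypothetical Django model
    posts = list(Post.objects.filter(category_id=5))
    cache_thing(Post, 'example:posts:category:5', posts,
                cond_dnf=[[('category_id', 5)]], timeout=600)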
def cache_page_by_queryset(model, cache_key, data, cond_dnf=[[]],
                           timeout=None, only_conj=False):
    """
    Overridden version of `cacheops.query.cache_thing` which does not pickle
    data and can be told to set only the invalidation conjunctions.
    """
    model = non_proxy(model)

    if timeout is None:
        profile = model_profile(model)
        timeout = profile['timeout']

    schemes = map(conj_scheme, cond_dnf)
    cache_schemes.ensure_known(model, schemes)

    txn = redis_client.pipeline()

    # No pickling here: the raw value is cached as-is.
    # Skip writing the data entirely when only_conj is set.
    if not only_conj:
        if timeout is not None:
            txn.setex(cache_key, timeout, data)
        else:
            txn.set(cache_key, data)

    for conj in cond_dnf:
        conj_key = conj_cache_key(model, conj)
        txn.sadd(conj_key, cache_key)
        if timeout is not None:
            txn.expire(conj_key, model._cacheprofile['timeout'] + 10)

    txn.execute()
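
# Sketch of how the override might be used to cache a rendered page body as a
# raw string (the `Article` model, key scheme, and DNF are assumptions). With
# only_conj=True the value itself is left alone (e.g. it was written by
# another cache layer) and only the invalidation sets are refreshed.
def _cache_page_sketch(request, html):
    from myapp.models import Article  # hypothetical model backing the page
    key = 'page:%s' % request.path
    cache_page_by_queryset(Article, key, html,
                           cond_dnf=[[('published', True)]], timeout=300)
    cache_page_by_queryset(Article, key, html,
                           cond_dnf=[[('published', True)]],
                           timeout=300, only_conj=True)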
def cache_thing(model, cache_key, data, cond_dnf=[[]], timeout=None):
    """
    Writes data to cache and creates appropriate invalidators.
    """
    if timeout is None:
        profile = model_profile(model)
        timeout = profile['timeout']

    # Ensure that all schemes of the current query are "known"
    schemes = map(conj_scheme, cond_dnf)
    cache_schemes.ensure_known(model, schemes)

    txn = redis_conn.pipeline()

    # Write data to cache
    pickled_data = pickle.dumps(data, -1)
    if timeout is not None:
        # redis-py >= 2.7 argument order: setex(name, time, value)
        txn.setex(cache_key, timeout, pickled_data)
    else:
        txn.set(cache_key, pickled_data)

    # Add the new cache_key to the set of dependencies for every
    # conjunction in the DNF
    for conj in cond_dnf:
        conj_key = conj_cache_key(model, conj)
        txn.sadd(conj_key, cache_key)
        if timeout is not None:
            # The invalidator's timeout should be larger than the timeout of
            # any key it references, so we take the timeout from the profile,
            # which is our upper limit, and add a few extra seconds to be safe.
            txn.expire(conj_key, model._cacheprofile['timeout'] + 10)

    txn.execute()
def cache_thing(model, cache_key, data, cond_dnf=[[]], timeout=None):
    """
    Effectively a copy of cacheops' cache_thing method with two changes:
    - it is a plain function rather than a method on an object
    - the serialization of data via pickle.dumps has been removed
    """
    model = non_proxy(model)

    if timeout is None:
        profile = model_profile(model)
        timeout = profile['timeout']

    # Ensure that all schemes of the current query are "known"
    schemes = map(conj_scheme, cond_dnf)
    cache_schemes.ensure_known(model, schemes)

    txn = redis_client.pipeline()

    # Write data to cache
    if timeout is not None:
        txn.setex(cache_key, timeout, data)
    else:
        txn.set(cache_key, data)

    # Add the new cache_key to the set of dependencies for every
    # conjunction in the DNF
    for conj in cond_dnf:
        conj_key = conj_cache_key(model, conj)
        txn.sadd(conj_key, cache_key)
        if timeout is not None:
            # The invalidator's timeout should be larger than the timeout of
            # any key it references, so we take the timeout from the profile,
            # which is our upper limit, and add a few extra seconds to be safe.
            txn.expire(conj_key, model._cacheprofile['timeout'] + 10)

    txn.execute()
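
# For context, a simplified sketch of the consuming side (not cacheops' actual
# implementation): on a model change the matching conjunction sets yield every
# dependent cache key, and both the keys and the sets are deleted together.
def _invalidate_conjs_sketch(model, conjs):
    conj_keys = [conj_cache_key(model, conj) for conj in conjs]
    if not conj_keys:
        return
    cache_keys = redis_client.sunion(conj_keys)
    redis_client.delete(*(list(cache_keys) + conj_keys))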