import pickle

def cache_thing(model, cache_key, data, cond_dnf=[[]], timeout=None):
    """
    Writes data to cache and creates appropriate invalidators.
    """
    model = non_proxy(model)

    if timeout is None:
        profile = model_profile(model)
        timeout = profile['timeout']

    # Ensure that all schemes of the current query are "known"
    schemes = map(conj_scheme, cond_dnf)
    cache_schemes.ensure_known(model, schemes)

    txn = redis_client.pipeline()

    # Write data to cache; redis-py's SETEX signature is (name, time, value)
    pickled_data = pickle.dumps(data, -1)
    if timeout is not None:
        txn.setex(cache_key, timeout, pickled_data)
    else:
        txn.set(cache_key, pickled_data)

    # Add the new cache_key to the set of dependencies for every conjunction in the DNF
    for conj in cond_dnf:
        conj_key = conj_cache_key(model, conj)
        txn.sadd(conj_key, cache_key)
        if timeout is not None:
            # The invalidator's timeout should be larger than the timeout of any
            # key it references, so we take the timeout from the profile, which
            # is our upper limit, and add a few extra seconds to be safe.
            txn.expire(conj_key, model._cacheprofile['timeout'] + 10)

    txn.execute()
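# A minimal sketch (not part of the original source) of the consuming side:
# on write, cache_key was registered in a set per conjunction, so invalidation
# boils down to deleting every cached key found in the matching conjunction
# sets, together with the sets themselves. `redis_client` and `conj_cache_key`
# are assumed to be the same objects used above; the function name is ours.
def invalidate_conjs_sketch(model, conjs):
    for conj in conjs:
        conj_key = conj_cache_key(model, conj)
        cache_keys = redis_client.smembers(conj_key)
        # Drop the cached query results and their invalidator set in one call
        redis_client.delete(conj_key, *cache_keys)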
def ensure_known(self, model, new_schemes):
    """
    Ensure that `new_schemes` are known, or make them known.
    """
    new_schemes = set(new_schemes)
    model_name = get_model_name(model)
    loaded = False

    if model_name not in self.local:
        self.load_schemes(model)
        loaded = True
    schemes = self.local[model_name]

    if new_schemes - schemes:
        if not loaded:
            schemes = self.load_schemes(model)

        if new_schemes - schemes:
            # Write the new schemes to redis
            txn = redis_client.pipeline()
            txn.incr(self.get_version_key(model_name))  # Bump the schemes version

            lookup_key = self.get_lookup_key(model_name)
            for scheme in new_schemes - schemes:
                txn.sadd(lookup_key, serialize_scheme(scheme))
            txn.execute()

            # Update the local copy and version
            self.local[model_name].update(new_schemes)
            # We increment here instead of using the incr result from redis,
            # because even our updated collection could already be obsolete
            self.versions[model_name] += 1
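# Hedged sketch of the check implied by `self.versions`: before trusting the
# locally cached schemes, compare the local version counter against the one
# stored in Redis and reload on mismatch. The method name is an assumption;
# `get_version_key`, `load_schemes`, `self.local` and `self.versions` are the
# attributes used above.
def check_schemes_sketch(self, model):
    model_name = get_model_name(model)
    remote_version = int(redis_client.get(self.get_version_key(model_name)) or 0)
    if self.versions.get(model_name) != remote_version:
        self.load_schemes(model)
    return self.local[model_name]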
def cache_page_by_queryset(model, cache_key, data, cond_dnf=[[]],
                           timeout=None, only_conj=False):
    """
    Overridden version of `cacheops.query.cache_thing` which doesn't pickle
    the data and, via `only_conj`, can register only the invalidation
    conjunctions.
    """
    model = non_proxy(model)

    if timeout is None:
        profile = model_profile(model)
        timeout = profile['timeout']

    schemes = map(conj_scheme, cond_dnf)
    cache_schemes.ensure_known(model, schemes)

    txn = redis_client.pipeline()

    # No pickling here: we cache the raw value as-is.
    # pickled_data = pickle.dumps(data, -1)

    # `only_conj` controls whether the data itself gets written
    if not only_conj:
        if timeout is not None:
            txn.setex(cache_key, timeout, data)
        else:
            txn.set(cache_key, data)

    for conj in cond_dnf:
        conj_key = conj_cache_key(model, conj)
        txn.sadd(conj_key, cache_key)
        if timeout is not None:
            txn.expire(conj_key, model._cacheprofile['timeout'] + 10)

    txn.execute()
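# Hypothetical usage: cache a rendered page under the same invalidation
# machinery as the queryset that produced it. `Article` and
# `render_article_list` are illustrative assumptions; the DNF format (a list
# of conjunctions, each a list of (field, value) pairs) mirrors the one
# `cache_thing` receives.
qs = Article.objects.filter(published=True)
html = render_article_list(qs)  # hypothetical renderer returning a string
cache_page_by_queryset(Article, 'page:articles:published', html,
                       cond_dnf=[[('published', True)]], timeout=600)

# With only_conj=True the page body is assumed to be stored elsewhere
# (e.g. by Django's page cache); we only register its invalidators here.
cache_page_by_queryset(Article, 'page:articles:published', html,
                       cond_dnf=[[('published', True)]], only_conj=True)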
def load_schemes(self, model):
    model_name = get_model_name(model)

    txn = redis_client.pipeline()
    txn.get(self.get_version_key(model_name))
    txn.smembers(self.get_lookup_key(model_name))
    version, members = txn.execute()

    self.local[model_name] = set(map(deserialize_scheme, members))
    self.local[model_name].add(())  # Always add the empty scheme
    self.versions[model_name] = int(version or 0)
    return self.local[model_name]
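# A guess (an assumption, not the library's code) at the (de)serialization
# helpers used above: a scheme is a tuple of field names, stored in Redis as
# a comma-joined string. Assumes the Redis client is configured with
# decode_responses=True so set members come back as str.
def serialize_scheme(scheme):
    return ','.join(scheme)

def deserialize_scheme(scheme):
    return tuple(scheme.split(','))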
def cache_thing(model, cache_key, data, cond_dnf=[[]], timeout=None):
    """
    Essentially a copy of cacheops' `cache_thing` with two changes:
    - it is a plain function rather than a method of an object
    - the `pickle.dumps` serialization of `data` has been removed
    """
    model = non_proxy(model)

    if timeout is None:
        profile = model_profile(model)
        timeout = profile['timeout']

    # Ensure that all schemes of the current query are "known"
    schemes = map(conj_scheme, cond_dnf)
    cache_schemes.ensure_known(model, schemes)

    txn = redis_client.pipeline()

    # Write data to cache without pickling
    if timeout is not None:
        txn.setex(cache_key, timeout, data)
    else:
        txn.set(cache_key, data)

    # Add the new cache_key to the set of dependencies for every conjunction in the DNF
    for conj in cond_dnf:
        conj_key = conj_cache_key(model, conj)
        txn.sadd(conj_key, cache_key)
        if timeout is not None:
            # The invalidator's timeout should be larger than the timeout of
            # any key it references, so we take the timeout from the profile,
            # which is our upper limit, plus a few extra seconds to be safe.
            txn.expire(conj_key, model._cacheprofile['timeout'] + 10)

    txn.execute()
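# Hypothetical usage of this unpickled variant: the caller serializes the
# payload itself (JSON here), so the cached bytes stay readable outside
# Python. `Product` and the DNF literal are illustrative only.
import json

payload = json.dumps({'id': 42, 'title': 'Example'})
cache_thing(Product, 'product:42:json', payload,
            cond_dnf=[[('id', 42)]], timeout=300)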