def get(self, *args, **kwargs):
    """
    Checks the cache to see if there's a cached entry for this pk. If not,
    fetches using super then stores the result in cache.

    Most of the logic here was gathered from a careful reading of
    ``django.db.models.sql.query.add_filter``
    """
    if self.query.where:
        # If there is any other ``where`` filter on this QuerySet just call
        # super. There will be a where clause if this QuerySet has already
        # been filtered/cloned.
        return super(CachingQuerySet, self).get(*args, **kwargs)

    # Punt on anything more complicated than get by pk/id only...
    if len(kwargs) == 1:
        k = kwargs.keys()[0]
        if k in ('pk', 'pk__exact', '%s' % self.model._meta.pk.attname,
                 '%s__exact' % self.model._meta.pk.attname):
            obj = cache.get(self.model._cache_key(pk=kwargs.values()[0]))
            if obj is not None:
                obj.from_cache = True
                return obj

    obj = super(CachingQuerySet, self).get(*args, **kwargs)
    cache.set(obj.cache_key, obj)
    return obj
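# A hedged usage sketch of the cache-aware ``get`` above. It assumes a
# ``CachingManager`` whose queryset method returns this ``CachingQuerySet``,
# and a model providing the ``cache_key``/``_cache_key`` helpers referenced
# above; ``Article`` and its field are illustrative names, not part of the
# original code.
from django.db import models

class Article(models.Model):
    title = models.CharField(max_length=100)

    objects = CachingManager()  # assumed to return CachingQuerySet

# The first pk lookup misses the cache, hits the database and stores the row;
# a repeat lookup is served from cache and flagged ``from_cache``.
article = Article.objects.get(pk=1)
article = Article.objects.get(pk=1)
assert getattr(article, 'from_cache', False)

# Any queryset that has already been filtered carries a ``where`` clause, so
# it bypasses the cache and behaves like a stock queryset.
Article.objects.filter(title__startswith='A').get(pk=1)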
def _invalidate_cache(self, instance):
    """
    Explicitly set a None value instead of just deleting so we don't have
    any race conditions where:
        Thread 1 -> Cache miss, get object from DB
        Thread 2 -> Object saved, deleted from cache
        Thread 1 -> Store (stale) object fetched from DB in cache
    Five seconds should be more than enough time to prevent this from
    happening for a web app.
    """
    cache.set(instance.cache_key, None, 5)
def post_save_cache(sender, instance, **kwargs):
    cache.set(instance.cache_key, instance)
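# A hedged sketch of wiring ``post_save_cache`` into Django's signal
# framework so every save refreshes the per-object cache entry; the
# ``Article`` sender is the illustrative model from the sketch above, and any
# sender used here must expose the ``cache_key`` property the handler reads.
from django.db.models.signals import post_save

post_save.connect(post_save_cache, sender=Article)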
def invalidate_cache(obj, field):
    cache.set(obj._get_cache_key(field=field), None, 5)
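# A hedged usage sketch for ``invalidate_cache``. It assumes that
# ``_get_cache_key(field=...)`` names a per-field cache entry (for example a
# cached ForeignKey target) and that the model mixes in whatever base class
# provides that helper; ``Comment`` and its ``article`` field are illustrative
# names, not part of the original code.
from django.db import models

class Comment(models.Model):
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
    body = models.TextField()

# After repointing the relation, null out whatever was cached for that field
# so the next read repopulates it; like the save-time invalidation above, the
# entry is only held as None for five seconds.
comment = Comment.objects.get(pk=1)
comment.article = Article.objects.get(pk=2)
comment.save()
invalidate_cache(comment, 'article')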