def get_cached(self, **kwargs): """Gets the model instance from the cache, or, if the instance is not in the cache, gets it from the database and puts it in the cache. """ cache_settings = get_cache_settings(self.model) lookups = cache_settings.get("lookups") keys = kwargs.keys() single_kwarg_match = len(keys) == 1 and keys[0] in lookups multi_kwarg_match = len(keys) != 1 and any( sorted(keys) == sorted(lookup) for lookup in lookups if isinstance(lookup, (list, tuple)) ) if not single_kwarg_match and not multi_kwarg_match: raise ValueError("Caching not allowed with kwargs %s" % ", ".join(keys)) # Get object from cache or db. key = generate_base_key(self.model, **kwargs) obj = cache.get(key) if obj is not None: if isinstance(obj, ObjectDoesNotExist): raise self.model.DoesNotExist(repr(obj)) elif isinstance(obj, MultipleObjectsReturned): raise self.model.MultipleObjectsReturned(repr(obj)) else: return obj try: obj = self.get(**kwargs) except (ObjectDoesNotExist, MultipleObjectsReturned), e: # The model-specific subclasses of these exceptions are not # pickleable, so we cache the base exception and reconstruct the # specific exception when fetching from the cache. obj = e.__class__.__base__(repr(e)) cache.set(key, obj, cache_settings.get("timeout")) raise
def get_cached(self, **kwargs): """Gets the model instance from the cache, or, if the instance is not in the cache, gets it from the database and puts it in the cache. """ cache_settings = get_cache_settings(self.model) lookups = cache_settings.get("lookups") keys = kwargs.keys() single_kwarg_match = len(keys) == 1 and keys[0] in lookups multi_kwarg_match = len(keys) != 1 and any( sorted(keys) == sorted(lookup) for lookup in lookups if isinstance(lookup, (list, tuple))) if not single_kwarg_match and not multi_kwarg_match: raise ValueError("Caching not allowed with kwargs %s" % ", ".join(keys)) # Get object from cache or db. key = generate_base_key(self.model, **kwargs) obj = cache.get(key) if obj is not None: if isinstance(obj, ObjectDoesNotExist): raise self.model.DoesNotExist(repr(obj)) elif isinstance(obj, MultipleObjectsReturned): raise self.model.MultipleObjectsReturned(repr(obj)) else: return obj try: obj = self.get(**kwargs) except (ObjectDoesNotExist, MultipleObjectsReturned), e: # The model-specific subclasses of these exceptions are not # pickleable, so we cache the base exception and reconstruct the # specific exception when fetching from the cache. obj = e.__class__.__base__(repr(e)) cache.set(key, obj, cache_settings.get("timeout")) raise
def get_many_cached(self, list_of_kwargs): """Gets the model instance from the cache, or, if the instance is not in the cache, gets it from the database and puts it in the cache. """ cache_settings = get_cache_settings(self.model) lookups = cache_settings.get("lookups") prefetch = cache_settings.get("prefetch") related = self._get_select_related_from_attrs(self.model, prefetch) if related: base_qs = self.all().select_related(*related) else: base_qs = self.all() cache_keys = dict() for kwargs in list_of_kwargs: keys = kwargs.keys() single_kwarg_match = len(keys) == 1 and keys[0] in lookups multi_kwarg_match = len(keys) != 1 and any( sorted(keys) == sorted(lookup) for lookup in lookups if isinstance(lookup, (list, tuple)) ) if not single_kwarg_match and not multi_kwarg_match: raise ValueError("Caching not allowed with kwargs %s" % ", ".join(keys)) # Get object from cache or db. key = generate_base_key(self.model, **kwargs) cache_keys[key] = kwargs objects = cache.get_many(cache_keys.keys()) pending_cache_update = dict() cached_objects = list() for key, kwargs in cache_keys.iteritems(): obj = objects.get(key, None) if obj is not None: if isinstance(obj, ObjectDoesNotExist): raise self.model.DoesNotExist(repr(obj)) elif isinstance(obj, MultipleObjectsReturned): raise self.model.MultipleObjectsReturned(repr(obj)) else: cached_objects.append(obj) continue try: obj = base_qs.get(**kwargs) except (ObjectDoesNotExist, MultipleObjectsReturned), e: # The model-specific subclasses of these exceptions are not # pickleable, so we cache the base exception and reconstruct the # specific exception when fetching from the cache. obj = e.__class__.__base__(repr(e)) cache.set(key, obj, cache_settings.get("timeout")) raise self._prefetch_related(obj, prefetch) self._tag_object_as_from_cache(obj) pending_cache_update[key] = obj cached_objects.append(obj)
def install(cls):
    """Set up the invalidation paths for every cached model declared in
    CACHETREE and register the invalidation signal handlers.
    """
    cls.INVALIDATION_PATHS = {}
    for app_label, model in get_cached_models():
        cls.validate_lookups(model)
        settings = get_cache_settings(model)
        cls._add_invalidation_path(model, [], settings.get("prefetch"))
    cls.connect_signals()
def validate_lookups(cls, model):
    """Check that every configured lookup names a field in
    ``model._meta.fields`` (or ``"pk"``); raise ImproperlyConfigured
    otherwise.

    Invalidation rebuilds cache keys from the field values captured in a
    post-init copy of the instance ``__dict__``, which only holds values
    for ``model._meta.fields``. Hence:

    * Related-model lookups (e.g. ``Author.objects.get(
      authorprofile=authorprofile)``, allowed by Django) are rejected —
      their values are not present in the copied ``__dict__``.
    * Lookup separators (double-underscore) are rejected — non-exact
      lookups such as ``username__contains="stanley"`` cannot be turned
      back into a cache key from an instance's current field values.

    Lookup fields need not be unique: the cache is only populated via
    ``get_cached``, which calls ``Manager.get`` and so guarantees
    uniqueness before anything is cached. A later instance reusing a
    cached instance's lookup value triggers invalidation, after which
    ``get_cached`` raises MultipleObjectsReturned.
    """
    lookups = get_cache_settings(model).get("lookups")
    # Keep list order: it is embedded verbatim in the error message.
    valid_fieldnames = ["pk"] + [field.name for field in model._meta.fields]
    for lookup in lookups:
        fieldnames = lookup if isinstance(lookup, (list, tuple)) else [lookup]
        for fieldname in fieldnames:
            if fieldname in valid_fieldnames:
                continue
            raise ImproperlyConfigured(
                cls.ERROR_MSG_INVALID_FIELD_LOOKUP % dict(
                    model=model.__name__,
                    lookup=fieldname,
                    fields=', '.join(valid_fieldnames)))
def validate_lookups(cls, model):
    """Validate that each configured lookup refers to a local concrete
    field of ``model`` (one of ``model._meta.fields``, or ``"pk"``).

    This is stricter than Django's own lookup rules on purpose:
    invalidation reconstructs cache keys from a post-init snapshot of the
    instance ``__dict__``, which contains values only for local fields —
    so reverse/related lookups (e.g. ``authorprofile=...``) cannot be
    invalidated reliably, and double-underscore lookups (e.g.
    ``username__contains``) cannot be reversed into a key at all.

    Uniqueness is not required of lookup fields: ``get_cached`` goes
    through ``Manager.get``, which enforces uniqueness before caching,
    and a duplicate value assigned later invalidates the cached entry
    (subsequent ``get_cached`` calls raise MultipleObjectsReturned).

    Raises ImproperlyConfigured on the first invalid lookup found.
    """
    lookups = get_cache_settings(model).get("lookups")
    # Order matters below — the error message joins this list verbatim.
    valid_fieldnames = ["pk"] + [field.name for field in model._meta.fields]
    for lookup in lookups:
        single = not isinstance(lookup, (list, tuple))
        for kwarg in ([lookup] if single else lookup):
            if kwarg not in valid_fieldnames:
                raise ImproperlyConfigured(
                    cls.ERROR_MSG_INVALID_FIELD_LOOKUP % dict(
                        model=model.__name__,
                        lookup=kwarg,
                        fields=", ".join(valid_fieldnames)))
def invalidate_root_instances(self, *instances):
    """Invalidate every cached version of the given root instances, using
    each instance's configured lookups and its current field values — and,
    when available, its original (pre-modification) field values.

    Each instance must be a root instance: an instance of a top-level
    cached model, not a related model instance.
    """
    self.seen_instances.update(instances)
    keys = set()
    for instance in instances:
        model = instance.__class__
        # Invalidate both the current state and, if the post-init
        # snapshot exists, the original state — a modified lookup field
        # leaves stale entries under its old value otherwise.
        variants = [instance]
        if hasattr(instance, "_orig_state"):
            orig = model()
            orig.__dict__ = instance._orig_state
            variants.append(orig)
        # FIX: hoisted out of the variant loop — the settings depend only
        # on the model, and the inner loop previously rebound (shadowed)
        # the outer ``instance`` variable, which was bug-prone.
        lookups = get_cache_settings(model).get("lookups")
        for variant in variants:
            for lookup in lookups:
                if not isinstance(lookup, (list, tuple)):
                    lookup = [lookup]
                kwargs = {}
                for fieldname in lookup:
                    if fieldname == "pk":
                        field = model._meta.pk
                    else:
                        field = model._meta.get_field(fieldname)
                    # Use the attname (e.g. author_id for a FK) so the
                    # raw column value is read, matching key generation.
                    attname = field.get_attname()
                    kwargs[fieldname] = getattr(variant, attname)
                keys.add(generate_base_key(model, **kwargs))
    cache.delete_many(keys)