Example 1
def remove_object_from_cache(sender, instance, **kwargs):
    if isinstance(instance, ContentType):
        # The model cache key stuff has special handling to allow passing
        # in a content type instead of the model. At this point though, we are
        # actually working with the content type itself and not the model it
        # represents. So we need to bypass that special handling code.
        cache_key = model_cache_key(get_identifier_string(instance, instance.pk))
    else:
        cache_key = model_cache_key(instance)
    lazymodel_cache.delete(cache_key)
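
For context, an invalidation handler like this one is normally registered as a receiver for Django's model signals, so that saving, deleting, or changing an m2m relation on a cached model evicts its row from the cache. The wiring below is only a minimal sketch under that assumption, not the project's actual registration code; it reuses remove_object_from_cache from the example above, and the helper name register_cache_invalidation is invented here.

from django.db.models import signals


def register_cache_invalidation(model_class):
    # Evict the cached row whenever an instance is saved or deleted.
    signals.post_save.connect(remove_object_from_cache, sender=model_class)
    signals.post_delete.connect(remove_object_from_cache, sender=model_class)
    # m2m_changed also passes `instance` (plus extra keyword arguments the
    # handler ignores), so the same receiver covers m2m changes; connecting
    # without a sender keeps the sketch simple at the cost of firing for
    # every m2m change.
    signals.m2m_changed.connect(remove_object_from_cache)
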
Example 2
    def get(self, *args, **kwargs):

        # list() here keeps this working on Python 3, where dict views
        # cannot be indexed directly.
        if len(kwargs) == 1 and list(kwargs)[0] in ('id', 'id__exact', 'pk',
                                                    'pk__exact'):
            # Generate the cache key directly, since we have the id/pk.
            pk_key = model_cache_key(self.model, list(kwargs.values())[0])
            lookup_key = None
        else:
            # This lookup is not simply an id/pk lookup.
            # Get the cache key for this lookup.

            # Handle related managers, which automatically use core_filters
            # to filter querysets using the related object's ID.
            core_filters = getattr(self, 'core_filters', None)
            if isinstance(core_filters, dict):
                # Combine the core filters and the kwargs because that is
                # basically what the related manager will do when building
                # the queryset.
                lookup_kwargs = dict(core_filters)
                lookup_kwargs.update(kwargs)
                lookup_key = lookup_cache_key(self.model, **lookup_kwargs)
            else:
                lookup_key = lookup_cache_key(self.model, **kwargs)

            # Try to get the cached pk_key.
            object_pk = self.cache_backend.get(lookup_key)
            pk_key = object_pk and model_cache_key(self.model, object_pk)

        # Try to get a cached result if the pk_key is known.
        result = pk_key and self.cache_backend.get(pk_key)

        if not result:

            # The result was not cached, so get it from the database.
            result = super(RowCacheManager, self).get(*args, **kwargs)
            object_pk = result.pk

            # And cache the result against the pk_key for next time.
            pk_key = model_cache_key(result, object_pk)
            self.cache_backend[pk_key] = result

            # If a lookup was used, then cache the pk against it. Next time
            # the same lookup is requested, it will find the relevant pk and
            # be able to get the cached object using that.
            if lookup_key:
                self.cache_backend[lookup_key] = object_pk

        # Return the cache-protected object.
        return result
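
The manager above depends on two helpers that are not part of this listing: model_cache_key, which turns a model (or instance, or prebuilt identifier) plus a pk into a row key, and lookup_cache_key, which turns arbitrary filter kwargs into a key that stores only a pk. Purely as a hypothetical sketch of what such helpers tend to look like (the sketch_ names and the key format are invented; the real functions clearly accept more argument forms, and the CACHE_KEY_VERSIONS setting is the one exercised by the version test below):

from hashlib import md5

from django.conf import settings


def sketch_model_cache_key(model, pk):
    # Hypothetical: namespace, key version, app label, model name and pk.
    version = settings.CACHE_KEY_VERSIONS['model_cache']
    return 'rowcache:%s:%s.%s:%s' % (
        version, model._meta.app_label, model._meta.model_name, pk)


def sketch_lookup_cache_key(model, **kwargs):
    # Hypothetical: hash the normalised lookup kwargs so that arbitrary
    # filters map to a short, memcached-safe key.
    lookup = md5(repr(sorted(kwargs.items())).encode('utf-8')).hexdigest()
    version = settings.CACHE_KEY_VERSIONS['model_cache']
    return 'rowcache:lookup:%s:%s.%s:%s' % (
        version, model._meta.app_label, model._meta.model_name, lookup)
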
Example 3
    def test_cache_version_setting(self):
        """
        Ensure that when the cache key version is changed,
        the model cache keys will be changed too.

        """

        gallery = PhotoGallery.objects.all()[0]

        key_before = model_cache_key(gallery)

        cache_version = settings.CACHE_KEY_VERSIONS['model_cache']
        settings.CACHE_KEY_VERSIONS['model_cache'] = cache_version + 'test'
        try:
            key_after = model_cache_key(gallery)
        finally:
            # Restore the original version so other tests are unaffected.
            settings.CACHE_KEY_VERSIONS['model_cache'] = cache_version

        self.assertNotEqual(key_before, key_after)
Example 4
    def test_that_it_works(self):
        """
        Exercise the whole row-cache flow: pk and slug lookups populating
        the cache, and m2m changes, saves and deletes invalidating it.

        """

        gallery = PhotoGallery.objects.all().filter(sites__isnull=False)[0]
        self.assertTrue(isinstance(gallery, ModelWithCaching),
                        'This test needs to test a cached model.')
        # Delete any other galleries with the same slug, without rebinding
        # the `gallery` name that the rest of the test relies on.
        duplicates = PhotoGallery.objects.filter(slug=gallery.slug)
        for duplicate in duplicates.exclude(pk=gallery.pk):
            duplicate.delete()

        pk_key = model_cache_key(gallery)
        lookup_key = lookup_cache_key(PhotoGallery, slug=gallery.slug)

        # Accessing with a pk should add a cached version of the object
        # to the cache without any fuss.
        del lazymodel_cache[pk_key]
        self.assertEqual(PhotoGallery.objects.get(pk=gallery.pk), gallery)
        self.assertEqual(lazymodel_cache[pk_key], gallery)
        del lazymodel_cache[pk_key]
        self.assertEqual(LazyModel(PhotoGallery, gallery.pk), gallery)
        self.assertEqual(lazymodel_cache[pk_key], gallery)

        # Accessing via ORM lookups, such as using the slug, should cache a
        # lookup_key that is only a reference to the object pk. The cache key
        # using that object pk should then contain the cached object as usual.
        del lazymodel_cache[lookup_key]
        del lazymodel_cache[pk_key]
        self.assertEqual(PhotoGallery.objects.get(slug=gallery.slug), gallery)
        self.assertEqual(lazymodel_cache[lookup_key], gallery.pk)
        self.assertEqual(lazymodel_cache[pk_key], gallery)

        # Removing a site from the gallery should trigger the m2m signals that
        # delete the cached object.
        gallery.sites.remove(gallery.sites.all()[0])
        self.assertUncached(pk_key, 'M2M changes did not delete the cached value!')

        # The lookup key will still be there.
        self.assertEqual(lazymodel_cache[lookup_key], gallery.pk)

        # Saving the object should also delete the cached object.
        # First access the object to add it back to the cache.
        self.assertEqual(PhotoGallery.objects.get(pk=gallery.pk), gallery)
        self.assertEqual(lazymodel_cache[pk_key], gallery)
        gallery.save()
        self.assertUncached(pk_key, 'Saving did not delete the cached value!')

        # Deleting the object should also delete from the cache.
        self.assertEqual(PhotoGallery.objects.get(pk=gallery.pk), gallery)
        self.assertEqual(lazymodel_cache[pk_key], gallery)
        gallery.delete()
        self.assertUncached(pk_key, 'Deleting did not delete the cached value!')
Example 5
    def test_cache_sharing(self):

        gallery = PhotoGallery.objects.all()[0]
        self.assertTrue(isinstance(gallery, ModelWithCaching),
                        'This test needs to test a cached model.')

        cache_key = model_cache_key(gallery)
        del lazymodel_cache[cache_key]

        # Test the following accessors, they should work as normal and
        # add the result to the cache.
        self.assertEqual(PhotoGallery.objects.get(pk=gallery.pk), gallery)
        self.assertEqual(lazymodel_cache[cache_key], gallery)
        del lazymodel_cache[cache_key]
        self.assertEqual(gallery, LazyModel(PhotoGallery, gallery.pk))
        self.assertEqual(lazymodel_cache[cache_key], gallery)

        # Put a dummy value into the cache key which both accessors use.
        # Both accessors should then return that dummy value when called.
        dummy = 'dummy value for the cache'
        lazymodel_cache[cache_key] = dummy
        self.assertEqual(dummy, PhotoGallery.objects.get(pk=gallery.pk))
        self.assertEqual(dummy, LazyModel(PhotoGallery, gallery.pk))
        del lazymodel_cache[cache_key]
Example 6
    def _get_cached_instance(self):
        """
        A cache wrapper around _get_instance, using the same cache keys
        as the row-level cache.

        If no object is found, return False instead of None. This is
        necessary because the LazyObject machinery uses None to mean that
        the object has not been evaluated yet, and reworking the whole
        class to avoid that convention is not worth it here.

        """

        try:
            identifier = self._get_identifier()
        except ValueError as error:
            if self._fail_silently:
                return False
            else:
                raise LazyModelError(error)

        # Get the cache key, basically just namespacing
        # and versioning the identifier.
        cache_key = model_cache_key(identifier)

        try:
            instance = self._cache_backend[cache_key]
        except KeyError:
            instance = self._get_instance(identifier)
            self._cache_backend[cache_key] = instance

        if instance is None:
            if self._fail_silently:
                return False
            else:
                raise LazyModelError('%s not found.' % identifier)
        else:
            return instance
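
The docstring above explains the False-instead-of-None convention: the surrounding class builds on Django's LazyObject, whose "not evaluated yet" sentinel was None in the Django versions this code targets (newer versions use a dedicated empty sentinel, but the idea is the same). The outline below is a hypothetical sketch of how such a method is typically hooked in through LazyObject._setup; the class name LazyModelSketch is invented, and the real LazyModel also provides _get_identifier, _get_instance and _cache_backend, which are not shown in this listing.

from django.utils.functional import LazyObject


class LazyModelSketch(LazyObject):

    def __init__(self, model, pk, fail_silently=True):
        # Write straight into __dict__ so LazyObject's attribute proxying
        # does not try to evaluate the wrapped object prematurely.
        self.__dict__['_model'] = model
        self.__dict__['_pk'] = pk
        self.__dict__['_fail_silently'] = fail_silently
        super(LazyModelSketch, self).__init__()

    def _setup(self):
        # LazyObject calls _setup the first time the wrapped object is
        # needed. Because _get_cached_instance returns False (never None)
        # on a miss, the sentinel check cannot mistake a miss for
        # "not evaluated yet".
        self._wrapped = self._get_cached_instance()
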