def unset_value(self, instance, key):
    """Delete a single metadata entry for ``instance`` and keep the cache in sync.

    Removes the backing row, then either invalidates the cached metadata
    blob (when no local copy exists to patch) or updates the local copy
    in place and rewrites the cache entry.
    """
    # Drop the persisted row first.
    self.filter(**{self.field_name: instance, 'key': key}).delete()

    metadata = self.__metadata
    if instance.pk not in metadata:
        # No local metadata to patch — just invalidate the shared cache entry.
        cache.delete(self._make_key(instance))
        return

    metadata[instance.pk].pop(key, None)
    cache.set(self._make_key(instance), metadata[instance.pk])
def __post_save(self, instance, **kwargs):
    """
    Pushes changes to an instance into the cache, and removes invalid
    (changed) lookup values.

    Writes one pointer key (lookup value -> pk) per cache field, caches
    the instance itself under its pk, then deletes pointer keys whose
    lookup values changed since the last cached state.
    """
    pk_name = instance._meta.pk.name
    pk_names = ("pk", pk_name)
    pk_val = instance.pk
    for key in self.cache_fields:
        # The pk already maps to the object itself; only non-pk fields
        # need pointer entries.
        if key in pk_names:
            continue
        # store pointers (lookup value -> primary key); TTL comes from
        # self.cache_ttl, configured elsewhere on this manager.
        cache.set(self.__get_lookup_cache_key(**{key: getattr(instance, key)}), pk_val, self.cache_ttl)

    # Ensure we don't serialize the database alias into the cache:
    # temporarily blank _state.db and restore it after the write.
    db = instance._state.db
    instance._state.db = None
    # store actual object under its primary-key lookup key
    cache.set(self.__get_lookup_cache_key(**{pk_name: pk_val}), instance, self.cache_ttl)
    instance._state.db = db

    # Kill off any pointer keys which are no longer valid because the
    # lookup value changed since the previously recorded state.
    if instance in self.__cache:
        for key in self.cache_fields:
            if key not in self.__cache[instance]:
                continue
            value = self.__cache[instance][key]
            if value != getattr(instance, key):
                cache.delete(self.__get_lookup_cache_key(**{key: value}))
    # Record the instance's current field values for the next diff.
    self.__cache_state(instance)
def __post_delete(self, instance, **kwargs):
    """
    Drops instance from all cache storages.
    """
    pk_name = instance._meta.pk.name
    skip = ("pk", pk_name)

    # remove pointer entries (lookup value -> pk) for each cached field
    for field in self.cache_fields:
        if field in skip:
            continue
        pointer_key = self.__get_lookup_cache_key(**{field: getattr(instance, field)})
        cache.delete(pointer_key)

    # remove the cached object itself
    cache.delete(self.__get_lookup_cache_key(**{pk_name: instance.pk}))
def __post_delete(self, instance, **kwargs):
    """
    Drops instance from all cache storages.
    """
    pk_name = instance._meta.pk.name
    protected = ("pk", pk_name)

    for field in self.cache_fields:
        # Pointer entries exist only for non-pk lookup fields.
        if field in protected:
            continue
        field_value = self.__value_for_field(instance, field)
        cache.delete(
            key=self.__get_lookup_cache_key(**{field: field_value}),
            version=self.cache_version,
        )

    # Finally, drop the cached object itself.
    cache.delete(
        key=self.__get_lookup_cache_key(**{pk_name: instance.pk}),
        version=self.cache_version,
    )
def __post_save(self, instance, **kwargs):
    """
    Pushes changes to an instance into the cache, and removes invalid
    (changed) lookup values.

    Writes one pointer key (lookup value -> pk) per cache field, caches
    the instance itself under its pk (best-effort: failures are logged,
    not raised), then deletes pointer keys whose lookup values changed
    since the last recorded state.
    """
    pk_name = instance._meta.pk.name
    pk_names = ('pk', pk_name)
    pk_val = instance.pk
    for key in self.cache_fields:
        # The pk already maps to the object itself; only non-pk fields
        # get pointer entries.
        if key in pk_names:
            continue
        # store pointers (lookup value -> primary key)
        value = self.__value_for_field(instance, key)
        cache.set(
            key=self.__get_lookup_cache_key(**{key: value}),
            value=pk_val,
            timeout=self.cache_ttl,
            version=self.cache_version,
        )

    # Ensure we don't serialize the database alias into the cache:
    # blank _state.db for the duration of the write and restore it after.
    db = instance._state.db
    instance._state.db = None

    # store actual object; cache failures must not break the save path,
    # so they are logged and swallowed.
    try:
        cache.set(
            key=self.__get_lookup_cache_key(**{pk_name: pk_val}),
            value=instance,
            timeout=self.cache_ttl,
            version=self.cache_version,
        )
    except Exception as e:
        logger.error(e, exc_info=True)

    instance._state.db = db

    # Kill off any pointer keys which are no longer valid because the
    # lookup value changed since the previously recorded state.
    if instance in self.__cache:
        for key in self.cache_fields:
            if key not in self.__cache[instance]:
                continue
            value = self.__cache[instance][key]
            current_value = self.__value_for_field(instance, key)
            if value != current_value:
                cache.delete(
                    key=self.__get_lookup_cache_key(**{key: value}),
                    version=self.cache_version,
                )
    # Record the instance's current field values for the next diff.
    self.__cache_state(instance)
def set_value(self, instance, key, value):
    """Create or update a metadata entry for ``instance`` and sync the cache.

    Persists the (key, value) pair, then either invalidates the cached
    metadata blob (when no local copy exists) or patches the local copy
    and rewrites the cache entry.
    """
    inst, created = self.get_or_create(
        **{self.field_name: instance},
        key=key,
        defaults={'value': value},
    )
    # Row already existed with a stale value — update it in place.
    if not created and inst.value != value:
        inst.update(value=value)

    metadata = self.__metadata
    if instance.pk not in metadata:
        # No local metadata to patch — just invalidate the shared cache entry.
        cache.delete(self._make_key(instance))
        return

    metadata[instance.pk][key] = value
    cache.set(self._make_key(instance), metadata[instance.pk])
def uncache_object(self, instance_id):
    """Evict the cached copy of the object with the given primary key."""
    pk_field = self.model._meta.pk.name
    lookup_key = self.__get_lookup_cache_key(**{pk_field: instance_id})
    cache.delete(lookup_key, version=self.cache_version)
@classmethod
def _get_cache_key(cls, group_id, environment_id):
    """Build the cache key for a (group, environment) pair.

    FIX: declared as a classmethod taking ``cls``. The original was a
    plain instance method taking ``self``, yet it is invoked with only
    two arguments both as ``cls._get_cache_key(...)`` below and as
    ``GroupEnvironment._get_cache_key(...)`` in the post_delete hook —
    either call would raise TypeError (missing positional argument).
    """
    return 'groupenv:1:{}:{}'.format(group_id, environment_id)

@classmethod
def get_or_create(cls, group_id, environment_id, defaults=None):
    """Return ``(instance, created)``, consulting the cache first.

    On a cache miss the row is fetched or created in the database and
    cached for one hour; on a hit ``created`` is always False.
    """
    cache_key = cls._get_cache_key(group_id, environment_id)
    instance = cache.get(cache_key)
    if instance is None:
        instance, created = cls.objects.get_or_create(
            group_id=group_id,
            environment_id=environment_id,
            defaults=defaults,
        )
        cache.set(cache_key, instance, 3600)  # 1 hour
    else:
        created = False
    return instance, created


# Keep the cache consistent: drop the cached entry whenever a row is deleted.
post_delete.connect(
    lambda instance, **kwargs: cache.delete(
        GroupEnvironment._get_cache_key(
            instance.group_id,
            instance.environment_id,
        ),
    ),
    sender=GroupEnvironment,
    weak=False,
)
def save(self, *args, **kwargs):
    """Save the rule, then invalidate the project's cached rule list."""
    result = super(Rule, self).save(*args, **kwargs)
    cache.delete('project:{}:rules'.format(self.project_id))
    return result
def tearDown(self):
    # Evict any ownership entry cached during this test so it cannot
    # leak into the next one, then run the normal teardown.
    ownership_key = ProjectOwnership.get_cache_key(self.project.id)
    cache.delete(ownership_key)
    super().tearDown()
def save(self, *args, **kwargs):
    """Persist the rule, then drop the project's cached rule list."""
    saved = super().save(*args, **kwargs)
    stale_key = f"project:{self.project_id}:rules"
    cache.delete(stale_key)
    return saved
__repr__ = sane_repr("group_id", "environment_id")

@classmethod
def _get_cache_key(cls, group_id, environment_id):
    """Build the cache key for a (group, environment) pair.

    FIX: the classmethod's first parameter was named ``self`` even
    though it receives the class — renamed to ``cls`` (no caller-visible
    change, since the first argument is bound implicitly).
    """
    return "groupenv:1:{}:{}".format(group_id, environment_id)

@classmethod
def get_or_create(cls, group_id, environment_id, defaults=None):
    """Return ``(instance, created)``, consulting the cache first.

    On a cache miss the row is fetched or created in the database and
    cached for one hour; on a hit ``created`` is always False.
    """
    cache_key = cls._get_cache_key(group_id, environment_id)
    instance = cache.get(cache_key)
    if instance is None:
        instance, created = cls.objects.get_or_create(
            group_id=group_id,
            environment_id=environment_id,
            defaults=defaults,
        )
        cache.set(cache_key, instance, 3600)  # 1 hour
    else:
        created = False
    return instance, created


# Keep the cache consistent: drop the cached entry whenever a row is deleted.
post_delete.connect(
    lambda instance, **kwargs: cache.delete(
        GroupEnvironment._get_cache_key(instance.group_id, instance.environment_id)
    ),
    sender=GroupEnvironment,
    weak=False,
)
def delete(self, *args, **kwargs):
    """Delete the rule, then drop the project's cached rule list."""
    result = super().delete(*args, **kwargs)
    stale_key = "project:{}:rules".format(self.project_id)
    cache.delete(stale_key)
    return result
def clear_release_project_cache(instance: ReleaseProject, **kwargs: Any) -> None:
    """Signal handler: invalidate the cached release data for the project
    that the deleted/changed ReleaseProject row belongs to."""
    cache.delete(get_project_release_cache_key(instance.project_id))