def cache_results(self, results):
    """
    Create invalidation signals for these results in the form of CacheBotSignals.
    A CacheBotSignal stores a model and its accessor path to self.queryset.model.
    """
    # cache the results
    invalidation_dict = {}
    if results:
        added_to_cache = cache.add(self.result_key, results, CACHE_SECONDS)
    else:
        added_to_cache = cache.add(self.result_key, None, CACHE_SECONDS)

    if added_to_cache:
        invalidation_dict.update(dict([(key, None) for key in self.get_invalidation_keys(results)]))
        invalidation_dict.update(cache.get_many(invalidation_dict.keys()))

        # walk the where clause and register an invalidation key for every
        # (model, accessor path) pair this queryset depends on
        for child, negate in self.queryset._get_where_clause(self.queryset.query.where):
            (table_alias, field_name, db_type), lookup_type, value_annotation, params = child
            for model_class, accessor_path in self._get_join_paths(table_alias, field_name):
                if model_class is None:
                    continue
                if self._is_valid_flush_path(accessor_path):
                    cache_signals.register(model_class, accessor_path, lookup_type, negate=negate)
                    invalidation_key = get_invalidation_key(
                        model_class._meta.db_table,
                        accessor_path=accessor_path,
                        lookup_type=lookup_type,
                        negate=negate,
                        value=params,
                        save=True)
                    invalidation_dict[invalidation_key] = None
                join_to_tables = ifilter(lambda x: x[0] == model_class._meta.db_table,
                                         self.queryset.query.join_map.keys())
                for join_tuple in join_to_tables:
                    if self._is_valid_flush_path(accessor_path):
                        model_class = self.queryset._get_model_class_from_table(join_tuple[1])
                        cache_signals.register(model_class, join_tuple[3], lookup_type, negate=negate)
                        invalidation_key = get_invalidation_key(
                            model_class._meta.db_table,
                            accessor_path=join_tuple[3],
                            lookup_type=lookup_type,
                            negate=negate,
                            value=params,
                            save=True)
                        invalidation_dict[invalidation_key] = None

        for flush_key, flush_list in invalidation_dict.iteritems():
            # need to add and append to prevent race conditions
            cache.add(flush_key, self.result_key, CACHE_SECONDS)
            if flush_list is None or self.result_key not in flush_list.split(','):
                cache.append(flush_key, ',%s' % self.result_key)
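# In this version each flush list lives in the cache as a comma-separated
# string of result keys, built up with cache.add/cache.append. Note that
# append is not part of Django's stock cache API, so the backend in use here
# must supply it (memcached supports append natively, and it is atomic). A
# hypothetical sketch of the resulting cache state; the key and value
# strings below are illustrative, not cachebot's real key format:
#
#   cache.add('flush_key', 'result_key_1', CACHE_SECONDS)  # first writer wins
#   cache.append('flush_key', ',result_key_2')             # later writers append
#   cache.get('flush_key')  # -> 'result_key_1,result_key_2'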
def invalidate_cache(model_class, objects, **extra_keys):
    """
    Flushes the cache of any cached objects associated with this instance.

    Explicitly set a None value instead of just deleting so we don't have
    any race conditions where:
        Thread 1 -> Cache miss, get object from DB
        Thread 2 -> Object saved, deleted from cache
        Thread 1 -> Store (stale) object fetched from DB in cache
    Five seconds should be more than enough time to prevent this from
    happening for a web app.
    """
    invalidation_dict = {}
    accessor_set = cache_signals.get_global_signals(model_class)
    for obj in objects:
        for (accessor_path, lookup_type, negate) in accessor_set:
            for value in get_values(obj, accessor_path):
                invalidation_key = get_invalidation_key(
                    model_class._meta.db_table,
                    accessor_path=accessor_path,
                    negate=negate,
                    value=value,
                    save=False)
                invalidation_dict[invalidation_key] = None
    invalidation_dict.update(cache.get_many(invalidation_dict.keys()))

    # each flush list names the result keys that depend on this object
    cache_keys = set()
    for obj_key, cache_key_list in invalidation_dict.iteritems():
        if cache_key_list:
            cache_keys.update(cache_key_list.split(','))

    keys_to_invalidate = dict([(key, None) for key in chain(cache_keys, invalidation_dict.keys())])
    keys_to_invalidate.update(extra_keys)
    cache.set_many(keys_to_invalidate, 5)
    cache.delete_many(keys_to_invalidate.keys())
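# A minimal sketch of how invalidate_cache might be wired up to Django's
# model signals; the receiver names here are assumptions for illustration,
# not cachebot's actual registration code:

from django.db.models.signals import post_save, post_delete

def _flush_on_save(sender, instance, **kwargs):
    invalidate_cache(sender, [instance])

def _flush_on_delete(sender, instance, **kwargs):
    invalidate_cache(sender, [instance])

# post_save.connect(_flush_on_save, sender=SomeModel)
# post_delete.connect(_flush_on_delete, sender=SomeModel)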
def _register_signal(self, model_class, accessor_path, lookup_type, negate, params):
    cache_signals.register(model_class, accessor_path, lookup_type, negate=negate)
    return get_invalidation_key(
        model_class._meta.db_table,
        accessor_path=accessor_path,
        lookup_type=lookup_type,
        negate=negate,
        value=params)
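# _register_signal folds together the register-then-build-key pattern that
# cache_results otherwise performs inline. A hypothetical call site:
#
#   invalidation_key = self._register_signal(
#       model_class, accessor_path, lookup_type, negate, params)
#   invalidation_dict[invalidation_key] = None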
def cache_results(self, results):
    """
    Create invalidation signals for these results in the form of CacheBotSignals.
    A CacheBotSignal stores a model and its accessor path to self.queryset.model.
    """
    # cache the results
    if not self.invalidation_only:
        cache.set(self.result_key, results, CACHE_SECONDS)

    invalidation_dict = {}
    for child, negate in self.queryset._get_where_clause(self.queryset.query.where):
        (table_alias, field_name, db_type), lookup_type, value_annotation, params = child
        for model_class, accessor_path in self._get_join_paths(table_alias, field_name):
            if self._is_valid_flush_path(accessor_path):
                cache_signals.register(model_class, accessor_path, lookup_type, negate=negate)
                invalidation_key = get_invalidation_key(
                    model_class._meta.db_table,
                    accessor_path=accessor_path,
                    lookup_type=lookup_type,
                    negate=negate,
                    value=params,
                    save=True)
                invalidation_dict[invalidation_key] = None
            join_to_tables = ifilter(lambda x: x[0] == model_class._meta.db_table,
                                     self.queryset.query.join_map.keys())
            for join_tuple in join_to_tables:
                if self._is_valid_flush_path(accessor_path):
                    model_class = self.queryset._get_model_class_from_table(join_tuple[1])
                    cache_signals.register(model_class, join_tuple[3], lookup_type, negate=negate)
                    invalidation_key = get_invalidation_key(
                        model_class._meta.db_table,
                        accessor_path=join_tuple[3],
                        lookup_type=lookup_type,
                        negate=negate,
                        value=params,
                        save=True)
                    invalidation_dict[invalidation_key] = None

    invalidation_dict.update(dict([(key, None) for key in self.get_invalidation_keys(results)]))
    invalidation_dict.update(cache.get_many(invalidation_dict.keys()))
    for flush_key, flush_list in invalidation_dict.iteritems():
        if flush_list is None:
            invalidation_dict[flush_key] = set([self.result_key])
        else:
            invalidation_dict[flush_key].add(self.result_key)
    cache.set_many(invalidation_dict, CACHE_SECONDS)
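# Unlike the add/append version above, this variant keeps each flush list as
# a Python set and writes everything back in a single cache.set_many call,
# so a flush entry ends up looking like (key and value names illustrative):
#
#   {'flush_key_1': set(['result_key_a', 'result_key_b']),
#    'flush_key_2': set(['result_key_a'])}
#
# The trade-off: get_many followed by set_many is a read-modify-write, so
# two processes caching results under the same flush key at the same time
# can lose an update, whereas memcached's append is atomic.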
def invalidate_cache(model_class, objects, **extra_keys):
    """
    Flushes the cache of any cached objects associated with this instance.

    Explicitly set a None value instead of just deleting so we don't have
    any race conditions where:
        Thread 1 -> Cache miss, get object from DB
        Thread 2 -> Object saved, deleted from cache
        Thread 1 -> Store (stale) object fetched from DB in cache
    Five seconds should be more than enough time to prevent this from
    happening for a web app.
    """
    invalidation_dict = {}
    accessor_set = cache_signals.get_global_signals(model_class)
    for obj in objects:
        for (accessor_path, lookup_type, negate) in accessor_set:
            if lookup_type != 'exact' or negate:
                # non-exact or negated lookups can't be matched to a single
                # value, so fall back to one catch-all invalidation key
                invalidation_key = get_invalidation_key(
                    model_class._meta.db_table,
                    accessor_path=accessor_path,
                    negate=negate,
                    value='')
                invalidation_dict[invalidation_key] = None
            else:
                for value in get_values(obj, accessor_path):
                    invalidation_key = get_invalidation_key(
                        model_class._meta.db_table,
                        accessor_path=accessor_path,
                        negate=negate,
                        value=value)
                    invalidation_dict[invalidation_key] = None

    if invalidation_dict:
        invalidation_dict.update(cache.get_many(invalidation_dict.keys()))

        cache_keys = set()
        for obj_key, cache_key_list in invalidation_dict.iteritems():
            if cache_key_list:
                cache_keys.update(cache_key_list.split(','))

        if cache_keys:
            # block the stale-read race: dependent result keys get an
            # explicit None placeholder instead of being deleted
            cache.set_many(dict([(key, None) for key in cache_keys]),
                           conf.CACHE_INVALIDATION_TIMEOUT)
        invalidation_dict.update(extra_keys)
        cache.delete_many(invalidation_dict.keys())
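# The teardown above treats the two kinds of keys differently: the dependent
# result keys gathered in cache_keys are overwritten with None for
# conf.CACHE_INVALIDATION_TIMEOUT (so a concurrent reader cannot re-cache a
# stale row under them), while the flush keys themselves, plus any
# extra_keys, are deleted outright. A hedged usage sketch; the model and
# instance names are hypothetical:
#
#   invalidate_cache(Article, [article])
#   invalidate_cache(Article, [article], extra_key=None)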
def get_invalidation_keys(self, results):
    """
    Iterates through a list of results, yielding an invalidation key for
    each result. If the query spans multiple tables, also yields the
    invalidation keys of any related rows.
    """
    related_fields = self.queryset._related_fields
    for obj in results:
        for field, model_class in related_fields.iteritems():
            pk_name = model_class._meta.pk.attname
            cache_signals.register(model_class, pk_name, 'exact')
            for value in get_values(obj, field):
                invalidation_key = get_invalidation_key(
                    model_class._meta.db_table,
                    accessor_path=pk_name,
                    value=value)
                yield invalidation_key
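# cache_results consumes this generator to seed its invalidation_dict:
#
#   invalidation_dict.update(dict([(key, None) for key in
#                                  self.get_invalidation_keys(results)]))
#
# Registering each related model's primary key with an 'exact' lookup means
# that saving or deleting any related row flushes every cached result set
# that row appears in.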