Example #1
def invalidate_from_dict(model, values):
    """
    Invalidates caches that could possibly be influenced by the object
    """
    # Build the list of invalidator keys from the model's schemes and the object's field values
    schemes = cache_schemes.schemes(model)
    conjs_keys = [
        conj_cache_key_from_scheme(model, scheme, values) for scheme in schemes
    ]

    # Get a union of all cache keys registered in invalidators
    # Get schemes version at the same time, hoping it's unchanged
    version_key = cache_schemes.get_version_key(model)
    pipe = redis_conn.pipeline(transaction=False)
    # Optimistic locking: we hope schemes and invalidators won't change while we remove them
    # Ignoring this could leave a cache key hanging around with its invalidator removed
    # HACK: fix for strange WATCH handling in redis-py 2.4.6+
    if hasattr(pipe, 'pipeline_execute_command'):
        pipe.pipeline_execute_command('watch', version_key, *conjs_keys)
    else:
        pipe.watch(version_key, *conjs_keys)
    pipe.get(version_key)
    pipe.sunion(conjs_keys)
    _, version, cache_keys = pipe.execute()

    # Check if our version of the schemes for this model is obsolete; update them and redo if needed
    # This shouldn't happen too often once the schemes are filled in a bit
    if version is None:
        version = 0
    if int(version) != cache_schemes.version(model):
        redis_conn.unwatch()
        cache_schemes.load_schemes(model)
        invalidate_from_dict(model, values)

    elif cache_keys or conjs_keys:
        # `conjs_keys` are keys of sets containing `cache_keys` we are going to delete,
        # so we'll remove them too.
        # NOTE: There could be other invalidators, not matched by the current object,
        #       which reference the cache keys we delete; those will hang around for a while.
        try:
            txn = redis_conn.pipeline()
            txn.delete(*(list(cache_keys) + conjs_keys))
            txn.execute()
        except WatchError:
            # Optimistic locking failed: schemes or one of invalidator sets changed.
            # Just redo everything.
            # NOTE: Scripting would be a nice alternative to optimistic locking
            invalidate_from_dict(model, values)

    else:
        redis_conn.unwatch()
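For context, the snippet above relies on redis-py's optimistic locking: WATCH the keys, read them, then delete inside a MULTI/EXEC transaction that fails with WatchError if any watched key changed in between. A minimal self-contained sketch of that standard retry pattern (key names are hypothetical, not taken from the code above):

import redis
from redis.exceptions import WatchError

r = redis.Redis()

def delete_watched(version_key, *keys):
    # Classic optimistic-locking loop: retry until no concurrent writer
    # touches version_key between WATCH and EXEC.
    with r.pipeline() as pipe:
        while True:
            try:
                pipe.watch(version_key)   # start watching before reading
                pipe.multi()              # queue the commands transactionally
                pipe.delete(*keys)
                pipe.execute()            # raises WatchError on a conflict
                return
            except WatchError:
                continue                  # state changed under us, redo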
Example #2
    def ensure_known(self, model, new_schemes):
        """
        Ensure that `new_schemes` are known, registering any that are not
        """
        new_schemes = set(new_schemes)
        model_name = get_model_name(model)
        loaded = False

        if model_name not in self.local:
            self.load_schemes(model)
            loaded = True
        schemes = self.local[model_name]

        if new_schemes - schemes:
            if not loaded:
                schemes = self.load_schemes(model)
            if new_schemes - schemes:
                # Write new schemes to redis
                txn = redis_conn.pipeline()
                txn.incr(self.get_version_key(model_name))  # Bump the schemes version

                lookup_key = self.get_lookup_key(model_name)
                for scheme in new_schemes - schemes:
                    txn.sadd(lookup_key, serialize_scheme(scheme))
                txn.execute()

                # Update the local copy of the schemes and the local version
                self.local[model_name].update(new_schemes)
                # We increment locally instead of using the incr result from redis,
                # because even our freshly updated collection could already be obsolete
                self.versions[model_name] += 1
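serialize_scheme and deserialize_scheme are not shown in these snippets. A plausible minimal pair, assuming a scheme is a tuple of field names (an assumption for illustration, not the project's confirmed implementation):

def serialize_scheme(scheme):
    # ('age', 'name') -> 'age,name'; the empty scheme () -> ''
    return ','.join(scheme)

def deserialize_scheme(scheme):
    # 'age,name' -> ('age', 'name'); '' -> ()
    return tuple(filter(None, scheme.split(',')))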
Example #3
def cache_thing(model, cache_key, data, cond_dnf=[[]], timeout=None):
    """
    Writes data to cache and creates appropriate invalidators.
    """
    if timeout is None:
        profile = model_profile(model)
        timeout = profile['timeout']

    # Ensure that all schemes of current query are "known"
    schemes = map(conj_scheme, cond_dnf)
    cache_schemes.ensure_known(model, schemes)

    txn = redis_conn.pipeline()

    # Write data to cache
    pickled_data = pickle.dumps(data, -1)
    if timeout is not None:
        txn.setex(cache_key, pickled_data, timeout)
    else:
        txn.set(cache_key, pickled_data)

    # Add the new cache_key to the invalidator set of every conjunction in the DNF
    for conj in cond_dnf:
        conj_key = conj_cache_key(model, conj)
        txn.sadd(conj_key, cache_key)
        if timeout is not None:
            # The invalidator timeout should be longer than that of any key it references,
            # so we take the timeout from the profile, which is our upper limit,
            # and add a few extra seconds to be safe
            txn.expire(conj_key, model._cacheprofile['timeout'] + 10)

    txn.execute()
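A hypothetical call, assuming each conjunction in cond_dnf is a list of (field, value) pairs (the model, cache key format, and values below are invented for illustration):

# Cache the result of a query like User.objects.filter(age=25);
# the DNF [[('age', '25')]] holds a single one-condition conjunction,
# so exactly one invalidator set gets the cache key added to it.
cache_thing(User, 'q:user:age=25', users, cond_dnf=[[('age', '25')]], timeout=300)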
Example #4
    def load_schemes(self, model):
        model_name = get_model_name(model)

        txn = redis_conn.pipeline()
        txn.get(self.get_version_key(model))
        txn.smembers(self.get_lookup_key(model_name))
        version, members = txn.execute()

        self.local[model_name] = set(map(deserialize_scheme, members))
        self.local[model_name].add(())  # Always include the empty scheme
        self.versions[model_name] = int(version or 0)
        return self.local[model_name]
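get_version_key and get_lookup_key are not shown either. Hypothetical implementations consistent with the calls above, which sometimes pass a model and sometimes a model name (the key formats are invented):

    def get_lookup_key(self, model_or_name):
        # Accept either a model class or its name, as the callers above do
        if not isinstance(model_or_name, str):
            model_or_name = get_model_name(model_or_name)
        return 'schemes:%s' % model_or_name

    def get_version_key(self, model_or_name):
        if not isinstance(model_or_name, str):
            model_or_name = get_model_name(model_or_name)
        return 'schemes:%s:version' % model_or_name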