Example No. 1
def cache_thing(model, cache_key, data, cond_dnf=[[]], timeout=None):
    """
    Writes data to cache and creates appropriate invalidators.
    """
    model = non_proxy(model)

    if timeout is None:
        profile = model_profile(model)
        timeout = profile['timeout']

    # Ensure that all schemes of current query are "known"
    schemes = map(conj_scheme, cond_dnf)
    cache_schemes.ensure_known(model, schemes)

    txn = redis_client.pipeline()

    # Write data to cache
    pickled_data = pickle.dumps(data, -1)
    if timeout is not None:
        txn.setex(cache_key, timeout, pickled_data)
    else:
        txn.set(cache_key, pickled_data)

    # Add new cache_key to list of dependencies for every conjunction in dnf
    for conj in cond_dnf:
        conj_key = conj_cache_key(model, conj)
        txn.sadd(conj_key, cache_key)
        if timeout is not None:
            # The invalidator's timeout should be larger than the timeout of any key it references,
            # so we take the timeout from the profile, which is our upper limit,
            # and add a few extra seconds to be extra safe.
            txn.expire(conj_key, model._cacheprofile['timeout'] + 10)

    txn.execute()
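
For context, here is a minimal sketch of how such a call might look. The model, cache
key, and conditions below are hypothetical placeholders, and the shape of each
conjunction (a list of field/value pairs) is an assumption, not something the snippet
itself confirms.

# Hypothetical call: cache a queryset result under one key and register it in the
# invalidator sets for (title='Hello' AND published=True) OR (author_id=42).
cache_thing(
    Article,                      # assumed Django model managed by cacheops
    'q:article:3f2a',             # placeholder cache key computed elsewhere
    list(some_queryset),          # data to cache
    cond_dnf=[
        [('title', 'Hello'), ('published', True)],
        [('author_id', 42)],
    ],
    timeout=300,
)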
Example No. 2
def cache_thing(model, cache_key, data, cond_dnf=[[]], timeout=None):
    """
    Writes data to cache and creates appropriate invalidators.
    """
    model = non_proxy(model)

    if timeout is None:
        profile = model_profile(model)
        timeout = profile['timeout']

    # Ensure that all schemes of current query are "known"
    schemes = map(conj_scheme, cond_dnf)
    cache_schemes.ensure_known(model, schemes)

    txn = redis_client.pipeline()

    # Write data to cache
    pickled_data = pickle.dumps(data, -1)
    if timeout is not None:
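        # NOTE: this (name, value, time) argument order matches the legacy redis.Redis
        # client; StrictRedis and redis-py 3+ expect setex(name, time, value).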
        txn.setex(cache_key, pickled_data, timeout)
    else:
        txn.set(cache_key, pickled_data)

    # Add new cache_key to list of dependencies for every conjunction in dnf
    for conj in cond_dnf:
        conj_key = conj_cache_key(model, conj)
        txn.sadd(conj_key, cache_key)
        if timeout is not None:
            # The invalidator's timeout should be larger than the timeout of any key it references,
            # so we take the timeout from the profile, which is our upper limit,
            # and add a few extra seconds to be extra safe.
            txn.expire(conj_key, model._cacheprofile['timeout'] + 10)

    txn.execute()
Example No. 3
def cache_page_by_queryset(model, cache_key, data, cond_dnf=[[]], timeout=None,
                           only_conj=False):
    """ Overridden method `cacheops.query.cache_thing` which doesn't
    pickle data and can set only invalidation conjunctions.
    """
    model = non_proxy(model)

    if timeout is None:
        profile = model_profile(model)
        timeout = profile['timeout']

    schemes = map(conj_scheme, cond_dnf)
    cache_schemes.ensure_known(model, schemes)

    txn = redis_client.pipeline()

    # The original code pickled the data here; we skip that because the raw value is cached as-is
    # pickled_data = pickle.dumps(data, -1)

    # Write the data only if `only_conj` doesn't restrict the call to invalidators
    if not only_conj:
        if timeout is not None:
            txn.setex(cache_key, timeout, data)
        else:
            txn.set(cache_key, data)

    for conj in cond_dnf:
        conj_key = conj_cache_key(model, conj)
        txn.sadd(conj_key, cache_key)
        if timeout is not None:
            txn.expire(conj_key, model._cacheprofile['timeout'] + 10)

    txn.execute()
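
A minimal usage sketch for the `only_conj` mode, assuming the page's raw value is
stored under the same key by other code; the key and variables here are hypothetical
placeholders.

# Hypothetical call: register invalidation conjunctions for an already-cached page
# without overwriting its value (only_conj=True skips the SET/SETEX branch).
cache_page_by_queryset(
    Article,
    'page:article-list:1',        # placeholder key holding the rendered page
    rendered_html,                # ignored when only_conj=True
    cond_dnf=[[('is_active', True)]],
    only_conj=True,
)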
Example No. 4
def invalidate_obj(obj):
    """
    Invalidates caches that can possibly be influenced by the object
    """
    model = non_proxy(obj.__class__)
    load_script('invalidate')(args=[
        get_model_name(model),
        serialize_object(model, obj)
    ])
Example No. 5
def invalidate_model(model):
    """
    Invalidates all caches for the given model.
    NOTE: This is heavy artillery which uses the redis KEYS command,
          which can be relatively slow on large datasets.
    """
    model = non_proxy(model)
    conjs_keys = redis_client.keys('conj:%s:*' % get_model_name(model))
    if conjs_keys:
        cache_keys = redis_client.sunion(conjs_keys)
        redis_client.delete(*(list(cache_keys) + conjs_keys))
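
Since KEYS blocks the Redis server while it scans the whole keyspace, the same sweep
can be done incrementally with SCAN. The following is a sketch of that alternative,
not code from the source; it assumes the same 'conj:<model>:*' key layout and helpers
used above.

def invalidate_model_scan(model):
    # Same effect as invalidate_model, but collects conj keys incrementally with
    # scan_iter instead of one blocking KEYS call.
    model = non_proxy(model)
    pattern = 'conj:%s:*' % get_model_name(model)
    conjs_keys = list(redis_client.scan_iter(match=pattern, count=1000))
    if conjs_keys:
        cache_keys = redis_client.sunion(conjs_keys)
        redis_client.delete(*(list(cache_keys) + conjs_keys))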
Example No. 6
def invalidate_obj(obj):
    """
    Invalidates caches that can possibly be influenced by the object
    """
    model = non_proxy(obj.__class__)

    # Loading model schemes from local memory (or from redis)
    schemes = cache_schemes.schemes(model)

    # We hope that our schemes are valid, but if not we will update them and redo invalidation
    # on second pass
    for _ in (1, 2):
        # Create a list of invalidators from list of schemes and values of object fields
        conjs_keys = [
            conj_cache_key_from_scheme(model, scheme, obj)
            for scheme in schemes
        ]

        # Reading scheme version, cache_keys and deleting invalidators in
        # a single transaction.
        def _invalidate_conjs(pipe):
            # get schemes version to check later that it's not obsolete
            pipe.get(cache_schemes.get_version_key(model))
            # Get a union of all cache keys registered in invalidators
            pipe.sunion(conjs_keys)
            # `conjs_keys` are keys of sets containing `cache_keys` we are going to delete,
            # so we'll remove them too.
            # NOTE: There could be some other invalidators not matched with current object,
            #       which reference cache keys we delete, they will be hanging out for a while.
            pipe.delete(*conjs_keys)

        # NOTE: we delete fetched cache_keys later which makes a tiny possibility that something
        #       will fail in the middle leaving those cache keys hanging without invalidators.
        #       The alternative WATCH-based optimistic locking proved to be pessimistic.
        version, cache_keys, _ = redis_client.transaction(_invalidate_conjs)
        if cache_keys:
            redis_client.delete(*cache_keys)

        # OK, we invalidated all conjunctions we had in memory, but model schema
        # may have been changed in redis long time ago. If this happened,
        # schema version will not match and we should load new schemes and redo our thing
        if int(version or 0) != cache_schemes.version(model):
            schemes = cache_schemes.load_schemes(model)
        else:
            break
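
The three-way unpacking above relies on redis-py's `Redis.transaction()` returning
the list produced by `pipe.execute()`, one entry per queued command. A tiny
illustration of that contract, using throwaway keys:

def _demo(pipe):
    pipe.get('a')        # -> results[0]
    pipe.sunion('s1')    # -> results[1]
    pipe.delete('s1')    # -> results[2]

results = redis_client.transaction(_demo)   # [value_of_a, set_members, deleted_count]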
Example No. 7
def invalidate_obj(obj):
    """
    Invalidates caches that can possibly be influenced by the object
    """
    model = non_proxy(obj.__class__)

    # Loading model schemes from local memory (or from redis)
    schemes = cache_schemes.schemes(model)

    # We hope that our schemes are valid, but if not we will update them and redo invalidation
    # on second pass
    for _ in (1, 2):
        # Create a list of invalidators from list of schemes and values of object fields
        conjs_keys = [conj_cache_key_from_scheme(model, scheme, obj) for scheme in schemes]

        # Reading scheme version, cache_keys and deleting invalidators in
        # a single transaction.
        def _invalidate_conjs(pipe):
            # get schemes version to check later that it's not obsolete
            pipe.get(cache_schemes.get_version_key(model))
            # Get a union of all cache keys registered in invalidators
            pipe.sunion(conjs_keys)
            # `conjs_keys` are keys of sets containing `cache_keys` we are going to delete,
            # so we'll remove them too.
            # NOTE: There could be some other invalidators not matched with current object,
            #       which reference cache keys we delete, they will be hanging out for a while.
            pipe.delete(*conjs_keys)

        # NOTE: we delete fetched cache_keys later which makes a tiny possibility that something
        #       will fail in the middle leaving those cache keys hanging without invalidators.
        #       The alternative WATCH-based optimistic locking proved to be pessimistic.
        version, cache_keys, _ = redis_client.transaction(_invalidate_conjs)
        if cache_keys:
            redis_client.delete(*cache_keys)

        # OK, we invalidated all conjunctions we had in memory, but model schema
        # may have been changed in redis long time ago. If this happened,
        # schema version will not match and we should load new schemes and redo our thing
        if int(version or 0) != cache_schemes.version(model):
            schemes = cache_schemes.load_schemes(model)
        else:
            break
Example No. 8
def cache_thing(model, cache_key, data, cond_dnf=[[]], timeout=None):
    u"""
        По факту - скопированный метод cache_thing из кешопса
            с двумя изменениями:
                - просто функция, а не метод объекта
                - убрана сериализация data с помощью pickle.dumps
    """

    model = non_proxy(model)

    if timeout is None:
        profile = model_profile(model)
        timeout = profile['timeout']

    # Ensure that all schemes of current query are "known"
    schemes = map(conj_scheme, cond_dnf)
    cache_schemes.ensure_known(model, schemes)

    txn = redis_client.pipeline()

    # Write data to cache
    if timeout is not None:
        txn.setex(cache_key, timeout, data)
    else:
        txn.set(cache_key, data)

    # Add new cache_key to list of dependencies for
    # every conjunction in dnf
    for conj in cond_dnf:
        conj_key = conj_cache_key(model, conj)
        txn.sadd(conj_key, cache_key)
        if timeout is not None:
            # The invalidator's timeout should be larger than the timeout of any key it references,
            # so we take the timeout from the profile, which is our upper limit,
            # and add a few extra seconds to be extra safe.
            txn.expire(conj_key, model._cacheprofile['timeout'] + 10)

    txn.execute()
Example No. 9
def cache_thing(model, cache_key, data, cond_dnf=[[]], timeout=None):
    """
    Writes data to cache and creates appropriate invalidators.
    """
    model = non_proxy(model)

    if timeout is None:
        profile = model_profile(model)
        timeout = profile['timeout']

    pickled_data = pickle.dumps(data, -1)
    load_script('cache_thing')(
        keys=[cache_key],
        args=[
            pickled_data,
            get_model_name(model),
            json.dumps(cond_dnf, default=str),
            timeout,
            # The invalidator's timeout should be larger than the timeout of any key it references,
            # so we take the timeout from the profile, which is our upper limit,
            # and add a few extra seconds to be extra safe.
            model._cacheprofile['timeout'] + 10
        ]
    )
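
`load_script` itself is not shown in these snippets. A plausible minimal
implementation, assuming it reads a Lua file of the same name and wraps redis-py's
`register_script` (an assumption, not confirmed by the excerpt):

import os.path

_scripts = {}

def load_script(name):
    # Hypothetical helper: read lua/<name>.lua once and wrap it with redis-py's
    # register_script(), which returns a callable accepting keys=[...] and args=[...].
    if name not in _scripts:
        path = os.path.join(os.path.dirname(__file__), 'lua', '%s.lua' % name)
        with open(path) as f:
            _scripts[name] = redis_client.register_script(f.read())
    return _scripts[name]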
Example No. 10
def invalidate_obj(obj):
    """
    Invalidates caches that can possibly be influenced by the object
    """
    invalidate_from_dict(non_proxy(obj.__class__), obj.__dict__)