def test_delete_entity_with_global_cache(ds_entity, client_context):
    """Deleting an entity must evict its value from the in-process global cache."""
    resource_id = test_utils.system.unique_resource_id()
    ds_entity(KIND, resource_id, foo=42)

    class SomeKind(ndb.Model):
        foo = ndb.IntegerProperty()

    key = ndb.Key(KIND, resource_id)
    global_key = _cache.global_cache_key(key._key)
    in_process_cache = global_cache_module._InProcessGlobalCache()
    raw_cache = global_cache_module._InProcessGlobalCache.cache

    with client_context.new(global_cache=in_process_cache).use():
        # A successful read populates the cache.
        fetched = key.get()
        assert fetched.foo == 42
        assert global_key in raw_cache

        # Deletion evicts the cached value.
        assert key.delete() is None
        assert global_key not in raw_cache

        # This is py27 behavior. Not entirely sold on leaving _LOCKED value for
        # Datastore misses.
        assert key.get() is None
        assert raw_cache[global_key][0] == b"0"
def test_insert_entity_with_redis_cache(dispose_of, redis_context):
    """A write must not populate the Redis cache; a later write invalidates it."""

    class SomeKind(ndb.Model):
        foo = ndb.IntegerProperty()
        bar = ndb.StringProperty()

    model = SomeKind(foo=42, bar="none")
    key = model.put()
    dispose_of(key._key)

    redis = redis_context.global_cache.redis
    redis_key = _cache.global_cache_key(key._key)

    # The initial put does not write through to the cache.
    assert redis.get(redis_key) is None

    # A lookup both returns the entity and fills the cache.
    fetched = key.get()
    assert fetched.foo == 42
    assert fetched.bar == "none"
    assert redis.get(redis_key) is not None

    model.foo = 43
    model.put()

    # This is py27 behavior. I can see a case being made for caching the
    # entity on write rather than waiting for a subsequent lookup.
    assert redis.get(redis_key) is None
def test_global_cache_key():
    """global_cache_key must be the serialized key protobuf with the cache prefix."""
    key = mock.Mock()
    serialize = key.to_protobuf.return_value.SerializeToString
    serialize.return_value = b"himom!"

    expected = _cache._PREFIX + b"himom!"
    assert _cache.global_cache_key(key) == expected

    # The key is serialized exactly once, with no arguments.
    key.to_protobuf.assert_called_once_with()
    serialize.assert_called_once_with()
def lookup(key, options):
    """Look up a Datastore entity.

    Gets an entity from Datastore, asynchronously. Checks the global cache,
    first, if appropriate. Uses batching.

    Args:
        key (~datastore.Key): The key for the entity to retrieve.
        options (_options.ReadOptions): The options for the request. For
            example, ``{"read_consistency": EVENTUAL}``.

    Returns:
        :class:`~tasklets.Future`: If not an exception, future's result will
            be either an entity protocol buffer or _NOT_FOUND.
    """
    context = context_module.get_context()
    use_datastore = context._use_datastore(key, options)
    # Transactional reads bypass the global cache entirely; only consult the
    # context's cache policy for non-transactional lookups.
    if use_datastore and options.transaction:
        use_global_cache = False
    else:
        use_global_cache = context._use_global_cache(key, options)

    if not (use_global_cache or use_datastore):
        raise TypeError("use_global_cache and use_datastore can't both be False")

    entity_pb = _NOT_FOUND
    key_locked = False

    if use_global_cache:
        cache_key = _cache.global_cache_key(key)
        result = yield _cache.global_get(cache_key)
        key_locked = _cache.is_locked_value(result)
        if not key_locked:
            if result:
                # Cache hit: deserialize the cached bytes into an entity pb.
                entity_pb = entity_pb2.Entity()
                entity_pb.MergeFromString(result)
            elif use_datastore:
                # Cache miss: take a read lock and watch the key so we can
                # safely write the Datastore result back with a CAS below.
                lock = yield _cache.global_lock_for_read(cache_key, result)
                if lock:
                    yield _cache.global_watch(cache_key, lock)
                else:
                    # Another thread locked or wrote to this key after the call to
                    # _cache.global_get above. Behave as though the key was locked by
                    # another thread and don't attempt to write our value below
                    key_locked = True

    if entity_pb is _NOT_FOUND and use_datastore:
        # Fall through to Datastore, coalescing concurrent lookups via a batch.
        batch = _batch.get_batch(_LookupBatch, options)
        entity_pb = yield batch.add(key)

    # Do not cache misses
    if use_global_cache and not key_locked:
        if entity_pb is not _NOT_FOUND:
            expires = context._global_cache_timeout(key, options)
            serialized = entity_pb.SerializeToString()
            # Compare-and-swap so we only write if no one touched the key
            # since we started watching it.
            yield _cache.global_compare_and_swap(
                cache_key, serialized, expires=expires
            )
        else:
            # Miss: release the watch taken above without caching anything.
            yield _cache.global_unwatch(cache_key)

    raise tasklets.Return(entity_pb)