def set(self, key: T, deferred: defer.Deferred) -> defer.Deferred:
    """Store *deferred* as the pending result for *key*.

    *deferred* should run its callbacks in the sentinel logcontext (ie,
    you should wrap normal synapse deferreds with
    synapse.logging.context.run_in_background).

    Can return either a new Deferred (which also doesn't follow the
    synapse logcontext rules), or, if *deferred* was already complete,
    the actual result. You will probably want to make_deferred_yieldable
    the result.

    Args:
        key: key to get/set in the cache
        deferred: The deferred which resolves to the result.

    Returns:
        A new deferred which resolves to the actual result.
    """
    observable = ObservableDeferred(deferred, consumeErrors=True)
    self.pending_result_cache[key] = observable

    def evict(value):
        # With a configured timeout the completed entry lingers for
        # timeout_sec before eviction; otherwise drop it immediately.
        if self.timeout_sec:
            self.clock.call_later(
                self.timeout_sec, self.pending_result_cache.pop, key, None
            )
        else:
            self.pending_result_cache.pop(key, None)
        return value

    observable.addBoth(evict)
    return observable.observe()
def set(self, key, deferred):
    """Set the entry for the given key to the given deferred.

    *deferred* should run its callbacks in the sentinel logcontext (ie,
    you should wrap normal synapse deferreds with
    logcontext.run_in_background).

    Can return either a new Deferred (which also doesn't follow the
    synapse logcontext rules), or, if *deferred* was already complete,
    the actual result. You will probably want to make_deferred_yieldable
    the result.

    Args:
        key (hashable): key to get/set in the cache
        deferred (twisted.internet.defer.Deferred[T]): The deferred which
            resolves to the result.

    Returns:
        twisted.internet.defer.Deferred[T]|T: a new deferred, or the
            actual result.
    """
    result = ObservableDeferred(deferred, consumeErrors=True)
    self.pending_result_cache[key] = result

    def remove(r):
        # With a configured timeout, keep the completed entry around for
        # timeout_sec before evicting it; otherwise remove it right away.
        if self.timeout_sec:
            self.clock.call_later(
                self.timeout_sec, self.pending_result_cache.pop, key, None,
            )
        else:
            self.pending_result_cache.pop(key, None)
        return r

    result.addBoth(remove)
    return result.observe()
def set(self, time_now_ms, key, deferred):
    """Cache *deferred* under *key*, rotating out old generations first.

    Args:
        time_now_ms: current time, presumably in milliseconds per the
            name — the actual semantics are defined by self.rotate,
            which is not visible here; confirm against that method.
        key: key to set in the cache.
        deferred: the deferred which resolves to the result.

    Returns:
        A new deferred which resolves to the actual result.
    """
    # Expire stale generations before inserting the new entry.
    self.rotate(time_now_ms)

    # NOTE(review): unlike other variants of this method, no
    # consumeErrors=True is passed here — confirm whether failures are
    # expected to propagate as unhandled errors in this version.
    result = ObservableDeferred(deferred)

    self.pending_result_cache[key] = result

    def shuffle_along(r):
        # When the deferred completes we shuffle it along to the first
        # generation of the result cache. So that it will eventually
        # expire from the rotation of that cache.
        self.next_result_cache[key] = result
        self.pending_result_cache.pop(key, None)
        return r

    result.addBoth(shuffle_along)

    return result.observe()
def _set(
    self,
    context: ResponseCacheContext[KV],
    deferred: "defer.Deferred[RV]",
    opentracing_span_context: "Optional[opentracing.SpanContext]",
) -> ResponseCacheEntry:
    """Record the pending result for the given cache context.

    *deferred* should run its callbacks in the sentinel logcontext (ie,
    you should wrap normal synapse deferreds with
    synapse.logging.context.run_in_background).

    Args:
        context: Information about the cache miss
        deferred: The deferred which resolves to the result.
        opentracing_span_context: An opentracing span wrapping the calculation

    Returns:
        The cache entry object.
    """
    cache_key = context.cache_key
    observable = ObservableDeferred(deferred, consumeErrors=True)
    entry = ResponseCacheEntry(observable, opentracing_span_context)
    self._result_cache[cache_key] = entry

    def on_complete(r: RV) -> RV:
        # A non-zero timeout with an uncleared should_cache bit means the
        # entry lingers for timeout_sec before being evicted; otherwise it
        # is removed as soon as the result arrives.
        if self.timeout_sec and context.should_cache:
            self.clock.call_later(
                self.timeout_sec, self._result_cache.pop, cache_key, None
            )
        else:
            self._result_cache.pop(cache_key, None)
        return r

    # The callback must be attached *after* the entry goes into
    # _result_cache: for an already-complete deferred it fires
    # immediately, and the reverse order would leave a stuck entry.
    observable.addBoth(on_complete)
    return entry
def _set(self, context: ResponseCacheContext[KV], deferred: defer.Deferred) -> defer.Deferred:
    """Store the pending result for the given cache context.

    *deferred* should run its callbacks in the sentinel logcontext (ie,
    you should wrap normal synapse deferreds with
    synapse.logging.context.run_in_background).

    Returns a new Deferred (which also doesn't follow the synapse
    logcontext rules). You will probably want to make_deferred_yieldable
    the result.

    Args:
        context: Information about the cache miss
        deferred: The deferred which resolves to the result.

    Returns:
        A new deferred which resolves to the actual result.
    """
    cache_key = context.cache_key
    observable = ObservableDeferred(deferred, consumeErrors=True)
    self.pending_result_cache[cache_key] = observable

    def evict(value):
        # A non-zero timeout with an uncleared should_cache bit keeps the
        # completed entry cached for timeout_sec; otherwise it is removed
        # the moment the result arrives.
        if self.timeout_sec and context.should_cache:
            self.clock.call_later(
                self.timeout_sec, self.pending_result_cache.pop, cache_key, None
            )
        else:
            self.pending_result_cache.pop(cache_key, None)
        return value

    # Attach the callback only *after* inserting into
    # pending_result_cache: an already-complete deferred fires the
    # callback immediately, and the reverse order would leave a stuck
    # entry in the cache.
    observable.addBoth(evict)
    return observable.observe()