def wait_for_previous_lookups(self, server_names, server_to_deferred):
    """Waits for any previous key lookups for the given servers to finish.

    Args:
        server_names (list): list of server_names we want to lookup
        server_to_deferred (dict): server_name to deferred which gets
            resolved once we've finished looking up keys for that server
    """
    # Loop until no requested server has an in-flight lookup. We re-check
    # after every wait because new lookups may have been registered in
    # self.key_downloads while we were waiting on the previous batch.
    while True:
        wait_on = [
            self.key_downloads[server_name]
            for server_name in server_names
            if server_name in self.key_downloads
        ]
        if wait_on:
            # NOTE(review): PreserveLoggingContext makes the yield safe for
            # deferreds that do not follow the synapse logcontext rules —
            # presumably DeferredList callbacks fire in the sentinel
            # context; confirm against the project's logcontext docs.
            with PreserveLoggingContext():
                yield defer.DeferredList(wait_on)
        else:
            break

    # Register our own lookups so that later callers block on us, and
    # remove each entry again once its lookup resolves (success or
    # failure — hence addBoth).
    for server_name, deferred in server_to_deferred.items():
        d = ObservableDeferred(preserve_context_over_deferred(deferred))
        self.key_downloads[server_name] = d

        # Pass the result through unchanged so downstream callbacks still
        # see it; server_name is bound via the addBoth extra argument.
        def rm(r, server_name):
            self.key_downloads.pop(server_name, None)
            return r

        d.addBoth(rm, server_name)
def wait_for_previous_lookups(self, server_names, server_to_deferred):
    """Waits for any previous key lookups for the given servers to finish.

    Args:
        server_names (list): list of server_names we want to lookup
        server_to_deferred (dict): server_name to deferred which gets
            resolved once we've finished looking up keys for that server
    """
    # Block until none of the requested servers has a lookup in flight.
    # Re-check after each wait, since new lookups may have been
    # registered while we were waiting.
    while True:
        in_flight = [
            self.key_downloads[name]
            for name in server_names
            if name in self.key_downloads
        ]
        if not in_flight:
            break
        yield defer.DeferredList(in_flight)

    # Register our own lookups so later callers wait on us, and arrange
    # for each entry to be dropped once its deferred resolves (whether it
    # succeeds or fails — hence addBoth).
    for name, lookup in server_to_deferred.items():
        observable = ObservableDeferred(lookup)
        self.key_downloads[name] = observable

        # Return the result untouched so downstream observers still see it.
        def remove_entry(result, name):
            self.key_downloads.pop(name, None)
            return result

        observable.addBoth(remove_entry, name)
def set(self, key, deferred):
    """Set the entry for the given key to the given deferred.

    *deferred* should run its callbacks in the sentinel logcontext (ie,
    you should wrap normal synapse deferreds with
    logcontext.run_in_background).

    Can return either a new Deferred (which also doesn't follow the
    synapse logcontext rules), or, if *deferred* was already complete,
    the actual result. You will probably want to make_deferred_yieldable
    the result.

    Args:
        key (hashable):
        deferred (twisted.internet.defer.Deferred[T):

    Returns:
        twisted.internet.defer.Deferred[T]|T: a new deferred, or the
            actual result.
    """
    entry = ObservableDeferred(deferred, consumeErrors=True)
    self.pending_result_cache[key] = entry

    def on_complete(result):
        # Once the lookup has finished, either keep the entry around for
        # timeout_sec so concurrent callers can reuse it, or (with no
        # timeout configured) evict it immediately. Pass the result
        # through unchanged for downstream observers.
        if self.timeout_sec:
            self.clock.call_later(
                self.timeout_sec,
                self.pending_result_cache.pop, key, None,
            )
        else:
            self.pending_result_cache.pop(key, None)
        return result

    entry.addBoth(on_complete)
    return entry.observe()
def set(self, key, deferred):
    """Cache *deferred* under *key* until it completes.

    Args:
        key (hashable): cache key for the pending result
        deferred (twisted.internet.defer.Deferred): the in-flight lookup

    Returns:
        twisted.internet.defer.Deferred: a fresh observer on the cached
            deferred.
    """
    entry = ObservableDeferred(deferred, consumeErrors=True)
    self.pending_result_cache[key] = entry

    def evict(result):
        # Drop the cache entry as soon as the deferred resolves (success
        # or failure), passing the result through untouched.
        self.pending_result_cache.pop(key, None)
        return result

    entry.addBoth(evict)
    return entry.observe()
def set(self, key, deferred):
    """Cache *deferred* under *key* while it is in flight.

    Once the deferred completes, the entry is kept for ``timeout_sec``
    (if configured) so concurrent callers can share the result, and is
    evicted immediately otherwise.

    Args:
        key (hashable): cache key for the pending result
        deferred (twisted.internet.defer.Deferred): the in-flight lookup

    Returns:
        twisted.internet.defer.Deferred: a fresh observer on the cached
            deferred.
    """
    entry = ObservableDeferred(deferred, consumeErrors=True)
    self.pending_result_cache[key] = entry

    def on_complete(result):
        # Either schedule delayed eviction or evict straight away; the
        # result is returned unchanged for downstream observers.
        if self.timeout_sec:
            self.clock.call_later(
                self.timeout_sec,
                self.pending_result_cache.pop, key, None,
            )
        else:
            self.pending_result_cache.pop(key, None)
        return result

    entry.addBoth(on_complete)
    return entry.observe()
def set(self, time_now_ms, key, deferred):
    """Rotate the cache generations if due, then cache *deferred* under *key*.

    Args:
        time_now_ms (int): current time, used to decide whether to rotate
        key (hashable): cache key for the pending result
        deferred (twisted.internet.defer.Deferred): the in-flight lookup

    Returns:
        twisted.internet.defer.Deferred: a fresh observer on the cached
            deferred.
    """
    self.rotate(time_now_ms)

    entry = ObservableDeferred(deferred)
    self.pending_result_cache[key] = entry

    def migrate(result):
        # On completion, move the entry from the pending cache into the
        # first generation of the result cache, so that the rotation of
        # that cache eventually expires it. The result itself is passed
        # through unchanged.
        self.next_result_cache[key] = entry
        self.pending_result_cache.pop(key, None)
        return result

    entry.addBoth(migrate)
    return entry.observe()