def do_iterations():
    with Measure(self.clock, "get_server_verify_keys"):
        # dict[str, dict[str, VerifyKey]]: results so far.
        # map server_name -> key_id -> VerifyKey
        merged_results = {}

        # dict[str, set(str)]: keys to fetch for each server
        missing_keys = {}
        for verify_request in verify_requests:
            missing_keys.setdefault(verify_request.server_name, set()).update(
                verify_request.key_ids
            )

        for fn in key_fetch_fns:
            results = yield fn(missing_keys.items())
            merged_results.update(results)

            # We now need to figure out which verify requests we have keys
            # for and which we don't
            missing_keys = {}
            requests_missing_keys = []
            for verify_request in verify_requests:
                server_name = verify_request.server_name
                result_keys = merged_results[server_name]

                if verify_request.deferred.called:
                    # We've already called this deferred, which probably
                    # means that we've already found a key for it.
                    continue

                for key_id in verify_request.key_ids:
                    if key_id in result_keys:
                        with PreserveLoggingContext():
                            verify_request.deferred.callback((
                                server_name,
                                key_id,
                                result_keys[key_id],
                            ))
                        break
                else:
                    # The else block is only reached if the loop above
                    # doesn't break.
                    missing_keys.setdefault(server_name, set()).update(
                        verify_request.key_ids
                    )
                    requests_missing_keys.append(verify_request)

            if not missing_keys:
                break

        with PreserveLoggingContext():
            for verify_request in requests_missing_keys:
                verify_request.deferred.errback(SynapseError(
                    401,
                    "No key for %s with id %s" % (
                        verify_request.server_name, verify_request.key_ids,
                    ),
                    Codes.UNAUTHORIZED,
                ))
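# The central primitive in these snippets is PreserveLoggingContext: a
# context manager that swaps the calling thread's current logcontext for
# another one (the sentinel, by default) and restores the previous one on
# exit, so that deferred callbacks fired inside the block don't run in
# (and get timed against) the caller's request context. A toy,
# thread-local sketch of the idea; ToyPreserveLoggingContext and _current
# are made-up names, not synapse's implementation:
import threading

_current = threading.local()
_current.context = "sentinel"


class ToyPreserveLoggingContext(object):
    def __init__(self, new_context="sentinel"):
        self.new_context = new_context

    def __enter__(self):
        self.previous = _current.context
        _current.context = self.new_context

    def __exit__(self, exc_type, exc_val, exc_tb):
        _current.context = self.previous


_current.context = "request-1"
with ToyPreserveLoggingContext():
    # code here (e.g. deferred.callback(...)) runs in the sentinel context
    assert _current.context == "sentinel"
assert _current.context == "request-1"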
def _do_fetch(self, conn):
    """Takes a database connection and waits for requests for events from
    the _event_fetch_list queue.
    """
    event_list = []
    i = 0
    while True:
        try:
            with self._event_fetch_lock:
                event_list = self._event_fetch_list
                self._event_fetch_list = []

                if not event_list:
                    single_threaded = self.database_engine.single_threaded
                    if single_threaded or i > EVENT_QUEUE_ITERATIONS:
                        self._event_fetch_ongoing -= 1
                        return
                    else:
                        self._event_fetch_lock.wait(EVENT_QUEUE_TIMEOUT_S)
                        i += 1
                        continue
                i = 0

            event_id_lists = zip(*event_list)[0]
            event_ids = [
                item for sublist in event_id_lists for item in sublist
            ]

            rows = self._new_transaction(
                conn, "do_fetch", [], [], None, self._fetch_event_rows,
                event_ids,
            )

            row_dict = {r["event_id"]: r for r in rows}

            # We only want to resolve deferreds from the main thread
            def fire(lst, res):
                for ids, d in lst:
                    if not d.called:
                        try:
                            with PreserveLoggingContext():
                                d.callback([
                                    res[i] for i in ids if i in res
                                ])
                        except Exception:
                            logger.exception("Failed to callback")
            with PreserveLoggingContext():
                self.hs.get_reactor().callFromThread(
                    fire, event_list, row_dict
                )
        except Exception as e:
            logger.exception("do_fetch")

            # We only want to resolve deferreds from the main thread
            def fire(evs):
                for _, d in evs:
                    if not d.called:
                        with PreserveLoggingContext():
                            d.errback(e)

            if event_list:
                with PreserveLoggingContext():
                    self.hs.get_reactor().callFromThread(fire, event_list)
def _fetch_event_list(self, conn, event_list):
    """Handle a load of requests from the _event_fetch_list queue

    Args:
        conn (twisted.enterprise.adbapi.Connection): database connection

        event_list (list[Tuple[list[str], Deferred]]):
            The fetch requests. Each entry consists of a list of event
            ids to be fetched, and a deferred to be completed once the
            events have been fetched.
    """
    with Measure(self._clock, "_fetch_event_list"):
        try:
            event_id_lists = list(zip(*event_list))[0]
            event_ids = [
                item for sublist in event_id_lists for item in sublist
            ]

            rows = self._new_transaction(
                conn, "do_fetch", [], [],
                self._fetch_event_rows, event_ids,
            )

            row_dict = {r["event_id"]: r for r in rows}

            # We only want to resolve deferreds from the main thread
            def fire(lst, res):
                for ids, d in lst:
                    if not d.called:
                        try:
                            with PreserveLoggingContext():
                                d.callback([
                                    res[i] for i in ids if i in res
                                ])
                        except Exception:
                            logger.exception("Failed to callback")
            with PreserveLoggingContext():
                self.hs.get_reactor().callFromThread(
                    fire, event_list, row_dict
                )
        except Exception as e:
            logger.exception("do_fetch")

            # We only want to resolve deferreds from the main thread
            def fire(evs, exc):
                for _, d in evs:
                    if not d.called:
                        with PreserveLoggingContext():
                            d.errback(exc)

            with PreserveLoggingContext():
                self.hs.get_reactor().callFromThread(fire, event_list, e)
def do_iterations():
    with Measure(self.clock, "get_server_verify_keys"):
        # dict[str, set(str)]: keys to fetch for each server
        missing_keys = {}
        for verify_request in verify_requests:
            missing_keys.setdefault(verify_request.server_name, set()).update(
                verify_request.key_ids
            )

        for fn in key_fetch_fns:
            results = yield fn(missing_keys.items())

            # We now need to figure out which verify requests we have keys
            # for and which we don't
            missing_keys = {}
            requests_missing_keys = []
            for verify_request in verify_requests:
                if verify_request.deferred.called:
                    # We've already called this deferred, which probably
                    # means that we've already found a key for it.
                    continue

                server_name = verify_request.server_name

                # see if any of the keys we got this time are sufficient to
                # complete this VerifyKeyRequest.
                result_keys = results.get(server_name, {})
                for key_id in verify_request.key_ids:
                    key = result_keys.get(key_id)
                    if key:
                        with PreserveLoggingContext():
                            verify_request.deferred.callback(
                                (server_name, key_id, key)
                            )
                        break
                else:
                    # The else block is only reached if the loop above
                    # doesn't break.
                    missing_keys.setdefault(server_name, set()).update(
                        verify_request.key_ids
                    )
                    requests_missing_keys.append(verify_request)

            if not missing_keys:
                break

        with PreserveLoggingContext():
            for verify_request in requests_missing_keys:
                verify_request.deferred.errback(SynapseError(
                    401,
                    "No key for %s with id %s" % (
                        verify_request.server_name, verify_request.key_ids,
                    ),
                    Codes.UNAUTHORIZED,
                ))
def _enqueue_events(self, events, check_redacted=True, allow_rejected=False):
    """Fetches events from the database using the _event_fetch_list. This
    allows batch and bulk fetching of events - it allows us to fetch events
    without having to create a new transaction for each request for events.
    """
    if not events:
        defer.returnValue({})

    events_d = defer.Deferred()
    with self._event_fetch_lock:
        self._event_fetch_list.append(
            (events, events_d)
        )

        self._event_fetch_lock.notify()

        if self._event_fetch_ongoing < EVENT_QUEUE_THREADS:
            self._event_fetch_ongoing += 1
            should_start = True
        else:
            should_start = False

    if should_start:
        with PreserveLoggingContext():
            self.runWithConnection(
                self._do_fetch
            )

    logger.debug("Loading %d events", len(events))
    with PreserveLoggingContext():
        rows = yield events_d
    logger.debug("Loaded %d events (%d rows)", len(events), len(rows))

    if not allow_rejected:
        rows[:] = [r for r in rows if not r["rejects"]]

    res = yield make_deferred_yieldable(defer.gatherResults(
        [
            run_in_background(
                self._get_event_from_row,
                row["internal_metadata"], row["json"], row["redacts"],
                rejected_reason=row["rejects"],
            )
            for row in rows
        ],
        consumeErrors=True
    ))

    defer.returnValue({
        e.event.event_id: e
        for e in res if e
    })
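# A self-contained sketch of the batching pattern behind _enqueue_events
# and _do_fetch above: callers append (ids, future) pairs to a shared list
# guarded by a condition variable, and a single background worker drains
# the whole list and services every queued request with one fetch. The
# names here (BatchFetcher, fetch_rows) are hypothetical, and
# concurrent.futures stands in for the Twisted deferreds that the real
# code resolves via callFromThread.
import threading
from concurrent.futures import Future


class BatchFetcher(object):
    def __init__(self, fetch_rows):
        self._fetch_rows = fetch_rows   # callable: list of ids -> dict id -> row
        self._lock = threading.Condition()
        self._pending = []              # list of (ids, Future)
        self._worker_running = False

    def fetch(self, ids):
        """Queue a request for `ids`; returns a Future of the rows."""
        f = Future()
        with self._lock:
            self._pending.append((ids, f))
            self._lock.notify()
            if not self._worker_running:
                self._worker_running = True
                t = threading.Thread(target=self._worker)
                t.daemon = True
                t.start()
        return f

    def _worker(self):
        while True:
            with self._lock:
                if not self._pending:
                    # brief idle wait, like EVENT_QUEUE_TIMEOUT_S above
                    self._lock.wait(timeout=0.1)
                if not self._pending:
                    self._worker_running = False
                    return
                batch, self._pending = self._pending, []
            # one fetch covers every request in the batch
            all_ids = [i for ids, _ in batch for i in ids]
            rows = self._fetch_rows(all_ids)
            for ids, f in batch:
                f.set_result([rows[i] for i in ids if i in rows])


# usage: two queued requests serviced by the same worker
fetcher = BatchFetcher(lambda ids: dict((i, "row-%s" % i) for i in ids))
assert fetcher.fetch(["a", "b"]).result() == ["row-a", "row-b"]
assert fetcher.fetch(["c"]).result() == ["row-c"]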
def handle_new_client_event(self, event, context, extra_destinations=[],
                            extra_users=[], suppress_auth=False):
    # We now need to go and hit out to wherever we need to hit out to.

    if not suppress_auth:
        self.auth.check(event, auth_events=context.current_state)

    (event_stream_id, max_stream_id) = yield self.store.persist_event(
        event, context=context
    )

    federation_handler = self.hs.get_handlers().federation_handler

    if event.type == EventTypes.Member:
        if event.content["membership"] == Membership.INVITE:
            invitee = UserID.from_string(event.state_key)
            if not self.hs.is_mine(invitee):
                # TODO: Can we add signature from remote server in a nicer
                # way? If we have been invited by a remote server, we need
                # to get them to sign the event.
                returned_invite = yield federation_handler.send_invite(
                    invitee.domain,
                    event,
                )

                # TODO: Make sure the signatures actually are correct.
                event.signatures.update(returned_invite.signatures)

    destinations = set(extra_destinations)
    for k, s in context.current_state.items():
        try:
            if k[0] == EventTypes.Member:
                if s.content["membership"] == Membership.JOIN:
                    destinations.add(
                        UserID.from_string(s.state_key).domain
                    )
        except SynapseError:
            logger.warn("Failed to get destination from event %s", s.event_id)

    with PreserveLoggingContext():
        # Don't block waiting on waking up all the listeners.
        notify_d = self.notifier.on_new_room_event(
            event, event_stream_id, max_stream_id,
            extra_users=extra_users
        )

    def log_failure(f):
        logger.warn(
            "Failed to notify about %s: %s",
            event.event_id, f.value
        )

    notify_d.addErrback(log_failure)

    federation_handler.handle_new_event(
        event, destinations=destinations,
    )
def wait_for_previous_lookups(self, server_names, server_to_deferred):
    """Waits for any previous key lookups for the given servers to finish.

    Args:
        server_names (list): list of server_names we want to lookup
        server_to_deferred (dict): server_name to deferred which gets
            resolved once we've finished looking up keys for that server
    """
    while True:
        wait_on = [
            self.key_downloads[server_name]
            for server_name in server_names
            if server_name in self.key_downloads
        ]
        if wait_on:
            with PreserveLoggingContext():
                yield defer.DeferredList(wait_on)
        else:
            break

    for server_name, deferred in server_to_deferred.items():
        d = ObservableDeferred(preserve_context_over_deferred(deferred))
        self.key_downloads[server_name] = d

        def rm(r, server_name):
            self.key_downloads.pop(server_name, None)
            return r

        d.addBoth(rm, server_name)
def enqueue_failure(self, failure, destination):
    if destination == self.server_name or destination == "localhost":
        return

    deferred = defer.Deferred()

    if not self.can_send_to(destination):
        return

    self.pending_failures_by_dest.setdefault(destination, []).append(
        (failure, deferred)
    )

    def chain(f):
        if not deferred.called:
            deferred.errback(f)

    def log_failure(f):
        logger.warn("Failed to send failure to %s: %s", destination, f.value)

    deferred.addErrback(log_failure)

    with PreserveLoggingContext():
        self._attempt_new_transaction(destination).addErrback(chain)

    yield deferred
def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):
    """Fetch the keys for a remote server."""

    factory = SynapseKeyClientFactory()
    factory.path = path
    endpoint = matrix_federation_endpoint(
        reactor, server_name, ssl_context_factory, timeout=30
    )

    for i in range(5):
        try:
            with PreserveLoggingContext():
                protocol = yield endpoint.connect(factory)
                server_response, server_certificate = yield protocol.remote_key
                defer.returnValue((server_response, server_certificate))
                return
        except SynapseKeyClientError as e:
            logger.exception("Error getting key for %r" % (server_name,))
            if e.status.startswith("4"):
                # Don't retry for 4xx responses.
                raise IOError("Cannot get key for %r" % server_name)
        except Exception as e:
            logger.exception(e)
            raise IOError("Cannot get key for %r" % server_name)
def wrapped(*args, **kwargs):
    arg_dict = inspect.getcallargs(self.orig, obj, *args, **kwargs)
    keyargs = [arg_dict[arg_nm] for arg_nm in self.arg_names]
    list_args = arg_dict[self.list_name]

    # cached is a dict arg -> deferred, where deferred results in a
    # 2-tuple (`arg`, `result`)
    cached = {}
    missing = []
    for arg in list_args:
        key = list(keyargs)
        key[self.list_pos] = arg

        try:
            res = self.cache.get(tuple(key)).observe()
            res.addCallback(lambda r, arg: (arg, r), arg)
            cached[arg] = res
        except KeyError:
            missing.append(arg)

    if missing:
        sequence = self.cache.sequence
        args_to_call = dict(arg_dict)
        args_to_call[self.list_name] = missing

        ret_d = defer.maybeDeferred(
            preserve_context_over_fn,
            self.function_to_call,
            **args_to_call
        )

        ret_d = ObservableDeferred(ret_d)

        # We need to create deferreds for each arg in the list so that
        # we can insert the new deferred into the cache.
        for arg in missing:
            with PreserveLoggingContext():
                observer = ret_d.observe()
                observer.addCallback(lambda r, arg: r.get(arg, None), arg)

            observer = ObservableDeferred(observer)

            key = list(keyargs)
            key[self.list_pos] = arg
            self.cache.update(sequence, tuple(key), observer)

            def invalidate(f, key):
                self.cache.invalidate(key)
                return f
            observer.addErrback(invalidate, tuple(key))

            res = observer.observe()
            res.addCallback(lambda r, arg: (arg, r), arg)

            cached[arg] = res

    return preserve_context_over_deferred(defer.gatherResults(
        cached.values(),
        consumeErrors=True,
    ).addErrback(unwrapFirstError).addCallback(lambda res: dict(res)))
def runInteraction(self, desc, func, *args, **kwargs):
    """Wraps the .runInteraction() method on the underlying db_pool."""
    current_context = LoggingContext.current_context()

    def inner_func(txn, *args, **kwargs):
        with LoggingContext("runInteraction") as context:
            current_context.copy_to(context)
            start = time.clock() * 1000
            txn_id = self._TXN_ID

            # We don't really need these to be unique, so lets stop it
            # from growing really large.
            self._TXN_ID = (self._TXN_ID + 1) % (sys.maxint - 1)

            name = "%s-%x" % (desc, txn_id, )

            transaction_logger.debug("[TXN START] {%s}", name)
            try:
                return func(LoggingTransaction(txn, name), *args, **kwargs)
            except Exception:
                logger.exception("[TXN FAIL] {%s}", name)
                raise
            finally:
                end = time.clock() * 1000
                transaction_logger.debug(
                    "[TXN END] {%s} %f", name, end - start
                )

    with PreserveLoggingContext():
        result = yield self._db_pool.runInteraction(
            inner_func, *args, **kwargs
        )
    defer.returnValue(result)
def render(self, resrc):
    # this is called once a Resource has been found to serve the request;
    # in our case the Resource in question will normally be a JsonResource.

    # create a LogContext for this request
    request_id = self.get_request_id()
    logcontext = self.logcontext = LoggingContext(request_id)
    logcontext.request = request_id

    # override the Server header which is set by twisted
    self.setHeader("Server", self.site.server_version_string)

    with PreserveLoggingContext(self.logcontext):
        # we start the request metrics timer here with an initial stab
        # at the servlet name. For most requests that name will be
        # JsonResource (or a subclass), and JsonResource._async_render
        # will update it once it picks a servlet.
        servlet_name = resrc.__class__.__name__
        self._started_processing(servlet_name)

        Request.render(self, resrc)

        # record the arrival of the request *after*
        # dispatching to the handler, so that the handler
        # can update the servlet name in the request
        # metrics
        requests_counter.labels(self.method, self.request_metrics.name).inc()
def enqueue_pdu(self, pdu, destinations, order):
    # We loop through all destinations to see whether we already have
    # a transaction in progress. If we do, stick it in the pending_pdus
    # table and we'll get back to it later.

    destinations = set(destinations)
    destinations.discard(self.server_name)

    logger.debug("Sending to: %s", str(destinations))

    if not destinations:
        return

    deferreds = []

    for destination in destinations:
        deferred = defer.Deferred()
        self.pending_pdus_by_dest.setdefault(destination, []).append(
            (pdu, deferred, order)
        )

        # Bind the current deferred as a default argument: the errback
        # fires after the loop has moved on, so a plain closure would see
        # the *last* deferred rather than this one.
        def eb(failure, deferred=deferred):
            if not deferred.called:
                deferred.errback(failure)
            else:
                logger.warn("Failed to send pdu: %s", failure)

        with PreserveLoggingContext():
            self._attempt_new_transaction(destination).addErrback(eb)

        deferreds.append(deferred)

    yield defer.DeferredList(deferreds)
def _handle_key_deferred(verify_request):
    """Waits for the key to become available, and then performs a
    verification

    Args:
        verify_request (VerifyJsonRequest):

    Returns:
        Deferred[None]

    Raises:
        SynapseError if there was a problem performing the verification
    """
    server_name = verify_request.server_name
    with PreserveLoggingContext():
        _, key_id, verify_key = yield verify_request.key_ready

    json_object = verify_request.json_object

    try:
        verify_signed_json(json_object, server_name, verify_key)
    except SignatureVerifyException as e:
        logger.debug(
            "Error verifying signature for %s:%s:%s with key %s: %s",
            server_name, verify_key.alg, verify_key.version,
            encode_verify_key_base64(verify_key),
            str(e),
        )
        raise SynapseError(
            401,
            "Invalid signature for server %s with key %s:%s: %s" % (
                server_name, verify_key.alg, verify_key.version, str(e),
            ),
            Codes.UNAUTHORIZED,
        )
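# For reference, a hedged end-to-end sketch of the check that
# verify_signed_json performs above, using the signedjson package that
# synapse builds on. The server name "example.com" and key version "1"
# are made up for the example.
from signedjson.key import generate_signing_key, get_verify_key
from signedjson.sign import (
    SignatureVerifyException,
    sign_json,
    verify_signed_json,
)

signing_key = generate_signing_key("1")   # new ed25519 key, version "1"
verify_key = get_verify_key(signing_key)

signed = sign_json({"foo": "bar"}, "example.com", signing_key)

# passes silently for a valid signature...
verify_signed_json(signed, "example.com", verify_key)

# ...and raises SignatureVerifyException once the content is tampered with
signed["foo"] = "baz"
try:
    verify_signed_json(signed, "example.com", verify_key)
except SignatureVerifyException:
    pass  # expected: signature no longer matches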
def fire(self, *args, **kwargs):
    """Invokes every callable in the observer list, passing in the args and
    kwargs. Exceptions thrown by observers are logged but ignored. It is
    not an error to fire a signal with no observers.

    Returns a Deferred that will complete when all the observers have
    completed."""

    def do(observer):
        def eb(failure):
            logger.warning(
                "%s signal observer %s failed: %r",
                self.name, observer, failure,
                exc_info=(
                    failure.type,
                    failure.value,
                    failure.getTracebackObject()
                )
            )
            if not self.suppress_failures:
                return failure
        return defer.maybeDeferred(observer, *args, **kwargs).addErrback(eb)

    with PreserveLoggingContext():
        deferreds = [
            do(observer)
            for observer in self.observers
        ]

        res = yield defer.gatherResults(
            deferreds, consumeErrors=True
        ).addErrback(unwrapFirstError)

    defer.returnValue(res)
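# A hedged usage sketch of the Signal.fire pattern above: observers are
# plain callables (synchronous or Deferred-returning), wrapped in
# maybeDeferred so a failing observer is logged without breaking the rest.
# MiniSignal is a stripped-down stand-in, not synapse's real distributor;
# note that the observer is passed to the errback explicitly to avoid the
# late-binding closure pitfall.
import logging

from twisted.internet import defer

logger = logging.getLogger(__name__)


class MiniSignal(object):
    def __init__(self, name):
        self.name = name
        self.observers = []

    def observe(self, observer):
        self.observers.append(observer)

    def fire(self, *args, **kwargs):
        def eb(failure, observer):
            logger.warning("%s signal observer %s failed: %s",
                           self.name, observer, failure.value)

        deferreds = [
            defer.maybeDeferred(observer, *args, **kwargs).addErrback(eb, observer)
            for observer in self.observers
        ]
        return defer.gatherResults(deferreds, consumeErrors=True)


sig = MiniSignal("user_joined")
sig.observe(lambda user: "sync saw %s" % (user,))
sig.observe(lambda user: defer.succeed("async saw %s" % (user,)))

results = []
sig.fire("@alice:example.com").addCallback(results.append)
assert results == [["sync saw @alice:example.com",
                    "async saw @alice:example.com"]]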
def wait_for_events(self, user_id, timeout, callback, room_ids=None,
                    from_token=StreamToken.START):
    """Wait until the callback returns a non empty response or the
    timeout fires.
    """
    user_stream = self.user_to_user_stream.get(user_id)
    if user_stream is None:
        current_token = yield self.event_sources.get_current_token()
        if room_ids is None:
            room_ids = yield self.store.get_rooms_for_user(user_id)
        user_stream = _NotifierUserStream(
            user_id=user_id,
            rooms=room_ids,
            current_token=current_token,
            time_now_ms=self.clock.time_msec(),
        )
        self._register_with_keys(user_stream)

    result = None
    prev_token = from_token
    if timeout:
        end_time = self.clock.time_msec() + timeout

        while not result:
            try:
                now = self.clock.time_msec()
                if end_time <= now:
                    break

                # Now we wait for the _NotifierUserStream to be told there
                # is a new token.
                listener = user_stream.new_listener(prev_token)
                add_timeout_to_deferred(
                    listener.deferred,
                    (end_time - now) / 1000.,
                )
                with PreserveLoggingContext():
                    yield listener.deferred

                current_token = user_stream.current_token

                result = yield callback(prev_token, current_token)
                if result:
                    break

                # Update the prev_token to the current_token since nothing
                # has happened between the old prev_token and the
                # current_token
                prev_token = current_token
            except DeferredTimeoutError:
                break
            except defer.CancelledError:
                break

    if result is None:
        # This happens if there was no timeout or if the timeout had
        # already expired.
        current_token = user_stream.current_token
        result = yield callback(prev_token, current_token)

    defer.returnValue(result)
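# The time-bounded wait in the loop above boils down to racing a listener
# Deferred against a reactor timer and cancelling whichever loses;
# synapse's add_timeout_to_deferred is roughly this shape. time_bound is a
# hypothetical helper, and it needs a running reactor to actually fire.
from twisted.internet import defer, reactor


def time_bound(d, seconds):
    timer = reactor.callLater(seconds, d.cancel)

    def done(result):
        # the deferred won the race: stop the timer and pass the result on
        if timer.active():
            timer.cancel()
        return result

    return d.addBoth(done)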
def fire(self, *args, **kwargs):
    """Invokes every callable in the observer list, passing in the args and
    kwargs. Exceptions thrown by observers are logged but ignored. It is
    not an error to fire a signal with no observers.

    Returns a Deferred that will complete when all the observers have
    completed."""
    with PreserveLoggingContext():
        deferreds = []
        for observer in self.observers:
            d = defer.maybeDeferred(observer, *args, **kwargs)

            def eb(failure):
                logger.warning(
                    "%s signal observer %s failed: %r",
                    self.name, observer, failure,
                    exc_info=(
                        failure.type,
                        failure.value,
                        failure.getTracebackObject()
                    )
                )
                if not self.suppress_failures:
                    failure.raiseException()
            deferreds.append(d.addErrback(eb))

        results = []
        for deferred in deferreds:
            result = yield deferred
            results.append(result)

    defer.returnValue(results)
def wrapped_request_handler(self, request):
    global _next_request_id
    request_id = "%s-%s" % (request.method, _next_request_id)
    _next_request_id += 1

    with LoggingContext(request_id) as request_context:
        request_context.request = request_id
        with request.processing():
            try:
                d = request_handler(self, request)
                with PreserveLoggingContext():
                    yield d
            except CodeMessageException as e:
                code = e.code
                if isinstance(e, SynapseError):
                    logger.info(
                        "%s SynapseError: %s - %s", request, code, e.msg
                    )
                else:
                    logger.exception(e)
                outgoing_responses_counter.inc(request.method, str(code))
                respond_with_json(
                    request, code, cs_exception(e), send_cors=True,
                    pretty_print=_request_user_agent_is_curl(request),
                    version_string=self.version_string,
                )
            except Exception:
                logger.exception(
                    "Failed to handle request %s.%s on %r: %r",
                    request_handler.__module__,
                    request_handler.__name__,
                    self,
                    request,
                )
                respond_with_json(
                    request,
                    500,
                    {"error": "Internal server error"},
                    send_cors=True,
                )
def wait_for_replication(self, callback, timeout):
    """Wait for an event to happen.

    :param callback: Gets called whenever an event happens. If this returns
        a truthy value then ``wait_for_replication`` returns, otherwise it
        waits for another event.
    :param int timeout: How many milliseconds to wait for the callback to
        return a truthy value.
    :returns: A deferred that resolves with the value returned by the
        callback.
    """
    listener = _NotificationListener(None)

    def timed_out():
        listener.deferred.cancel()

    timer = self.clock.call_later(timeout / 1000., timed_out)

    while True:
        listener.deferred = self.replication_deferred.observe()
        result = yield callback()
        if result:
            break

        try:
            with PreserveLoggingContext():
                yield listener.deferred
        except defer.CancelledError:
            break

    self.clock.cancel_call_later(timer, ignore_errs=True)

    defer.returnValue(result)
def _handle_new_receipts(self, receipts):
    """Takes a list of receipts, stores them and informs the notifier.
    """
    for receipt in receipts:
        room_id = receipt["room_id"]
        receipt_type = receipt["receipt_type"]
        user_id = receipt["user_id"]
        event_ids = receipt["event_ids"]
        data = receipt["data"]

        res = yield self.store.insert_receipt(
            room_id, receipt_type, user_id, event_ids, data
        )

        if not res:
            # res will be None if this read receipt is 'old'
            defer.returnValue(False)

        stream_id, max_persisted_id = res

        with PreserveLoggingContext():
            self.notifier.on_new_event(
                "receipt_key", max_persisted_id, rooms=[room_id]
            )

    defer.returnValue(True)
def enqueue_pdu(self, pdu, destinations, order):
    # We loop through all destinations to see whether we already have
    # a transaction in progress. If we do, stick it in the pending_pdus
    # table and we'll get back to it later.

    destinations = set(destinations)
    destinations = set(
        dest for dest in destinations if self.can_send_to(dest)
    )

    logger.debug("Sending to: %s", str(destinations))

    if not destinations:
        return

    deferreds = []

    for destination in destinations:
        deferred = defer.Deferred()
        self.pending_pdus_by_dest.setdefault(destination, []).append(
            (pdu, deferred, order)
        )

        # Bind loop variables as default arguments: these callbacks fire
        # after the loop has moved on, so plain closures would see the
        # values from the final iteration.
        def chain(failure, deferred=deferred):
            if not deferred.called:
                deferred.errback(failure)

        def log_failure(f, destination=destination):
            logger.warn("Failed to send pdu to %s: %s", destination, f.value)

        deferred.addErrback(log_failure)

        with PreserveLoggingContext():
            self._attempt_new_transaction(destination).addErrback(chain)

        deferreds.append(deferred)
def on_rdata(self, stream_name, token, rows):
    awaiters = self._rdata_awaiters
    self._rdata_awaiters = []
    super(TestReplicationClientHandler, self).on_rdata(
        stream_name, token, rows
    )
    with PreserveLoggingContext():
        for a in awaiters:
            a.callback(None)
def wait_for_previous_lookups(self, server_names, server_to_deferred):
    """Waits for any previous key lookups for the given servers to finish.

    Args:
        server_names (list): list of server_names we want to lookup
        server_to_deferred (dict): server_name to deferred which gets
            resolved once we've finished looking up keys for that server.
            The Deferreds should be regular twisted ones which call their
            callbacks with no logcontext.

    Returns: a Deferred which resolves once all key lookups for the given
        servers have completed. Follows the synapse rules of logcontext
        preservation.
    """
    while True:
        wait_on = [
            self.key_downloads[server_name]
            for server_name in server_names
            if server_name in self.key_downloads
        ]
        if wait_on:
            with PreserveLoggingContext():
                yield defer.DeferredList(wait_on)
        else:
            break

    def rm(r, server_name_):
        self.key_downloads.pop(server_name_, None)
        return r

    for server_name, deferred in server_to_deferred.items():
        self.key_downloads[server_name] = deferred
        deferred.addBoth(rm, server_name)
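# A minimal sketch of the deduplication pattern in wait_for_previous_lookups:
# wait for any in-flight work for the same key, then register your own
# deferred so that later callers queue behind you. The names (in_flight,
# lookup, do_lookup) are made up, and error handling is elided.
from twisted.internet import defer

in_flight = {}  # server_name -> Deferred for the lookup currently running


@defer.inlineCallbacks
def lookup(server_name, do_lookup):
    # wait for any earlier lookup for this server to complete
    while server_name in in_flight:
        yield in_flight[server_name]

    # register ourselves so that concurrent callers wait on us in turn
    d = defer.Deferred()
    in_flight[server_name] = d
    try:
        result = yield do_lookup(server_name)
    finally:
        del in_flight[server_name]
        d.callback(None)
    defer.returnValue(result)


# with a synchronously-resolving do_lookup, everything fires immediately
results = []
lookup(
    "example.com",
    lambda name: defer.succeed("keys for %s" % (name,)),
).addCallback(results.append)
assert results == ["keys for example.com"]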
def runInteraction(self, desc, func, *args, **kwargs):
    """Wraps the .runInteraction() method on the underlying db_pool."""
    current_context = LoggingContext.current_context()

    start_time = time.time() * 1000

    after_callbacks = []

    def inner_func(conn, *args, **kwargs):
        with LoggingContext("runInteraction") as context:
            sql_scheduling_timer.inc_by(time.time() * 1000 - start_time)

            if self.database_engine.is_connection_closed(conn):
                logger.debug("Reconnecting closed database connection")
                conn.reconnect()

            current_context.copy_to(context)
            return self._new_transaction(
                conn, desc, after_callbacks, current_context,
                func, *args, **kwargs
            )

    with PreserveLoggingContext():
        result = yield self._db_pool.runWithConnection(
            inner_func, *args, **kwargs
        )

    for after_callback, after_args in after_callbacks:
        after_callback(*after_args)
    defer.returnValue(result)
def on_new_event(self, stream_key, new_token, users=[], rooms=[]):
    """ Used to inform listeners that something has happened event wise.

    Will wake up all listeners for the given users and rooms.
    """
    with PreserveLoggingContext():
        with Measure(self.clock, "on_new_event"):
            user_streams = set()

            for user in users:
                user_stream = self.user_to_user_stream.get(str(user))
                if user_stream is not None:
                    user_streams.add(user_stream)

            for room in rooms:
                user_streams |= self.room_to_user_streams.get(room, set())

            time_now_ms = self.clock.time_msec()
            for user_stream in user_streams:
                try:
                    user_stream.notify(stream_key, new_token, time_now_ms)
                except Exception:
                    logger.exception("Failed to notify listener")

            self.notify_replication()
def wrapped_request_handler(self, request):
    """
    Args:
        self:
        request (synapse.http.site.SynapseRequest):
    """
    request_id = request.get_request_id()
    with LoggingContext(request_id) as request_context:
        request_context.request = request_id
        with Measure(self.clock, "wrapped_request_handler"):
            # we start the request metrics timer here with an initial stab
            # at the servlet name. For most requests that name will be
            # JsonResource (or a subclass), and JsonResource._async_render
            # will update it once it picks a servlet.
            servlet_name = self.__class__.__name__
            with request.processing(servlet_name):
                with PreserveLoggingContext(request_context):
                    d = defer.maybeDeferred(h, self, request)

                    # record the arrival of the request *after*
                    # dispatching to the handler, so that the handler
                    # can update the servlet name in the request
                    # metrics
                    requests_counter.labels(
                        request.method, request.request_metrics.name
                    ).inc()
                    yield d
def on_incoming_transaction(self, transaction_data):
    transaction = Transaction(**transaction_data)

    received_pdus_counter.inc_by(len(transaction.pdus))

    for p in transaction.pdus:
        if "unsigned" in p:
            unsigned = p["unsigned"]
            if "age" in unsigned:
                p["age"] = unsigned["age"]
        if "age" in p:
            p["age_ts"] = int(self._clock.time_msec()) - int(p["age"])
            del p["age"]

    pdu_list = [self.event_from_pdu_json(p) for p in transaction.pdus]

    logger.debug("[%s] Got transaction", transaction.transaction_id)

    response = yield self.transaction_actions.have_responded(transaction)

    if response:
        logger.debug(
            "[%s] We've already responded to this request",
            transaction.transaction_id
        )
        defer.returnValue(response)
        return

    logger.debug("[%s] Transaction is new", transaction.transaction_id)

    with PreserveLoggingContext():
        results = []

        for pdu in pdu_list:
            d = self._handle_new_pdu(transaction.origin, pdu)

            try:
                yield d
                results.append({})
            except FederationError as e:
                self.send_failure(e, transaction.origin)
                results.append({"error": str(e)})
            except Exception as e:
                results.append({"error": str(e)})
                logger.exception("Failed to handle PDU")

    if hasattr(transaction, "edus"):
        for edu in [Edu(**x) for x in transaction.edus]:
            self.received_edu(
                transaction.origin,
                edu.edu_type,
                edu.content
            )

    for failure in getattr(transaction, "pdu_failures", []):
        logger.info("Got failure %r", failure)

    logger.debug("Returning: %s", str(results))

    response = {
        "pdus": dict(zip((p.event_id for p in pdu_list), results)),
    }

    yield self.transaction_actions.set_response(transaction, 200, response)
    defer.returnValue((200, response))
def wait_for_events(self, user_id, timeout, callback, room_ids=None,
                    from_token=StreamToken.START):
    """Wait until the callback returns a non empty response or the
    timeout fires.
    """
    user_stream = self.user_to_user_stream.get(user_id)
    if user_stream is None:
        current_token = yield self.event_sources.get_current_token()
        if room_ids is None:
            rooms = yield self.store.get_rooms_for_user(user_id)
            room_ids = [room.room_id for room in rooms]
        user_stream = _NotifierUserStream(
            user_id=user_id,
            rooms=room_ids,
            current_token=current_token,
            time_now_ms=self.clock.time_msec(),
        )
        self._register_with_keys(user_stream)

    result = None
    if timeout:
        end_time = self.clock.time_msec() + timeout

        prev_token = from_token
        while not result:
            try:
                current_token = user_stream.current_token

                result = yield callback(prev_token, current_token)
                if result:
                    break

                now = self.clock.time_msec()
                if end_time <= now:
                    break

                # Now we wait for the _NotifierUserStream to be told there
                # is a new token.
                # We need to supply the token we supplied to callback so
                # that we don't miss any current_token updates.
                prev_token = current_token
                listener = user_stream.new_listener(prev_token)
                with PreserveLoggingContext():
                    yield self.clock.time_bound_deferred(
                        listener.deferred,
                        time_out=(end_time - now) / 1000.
                    )
            except DeferredTimedOutError:
                break
            except defer.CancelledError:
                break
    else:
        current_token = user_stream.current_token
        result = yield callback(from_token, current_token)

    defer.returnValue(result)
def fire(lst, res):
    for ids, d in lst:
        if not d.called:
            try:
                with PreserveLoggingContext():
                    d.callback([
                        res[i] for i in ids if i in res
                    ])
            except Exception:
                logger.exception("Failed to callback")
def __init__(self, user_id, rooms, current_token, time_now_ms):
    self.user_id = user_id
    self.rooms = set(rooms)
    self.current_token = current_token
    self.last_notified_ms = time_now_ms

    with PreserveLoggingContext():
        self.notify_deferred = ObservableDeferred(defer.Deferred())
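# ObservableDeferred, used above, fans a single underlying Deferred out to
# many observers: each observe() call returns a fresh Deferred that fires
# with the same result. A rough sketch of the idea (MiniObservableDeferred
# is a made-up name; the real class also propagates errbacks, follows the
# logcontext rules, and can distinguish a legitimate None result):
from twisted.internet import defer


class MiniObservableDeferred(object):
    def __init__(self, d):
        self._observers = []
        self._result = None

        def fire(value):
            self._result = value
            for observer in self._observers:
                observer.callback(value)
            return value

        d.addCallback(fire)

    def observe(self):
        if self._result is not None:
            return defer.succeed(self._result)
        observer = defer.Deferred()
        self._observers.append(observer)
        return observer


source = defer.Deferred()
obs = MiniObservableDeferred(source)

seen = []
obs.observe().addCallback(seen.append)
obs.observe().addCallback(seen.append)
source.callback(42)       # both observers fire with the same result
assert seen == [42, 42]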