async def _async_write_json_to_request_in_thread(
    request: SynapseRequest,
    json_encoder: Callable[[Any], bytes],
    json_object: Any,
) -> None:
    """Encodes the given JSON object on a thread and then writes it to the
    request.

    This is done so that encoding large JSON objects doesn't block the reactor
    thread.

    Note: We don't use JsonEncoder.iterencode here as that falls back to the
    Python implementation (rather than the C backend), which is *much* more
    expensive.
    """

    def encode(opentracing_span: "Optional[opentracing.Span]") -> bytes:
        # it might take a while for the threadpool to schedule us, so we write
        # opentracing logs once we actually get scheduled, so that we can see
        # how much that contributed.
        if opentracing_span:
            opentracing_span.log_kv({"event": "scheduled"})
        res = json_encoder(json_object)
        if opentracing_span:
            opentracing_span.log_kv({"event": "encoded"})
        return res

    with start_active_span("encode_json_response"):
        span = active_span()
        json_str = await defer_to_thread(request.reactor, encode, span)

    _write_bytes_to_request(request, json_str)
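
# The same pattern as a minimal standalone sketch: offload a CPU-heavy JSON
# encode to Twisted's threadpool via threads.deferToThread (rather than
# Synapse's defer_to_thread wrapper) so the reactor thread stays responsive.
# The names below are illustrative, not part of Synapse.
import json

from twisted.internet import defer, task, threads


def _encode(obj: object) -> bytes:
    # CPU-bound work: runs on a threadpool thread, not the reactor thread.
    return json.dumps(obj).encode("utf-8")


async def encode_off_reactor(obj: object) -> bytes:
    # deferToThread returns a Deferred; awaiting it lets the reactor keep
    # servicing other requests while the encode runs elsewhere.
    return await threads.deferToThread(_encode, obj)


def main(reactor):
    d = defer.ensureDeferred(encode_off_reactor({"hello": "world"}))
    d.addCallback(print)
    return d


if __name__ == "__main__":
    task.react(main)
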
async def add_to_queue(
    self,
    room_id: str,
    events_and_contexts: Iterable[Tuple[EventBase, EventContext]],
    backfilled: bool,
) -> _PersistResult:
    """Add events to the queue, with the given persist_event options.

    If we are not already processing events in this room, starts off a
    background process to do so, calling the per_item_callback for each item.

    Args:
        room_id (str):
        events_and_contexts (list[(EventBase, EventContext)]):
        backfilled (bool):

    Returns:
        the result returned by the `_per_item_callback` passed to
        `__init__`.
    """
    queue = self._event_persist_queues.setdefault(room_id, deque())

    # if the last item in the queue has the same `backfilled` setting,
    # we can just add these new events to that item.
    if queue and queue[-1].backfilled == backfilled:
        end_item = queue[-1]
    else:
        # need to make a new queue item
        deferred: ObservableDeferred[_PersistResult] = ObservableDeferred(
            defer.Deferred(), consumeErrors=True
        )

        end_item = _EventPersistQueueItem(
            events_and_contexts=[],
            backfilled=backfilled,
            deferred=deferred,
        )
        queue.append(end_item)

    # add our events to the queue item
    end_item.events_and_contexts.extend(events_and_contexts)

    # also add our active opentracing span to the item so that we get a link back
    span = opentracing.active_span()
    if span:
        end_item.parent_opentracing_span_contexts.append(span.context)

    # start a processor for the queue, if there isn't one already
    self._handle_queue(room_id)

    # wait for the queue item to complete
    res = await make_deferred_yieldable(end_item.deferred.observe())

    # add another opentracing span which links to the persist trace.
    with opentracing.start_active_span_follows_from(
        "persist_event_batch_complete",
        (end_item.opentracing_span_context,),
    ):
        pass

    return res
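
# A simplified, self-contained analogue of the queueing pattern above, using
# asyncio futures instead of ObservableDeferred and omitting the `backfilled`
# grouping and the opentracing links: concurrent callers for the same key are
# coalesced into one pending batch, a single background task drains the queue,
# and every contributor awaits the same result. Class and names are
# illustrative only.
import asyncio
from collections import deque
from typing import Awaitable, Callable, Deque, Dict, List, Set, Tuple

Batch = Tuple[List[str], asyncio.Future]


class BatchQueue:
    def __init__(self, process: Callable[[str, List[str]], Awaitable[int]]) -> None:
        self._process = process
        self._queues: Dict[str, Deque[Batch]] = {}
        self._running: Set[str] = set()

    async def add(self, key: str, items: List[str]) -> int:
        queue = self._queues.setdefault(key, deque())
        if queue:
            # a batch is already pending for this key: join it
            batch, fut = queue[-1]
        else:
            batch, fut = [], asyncio.get_running_loop().create_future()
            queue.append((batch, fut))
        batch.extend(items)

        if key not in self._running:
            # start a drain task for this key if there isn't one already
            self._running.add(key)
            asyncio.create_task(self._drain(key))

        # every caller that contributed to the batch awaits the same result
        return await fut

    async def _drain(self, key: str) -> None:
        try:
            queue = self._queues[key]
            while queue:
                batch, fut = queue.popleft()
                try:
                    fut.set_result(await self._process(key, batch))
                except Exception as e:
                    fut.set_exception(e)
        finally:
            self._running.discard(key)


async def demo() -> None:
    async def persist(key: str, batch: List[str]) -> int:
        await asyncio.sleep(0.01)  # pretend to write the batch to a database
        return len(batch)

    q = BatchQueue(persist)
    # both callers land in the same batch and see the same result
    print(await asyncio.gather(q.add("!room", ["a"]), q.add("!room", ["b", "c"])))


asyncio.run(demo())
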
async def cb() -> RV:
    # NB it is important that we do not `await` before setting span_context!
    nonlocal span_context
    with start_active_span(f"ResponseCache[{self._name}].calculate"):
        span = active_span()
        if span:
            span_context = span.context

        return await callback(*args, **kwargs)
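
# The "no `await` before setting span_context" note depends on a Twisted
# behaviour: a coroutine wrapped with defer.ensureDeferred is stepped
# synchronously up to its first await, so anything assigned before that point
# is already visible to the caller once the Deferred exists. A standalone
# sketch of that behaviour (names are illustrative):
from twisted.internet import defer


def demo() -> None:
    captured = None
    gate: defer.Deferred = defer.Deferred()

    async def cb() -> str:
        nonlocal captured
        captured = "set before the first await"  # runs synchronously
        await gate                               # suspends here
        return "result"

    d = defer.ensureDeferred(cb())
    # cb() has already run as far as `await gate`, so `captured` is populated
    # even though the coroutine has not finished yet.
    print(captured)

    gate.callback(None)   # let the coroutine resume and finish
    d.addCallback(print)  # prints "result"


demo()
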
async def get_user_by_req(
    self,
    request: SynapseRequest,
    allow_guest: bool = False,
    rights: str = "access",
    allow_expired: bool = False,
) -> Requester:
    """Get a registered user's ID.

    Args:
        request: An HTTP request with an access_token query parameter.
        allow_guest: If False, will raise an AuthError if the user making the
            request is a guest.
        rights: The operation being performed; the access token must allow this
        allow_expired: If True, allow the request through even if the account
            is expired, or session token lifetime has ended. Note that
            /login will deliver access tokens regardless of expiration.

    Returns:
        Resolves to the requester
    Raises:
        InvalidClientCredentialsError if no user by that token exists or the token
            is invalid.
        AuthError if access is denied for the user in the access token
    """
    parent_span = active_span()
    with start_active_span("get_user_by_req"):
        requester = await self._wrapped_get_user_by_req(
            request, allow_guest, rights, allow_expired
        )

        if parent_span:
            if requester.authenticated_entity in self._force_tracing_for_users:
                # request tracing is enabled for this user, so we need to force
                # tracing on for the parent span (which will be the servlet span).
                #
                # It's too late for the get_user_by_req span to inherit the setting,
                # so we also force it on for that.
                force_tracing()
                force_tracing(parent_span)

            parent_span.set_tag(
                "authenticated_entity", requester.authenticated_entity
            )
            parent_span.set_tag("user_id", requester.user.to_string())
            if requester.device_id is not None:
                parent_span.set_tag("device_id", requester.device_id)
            if requester.app_service is not None:
                parent_span.set_tag("appservice_id", requester.app_service.id)

        return requester
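
# A minimal sketch of the "tag the parent span" pattern used above, written
# against the opentracing package's global tracer (a no-op unless a real
# tracer such as Jaeger is installed). Tags go on the outer servlet span so
# that searching by user_id surfaces the whole request trace rather than just
# the auth sub-span. Names and tag values here are illustrative.
import opentracing


def handle_request() -> None:
    tracer = opentracing.tracer

    with tracer.start_active_span("servlet"):
        # remember the servlet span before opening the child span
        parent_span = tracer.active_span

        with tracer.start_active_span("get_user_by_req"):
            user_id = "@alice:example.com"  # stand-in for real authentication

        if parent_span:
            # annotate the servlet span with the authenticated user
            parent_span.set_tag("authenticated_entity", user_id)
            parent_span.set_tag("user_id", user_id)


handle_request()
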
async def new_func(
    request: SynapseRequest, *args: Any, **kwargs: str
) -> Optional[Tuple[int, Any]]:
    """A callback which can be passed to HttpServer.RegisterPaths

    Args:
        request:
        *args: unused?
        **kwargs: the dict mapping keys to path components as specified
            in the path match regexp.

    Returns:
        (response code, response object) as returned by the callback method.
        None if the request has already been handled.
    """
    content = None
    if request.method in [b"PUT", b"POST"]:
        # TODO: Handle other method types? other content types?
        content = parse_json_object_from_request(request)

    try:
        with start_active_span("authenticate_request"):
            origin: Optional[str] = await authenticator.authenticate_request(
                request, content
            )
    except NoAuthenticationError:
        origin = None
        if self.REQUIRE_AUTH:
            logger.warning("authenticate_request failed: missing authentication")
            raise
    except Exception as e:
        logger.warning("authenticate_request failed: %s", e)
        raise

    # update the active opentracing span with the authenticated entity
    set_tag("authenticated_entity", origin)

    # if the origin is authenticated and whitelisted, use its span context
    # as the parent.
    context = None
    if origin and whitelisted_homeserver(origin):
        context = span_context_from_request(request)

    if context:
        servlet_span = active_span()
        # a scope which uses the origin's context as a parent
        processing_start_time = time.time()
        scope = start_active_span_follows_from(
            "incoming-federation-request",
            child_of=context,
            contexts=(servlet_span,),
            start_time=processing_start_time,
        )
    else:
        # just use our context as a parent
        scope = start_active_span("incoming-federation-request")

    try:
        with scope:
            if origin and self.RATELIMIT:
                with ratelimiter.ratelimit(origin) as d:
                    await d
                    if request._disconnected:
                        logger.warning(
                            "client disconnected before we started processing "
                            "request"
                        )
                        return None
                    response = await func(
                        origin, content, request.args, *args, **kwargs
                    )
            else:
                response = await func(
                    origin, content, request.args, *args, **kwargs
                )
    finally:
        # if we used the origin's context as the parent, add a new span using
        # the servlet span as a parent, so that we have a link
        if context:
            scope2 = start_active_span_follows_from(
                "process-federation_request",
                contexts=(scope.span,),
                start_time=processing_start_time,
            )
            scope2.close()

    return response
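
# A standalone sketch of the propagation idea above, using the opentracing
# package directly rather than Synapse's wrappers (whitelisting, rate limiting
# and the Twisted plumbing are omitted; names are illustrative). The incoming
# request's span context is extracted from its HTTP headers and used as the
# parent, while a follows-from reference keeps a link back to the local
# servlet span so the two traces stay connected.
import opentracing
from opentracing.propagation import Format


def start_federation_span(headers: dict):
    # no-op tracer by default; a real tracer is installed in production
    tracer = opentracing.tracer

    try:
        # pull the origin server's span context out of the request headers
        origin_ctx = tracer.extract(Format.HTTP_HEADERS, headers)
    except (
        opentracing.UnsupportedFormatException,
        opentracing.SpanContextCorruptedException,
    ):
        origin_ctx = None

    refs = []
    if origin_ctx is not None:
        # parent the new span on the remote request's context...
        refs.append(opentracing.child_of(origin_ctx))
    servlet_span = tracer.active_span
    if servlet_span is not None:
        # ...but keep a link back to the span for our own servlet
        refs.append(opentracing.follows_from(servlet_span.context))

    return tracer.start_active_span(
        "incoming-federation-request", references=refs or None
    )


# the header format depends on the tracer in use; an empty carrier works
# with the default no-op tracer
with start_federation_span({}) as scope:
    scope.span.set_tag("origin", "remote.example.com")
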