Example #1
async def _async_write_json_to_request_in_thread(
    request: SynapseRequest,
    json_encoder: Callable[[Any], bytes],
    json_object: Any,
) -> None:
    """Encodes the given JSON object on a thread and then writes it to the
    request.

    This is done so that encoding large JSON objects doesn't block the reactor
    thread.

    Note: We don't use JsonEncoder.iterencode here as that falls back to the
    Python implementation (rather than the C backend), which is *much* more
    expensive.
    """

    def encode(opentracing_span: "Optional[opentracing.Span]") -> bytes:
        # it might take a while for the threadpool to schedule us, so we write
        # opentracing logs once we actually get scheduled, so that we can see how
        # much that contributed.
        if opentracing_span:
            opentracing_span.log_kv({"event": "scheduled"})
        res = json_encoder(json_object)
        if opentracing_span:
            opentracing_span.log_kv({"event": "encoded"})
        return res

    with start_active_span("encode_json_response"):
        span = active_span()
        json_str = await defer_to_thread(request.reactor, encode, span)

    _write_bytes_to_request(request, json_str)
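
The docstring above explains the point of this helper: encoding large JSON responses on a threadpool so the reactor thread is not blocked. A minimal, framework-agnostic sketch of the same pattern using plain Twisted follows; it assumes an OpenTracing tracer is configured, skips Synapse's logging-context handling, and `encode_on_thread` is a hypothetical name.

import json

import opentracing
from twisted.internet.threads import deferToThread


async def encode_on_thread(obj):
    # Hypothetical sketch: encode JSON off the reactor thread, logging when the
    # work is actually scheduled and when it finishes, as in the example above.
    tracer = opentracing.global_tracer()
    with tracer.start_active_span("encode_json_response"):
        span = tracer.active_span

        def encode() -> bytes:
            # Runs on a threadpool thread; it may take a while to get scheduled.
            if span:
                span.log_kv({"event": "scheduled"})
            data = json.dumps(obj).encode("utf-8")
            if span:
                span.log_kv({"event": "encoded"})
            return data

        return await deferToThread(encode)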
Example #2
    async def run():
        with _bg_metrics_lock:
            count = _background_process_counts.get(desc, 0)
            _background_process_counts[desc] = count + 1

        _background_process_start_count.labels(desc).inc()
        _background_process_in_flight_count.labels(desc).inc()

        with BackgroundProcessLoggingContext(desc, count) as context:
            try:
                if bg_start_span:
                    ctx = start_active_span(
                        f"bgproc.{desc}",
                        tags={SynapseTags.REQUEST_ID: str(context)})
                else:
                    ctx = noop_context_manager()
                with ctx:
                    return await maybe_awaitable(func(*args, **kwargs))
            except Exception:
                logger.exception(
                    "Background process '%s' threw an exception",
                    desc,
                )
            finally:
                _background_process_in_flight_count.labels(desc).dec()
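
The wrapper above relies on a handful of module-level Prometheus metrics and a lock-protected counter dict. A rough sketch of how such objects might be declared is shown below; the metric names and help strings are illustrative assumptions, not necessarily what Synapse registers.

import threading
from typing import Dict

from prometheus_client import Counter, Gauge

# Illustrative metric declarations; names are placeholders, not Synapse's exact ones.
_bg_metrics_lock = threading.Lock()
_background_process_counts: Dict[str, int] = {}

_background_process_start_count = Counter(
    "background_process_start_count",
    "Number of times a background process has been started",
    ["name"],
)
_background_process_in_flight_count = Gauge(
    "background_process_in_flight_count",
    "Number of background processes currently running",
    ["name"],
)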
Example #3
    async def get(self, cache_name: str, key: str) -> Optional[Any]:
        """Look up a key/value in the named cache."""

        if self._redis_connection is None:
            return None

        with opentracing.start_active_span(
                "ExternalCache.get",
                tags={opentracing.SynapseTags.CACHE_NAME: cache_name},
        ):
            with response_timer.labels("get").time():
                result = await make_deferred_yieldable(
                    self._redis_connection.get(
                        self._get_redis_key(cache_name, key)))

        logger.debug("Got cache result %s %s: %r", cache_name, key, result)

        get_counter.labels(cache_name, result is not None).inc()

        if not result:
            return None

        # The Redis client converts numeric values back to Python ints, so
        # return them as-is without JSON-decoding.
        if isinstance(result, int):
            return result

        return json_decoder.decode(result)
Example #4
    async def run():
        with _bg_metrics_lock:
            count = _background_process_counts.get(desc, 0)
            _background_process_counts[desc] = count + 1

        _background_process_start_count.labels(desc).inc()
        _background_process_in_flight_count.labels(desc).inc()

        with BackgroundProcessLoggingContext(desc) as context:
            context.request = "%s-%i" % (desc, count)
            try:
                ctx = noop_context_manager()
                if bg_start_span:
                    ctx = start_active_span(
                        desc, tags={"request_id": context.request})
                with ctx:
                    result = func(*args, **kwargs)

                    if inspect.isawaitable(result):
                        result = await result

                    return result
            except Exception:
                logger.exception(
                    "Background process '%s' threw an exception",
                    desc,
                )
            finally:
                _background_process_in_flight_count.labels(desc).dec()
Example #5
    async def set(self, cache_name: str, key: str, value: Any,
                  expiry_ms: int) -> None:
        """Add the key/value to the named cache, with the expiry time given."""

        if self._redis_connection is None:
            return

        set_counter.labels(cache_name).inc()

        # txredisapi requires the value to be string, bytes or numbers, so we
        # encode stuff in JSON.
        encoded_value = json_encoder.encode(value)

        logger.debug("Caching %s %s: %r", cache_name, key, encoded_value)

        with opentracing.start_active_span(
                "ExternalCache.set",
                tags={opentracing.SynapseTags.CACHE_NAME: cache_name},
        ):
            with response_timer.labels("set").time():
                return await make_deferred_yieldable(
                    self._redis_connection.set(
                        self._get_redis_key(cache_name, key),
                        encoded_value,
                        pexpire=expiry_ms,
                    ))
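
Together with the `get` method in example #3, this forms a simple JSON-over-Redis cache. A hypothetical usage sketch follows; the `external_cache` instance, cache name, and value are made up for illustration.

async def lookup_or_compute(external_cache, user_id: str):
    # Hypothetical usage of the cache class above: read, and on a miss compute
    # and store the value for 30 minutes.
    cached = await external_cache.get("profile", user_id)
    if cached is not None:
        return cached

    profile = {"displayname": "Alice"}  # stand-in for a real computation
    await external_cache.set("profile", user_id, profile, expiry_ms=30 * 60 * 1000)
    return profile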
Example #6
    async def send_device_message(
        self,
        sender_user_id: str,
        message_type: str,
        messages: Dict[str, Dict[str, JsonDict]],
    ) -> None:
        set_tag("number_of_messages", len(messages))
        set_tag("sender", sender_user_id)
        local_messages = {}
        remote_messages = {}  # type: Dict[str, Dict[str, Dict[str, JsonDict]]]
        for user_id, by_device in messages.items():
            # we use UserID.from_string to catch invalid user ids
            if self.is_mine(UserID.from_string(user_id)):
                messages_by_device = {
                    device_id: {
                        "content": message_content,
                        "type": message_type,
                        "sender": sender_user_id,
                    }
                    for device_id, message_content in by_device.items()
                }
                if messages_by_device:
                    local_messages[user_id] = messages_by_device
            else:
                destination = get_domain_from_id(user_id)
                remote_messages.setdefault(destination,
                                           {})[user_id] = by_device

        message_id = random_string(16)

        context = get_active_span_text_map()

        remote_edu_contents = {}
        for destination, messages in remote_messages.items():
            with start_active_span("to_device_for_user"):
                set_tag("destination", destination)
                remote_edu_contents[destination] = {
                    "messages": messages,
                    "sender": sender_user_id,
                    "type": message_type,
                    "message_id": message_id,
                    "org.matrix.opentracing_context":
                    json_encoder.encode(context),
                }

        log_kv({"local_messages": local_messages})
        stream_id = await self.store.add_messages_to_device_inbox(
            local_messages, remote_edu_contents)

        self.notifier.on_new_event("to_device_key",
                                   stream_id,
                                   users=local_messages.keys())

        log_kv({"remote_messages": remote_messages})
        if self.federation_sender:
            for destination in remote_messages.keys():
                # Enqueue a new federation transaction to send the new
                # device messages to each remote destination.
                self.federation_sender.send_device_messages(destination)
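
The handler above serialises the active span context into each remote EDU under `org.matrix.opentracing_context`. A sketch of how the receiving side might restore that context with the generic OpenTracing text-map API follows; this is an assumption for illustration, not the handler's own code.

import json

import opentracing
from opentracing.propagation import Format


def span_context_from_edu(edu_content: dict):
    # Sketch: decode the serialised text map and extract a span context from it.
    carrier = json.loads(edu_content.get("org.matrix.opentracing_context", "{}"))
    try:
        return opentracing.global_tracer().extract(Format.TEXT_MAP, carrier)
    except opentracing.SpanContextCorruptedException:
        return None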
Example #7
    async def cb() -> RV:
        # NB it is important that we do not `await` before setting span_context!
        nonlocal span_context
        with start_active_span(f"ResponseCache[{self._name}].calculate"):
            span = active_span()
            if span:
                span_context = span.context
            return await callback(*args, **kwargs)
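
The comment in this snippet matters: once the coroutine hits an `await`, other tasks may run and the notion of the "active" span can change, so the context has to be captured synchronously first. A tiny illustrative helper showing just that capture step, assuming a configured OpenTracing tracer:

import opentracing


def capture_active_context():
    # Grab the currently-active span's context before any await, so a later
    # caller can link to it (e.g. with a follows-from reference) even after
    # this scope has closed.
    span = opentracing.global_tracer().active_span
    return span.context if span is not None else None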
Example #8
    def test_nested_spans(self) -> None:
        """Starting two spans off inside each other should work"""

        with LoggingContext("root context"):
            with start_active_span("root span", tracer=self._tracer) as root_scope:
                self.assertEqual(self._tracer.active_span, root_scope.span)

                scope1 = start_active_span(
                    "child1",
                    tracer=self._tracer,
                )
                self.assertEqual(
                    self._tracer.active_span, scope1.span, "child1 was not activated"
                )
                self.assertEqual(
                    scope1.span.context.parent_id, root_scope.span.context.span_id
                )

                scope2 = start_active_span_follows_from(
                    "child2",
                    contexts=(scope1,),
                    tracer=self._tracer,
                )
                self.assertEqual(self._tracer.active_span, scope2.span)
                self.assertEqual(
                    scope2.span.context.parent_id, scope1.span.context.span_id
                )

                with scope1, scope2:
                    pass

                # the root scope should be restored
                self.assertEqual(self._tracer.active_span, root_scope.span)
                self.assertIsNotNone(scope2.span.end_time)
                self.assertIsNotNone(scope1.span.end_time)

            self.assertIsNone(self._tracer.active_span)

        # the spans should be reported in order of their finishing.
        self.assertEqual(
            self._reporter.get_spans(), [scope2.span, scope1.span, root_scope.span]
        )
Example #9
        async def task(i: int):
            scope = start_active_span(
                f"task{i}",
                tracer=self._tracer,
            )
            scopes.append(scope)

            self.assertEqual(self._tracer.active_span, scope.span)
            await clock.sleep(4)
            self.assertEqual(self._tracer.active_span, scope.span)
            scope.close()
Example #10
File: auth.py, Project: matrix-org/synapse
    async def get_user_by_req(
        self,
        request: SynapseRequest,
        allow_guest: bool = False,
        rights: str = "access",
        allow_expired: bool = False,
    ) -> Requester:
        """Get a registered user's ID.

        Args:
            request: An HTTP request with an access_token query parameter.
            allow_guest: If False, will raise an AuthError if the user making the
                request is a guest.
            rights: The operation being performed; the access token must allow this
            allow_expired: If True, allow the request through even if the account
                is expired, or session token lifetime has ended. Note that
                /login will deliver access tokens regardless of expiration.

        Returns:
            Resolves to the requester
        Raises:
            InvalidClientCredentialsError if no user by that token exists or the token
                is invalid.
            AuthError if access is denied for the user in the access token
        """
        parent_span = active_span()
        with start_active_span("get_user_by_req"):
            requester = await self._wrapped_get_user_by_req(
                request, allow_guest, rights, allow_expired
            )

            if parent_span:
                if requester.authenticated_entity in self._force_tracing_for_users:
                    # request tracing is enabled for this user, so we need to force it
                    # tracing on for the parent span (which will be the servlet span).
                    #
                    # It's too late for the get_user_by_req span to inherit the setting,
                    # so we also force it on for that.
                    force_tracing()
                    force_tracing(parent_span)
                parent_span.set_tag(
                    "authenticated_entity", requester.authenticated_entity
                )
                parent_span.set_tag("user_id", requester.user.to_string())
                if requester.device_id is not None:
                    parent_span.set_tag("device_id", requester.device_id)
                if requester.app_service is not None:
                    parent_span.set_tag("appservice_id", requester.app_service.id)
            return requester
Example #11
        async def root():
            with start_active_span("root span", tracer=self._tracer) as root_scope:
                self.assertEqual(self._tracer.active_span, root_scope.span)
                scopes.append(root_scope)

                d1 = run_in_background(task, 1)
                await clock.sleep(2)
                d2 = run_in_background(task, 2)

                # because we did run_in_background, the active span should still be the
                # root.
                self.assertEqual(self._tracer.active_span, root_scope.span)

                await make_deferred_yieldable(
                    defer.gatherResults([d1, d2], consumeErrors=True)
                )

                self.assertEqual(self._tracer.active_span, root_scope.span)
Example #12
    def test_start_active_span(self) -> None:
        # the scope manager assumes a logging context of some sort.
        with LoggingContext("root context"):
            self.assertIsNone(self._tracer.active_span)

            # start_active_span should start and activate a span.
            scope = start_active_span("span", tracer=self._tracer)
            span = scope.span
            self.assertEqual(self._tracer.active_span, span)
            self.assertIsNotNone(span.start_time)

            # entering the context doesn't actually do a whole lot.
            with scope as ctx:
                self.assertIs(ctx, scope)
                self.assertEqual(self._tracer.active_span, span)

            # ... but leaving it unsets the active span, and finishes the span.
            self.assertIsNone(self._tracer.active_span)
            self.assertIsNotNone(span.end_time)

        # the span should have been reported
        self.assertEqual(self._reporter.get_spans(), [span])
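
As the first comment in this test notes, the scope manager expects to run inside a logging context. A minimal usage sketch under that assumption, using the same Synapse helpers imported in these examples:

from synapse.logging.context import LoggingContext
from synapse.logging.opentracing import start_active_span


def traced_unit_of_work() -> None:
    # Minimal sketch: start the span inside a LoggingContext, since the scope
    # manager stores the active scope on the current logging context.
    with LoggingContext("my_request"):
        with start_active_span("my_request.work"):
            pass  # ... the work to be traced ...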
Example #13
    async def _send_request(
        self,
        request: MatrixFederationRequest,
        retry_on_dns_fail: bool = True,
        timeout: Optional[int] = None,
        long_retries: bool = False,
        ignore_backoff: bool = False,
        backoff_on_404: bool = False,
    ) -> IResponse:
        """
        Sends a request to the given server.

        Args:
            request: details of request to be sent

            retry_on_dns_fail: true if the request should be retried on DNS failures

            timeout: number of milliseconds to wait for the response headers
                (including connecting to the server), *for each attempt*.
                60s by default.

            long_retries: whether to use the long retry algorithm.

                The regular retry algorithm makes 4 attempts, with intervals
                [0.5s, 1s, 2s].

                The long retry algorithm makes 11 attempts, with intervals
                [4s, 16s, 60s, 60s, ...]

                Both algorithms add -20%/+40% jitter to the retry intervals.

                Note that the above intervals are *in addition* to the time spent
                waiting for the request to complete (up to `timeout` ms).

                NB: the long retry algorithm takes over 20 minutes to complete, with
                a default timeout of 60s!

            ignore_backoff: true to ignore the historical backoff data
                and try the request anyway.

            backoff_on_404: Back off if we get a 404

        Returns:
            Resolves with the HTTP response object on success.

        Raises:
            HttpResponseException: If we get an HTTP response code >= 300
                (except 429).
            NotRetryingDestination: If we are not yet ready to retry this
                server.
            FederationDeniedError: If this destination is not on our
                federation whitelist
            RequestSendFailed: If there were problems connecting to the
                remote, due to e.g. DNS failures, connection timeouts etc.
        """
        if timeout:
            _sec_timeout = timeout / 1000
        else:
            _sec_timeout = self.default_timeout

        if (self.hs.config.federation.federation_domain_whitelist is not None
                and request.destination
                not in self.hs.config.federation.federation_domain_whitelist):
            raise FederationDeniedError(request.destination)

        limiter = await synapse.util.retryutils.get_retry_limiter(
            request.destination,
            self.clock,
            self._store,
            backoff_on_404=backoff_on_404,
            ignore_backoff=ignore_backoff,
            notifier=self.hs.get_notifier(),
            replication_client=self.hs.get_replication_command_handler(),
        )

        method_bytes = request.method.encode("ascii")
        destination_bytes = request.destination.encode("ascii")
        path_bytes = request.path.encode("ascii")
        query_bytes = encode_query_args(request.query)

        scope = start_active_span(
            "outgoing-federation-request",
            tags={
                tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
                tags.PEER_ADDRESS: request.destination,
                tags.HTTP_METHOD: request.method,
                tags.HTTP_URL: request.path,
            },
            finish_on_close=True,
        )

        # Inject the span into the headers
        headers_dict: Dict[bytes, List[bytes]] = {}
        opentracing.inject_header_dict(headers_dict, request.destination)

        headers_dict[b"User-Agent"] = [self.version_string_bytes]

        with limiter, scope:
            # XXX: Would be much nicer to retry only at the transaction-layer
            # (once we have reliable transactions in place)
            if long_retries:
                retries_left = MAX_LONG_RETRIES
            else:
                retries_left = MAX_SHORT_RETRIES

            url_bytes = request.uri
            url_str = url_bytes.decode("ascii")

            url_to_sign_bytes = urllib.parse.urlunparse(
                (b"", b"", path_bytes, None, query_bytes, b""))

            while True:
                try:
                    json = request.get_json()
                    if json:
                        headers_dict[b"Content-Type"] = [b"application/json"]
                        auth_headers = self.build_auth_headers(
                            destination_bytes, method_bytes, url_to_sign_bytes,
                            json)
                        data = encode_canonical_json(json)
                        producer: Optional[
                            IBodyProducer] = QuieterFileBodyProducer(
                                BytesIO(data), cooperator=self._cooperator)
                    else:
                        producer = None
                        auth_headers = self.build_auth_headers(
                            destination_bytes, method_bytes, url_to_sign_bytes)

                    headers_dict[b"Authorization"] = auth_headers

                    logger.debug(
                        "{%s} [%s] Sending request: %s %s; timeout %fs",
                        request.txn_id,
                        request.destination,
                        request.method,
                        url_str,
                        _sec_timeout,
                    )

                    outgoing_requests_counter.labels(request.method).inc()

                    try:
                        with Measure(self.clock, "outbound_request"):
                            # we don't want all the fancy cookie and redirect handling
                            # that treq.request gives: just use the raw Agent.

                            # To preserve the logging context, the timeout is treated
                            # in a similar way to `defer.gatherResults`:
                            # * Each logging context-preserving fork is wrapped in
                            #   `run_in_background`. In this case there is only one,
                            #   since the timeout fork is not logging-context aware.
                            # * The `Deferred` that joins the forks back together is
                            #   wrapped in `make_deferred_yieldable` to restore the
                            #   logging context regardless of the path taken.
                            request_deferred = run_in_background(
                                self.agent.request,
                                method_bytes,
                                url_bytes,
                                headers=Headers(headers_dict),
                                bodyProducer=producer,
                            )
                            request_deferred = timeout_deferred(
                                request_deferred,
                                timeout=_sec_timeout,
                                reactor=self.reactor,
                            )

                            response = await make_deferred_yieldable(
                                request_deferred)
                    except DNSLookupError as e:
                        raise RequestSendFailed(
                            e, can_retry=retry_on_dns_fail) from e
                    except Exception as e:
                        raise RequestSendFailed(e, can_retry=True) from e

                    incoming_responses_counter.labels(request.method,
                                                      response.code).inc()

                    set_tag(tags.HTTP_STATUS_CODE, response.code)
                    response_phrase = response.phrase.decode("ascii",
                                                             errors="replace")

                    if 200 <= response.code < 300:
                        logger.debug(
                            "{%s} [%s] Got response headers: %d %s",
                            request.txn_id,
                            request.destination,
                            response.code,
                            response_phrase,
                        )
                    else:
                        logger.info(
                            "{%s} [%s] Got response headers: %d %s",
                            request.txn_id,
                            request.destination,
                            response.code,
                            response_phrase,
                        )
                        # :'(
                        # Update transactions table?
                        d = treq.content(response)
                        d = timeout_deferred(d,
                                             timeout=_sec_timeout,
                                             reactor=self.reactor)

                        try:
                            body = await make_deferred_yieldable(d)
                        except Exception as e:
                            # Eh, we're already going to raise an exception so lets
                            # ignore if this fails.
                            logger.warning(
                                "{%s} [%s] Failed to get error response: %s %s: %s",
                                request.txn_id,
                                request.destination,
                                request.method,
                                url_str,
                                _flatten_response_never_received(e),
                            )
                            body = None

                        exc = HttpResponseException(response.code,
                                                    response_phrase, body)

                        # Retry if the error is a 5xx or a 429 (Too Many
                        # Requests), otherwise just raise a standard
                        # `HttpResponseException`
                        if 500 <= response.code < 600 or response.code == 429:
                            raise RequestSendFailed(exc,
                                                    can_retry=True) from exc
                        else:
                            raise exc

                    break
                except RequestSendFailed as e:
                    logger.info(
                        "{%s} [%s] Request failed: %s %s: %s",
                        request.txn_id,
                        request.destination,
                        request.method,
                        url_str,
                        _flatten_response_never_received(e.inner_exception),
                    )

                    if not e.can_retry:
                        raise

                    if retries_left and not timeout:
                        if long_retries:
                            delay = 4**(MAX_LONG_RETRIES + 1 - retries_left)
                            delay = min(delay, 60)
                            delay *= random.uniform(0.8, 1.4)
                        else:
                            delay = 0.5 * 2**(MAX_SHORT_RETRIES - retries_left)
                            delay = min(delay, 2)
                            delay *= random.uniform(0.8, 1.4)

                        logger.debug(
                            "{%s} [%s] Waiting %ss before re-sending...",
                            request.txn_id,
                            request.destination,
                            delay,
                        )

                        # Sleep for the calculated delay, or wake up immediately
                        # if we get notified that the server is back up.
                        await self._sleeper.sleep(request.destination,
                                                  delay * 1000)
                        retries_left -= 1
                    else:
                        raise

                except Exception as e:
                    logger.warning(
                        "{%s} [%s] Request failed: %s %s: %s",
                        request.txn_id,
                        request.destination,
                        request.method,
                        url_str,
                        _flatten_response_never_received(e),
                    )
                    raise
        return response
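
Before the request is sent, the current span is injected into the outgoing headers (`opentracing.inject_header_dict` above) so the remote server can continue the trace. With the generic OpenTracing API the equivalent step might look like the sketch below; it is an illustration, not Synapse's helper, which also takes the destination so injection can be skipped for non-whitelisted servers.

import opentracing
from opentracing.propagation import Format


def inject_tracing_headers(headers: dict) -> None:
    # Sketch: inject the active span's context into a plain str->str header
    # carrier using the standard HTTP_HEADERS propagation format.
    tracer = opentracing.global_tracer()
    span = tracer.active_span
    if span is not None:
        tracer.inject(span.context, Format.HTTP_HEADERS, headers)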
Example #14
    def _send_request(
        self,
        request,
        retry_on_dns_fail=True,
        timeout=None,
        long_retries=False,
        ignore_backoff=False,
        backoff_on_404=False,
    ):
        """
        Sends a request to the given server.

        Args:
            request (MatrixFederationRequest): details of request to be sent

            timeout (int|None): number of milliseconds to wait for the response headers
                (including connecting to the server), *for each attempt*.
                60s by default.

            long_retries (bool): whether to use the long retry algorithm.

                The regular retry algorithm makes 4 attempts, with intervals
                [0.5s, 1s, 2s].

                The long retry algorithm makes 11 attempts, with intervals
                [4s, 16s, 60s, 60s, ...]

                Both algorithms add -20%/+40% jitter to the retry intervals.

                Note that the above intervals are *in addition* to the time spent
                waiting for the request to complete (up to `timeout` ms).

                NB: the long retry algorithm takes over 20 minutes to complete, with
                a default timeout of 60s!

            ignore_backoff (bool): true to ignore the historical backoff data
                and try the request anyway.

            backoff_on_404 (bool): Back off if we get a 404

        Returns:
            Deferred[twisted.web.client.Response]: resolves with the HTTP
            response object on success.

        Raises:
            HttpResponseException: If we get an HTTP response code >= 300
                (except 429).
            NotRetryingDestination: If we are not yet ready to retry this
                server.
            FederationDeniedError: If this destination is not on our
                federation whitelist
            RequestSendFailed: If there were problems connecting to the
                remote, due to e.g. DNS failures, connection timeouts etc.
        """
        if timeout:
            _sec_timeout = timeout / 1000
        else:
            _sec_timeout = self.default_timeout

        if (
            self.hs.config.federation_domain_whitelist is not None
            and request.destination not in self.hs.config.federation_domain_whitelist
        ):
            raise FederationDeniedError(request.destination)

        limiter = yield synapse.util.retryutils.get_retry_limiter(
            request.destination,
            self.clock,
            self._store,
            backoff_on_404=backoff_on_404,
            ignore_backoff=ignore_backoff,
        )

        method_bytes = request.method.encode("ascii")
        destination_bytes = request.destination.encode("ascii")
        path_bytes = request.path.encode("ascii")
        if request.query:
            query_bytes = encode_query_args(request.query)
        else:
            query_bytes = b""

        # Start a span for the outgoing federation request
        scope = opentracing.start_active_span(
            "outgoing-federation-request",
            tags={
                opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_CLIENT,
                opentracing.tags.PEER_ADDRESS: request.destination,
                opentracing.tags.HTTP_METHOD: request.method,
                opentracing.tags.HTTP_URL: request.path,
            },
            finish_on_close=True,
        )

        # Inject the span into the headers
        headers_dict = {}
        opentracing.inject_active_span_byte_dict(headers_dict, request.destination)

        headers_dict[b"User-Agent"] = [self.version_string_bytes]

        with limiter, scope:
            # XXX: Would be much nicer to retry only at the transaction-layer
            # (once we have reliable transactions in place)
            if long_retries:
                retries_left = MAX_LONG_RETRIES
            else:
                retries_left = MAX_SHORT_RETRIES

            url_bytes = urllib.parse.urlunparse(
                (b"matrix", destination_bytes, path_bytes, None, query_bytes, b"")
            )
            url_str = url_bytes.decode("ascii")

            url_to_sign_bytes = urllib.parse.urlunparse(
                (b"", b"", path_bytes, None, query_bytes, b"")
            )

            while True:
                try:
                    json = request.get_json()
                    if json:
                        headers_dict[b"Content-Type"] = [b"application/json"]
                        auth_headers = self.build_auth_headers(
                            destination_bytes, method_bytes, url_to_sign_bytes, json
                        )
                        data = encode_canonical_json(json)
                        producer = QuieterFileBodyProducer(
                            BytesIO(data), cooperator=self._cooperator
                        )
                    else:
                        producer = None
                        auth_headers = self.build_auth_headers(
                            destination_bytes, method_bytes, url_to_sign_bytes
                        )

                    headers_dict[b"Authorization"] = auth_headers

                    logger.info(
                        "{%s} [%s] Sending request: %s %s; timeout %fs",
                        request.txn_id,
                        request.destination,
                        request.method,
                        url_str,
                        _sec_timeout,
                    )

                    try:
                        with Measure(self.clock, "outbound_request"):
                            # we don't want all the fancy cookie and redirect handling
                            # that treq.request gives: just use the raw Agent.
                            request_deferred = self.agent.request(
                                method_bytes,
                                url_bytes,
                                headers=Headers(headers_dict),
                                bodyProducer=producer,
                            )

                            request_deferred = timeout_deferred(
                                request_deferred,
                                timeout=_sec_timeout,
                                reactor=self.reactor,
                            )

                            response = yield request_deferred
                    except DNSLookupError as e:
                        raise_from(RequestSendFailed(e, can_retry=retry_on_dns_fail), e)
                    except Exception as e:
                        logger.info("Failed to send request: %s", e)
                        raise_from(RequestSendFailed(e, can_retry=True), e)

                    logger.info(
                        "{%s} [%s] Got response headers: %d %s",
                        request.txn_id,
                        request.destination,
                        response.code,
                        response.phrase.decode("ascii", errors="replace"),
                    )

                    opentracing.set_tag(
                        opentracing.tags.HTTP_STATUS_CODE, response.code
                    )

                    if 200 <= response.code < 300:
                        pass
                    else:
                        # :'(
                        # Update transactions table?
                        d = treq.content(response)
                        d = timeout_deferred(
                            d, timeout=_sec_timeout, reactor=self.reactor
                        )

                        try:
                            body = yield make_deferred_yieldable(d)
                        except Exception as e:
                            # Eh, we're already going to raise an exception so lets
                            # ignore if this fails.
                            logger.warn(
                                "{%s} [%s] Failed to get error response: %s %s: %s",
                                request.txn_id,
                                request.destination,
                                request.method,
                                url_str,
                                _flatten_response_never_received(e),
                            )
                            body = None

                        e = HttpResponseException(response.code, response.phrase, body)

                        # Retry if the error is a 429 (Too Many Requests),
                        # otherwise just raise a standard HttpResponseException
                        if response.code == 429:
                            raise_from(RequestSendFailed(e, can_retry=True), e)
                        else:
                            raise e

                    break
                except RequestSendFailed as e:
                    logger.warn(
                        "{%s} [%s] Request failed: %s %s: %s",
                        request.txn_id,
                        request.destination,
                        request.method,
                        url_str,
                        _flatten_response_never_received(e.inner_exception),
                    )

                    if not e.can_retry:
                        raise

                    if retries_left and not timeout:
                        if long_retries:
                            delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                            delay = min(delay, 60)
                            delay *= random.uniform(0.8, 1.4)
                        else:
                            delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                            delay = min(delay, 2)
                            delay *= random.uniform(0.8, 1.4)

                        logger.debug(
                            "{%s} [%s] Waiting %ss before re-sending...",
                            request.txn_id,
                            request.destination,
                            delay,
                        )

                        yield self.clock.sleep(delay)
                        retries_left -= 1
                    else:
                        raise

                except Exception as e:
                    logger.warn(
                        "{%s} [%s] Request failed: %s %s: %s",
                        request.txn_id,
                        request.destination,
                        request.method,
                        url_str,
                        _flatten_response_never_received(e),
                    )
                    raise
        return response
Example #15
File: server.py, Project: wuwei1972/synapse
        async def new_func(request, *args, **kwargs):
            """A callback which can be passed to HttpServer.RegisterPaths

            Args:
                request (twisted.web.http.Request):
                *args: unused?
                **kwargs (dict[unicode, unicode]): the dict mapping keys to path
                    components as specified in the path match regexp.

            Returns:
                Tuple[int, object]|None: (response code, response object) as returned by
                    the callback method. None if the request has already been handled.
            """
            content = None
            if request.method in [b"PUT", b"POST"]:
                # TODO: Handle other method types? other content types?
                content = parse_json_object_from_request(request)

            try:
                origin = await authenticator.authenticate_request(request, content)
            except NoAuthenticationError:
                origin = None
                if self.REQUIRE_AUTH:
                    logger.warn("authenticate_request failed: missing authentication")
                    raise
            except Exception as e:
                logger.warn("authenticate_request failed: %s", e)
                raise

            request_tags = {
                "request_id": request.get_request_id(),
                tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
                tags.HTTP_METHOD: request.get_method(),
                tags.HTTP_URL: request.get_redacted_uri(),
                tags.PEER_HOST_IPV6: request.getClientIP(),
                "authenticated_entity": origin,
                "servlet_name": request.request_metrics.name,
            }

            # Only accept the span context if the origin is authenticated
            # and whitelisted
            if origin and whitelisted_homeserver(origin):
                scope = start_active_span_from_request(
                    request, "incoming-federation-request", tags=request_tags
                )
            else:
                scope = start_active_span(
                    "incoming-federation-request", tags=request_tags
                )

            with scope:
                if origin:
                    with ratelimiter.ratelimit(origin) as d:
                        await d
                        response = await func(
                            origin, content, request.args, *args, **kwargs
                        )
                else:
                    response = await func(
                        origin, content, request.args, *args, **kwargs
                    )

            return response
Example #16
    async def wait_for_events(
        self,
        user_id: str,
        timeout: int,
        callback: Callable[[StreamToken, StreamToken], Awaitable[T]],
        room_ids=None,
        from_token=StreamToken.START,
    ) -> T:
        """Wait until the callback returns a non empty response or the
        timeout fires.
        """
        user_stream = self.user_to_user_stream.get(user_id)
        if user_stream is None:
            current_token = self.event_sources.get_current_token()
            if room_ids is None:
                room_ids = await self.store.get_rooms_for_user(user_id)
            user_stream = _NotifierUserStream(
                user_id=user_id,
                rooms=room_ids,
                current_token=current_token,
                time_now_ms=self.clock.time_msec(),
            )
            self._register_with_keys(user_stream)

        result = None
        prev_token = from_token
        if timeout:
            end_time = self.clock.time_msec() + timeout

            while not result:
                try:
                    now = self.clock.time_msec()
                    if end_time <= now:
                        break

                    # Now we wait for the _NotifierUserStream to be told there
                    # is a new token.
                    listener = user_stream.new_listener(prev_token)
                    listener.deferred = timeout_deferred(
                        listener.deferred,
                        (end_time - now) / 1000.0,
                        self.hs.get_reactor(),
                    )

                    with start_active_span("wait_for_events.deferred"):
                        log_kv(
                            {
                                "wait_for_events": "sleep",
                                "token": prev_token,
                            }
                        )

                        with PreserveLoggingContext():
                            await listener.deferred

                        log_kv(
                            {
                                "wait_for_events": "woken",
                                "token": user_stream.current_token,
                            }
                        )

                    current_token = user_stream.current_token

                    result = await callback(prev_token, current_token)
                    log_kv(
                        {
                            "wait_for_events": "result",
                            "result": bool(result),
                        }
                    )
                    if result:
                        break

                    # Update the prev_token to the current_token since nothing
                    # has happened between the old prev_token and the current_token
                    prev_token = current_token
                except defer.TimeoutError:
                    log_kv({"wait_for_events": "timeout"})
                    break
                except defer.CancelledError:
                    log_kv({"wait_for_events": "cancelled"})
                    break

        if result is None:
            # This happens if there was no timeout or if the timeout has
            # already expired.
            current_token = user_stream.current_token
            result = await callback(prev_token, current_token)

        return result
Example #17
    async def _unsafe_process(self) -> None:
        """
        Looks for unsent notifications and dispatches them, in order
        Never call this directly: use _process which will only allow this to
        run once per pusher.
        """
        unprocessed = (
            await
            self.store.get_unread_push_actions_for_user_in_range_for_http(
                self.user_id, self.last_stream_ordering,
                self.max_stream_ordering))

        logger.info(
            "Processing %i unprocessed push actions for %s starting at "
            "stream_ordering %s",
            len(unprocessed),
            self.name,
            self.last_stream_ordering,
        )

        for push_action in unprocessed:
            with opentracing.start_active_span(
                    "http-push",
                    tags={
                        "authenticated_entity": self.user_id,
                        "event_id": push_action["event_id"],
                        "app_id": self.app_id,
                        "app_display_name": self.app_display_name,
                    },
            ):
                processed = await self._process_one(push_action)

            if processed:
                http_push_processed_counter.inc()
                self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
                self.last_stream_ordering = push_action["stream_ordering"]
                pusher_still_exists = (
                    await
                    self.store.update_pusher_last_stream_ordering_and_success(
                        self.app_id,
                        self.pushkey,
                        self.user_id,
                        self.last_stream_ordering,
                        self.clock.time_msec(),
                    ))
                if not pusher_still_exists:
                    # The pusher has been deleted while we were processing, so
                    # lets just stop and return.
                    self.on_stop()
                    return

                if self.failing_since:
                    self.failing_since = None
                    await self.store.update_pusher_failing_since(
                        self.app_id, self.pushkey, self.user_id,
                        self.failing_since)
            else:
                http_push_failed_counter.inc()
                if not self.failing_since:
                    self.failing_since = self.clock.time_msec()
                    await self.store.update_pusher_failing_since(
                        self.app_id, self.pushkey, self.user_id,
                        self.failing_since)

                if (self.failing_since and self.failing_since <
                        self.clock.time_msec() - HttpPusher.GIVE_UP_AFTER_MS):
                    # we really only give up so that if the URL gets
                    # fixed, we don't suddenly deliver a load
                    # of old notifications.
                    logger.warning(
                        "Giving up on a notification to user %s, pushkey %s",
                        self.user_id,
                        self.pushkey,
                    )
                    self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
                    self.last_stream_ordering = push_action["stream_ordering"]
                    await self.store.update_pusher_last_stream_ordering(
                        self.app_id,
                        self.pushkey,
                        self.user_id,
                        self.last_stream_ordering,
                    )

                    self.failing_since = None
                    await self.store.update_pusher_failing_since(
                        self.app_id, self.pushkey, self.user_id,
                        self.failing_since)
                else:
                    logger.info("Push failed: delaying for %ds",
                                self.backoff_delay)
                    self.timed_call = self.hs.get_reactor().callLater(
                        self.backoff_delay, self.on_timer)
                    self.backoff_delay = min(self.backoff_delay * 2,
                                             self.MAX_BACKOFF_SEC)
                    break
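
On failure the pusher doubles `backoff_delay` up to a cap before rescheduling itself. In isolation the progression looks like the sketch below; the constants are placeholders rather than Synapse's actual values.

# Illustrative exponential backoff with a cap, matching the doubling pattern above.
INITIAL_BACKOFF_SEC = 1
MAX_BACKOFF_SEC = 60 * 60  # cap at one hour for this sketch


def next_backoff(current: int) -> int:
    return min(current * 2, MAX_BACKOFF_SEC)


delay = INITIAL_BACKOFF_SEC
schedule = []
for _ in range(6):
    schedule.append(delay)
    delay = next_backoff(delay)
# schedule == [1, 2, 4, 8, 16, 32]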
Example #18
File: client.py, Project: waylon531/synapse
    async def request(self, method, uri, data=None, headers=None):
        """
        Args:
            method (str): HTTP method to use.
            uri (str): URI to query.
            data (bytes): Data to send in the request body, if applicable.
            headers (t.w.http_headers.Headers): Request headers.
        """
        # A small wrapper around self.agent.request() so we can easily attach
        # counters to it
        outgoing_requests_counter.labels(method).inc()

        # log request but strip `access_token` (AS requests for example include this)
        logger.debug("Sending request %s %s", method, redact_uri(uri))

        with start_active_span(
                "outgoing-client-request",
                tags={
                    tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
                    tags.HTTP_METHOD: method,
                    tags.HTTP_URL: uri,
                },
                finish_on_close=True,
        ):
            try:
                body_producer = None
                if data is not None:
                    body_producer = QuieterFileBodyProducer(
                        BytesIO(data),
                        cooperator=self._cooperator,
                    )

                request_deferred = treq.request(method,
                                                uri,
                                                agent=self.agent,
                                                data=body_producer,
                                                headers=headers,
                                                **self._extra_treq_args)
                request_deferred = timeout_deferred(
                    request_deferred,
                    60,
                    self.hs.get_reactor(),
                    cancelled_to_request_timed_out_error,
                )
                response = await make_deferred_yieldable(request_deferred)

                incoming_responses_counter.labels(method, response.code).inc()
                logger.info(
                    "Received response to %s %s: %s",
                    method,
                    redact_uri(uri),
                    response.code,
                )
                return response
            except Exception as e:
                incoming_responses_counter.labels(method, "ERR").inc()
                logger.info(
                    "Error sending request to  %s %s: %s %s",
                    method,
                    redact_uri(uri),
                    type(e).__name__,
                    e.args[0],
                )
                set_tag(tags.ERROR, True)
                set_tag("error_reason", e.args[0])
                raise
Example #19
File: _base.py, Project: samuelyi/synapse
        async def new_func(request: SynapseRequest, *args: Any,
                           **kwargs: str) -> Optional[Tuple[int, Any]]:
            """A callback which can be passed to HttpServer.RegisterPaths

            Args:
                request:
                *args: unused?
                **kwargs: the dict mapping keys to path components as specified
                    in the path match regexp.

            Returns:
                (response code, response object) as returned by the callback method.
                None if the request has already been handled.
            """
            content = None
            if request.method in [b"PUT", b"POST"]:
                # TODO: Handle other method types? other content types?
                content = parse_json_object_from_request(request)

            try:
                with start_active_span("authenticate_request"):
                    origin: Optional[
                        str] = await authenticator.authenticate_request(
                            request, content)
            except NoAuthenticationError:
                origin = None
                if self.REQUIRE_AUTH:
                    logger.warning(
                        "authenticate_request failed: missing authentication")
                    raise
            except Exception as e:
                logger.warning("authenticate_request failed: %s", e)
                raise

            # update the active opentracing span with the authenticated entity
            set_tag("authenticated_entity", origin)

            # if the origin is authenticated and whitelisted, use its span context
            # as the parent.
            context = None
            if origin and whitelisted_homeserver(origin):
                context = span_context_from_request(request)

            if context:
                servlet_span = active_span()
                # a scope which uses the origin's context as a parent
                processing_start_time = time.time()
                scope = start_active_span_follows_from(
                    "incoming-federation-request",
                    child_of=context,
                    contexts=(servlet_span, ),
                    start_time=processing_start_time,
                )

            else:
                # just use our context as a parent
                scope = start_active_span("incoming-federation-request", )

            try:
                with scope:
                    if origin and self.RATELIMIT:
                        with ratelimiter.ratelimit(origin) as d:
                            await d
                            if request._disconnected:
                                logger.warning(
                                    "client disconnected before we started processing "
                                    "request")
                                return None
                            response = await func(origin, content,
                                                  request.args, *args,
                                                  **kwargs)
                    else:
                        response = await func(origin, content, request.args,
                                              *args, **kwargs)
            finally:
                # if we used the origin's context as the parent, add a new span using
                # the servlet span as a parent, so that we have a link
                if context:
                    scope2 = start_active_span_follows_from(
                        "process-federation_request",
                        contexts=(scope.span, ),
                        start_time=processing_start_time,
                    )
                    scope2.close()

            return response
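
This wrapper parents the request span on the remote caller's context while also recording a link back to the local servlet span. With the plain OpenTracing API, both references can be passed when starting the span, as sketched below (illustrative, assuming a configured tracer):

import opentracing
from opentracing import child_of, follows_from


def start_linked_request_span(remote_context, servlet_span):
    # Sketch: parent on the remote context, and keep a FOLLOWS_FROM reference to
    # the local servlet span, mirroring the child_of/contexts arguments above.
    tracer = opentracing.global_tracer()
    return tracer.start_active_span(
        "incoming-federation-request",
        references=[child_of(remote_context), follows_from(servlet_span.context)],
    )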
Example #20
    async def request(
        self,
        method: str,
        uri: str,
        data: Optional[bytes] = None,
        headers: Optional[Headers] = None,
    ) -> IResponse:
        """
        Args:
            method: HTTP method to use.
            uri: URI to query.
            data: Data to send in the request body, if applicable.
            headers: Request headers.

        Returns:
            Response object, once the headers have been read.

        Raises:
            RequestTimedOutError if the request times out before the headers are read

        """
        outgoing_requests_counter.labels(method).inc()

        # log request but strip `access_token` (AS requests for example include this)
        logger.debug("Sending request %s %s", method, redact_uri(uri))

        with start_active_span(
                "outgoing-client-request",
                tags={
                    tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
                    tags.HTTP_METHOD: method,
                    tags.HTTP_URL: uri,
                },
                finish_on_close=True,
        ):
            try:
                body_producer = None
                if data is not None:
                    body_producer = QuieterFileBodyProducer(
                        BytesIO(data),
                        cooperator=self._cooperator,
                    )

                from skywalking import Layer, Component
                from skywalking.trace import tags as sw_tags
                from skywalking.trace.context import get_context
                from skywalking.trace.tags import Tag
                from yarl import URL

                context = get_context()

                url = URL(uri).with_user(None).with_password(None)
                peer = '%s:%d' % (url.host or '', url.port)

                span = context.new_exit_span(op=uri or "/", peer=peer)
                span.start()

                span.layer = Layer.Http
                span.component = Component.General
                span.tag(Tag(key=sw_tags.HttpMethod, val=method.upper()))
                span.tag(Tag(key=sw_tags.HttpUrl, val=uri))

                carrier = span.inject()
                for item in carrier:
                    headers.addRawHeader(item.key, item.val)

                request_deferred = treq.request(
                    method,
                    uri,
                    agent=self.agent,
                    data=body_producer,
                    headers=headers,
                    # Avoid buffering the body in treq since we do not reuse
                    # response bodies.
                    unbuffered=True,
                    **self._extra_treq_args,
                )  # type: defer.Deferred

                # we use our own timeout mechanism rather than treq's as a workaround
                # for https://twistedmatrix.com/trac/ticket/9534.
                request_deferred = timeout_deferred(
                    request_deferred,
                    60,
                    self.hs.get_reactor(),
                )

                # turn timeouts into RequestTimedOutErrors
                request_deferred.addErrback(
                    _timeout_to_request_timed_out_error)

                response = await make_deferred_yieldable(request_deferred)

                span.tag(
                    Tag(key=sw_tags.HttpStatus,
                        val=response.code,
                        overridable=True))
                if response.code >= 400:
                    span.error_occurred = True
                span.stop()

                incoming_responses_counter.labels(method, response.code).inc()
                logger.info(
                    "Received response to %s %s: %s",
                    method,
                    redact_uri(uri),
                    response.code,
                )
                return response
            except Exception as e:
                incoming_responses_counter.labels(method, "ERR").inc()
                logger.info(
                    "Error sending request to %s %s: %s %s",
                    method,
                    redact_uri(uri),
                    type(e).__name__,
                    e.args[0],
                )
                set_tag(tags.ERROR, True)
                set_tag("error_reason", e.args[0])
                raise
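The example above starts a SkyWalking exit span before sending the request but only reaches `span.stop()` on the success path; if `treq.request` or the await raises, the span is never closed. Here is a minimal sketch of the same pattern with the stop moved into a `finally` block. It assumes the skywalking-python API exactly as used above (older agent versions; newer ones manage spans as context managers), and `send_with_exit_span` / `send_request` are illustrative names, not Synapse functions.

# Sketch only: `send_request` is a hypothetical zero-argument coroutine that
# performs the HTTP call and returns a Twisted IResponse.
from skywalking import Component, Layer
from skywalking.trace import tags as sw_tags
from skywalking.trace.context import get_context
from skywalking.trace.tags import Tag


async def send_with_exit_span(send_request, method, uri, peer, headers):
    span = get_context().new_exit_span(op=uri or "/", peer=peer)
    span.start()
    span.layer = Layer.Http
    span.component = Component.General
    span.tag(Tag(key=sw_tags.HttpMethod, val=method.upper()))
    span.tag(Tag(key=sw_tags.HttpUrl, val=uri))

    # Propagate the trace context in the outgoing headers.
    for item in span.inject():
        headers.addRawHeader(item.key, item.val)

    try:
        response = await send_request()
        span.tag(Tag(key=sw_tags.HttpStatus, val=response.code, overridable=True))
        if response.code >= 400:
            span.error_occurred = True
        return response
    except Exception:
        span.error_occurred = True
        raise
    finally:
        # Unlike the example above, the span is closed on every code path.
        span.stop()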
Example #21
    async def request(
        self,
        method: str,
        uri: str,
        data: Optional[bytes] = None,
        headers: Optional[Headers] = None,
    ) -> IResponse:
        """
        Args:
            method: HTTP method to use.
            uri: URI to query.
            data: Data to send in the request body, if applicable.
            headers: Request headers.

        Returns:
            Response object, once the headers have been read.

        Raises:
            RequestTimedOutError if the request times out before the headers are read

        """
        outgoing_requests_counter.labels(method).inc()

        # log request but strip `access_token` (AS requests for example include this)
        logger.debug("Sending request %s %s", method, redact_uri(uri))

        with start_active_span(
                "outgoing-client-request",
                tags={
                    tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
                    tags.HTTP_METHOD: method,
                    tags.HTTP_URL: uri,
                },
                finish_on_close=True,
        ):
            try:
                body_producer = None
                if data is not None:
                    body_producer = QuieterFileBodyProducer(
                        BytesIO(data),
                        cooperator=self._cooperator,
                    )

                request_deferred = treq.request(
                    method,
                    uri,
                    agent=self.agent,
                    data=body_producer,
                    headers=headers,
                    **self._extra_treq_args,
                )  # type: defer.Deferred

                # we use our own timeout mechanism rather than treq's as a workaround
                # for https://twistedmatrix.com/trac/ticket/9534.
                request_deferred = timeout_deferred(
                    request_deferred,
                    60,
                    self.hs.get_reactor(),
                )

                # turn timeouts into RequestTimedOutErrors
                request_deferred.addErrback(
                    _timeout_to_request_timed_out_error)

                response = await make_deferred_yieldable(request_deferred)

                incoming_responses_counter.labels(method, response.code).inc()
                logger.info(
                    "Received response to %s %s: %s",
                    method,
                    redact_uri(uri),
                    response.code,
                )
                return response
            except Exception as e:
                incoming_responses_counter.labels(method, "ERR").inc()
                logger.info(
                    "Error sending request to %s %s: %s %s",
                    method,
                    redact_uri(uri),
                    type(e).__name__,
                    e.args[0],
                )
                set_tag(tags.ERROR, True)
                set_tag("error_reason", e.args[0])
                raise
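As a usage note for the `request` method above: it returns once the response headers have been read, so the caller still has to consume the body. A minimal, assumed usage sketch follows; `client` stands for an instance of the class that defines `request` (a SimpleHttpClient-style wrapper) and `fetch_json_bytes` is an illustrative name, while `treq.content` and `make_deferred_yieldable` are the real treq / Synapse helpers.

# Sketch only: fetch a URI through the wrapper and read the whole body.
import treq
from twisted.web.http_headers import Headers

from synapse.logging.context import make_deferred_yieldable


async def fetch_json_bytes(client, uri):
    response = await client.request(
        "GET",
        uri,
        headers=Headers({b"Accept": [b"application/json"]}),
    )
    if response.code // 100 != 2:
        raise RuntimeError("unexpected status %d from %s" % (response.code, uri))
    # treq.content returns a Deferred that fires with the full response body.
    return await make_deferred_yieldable(treq.content(response))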
Example #22
        async def new_func(request: SynapseRequest, *args: Any,
                           **kwargs: str) -> Optional[Tuple[int, Any]]:
            """A callback which can be passed to HttpServer.RegisterPaths

            Args:
                request:
                *args: unused?
                **kwargs: the dict mapping keys to path components as specified
                    in the path match regexp.

            Returns:
                (response code, response object) as returned by the callback method.
                None if the request has already been handled.
            """
            content = None
            if request.method in [b"PUT", b"POST"]:
                # TODO: Handle other method types? other content types?
                content = parse_json_object_from_request(request)

            try:
                origin: Optional[str] = await authenticator.authenticate_request(
                    request, content)
            except NoAuthenticationError:
                origin = None
                if self.REQUIRE_AUTH:
                    logger.warning(
                        "authenticate_request failed: missing authentication")
                    raise
            except Exception as e:
                logger.warning("authenticate_request failed: %s", e)
                raise

            request_tags = {
                SynapseTags.REQUEST_ID: request.get_request_id(),
                tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
                tags.HTTP_METHOD: request.get_method(),
                tags.HTTP_URL: request.get_redacted_uri(),
                tags.PEER_HOST_IPV6: request.getClientIP(),
                "authenticated_entity": origin,
                "servlet_name": request.request_metrics.name,
            }

            # Only accept the span context if the origin is authenticated
            # and whitelisted
            if origin and whitelisted_homeserver(origin):
                scope = start_active_span_from_request(
                    request, "incoming-federation-request", tags=request_tags)
            else:
                scope = start_active_span("incoming-federation-request",
                                          tags=request_tags)

            with scope:
                opentracing.inject_response_headers(request.responseHeaders)

                if origin and self.RATELIMIT:
                    with ratelimiter.ratelimit(origin) as d:
                        await d
                        if request._disconnected:
                            logger.warning(
                                "client disconnected before we started processing "
                                "request")
                            return None
                        response = await func(origin, content, request.args,
                                              *args, **kwargs)
                else:
                    response = await func(origin, content, request.args, *args,
                                          **kwargs)

            return response
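The whitelist check above only continues the remote trace when the origin is both authenticated and whitelisted; otherwise it starts a fresh local span with the same tags. A minimal sketch isolating that decision is shown below, assuming `whitelisted_homeserver`, `start_active_span_from_request` and `start_active_span` are importable from `synapse.logging.opentracing` as used in the example; `servlet_scope_for` is an illustrative name.

# Sketch only: pick the servlet span's parent based on whether we trust the
# remote homeserver enough to adopt its trace context.
from synapse.logging.opentracing import (
    start_active_span,
    start_active_span_from_request,
    whitelisted_homeserver,
)


def servlet_scope_for(request, origin, request_tags):
    if origin and whitelisted_homeserver(origin):
        # Trusted origin: continue the trace carried in the request headers.
        return start_active_span_from_request(
            request, "incoming-federation-request", tags=request_tags
        )
    # Unknown or untrusted origin: start a fresh trace instead.
    return start_active_span("incoming-federation-request", tags=request_tags)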