@defer.inlineCallbacks
def request(self, method, uri, data=None, headers=None):
    """
    Args:
        method (str): HTTP method to use.
        uri (str): URI to query.
        data (bytes): Data to send in the request body, if applicable.
        headers (t.w.http_headers.Headers): Request headers.

    Raises:
        SynapseError: If the IP is blacklisted.
    """
    # A small wrapper around self.agent.request() so we can easily attach
    # counters to it
    outgoing_requests_counter.labels(method).inc()

    # log request but strip `access_token` (AS requests for example include this)
    logger.info("Sending request %s %s", method, redact_uri(uri))

    try:
        body_producer = None
        if data is not None:
            body_producer = QuieterFileBodyProducer(BytesIO(data))

        request_deferred = treq.request(
            method,
            uri,
            agent=self.agent,
            data=body_producer,
            headers=headers,
            **self._extra_treq_args,
        )
        request_deferred = timeout_deferred(
            request_deferred,
            60,
            self.hs.get_reactor(),
            cancelled_to_request_timed_out_error,
        )
        response = yield make_deferred_yieldable(request_deferred)

        incoming_responses_counter.labels(method, response.code).inc()
        logger.info(
            "Received response to %s %s: %s", method, redact_uri(uri), response.code
        )
        defer.returnValue(response)
    except Exception as e:
        incoming_responses_counter.labels(method, "ERR").inc()
        logger.info(
            "Error sending request to %s %s: %s %s",
            method,
            redact_uri(uri),
            type(e).__name__,
            e.args[0],
        )
        raise
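
# A minimal usage sketch for the inlineCallbacks-style version above. This is a
# hypothetical caller, not part of the original code: `client` stands in for an
# instance of the class defining request(), and the caller must itself be driven
# from another inlineCallbacks function or a reactor callback.
@defer.inlineCallbacks
def fetch_url(client, url):
    response = yield client.request("GET", url)
    body = yield treq.content(response)  # read the body once the headers arrive
    defer.returnValue((response.code, body))
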
async def request(
    self,
    method: str,
    uri: str,
    data: Optional[bytes] = None,
    headers: Optional[Headers] = None,
) -> IResponse:
    """
    Args:
        method: HTTP method to use.
        uri: URI to query.
        data: Data to send in the request body, if applicable.
        headers: Request headers.

    Returns:
        Response object, once the headers have been read.

    Raises:
        RequestTimedOutError if the request times out before the headers are read
    """
    outgoing_requests_counter.labels(method).inc()

    # log request but strip `access_token` (AS requests for example include this)
    logger.debug("Sending request %s %s", method, redact_uri(uri))

    with start_active_span(
        "outgoing-client-request",
        tags={
            tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
            tags.HTTP_METHOD: method,
            tags.HTTP_URL: uri,
        },
        finish_on_close=True,
    ):
        try:
            body_producer = None
            if data is not None:
                body_producer = QuieterFileBodyProducer(
                    BytesIO(data),
                    cooperator=self._cooperator,
                )

            request_deferred = treq.request(
                method,
                uri,
                agent=self.agent,
                data=body_producer,
                headers=headers,
                **self._extra_treq_args,
            )  # type: defer.Deferred

            # we use our own timeout mechanism rather than treq's as a workaround
            # for https://twistedmatrix.com/trac/ticket/9534.
            request_deferred = timeout_deferred(
                request_deferred,
                60,
                self.hs.get_reactor(),
            )

            # turn timeouts into RequestTimedOutErrors
            request_deferred.addErrback(_timeout_to_request_timed_out_error)

            response = await make_deferred_yieldable(request_deferred)

            incoming_responses_counter.labels(method, response.code).inc()
            logger.info(
                "Received response to %s %s: %s",
                method,
                redact_uri(uri),
                response.code,
            )
            return response
        except Exception as e:
            incoming_responses_counter.labels(method, "ERR").inc()
            logger.info(
                "Error sending request to %s %s: %s %s",
                method,
                redact_uri(uri),
                type(e).__name__,
                e.args[0],
            )
            set_tag(tags.ERROR, True)
            set_tag("error_reason", e.args[0])
            raise
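
# The _timeout_to_request_timed_out_error errback added above is not shown in
# this section. A sketch of what such a converter might look like, assuming
# Twisted's standard failure types and that RequestTimedOutError comes from the
# surrounding module (the exact implementation may differ):
from twisted.internet import defer, error as twisted_error
from twisted.python.failure import Failure
from twisted.web.client import ResponseNeverReceived


def _timeout_to_request_timed_out_error(f: Failure):
    if f.check(twisted_error.TimeoutError, twisted_error.ConnectingCancelledError):
        # The TCP connection has its own timeout (set by the 'connectTimeout'
        # param on the Agent), which raises twisted_error.TimeoutError.
        raise RequestTimedOutError("Timeout connecting to remote server")
    elif f.check(defer.TimeoutError, ResponseNeverReceived):
        # this one means we hit the overall timeout on the request
        raise RequestTimedOutError("Timeout waiting for response from remote server")
    return f
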
async def request(self, method, uri, data=None, headers=None):
    """
    Args:
        method (str): HTTP method to use.
        uri (str): URI to query.
        data (bytes): Data to send in the request body, if applicable.
        headers (t.w.http_headers.Headers): Request headers.
    """
    # A small wrapper around self.agent.request() so we can easily attach
    # counters to it
    outgoing_requests_counter.labels(method).inc()

    # log request but strip `access_token` (AS requests for example include this)
    logger.debug("Sending request %s %s", method, redact_uri(uri))

    with start_active_span(
        "outgoing-client-request",
        tags={
            tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
            tags.HTTP_METHOD: method,
            tags.HTTP_URL: uri,
        },
        finish_on_close=True,
    ):
        try:
            body_producer = None
            if data is not None:
                body_producer = QuieterFileBodyProducer(
                    BytesIO(data),
                    cooperator=self._cooperator,
                )

            request_deferred = treq.request(
                method,
                uri,
                agent=self.agent,
                data=body_producer,
                headers=headers,
                **self._extra_treq_args,
            )
            request_deferred = timeout_deferred(
                request_deferred,
                60,
                self.hs.get_reactor(),
                cancelled_to_request_timed_out_error,
            )
            response = await make_deferred_yieldable(request_deferred)

            incoming_responses_counter.labels(method, response.code).inc()
            logger.info(
                "Received response to %s %s: %s",
                method,
                redact_uri(uri),
                response.code,
            )
            return response
        except Exception as e:
            incoming_responses_counter.labels(method, "ERR").inc()
            logger.info(
                "Error sending request to %s %s: %s %s",
                method,
                redact_uri(uri),
                type(e).__name__,
                e.args[0],
            )
            set_tag(tags.ERROR, True)
            set_tag("error_reason", e.args[0])
            raise
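
# All of the request() variants above wrap the request body in
# QuieterFileBodyProducer. A sketch of that helper, assuming it subclasses
# Twisted's FileBodyProducer to swallow the noisy failure raised when a
# producer is stopped after the connection has already gone away:
from twisted.internet import task
from twisted.web.client import FileBodyProducer


class QuieterFileBodyProducer(FileBodyProducer):
    """FileBodyProducer that silences the error from stopProducing().

    Works around https://twistedmatrix.com/trac/ticket/8945
    """

    def stopProducing(self):
        try:
            FileBodyProducer.stopProducing(self)
        except task.TaskStopped:
            pass
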
@defer.inlineCallbacks
def _send_request(
    self,
    request,
    retry_on_dns_fail=True,
    timeout=None,
    long_retries=False,
    ignore_backoff=False,
    backoff_on_404=False,
):
    """
    Sends a request to the given server.

    Args:
        request (MatrixFederationRequest): details of request to be sent
        retry_on_dns_fail (bool): true if the request should be retried on
            DNS failures
        timeout (int|None): number of milliseconds to wait for the response headers
            (including connecting to the server), *for each attempt*.
            60s by default.
        long_retries (bool): whether to use the long retry algorithm.
            The regular retry algorithm makes 4 attempts, with intervals
            [0.5s, 1s, 2s].
            The long retry algorithm makes 11 attempts, with intervals
            [4s, 16s, 60s, 60s, ...]
            Both algorithms add -20%/+40% jitter to the retry intervals.
            Note that the above intervals are *in addition* to the time spent
            waiting for the request to complete (up to `timeout` ms).
            NB: the long retry algorithm takes over 20 minutes to complete, with
            a default timeout of 60s!
        ignore_backoff (bool): true to ignore the historical backoff data
            and try the request anyway.
        backoff_on_404 (bool): Back off if we get a 404

    Returns:
        Deferred[twisted.web.client.Response]: resolves with the HTTP
        response object on success.

    Raises:
        HttpResponseException: If we get an HTTP response code >= 300
            (except 429).
        NotRetryingDestination: If we are not yet ready to retry this
            server.
        FederationDeniedError: If this destination is not on our
            federation whitelist
        RequestSendFailed: If there were problems connecting to the
            remote, due to e.g. DNS failures, connection timeouts etc.
    """
    if timeout:
        _sec_timeout = timeout / 1000
    else:
        _sec_timeout = self.default_timeout

    if (
        self.hs.config.federation_domain_whitelist is not None
        and request.destination not in self.hs.config.federation_domain_whitelist
    ):
        raise FederationDeniedError(request.destination)

    limiter = yield synapse.util.retryutils.get_retry_limiter(
        request.destination,
        self.clock,
        self._store,
        backoff_on_404=backoff_on_404,
        ignore_backoff=ignore_backoff,
    )

    method_bytes = request.method.encode("ascii")
    destination_bytes = request.destination.encode("ascii")
    path_bytes = request.path.encode("ascii")
    if request.query:
        query_bytes = encode_query_args(request.query)
    else:
        query_bytes = b""

    # Start an opentracing span for the outgoing request
    scope = opentracing.start_active_span(
        "outgoing-federation-request",
        tags={
            opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_CLIENT,
            opentracing.tags.PEER_ADDRESS: request.destination,
            opentracing.tags.HTTP_METHOD: request.method,
            opentracing.tags.HTTP_URL: request.path,
        },
        finish_on_close=True,
    )

    # Inject the span into the headers
    headers_dict = {}
    opentracing.inject_active_span_byte_dict(headers_dict, request.destination)

    headers_dict[b"User-Agent"] = [self.version_string_bytes]

    with limiter, scope:
        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        url_bytes = urllib.parse.urlunparse(
            (b"matrix", destination_bytes, path_bytes, None, query_bytes, b"")
        )
        url_str = url_bytes.decode("ascii")

        url_to_sign_bytes = urllib.parse.urlunparse(
            (b"", b"", path_bytes, None, query_bytes, b"")
        )

        while True:
            try:
                json = request.get_json()
                if json:
                    headers_dict[b"Content-Type"] = [b"application/json"]
                    auth_headers = self.build_auth_headers(
                        destination_bytes, method_bytes, url_to_sign_bytes, json
                    )
                    data = encode_canonical_json(json)
                    producer = QuieterFileBodyProducer(
                        BytesIO(data), cooperator=self._cooperator
                    )
                else:
                    producer = None
                    auth_headers = self.build_auth_headers(
                        destination_bytes, method_bytes, url_to_sign_bytes
                    )

                headers_dict[b"Authorization"] = auth_headers

                logger.info(
                    "{%s} [%s] Sending request: %s %s; timeout %fs",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _sec_timeout,
                )

                try:
                    with Measure(self.clock, "outbound_request"):
                        # we don't want all the fancy cookie and redirect handling
                        # that treq.request gives: just use the raw Agent.
                        request_deferred = self.agent.request(
                            method_bytes,
                            url_bytes,
                            headers=Headers(headers_dict),
                            bodyProducer=producer,
                        )
                        request_deferred = timeout_deferred(
                            request_deferred,
                            timeout=_sec_timeout,
                            reactor=self.reactor,
                        )

                        response = yield request_deferred
                except DNSLookupError as e:
                    raise_from(RequestSendFailed(e, can_retry=retry_on_dns_fail), e)
                except Exception as e:
                    logger.info("Failed to send request: %s", e)
                    raise_from(RequestSendFailed(e, can_retry=True), e)

                logger.info(
                    "{%s} [%s] Got response headers: %d %s",
                    request.txn_id,
                    request.destination,
                    response.code,
                    response.phrase.decode("ascii", errors="replace"),
                )

                opentracing.set_tag(opentracing.tags.HTTP_STATUS_CODE, response.code)

                if 200 <= response.code < 300:
                    pass
                else:
                    # :'(
                    # Update transactions table?
                    d = treq.content(response)
                    d = timeout_deferred(d, timeout=_sec_timeout, reactor=self.reactor)

                    try:
                        body = yield make_deferred_yieldable(d)
                    except Exception as e:
                        # Eh, we're already going to raise an exception so lets
                        # ignore if this fails.
                        logger.warning(
                            "{%s} [%s] Failed to get error response: %s %s: %s",
                            request.txn_id,
                            request.destination,
                            request.method,
                            url_str,
                            _flatten_response_never_received(e),
                        )
                        body = None

                    e = HttpResponseException(response.code, response.phrase, body)

                    # Retry if the error is a 429 (Too Many Requests),
                    # otherwise just raise a standard HttpResponseException
                    if response.code == 429:
                        raise_from(RequestSendFailed(e, can_retry=True), e)
                    else:
                        raise e

                break
            except RequestSendFailed as e:
                logger.warning(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e.inner_exception),
                )

                if not e.can_retry:
                    raise

                if retries_left and not timeout:
                    if long_retries:
                        delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                        delay = min(delay, 60)
                        delay *= random.uniform(0.8, 1.4)
                    else:
                        delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                        delay = min(delay, 2)
                        delay *= random.uniform(0.8, 1.4)

                    logger.debug(
                        "{%s} [%s] Waiting %ss before re-sending...",
                        request.txn_id,
                        request.destination,
                        delay,
                    )

                    yield self.clock.sleep(delay)
                    retries_left -= 1
                else:
                    raise

            except Exception as e:
                logger.warning(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e),
                )
                raise

    return response
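
# The retry arithmetic above, pulled out as a standalone sketch so the schedule
# described in the docstring is easy to check. MAX_LONG_RETRIES=10 and
# MAX_SHORT_RETRIES=3 are assumed values for the module-level constants:
import random

MAX_LONG_RETRIES = 10
MAX_SHORT_RETRIES = 3


def retry_delay(retries_left, long_retries):
    """Returns the jittered back-off delay, in seconds, before the next attempt."""
    if long_retries:
        # 4s, 16s, then capped at 60s for the remaining attempts
        delay = min(4 ** (MAX_LONG_RETRIES + 1 - retries_left), 60)
    else:
        # 0.5s, 1s, then capped at 2s
        delay = min(0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left), 2)
    return delay * random.uniform(0.8, 1.4)  # -20%/+40% jitter
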
async def request(
    self,
    method: str,
    uri: str,
    data: Optional[bytes] = None,
    headers: Optional[Headers] = None,
) -> IResponse:
    """
    Args:
        method: HTTP method to use.
        uri: URI to query.
        data: Data to send in the request body, if applicable.
        headers: Request headers.

    Returns:
        Response object, once the headers have been read.

    Raises:
        RequestTimedOutError if the request times out before the headers are read
    """
    outgoing_requests_counter.labels(method).inc()

    # log request but strip `access_token` (AS requests for example include this)
    logger.debug("Sending request %s %s", method, redact_uri(uri))

    with start_active_span(
        "outgoing-client-request",
        tags={
            tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
            tags.HTTP_METHOD: method,
            tags.HTTP_URL: uri,
        },
        finish_on_close=True,
    ):
        span = None
        try:
            body_producer = None
            if data is not None:
                body_producer = QuieterFileBodyProducer(
                    BytesIO(data),
                    cooperator=self._cooperator,
                )

            from skywalking import Layer, Component
            from skywalking.trace import tags as sw_tags
            from skywalking.trace.context import get_context
            from skywalking.trace.tags import Tag
            from yarl import URL

            # Open a SkyWalking exit span for the outgoing request, with any
            # credentials stripped from the peer address.
            context = get_context()
            url = URL(uri).with_user(None).with_password(None)
            peer = "%s:%d" % (url.host or "", url.port)
            span = context.new_exit_span(op=uri or "/", peer=peer)
            span.start()
            span.layer = Layer.Http
            span.component = Component.General
            span.tag(Tag(key=sw_tags.HttpMethod, val=method.upper()))
            span.tag(Tag(key=sw_tags.HttpUrl, val=uri))

            # Propagate the trace context on the outgoing request headers.
            # (headers may legitimately be None, so create an empty set first.)
            if headers is None:
                headers = Headers()
            carrier = span.inject()
            for item in carrier:
                headers.addRawHeader(item.key, item.val)

            request_deferred = treq.request(
                method,
                uri,
                agent=self.agent,
                data=body_producer,
                headers=headers,
                # Avoid buffering the body in treq since we do not reuse
                # response bodies.
                unbuffered=True,
                **self._extra_treq_args,
            )  # type: defer.Deferred

            # we use our own timeout mechanism rather than treq's as a workaround
            # for https://twistedmatrix.com/trac/ticket/9534.
            request_deferred = timeout_deferred(
                request_deferred,
                60,
                self.hs.get_reactor(),
            )

            # turn timeouts into RequestTimedOutErrors
            request_deferred.addErrback(_timeout_to_request_timed_out_error)

            response = await make_deferred_yieldable(request_deferred)

            span.tag(Tag(key=sw_tags.HttpStatus, val=response.code, overridable=True))
            if response.code >= 400:
                span.error_occurred = True
            span.stop()

            incoming_responses_counter.labels(method, response.code).inc()
            logger.info(
                "Received response to %s %s: %s",
                method,
                redact_uri(uri),
                response.code,
            )
            return response
        except Exception as e:
            # Close the SkyWalking span on the error path too, if it was opened.
            if span is not None:
                span.error_occurred = True
                span.stop()
            incoming_responses_counter.labels(method, "ERR").inc()
            logger.info(
                "Error sending request to %s %s: %s %s",
                method,
                redact_uri(uri),
                type(e).__name__,
                e.args[0],
            )
            set_tag(tags.ERROR, True)
            set_tag("error_reason", e.args[0])
            raise
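
# A quick illustration of the peer computation in the SkyWalking variant above:
# yarl substitutes the scheme's default port when none is given, and clearing
# the user also drops any password from the URI. The URI here is hypothetical:
from yarl import URL

url = URL("https://alice:secret@matrix.example.org/_matrix/key/v2/server")
url = url.with_user(None).with_password(None)
print("%s:%d" % (url.host or "", url.port))  # -> matrix.example.org:443
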
async def _send_request(
    self,
    request: MatrixFederationRequest,
    retry_on_dns_fail: bool = True,
    timeout: Optional[int] = None,
    long_retries: bool = False,
    ignore_backoff: bool = False,
    backoff_on_404: bool = False,
) -> IResponse:
    """
    Sends a request to the given server.

    Args:
        request: details of request to be sent
        retry_on_dns_fail: true if the request should be retried on DNS failures
        timeout: number of milliseconds to wait for the response headers
            (including connecting to the server), *for each attempt*.
            60s by default.
        long_retries: whether to use the long retry algorithm.
            The regular retry algorithm makes 4 attempts, with intervals
            [0.5s, 1s, 2s].
            The long retry algorithm makes 11 attempts, with intervals
            [4s, 16s, 60s, 60s, ...]
            Both algorithms add -20%/+40% jitter to the retry intervals.
            Note that the above intervals are *in addition* to the time spent
            waiting for the request to complete (up to `timeout` ms).
            NB: the long retry algorithm takes over 20 minutes to complete, with
            a default timeout of 60s!
        ignore_backoff: true to ignore the historical backoff data
            and try the request anyway.
        backoff_on_404: Back off if we get a 404

    Returns:
        Resolves with the HTTP response object on success.

    Raises:
        HttpResponseException: If we get an HTTP response code >= 300
            (except 429).
        NotRetryingDestination: If we are not yet ready to retry this
            server.
        FederationDeniedError: If this destination is not on our
            federation whitelist
        RequestSendFailed: If there were problems connecting to the
            remote, due to e.g. DNS failures, connection timeouts etc.
    """
    if timeout:
        _sec_timeout = timeout / 1000
    else:
        _sec_timeout = self.default_timeout

    if (
        self.hs.config.federation.federation_domain_whitelist is not None
        and request.destination
        not in self.hs.config.federation.federation_domain_whitelist
    ):
        raise FederationDeniedError(request.destination)

    limiter = await synapse.util.retryutils.get_retry_limiter(
        request.destination,
        self.clock,
        self._store,
        backoff_on_404=backoff_on_404,
        ignore_backoff=ignore_backoff,
        notifier=self.hs.get_notifier(),
        replication_client=self.hs.get_replication_command_handler(),
    )

    method_bytes = request.method.encode("ascii")
    destination_bytes = request.destination.encode("ascii")
    path_bytes = request.path.encode("ascii")
    query_bytes = encode_query_args(request.query)

    scope = start_active_span(
        "outgoing-federation-request",
        tags={
            tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
            tags.PEER_ADDRESS: request.destination,
            tags.HTTP_METHOD: request.method,
            tags.HTTP_URL: request.path,
        },
        finish_on_close=True,
    )

    # Inject the span into the headers
    headers_dict: Dict[bytes, List[bytes]] = {}
    opentracing.inject_header_dict(headers_dict, request.destination)

    headers_dict[b"User-Agent"] = [self.version_string_bytes]

    with limiter, scope:
        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        url_bytes = request.uri
        url_str = url_bytes.decode("ascii")

        url_to_sign_bytes = urllib.parse.urlunparse(
            (b"", b"", path_bytes, None, query_bytes, b"")
        )

        while True:
            try:
                json = request.get_json()
                if json:
                    headers_dict[b"Content-Type"] = [b"application/json"]
                    auth_headers = self.build_auth_headers(
                        destination_bytes, method_bytes, url_to_sign_bytes, json
                    )
                    data = encode_canonical_json(json)
                    producer: Optional[IBodyProducer] = QuieterFileBodyProducer(
                        BytesIO(data), cooperator=self._cooperator
                    )
                else:
                    producer = None
                    auth_headers = self.build_auth_headers(
                        destination_bytes, method_bytes, url_to_sign_bytes
                    )

                headers_dict[b"Authorization"] = auth_headers

                logger.debug(
                    "{%s} [%s] Sending request: %s %s; timeout %fs",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _sec_timeout,
                )

                outgoing_requests_counter.labels(request.method).inc()

                try:
                    with Measure(self.clock, "outbound_request"):
                        # we don't want all the fancy cookie and redirect handling
                        # that treq.request gives: just use the raw Agent.

                        # To preserve the logging context, the timeout is treated
                        # in a similar way to `defer.gatherResults`:
                        # * Each logging context-preserving fork is wrapped in
                        #   `run_in_background`. In this case there is only one,
                        #   since the timeout fork is not logging-context aware.
                        # * The `Deferred` that joins the forks back together is
                        #   wrapped in `make_deferred_yieldable` to restore the
                        #   logging context regardless of the path taken.
                        request_deferred = run_in_background(
                            self.agent.request,
                            method_bytes,
                            url_bytes,
                            headers=Headers(headers_dict),
                            bodyProducer=producer,
                        )
                        request_deferred = timeout_deferred(
                            request_deferred,
                            timeout=_sec_timeout,
                            reactor=self.reactor,
                        )

                        response = await make_deferred_yieldable(request_deferred)
                except DNSLookupError as e:
                    raise RequestSendFailed(e, can_retry=retry_on_dns_fail) from e
                except Exception as e:
                    raise RequestSendFailed(e, can_retry=True) from e

                incoming_responses_counter.labels(request.method, response.code).inc()

                set_tag(tags.HTTP_STATUS_CODE, response.code)
                response_phrase = response.phrase.decode("ascii", errors="replace")

                if 200 <= response.code < 300:
                    logger.debug(
                        "{%s} [%s] Got response headers: %d %s",
                        request.txn_id,
                        request.destination,
                        response.code,
                        response_phrase,
                    )
                else:
                    logger.info(
                        "{%s} [%s] Got response headers: %d %s",
                        request.txn_id,
                        request.destination,
                        response.code,
                        response_phrase,
                    )

                    # :'(
                    # Update transactions table?
                    d = treq.content(response)
                    d = timeout_deferred(d, timeout=_sec_timeout, reactor=self.reactor)

                    try:
                        body = await make_deferred_yieldable(d)
                    except Exception as e:
                        # Eh, we're already going to raise an exception so lets
                        # ignore if this fails.
                        logger.warning(
                            "{%s} [%s] Failed to get error response: %s %s: %s",
                            request.txn_id,
                            request.destination,
                            request.method,
                            url_str,
                            _flatten_response_never_received(e),
                        )
                        body = None

                    exc = HttpResponseException(response.code, response_phrase, body)

                    # Retry if the error is a 5xx or a 429 (Too Many
                    # Requests), otherwise just raise a standard
                    # `HttpResponseException`
                    if 500 <= response.code < 600 or response.code == 429:
                        raise RequestSendFailed(exc, can_retry=True) from exc
                    else:
                        raise exc

                break
            except RequestSendFailed as e:
                logger.info(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e.inner_exception),
                )

                if not e.can_retry:
                    raise

                if retries_left and not timeout:
                    if long_retries:
                        delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                        delay = min(delay, 60)
                        delay *= random.uniform(0.8, 1.4)
                    else:
                        delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                        delay = min(delay, 2)
                        delay *= random.uniform(0.8, 1.4)

                    logger.debug(
                        "{%s} [%s] Waiting %ss before re-sending...",
                        request.txn_id,
                        request.destination,
                        delay,
                    )

                    # Sleep for the calculated delay, or wake up immediately
                    # if we get notified that the server is back up.
                    await self._sleeper.sleep(request.destination, delay * 1000)
                    retries_left -= 1
                else:
                    raise

            except Exception as e:
                logger.warning(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e),
                )
                raise

    return response
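
# build_auth_headers() is not shown in this section. Per the Matrix federation
# spec, the request is canonicalised into a JSON object and signed with the
# origin server's ed25519 key, yielding one "X-Matrix ..." Authorization header
# per signing key. A hedged sketch of that idea -- the function name, parameter
# list, and exact header formatting here are illustrative, not the real
# implementation; signing_key is assumed to be a signedjson signing key:
from typing import Any, Dict, List, Optional

from signedjson.sign import sign_json


def build_auth_headers_sketch(
    origin: str,
    destination: str,
    signing_key: Any,
    method: bytes,
    url_to_sign: bytes,
    content: Optional[Dict[str, Any]] = None,
) -> List[bytes]:
    request_dict: Dict[str, Any] = {
        "method": method.decode("ascii"),
        "uri": url_to_sign.decode("ascii"),
        "origin": origin,
        "destination": destination,
    }
    if content is not None:
        request_dict["content"] = content

    # sign_json adds a "signatures" key: {origin: {key_id: base64_signature}}
    signed = sign_json(request_dict, origin, signing_key)

    return [
        ('X-Matrix origin=%s,key="%s",sig="%s"' % (origin, key_id, sig)).encode("ascii")
        for key_id, sig in signed["signatures"][origin].items()
    ]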