def _request_token(
    self,
    email: str,
    client_secret: str,
    ip: str = "127.0.0.1",
) -> str:
    """Request a password-reset validation token for an email address.

    Posts to the password-reset requestToken endpoint and returns the ID
    of the newly created validation session.

    Raises:
        HttpResponseException: if the endpoint responds with a non-200 code.
    """
    request_body = {
        "client_secret": client_secret,
        "email": email,
        "send_attempt": 1,
    }
    channel = self.make_request(
        "POST",
        b"account/password/email/requestToken",
        request_body,
        client_ip=ip,
    )

    if channel.code != 200:
        raise HttpResponseException(
            channel.code,
            channel.result["reason"],
            channel.result["body"],
        )

    return channel.json_body["sid"]
def create_user(self, localpart, token=None, appservice=False):
    """Register a user via /register and return their access token.

    Args:
        localpart: localpart of the user to create
        token: access token to authenticate the registration request with
        appservice: if True, register as an application-service user

    Raises:
        SynapseError: if the registration request does not return a 200.
    """
    body = {
        "username": localpart,
        "password": "******",
        "auth": {"type": LoginType.DUMMY},
    }
    if appservice:
        body["type"] = APP_SERVICE_REGISTRATION_TYPE

    channel = self.make_request(
        "POST",
        "/register",
        body,
        access_token=token,
    )

    if channel.code != 200:
        raise HttpResponseException(
            channel.code, channel.result["reason"], channel.result["body"]
        ).to_synapse_error()

    return channel.json_body["access_token"]
def do_sync_for_user(self, token):
    """Perform a /sync as the user owning `token`.

    Raises:
        SynapseError: if the sync request does not return a 200.
    """
    channel = self.make_request("GET", "/sync", access_token=token)

    if channel.code != 200:
        raise HttpResponseException(
            channel.code, channel.result["reason"], channel.result["body"]
        ).to_synapse_error()
def get_raw(self, uri, args=None, headers=None):
    """ Gets raw text from the given URI.

    Args:
        uri (str): The URI to request, not including query parameters
        args (dict|None): A dictionary used to create query strings, defaults
            to None.
            **Note**: The value of each key is assumed to be an iterable
            and *not* a string.
        headers (dict[str, List[str]]|None):
            If not None, a map from header name to a list of values for
            that header
    Returns:
        Deferred: Succeeds when we get *any* 2xx HTTP response, with the
        HTTP body at text.
    Raises:
        HttpResponseException on a non-2xx HTTP response.
    """
    # FIX: `args` previously defaulted to a shared mutable `{}`; a None
    # default avoids the mutable-default-argument pitfall. `if args:` keeps
    # the old "skip when empty" behaviour and also handles None.
    if args:
        query_bytes = urllib.parse.urlencode(args, True)
        uri = "%s?%s" % (uri, query_bytes)

    actual_headers = {b"User-Agent": [self.user_agent]}
    if headers:
        actual_headers.update(headers)

    response = yield self.request("GET", uri, headers=Headers(actual_headers))

    body = yield make_deferred_yieldable(readBody(response))

    if 200 <= response.code < 300:
        defer.returnValue(body)
    else:
        raise HttpResponseException(response.code, response.phrase, body)
def do_sync_for_user(self, token):
    """Run a /sync for the given access token via the test resource.

    Raises:
        SynapseError: if the sync request does not return a 200.
    """
    request, channel = make_request(b"GET", b"/sync", access_token=token)
    render(request, self.resource, self.reactor)

    code = channel.result["code"]
    if code != b"200":
        raise HttpResponseException(
            int(code),
            channel.result["reason"],
            channel.result["body"],
        ).to_synapse_error()
def do_sync_for_user(self, token):
    """Run a /sync for the given access token via the test resource.

    Raises:
        SynapseError: if the sync request does not return a 200.
    """
    request, channel = make_request(
        "GET", "/sync", access_token=token.encode('ascii')
    )
    render(request, self.resource, self.reactor)

    if channel.code != 200:
        raise HttpResponseException(
            channel.code, channel.result["reason"], channel.result["body"]
        ).to_synapse_error()
async def post_urlencoded_get_json(
    self,
    uri: str,
    args: Optional[Mapping[str, Union[str, List[str]]]] = None,
    headers: Optional[RawHeaders] = None,
) -> Any:
    """
    Args:
        uri: uri to query
        args: parameters to be url-encoded in the body
        headers: a map from header name to a list of values for that header

    Returns:
        parsed json

    Raises:
        RequestTimedOutError: if there is a timeout before the response headers
            are received. Note there is currently no timeout on reading the
            response body.

        HttpResponseException: On a non-2xx HTTP response.

        ValueError: if the response was not JSON
    """
    # TODO: Do we ever want to log message contents?
    logger.debug("post_urlencoded_get_json args: %s", args)

    # FIX: `args` previously defaulted to a shared mutable `{}`; default to
    # None to avoid the mutable-default-argument pitfall (behaviour unchanged).
    query_bytes = urllib.parse.urlencode(
        encode_urlencode_args(args or {}), True
    ).encode("utf8")

    actual_headers = {
        b"Content-Type": [b"application/x-www-form-urlencoded"],
        b"User-Agent": [self.user_agent],
        b"Accept": [b"application/json"],
    }
    if headers:
        actual_headers.update(headers)

    response = await self.request(
        "POST", uri, headers=Headers(actual_headers), data=query_bytes
    )

    body = await make_deferred_yieldable(readBody(response))

    if 200 <= response.code < 300:
        return json_decoder.decode(body.decode("utf-8"))
    else:
        raise HttpResponseException(
            response.code, response.phrase.decode("ascii", errors="replace"), body
        )
async def put_json(
    self,
    uri: str,
    json_body: Any,
    args: Optional[QueryParams] = None,
    # annotation fix: the default is None, so the type must be Optional
    headers: Optional[RawHeaders] = None,
) -> Any:
    """Puts some json to the given URI.

    Args:
        uri: The URI to request, not including query parameters
        json_body: The JSON to put in the HTTP body,
        args: A dictionary used to create query strings
        headers: a map from header name to a list of values for that header

    Returns:
        Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON.

    Raises:
        RequestTimedOutError: if there is a timeout before the response headers
            are received. Note there is currently no timeout on reading the
            response body.

        HttpResponseException On a non-2xx HTTP response.

        ValueError: if the response was not JSON
    """
    if args:
        query_str = urllib.parse.urlencode(args, True)
        uri = "%s?%s" % (uri, query_str)

    json_str = encode_canonical_json(json_body)

    actual_headers = {
        b"Content-Type": [b"application/json"],
        b"User-Agent": [self.user_agent],
        b"Accept": [b"application/json"],
    }
    if headers:
        actual_headers.update(headers)  # type: ignore

    response = await self.request(
        "PUT", uri, headers=Headers(actual_headers), data=json_str
    )

    body = await make_deferred_yieldable(readBody(response))

    if 200 <= response.code < 300:
        return json_decoder.decode(body.decode("utf-8"))
    else:
        raise HttpResponseException(
            response.code, response.phrase.decode("ascii", errors="replace"), body
        )
def test_create_user_mau_limit_reached_active_admin(self):
    """
    Check that an admin can register a new user via the admin API
    even if the MAU limit is reached.
    Admin user was active before creating user.
    """
    handler = self.hs.get_registration_handler()

    # Sync to set admin user to active
    # before limit of monthly active users is reached
    request, channel = self.make_request(
        "GET", "/sync", access_token=self.admin_user_tok
    )
    self.render(request)

    if channel.code != 200:
        raise HttpResponseException(
            channel.code, channel.result["reason"], channel.result["body"]
        )

    # Set monthly active users to the limit
    self.store.get_monthly_active_count = Mock(
        return_value=make_awaitable(self.hs.config.max_mau_value)
    )
    # Check that the blocking of monthly active users is working as expected
    # The registration of a new user fails due to the limit
    self.get_failure(
        handler.register_user(localpart="local_part"), ResourceLimitError
    )

    # Register new user with admin API
    url = "/_synapse/admin/v2/users/@bob:test"

    # Create user
    body = json.dumps({"password": "******", "admin": False})

    request, channel = self.make_request(
        "PUT",
        url,
        access_token=self.admin_user_tok,
        content=body.encode(encoding="utf_8"),
    )
    self.render(request)

    # 201 Created: the user was registered despite the MAU limit being hit,
    # because the request came through the admin API.
    self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
    self.assertEqual("@bob:test", channel.json_body["name"])
    self.assertEqual(False, channel.json_body["admin"])
async def post_json_get_json(
    self, uri: str, post_json: Any, headers: Optional[RawHeaders] = None
) -> Any:
    """POST a canonical-JSON body to `uri` and parse the JSON response.

    Args:
        uri: URI to query.
        post_json: request body, to be encoded as json
        headers: a map from header name to a list of values for that header

    Returns:
        parsed json

    Raises:
        RequestTimedOutError: if there is a timeout before the response headers
            are received. Note there is currently no timeout on reading the
            response body.

        HttpResponseException: On a non-2xx HTTP response.

        ValueError: if the response was not JSON
    """
    json_str = encode_canonical_json(post_json)

    logger.debug("HTTP POST %s -> %s", json_str, uri)

    request_headers = {
        b"Content-Type": [b"application/json"],
        b"User-Agent": [self.user_agent],
        b"Accept": [b"application/json"],
    }
    if headers:
        request_headers.update(headers)  # type: ignore

    response = await self.request(
        "POST", uri, headers=Headers(request_headers), data=json_str
    )

    body = await make_deferred_yieldable(readBody(response))

    if not (200 <= response.code < 300):
        raise HttpResponseException(
            response.code, response.phrase.decode("ascii", errors="replace"), body
        )
    return json_decoder.decode(body.decode("utf-8"))
async def put_json(self, uri, json_body, args=None, headers=None):
    """ Puts some json to the given URI.

    Args:
        uri (str): The URI to request, not including query parameters
        json_body (dict): The JSON to put in the HTTP body,
        args (dict|None): A dictionary used to create query strings, defaults
            to None.
            **Note**: The value of each key is assumed to be an iterable
            and *not* a string.
        headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map
            from header name to a list of values for that header
    Returns:
        Succeeds when we get *any* 2xx HTTP response, with the
        HTTP body as JSON.
    Raises:
        HttpResponseException On a non-2xx HTTP response.

        ValueError: if the response was not JSON
    """
    # FIX: `args` previously defaulted to a shared mutable `{}`; default to
    # None to avoid the mutable-default-argument pitfall. `if args:` keeps
    # the old "skip when empty" behaviour and also handles None.
    if args:
        query_bytes = urllib.parse.urlencode(args, True)
        uri = "%s?%s" % (uri, query_bytes)

    json_str = encode_canonical_json(json_body)

    actual_headers = {
        b"Content-Type": [b"application/json"],
        b"User-Agent": [self.user_agent],
        b"Accept": [b"application/json"],
    }
    if headers:
        actual_headers.update(headers)

    response = await self.request(
        "PUT", uri, headers=Headers(actual_headers), data=json_str
    )

    body = await make_deferred_yieldable(readBody(response))

    if 200 <= response.code < 300:
        return json.loads(body.decode("utf-8"))
    else:
        raise HttpResponseException(
            response.code, response.phrase.decode("ascii", errors="replace"), body
        )
async def post_urlencoded_get_json(self, uri, args=None, headers=None):
    """
    Args:
        uri (str):
        args (dict[str, str|List[str]]|None): query params
        headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map
            from header name to a list of values for that header

    Returns:
        object: parsed json

    Raises:
        HttpResponseException: On a non-2xx HTTP response.

        ValueError: if the response was not JSON
    """
    # TODO: Do we ever want to log message contents?
    logger.debug("post_urlencoded_get_json args: %s", args)

    # FIX: `args` previously defaulted to a shared mutable `{}`; default to
    # None to avoid the mutable-default-argument pitfall (behaviour unchanged).
    query_bytes = urllib.parse.urlencode(
        encode_urlencode_args(args or {}), True
    ).encode("utf8")

    actual_headers = {
        b"Content-Type": [b"application/x-www-form-urlencoded"],
        b"User-Agent": [self.user_agent],
        b"Accept": [b"application/json"],
    }
    if headers:
        actual_headers.update(headers)

    response = await self.request(
        "POST", uri, headers=Headers(actual_headers), data=query_bytes
    )

    body = await make_deferred_yieldable(readBody(response))

    if 200 <= response.code < 300:
        return json.loads(body.decode("utf-8"))
    else:
        raise HttpResponseException(
            response.code, response.phrase.decode("ascii", errors="replace"), body
        )
def create_user(self, localpart):
    """Register `localpart` via /register and return the access token.

    Raises:
        SynapseError: if the registration request does not return a 200.
    """
    request_data = json.dumps(
        {
            "username": localpart,
            "password": "******",
            "auth": {"type": LoginType.DUMMY},
        }
    )

    request, channel = self.make_request("POST", "/register", request_data)

    if channel.code != 200:
        raise HttpResponseException(
            channel.code, channel.result["reason"], channel.result["body"]
        ).to_synapse_error()

    return channel.json_body["access_token"]
async def get_raw(
    self,
    uri: str,
    args: Optional[QueryParams] = None,
    headers: Optional[RawHeaders] = None,
) -> bytes:
    """Gets raw text from the given URI.

    Args:
        uri: The URI to request, not including query parameters
        args: A dictionary used to create query strings
        headers: a map from header name to a list of values for that header

    Returns:
        Succeeds when we get a 2xx HTTP response, with the HTTP body as bytes.

    Raises:
        RequestTimedOutError: if there is a timeout before the response headers
            are received. Note there is currently no timeout on reading the
            response body.

        HttpResponseException on a non-2xx HTTP response.
    """
    if args:
        uri = "%s?%s" % (uri, urllib.parse.urlencode(args, True))

    request_headers = {b"User-Agent": [self.user_agent]}
    if headers:
        request_headers.update(headers)  # type: ignore

    response = await self.request("GET", uri, headers=Headers(request_headers))

    body = await make_deferred_yieldable(readBody(response))

    if not (200 <= response.code < 300):
        raise HttpResponseException(
            response.code, response.phrase.decode("ascii", errors="replace"), body
        )
    return body
async def post_json_get_json(self, uri, post_json, headers=None):
    """POST a canonical-JSON body to `uri` and parse the JSON response.

    Args:
        uri (str):
        post_json (object):
        headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map
            from header name to a list of values for that header

    Returns:
        object: parsed json

    Raises:
        HttpResponseException: On a non-2xx HTTP response.

        ValueError: if the response was not JSON
    """
    json_str = encode_canonical_json(post_json)

    logger.debug("HTTP POST %s -> %s", json_str, uri)

    request_headers = {
        b"Content-Type": [b"application/json"],
        b"User-Agent": [self.user_agent],
        b"Accept": [b"application/json"],
    }
    if headers:
        request_headers.update(headers)

    response = await self.request(
        "POST", uri, headers=Headers(request_headers), data=json_str
    )

    body = await make_deferred_yieldable(readBody(response))

    if not (200 <= response.code < 300):
        raise HttpResponseException(
            response.code, response.phrase.decode("ascii", errors="replace"), body
        )
    return json.loads(body.decode("utf-8"))
def post_urlencoded_get_json(self, uri, args=None, headers=None):
    """
    Args:
        uri (str):
        args (dict[str, str|List[str]]|None): query params
        headers (dict[str, List[str]]|None): If not None, a map from
            header name to a list of values for that header

    Returns:
        Deferred[object]: parsed json

    Raises:
        HttpResponseException: On a non-2xx HTTP response.

        ValueError: if the response was not JSON
    """
    # TODO: Do we ever want to log message contents?
    logger.debug("post_urlencoded_get_json args: %s", args)

    # FIX: `args` previously defaulted to a shared mutable `{}`; default to
    # None to avoid the mutable-default-argument pitfall (behaviour unchanged).
    query_bytes = urllib.urlencode(encode_urlencode_args(args or {}), True)

    actual_headers = {
        b"Content-Type": [b"application/x-www-form-urlencoded"],
        b"User-Agent": [self.user_agent],
    }
    if headers:
        actual_headers.update(headers)

    response = yield self.request(
        "POST",
        uri.encode("ascii"),
        headers=Headers(actual_headers),
        bodyProducer=FileBodyProducer(StringIO(query_bytes))
    )

    body = yield make_deferred_yieldable(readBody(response))

    if 200 <= response.code < 300:
        defer.returnValue(json.loads(body))
    else:
        raise HttpResponseException(response.code, response.phrase, body)
def _is_unknown_endpoint( self, e: HttpResponseException, synapse_error: Optional[SynapseError] = None) -> bool: """ Returns true if the response was due to an endpoint being unimplemented. Args: e: The error response received from the remote server. synapse_error: The above error converted to a SynapseError. This is automatically generated if not provided. """ if synapse_error is None: synapse_error = e.to_synapse_error() # There is no good way to detect an "unknown" endpoint. # # Dendrite returns a 404 (with no body); synapse returns a 400 # with M_UNRECOGNISED. return e.code == 404 or (e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED)
def post_json_get_json(self, uri, post_json, headers=None):
    """POST a canonical-JSON body to `uri` and parse the JSON response.

    Args:
        uri (str):
        post_json (object):
        headers (dict[str, List[str]]|None): If not None, a map from
            header name to a list of values for that header

    Returns:
        Deferred[object]: parsed json

    Raises:
        HttpResponseException: On a non-2xx HTTP response.

        ValueError: if the response was not JSON
    """
    json_str = encode_canonical_json(post_json)

    logger.debug("HTTP POST %s -> %s", json_str, uri)

    request_headers = {
        b"Content-Type": [b"application/json"],
        b"User-Agent": [self.user_agent],
    }
    if headers:
        request_headers.update(headers)

    response = yield self.request(
        "POST",
        uri.encode("ascii"),
        headers=Headers(request_headers),
        bodyProducer=FileBodyProducer(StringIO(json_str))
    )

    body = yield make_deferred_yieldable(readBody(response))

    if not (200 <= response.code < 300):
        raise HttpResponseException(response.code, response.phrase, body)
    defer.returnValue(json.loads(body))
def _request_token(
    self,
    email: str,
    client_secret: str,
    next_link: Optional[str] = None,
    expect_code: int = 200,
) -> str:
    """Request a validation token to add an email address to a user's account

    Args:
        email: The email address to validate
        client_secret: A secret string
        next_link: A link to redirect the user to after validation
        expect_code: Expected return code of the call

    Returns:
        The ID of the new threepid validation session
    """
    request_body = {
        "client_secret": client_secret,
        "email": email,
        "send_attempt": 1,
    }
    if next_link:
        request_body["next_link"] = next_link

    channel = self.make_request(
        "POST",
        b"account/3pid/email/requestToken",
        request_body,
    )

    if channel.code != expect_code:
        raise HttpResponseException(
            channel.code,
            channel.result["reason"],
            channel.result["body"],
        )

    return channel.json_body.get("sid")
def create_user(self, localpart):
    """Register `localpart` via /register and return the access token.

    Raises:
        SynapseError: if the registration request does not return a 200.
    """
    request_data = json.dumps(
        {
            "username": localpart,
            "password": "******",
            "auth": {"type": LoginType.DUMMY},
        }
    )

    request, channel = make_request(b"POST", b"/register", request_data)
    render(request, self.resource, self.reactor)

    code = channel.result["code"]
    if code != b"200":
        raise HttpResponseException(
            int(code),
            channel.result["reason"],
            channel.result["body"],
        ).to_synapse_error()

    return channel.json_body["access_token"]
def _send_request(
    self,
    request,
    retry_on_dns_fail=True,
    timeout=None,
    long_retries=False,
    ignore_backoff=False,
    backoff_on_404=False
):
    """
    Sends a request to the given server.

    Args:
        request (MatrixFederationRequest): details of request to be sent

        timeout (int|None): number of milliseconds to wait for the response headers
            (including connecting to the server). 60s by default.

        ignore_backoff (bool): true to ignore the historical backoff data
            and try the request anyway.

        backoff_on_404 (bool): Back off if we get a 404

    Returns:
        Deferred: resolves with the http response object on success.

        Fails with ``HttpResponseException``: if we get an HTTP response
            code >= 300 (except 429).

        Fails with ``NotRetryingDestination`` if we are not yet ready
            to retry this server.

        Fails with ``FederationDeniedError`` if this destination
            is not on our federation whitelist

        Fails with ``RequestSendFailed`` if there were problems connecting to
            the remote, due to e.g. DNS failures, connection timeouts etc.
    """
    # `timeout` is in milliseconds; twisted wants seconds.
    if timeout:
        _sec_timeout = timeout / 1000
    else:
        _sec_timeout = self.default_timeout

    # Refuse to talk to destinations outside the federation whitelist,
    # when one is configured.
    if (
        self.hs.config.federation_domain_whitelist is not None
        and request.destination not in self.hs.config.federation_domain_whitelist
    ):
        raise FederationDeniedError(request.destination)

    # Raises NotRetryingDestination if we are still backing off from this
    # destination.
    limiter = yield synapse.util.retryutils.get_retry_limiter(
        request.destination,
        self.clock,
        self._store,
        backoff_on_404=backoff_on_404,
        ignore_backoff=ignore_backoff,
    )

    method_bytes = request.method.encode("ascii")
    destination_bytes = request.destination.encode("ascii")
    path_bytes = request.path.encode("ascii")
    if request.query:
        query_bytes = encode_query_args(request.query)
    else:
        query_bytes = b""

    headers_dict = {
        b"User-Agent": [self.version_string_bytes],
        b"Host": [destination_bytes],
    }

    with limiter:
        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        # Full matrix:// URL for the actual request, and a scheme-less,
        # host-less variant used only for request signing.
        url_bytes = urllib.parse.urlunparse((
            b"matrix", destination_bytes,
            path_bytes, None, query_bytes, b"",
        ))
        url_str = url_bytes.decode('ascii')

        url_to_sign_bytes = urllib.parse.urlunparse((
            b"", b"",
            path_bytes, None, query_bytes, b"",
        ))

        while True:
            try:
                json = request.get_json()
                if json:
                    headers_dict[b"Content-Type"] = [b"application/json"]
                    self.sign_request(
                        destination_bytes, method_bytes, url_to_sign_bytes,
                        headers_dict, json,
                    )
                    data = encode_canonical_json(json)
                    producer = FileBodyProducer(
                        BytesIO(data),
                        cooperator=self._cooperator,
                    )
                else:
                    producer = None
                    self.sign_request(
                        destination_bytes, method_bytes, url_to_sign_bytes,
                        headers_dict,
                    )

                logger.info(
                    "{%s} [%s] Sending request: %s %s",
                    request.txn_id, request.destination, request.method,
                    url_str,
                )

                # we don't want all the fancy cookie and redirect handling that
                # treq.request gives: just use the raw Agent.
                request_deferred = self.agent.request(
                    method_bytes,
                    url_bytes,
                    headers=Headers(headers_dict),
                    bodyProducer=producer,
                )

                request_deferred = timeout_deferred(
                    request_deferred,
                    timeout=_sec_timeout,
                    reactor=self.hs.get_reactor(),
                )

                try:
                    with Measure(self.clock, "outbound_request"):
                        response = yield make_deferred_yieldable(
                            request_deferred,
                        )
                except DNSLookupError as e:
                    # DNS failures are only retryable when the caller says so.
                    raise_from(RequestSendFailed(e, can_retry=retry_on_dns_fail), e)
                except Exception as e:
                    raise_from(RequestSendFailed(e, can_retry=True), e)

                logger.info(
                    "{%s} [%s] Got response headers: %d %s",
                    request.txn_id,
                    request.destination,
                    response.code,
                    response.phrase.decode('ascii', errors='replace'),
                )

                if 200 <= response.code < 300:
                    pass
                else:
                    # :'(
                    # Update transactions table?
                    d = treq.content(response)
                    d = timeout_deferred(
                        d,
                        timeout=_sec_timeout,
                        reactor=self.hs.get_reactor(),
                    )

                    try:
                        body = yield make_deferred_yieldable(d)
                    except Exception as e:
                        # Eh, we're already going to raise an exception so lets
                        # ignore if this fails.
                        logger.warn(
                            "{%s} [%s] Failed to get error response: %s %s: %s",
                            request.txn_id,
                            request.destination,
                            request.method,
                            url_str,
                            _flatten_response_never_received(e),
                        )
                        body = None

                    e = HttpResponseException(
                        response.code, response.phrase, body
                    )

                    # Retry if the error is a 429 (Too Many Requests),
                    # otherwise just raise a standard HttpResponseException
                    if response.code == 429:
                        raise_from(RequestSendFailed(e, can_retry=True), e)
                    else:
                        raise e

                break
            except RequestSendFailed as e:
                logger.warn(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e.inner_exception),
                )

                if not e.can_retry:
                    raise

                # Back off with jitter before retrying; an explicit `timeout`
                # disables retries entirely.
                if retries_left and not timeout:
                    if long_retries:
                        delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                        delay = min(delay, 60)
                        delay *= random.uniform(0.8, 1.4)
                    else:
                        delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                        delay = min(delay, 2)
                        delay *= random.uniform(0.8, 1.4)

                    logger.debug(
                        "{%s} [%s] Waiting %ss before re-sending...",
                        request.txn_id,
                        request.destination,
                        delay,
                    )

                    yield self.clock.sleep(delay)
                    retries_left -= 1
                else:
                    raise

            except Exception as e:
                logger.warn(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e),
                )
                raise

    defer.returnValue(response)
def _request(self, destination, method, path, json=None, json_callback=None,
             param_bytes=b"", query=None, retry_on_dns_fail=True,
             timeout=None, long_retries=False, ignore_backoff=False,
             backoff_on_404=False):
    """
    Creates and sends a request to the given server.

    Args:
        destination (str): The remote server to send the HTTP request to.
        method (str): HTTP method
        path (str): The HTTP path
        json (dict or None): JSON to send in the body.
        json_callback (func or None): A callback to generate the JSON.
        query (dict or None): Query arguments.
        ignore_backoff (bool): true to ignore the historical backoff data
            and try the request anyway.
        backoff_on_404 (bool): Back off if we get a 404

    Returns:
        Deferred: resolves with the http response object on success.

        Fails with ``HTTPRequestException``: if we get an HTTP response
            code >= 300.

        Fails with ``NotRetryingDestination`` if we are not yet ready
            to retry this server.

        Fails with ``FederationDeniedError`` if this destination
            is not on our federation whitelist

        (May also fail with plenty of other Exceptions for things like DNS
            failures, connection failures, SSL failures.)
    """
    # `timeout` is in milliseconds; twisted wants seconds.
    if timeout:
        _sec_timeout = timeout / 1000
    else:
        _sec_timeout = self.default_timeout

    # Refuse to talk to destinations outside the federation whitelist,
    # when one is configured.
    if (self.hs.config.federation_domain_whitelist is not None and
            destination not in self.hs.config.federation_domain_whitelist):
        raise FederationDeniedError(destination)

    # Raises NotRetryingDestination if we are still backing off from this
    # destination.
    limiter = yield synapse.util.retryutils.get_retry_limiter(
        destination,
        self.clock,
        self._store,
        backoff_on_404=backoff_on_404,
        ignore_backoff=ignore_backoff,
    )

    headers_dict = {}
    path_bytes = path.encode("ascii")
    if query:
        query_bytes = encode_query_args(query)
    else:
        query_bytes = b""

    # NOTE(review): this assignment supersedes the empty `headers_dict`
    # above, which appears to be dead — confirm before removing.
    headers_dict = {
        "User-Agent": [self.version_string],
        "Host": [destination],
    }

    with limiter:
        url = self._create_url(
            destination.encode("ascii"), path_bytes, param_bytes, query_bytes
        ).decode('ascii')

        txn_id = "%s-O-%s" % (method, self._next_id)
        self._next_id = (self._next_id + 1) % (MAXINT - 1)

        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        # Scheme-less, host-less URL, used for request signing.
        http_url = urllib.parse.urlunparse(
            (b"", b"", path_bytes, param_bytes, query_bytes, b"")
        ).decode('ascii')

        log_result = None
        while True:
            try:
                if json_callback:
                    json = json_callback()

                if json:
                    data = encode_canonical_json(json)
                    headers_dict["Content-Type"] = ["application/json"]
                    self.sign_request(
                        destination, method, http_url, headers_dict, json
                    )
                else:
                    data = None
                    self.sign_request(destination, method, http_url, headers_dict)

                outbound_logger.info(
                    "{%s} [%s] Sending request: %s %s",
                    txn_id, destination, method, url
                )

                request_deferred = treq.request(
                    method,
                    url,
                    headers=Headers(headers_dict),
                    data=data,
                    agent=self.agent,
                    reactor=self.hs.get_reactor(),
                    unbuffered=True
                )
                request_deferred.addTimeout(_sec_timeout, self.hs.get_reactor())

                # Sometimes the timeout above doesn't work, so lets hack yet
                # another layer of timeouts in in the vain hope that at some
                # point the world made sense and this really really really
                # should work.
                request_deferred = timeout_no_seriously(
                    request_deferred,
                    timeout=_sec_timeout * 2,
                    reactor=self.hs.get_reactor(),
                )

                with Measure(self.clock, "outbound_request"):
                    response = yield make_deferred_yieldable(
                        request_deferred,
                    )

                log_result = "%d %s" % (response.code, response.phrase,)
                break
            except Exception as e:
                if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                    logger.warn("DNS Lookup failed to %s with %s", destination, e)
                    log_result = "DNS Lookup failed to %s with %s" % (
                        destination, e)
                    raise

                logger.warn(
                    "{%s} Sending request failed to %s: %s %s: %s",
                    txn_id,
                    destination,
                    method,
                    url,
                    _flatten_response_never_received(e),
                )

                log_result = _flatten_response_never_received(e)

                # Back off with jitter before retrying; an explicit `timeout`
                # disables retries entirely.
                if retries_left and not timeout:
                    if long_retries:
                        delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                        delay = min(delay, 60)
                        delay *= random.uniform(0.8, 1.4)
                    else:
                        delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                        delay = min(delay, 2)
                        delay *= random.uniform(0.8, 1.4)

                    logger.debug(
                        "{%s} Waiting %s before sending to %s...",
                        txn_id, delay, destination
                    )

                    yield self.clock.sleep(delay)
                    retries_left -= 1
                else:
                    raise
            finally:
                outbound_logger.info(
                    "{%s} [%s] Result: %s", txn_id, destination, log_result,
                )

        if 200 <= response.code < 300:
            pass
        else:
            # :'(
            # Update transactions table?
            with logcontext.PreserveLoggingContext():
                d = treq.content(response)
                d.addTimeout(_sec_timeout, self.hs.get_reactor())
                body = yield make_deferred_yieldable(d)
            raise HttpResponseException(
                response.code, response.phrase, body
            )

    defer.returnValue(response)
def _request(self, destination, method, path, body_callback,
             headers_dict=None, param_bytes=b"", query_bytes=b"",
             retry_on_dns_fail=True, timeout=None, long_retries=False,
             ignore_backoff=False, backoff_on_404=False):
    """
    Creates and sends a request to the given server

    Args:
        destination (str): The remote server to send the HTTP request to.
        method (str): HTTP method
        path (str): The HTTP path
        headers_dict (dict|None): extra headers to send with the request;
            a fresh dict is created per call when omitted.
        ignore_backoff (bool): true to ignore the historical backoff data
            and try the request anyway.
        backoff_on_404 (bool): Back off if we get a 404

    Returns:
        Deferred: resolves with the http response object on success.

        Fails with ``HTTPRequestException``: if we get an HTTP response
            code >= 300.

        Fails with ``NotRetryingDestination`` if we are not yet ready
            to retry this server.

        Fails with ``FederationDeniedError`` if this destination
            is not on our federation whitelist

        (May also fail with plenty of other Exceptions for things like DNS
            failures, connection failures, SSL failures.)
    """
    # FIX: `headers_dict` used to default to a shared mutable `{}`, which
    # this function then mutates below (User-Agent / Host), leaking headers
    # between calls. Use a fresh dict per call; callers are unaffected.
    if headers_dict is None:
        headers_dict = {}

    # Refuse to talk to destinations outside the federation whitelist,
    # when one is configured.
    if (self.hs.config.federation_domain_whitelist and
            destination not in self.hs.config.federation_domain_whitelist):
        raise FederationDeniedError(destination)

    # Raises NotRetryingDestination if we are still backing off from this
    # destination.
    limiter = yield synapse.util.retryutils.get_retry_limiter(
        destination,
        self.clock,
        self._store,
        backoff_on_404=backoff_on_404,
        ignore_backoff=ignore_backoff,
    )

    destination = destination.encode("ascii")
    path_bytes = path.encode("ascii")
    with limiter:
        headers_dict[b"User-Agent"] = [self.version_string]
        headers_dict[b"Host"] = [destination]

        url_bytes = self._create_url(
            destination, path_bytes, param_bytes, query_bytes
        )

        txn_id = "%s-O-%s" % (method, self._next_id)
        self._next_id = (self._next_id + 1) % (sys.maxint - 1)

        outbound_logger.info(
            "{%s} [%s] Sending request: %s %s",
            txn_id, destination, method, url_bytes
        )

        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        # Scheme-less, host-less URL, used for request signing via
        # body_callback.
        http_url_bytes = urlparse.urlunparse(
            ("", "", path_bytes, param_bytes, query_bytes, "")
        )

        log_result = None
        try:
            while True:
                producer = None
                if body_callback:
                    producer = body_callback(method, http_url_bytes, headers_dict)

                try:
                    request_deferred = self.agent.request(
                        method, url_bytes, Headers(headers_dict), producer
                    )
                    add_timeout_to_deferred(
                        request_deferred,
                        timeout / 1000. if timeout else 60,
                        self.hs.get_reactor(),
                        cancelled_to_request_timed_out_error,
                    )
                    response = yield make_deferred_yieldable(
                        request_deferred,
                    )

                    log_result = "%d %s" % (response.code, response.phrase,)
                    break
                except Exception as e:
                    if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                        logger.warn(
                            "DNS Lookup failed to %s with %s", destination, e
                        )
                        log_result = "DNS Lookup failed to %s with %s" % (
                            destination, e)
                        raise

                    logger.warn(
                        "{%s} Sending request failed to %s: %s %s: %s",
                        txn_id,
                        destination,
                        method,
                        url_bytes,
                        _flatten_response_never_received(e),
                    )

                    log_result = _flatten_response_never_received(e)

                    # Back off with jitter before retrying; an explicit
                    # `timeout` disables retries entirely.
                    if retries_left and not timeout:
                        if long_retries:
                            delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                            delay = min(delay, 60)
                            delay *= random.uniform(0.8, 1.4)
                        else:
                            delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                            delay = min(delay, 2)
                            delay *= random.uniform(0.8, 1.4)

                        yield self.clock.sleep(delay)
                        retries_left -= 1
                    else:
                        raise
        finally:
            outbound_logger.info(
                "{%s} [%s] Result: %s", txn_id, destination, log_result,
            )

        if 200 <= response.code < 300:
            pass
        else:
            # :'(
            # Update transactions table?
            with logcontext.PreserveLoggingContext():
                body = yield readBody(response)
            raise HttpResponseException(
                response.code, response.phrase, body
            )

    defer.returnValue(response)
def _send_request(
    self,
    request,
    retry_on_dns_fail=True,
    timeout=None,
    long_retries=False,
    ignore_backoff=False,
    backoff_on_404=False,
):
    """
    Sends a request to the given server.

    Args:
        request (MatrixFederationRequest): details of request to be sent

        timeout (int|None): number of milliseconds to wait for the response headers
            (including connecting to the server), *for each attempt*.
            60s by default.

        long_retries (bool): whether to use the long retry algorithm.

            The regular retry algorithm makes 4 attempts, with intervals
            [0.5s, 1s, 2s].

            The long retry algorithm makes 11 attempts, with intervals
            [4s, 16s, 60s, 60s, ...]

            Both algorithms add -20%/+40% jitter to the retry intervals.

            Note that the above intervals are *in addition* to the time spent
            waiting for the request to complete (up to `timeout` ms).

            NB: the long retry algorithm takes over 20 minutes to complete, with
            a default timeout of 60s!

        ignore_backoff (bool): true to ignore the historical backoff data
            and try the request anyway.

        backoff_on_404 (bool): Back off if we get a 404

    Returns:
        Deferred[twisted.web.client.Response]: resolves with the HTTP
        response object on success.

    Raises:
        HttpResponseException: If we get an HTTP response code >= 300
            (except 429).
        NotRetryingDestination: If we are not yet ready to retry this
            server.
        FederationDeniedError: If this destination is not on our
            federation whitelist
        RequestSendFailed: If there were problems connecting to the remote,
            due to e.g. DNS failures, connection timeouts etc.
    """
    # `timeout` is in milliseconds; internal deadline is in seconds.
    if timeout:
        _sec_timeout = timeout / 1000
    else:
        _sec_timeout = self.default_timeout

    if (
        self.hs.config.federation_domain_whitelist is not None
        and request.destination not in self.hs.config.federation_domain_whitelist
    ):
        raise FederationDeniedError(request.destination)

    # Raises NotRetryingDestination if we are still backing off from this
    # destination.
    limiter = yield synapse.util.retryutils.get_retry_limiter(
        request.destination,
        self.clock,
        self._store,
        backoff_on_404=backoff_on_404,
        ignore_backoff=ignore_backoff,
    )

    method_bytes = request.method.encode("ascii")
    destination_bytes = request.destination.encode("ascii")
    path_bytes = request.path.encode("ascii")
    if request.query:
        query_bytes = encode_query_args(request.query)
    else:
        query_bytes = b""

    # Start an opentracing span covering the whole (possibly retried) request.
    scope = opentracing.start_active_span(
        "outgoing-federation-request",
        tags={
            opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_CLIENT,
            opentracing.tags.PEER_ADDRESS: request.destination,
            opentracing.tags.HTTP_METHOD: request.method,
            opentracing.tags.HTTP_URL: request.path,
        },
        finish_on_close=True,
    )

    # Inject the span into the headers
    headers_dict = {}
    opentracing.inject_active_span_byte_dict(headers_dict, request.destination)

    headers_dict[b"User-Agent"] = [self.version_string_bytes]

    with limiter, scope:
        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        url_bytes = urllib.parse.urlunparse(
            (b"matrix", destination_bytes, path_bytes, None, query_bytes, b"")
        )
        url_str = url_bytes.decode("ascii")

        # Relative URL (no scheme/authority) used for the auth headers.
        url_to_sign_bytes = urllib.parse.urlunparse(
            (b"", b"", path_bytes, None, query_bytes, b"")
        )

        while True:
            try:
                # NOTE: the JSON body (if any) is re-fetched and re-signed on
                # every retry attempt.
                json = request.get_json()
                if json:
                    headers_dict[b"Content-Type"] = [b"application/json"]
                    auth_headers = self.build_auth_headers(
                        destination_bytes, method_bytes, url_to_sign_bytes, json
                    )
                    data = encode_canonical_json(json)
                    producer = QuieterFileBodyProducer(
                        BytesIO(data), cooperator=self._cooperator
                    )
                else:
                    producer = None
                    auth_headers = self.build_auth_headers(
                        destination_bytes, method_bytes, url_to_sign_bytes
                    )

                headers_dict[b"Authorization"] = auth_headers

                logger.info(
                    "{%s} [%s] Sending request: %s %s; timeout %fs",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _sec_timeout,
                )

                try:
                    with Measure(self.clock, "outbound_request"):
                        # we don't want all the fancy cookie and redirect handling
                        # that treq.request gives: just use the raw Agent.
                        request_deferred = self.agent.request(
                            method_bytes,
                            url_bytes,
                            headers=Headers(headers_dict),
                            bodyProducer=producer,
                        )

                        request_deferred = timeout_deferred(
                            request_deferred,
                            timeout=_sec_timeout,
                            reactor=self.reactor,
                        )

                        response = yield request_deferred
                except DNSLookupError as e:
                    # Whether DNS failures are retryable is caller-controlled.
                    raise_from(RequestSendFailed(e, can_retry=retry_on_dns_fail), e)
                except Exception as e:
                    # Any other connection-level failure is wrapped so the
                    # outer retry handler can decide to re-send.
                    logger.info("Failed to send request: %s", e)
                    raise_from(RequestSendFailed(e, can_retry=True), e)

                logger.info(
                    "{%s} [%s] Got response headers: %d %s",
                    request.txn_id,
                    request.destination,
                    response.code,
                    response.phrase.decode("ascii", errors="replace"),
                )

                opentracing.set_tag(
                    opentracing.tags.HTTP_STATUS_CODE, response.code
                )

                if 200 <= response.code < 300:
                    pass
                else:
                    # :'(
                    # Update transactions table?
                    # Read the error body (with its own timeout) so it can be
                    # attached to the HttpResponseException.
                    d = treq.content(response)
                    d = timeout_deferred(
                        d, timeout=_sec_timeout, reactor=self.reactor
                    )

                    try:
                        body = yield make_deferred_yieldable(d)
                    except Exception as e:
                        # Eh, we're already going to raise an exception so lets
                        # ignore if this fails.
                        logger.warn(
                            "{%s} [%s] Failed to get error response: %s %s: %s",
                            request.txn_id,
                            request.destination,
                            request.method,
                            url_str,
                            _flatten_response_never_received(e),
                        )
                        body = None

                    e = HttpResponseException(response.code, response.phrase, body)

                    # Retry if the error is a 429 (Too Many Requests),
                    # otherwise just raise a standard HttpResponseException
                    if response.code == 429:
                        raise_from(RequestSendFailed(e, can_retry=True), e)
                    else:
                        raise e

                break
            except RequestSendFailed as e:
                logger.warn(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e.inner_exception),
                )

                if not e.can_retry:
                    raise

                # Only retry when no explicit timeout was requested;
                # exponential backoff with -20%/+40% jitter.
                if retries_left and not timeout:
                    if long_retries:
                        delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                        delay = min(delay, 60)
                        delay *= random.uniform(0.8, 1.4)
                    else:
                        delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                        delay = min(delay, 2)
                        delay *= random.uniform(0.8, 1.4)

                    logger.debug(
                        "{%s} [%s] Waiting %ss before re-sending...",
                        request.txn_id,
                        request.destination,
                        delay,
                    )

                    yield self.clock.sleep(delay)
                    retries_left -= 1
                else:
                    raise
            except Exception as e:
                logger.warn(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e),
                )
                raise

    return response
def _create_request(self, destination, method, path_bytes,
                    body_callback, headers_dict=None, param_bytes=b"",
                    query_bytes=b"", retry_on_dns_fail=True,
                    timeout=None, long_retries=False):
    """ Creates and sends a request to the given url

    Args:
        destination: the remote server to send the request to.
        method: HTTP method.
        path_bytes: the request path.
        body_callback (callable|None): called with
            ``(method, http_url, headers_dict)``; its return value is used
            as the request body producer (None for no body).
        headers_dict (dict|None): extra headers to send; a fresh dict is
            created when None.
        retry_on_dns_fail (bool): whether to retry after a DNS lookup failure.
        timeout (int|None): request timeout in milliseconds; when set,
            failures are not retried.
        long_retries (bool): use the long (MAX_LONG_RETRIES) retry schedule.

    Returns:
        Deferred: resolves with the http response object on success.
        Fails with HttpResponseException on a non-2xx response.
    """
    # Fix: the previous signature used a mutable default (`headers_dict={}`),
    # which is created once and mutated below, leaking header entries across
    # calls. Use a None sentinel instead.
    if headers_dict is None:
        headers_dict = {}

    headers_dict[b"User-Agent"] = [self.version_string]
    headers_dict[b"Host"] = [destination]

    url_bytes = self._create_url(
        destination, path_bytes, param_bytes, query_bytes
    )

    # Per-request transaction id, used only for log correlation.
    txn_id = "%s-O-%s" % (method, self._next_id)
    self._next_id = (self._next_id + 1) % (sys.maxint - 1)

    outbound_logger.info("{%s} [%s] Sending request: %s %s",
                         txn_id, destination, method, url_bytes)

    # XXX: Would be much nicer to retry only at the transaction-layer
    # (once we have reliable transactions in place)
    if long_retries:
        retries_left = MAX_LONG_RETRIES
    else:
        retries_left = MAX_SHORT_RETRIES

    # Relative URL (no scheme/netloc) handed to the body callback.
    http_url_bytes = urlparse.urlunparse(
        ("", "", path_bytes, param_bytes, query_bytes, "")
    )

    log_result = None
    try:
        while True:
            producer = None
            if body_callback:
                producer = body_callback(method, http_url_bytes, headers_dict)

            try:
                def send_request():
                    # Issue the request while preserving the logging
                    # context, bounded by the (ms) timeout or 60s default.
                    request_deferred = preserve_context_over_fn(
                        self.agent.request,
                        method, url_bytes, Headers(headers_dict), producer
                    )
                    return self.clock.time_bound_deferred(
                        request_deferred,
                        time_out=timeout / 1000. if timeout else 60,
                    )

                response = yield preserve_context_over_fn(send_request)

                log_result = "%d %s" % (response.code, response.phrase,)
                break
            except Exception as e:
                if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                    logger.warn("DNS Lookup failed to %s with %s",
                                destination, e)
                    log_result = "DNS Lookup failed to %s with %s" % (
                        destination, e)
                    raise

                logger.warn(
                    "{%s} Sending request failed to %s: %s %s: %s - %s",
                    txn_id, destination, method, url_bytes,
                    type(e).__name__, _flatten_response_never_received(e),
                )
                log_result = "%s - %s" % (
                    type(e).__name__, _flatten_response_never_received(e),
                )

                # Only retry when no explicit timeout was requested;
                # exponential backoff with -20%/+40% jitter.
                if retries_left and not timeout:
                    if long_retries:
                        delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                        delay = min(delay, 60)
                        delay *= random.uniform(0.8, 1.4)
                    else:
                        delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                        delay = min(delay, 2)
                        delay *= random.uniform(0.8, 1.4)

                    yield sleep(delay)
                    retries_left -= 1
                else:
                    raise
    finally:
        outbound_logger.info(
            "{%s} [%s] Result: %s", txn_id, destination, log_result,
        )

    if 200 <= response.code < 300:
        pass
    else:
        # :'(
        # Update transactions table?
        body = yield preserve_context_over_fn(readBody, response)
        raise HttpResponseException(response.code, response.phrase, body)

    defer.returnValue(response)
def _create_request(self, destination, method, path_bytes, body_callback,
                    headers_dict=None, param_bytes=b"", query_bytes=b"",
                    retry_on_dns_fail=True):
    """ Creates and sends a request to the given url

    Args:
        destination: the remote server to send the request to.
        method: HTTP method.
        path_bytes: the request path.
        body_callback (callable|None): called with
            ``(method, url_bytes, headers_dict)``; its return value is used
            as the request body producer (None for no body).
        headers_dict (dict|None): extra headers to send; a fresh dict is
            created when None.
        retry_on_dns_fail (bool): whether to retry after a DNS lookup failure.

    Returns:
        Deferred: resolves with the http response object on success.
        Fails with HttpResponseException on a non-2xx response.
    """
    # Fix: the previous signature used a mutable default (`headers_dict={}`),
    # created once at def-time and mutated below, so header entries leaked
    # between calls. Use a None sentinel instead.
    if headers_dict is None:
        headers_dict = {}

    headers_dict[b"User-Agent"] = [self.version_string]
    headers_dict[b"Host"] = [destination]

    url_bytes = urlparse.urlunparse(
        ("", "", path_bytes, param_bytes, query_bytes, "",)
    )

    logger.info("Sending request to %s: %s %s",
                destination, method, url_bytes)

    logger.debug(
        "Types: %s",
        [
            type(destination), type(method), type(path_bytes),
            type(param_bytes),
            type(query_bytes)
        ]
    )

    # XXX: Would be much nicer to retry only at the transaction-layer
    # (once we have reliable transactions in place)
    retries_left = 5

    endpoint = self._getEndpoint(reactor, destination)

    while True:
        producer = None
        if body_callback:
            producer = body_callback(method, url_bytes, headers_dict)

        try:
            with PreserveLoggingContext():
                request_deferred = self.agent.request(
                    destination,
                    endpoint,
                    method,
                    path_bytes,
                    param_bytes,
                    query_bytes,
                    Headers(headers_dict),
                    producer
                )

                response = yield self.clock.time_bound_deferred(
                    request_deferred,
                    time_out=60,
                )

            logger.debug("Got response to %s", method)
            break
        except Exception as e:
            if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                logger.warn("DNS Lookup failed to %s with %s", destination,
                            e)
                raise

            logger.warn(
                "Sending request failed to %s: %s %s: %s - %s",
                destination,
                method,
                url_bytes,
                type(e).__name__,
                _flatten_response_never_received(e),
            )

            # Simple exponential backoff: 1s, 2s, 4s, 8s, 16s.
            if retries_left:
                yield sleep(2 ** (5 - retries_left))
                retries_left -= 1
            else:
                raise

    logger.info("Received response %d %s for %s: %s %s",
                response.code,
                response.phrase,
                destination,
                method,
                url_bytes)

    if 200 <= response.code < 300:
        pass
    else:
        # :'(
        # Update transactions table?
        body = yield readBody(response)
        raise HttpResponseException(response.code, response.phrase, body)

    defer.returnValue(response)
def _send_request(self, request, retry_on_dns_fail=True, timeout=None,
                  long_retries=False, ignore_backoff=False,
                  backoff_on_404=False):
    """
    Sends a request to the given server.

    Args:
        request (MatrixFederationRequest): details of request to be sent

        timeout (int|None): number of milliseconds to wait for the response headers
            (including connecting to the server). 60s by default.

        ignore_backoff (bool): true to ignore the historical backoff data
            and try the request anyway.

        backoff_on_404 (bool): Back off if we get a 404

    Returns:
        Deferred: resolves with the http response object on success.

        Fails with ``HTTPRequestException``: if we get an HTTP response
        code >= 300.

        Fails with ``NotRetryingDestination`` if we are not yet ready
        to retry this server.

        Fails with ``FederationDeniedError`` if this destination
        is not on our federation whitelist

        (May also fail with plenty of other Exceptions for things like DNS
        failures, connection failures, SSL failures.)
    """
    # `timeout` is in milliseconds; internal deadline is in seconds.
    if timeout:
        _sec_timeout = timeout / 1000
    else:
        _sec_timeout = self.default_timeout

    if (self.hs.config.federation_domain_whitelist is not None and
            request.destination not in self.hs.config.federation_domain_whitelist):
        raise FederationDeniedError(request.destination)

    # Raises NotRetryingDestination if we are still backing off from this
    # destination.
    limiter = yield synapse.util.retryutils.get_retry_limiter(
        request.destination,
        self.clock,
        self._store,
        backoff_on_404=backoff_on_404,
        ignore_backoff=ignore_backoff,
    )

    method = request.method
    destination = request.destination
    path_bytes = request.path.encode("ascii")
    if request.query:
        query_bytes = encode_query_args(request.query)
    else:
        query_bytes = b""

    headers_dict = {
        "User-Agent": [self.version_string],
        "Host": [request.destination],
    }

    with limiter:
        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        # Full matrix:// URL for the actual request.
        url = urllib.parse.urlunparse((
            b"matrix", destination.encode("ascii"),
            path_bytes, None, query_bytes, b"",
        )).decode('ascii')

        # Relative URL (no scheme/authority) used for request signing.
        http_url = urllib.parse.urlunparse((
            b"", b"",
            path_bytes, None, query_bytes, b"",
        )).decode('ascii')

        while True:
            try:
                # NOTE: the body (if any) is re-fetched and re-signed on
                # each retry attempt.
                json = request.get_json()
                if json:
                    data = encode_canonical_json(json)
                    headers_dict["Content-Type"] = ["application/json"]
                    self.sign_request(
                        destination, method, http_url, headers_dict, json
                    )
                else:
                    data = None
                    self.sign_request(destination, method, http_url, headers_dict)

                logger.info(
                    "{%s} [%s] Sending request: %s %s",
                    request.txn_id, destination, method, url
                )

                if data:
                    producer = FileBodyProducer(
                        BytesIO(data),
                        cooperator=self._cooperator,
                    )
                else:
                    producer = None

                request_deferred = treq.request(
                    method,
                    url,
                    headers=Headers(headers_dict),
                    data=producer,
                    agent=self.agent,
                    reactor=self.hs.get_reactor(),
                    unbuffered=True
                )
                request_deferred = timeout_deferred(
                    request_deferred,
                    timeout=_sec_timeout,
                    reactor=self.hs.get_reactor(),
                )

                with Measure(self.clock, "outbound_request"):
                    response = yield make_deferred_yieldable(
                        request_deferred,
                    )

                break
            except Exception as e:
                logger.warn(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    destination,
                    method,
                    url,
                    _flatten_response_never_received(e),
                )

                if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                    raise

                # Only retry when no explicit timeout was requested;
                # exponential backoff with -20%/+40% jitter.
                if retries_left and not timeout:
                    if long_retries:
                        delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                        delay = min(delay, 60)
                        delay *= random.uniform(0.8, 1.4)
                    else:
                        delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                        delay = min(delay, 2)
                        delay *= random.uniform(0.8, 1.4)

                    logger.debug(
                        "{%s} [%s] Waiting %ss before re-sending...",
                        request.txn_id,
                        destination,
                        delay,
                    )

                    yield self.clock.sleep(delay)
                    retries_left -= 1
                else:
                    raise

        logger.info(
            "{%s} [%s] Got response headers: %d %s",
            request.txn_id,
            destination,
            response.code,
            response.phrase.decode('ascii', errors='replace'),
        )

        if 200 <= response.code < 300:
            pass
        else:
            # :'(
            # Update transactions table?
            # Read the error body (with its own timeout) so it can be
            # attached to the HttpResponseException.
            d = treq.content(response)
            d = timeout_deferred(
                d,
                timeout=_sec_timeout,
                reactor=self.hs.get_reactor(),
            )
            body = yield make_deferred_yieldable(d)
            raise HttpResponseException(
                response.code, response.phrase, body
            )

    defer.returnValue(response)
async def _send_request(
    self,
    request: MatrixFederationRequest,
    retry_on_dns_fail: bool = True,
    timeout: Optional[int] = None,
    long_retries: bool = False,
    ignore_backoff: bool = False,
    backoff_on_404: bool = False,
) -> IResponse:
    """
    Sends a request to the given server.

    Args:
        request: details of request to be sent

        retry_on_dns_fail: true if the request should be retied on DNS failures

        timeout: number of milliseconds to wait for the response headers
            (including connecting to the server), *for each attempt*.
            60s by default.

        long_retries: whether to use the long retry algorithm.

            The regular retry algorithm makes 4 attempts, with intervals
            [0.5s, 1s, 2s].

            The long retry algorithm makes 11 attempts, with intervals
            [4s, 16s, 60s, 60s, ...]

            Both algorithms add -20%/+40% jitter to the retry intervals.

            Note that the above intervals are *in addition* to the time spent
            waiting for the request to complete (up to `timeout` ms).

            NB: the long retry algorithm takes over 20 minutes to complete, with
            a default timeout of 60s!

        ignore_backoff: true to ignore the historical backoff data
            and try the request anyway.

        backoff_on_404: Back off if we get a 404

    Returns:
        Resolves with the HTTP response object on success.

    Raises:
        HttpResponseException: If we get an HTTP response code >= 300
            (except 429).
        NotRetryingDestination: If we are not yet ready to retry this
            server.
        FederationDeniedError: If this destination is not on our
            federation whitelist
        RequestSendFailed: If there were problems connecting to the remote,
            due to e.g. DNS failures, connection timeouts etc.
    """
    # `timeout` is in milliseconds; internal deadline is in seconds.
    if timeout:
        _sec_timeout = timeout / 1000
    else:
        _sec_timeout = self.default_timeout

    if (self.hs.config.federation.federation_domain_whitelist is not None
            and request.destination
            not in self.hs.config.federation.federation_domain_whitelist):
        raise FederationDeniedError(request.destination)

    # Raises NotRetryingDestination if we are still backing off from this
    # destination. The notifier/replication client let the limiter wake up
    # sleepers when the remote comes back.
    limiter = await synapse.util.retryutils.get_retry_limiter(
        request.destination,
        self.clock,
        self._store,
        backoff_on_404=backoff_on_404,
        ignore_backoff=ignore_backoff,
        notifier=self.hs.get_notifier(),
        replication_client=self.hs.get_replication_command_handler(),
    )

    method_bytes = request.method.encode("ascii")
    destination_bytes = request.destination.encode("ascii")
    path_bytes = request.path.encode("ascii")
    query_bytes = encode_query_args(request.query)

    # Tracing span covering the whole (possibly retried) request.
    scope = start_active_span(
        "outgoing-federation-request",
        tags={
            tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
            tags.PEER_ADDRESS: request.destination,
            tags.HTTP_METHOD: request.method,
            tags.HTTP_URL: request.path,
        },
        finish_on_close=True,
    )

    # Inject the span into the headers
    headers_dict: Dict[bytes, List[bytes]] = {}
    opentracing.inject_header_dict(headers_dict, request.destination)

    headers_dict[b"User-Agent"] = [self.version_string_bytes]

    with limiter, scope:
        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        url_bytes = request.uri
        url_str = url_bytes.decode("ascii")

        # Relative URL (no scheme/authority) used for the auth headers.
        url_to_sign_bytes = urllib.parse.urlunparse(
            (b"", b"", path_bytes, None, query_bytes, b"")
        )

        while True:
            try:
                # NOTE: the JSON body (if any) is re-fetched and re-signed on
                # every retry attempt.
                json = request.get_json()
                if json:
                    headers_dict[b"Content-Type"] = [b"application/json"]
                    auth_headers = self.build_auth_headers(
                        destination_bytes, method_bytes, url_to_sign_bytes, json
                    )
                    data = encode_canonical_json(json)
                    producer: Optional[IBodyProducer] = QuieterFileBodyProducer(
                        BytesIO(data), cooperator=self._cooperator
                    )
                else:
                    producer = None
                    auth_headers = self.build_auth_headers(
                        destination_bytes, method_bytes, url_to_sign_bytes
                    )

                headers_dict[b"Authorization"] = auth_headers

                logger.debug(
                    "{%s} [%s] Sending request: %s %s; timeout %fs",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _sec_timeout,
                )

                outgoing_requests_counter.labels(request.method).inc()

                try:
                    with Measure(self.clock, "outbound_request"):
                        # we don't want all the fancy cookie and redirect handling
                        # that treq.request gives: just use the raw Agent.

                        # To preserve the logging context, the timeout is treated
                        # in a similar way to `defer.gatherResults`:
                        # * Each logging context-preserving fork is wrapped in
                        #   `run_in_background`. In this case there is only one,
                        #   since the timeout fork is not logging-context aware.
                        # * The `Deferred` that joins the forks back together is
                        #   wrapped in `make_deferred_yieldable` to restore the
                        #   logging context regardless of the path taken.
                        request_deferred = run_in_background(
                            self.agent.request,
                            method_bytes,
                            url_bytes,
                            headers=Headers(headers_dict),
                            bodyProducer=producer,
                        )
                        request_deferred = timeout_deferred(
                            request_deferred,
                            timeout=_sec_timeout,
                            reactor=self.reactor,
                        )

                        response = await make_deferred_yieldable(request_deferred)
                except DNSLookupError as e:
                    # Whether DNS failures are retryable is caller-controlled.
                    raise RequestSendFailed(e, can_retry=retry_on_dns_fail) from e
                except Exception as e:
                    # Any other connection-level failure is wrapped so the
                    # outer retry handler can decide to re-send.
                    raise RequestSendFailed(e, can_retry=True) from e

                incoming_responses_counter.labels(
                    request.method, response.code
                ).inc()

                set_tag(tags.HTTP_STATUS_CODE, response.code)
                response_phrase = response.phrase.decode("ascii", errors="replace")

                if 200 <= response.code < 300:
                    logger.debug(
                        "{%s} [%s] Got response headers: %d %s",
                        request.txn_id,
                        request.destination,
                        response.code,
                        response_phrase,
                    )
                else:
                    logger.info(
                        "{%s} [%s] Got response headers: %d %s",
                        request.txn_id,
                        request.destination,
                        response.code,
                        response_phrase,
                    )
                    # :'(
                    # Update transactions table?
                    # Read the error body (with its own timeout) so it can be
                    # attached to the HttpResponseException.
                    d = treq.content(response)
                    d = timeout_deferred(
                        d, timeout=_sec_timeout, reactor=self.reactor
                    )

                    try:
                        body = await make_deferred_yieldable(d)
                    except Exception as e:
                        # Eh, we're already going to raise an exception so lets
                        # ignore if this fails.
                        logger.warning(
                            "{%s} [%s] Failed to get error response: %s %s: %s",
                            request.txn_id,
                            request.destination,
                            request.method,
                            url_str,
                            _flatten_response_never_received(e),
                        )
                        body = None

                    exc = HttpResponseException(
                        response.code, response_phrase, body
                    )

                    # Retry if the error is a 5xx or a 429 (Too Many
                    # Requests), otherwise just raise a standard
                    # `HttpResponseException`
                    if 500 <= response.code < 600 or response.code == 429:
                        raise RequestSendFailed(exc, can_retry=True) from exc
                    else:
                        raise exc

                break
            except RequestSendFailed as e:
                logger.info(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e.inner_exception),
                )

                if not e.can_retry:
                    raise

                # Only retry when no explicit timeout was requested;
                # exponential backoff with -20%/+40% jitter.
                if retries_left and not timeout:
                    if long_retries:
                        delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                        delay = min(delay, 60)
                        delay *= random.uniform(0.8, 1.4)
                    else:
                        delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                        delay = min(delay, 2)
                        delay *= random.uniform(0.8, 1.4)

                    logger.debug(
                        "{%s} [%s] Waiting %ss before re-sending...",
                        request.txn_id,
                        request.destination,
                        delay,
                    )

                    # Sleep for the calculated delay, or wake up immediately
                    # if we get notified that the server is back up.
                    await self._sleeper.sleep(request.destination, delay * 1000)
                    retries_left -= 1
                else:
                    raise
            except Exception as e:
                logger.warning(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e),
                )
                raise
    return response