def test_catch_invalid_imds_error(self):
    """A LocationParseError surfacing from the HTTP layer must be reported
    as InvalidIMDSEndpointError by the metadata fetcher."""
    with mock.patch('botocore.httpsession.URLLib3Session.send') as send_mock:
        fetcher = InstanceMetadataFetcher()
        # Simulate urllib3 rejecting the endpoint URL; the session layer
        # wraps that in HTTPClientError before the fetcher sees it.
        parse_error = LocationParseError(location="foo")
        send_mock.side_effect = HTTPClientError(error=parse_error)
        with self.assertRaises(InvalidIMDSEndpointError):
            fetcher.retrieve_iam_role_credentials()
def send(self, request):
    """Send an AWSRequest via urllib3 and return an AWSResponse.

    Maps the various urllib3/socket failure modes onto botocore's own
    exception hierarchy; any unrecognized failure is wrapped in
    HTTPClientError.
    """
    try:
        proxy_url = self._proxy_config.proxy_url_for(request.url)
        manager = self._get_connection_manager(request.url, proxy_url)
        conn = manager.connection_from_url(request.url)
        self._setup_ssl_cert(conn, request.url, self._verify)
        request_target = self._get_request_target(request.url, proxy_url)
        # Retries are handled at a higher layer, hence Retry(False).
        raw_response = conn.urlopen(
            method=request.method,
            url=request_target,
            body=request.body,
            headers=request.headers,
            retries=Retry(False),
            assert_same_host=False,
            preload_content=False,
            decode_content=False,
            chunked=self._chunked(request.headers),
        )
        aws_response = botocore.awsrequest.AWSResponse(
            request.url,
            raw_response.status,
            raw_response.headers,
            raw_response,
        )
        if not request.stream_output:
            # Cause the raw stream to be exhausted immediately. We do it
            # this way instead of using preload_content because
            # preload_content will never buffer chunked responses
            aws_response.content
        return aws_response
    except URLLib3SSLError as e:
        raise SSLError(endpoint_url=request.url, error=e)
    except (NewConnectionError, socket.gaierror) as e:
        raise EndpointConnectionError(endpoint_url=request.url, error=e)
    except ProxyError as e:
        raise ProxyConnectionError(proxy_url=proxy_url, error=e)
    except URLLib3ConnectTimeoutError as e:
        raise ConnectTimeoutError(endpoint_url=request.url, error=e)
    except URLLib3ReadTimeoutError as e:
        raise ReadTimeoutError(endpoint_url=request.url, error=e)
    except ProtocolError as e:
        raise ConnectionClosedError(
            error=e, request=request, endpoint_url=request.url
        )
    except Exception as e:
        message = 'Exception received when sending urllib3 HTTP request'
        logger.debug(message, exc_info=True)
        raise HTTPClientError(error=e)
def test_retry_on_socket_errors(self):
    """An HTTPClientError from the session is retried once, then re-raised,
    emitting the full event sequence for each of the two attempts."""
    self.event_emitter.emit.side_effect = self.get_emitter_responses(
        num_retries=1)
    self.http_session.send.side_effect = HTTPClientError(error='wrapped')
    with self.assertRaises(HTTPClientError):
        self.endpoint.make_request(self._operation, request_dict())
    # One full cycle of events per attempt (initial try + one retry).
    events_per_attempt = [
        'request-created.ec2.DescribeInstances',
        'before-send.ec2.DescribeInstances',
        'response-received.ec2.DescribeInstances',
        'needs-retry.ec2.DescribeInstances',
    ]
    self.assert_events_emitted(
        self.event_emitter,
        expected_events=events_per_attempt * 2)
(400, 'PriorRequestNotComplete', True), # "Any HTTP response with an HTTP status code of 500, 502, 503, or 504". (500, None, True), (502, None, True), (503, None, True), (504, None, True), # We'll also add a few errors with an explicit error code to verify # that the code doesn't matter. (500, 'InternalServiceError', True), (502, 'BadError', True), # These are botocore specific errors that correspond to # "Any IO (socket) level error where we are unable to read an HTTP # response. (None, ConnectionError(error='unknown'), True), (None, HTTPClientError(error='unknown'), True), # Negative cases (200, None, False), # This is a throttling error not a transient error (400, 'Throttling', False), (400, None, False), ] # These tests are intended to be paired with the # SERVICE_DESCRIPTION_WITH_RETRIES definition. RETRYABLE_MODELED_ERRORS = [ (400, 'ModeledThrottlingError', True), (400, 'ModeledRetryableError', True), # Note this is ErrorCodeRetryable, not ModeledRetryableErrorWithCode, # because the shape has a error code defined for it.
def send(self, request):
    """Send an AWSRequest via urllib3 and return an AWSResponse.

    Identical to the plain send path, plus an opt-in experimental feature
    (BOTO_EXPERIMENTAL__ADD_PROXY_HOST_HEADER) that forwards the target
    host as a proxy header. Failures are translated into botocore's
    exception hierarchy; anything unrecognized becomes HTTPClientError.
    """
    try:
        proxy_url = self._proxy_config.proxy_url_for(request.url)
        manager = self._get_connection_manager(request.url, proxy_url)
        conn = manager.connection_from_url(request.url)
        self._setup_ssl_cert(conn, request.url, self._verify)
        if ensure_boolean(
            os.environ.get('BOTO_EXPERIMENTAL__ADD_PROXY_HOST_HEADER', '')
        ):
            # This is currently an "experimental" feature which provides
            # no guarantees of backwards compatibility. It may be subject
            # to change or removal in any patch version. Anyone opting in
            # to this feature should strictly pin botocore.
            host = urlparse(request.url).hostname
            conn.proxy_headers['host'] = host
        request_target = self._get_request_target(request.url, proxy_url)
        # Retries are handled at a higher layer, hence Retry(False).
        raw_response = conn.urlopen(
            method=request.method,
            url=request_target,
            body=request.body,
            headers=request.headers,
            retries=Retry(False),
            assert_same_host=False,
            preload_content=False,
            decode_content=False,
            chunked=self._chunked(request.headers),
        )
        aws_response = botocore.awsrequest.AWSResponse(
            request.url,
            raw_response.status,
            raw_response.headers,
            raw_response,
        )
        if not request.stream_output:
            # Cause the raw stream to be exhausted immediately. We do it
            # this way instead of using preload_content because
            # preload_content will never buffer chunked responses
            aws_response.content
        return aws_response
    except URLLib3SSLError as e:
        raise SSLError(endpoint_url=request.url, error=e)
    except (NewConnectionError, socket.gaierror) as e:
        raise EndpointConnectionError(endpoint_url=request.url, error=e)
    except ProxyError as e:
        raise ProxyConnectionError(proxy_url=proxy_url, error=e)
    except URLLib3ConnectTimeoutError as e:
        raise ConnectTimeoutError(endpoint_url=request.url, error=e)
    except URLLib3ReadTimeoutError as e:
        raise ReadTimeoutError(endpoint_url=request.url, error=e)
    except ProtocolError as e:
        raise ConnectionClosedError(
            error=e, request=request, endpoint_url=request.url
        )
    except Exception as e:
        message = 'Exception received when sending urllib3 HTTP request'
        logger.debug(message, exc_info=True)
        raise HTTPClientError(error=e)