def test_create_retry_handler_with_no_operation(self):
    handler = retryhandler.create_retry_handler(
        self.retry_config, operation_name=None)
    self.assertIsInstance(handler, retryhandler.RetryHandler)
    # There's no good way to test the delay function used as the
    # action other than to just invoke it.
    self.assertEqual(handler._action(attempts=2), 2)
    self.assertEqual(handler._action(attempts=3), 4)
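# A sketch of the self.retry_config fixture these tests depend on.  The
# values are inferred, not copied from the source: the asserted delays of
# 1, 2, 4, and 8 seconds match exponential backoff computed as
# base * growth_factor ** (attempts - 1) with base=1 and growth_factor=2,
# and the propagation tests below imply max_attempts of 5.
RETRY_CONFIG_SKETCH = {
    '__default__': {
        'max_attempts': 5,
        'delay': {
            'type': 'exponential',
            'base': 1,
            'growth_factor': 2,
        },
    },
    # Per-operation policies referenced via operation_name below,
    # e.g. a crc32 body check for 'OperationFoo' and connection error
    # retries for 'OperationBar' (exact policy contents assumed).
    'OperationFoo': {'policies': {}},
    'OperationBar': {'policies': {}},
}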
def test_connection_timeouts_are_retried(self):
    # If a connection times out, we get a Timeout exception
    # from requests.  We should be retrying those.
    handler = retryhandler.create_retry_handler(
        self.retry_config, operation_name='OperationBar')
    sleep_time = handler(response=None, attempts=1,
                         caught_exception=Timeout())
    self.assertEqual(sleep_time, 1)
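# The imports these excerpts appear to assume, reconstructed from the
# names used rather than copied from the source (botocore later vendored
# requests, in which case the requests imports would come from
# botocore.vendored instead):
import mock

from requests import Timeout
from requests.packages.urllib3.exceptions import ClosedPoolError

from botocore import retryhandler
from botocore.exceptions import ChecksumError, EndpointConnectionError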
def _register_for_operations(config, session, service_name):
    # There's certainly a tradeoff for registering the retry config
    # for the operations when the service is created.  In practice,
    # there aren't a whole lot of per-operation retry configs so
    # this is ok for now.
    for key in config:
        if key == '__default__':
            continue
        handler = retryhandler.create_retry_handler(config, key)
        unique_id = 'retry-config-%s-%s' % (service_name, key)
        session.register('needs-retry.%s.%s' % (service_name, key),
                         handler, unique_id=unique_id)
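# A sketch of what the loop above produces (the service and operation
# names here are illustrative, not from the source).  Top-level config
# keys are operation names plus '__default__'; the default key is skipped
# because it is covered by the service-wide handler registered in
# register_retries_for_service below.
example_config = {
    '__default__': {'max_attempts': 5},
    'CopySnapshot': {'policies': {}},
}
# _register_for_operations(example_config, session, service_name='ec2')
# would register one handler for the event 'needs-retry.ec2.CopySnapshot'
# under the unique id 'retry-config-ec2-CopySnapshot'.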
def test_retry_pool_closed_errors(self):
    # A ClosedPoolError is retried (this is a workaround for a urllib3
    # bug).  Can be removed once we upgrade to requests 2.0.0.
    handler = retryhandler.create_retry_handler(
        self.retry_config, operation_name='OperationBar')
    # The 4th attempt is retried.
    sleep_time = handler(response=None, attempts=4,
                         caught_exception=ClosedPoolError(
                             'FakePool', 'Message'))
    self.assertEqual(sleep_time, 8)
    # But once we're past max_attempts the error propagates.
    with self.assertRaises(ClosedPoolError):
        handler(response=None, attempts=10,
                caught_exception=ClosedPoolError('FakePool', 'Message'))
def test_create_retry_handler_with_socket_errors(self):
    handler = retryhandler.create_retry_handler(
        self.retry_config, operation_name='OperationBar')
    exception = EndpointConnectionError(endpoint_url='')
    with self.assertRaises(EndpointConnectionError):
        handler(response=None, attempts=10,
                caught_exception=exception)
    # No connection error raised because attempts < max_attempts.
    sleep_time = handler(response=None, attempts=1,
                         caught_exception=exception)
    self.assertEqual(sleep_time, 1)
    # But any other exception should be raised even if
    # attempts < max_attempts.
    with self.assertRaises(ValueError):
        sleep_time = handler(response=None, attempts=1,
                             caught_exception=ValueError())
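# For context, a sketch of why ValueError is not retried above: the
# socket_errors policy checks caught exceptions against a fixed set of
# connection-related types, roughly the tuple below (names assumed to
# mirror botocore's retryhandler exception mapping; the types are the
# ones imported earlier in this file).
GENERAL_CONNECTION_ERRORS = (
    ConnectionError, ClosedPoolError, Timeout, EndpointConnectionError,
)
# Anything outside this set is re-raised immediately, even when
# attempts < max_attempts.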
def test_crc32_check_propagates_error(self):
    handler = retryhandler.create_retry_handler(
        self.retry_config, operation_name='OperationFoo')
    http_response = mock.Mock()
    http_response.status_code = 200
    # This is not the crc32 of b'foo', so this should
    # fail the crc32 check.
    http_response.headers = {'x-amz-crc32': 2356372768}
    http_response.content = b'foo'
    # While attempts < max_attempts we get a retry.
    self.assertEqual(
        handler(response=(http_response, {}), attempts=1,
                caught_exception=None), 1)
    # Once we're past max_attempts the checksum failure propagates.
    with self.assertRaises(ChecksumError):
        handler(response=(http_response, {}), attempts=10,
                caught_exception=None)
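# Why the header above fails: the crc32 policy compares the integer in
# the x-amz-crc32 header against the CRC32 of the response body.  A
# rough sketch of that comparison follows (the real check lives in
# botocore's retryhandler module; binascii.crc32(b'foo') masked to
# unsigned is 2356372769, one more than the header value in the test).
from binascii import crc32

def crc32_matches(http_response):
    expected = int(http_response.headers['x-amz-crc32'])
    # Mask to an unsigned 32-bit value for Python 2 compatibility.
    return crc32(http_response.content) & 0xffffffff == expected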
def register_retries_for_service(service_data, session,
                                 service_name, **kwargs):
    loader = session.get_component('data_loader')
    endpoint_prefix = service_data.get('metadata', {}).get('endpointPrefix')
    if endpoint_prefix is None:
        logger.debug(
            "Not registering retry handlers, could not get endpoint "
            "prefix from model for service %s", service_name)
        return
    config = _load_retry_config(loader, endpoint_prefix)
    if not config:
        return
    logger.debug("Registering retry handlers for service: %s", service_name)
    handler = retryhandler.create_retry_handler(config, endpoint_prefix)
    unique_id = 'retry-config-%s' % endpoint_prefix
    session.register('needs-retry.%s' % endpoint_prefix,
                     handler, unique_id=unique_id)
    _register_for_operations(config, session, service_name=endpoint_prefix)
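# How this function is expected to be invoked (an assumption based on
# botocore's conventions, not shown in this excerpt): it is registered
# as a built-in handler for the 'service-data-loaded' event, which fires
# once per service with service_data, service_name, and the session as
# keyword arguments matching this function's signature.  It then
# registers the 'needs-retry.<endpoint_prefix>' handlers consulted
# during request dispatch.
BUILTIN_HANDLERS = [
    ('service-data-loaded', register_retries_for_service),
]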