def test_linear_retry_interval_async(self):
        # Arrange
        context_stub = {}

        for i in range(10):
            # Act
            retry_policy = LinearRetry(backoff=1, random_jitter_range=3)
            backoff = retry_policy.get_backoff_time(context_stub)

            # Assert backoff interval is within +/- 3 of 1
            self.assertTrue(0 <= backoff <= 4)

            # Act
            retry_policy = LinearRetry(backoff=5, random_jitter_range=3)
            backoff = retry_policy.get_backoff_time(context_stub)

            # Assert backoff interval is within +/- 3 of 5
            self.assertTrue(2 <= backoff <= 8)

            # Act
            retry_policy = LinearRetry(backoff=15, random_jitter_range=3)
            backoff = retry_policy.get_backoff_time(context_stub)

            # Assert backoff interval is within +/- 3 of 15
            self.assertTrue(12 <= backoff <= 18)
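
    # The bounds asserted above follow from LinearRetry drawing a value around
    # `backoff`, offset by up to `random_jitter_range` seconds in either
    # direction and never below zero. A minimal sketch of that computation,
    # assuming a uniform jitter -- an illustration, not the SDK's actual code:
    @staticmethod
    def _linear_backoff_sketch(backoff, random_jitter_range):
        import random
        lower = max(0, backoff - random_jitter_range)
        upper = backoff + random_jitter_range
        return random.uniform(lower, upper)
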
    async def _test_retry_on_socket_timeout_async(self):
        if TestMode.need_recording_file(self.test_mode):
            return
        # Arrange
        container_name = self.get_resource_name()
        retry = LinearRetry(backoff=1)

        # set the connection/socket timeout impossibly small so the request always times out on the client side
        socket_timeout = 0.000000000001
        service = self._create_storage_service(
            BlobServiceClient,
            self.settings,
            retry_policy=retry,
            connection_timeout=socket_timeout,
            transport=AiohttpTestTransport())

        assert service._client._client._pipeline._transport.connection_config.timeout == socket_timeout

        # Act
        try:
            with self.assertRaises(ServiceResponseError) as error:
                await service.create_container(container_name)
            # Assert
            # This call should succeed on the server side, but fail on the client side due to socket timeout
            self.assertTrue(
                'read timeout' in str(error.exception),
                'Expected socket timeout but got different exception.')

        finally:
            # we must make the timeout normal again to let the delete operation succeed
            try:
                await service.delete_container(container_name,
                                               connection_timeout=11)
            except Exception:
                pass
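
    # `_create_storage_service` is a private helper of the test base class and its
    # real signature is not shown in this excerpt. A hypothetical sketch of such a
    # helper, assuming it only forwards keyword arguments (retry_policy,
    # connection_timeout, transport, ...) into the client constructor -- the name
    # and parameters below are illustrative only:
    def _create_storage_service_sketch(self, service_class, account_url, credential, **kwargs):
        # retry_policy, connection_timeout and transport are regular azure-core /
        # azure-storage keyword arguments and can be passed straight through.
        return service_class(account_url, credential=credential, **kwargs)
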
    async def _test_retry_callback_and_retry_context_async(self):
        # Arrange
        container_name = self.get_resource_name()
        retry = LinearRetry(backoff=1)
        service = self._create_storage_service(
            BlobServiceClient,
            self.settings,
            retry_policy=retry,
            transport=AiohttpTestTransport())

        # Force the create call to 'timeout' with a 408
        callback = ResponseCallback(status=201, new_status=408).override_status

        def assert_exception_is_present_on_retry_context(**kwargs):
            self.assertIsNotNone(kwargs.get('response'))
            self.assertEqual(kwargs['response'].status_code, 408)

        # Act
        try:
            # The initial create will return 201, but we overwrite it with a 408 and retry.
            # The retry will then hit a 409 Conflict, which surfaces as ResourceExistsError.
            with self.assertRaises(ResourceExistsError):
                await service.create_container(
                    container_name,
                    raw_response_hook=callback,
                    retry_hook=assert_exception_is_present_on_retry_context)
        finally:
            await service.delete_container(container_name)
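
# ResponseCallback comes from the shared test helpers; above it is used as a
# raw_response_hook that rewrites a matching status code so the retry policy kicks
# in. A minimal sketch of such a hook, assuming the hook receives the pipeline
# response and that mutating http_response.status_code is enough for the test --
# an illustration, not necessarily the helper's real implementation:
class StatusRewriteCallback(object):
    def __init__(self, status=None, new_status=None):
        self.status = status
        self.new_status = new_status
        self.count = 0

    def override_status(self, response):
        # Called once per response: rewrite e.g. a 201 into a 408 so the client
        # treats the call as retryable.
        if response.http_response.status_code == self.status:
            response.http_response.status_code = self.new_status
        self.count += 1
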
Example no. 4
    async def test_retry_on_socket_timeout_async(self, resource_group, location, storage_account, storage_account_key):
        # Arrange
        container_name = self.get_resource_name('utcontainer')
        retry = LinearRetry(backoff=1)
        # make the connect timeout reasonable, but the read timeout truly small, to make sure the request always times out
        retry_transport = AiohttpRetryTestTransport(connection_timeout=11, read_timeout=0.000000000001)
        service = self._create_storage_service(
            BlobServiceClient, storage_account, storage_account_key, retry_policy=retry, transport=retry_transport)

        assert service._client._client._pipeline._transport.connection_config.timeout == 11
        assert service._client._client._pipeline._transport.connection_config.read_timeout == 0.000000000001

        # Act
        try:
            with self.assertRaises(AzureError) as error:
                await service.create_container(container_name)

            # Assert
            # 3 retries + 1 original == 4
            assert retry_transport.count == 4
            # This call should succeed on the server side, but fail on the client side due to socket timeout
            self.assertTrue(
                'Timeout on reading data from socket' in str(error.exception),
                'Expected socket timeout but got different exception.'
            )

        finally:
            # we must make the timeout normal again to let the delete operation succeed
            try:
                await service.delete_container(container_name, connection_timeout=11)
            except Exception:
                pass
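
# AiohttpRetryTestTransport is what makes the `retry_transport.count == 4` assertion
# above possible. A minimal sketch of such a counting transport, assuming it can
# subclass azure.core's AioHttpTransport and bump a counter on every send -- an
# illustration, not necessarily the real test helper:
from azure.core.pipeline.transport import AioHttpTransport

class CountingAioHttpTransport(AioHttpTransport):
    def __init__(self, *args, **kwargs):
        super(CountingAioHttpTransport, self).__init__(*args, **kwargs)
        self.count = 0

    async def send(self, request, **kwargs):
        # Every attempt (the original request plus each retry) passes through here,
        # so `count` ends up as 1 + the number of retries.
        self.count += 1
        return await super(CountingAioHttpTransport, self).send(request, **kwargs)
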
Example no. 5
    async def test_linear_retry_async(self, resource_group, location, storage_account, storage_account_key):
        # Arrange
        container_name = self.get_resource_name('utcontainer')
        retry = LinearRetry(backoff=1)
        service = self._create_storage_service(
            BlobServiceClient, storage_account, storage_account_key, retry_policy=retry, transport=AiohttpTestTransport())

        # Force the create call to 'timeout' with a 408
        callback = ResponseCallback(status=201, new_status=408).override_status

        # Act
        try:
            # The initial create will return 201, but we overwrite it with a 408 and retry.
            # The retry will then hit a 409 Conflict, which surfaces as ResourceExistsError.
            with self.assertRaises(ResourceExistsError):
                await service.create_container(container_name, raw_response_hook=callback)
        finally:
            await service.delete_container(container_name)