Beispiel #1
0
def upload_from_url(url, s3_key, on_stream_opened=None):
    """Stream the resource at ``url`` into S3 under ``s3_key``.

    Args:
        url: Source URL, fetched with ``requests`` in streaming mode.
        s3_key: Destination key within the configured loch bucket.
        on_stream_opened: Optional callback invoked with the response
            headers once a 200 response has been opened.

    Returns:
        The S3 ``head_object`` response for the uploaded key, or ``None``
        if that response is falsy.

    Raises:
        ConnectionError: If the source returns a non-200 status.
        ClientError, ValueError: Propagated from the S3 upload.
    """
    bucket = app.config['LOCH_S3_BUCKET']
    s3_url = build_s3_url(s3_key)
    with requests.get(url, stream=True) as response:
        if response.status_code != 200:
            app.logger.error(
                f'Received unexpected status code, aborting S3 upload '
                f'(status={response.status_code}, body={response.text}, key={s3_key} url={url})')
            raise ConnectionError(f'Response {response.status_code}: {response.text}')
        if on_stream_opened:
            on_stream_opened(response.headers)
        try:
            s3_upload_args = {'ServerSideEncryption': 'AES256'}
            if s3_url.endswith('.gz'):
                # Source payload is already gzipped; advertise the encoding
                # so downstream consumers decompress it correctly.
                s3_upload_args.update({
                    'ContentEncoding': 'gzip',
                    'ContentType': 'text/plain',
                })
            # smart_open needs to be told to ignore the .gz extension, or it will smartly attempt to double-compress it.
            with smart_open.smart_open(s3_url, 'wb', ignore_extension=True, s3_upload_args=s3_upload_args) as s3_out:
                for chunk in response.iter_content(chunk_size=1024):
                    s3_out.write(chunk)
        except (ClientError, ConnectionError, ValueError) as e:
            app.logger.error(f'Error on S3 upload: source_url={url}, bucket={bucket}, key={s3_key}, error={e}')
            # Bare raise re-raises the active exception without re-binding it,
            # the idiomatic form inside an except block.
            raise
    s3_response = get_client().head_object(Bucket=bucket, Key=s3_key)
    if s3_response:
        app.logger.info(f'S3 upload complete: source_url={url}, bucket={bucket}, key={s3_key}')
        return s3_response
    def test_feed_with_wire_exception_retries_exceeded(self):
        """A retryable wire exception on the final attempt produces an
        APICallEvent flagged with retries_exceeded."""
        self.feed_before_parameter_build_event(current_time=1)
        self.feed_request_created_event(current_time=2)

        # Connection errors are retryable
        conn_error = ConnectionError(error='connection issue')

        self.mock_time.return_value = 3
        attempt_payload = {
            'parsed_response': None,
            'context': self.context,
            'exception': conn_error,
        }
        attempt_event = self.adapter.feed('response-received', attempt_payload)

        self.mock_time.return_value = 4
        call_payload = {
            'exception': conn_error,
            'context': self.context,
        }
        call_event = self.adapter.feed('after-call-error', call_payload)

        expected = APICallEvent(
            service=self.service_id,
            operation=self.wire_name,
            timestamp=1000,
            latency=3000,
            attempts=[attempt_event],
            retries_exceeded=True,
        )
        self.assertEqual(call_event, expected)
Beispiel #3
0
 def setUp(self):
     """Create canned parsed responses, HTTP fixtures, and a reusable
     exception for the tests in this case."""
     self.success_response = {'ResponseMetadata': {}, 'Foo': {}}
     self.failed_response = {'ResponseMetadata': {}, 'Error': {}}

     # Both HTTP fixtures are identical except for the status code; build
     # each one fresh so they do not share mutable header dicts.
     def make_response(status):
         return AWSResponse(
             status_code=status, raw=None, headers={}, url='https://foo/')

     self.http_success = make_response(200)
     self.http_failed = make_response(500)
     self.caught_exception = ConnectionError(error='unknown')
Beispiel #4
0
 def test_describe_endpoint_optional_fails_no_cache(self):
     """With optional discovery, a refresh failure yields None and the
     failure blocks an immediate retry of the refresh."""
     side_effect = [ConnectionError(error=None)]
     self.construct_manager(side_effect=side_effect)
     kwargs = {'Operation': 'TestDiscoveryOptional'}
     endpoint = self.manager.describe_endpoint(**kwargs)
     self.assertIsNone(endpoint)
     # This second call should be blocked as we just failed
     endpoint = self.manager.describe_endpoint(**kwargs)
     self.assertIsNone(endpoint)
     # Bug fix: the original line was a bare `== [mock.call()]` comparison
     # whose result was discarded — a no-op. Assert the intended invariant:
     # the client was only invoked once.
     self.assertEqual(self.client.describe_endpoints.call_count, 1)
Beispiel #5
0
 def test_describe_endpoint_required_fails_no_cache(self):
     """When discovery is required and no cache exists, every call must
     attempt a refresh and surface the refresh failure."""
     errors = [ConnectionError(error=None)] * 2
     self.construct_manager(side_effect=errors)
     op_kwargs = {'Operation': 'TestDiscoveryRequired'}
     with self.assertRaises(EndpointDiscoveryRefreshFailed):
         self.manager.describe_endpoint(**op_kwargs)
     # This second call should go through, as we have no cache
     with self.assertRaises(EndpointDiscoveryRefreshFailed):
         self.manager.describe_endpoint(**op_kwargs)
     self.assertEqual(self.client.describe_endpoints.call_count, 2)
Beispiel #6
0
 def put_object(self, Bucket, *args, **kwargs):
     """Stub put_object that fails for specially named buckets.

     Raises ClientError for 's3_api_failure' and ConnectionError for
     's3_connection_error'; any other bucket name is a silent no-op.
     """
     if Bucket == 's3_api_failure':
         error_response = {
             'Error': {
                 'Code': 'MyCode',
                 'Message': 'MyMessage',
             },
         }
         raise ClientError(
             error_response=error_response,
             operation_name='myoperation',
         )
     if Bucket == 's3_connection_error':
         raise ConnectionError(error='MyMessage')
Beispiel #7
0
 def test_describe_endpoint_required_fails_stale_cache(self):
     """A failed required refresh falls back to the stale cached endpoint,
     and the failure blocks an immediate second refresh."""
     key = ()
     cache = {key: [{'Address': 'old.com', 'Expiration': 0}]}
     side_effect = [ConnectionError(error=None)] * 2
     self.construct_manager(cache=cache, side_effect=side_effect)
     kwargs = {'Operation': 'TestDiscoveryRequired'}
     endpoint = self.manager.describe_endpoint(**kwargs)
     self.assertEqual(endpoint, 'old.com')
     # We have a stale endpoint, so this shouldn't fail or force a refresh
     endpoint = self.manager.describe_endpoint(**kwargs)
     self.assertEqual(endpoint, 'old.com')
     # Bug fix: the original line was a bare `== [mock.call()]` comparison
     # whose result was discarded — a no-op. Assert the intended invariant:
     # the client was only invoked once.
     self.assertEqual(self.client.describe_endpoints.call_count, 1)
Beispiel #8
0
 def test_describe_endpoint_optional_fails_stale_cache(self):
     """A failed optional refresh falls back to the stale cached endpoint
     and does not retry the refresh on the next call."""
     key = ()
     cache = {key: [{'Address': 'old.com', 'Expiration': 0}]}
     side_effect = [ConnectionError(error=None)] * 2
     self.construct_manager(cache=cache, side_effect=side_effect)
     kwargs = {'Operation': 'TestDiscoveryOptional'}
     endpoint = self.manager.describe_endpoint(**kwargs)
     self.assertEqual(endpoint, 'old.com')
     # This second call shouldn't go through as we just failed
     endpoint = self.manager.describe_endpoint(**kwargs)
     self.assertEqual(endpoint, 'old.com')
     # Bug fix: the original line was a bare `== [mock.call()]` comparison
     # whose result was discarded — a no-op. Assert the intended invariant:
     # the client was only invoked once.
     self.assertEqual(self.client.describe_endpoints.call_count, 1)
Beispiel #9
0
 def test_describe_endpoint_required_force_refresh_success(self):
     """After a failed required refresh with an empty cache, the next
     call forces another refresh, which succeeds."""
     side_effect = [
         ConnectionError(error=None),
         {
             'Endpoints': [{
                 'Address': 'new.com',
                 'CachePeriodInMinutes': 2,
             }]
         },
     ]
     self.construct_manager(side_effect=side_effect)
     kwargs = {'Operation': 'TestDiscoveryRequired'}
     # First call will fail
     with self.assertRaises(EndpointDiscoveryRefreshFailed):
         self.manager.describe_endpoint(**kwargs)
     # Bug fix: the original line was a bare `== [mock.call()]` comparison
     # whose result was discarded — a no-op. Assert the intended invariant:
     # exactly one call has happened so far.
     self.assertEqual(self.client.describe_endpoints.call_count, 1)
     # Force a refresh if the cache is empty but discovery is required
     endpoint = self.manager.describe_endpoint(**kwargs)
     self.assertEqual(endpoint, 'new.com')
Beispiel #10
0
 def test_describe_endpoint_retries_after_failing(self):
     """Once enough time has elapsed after a failed refresh, a later call
     retries discovery and succeeds."""
     fake_time = mock.Mock()
     fake_time.side_effect = [0, 100, 200]
     side_effect = [
         ConnectionError(error=None),
         {
             'Endpoints': [{
                 'Address': 'new.com',
                 'CachePeriodInMinutes': 2,
             }]
         },
     ]
     self.construct_manager(side_effect=side_effect, time=fake_time)
     kwargs = {'Operation': 'TestDiscoveryOptional'}
     endpoint = self.manager.describe_endpoint(**kwargs)
     self.assertIsNone(endpoint)
     # Bug fix: the original line was a bare `== [mock.call()]` comparison
     # whose result was discarded — a no-op. Assert the intended invariant:
     # exactly one call has happened so far.
     self.assertEqual(self.client.describe_endpoints.call_count, 1)
     # Second time should try again as enough time has elapsed
     endpoint = self.manager.describe_endpoint(**kwargs)
     self.assertEqual(endpoint, 'new.com')
Beispiel #11
0
 def test_find_usage_no_endpoint(self):
     """find_usage tolerates a ConnectionError from pagination and still
     marks usage as collected, with zero file systems recorded."""
     conn_error = ConnectionError(error='foo')
     mock_conn = Mock()
     with patch('%s.connect' % pb) as mock_connect, \
             patch('%s.paginate_dict' % pbm) as mock_paginate:
         mock_paginate.side_effect = conn_error
         svc = _EfsService(21, 43, {}, None)
         svc.conn = mock_conn
         assert svc._have_usage is False
         svc.find_usage()
     assert svc._have_usage is True
     assert mock_connect.mock_calls == [call()]
     expected_paginate_calls = [
         call(
             mock_conn.describe_file_systems,
             alc_marker_path=['NextMarker'],
             alc_data_path=['FileSystems'],
             alc_marker_param='Marker',
         )
     ]
     assert mock_paginate.mock_calls == expected_paginate_calls
     assert len(svc.limits) == 1
     usage = svc.limits['File systems'].get_current_usage()
     assert len(usage) == 0
Beispiel #12
0
def upload_from_url(url, s3_key, on_stream_opened=None):
    """Stream the resource at ``url`` into S3 under ``s3_key``.

    Args:
        url: Source URL, fetched with ``requests`` in streaming mode.
        s3_key: Destination key within the configured loch bucket.
        on_stream_opened: Optional callback invoked with the response
            headers once the upload stream is open.

    Returns:
        The S3 ``head_object`` response for the uploaded key, or ``None``
        if that response is falsy.

    Raises:
        ConnectionError: If the source returns a non-200 status.
        ClientError, ValueError: Propagated from the S3 upload.
    """
    bucket = app.config['LOCH_S3_BUCKET']
    with requests.get(url, stream=True) as response:
        if response.status_code != 200:
            app.logger.error(
                f'Received unexpected status code, aborting S3 upload '
                f'(status={response.status_code}, body={response.text}, key={s3_key} url={url})'
            )
            raise ConnectionError(
                f'Response {response.status_code}: {response.text}')
        try:
            with stream_upload(response, s3_key):
                # Bug fix: on_stream_opened defaults to None, so calling it
                # unconditionally raised TypeError whenever the default was
                # used. Guard it as the sibling implementation does.
                if on_stream_opened:
                    on_stream_opened(response.headers)
        except (ClientError, ConnectionError, ValueError) as e:
            app.logger.error(
                f'Error on S3 upload: source_url={url}, bucket={bucket}, key={s3_key}, error={e}'
            )
            # Bare raise re-raises the active exception without re-binding it.
            raise
    s3_response = get_client().head_object(Bucket=bucket, Key=s3_key)
    if s3_response:
        app.logger.info(
            f'S3 upload complete: source_url={url}, bucket={bucket}, key={s3_key}'
        )
        return s3_response
Beispiel #13
0
    (400, 'RequestTimeoutException', True),
    (400, 'PriorRequestNotComplete', True),

    # "Any HTTP response with an HTTP status code of 500, 502, 503, or 504".
    (500, None, True),
    (502, None, True),
    (503, None, True),
    (504, None, True),
    # We'll also add a few errors with an explicit error code to verify
    # that the code doesn't matter.
    (500, 'InternalServiceError', True),
    (502, 'BadError', True),
    # These are botocore specific errors that correspond to
    # "Any IO (socket) level error where we are unable to read an HTTP
    # response.
    (None, ConnectionError(error='unknown'), True),
    (None, HTTPClientError(error='unknown'), True),

    # Negative cases
    (200, None, False),
    # This is a throttling error not a transient error
    (400, 'Throttling', False),
    (400, None, False),
]

# These tests are intended to be paired with the
# SERVICE_DESCRIPTION_WITH_RETRIES definition.
RETRYABLE_MODELED_ERRORS = [
    (400, 'ModeledThrottlingError', True),
    (400, 'ModeledRetryableError', True),
    # Note this is ErrorCodeRetryable, not ModeledRetryableErrorWithCode,
Beispiel #14
0
        ("150", "05"),  # Group description
        ("285", "00"),  # Regulation
    ]

    codes = taric_xml_record_codes(xml)

    assert codes == expected_codes

    assert WorkBasket.objects.filter(
        status=WorkflowStatus.SENT).exists() == True


@mock.patch(
    "exporter.storages.HMRCStorage.save",
    side_effect=[
        ConnectionError(error={"endpoint_url": "http://example.com"}),
        SentinelError(),
    ],
)
def test_upload_workbaskets_retries(mock_save, settings):
    """Verify if HMRCStorage.save raises a boto.ConnectionError the task
    upload_workflow task retries based on
    settings.EXPORTER_UPLOAD_MAX_RETRIES."""
    settings.EXPORTER_DISABLE_NOTIFICATION = True
    # Notifications are disabled, as they are not being tested here.
    settings.EXPORTER_UPLOAD_MAX_RETRIES = 1

    with ApprovedTransactionFactory.create():
        RegulationFactory.create(),
        FootnoteTypeFactory.create()