Code Example #1
File: test_integration.py Project: bbc/localstack
def test_firehose_s3():

    s3_resource = aws_stack.connect_to_resource('s3')
    firehose = aws_stack.connect_to_service('firehose')

    s3_prefix = '/testdata'
    test_data = '{"test": "firehose_data_%s"}' % short_uid()
    # create Firehose stream
    stream = firehose.create_delivery_stream(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        S3DestinationConfiguration={
            'RoleARN': aws_stack.iam_resource_arn('firehose'),
            'BucketARN': aws_stack.s3_bucket_arn(TEST_BUCKET_NAME),
            'Prefix': s3_prefix
        }
    )
    assert stream
    assert TEST_FIREHOSE_NAME in firehose.list_delivery_streams()['DeliveryStreamNames']
    # create target S3 bucket
    s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME)

    # put records
    firehose.put_record(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        Record={
            'Data': to_bytes(test_data)
        }
    )
    # check records in target bucket
    all_objects = testutil.list_all_s3_objects()
    testutil.assert_objects(json.loads(to_str(test_data)), all_objects)
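
Note: the test asserts on the bucket contents immediately after put_record, which can be flaky if Firehose delivery is asynchronous. A small polling wrapper (a sketch on top of the same testutil helpers, assuming assert_objects raises AssertionError on mismatch; not part of localstack) avoids that:

import time

def wait_for_objects(expected, timeout=10, interval=1):
    # hypothetical helper: poll until the expected object appears in S3,
    # re-raising the assertion error once the timeout is exhausted
    deadline = time.time() + timeout
    while True:
        try:
            testutil.assert_objects(expected, testutil.list_all_s3_objects())
            return
        except AssertionError:
            if time.time() > deadline:
                raise
            time.sleep(interval)

# usage in the test above: wait_for_objects(json.loads(to_str(test_data)))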
Code Example #2
File: testutil.py Project: bbc/localstack
def create_dynamodb_table(table_name, partition_key, env=None, stream_view_type=None):
    """Utility method to create a DynamoDB table"""

    dynamodb = aws_stack.connect_to_service('dynamodb', env=env, client=True)
    stream_spec = {'StreamEnabled': False}
    key_schema = [{
        'AttributeName': partition_key,
        'KeyType': 'HASH'
    }]
    attr_defs = [{
        'AttributeName': partition_key,
        'AttributeType': 'S'
    }]
    if stream_view_type is not None:
        stream_spec = {
            'StreamEnabled': True,
            'StreamViewType': stream_view_type
        }
    table = None
    try:
        table = dynamodb.create_table(TableName=table_name, KeySchema=key_schema,
            AttributeDefinitions=attr_defs, ProvisionedThroughput={
                'ReadCapacityUnits': 10, 'WriteCapacityUnits': 10
            },
            StreamSpecification=stream_spec
        )
    except Exception as e:
        if 'ResourceInUseException' in str(e):
            # Table already exists -> return table reference
            return aws_stack.connect_to_resource('dynamodb', env=env).Table(table_name)
        # re-raise anything else instead of silently returning None
        raise
    # give the new table a moment to become active
    time.sleep(2)
    return table
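
Note: on first creation this helper returns the raw boto3 client response, while the ResourceInUseException branch returns a boto3 Table resource, so callers should not rely on the return type. A minimal usage sketch (table and key names are placeholders) that enables a stream:

# create a table whose stream carries both old and new item images
result = create_dynamodb_table('my-test-table', partition_key='id',
                               stream_view_type='NEW_AND_OLD_IMAGES')
# on first creation, the client response exposes the stream ARN
stream_arn = result['TableDescription']['LatestStreamArn']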
Code Example #3
File: test_s3.py Project: bbc/localstack
def test_bucket_policy():

    s3_resource = aws_stack.connect_to_resource('s3')
    s3_client = aws_stack.connect_to_service('s3')

    # create test bucket
    s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME_WITH_POLICY)

    # put bucket policy
    policy = {
        'Version': '2012-10-17',
        'Statement': {
            'Action': ['s3:GetObject'],
            'Effect': 'Allow',
            'Resource': 'arn:aws:s3:::bucketName/*',
            'Principal': {
                'AWS': ['*']
            }
        }
    }
    response = s3_client.put_bucket_policy(
        Bucket=TEST_BUCKET_NAME_WITH_POLICY,
        Policy=json.dumps(policy)
    )
    assert response['ResponseMetadata']['HTTPStatusCode'] == 204

    # retrieve and check policy config
    saved_policy = s3_client.get_bucket_policy(Bucket=TEST_BUCKET_NAME_WITH_POLICY)['Policy']
    assert json.loads(saved_policy) == policy
Code Example #4
File: test_dynamodb.py Project: bbc/localstack
    def test_time_to_live(self):
        dynamodb = aws_stack.connect_to_resource('dynamodb')
        dynamodb_client = aws_stack.connect_to_service('dynamodb')

        testutil.create_dynamodb_table(TEST_DDB_TABLE_NAME_3, partition_key=PARTITION_KEY)
        table = dynamodb.Table(TEST_DDB_TABLE_NAME_3)

        # Insert some items to the table
        items = {
            'id1': {PARTITION_KEY: 'id1', 'data': 'IT IS'},
            'id2': {PARTITION_KEY: 'id2', 'data': 'TIME'},
            'id3': {PARTITION_KEY: 'id3', 'data': 'TO LIVE!'}
        }
        for item in items.values():
            table.put_item(Item=item)

        # Describe TTL when still unset.
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'] == 'DISABLED'

        # Enable TTL for given table
        response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, True)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveSpecification']['Enabled'] is True

        # Describe TTL status after being enabled.
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'] == 'ENABLED'

        # Disable TTL for given table
        response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, False)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveSpecification']['Enabled'] is False

        # Describe TTL status after being disabled.
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'] == 'DISABLED'

        # Enable TTL for given table again
        response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, True)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveSpecification']['Enabled'] is True

        # Describe TTL status after being enabled again.
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'] == 'ENABLED'

        # Clean up table
        dynamodb_client.delete_table(TableName=TEST_DDB_TABLE_NAME_3)
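
Note: send_describe_dynamodb_ttl_request and send_update_dynamodb_ttl_request appear to wrap raw HTTP calls against the TTL API. With a plain boto3 client the same round trip looks roughly like this (the TTL attribute name is a placeholder):

client = aws_stack.connect_to_service('dynamodb')

# enable TTL on the table
client.update_time_to_live(
    TableName=TEST_DDB_TABLE_NAME_3,
    TimeToLiveSpecification={'Enabled': True, 'AttributeName': 'expires_at'})

# read back the TTL status
status = client.describe_time_to_live(
    TableName=TEST_DDB_TABLE_NAME_3)['TimeToLiveDescription']['TimeToLiveStatus']
assert status in ('ENABLED', 'ENABLING')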
Code Example #5
File: test_cloudformation.py Project: bbc/localstack
    def test_list_stack_resources_returns_queue_urls(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        template = template_deployer.template_to_json(load_file(TEST_TEMPLATE_2))
        cloudformation.create_stack(StackName=TEST_STACK_NAME_2, TemplateBody=template)

        def check_stack():
            stack = get_stack_details(TEST_STACK_NAME_2)
            assert stack['StackStatus'] == 'CREATE_COMPLETE'

        retry(check_stack, retries=3, sleep=2)

        list_stack_summaries = list_stack_resources(TEST_STACK_NAME_2)
        queue_urls = get_queue_urls()

        for resource in list_stack_summaries:
            assert resource['PhysicalResourceId'] in queue_urls
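
Note: retry(fn, retries, sleep) is used across these tests; a minimal implementation consistent with that call shape (the real helper may differ):

import time

def retry(fn, retries=3, sleep=2, **kwargs):
    # call fn until it stops raising; after the final failed attempt, re-raise
    for attempt in range(retries + 1):
        try:
            return fn(**kwargs)
        except Exception:
            if attempt >= retries:
                raise
            time.sleep(sleep)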
Code Example #6
File: template_deployer.py Project: bbc/localstack
def get_client(resource):
    resource_type = get_resource_type(resource)
    service = get_service_name(resource)
    resource_config = RESOURCE_TO_FUNCTION.get(resource_type)
    if resource_config is None:
        raise Exception('CloudFormation deployment for resource type %s not yet implemented' % resource_type)
    if ACTION_CREATE not in resource_config:
        # nothing to do for this resource
        return
    try:
        if resource_config[ACTION_CREATE].get('boto_client') == 'resource':
            return aws_stack.connect_to_resource(service)
        return aws_stack.connect_to_service(service)
    except Exception as e:
        LOGGER.warning('Unable to get client for "%s" API, skipping deployment: %s' % (service, e))
        return None
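
Note: get_client only reads resource_config[ACTION_CREATE].get('boto_client'), so a RESOURCE_TO_FUNCTION entry presumably looks something like the sketch below. The 'create' key, the S3 example, and the 'function' field are illustrative assumptions, not the actual registry:

RESOURCE_TO_FUNCTION = {
    'S3::Bucket': {
        'create': {                     # ACTION_CREATE
            'boto_client': 'resource',  # selects connect_to_resource over connect_to_service
            'function': 'create_bucket'
        }
    }
}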
Code Example #7
File: test_dynamodb.py Project: bbc/localstack
    def test_large_data_download(self):
        dynamodb = aws_stack.connect_to_resource('dynamodb')
        dynamodb_client = aws_stack.connect_to_service('dynamodb')

        testutil.create_dynamodb_table(TEST_DDB_TABLE_NAME_2, partition_key=PARTITION_KEY)
        table = dynamodb.Table(TEST_DDB_TABLE_NAME_2)

        # Create a large amount of items
        num_items = 20
        for i in range(0, num_items):
            item = {PARTITION_KEY: 'id%s' % i, 'data1': 'foobar123 ' * 1000}
            table.put_item(Item=item)

        # Retrieve the items. The data will be transmitted to the client with chunked transfer encoding
        result = table.scan(TableName=TEST_DDB_TABLE_NAME_2)
        assert len(result['Items']) == num_items

        # Clean up
        dynamodb_client.delete_table(TableName=TEST_DDB_TABLE_NAME_2)
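
Note: the 20 items of roughly 10 KB each fit into a single scan page, but table.scan returns at most 1 MB per call, so larger datasets need to follow LastEvaluatedKey. A robust sketch:

def scan_all_items(table):
    # collect every item, following scan pagination until exhausted
    items = []
    kwargs = {}
    while True:
        page = table.scan(**kwargs)
        items.extend(page['Items'])
        if 'LastEvaluatedKey' not in page:
            return items
        kwargs['ExclusiveStartKey'] = page['LastEvaluatedKey']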
Code Example #8
def test_dynamodb_error_injection():
    if not do_run():
        return

    dynamodb = aws_stack.connect_to_resource('dynamodb')
    # create the test table
    testutil.create_dynamodb_table(TEST_TABLE_NAME, partition_key=PARTITION_KEY)
    table = dynamodb.Table(TEST_TABLE_NAME)

    # by default, no errors
    test_no_errors = table.put_item(Item={PARTITION_KEY: short_uid(), 'data': 'foobar123'})
    assert_equal(test_no_errors['ResponseMetadata']['HTTPStatusCode'], 200)

    # with a probability of 1, always throw errors
    config.DYNAMODB_ERROR_PROBABILITY = 1.0
    assert_raises(ClientError, table.put_item, Item={PARTITION_KEY: short_uid(), 'data': 'foobar123'})

    # reset probability to zero
    config.DYNAMODB_ERROR_PROBABILITY = 0.0
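
Note: DYNAMODB_ERROR_PROBABILITY presumably gates a random fault injector inside the local DynamoDB proxy; the idea, as a minimal sketch (not localstack's actual implementation):

import random

def maybe_inject_error(probability):
    # with the given probability, simulate a throttling failure; an HTTP 400
    # response like this surfaces to boto3 callers as a ClientError
    if random.random() < probability:
        raise RuntimeError('ProvisionedThroughputExceededException (injected)')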
Code Example #9
File: test_dynamodb.py Project: bbc/localstack
    def test_non_ascii_chars(self):
        dynamodb = aws_stack.connect_to_resource('dynamodb')

        testutil.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
        table = dynamodb.Table(TEST_DDB_TABLE_NAME)

        # write some items containing non-ASCII characters
        items = {
            'id1': {PARTITION_KEY: 'id1', 'data': 'foobar123 ✓'},
            'id2': {PARTITION_KEY: 'id2', 'data': 'foobar123 £'},
            'id3': {PARTITION_KEY: 'id3', 'data': 'foobar123 ¢'}
        }
        for item in items.values():
            table.put_item(Item=item)

        for item_id in items.keys():
            item = table.get_item(Key={PARTITION_KEY: item_id})['Item']
            # need to fix up the JSON and convert str to unicode for Python 2
            item1 = json_safe(item)
            item2 = json_safe(items[item_id])
            assert item1 == item2
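
Note: json_safe normalizes both sides of the comparison (e.g. str vs unicode under Python 2). A round trip through JSON is a minimal stand-in for it; the real helper likely covers more types (e.g. Decimal values returned by DynamoDB):

import json

def json_safe(obj):
    # normalize by serializing to JSON and back; unifies text types and
    # collapses tuples to lists, but does not handle Decimal or bytes
    return json.loads(json.dumps(obj))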
Code Example #10
File: test_cloudformation.py Project: bbc/localstack
    def test_apply_template(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        template = template_deployer.template_to_json(load_file(TEST_TEMPLATE_1))

        # deploy template
        cloudformation.create_stack(StackName=TEST_STACK_NAME, TemplateBody=template)

        # wait for deployment to finish
        def check_stack():
            stack = get_stack_details(TEST_STACK_NAME)
            assert stack['StackStatus'] == 'CREATE_COMPLETE'

        retry(check_stack, retries=3, sleep=2)

        # assert that bucket has been created
        assert bucket_exists('cf-test-bucket-1')
        # assert that queue has been created
        assert queue_exists('cf-test-queue-1')
        # assert that stream has been created
        assert stream_exists('cf-test-stream-1')
        # assert that queue with auto-generated name has been created
        resource = describe_stack_resource(TEST_STACK_NAME, 'SQSQueueNoNameProperty')
        assert queue_exists(resource['PhysicalResourceId'])
Code Example #11
def invoke_rest_api(api_id,
                    stage,
                    method,
                    invocation_path,
                    data,
                    headers,
                    path=None):
    path = path or invocation_path
    relative_path, query_string_params = extract_query_string_params(
        path=invocation_path)

    # run gateway authorizers for this request
    authorize_invocation(api_id, headers)

    path_map = helpers.get_rest_api_paths(rest_api_id=api_id)
    try:
        extracted_path, resource = get_resource_for_path(path=relative_path,
                                                         path_map=path_map)
    except Exception:
        return make_error_response('Unable to find path %s' % path, 404)

    integrations = resource.get('resourceMethods', {})
    integration = integrations.get(method, {})
    if not integration:
        integration = integrations.get('ANY', {})
    integration = integration.get('methodIntegration')
    if not integration:
        if method == 'OPTIONS' and 'Origin' in headers:
            # default to returning CORS headers if this is an OPTIONS request
            return get_cors_response(headers)
        return make_error_response(
            'Unable to find integration for path %s' % path, 404)

    uri = integration.get('uri')
    if integration['type'] == 'AWS':
        if 'kinesis:action/' in uri:
            if uri.endswith('kinesis:action/PutRecords'):
                target = kinesis_listener.ACTION_PUT_RECORDS
            elif uri.endswith('kinesis:action/ListStreams'):
                target = kinesis_listener.ACTION_LIST_STREAMS
            else:
                # avoid an unbound 'target' for unsupported kinesis actions
                target = ''

            template = integration['requestTemplates'][APPLICATION_JSON]
            new_request = aws_stack.render_velocity_template(template, data)
            # forward records to target kinesis stream
            headers = aws_stack.mock_aws_request_headers(service='kinesis')
            headers['X-Amz-Target'] = target
            result = common.make_http_request(url=TEST_KINESIS_URL,
                                              method='POST',
                                              data=new_request,
                                              headers=headers)
            return result

        if method == 'POST':
            if uri.startswith('arn:aws:apigateway:') and ':sqs:path' in uri:
                template = integration['requestTemplates'][APPLICATION_JSON]
                account_id, queue = uri.split('/')[-2:]
                region_name = uri.split(':')[3]

                new_request = aws_stack.render_velocity_template(
                    template, data) + '&QueueName=%s' % queue
                headers = aws_stack.mock_aws_request_headers(
                    service='sqs', region_name=region_name)

                url = urljoin(TEST_SQS_URL, 'queue/%s' % queue)
                result = common.make_http_request(url,
                                                  method='POST',
                                                  headers=headers,
                                                  data=new_request)
                return result

        msg = 'API Gateway AWS integration action URI "%s", method "%s" not yet implemented' % (
            uri, method)
        LOGGER.warning(msg)
        return make_error_response(msg, 404)

    elif integration['type'] == 'AWS_PROXY':
        if uri.startswith('arn:aws:apigateway:') and ':lambda:path' in uri:
            func_arn = uri.split(':lambda:path')[1].split(
                'functions/')[1].split('/invocations')[0]
            data_str = json.dumps(data) if isinstance(data,
                                                      (dict,
                                                       list)) else to_str(data)
            account_id = uri.split(':lambda:path')[1].split(
                ':function:')[0].split(':')[-1]

            source_ip = headers['X-Forwarded-For'].split(',')[-2]

            # Sample request context:
            # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
            request_context = {
                'path': relative_path,
                'accountId': account_id,
                'resourceId': resource.get('id'),
                'stage': stage,
                'identity': {
                    'accountId': account_id,
                    'sourceIp': source_ip,
                    'userAgent': headers['User-Agent'],
                }
            }

            try:
                path_params = extract_path_params(
                    path=relative_path, extracted_path=extracted_path)
            except Exception:
                path_params = {}

            result = lambda_api.process_apigateway_invocation(
                func_arn,
                relative_path,
                data_str,
                headers,
                path_params=path_params,
                query_string_params=query_string_params,
                method=method,
                resource_path=path,
                request_context=request_context)
            if isinstance(result, FlaskResponse):
                return flask_to_requests_response(result)
            if isinstance(result, Response):
                return result

            response = LambdaResponse()
            parsed_result = result if isinstance(result, dict) else json.loads(
                str(result))
            parsed_result = common.json_safe(parsed_result)
            parsed_result = {} if parsed_result is None else parsed_result
            response.status_code = int(parsed_result.get('statusCode', 200))
            parsed_headers = parsed_result.get('headers', {})
            if parsed_headers is not None:
                response.headers.update(parsed_headers)
            try:
                if isinstance(parsed_result['body'], dict):
                    response._content = json.dumps(parsed_result['body'])
                else:
                    response._content = to_bytes(parsed_result['body'])
            except Exception:
                response._content = '{}'
            if response.content:
                response.headers['Content-Length'] = str(len(response.content))
            response.multi_value_headers = parsed_result.get(
                'multiValueHeaders') or {}
            return response

        elif uri.startswith(
                'arn:aws:apigateway:') and ':dynamodb:action' in uri:
            # arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection
            table_name = uri.split(':dynamodb:action')[1].split('&Table=')[1]
            action = uri.split(':dynamodb:action')[1].split('&Table=')[0]

            if 'PutItem' in action and method == 'PUT':
                response_template = path_map.get(relative_path, {}).get('resourceMethods', {})\
                    .get(method, {}).get('methodIntegration', {}).\
                    get('integrationResponses', {}).get('200', {}).get('responseTemplates', {})\
                    .get('application/json', None)

                if response_template is None:
                    msg = 'Invalid response template defined in integration response.'
                    return make_error_response(msg, 404)

                response_template = json.loads(response_template)
                if response_template['TableName'] != table_name:
                    msg = 'Invalid table name specified in integration response template.'
                    return make_error_response(msg, 404)

                dynamo_client = aws_stack.connect_to_resource('dynamodb')
                table = dynamo_client.Table(table_name)

                event_data = {}
                data_dict = json.loads(data)
                for key, _ in response_template['Item'].items():
                    event_data[key] = data_dict[key]

                table.put_item(Item=event_data)
                response = requests_response(
                    event_data, headers=aws_stack.mock_aws_request_headers())
                return response
        else:
            msg = 'API Gateway action uri "%s" not yet implemented' % uri
            LOGGER.warning(msg)
            return make_error_response(msg, 404)

    elif integration['type'] == 'HTTP':
        function = getattr(requests, method.lower())
        if isinstance(data, dict):
            data = json.dumps(data)
        result = function(integration['uri'], data=data, headers=headers)
        return result

    else:
        msg = (
            'API Gateway integration type "%s" for method "%s" not yet implemented'
            % (integration['type'], method))
        LOGGER.warning(msg)
        return make_error_response(msg, 404)
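
Note: invoke_rest_api leans on extract_query_string_params to split the invocation path; a minimal sketch of that behavior with the standard library (the real helper may flatten single-valued parameters):

from urllib.parse import urlparse, parse_qs

def extract_query_string_params(path):
    # split '/pets?limit=10' into ('/pets', {'limit': ['10']})
    parsed = urlparse(path)
    return parsed.path, parse_qs(parsed.query)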
Code Example #12
def get_dynamodb_table():
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    # create the test table
    aws_stack.create_dynamodb_table(TEST_TABLE_NAME,
                                    partition_key=PARTITION_KEY)
    return dynamodb.Table(TEST_TABLE_NAME)
Code Example #13
def invoke_rest_api_integration(api_id,
                                stage,
                                integration,
                                method,
                                path,
                                invocation_path,
                                data,
                                headers,
                                resource_path,
                                context={},
                                resource_id=None,
                                response_templates={}):

    relative_path, query_string_params = extract_query_string_params(
        path=invocation_path)
    integration_type_orig = integration.get('type') or integration.get(
        'integrationType') or ''
    integration_type = integration_type_orig.upper()
    uri = integration.get('uri') or integration.get('integrationUri')

    if (uri.startswith('arn:aws:apigateway:')
            and ':lambda:path' in uri) or uri.startswith('arn:aws:lambda'):
        if integration_type in ['AWS', 'AWS_PROXY']:
            func_arn = uri
            if ':lambda:path' in uri:
                func_arn = uri.split(':lambda:path')[1].split(
                    'functions/')[1].split('/invocations')[0]
            data_str = json.dumps(data) if isinstance(data,
                                                      (dict,
                                                       list)) else to_str(data)

            try:
                path_params = extract_path_params(path=relative_path,
                                                  extracted_path=resource_path)
            except Exception:
                path_params = {}

            # apply custom request template
            data_str = apply_template(integration,
                                      'request',
                                      data_str,
                                      path_params=path_params,
                                      query_params=query_string_params,
                                      headers=headers)

            # Sample request context:
            # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
            request_context = get_lambda_event_request_context(
                method,
                path,
                data,
                headers,
                integration_uri=uri,
                resource_id=resource_id)
            stage_variables = get_stage_variables(api_id, stage)

            result = lambda_api.process_apigateway_invocation(
                func_arn,
                relative_path,
                data_str,
                stage,
                api_id,
                headers,
                path_params=path_params,
                query_string_params=query_string_params,
                method=method,
                resource_path=resource_path,
                request_context=request_context,
                event_context=context,
                stage_variables=stage_variables)

            if isinstance(result, FlaskResponse):
                response = flask_to_requests_response(result)
            elif isinstance(result, Response):
                response = result
            else:
                response = LambdaResponse()
                parsed_result = result if isinstance(
                    result, dict) else json.loads(str(result or '{}'))
                parsed_result = common.json_safe(parsed_result)
                parsed_result = {} if parsed_result is None else parsed_result
                response.status_code = int(parsed_result.get(
                    'statusCode', 200))
                parsed_headers = parsed_result.get('headers', {})
                if parsed_headers is not None:
                    response.headers.update(parsed_headers)
                try:
                    if isinstance(parsed_result['body'], dict):
                        response._content = json.dumps(parsed_result['body'])
                    else:
                        response._content = to_bytes(parsed_result['body'])
                except Exception:
                    response._content = '{}'
                update_content_length(response)
                response.multi_value_headers = parsed_result.get(
                    'multiValueHeaders') or {}

            # apply custom response template
            response._content = apply_template(integration, 'response',
                                               response._content)
            response.headers['Content-Length'] = str(
                len(response.content or ''))

            return response

        msg = 'API Gateway %s integration action "%s", method "%s" not yet implemented' % (
            integration_type, uri, method)
        LOGGER.warning(msg)
        return make_error_response(msg, 404)

    elif integration_type == 'AWS':
        if 'kinesis:action/' in uri:
            if uri.endswith('kinesis:action/PutRecord'):
                target = kinesis_listener.ACTION_PUT_RECORD
            elif uri.endswith('kinesis:action/PutRecords'):
                target = kinesis_listener.ACTION_PUT_RECORDS
            elif uri.endswith('kinesis:action/ListStreams'):
                target = kinesis_listener.ACTION_LIST_STREAMS
            else:
                # avoid an unbound 'target' for unsupported kinesis actions
                target = ''

            template = integration['requestTemplates'][APPLICATION_JSON]
            new_request = aws_stack.render_velocity_template(template, data)
            # forward records to target kinesis stream
            headers = aws_stack.mock_aws_request_headers(service='kinesis')
            headers['X-Amz-Target'] = target
            result = common.make_http_request(url=TEST_KINESIS_URL,
                                              method='POST',
                                              data=new_request,
                                              headers=headers)
            # TODO apply response template..?
            return result

        elif 'states:action/' in uri:
            if uri.endswith('states:action/StartExecution'):
                action = 'StartExecution'
            decoded_data = data.decode()
            payload = {}
            if 'stateMachineArn' in decoded_data and 'input' in decoded_data:
                payload = json.loads(decoded_data)
            elif APPLICATION_JSON in integration.get('requestTemplates', {}):
                template = integration['requestTemplates'][APPLICATION_JSON]
                payload = aws_stack.render_velocity_template(template,
                                                             data,
                                                             as_json=True)
            client = aws_stack.connect_to_service('stepfunctions')

            kwargs = {'name': payload['name']} if 'name' in payload else {}
            result = client.start_execution(
                stateMachineArn=payload['stateMachineArn'],
                input=payload['input'],
                **kwargs)
            response = requests_response(
                content={
                    'executionArn': result['executionArn'],
                    'startDate': str(result['startDate'])
                },
                headers=aws_stack.mock_aws_request_headers())
            return response

        if method == 'POST':
            if uri.startswith('arn:aws:apigateway:') and ':sqs:path' in uri:
                template = integration['requestTemplates'][APPLICATION_JSON]
                account_id, queue = uri.split('/')[-2:]
                region_name = uri.split(':')[3]

                new_request = '%s&QueueName=%s' % (
                    aws_stack.render_velocity_template(template, data), queue)
                headers = aws_stack.mock_aws_request_headers(
                    service='sqs', region_name=region_name)

                url = urljoin(TEST_SQS_URL,
                              '%s/%s' % (TEST_AWS_ACCOUNT_ID, queue))
                result = common.make_http_request(url,
                                                  method='POST',
                                                  headers=headers,
                                                  data=new_request)
                return result

        msg = 'API Gateway AWS integration action URI "%s", method "%s" not yet implemented' % (
            uri, method)
        LOGGER.warning(msg)
        return make_error_response(msg, 404)

    elif integration_type == 'AWS_PROXY':
        if uri.startswith('arn:aws:apigateway:') and ':dynamodb:action' in uri:
            # arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection
            table_name = uri.split(':dynamodb:action')[1].split('&Table=')[1]
            action = uri.split(':dynamodb:action')[1].split('&Table=')[0]

            if 'PutItem' in action and method == 'PUT':
                response_template = response_templates.get(
                    'application/json', None)

                if response_template is None:
                    msg = 'Invalid response template defined in integration response.'
                    return make_error_response(msg, 404)

                response_template = json.loads(response_template)
                if response_template['TableName'] != table_name:
                    msg = 'Invalid table name specified in integration response template.'
                    return make_error_response(msg, 404)

                dynamo_client = aws_stack.connect_to_resource('dynamodb')
                table = dynamo_client.Table(table_name)

                event_data = {}
                data_dict = json.loads(data)
                for key, _ in response_template['Item'].items():
                    event_data[key] = data_dict[key]

                table.put_item(Item=event_data)
                response = requests_response(
                    event_data, headers=aws_stack.mock_aws_request_headers())
                return response
        else:
            msg = 'API Gateway action uri "%s", integration type %s not yet implemented' % (
                uri, integration_type)
            LOGGER.warning(msg)
            return make_error_response(msg, 404)

    elif integration_type in ['HTTP_PROXY', 'HTTP']:
        function = getattr(requests, method.lower())

        # apply custom request template
        data = apply_template(integration, 'request', data)

        if isinstance(data, dict):
            data = json.dumps(data)

        result = function(uri, data=data, headers=headers)

        # apply custom response template to the response body
        result._content = apply_template(integration, 'response', result.content)

        return result

    elif integration_type == 'MOCK':
        # TODO: add logic for MOCK responses
        pass

    if method == 'OPTIONS':
        # fall back to returning CORS headers if this is an OPTIONS request
        return get_cors_response(headers)

    msg = (
        'API Gateway integration type "%s", method "%s", URI "%s" not yet implemented'
        % (integration_type, method, uri))
    LOGGER.warning(msg)
    return make_error_response(msg, 404)
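
Note: extract_path_params maps a concrete request path onto the templated resource path; a minimal sketch (greedy '{proxy+}' parameters are ignored here):

def extract_path_params(path, extracted_path):
    # match '/pets/42' against '/pets/{petId}' -> {'petId': '42'}
    params = {}
    concrete_parts = path.strip('/').split('/')
    template_parts = extracted_path.strip('/').split('/')
    for concrete, template in zip(concrete_parts, template_parts):
        if template.startswith('{') and template.endswith('}'):
            params[template[1:-1]] = concrete
    return params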
Code Example #14
def test_kinesis_lambda_ddb_streams():

    env = ENV_DEV
    ddb_lease_table_suffix = '-kclapp'
    dynamodb = aws_stack.connect_to_resource('dynamodb', env=env)
    dynamodb_service = aws_stack.connect_to_service('dynamodb', env=env)
    dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams', env=env)
    kinesis = aws_stack.connect_to_service('kinesis', env=env)

    print('Creating test streams...')
    run_safe(lambda: dynamodb_service.delete_table(TableName=TEST_STREAM_NAME +
                                                   ddb_lease_table_suffix),
             print_error=False)
    aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

    # subscribe to inbound Kinesis stream
    def process_records(records, shard_id):
        EVENTS.extend(records)

    # start the KCL client process in the background
    kinesis_connector.listen_to_kinesis(
        TEST_STREAM_NAME,
        listener_func=process_records,
        wait_until_started=True,
        ddb_lease_table_suffix=ddb_lease_table_suffix)

    print("Kinesis consumer initialized.")

    # create table with stream forwarding config
    testutil.create_dynamodb_table(TEST_TABLE_NAME,
                                   partition_key=PARTITION_KEY,
                                   env=env,
                                   stream_view_type='NEW_AND_OLD_IMAGES')

    # list DDB streams and make sure the table stream is there
    streams = dynamodbstreams.list_streams()
    ddb_event_source_arn = None
    for stream in streams['Streams']:
        if stream['TableName'] == TEST_TABLE_NAME:
            ddb_event_source_arn = stream['StreamArn']
    assert ddb_event_source_arn

    # deploy test lambda (Python) connected to DynamoDB Stream
    zip_file = testutil.create_lambda_archive(TEST_LAMBDA_PYTHON,
                                              get_content=True,
                                              libs=['localstack'],
                                              runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
                                    zip_file=zip_file,
                                    event_source_arn=ddb_event_source_arn,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)
    # make sure we cannot create Lambda with same name twice
    assert_raises(Exception,
                  testutil.create_lambda_function,
                  func_name=TEST_LAMBDA_NAME_DDB,
                  zip_file=zip_file,
                  event_source_arn=ddb_event_source_arn,
                  runtime=LAMBDA_RUNTIME_PYTHON27)

    # deploy test lambda (Python) connected to Kinesis Stream
    kinesis_event_source_arn = kinesis.describe_stream(
        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
    )['StreamDescription']['StreamARN']
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM_PY,
                                    zip_file=zip_file,
                                    event_source_arn=kinesis_event_source_arn,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)

    if use_docker():
        # deploy test lambda (Node.js) connected to Kinesis Stream
        zip_file = testutil.create_lambda_archive(
            TEST_LAMBDA_NODEJS,
            get_content=True,
            runtime=LAMBDA_RUNTIME_NODEJS)
        kinesis_event_source_arn = kinesis.describe_stream(
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
        )['StreamDescription']['StreamARN']
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_STREAM_JS,
            zip_file=zip_file,
            event_source_arn=kinesis_event_source_arn,
            runtime=LAMBDA_RUNTIME_NODEJS)

    # put items to table
    num_events_ddb = 10
    print('Putting %s items to table...' % num_events_ddb)
    table = dynamodb.Table(TEST_TABLE_NAME)
    for i in range(0, num_events_ddb):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123'
        })

    # put items to stream
    num_events_kinesis = 10
    print('Putting %s items to stream...' % num_events_kinesis)
    kinesis.put_records(Records=[{
        'Data': '{}',
        'PartitionKey': 'testId%s' % i
    } for i in range(0, num_events_kinesis)],
                        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    print("Waiting some time before finishing test.")
    time.sleep(4)

    num_events = num_events_ddb + num_events_kinesis
    print('DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s' %
          (len(EVENTS), num_events))
    assert len(EVENTS) == num_events
Code Example #15
def test_kinesis_lambda_sns_ddb_streams():

    ddb_lease_table_suffix = '-kclapp'
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    dynamodb_service = aws_stack.connect_to_service('dynamodb')
    dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
    kinesis = aws_stack.connect_to_service('kinesis')
    sns = aws_stack.connect_to_service('sns')

    LOGGER.info('Creating test streams...')
    run_safe(lambda: dynamodb_service.delete_table(TableName=TEST_STREAM_NAME +
                                                   ddb_lease_table_suffix),
             print_error=False)
    aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

    # subscribe to inbound Kinesis stream
    def process_records(records, shard_id):
        EVENTS.extend(records)

    # start the KCL client process in the background
    kinesis_connector.listen_to_kinesis(
        TEST_STREAM_NAME,
        listener_func=process_records,
        wait_until_started=True,
        ddb_lease_table_suffix=ddb_lease_table_suffix)

    LOGGER.info('Kinesis consumer initialized.')

    # create table with stream forwarding config
    testutil.create_dynamodb_table(TEST_TABLE_NAME,
                                   partition_key=PARTITION_KEY,
                                   stream_view_type='NEW_AND_OLD_IMAGES')

    # list DDB streams and make sure the table stream is there
    streams = dynamodbstreams.list_streams()
    ddb_event_source_arn = None
    for stream in streams['Streams']:
        if stream['TableName'] == TEST_TABLE_NAME:
            ddb_event_source_arn = stream['StreamArn']
    assert ddb_event_source_arn

    # deploy test lambda connected to DynamoDB Stream
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON),
                                              get_content=True,
                                              libs=TEST_LAMBDA_LIBS,
                                              runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
                                    zip_file=zip_file,
                                    event_source_arn=ddb_event_source_arn,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)
    # make sure we cannot create Lambda with same name twice
    assert_raises(Exception,
                  testutil.create_lambda_function,
                  func_name=TEST_LAMBDA_NAME_DDB,
                  zip_file=zip_file,
                  event_source_arn=ddb_event_source_arn,
                  runtime=LAMBDA_RUNTIME_PYTHON27)

    # deploy test lambda connected to Kinesis Stream
    kinesis_event_source_arn = kinesis.describe_stream(
        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
    )['StreamDescription']['StreamARN']
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM,
                                    zip_file=zip_file,
                                    event_source_arn=kinesis_event_source_arn,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)

    # put items to table
    num_events_ddb = 10
    LOGGER.info('Putting %s items to table...' % num_events_ddb)
    table = dynamodb.Table(TEST_TABLE_NAME)
    for i in range(0, num_events_ddb - 3):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123'
        })
    dynamodb.batch_write_item(
        RequestItems={
            TEST_TABLE_NAME: [{
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123'
                    }
                }
            }, {
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123'
                    }
                }
            }, {
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123'
                    }
                }
            }]
        })

    # put items to stream
    num_events_kinesis = 10
    LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
    kinesis.put_records(Records=[{
        'Data': '{}',
        'PartitionKey': 'testId%s' % i
    } for i in range(0, num_events_kinesis)],
                        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # put 1 item to stream that will trigger an error in the Lambda
    kinesis.put_record(Data='{"%s": 1}' %
                       lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
                       PartitionKey='testIderror',
                       StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # create SNS topic, connect it to the Lambda, publish test message
    num_events_sns = 3
    response = sns.create_topic(Name=TEST_TOPIC_NAME)
    sns.subscribe(
        TopicArn=response['TopicArn'],
        Protocol='lambda',
        Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
    for i in range(0, num_events_sns):
        sns.publish(TopicArn=response['TopicArn'],
                    Message='test message %s' % i)

    # get latest records
    latest = aws_stack.kinesis_get_latest_records(
        TEST_LAMBDA_SOURCE_STREAM_NAME,
        shard_id='shardId-000000000000',
        count=10)
    assert len(latest) == 10

    LOGGER.info('Waiting some time before finishing test.')
    time.sleep(2)

    num_events = num_events_ddb + num_events_kinesis + num_events_sns

    def check_events():
        if len(EVENTS) != num_events:
            LOGGER.warning(
                ('DynamoDB and Kinesis updates retrieved ' +
                 '(actual/expected): %s/%s') % (len(EVENTS), num_events))
        assert len(EVENTS) == num_events

    # this can take a long time in CI, make sure we give it enough time/retries
    retry(check_events, retries=7, sleep=3)

    # check cloudwatch notifications
    stats1 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM)
    assert len(stats1['Datapoints']) == 2 + num_events_sns
    stats2 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM, 'Errors')
    assert len(stats2['Datapoints']) == 1
    stats3 = get_lambda_metrics(TEST_LAMBDA_NAME_DDB)
    assert len(stats3['Datapoints']) == 10
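
Note: get_lambda_metrics presumably queries CloudWatch for per-function datapoints; with a plain boto3 client that looks roughly like this (the time window and period are illustrative):

from datetime import datetime, timedelta

def get_lambda_metrics(func_name, metric='Invocations'):
    cloudwatch = aws_stack.connect_to_service('cloudwatch')
    now = datetime.utcnow()
    # one datapoint per minute over the last ten minutes
    return cloudwatch.get_metric_statistics(
        Namespace='AWS/Lambda',
        MetricName=metric,
        Dimensions=[{'Name': 'FunctionName', 'Value': func_name}],
        StartTime=now - timedelta(minutes=10),
        EndTime=now,
        Period=60,
        Statistics=['Sum'])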
Code Example #16
def test_kinesis_lambda_sns_ddb_sqs_streams():
    ddb_lease_table_suffix = '-kclapp'
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    dynamodb_service = aws_stack.connect_to_service('dynamodb')
    dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
    kinesis = aws_stack.connect_to_service('kinesis')
    sns = aws_stack.connect_to_service('sns')
    sqs = aws_stack.connect_to_service('sqs')

    LOGGER.info('Creating test streams...')
    run_safe(lambda: dynamodb_service.delete_table(TableName=TEST_STREAM_NAME +
                                                   ddb_lease_table_suffix),
             print_error=False)
    aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

    # subscribe to inbound Kinesis stream
    def process_records(records, shard_id):
        EVENTS.extend(records)

    # start the KCL client process in the background
    kinesis_connector.listen_to_kinesis(
        TEST_STREAM_NAME,
        listener_func=process_records,
        wait_until_started=True,
        ddb_lease_table_suffix=ddb_lease_table_suffix)

    LOGGER.info('Kinesis consumer initialized.')

    # create table with stream forwarding config
    testutil.create_dynamodb_table(TEST_TABLE_NAME,
                                   partition_key=PARTITION_KEY,
                                   stream_view_type='NEW_AND_OLD_IMAGES')

    # list DDB streams and make sure the table stream is there
    streams = dynamodbstreams.list_streams()
    ddb_event_source_arn = None
    for stream in streams['Streams']:
        if stream['TableName'] == TEST_TABLE_NAME:
            ddb_event_source_arn = stream['StreamArn']
    assert ddb_event_source_arn

    # deploy test lambda connected to DynamoDB Stream
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON),
                                              get_content=True,
                                              libs=TEST_LAMBDA_LIBS,
                                              runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
                                    zip_file=zip_file,
                                    event_source_arn=ddb_event_source_arn,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)
    # make sure we cannot create Lambda with same name twice
    assert_raises(Exception,
                  testutil.create_lambda_function,
                  func_name=TEST_LAMBDA_NAME_DDB,
                  zip_file=zip_file,
                  event_source_arn=ddb_event_source_arn,
                  runtime=LAMBDA_RUNTIME_PYTHON27)

    # deploy test lambda connected to Kinesis Stream
    kinesis_event_source_arn = kinesis.describe_stream(
        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
    )['StreamDescription']['StreamARN']
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM,
                                    zip_file=zip_file,
                                    event_source_arn=kinesis_event_source_arn,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)

    # deploy test lambda connected to SQS queue
    sqs_queue_info = testutil.create_sqs_queue(TEST_LAMBDA_NAME_QUEUE)
    testutil.create_lambda_function(
        func_name=TEST_LAMBDA_NAME_QUEUE,
        zip_file=zip_file,
        event_source_arn=sqs_queue_info['QueueArn'],
        runtime=LAMBDA_RUNTIME_PYTHON27)

    # set number of items to update/put to table
    num_events_ddb = 15
    num_put_new_items = 5
    num_put_existing_items = 2
    num_batch_items = 3
    num_updates_ddb = num_events_ddb - num_put_new_items - num_put_existing_items - num_batch_items

    LOGGER.info('Putting %s items to table...' % num_events_ddb)
    table = dynamodb.Table(TEST_TABLE_NAME)
    for i in range(0, num_put_new_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123'
        })
    # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
    for i in range(0, num_put_existing_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123_put_existing'
        })

    # batch write some items containing non-ASCII characters
    dynamodb.batch_write_item(
        RequestItems={
            TEST_TABLE_NAME: [{
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123 ✓'
                    }
                }
            }, {
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123 £'
                    }
                }
            }, {
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123 ¢'
                    }
                }
            }]
        })
    # update some items, which also triggers notification events
    for i in range(0, num_updates_ddb):
        dynamodb_service.update_item(
            TableName=TEST_TABLE_NAME,
            Key={PARTITION_KEY: {
                'S': 'testId%s' % i
            }},
            AttributeUpdates={
                'data': {
                    'Action': 'PUT',
                    'Value': {
                        'S': 'foobar123_updated'
                    }
                }
            })

    # put items to stream
    num_events_kinesis = 10
    LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
    kinesis.put_records(Records=[{
        'Data': '{}',
        'PartitionKey': 'testId%s' % i
    } for i in range(0, num_events_kinesis)],
                        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # put 1 item to stream that will trigger an error in the Lambda
    kinesis.put_record(Data='{"%s": 1}' %
                       lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
                       PartitionKey='testIderror',
                       StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # create SNS topic, connect it to the Lambda, publish test message
    num_events_sns = 3
    response = sns.create_topic(Name=TEST_TOPIC_NAME)
    sns.subscribe(
        TopicArn=response['TopicArn'],
        Protocol='lambda',
        Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
    for i in range(0, num_events_sns):
        sns.publish(TopicArn=response['TopicArn'],
                    Message='test message %s' % i)

    # get latest records
    latest = aws_stack.kinesis_get_latest_records(
        TEST_LAMBDA_SOURCE_STREAM_NAME,
        shard_id='shardId-000000000000',
        count=10)
    assert len(latest) == 10

    # send messages to SQS queue
    num_events_sqs = 4
    for i in range(num_events_sqs):
        sqs.send_message(QueueUrl=sqs_queue_info['QueueUrl'],
                         MessageBody=str(i))

    LOGGER.info('Waiting some time before finishing test.')
    time.sleep(2)

    num_events_lambda = num_events_ddb + num_events_sns + num_events_sqs
    num_events = num_events_lambda + num_events_kinesis

    def check_events():
        if len(EVENTS) != num_events:
            LOGGER.warning((
                'DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s'
            ) % (len(EVENTS), num_events))
        assert len(EVENTS) == num_events
        event_items = [json.loads(base64.b64decode(e['data'])) for e in EVENTS]
        inserts = [
            e for e in event_items if e.get('__action_type') == 'INSERT'
        ]
        modifies = [
            e for e in event_items if e.get('__action_type') == 'MODIFY'
        ]
        assert len(inserts) == num_put_new_items + num_batch_items
        assert len(modifies) == num_put_existing_items + num_updates_ddb

    # this can take a long time in CI, make sure we give it enough time/retries
    retry(check_events, retries=7, sleep=3)

    # check cloudwatch notifications
    num_invocations = get_lambda_invocations_count(TEST_LAMBDA_NAME_STREAM)
    assert num_invocations == 2 + num_events_lambda
    num_error_invocations = get_lambda_invocations_count(
        TEST_LAMBDA_NAME_STREAM, 'Errors')
    assert num_error_invocations == 1
Code Example #17
File: test_notifications.py Project: bbc/localstack
def test_bucket_notifications():

    s3_resource = aws_stack.connect_to_resource('s3')
    s3_client = aws_stack.connect_to_service('s3')
    sqs_client = aws_stack.connect_to_service('sqs')

    # create test bucket and queue
    s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
    queue_info = sqs_client.create_queue(QueueName=TEST_QUEUE_NAME_FOR_S3)

    # create notification on bucket
    queue_url = queue_info['QueueUrl']
    queue_arn = aws_stack.sqs_queue_arn(TEST_QUEUE_NAME_FOR_S3)
    events = ['s3:ObjectCreated:*', 's3:ObjectRemoved:Delete']
    filter_rules = {
        'FilterRules': [{
            'Name': 'prefix',
            'Value': 'testupload/'
        }, {
            'Name': 'suffix',
            'Value': 'testfile.txt'
        }]
    }
    s3_client.put_bucket_notification_configuration(
        Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
        NotificationConfiguration={
            'QueueConfigurations': [{
                'Id': 'id123456',
                'QueueArn': queue_arn,
                'Events': events,
                'Filter': {
                    'Key': filter_rules
                }
            }]
        }
    )

    # retrieve and check notification config
    config = s3_client.get_bucket_notification_configuration(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
    config = config['QueueConfigurations'][0]
    assert events == config['Events']
    assert filter_rules == config['Filter']['Key']

    # upload file to S3 (this should NOT trigger a notification)
    test_key1 = '/testdata'
    test_data1 = b'{"test": "bucket_notification1"}'
    s3_client.upload_fileobj(BytesIO(test_data1), TEST_BUCKET_NAME_WITH_NOTIFICATIONS, test_key1)

    # upload file to S3 (this should trigger a notification)
    test_key2 = 'testupload/dir1/testfile.txt'
    test_data2 = b'{"test": "bucket_notification2"}'
    s3_client.upload_fileobj(BytesIO(test_data2), TEST_BUCKET_NAME_WITH_NOTIFICATIONS, test_key2)

    # receive, assert, and delete message from SQS
    receive_assert_delete(queue_url, [{'key': test_key2}, {'name': TEST_BUCKET_NAME_WITH_NOTIFICATIONS}], sqs_client)

    # delete notification config
    _delete_notification_config()

    # put notification config with single event type
    event = 's3:ObjectCreated:*'
    s3_client.put_bucket_notification_configuration(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
        NotificationConfiguration={
            'QueueConfigurations': [{
                'Id': 'id123456',
                'QueueArn': queue_arn,
                'Events': [event]
            }]
        }
    )
    config = s3_client.get_bucket_notification_configuration(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
    config = config['QueueConfigurations'][0]
    assert config['Events'] == [event]

    # put notification config with single event type and a prefix filter
    event = 's3:ObjectCreated:*'
    filter_rules = {
        'FilterRules': [{
            'Name': 'prefix',
            'Value': 'testupload/'
        }]
    }
    s3_client.put_bucket_notification_configuration(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
        NotificationConfiguration={
            'QueueConfigurations': [{
                'Id': 'id123456',
                'QueueArn': queue_arn,
                'Events': [event],
                'Filter': {
                    'Key': filter_rules
                }
            }]
        }
    )
    config = s3_client.get_bucket_notification_configuration(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
    config = config['QueueConfigurations'][0]
    assert config['Events'] == [event]
    assert filter_rules == config['Filter']['Key']

    # upload file to S3 (this should trigger a notification)
    test_key2 = 'testupload/dir1/testfile.txt'
    test_data2 = b'{"test": "bucket_notification2"}'
    s3_client.upload_fileobj(BytesIO(test_data2), TEST_BUCKET_NAME_WITH_NOTIFICATIONS, test_key2)
    # receive, assert, and delete message from SQS
    receive_assert_delete(queue_url, [{'key': test_key2}, {'name': TEST_BUCKET_NAME_WITH_NOTIFICATIONS}], sqs_client)

    # delete notification config
    _delete_notification_config()

    #
    # Tests s3->sns->sqs notifications
    #
    sns_client = aws_stack.connect_to_service('sns')
    topic_info = sns_client.create_topic(Name=TEST_S3_TOPIC_NAME)

    s3_client.put_bucket_notification_configuration(
        Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
        NotificationConfiguration={
            'TopicConfigurations': [
                {
                    'Id': 'id123',
                    'Events': ['s3:ObjectCreated:*'],
                    'TopicArn': topic_info['TopicArn']
                }
            ]
        })

    sns_client.subscribe(TopicArn=topic_info['TopicArn'], Protocol='sqs', Endpoint=queue_arn)

    test_key2 = 'testupload/dir1/testfile.txt'
    test_data2 = b'{"test": "bucket_notification2"}'

    s3_client.upload_fileobj(BytesIO(test_data2), TEST_BUCKET_NAME_WITH_NOTIFICATIONS, test_key2)

    # verify subject and records

    response = sqs_client.receive_message(QueueUrl=queue_url)
    for message in response['Messages']:
        snsObj = json.loads(message['Body'])
        testutil.assert_object({'Subject': 'Amazon S3 Notification'}, snsObj)
        notificationObj = json.loads(snsObj['Message'])
        testutil.assert_objects(
            [
                {'key': test_key2},
                {'name': TEST_BUCKET_NAME_WITH_NOTIFICATIONS}
            ], notificationObj['Records'])

        sqs_client.delete_message(QueueUrl=queue_url, ReceiptHandle=message['ReceiptHandle'])

    _delete_notification_config()
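
The test above relies on two helpers that are not shown in this snippet, receive_assert_delete and _delete_notification_config. A minimal sketch of what they might look like, inferred only from how they are called here (the polling strategy and exact assertions are assumptions, not the project's actual implementation):

import json

def receive_assert_delete(queue_url, assertions, sqs_client):
    # sketch: poll the queue once, check the S3 event records, then delete;
    # a robust version would retry until a message arrives (assumption)
    response = sqs_client.receive_message(QueueUrl=queue_url)
    records = []
    for message in response['Messages']:
        records.extend(json.loads(message['Body'])['Records'])
        sqs_client.delete_message(QueueUrl=queue_url,
                                  ReceiptHandle=message['ReceiptHandle'])
    testutil.assert_objects(assertions, records)

def _delete_notification_config():
    # sketch: putting an empty NotificationConfiguration removes all
    # notifications from the bucket
    s3_client = aws_stack.connect_to_service('s3')
    s3_client.put_bucket_notification_configuration(
        Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS, NotificationConfiguration={})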
Code Example #18
def invoke_rest_api_integration_backend(api_id,
                                        stage,
                                        integration,
                                        method,
                                        path,
                                        invocation_path,
                                        data,
                                        headers,
                                        resource_path,
                                        context={},
                                        resource_id=None,
                                        response_templates={}):

    relative_path, query_string_params = extract_query_string_params(
        path=invocation_path)
    integration_type_orig = integration.get('type') or integration.get(
        'integrationType') or ''
    integration_type = integration_type_orig.upper()
    uri = integration.get('uri') or integration.get('integrationUri') or ''
    try:
        path_params = extract_path_params(path=relative_path,
                                          extracted_path=resource_path)
    except Exception:
        path_params = {}
    if (uri.startswith('arn:aws:apigateway:')
            and ':lambda:path' in uri) or uri.startswith('arn:aws:lambda'):
        if integration_type in ['AWS', 'AWS_PROXY']:
            func_arn = uri
            if ':lambda:path' in uri:
                func_arn = uri.split(':lambda:path')[1].split(
                    'functions/')[1].split('/invocations')[0]

            # apply custom request template
            data_str = data
            try:
                data_str = json.dumps(data) if isinstance(
                    data, (dict, list)) else to_str(data)
                data_str = apply_template(integration,
                                          'request',
                                          data_str,
                                          path_params=path_params,
                                          query_params=query_string_params,
                                          headers=headers)
            except Exception:
                pass

            # Sample request context:
            # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
            request_context = get_lambda_event_request_context(
                method,
                path,
                data,
                headers,
                integration_uri=uri,
                resource_id=resource_id,
                resource_path=resource_path)
            stage_variables = get_stage_variables(api_id, stage)

            result = lambda_api.process_apigateway_invocation(
                func_arn,
                relative_path,
                data_str,
                stage,
                api_id,
                headers,
                path_params=path_params,
                query_string_params=query_string_params,
                method=method,
                resource_path=resource_path,
                request_context=request_context,
                event_context=context,
                stage_variables=stage_variables)

            if isinstance(result, FlaskResponse):
                response = flask_to_requests_response(result)
            elif isinstance(result, Response):
                response = result
            else:
                response = LambdaResponse()
                parsed_result = result if isinstance(
                    result, dict) else json.loads(str(result or '{}'))
                parsed_result = common.json_safe(parsed_result)
                parsed_result = {} if parsed_result is None else parsed_result
                response.status_code = int(parsed_result.get(
                    'statusCode', 200))
                parsed_headers = parsed_result.get('headers', {})
                if parsed_headers is not None:
                    response.headers.update(parsed_headers)
                try:
                    result_body = parsed_result.get('body')
                    if isinstance(result_body, dict):
                        response._content = json.dumps(result_body)
                    else:
                        body_bytes = to_bytes(result_body or '')
                        if parsed_result.get('isBase64Encoded', False):
                            body_bytes = base64.b64decode(body_bytes)
                        response._content = body_bytes
                except Exception as e:
                    LOG.warning("Couldn't set lambda response content: %s" % e)
                    response._content = '{}'
                update_content_length(response)
                response.multi_value_headers = parsed_result.get(
                    'multiValueHeaders') or {}

            # apply custom response template
            response._content = apply_template(integration, 'response',
                                               response._content)
            response.headers['Content-Length'] = str(
                len(response.content or ''))

            return response

        raise Exception(
            'API Gateway integration type "%s", action "%s", method "%s" not yet implemented'
            % (integration_type, uri, method))

    elif integration_type == 'AWS':
        if 'kinesis:action/' in uri:
            if uri.endswith('kinesis:action/PutRecord'):
                target = kinesis_listener.ACTION_PUT_RECORD
            elif uri.endswith('kinesis:action/PutRecords'):
                target = kinesis_listener.ACTION_PUT_RECORDS
            elif uri.endswith('kinesis:action/ListStreams'):
                target = kinesis_listener.ACTION_LIST_STREAMS
            else:
                # avoid a NameError below if the action is unknown
                target = ''

            template = integration['requestTemplates'][APPLICATION_JSON]
            new_request = aws_stack.render_velocity_template(template, data)
            # forward records to target kinesis stream
            headers = aws_stack.mock_aws_request_headers(service='kinesis')
            headers['X-Amz-Target'] = target
            result = common.make_http_request(url=TEST_KINESIS_URL,
                                              method='POST',
                                              data=new_request,
                                              headers=headers)
            # TODO apply response template..?
            return result

        elif 'states:action/' in uri:
            if uri.endswith('states:action/StartExecution'):
                action = 'StartExecution'
            decoded_data = data.decode()
            payload = {}
            if 'stateMachineArn' in decoded_data and 'input' in decoded_data:
                payload = json.loads(decoded_data)
            elif APPLICATION_JSON in integration.get('requestTemplates', {}):
                template = integration['requestTemplates'][APPLICATION_JSON]
                payload = aws_stack.render_velocity_template(template,
                                                             data,
                                                             as_json=True)
            client = aws_stack.connect_to_service('stepfunctions')

            kwargs = {'name': payload['name']} if 'name' in payload else {}
            result = client.start_execution(
                stateMachineArn=payload['stateMachineArn'],
                input=payload['input'],
                **kwargs)
            response = requests_response(
                content={
                    'executionArn': result['executionArn'],
                    'startDate': str(result['startDate'])
                },
                headers=aws_stack.mock_aws_request_headers())
            response.headers['content-type'] = APPLICATION_JSON
            return response
        elif 's3:path/' in uri and method == 'GET':
            s3 = aws_stack.connect_to_service('s3')
            uri_match = re.match(TARGET_REGEX_S3_URI, uri)
            if uri_match:
                bucket, object_key = uri_match.group('bucket', 'object')
                LOG.debug('Getting request for bucket %s object %s', bucket,
                          object_key)
                try:
                    object = s3.get_object(Bucket=bucket, Key=object_key)
                except s3.exceptions.NoSuchKey:
                    msg = 'Object %s not found' % object_key
                    LOG.debug(msg)
                    return make_error_response(msg, 404)

                headers = aws_stack.mock_aws_request_headers(service='s3')

                if object.get('ContentType'):
                    headers['Content-Type'] = object['ContentType']

                # stream used so large files do not fill memory
                response = request_response_stream(stream=object['Body'],
                                                   headers=headers)
                return response
            else:
                msg = 'Request URI does not match s3 specifications'
                LOG.warning(msg)
                return make_error_response(msg, 400)

        if method == 'POST':
            if uri.startswith('arn:aws:apigateway:') and ':sqs:path' in uri:
                template = integration['requestTemplates'][APPLICATION_JSON]
                account_id, queue = uri.split('/')[-2:]
                region_name = uri.split(':')[3]

                new_request = '%s&QueueName=%s' % (
                    aws_stack.render_velocity_template(template, data), queue)
                headers = aws_stack.mock_aws_request_headers(
                    service='sqs', region_name=region_name)

                url = urljoin(TEST_SQS_URL,
                              '%s/%s' % (TEST_AWS_ACCOUNT_ID, queue))
                result = common.make_http_request(url,
                                                  method='POST',
                                                  headers=headers,
                                                  data=new_request)
                return result

        raise Exception(
            'API Gateway AWS integration action URI "%s", method "%s" not yet implemented'
            % (uri, method))

    elif integration_type == 'AWS_PROXY':
        if uri.startswith('arn:aws:apigateway:') and ':dynamodb:action' in uri:
            # arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection
            table_name = uri.split(':dynamodb:action')[1].split('&Table=')[1]
            action = uri.split(':dynamodb:action')[1].split('&Table=')[0]

            if 'PutItem' in action and method == 'PUT':
                response_template = response_templates.get('application/json')

                if response_template is None:
                    msg = 'Invalid response template defined in integration response.'
                    LOG.info('%s Existing: %s' % (msg, response_templates))
                    return make_error_response(msg, 404)

                response_template = json.loads(response_template)
                if response_template['TableName'] != table_name:
                    msg = 'Invalid table name specified in integration response template.'
                    return make_error_response(msg, 404)

                dynamo_client = aws_stack.connect_to_resource('dynamodb')
                table = dynamo_client.Table(table_name)

                event_data = {}
                data_dict = json.loads(data)
                for key, _ in response_template['Item'].items():
                    event_data[key] = data_dict[key]

                table.put_item(Item=event_data)
                response = requests_response(
                    event_data, headers=aws_stack.mock_aws_request_headers())
                return response
        else:
            raise Exception(
                'API Gateway action uri "%s", integration type %s not yet implemented'
                % (uri, integration_type))

    elif integration_type in ['HTTP_PROXY', 'HTTP']:

        if ':servicediscovery:' in uri:
            # check if this is a servicediscovery integration URI
            client = aws_stack.connect_to_service('servicediscovery')
            service_id = uri.split('/')[-1]
            instances = client.list_instances(
                ServiceId=service_id)['Instances']
            instance = (instances or [None])[0]
            if instance and instance.get('Id'):
                uri = 'http://%s/%s' % (instance['Id'],
                                        invocation_path.lstrip('/'))

        # apply custom request template
        data = apply_template(integration, 'request', data)
        if isinstance(data, dict):
            data = json.dumps(data)
        uri = apply_request_parameter(integration=integration,
                                      path_params=path_params)
        function = getattr(requests, method.lower())
        result = function(uri, data=data, headers=headers)
        # apply custom response template
        data = apply_template(integration, 'response', data)
        return result

    elif integration_type == 'MOCK':
        # return empty response - details filled in via responseParameters above...
        return requests_response({})

    if method == 'OPTIONS':
        # fall back to returning CORS headers if this is an OPTIONS request
        return get_cors_response(headers)

    raise Exception(
        'API Gateway integration type "%s", method "%s", URI "%s" not yet implemented'
        % (integration_type, method, uri))
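
For context, the Lambda (AWS/AWS_PROXY) branch above expects the function result to follow the API Gateway proxy response shape (statusCode, headers, body, isBase64Encoded). A small self-contained illustration of that mapping, mirroring the parsing logic above rather than reproducing the project's code:

import base64
import json

def parse_lambda_proxy_result(parsed_result):
    # illustration of how the branch above maps a Lambda result to HTTP
    status_code = int(parsed_result.get('statusCode', 200))
    headers = parsed_result.get('headers') or {}
    body = parsed_result.get('body') or ''
    if isinstance(body, dict):
        body = json.dumps(body)
    body = body.encode('utf-8') if isinstance(body, str) else body
    if parsed_result.get('isBase64Encoded', False):
        body = base64.b64decode(body)
    return status_code, headers, body

print(parse_lambda_proxy_result({'statusCode': 201, 'body': '{"created": true}'}))
# -> (201, {}, b'{"created": true}')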
Code Example #19
    def test_create_delete_stack(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        cf_client = aws_stack.connect_to_service('cloudformation')
        s3 = aws_stack.connect_to_service('s3')
        sns = aws_stack.connect_to_service('sns')
        apigateway = aws_stack.connect_to_service('apigateway')
        template = template_deployer.template_to_json(
            load_file(TEST_TEMPLATE_1))

        # deploy template
        stack_name = 'stack-%s' % short_uid()
        cloudformation.create_stack(StackName=stack_name,
                                    TemplateBody=template)

        # wait for deployment to finish
        def check_stack():
            stack = get_stack_details(stack_name)
            self.assertEqual(stack['StackStatus'], 'CREATE_COMPLETE')

        retry(check_stack, retries=3, sleep=2)

        # assert that resources have been created
        assert bucket_exists('cf-test-bucket-1')
        assert queue_exists('cf-test-queue-1')
        topic_arn = topic_exists('%s-test-topic-1-1' % stack_name)
        assert topic_arn
        assert stream_exists('cf-test-stream-1')
        resource = describe_stack_resource(stack_name,
                                           'SQSQueueNoNameProperty')
        assert queue_exists(resource['PhysicalResourceId'])

        # assert that tags have been created
        tags = s3.get_bucket_tagging(Bucket='cf-test-bucket-1')['TagSet']
        self.assertEqual(
            tags, [{
                'Key': 'foobar',
                'Value': aws_stack.get_sqs_queue_url('cf-test-queue-1')
            }])
        tags = sns.list_tags_for_resource(ResourceArn=topic_arn)['Tags']
        self.assertEqual(
            tags, [{
                'Key': 'foo',
                'Value': 'cf-test-bucket-1'
            }, {
                'Key': 'bar',
                'Value': aws_stack.s3_bucket_arn('cf-test-bucket-1')
            }])

        # assert that subscriptions have been created
        subs = sns.list_subscriptions()['Subscriptions']
        subs = [
            s for s in subs
            if (':%s:cf-test-queue-1' % TEST_AWS_ACCOUNT_ID) in s['Endpoint']
        ]
        self.assertEqual(len(subs), 1)
        self.assertIn(
            ':%s:%s-test-topic-1-1' % (TEST_AWS_ACCOUNT_ID, stack_name),
            subs[0]['TopicArn'])

        # assert that Gateway responses have been created
        test_api_name = 'test-api'
        api = [
            a for a in apigateway.get_rest_apis()['items']
            if a['name'] == test_api_name
        ][0]
        responses = apigateway.get_gateway_responses(
            restApiId=api['id'])['items']
        self.assertEqual(len(responses), 2)
        types = [r['responseType'] for r in responses]
        self.assertEqual(set(types), set(['UNAUTHORIZED', 'DEFAULT_5XX']))

        # delete the stack
        cf_client.delete_stack(StackName=stack_name)

        # assert that resources have been deleted
        assert not bucket_exists('cf-test-bucket-1')
        assert not queue_exists('cf-test-queue-1')
        assert not topic_exists('%s-test-topic-1-1' % stack_name)
        retry(lambda: self.assertFalse(stream_exists('cf-test-stream-1')))
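
The test above waits for eventually consistent state via a retry(function, retries, sleep) helper. A minimal sketch under the assumption that it simply re-invokes the callable until it stops raising:

import time

def retry(function, retries=3, sleep=1.0, **kwargs):
    # sketch: re-run the callable until it succeeds or retries are exhausted
    for attempt in range(retries):
        try:
            return function(**kwargs)
        except Exception:
            if attempt >= retries - 1:
                raise
            time.sleep(sleep)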
Code Example #20
def invoke_rest_api_integration_backend(
    api_id,
    stage,
    integration,
    method,
    path,
    invocation_path,
    data,
    headers,
    resource_path,
    context={},
    resource_id=None,
    response_templates={},
    auth_info={},
):

    relative_path, query_string_params = extract_query_string_params(
        path=invocation_path)
    integration_type_orig = integration.get("type") or integration.get(
        "integrationType") or ""
    integration_type = integration_type_orig.upper()
    uri = integration.get("uri") or integration.get("integrationUri") or ""
    try:
        path_params = extract_path_params(path=relative_path,
                                          extracted_path=resource_path)
    except Exception:
        path_params = {}
    if (uri.startswith("arn:aws:apigateway:")
            and ":lambda:path" in uri) or uri.startswith("arn:aws:lambda"):
        if integration_type in ["AWS", "AWS_PROXY"]:
            func_arn = uri
            if ":lambda:path" in uri:
                func_arn = (uri.split(":lambda:path")[1].split("functions/")
                            [1].split("/invocations")[0])

            # apply custom request template
            data_str = data
            is_base64_encoded = False
            try:
                data_str = json.dumps(data) if isinstance(
                    data, (dict, list)) else to_str(data)
                data_str = apply_template(
                    integration,
                    "request",
                    data_str,
                    path_params=path_params,
                    query_params=query_string_params,
                    headers=headers,
                )
            except UnicodeDecodeError:
                data_str = base64.b64encode(data_str)
                is_base64_encoded = True
            except Exception as e:
                LOG.warning(
                    "Unable to convert API Gateway payload to str: %s" % (e))
                pass

            # Sample request context:
            # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
            request_context = get_lambda_event_request_context(
                method,
                path,
                data,
                headers,
                integration_uri=uri,
                resource_id=resource_id,
                resource_path=resource_path,
                auth_info=auth_info,
            )
            stage_variables = get_stage_variables(api_id, stage)

            result = lambda_api.process_apigateway_invocation(
                func_arn,
                relative_path,
                data_str,
                stage,
                api_id,
                headers,
                is_base64_encoded=is_base64_encoded,
                path_params=path_params,
                query_string_params=query_string_params,
                method=method,
                resource_path=resource_path,
                request_context=request_context,
                event_context=context,
                stage_variables=stage_variables,
            )

            if isinstance(result, FlaskResponse):
                response = flask_to_requests_response(result)
            elif isinstance(result, Response):
                response = result
            else:
                response = LambdaResponse()
                parsed_result = (result if isinstance(result, dict) else
                                 json.loads(str(result or "{}")))
                parsed_result = common.json_safe(parsed_result)
                parsed_result = {} if parsed_result is None else parsed_result
                response.status_code = int(parsed_result.get(
                    "statusCode", 200))
                parsed_headers = parsed_result.get("headers", {})
                if parsed_headers is not None:
                    response.headers.update(parsed_headers)
                try:
                    result_body = parsed_result.get("body")
                    if isinstance(result_body, dict):
                        response._content = json.dumps(result_body)
                    else:
                        body_bytes = to_bytes(to_str(result_body or ""))
                        if parsed_result.get("isBase64Encoded", False):
                            body_bytes = base64.b64decode(body_bytes)
                        response._content = body_bytes
                except Exception as e:
                    LOG.warning("Couldn't set Lambda response content: %s" % e)
                    response._content = "{}"
                update_content_length(response)
                response.multi_value_headers = parsed_result.get(
                    "multiValueHeaders") or {}

            # apply custom response template
            response._content = apply_template(integration, "response",
                                               response._content)
            response.headers["Content-Length"] = str(
                len(response.content or ""))

            return response

        raise Exception(
            'API Gateway integration type "%s", action "%s", method "%s" invalid or not yet implemented'
            % (integration_type, uri, method))

    elif integration_type == "AWS":
        if "kinesis:action/" in uri:
            if uri.endswith("kinesis:action/PutRecord"):
                target = kinesis_listener.ACTION_PUT_RECORD
            if uri.endswith("kinesis:action/PutRecords"):
                target = kinesis_listener.ACTION_PUT_RECORDS
            if uri.endswith("kinesis:action/ListStreams"):
                target = kinesis_listener.ACTION_LIST_STREAMS

            # apply request templates
            new_data = apply_request_response_templates(
                data,
                integration.get("requestTemplates"),
                content_type=APPLICATION_JSON)
            # forward records to target kinesis stream
            headers = aws_stack.mock_aws_request_headers(service="kinesis")
            headers["X-Amz-Target"] = target
            result = common.make_http_request(url=config.TEST_KINESIS_URL,
                                              method="POST",
                                              data=new_data,
                                              headers=headers)
            # apply response template
            result = apply_request_response_templates(
                result, response_templates, content_type=APPLICATION_JSON)
            return result

        elif "states:action/" in uri:
            action = uri.split("/")[-1]
            payload = {}

            if APPLICATION_JSON in integration.get("requestTemplates", {}):
                payload = apply_request_response_templates(
                    data,
                    integration.get("requestTemplates"),
                    content_type=APPLICATION_JSON,
                    as_json=True,
                )
            else:
                payload = json.loads(data.decode("utf-8"))
            client = aws_stack.connect_to_service("stepfunctions")

            # Hot fix since step functions local package responses: Unsupported Operation: 'StartSyncExecution'
            method_name = (camel_to_snake_case(action)
                           if action != "StartSyncExecution" else
                           "start_execution")

            try:
                method = getattr(client, method_name)
            except AttributeError:
                msg = "Invalid step function action: %s" % method_name
                LOG.error(msg)
                return make_error_response(msg, 400)

            result = method(**payload)
            result = json_safe(
                {k: result[k]
                 for k in result if k != "ResponseMetadata"})
            response = requests_response(
                content=result,
                headers=aws_stack.mock_aws_request_headers(),
            )

            if action == "StartSyncExecution":
                # poll for the execution result and return it
                result = await_sfn_execution_result(result["executionArn"])
                result_status = result.get("status")
                if result_status != "SUCCEEDED":
                    return make_error_response(
                        "StepFunctions execution %s failed with status '%s'" %
                        (result["executionArn"], result_status),
                        500,
                    )
                result = json_safe(result)
                response = requests_response(content=result)

            # apply response templates
            response = apply_request_response_templates(
                response, response_templates, content_type=APPLICATION_JSON)
            return response
        elif "s3:path/" in uri and method == "GET":
            s3 = aws_stack.connect_to_service("s3")
            uri_match = re.match(TARGET_REGEX_S3_URI, uri)
            if uri_match:
                bucket, object_key = uri_match.group("bucket", "object")
                LOG.debug("Getting request for bucket %s object %s", bucket,
                          object_key)
                try:
                    object = s3.get_object(Bucket=bucket, Key=object_key)
                except s3.exceptions.NoSuchKey:
                    msg = "Object %s not found" % object_key
                    LOG.debug(msg)
                    return make_error_response(msg, 404)

                headers = aws_stack.mock_aws_request_headers(service="s3")

                if object.get("ContentType"):
                    headers["Content-Type"] = object["ContentType"]

                # stream used so large files do not fill memory
                response = request_response_stream(stream=object["Body"],
                                                   headers=headers)
                return response
            else:
                msg = "Request URI does not match s3 specifications"
                LOG.warning(msg)
                return make_error_response(msg, 400)

        if method == "POST":
            if uri.startswith("arn:aws:apigateway:") and ":sqs:path" in uri:
                template = integration["requestTemplates"][APPLICATION_JSON]
                account_id, queue = uri.split("/")[-2:]
                region_name = uri.split(":")[3]

                new_request = "%s&QueueName=%s" % (
                    aws_stack.render_velocity_template(template, data),
                    queue,
                )
                headers = aws_stack.mock_aws_request_headers(
                    service="sqs", region_name=region_name)

                url = urljoin(config.TEST_SQS_URL,
                              "%s/%s" % (TEST_AWS_ACCOUNT_ID, queue))
                result = common.make_http_request(url,
                                                  method="POST",
                                                  headers=headers,
                                                  data=new_request)
                return result

        raise Exception(
            'API Gateway AWS integration action URI "%s", method "%s" not yet implemented'
            % (uri, method))

    elif integration_type == "AWS_PROXY":
        if uri.startswith("arn:aws:apigateway:") and ":dynamodb:action" in uri:
            # arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection
            table_name = uri.split(":dynamodb:action")[1].split("&Table=")[1]
            action = uri.split(":dynamodb:action")[1].split("&Table=")[0]

            if "PutItem" in action and method == "PUT":
                response_template = response_templates.get("application/json")

                if response_template is None:
                    msg = "Invalid response template defined in integration response."
                    LOG.info("%s Existing: %s" % (msg, response_templates))
                    return make_error_response(msg, 404)

                response_template = json.loads(response_template)
                if response_template["TableName"] != table_name:
                    msg = "Invalid table name specified in integration response template."
                    return make_error_response(msg, 404)

                dynamo_client = aws_stack.connect_to_resource("dynamodb")
                table = dynamo_client.Table(table_name)

                event_data = {}
                data_dict = json.loads(data)
                for key, _ in response_template["Item"].items():
                    event_data[key] = data_dict[key]

                table.put_item(Item=event_data)
                response = requests_response(event_data)
                return response
        else:
            raise Exception(
                'API Gateway action uri "%s", integration type %s not yet implemented'
                % (uri, integration_type))

    elif integration_type in ["HTTP_PROXY", "HTTP"]:

        if ":servicediscovery:" in uri:
            # check if this is a servicediscovery integration URI
            client = aws_stack.connect_to_service("servicediscovery")
            service_id = uri.split("/")[-1]
            instances = client.list_instances(
                ServiceId=service_id)["Instances"]
            instance = (instances or [None])[0]
            if instance and instance.get("Id"):
                uri = "http://%s/%s" % (instance["Id"],
                                        invocation_path.lstrip("/"))

        # apply custom request template
        data = apply_template(integration, "request", data)
        if isinstance(data, dict):
            data = json.dumps(data)
        uri = apply_request_parameter(integration=integration,
                                      path_params=path_params)
        function = getattr(requests, method.lower())
        result = function(uri, data=data, headers=headers)
        # apply custom response template
        data = apply_template(integration, "response", data)
        return result

    elif integration_type == "MOCK":
        # return empty response - details filled in via responseParameters above...
        return requests_response({})

    if method == "OPTIONS":
        # fall back to returning CORS headers if this is an OPTIONS request
        return get_cors_response(headers)

    raise Exception(
        'API Gateway integration type "%s", method "%s", URI "%s" not yet implemented'
        % (integration_type, method, uri))
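
The Step Functions branch above derives the botocore client method from the action name via camel_to_snake_case (e.g. StartExecution -> start_execution). A plausible sketch of such a conversion; the project's real helper may differ:

import re

def camel_to_snake_case(string):
    # insert an underscore before every inner uppercase letter, then lowercase
    return re.sub(r'(?<!^)(?=[A-Z])', '_', string).lower()

assert camel_to_snake_case('StartExecution') == 'start_execution'
assert camel_to_snake_case('StartSyncExecution') == 'start_sync_execution'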
Code Example #21
    def test_bucket_notifications(self):

        s3_resource = aws_stack.connect_to_resource('s3')
        s3_client = aws_stack.connect_to_service('s3')
        sqs_client = aws_stack.connect_to_service('sqs')

        # create test bucket and queue
        s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
        queue_info = sqs_client.create_queue(QueueName=TEST_QUEUE_NAME_FOR_S3)

        # create notification on bucket
        queue_url = queue_info['QueueUrl']
        queue_arn = aws_stack.sqs_queue_arn(TEST_QUEUE_NAME_FOR_S3)
        events = ['s3:ObjectCreated:*', 's3:ObjectRemoved:Delete']
        filter_rules = {
            'FilterRules': [{
                'Name': 'prefix',
                'Value': 'testupload/'
            }, {
                'Name': 'suffix',
                'Value': 'testfile.txt'
            }]
        }
        s3_client.put_bucket_notification_configuration(
            Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
            NotificationConfiguration={
                'QueueConfigurations': [
                    {
                        'Id': 'id0001',
                        'QueueArn': queue_arn,
                        'Events': events,
                        'Filter': {
                            'Key': filter_rules
                        }
                    },
                    {
                        # Add second dummy config to fix https://github.com/localstack/localstack/issues/450
                        'Id': 'id0002',
                        'QueueArn': queue_arn,
                        'Events': [],
                        'Filter': {
                            'Key': filter_rules
                        }
                    }
                ]
            })

        # retrieve and check notification config
        config = s3_client.get_bucket_notification_configuration(
            Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
        self.assertEqual(len(config['QueueConfigurations']), 2)
        config = [c for c in config['QueueConfigurations'] if c['Events']][0]
        self.assertEqual(events, config['Events'])
        self.assertEqual(filter_rules, config['Filter']['Key'])

        # upload file to S3 (this should NOT trigger a notification)
        test_key1 = '/testdata'
        test_data1 = b'{"test": "bucket_notification1"}'
        s3_client.upload_fileobj(BytesIO(test_data1),
                                 TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
                                 test_key1)

        # upload file to S3 (this should trigger a notification)
        test_key2 = 'testupload/dir1/testfile.txt'
        test_data2 = b'{"test": "bucket_notification2"}'
        s3_client.upload_fileobj(BytesIO(test_data2),
                                 TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
                                 test_key2)

        # receive, assert, and delete message from SQS
        self._receive_assert_delete(
            queue_url, [{
                'key': test_key2
            }, {
                'name': TEST_BUCKET_NAME_WITH_NOTIFICATIONS
            }], sqs_client)

        # delete notification config
        self._delete_notification_config()

        # put notification config with a single event type
        event = 's3:ObjectCreated:*'
        s3_client.put_bucket_notification_configuration(
            Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
            NotificationConfiguration={
                'QueueConfigurations': [{
                    'Id': 'id123456',
                    'QueueArn': queue_arn,
                    'Events': [event]
                }]
            })
        config = s3_client.get_bucket_notification_configuration(
            Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
        config = config['QueueConfigurations'][0]
        self.assertEqual(config['Events'], [event])

        # put notification config with a single event type and a prefix filter rule
        event = 's3:ObjectCreated:*'
        filter_rules = {
            'FilterRules': [{
                'Name': 'prefix',
                'Value': 'testupload/'
            }]
        }
        s3_client.put_bucket_notification_configuration(
            Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
            NotificationConfiguration={
                'QueueConfigurations': [{
                    'Id': 'id123456',
                    'QueueArn': queue_arn,
                    'Events': [event],
                    'Filter': {
                        'Key': filter_rules
                    }
                }]
            })
        config = s3_client.get_bucket_notification_configuration(
            Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
        config = config['QueueConfigurations'][0]
        self.assertEqual(config['Events'], [event])
        self.assertEqual(filter_rules, config['Filter']['Key'])

        # upload file to S3 (this should trigger a notification)
        test_key2 = 'testupload/dir1/testfile.txt'
        test_data2 = b'{"test": "bucket_notification2"}'
        s3_client.upload_fileobj(BytesIO(test_data2),
                                 TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
                                 test_key2)
        # receive, assert, and delete message from SQS
        self._receive_assert_delete(
            queue_url, [{
                'key': test_key2
            }, {
                'name': TEST_BUCKET_NAME_WITH_NOTIFICATIONS
            }], sqs_client)

        # delete notification config
        self._delete_notification_config()

        #
        # Tests s3->sns->sqs notifications
        #
        sns_client = aws_stack.connect_to_service('sns')
        topic_info = sns_client.create_topic(Name=TEST_S3_TOPIC_NAME)

        s3_client.put_bucket_notification_configuration(
            Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
            NotificationConfiguration={
                'TopicConfigurations': [{
                    'Id': 'id123',
                    'Events': ['s3:ObjectCreated:*'],
                    'TopicArn': topic_info['TopicArn']
                }]
            })

        sns_client.subscribe(TopicArn=topic_info['TopicArn'],
                             Protocol='sqs',
                             Endpoint=queue_arn)

        test_key2 = 'testupload/dir1/testfile.txt'
        test_data2 = b'{"test": "bucket_notification2"}'

        s3_client.upload_fileobj(BytesIO(test_data2),
                                 TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
                                 test_key2)

        # verify subject and records
        def verify():
            response = sqs_client.receive_message(QueueUrl=queue_url)
            for message in response['Messages']:
                snsObj = json.loads(message['Body'])
                testutil.assert_object({'Subject': 'Amazon S3 Notification'},
                                       snsObj)
                notificationObj = json.loads(snsObj['Message'])
                testutil.assert_objects(
                    [{
                        'key': test_key2
                    }, {
                        'name': TEST_BUCKET_NAME_WITH_NOTIFICATIONS
                    }], notificationObj['Records'])

                sqs_client.delete_message(
                    QueueUrl=queue_url, ReceiptHandle=message['ReceiptHandle'])

        retry(verify, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
        self._delete_notification_config()
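
In the s3->sns->sqs leg above, each SQS body is an SNS envelope whose Message field is itself a JSON string containing the S3 event. An illustration of the structure that verify() unpacks; all field values here are invented for the example:

import json

sns_envelope = {
    'Subject': 'Amazon S3 Notification',
    'Message': json.dumps({'Records': [{
        's3': {
            'bucket': {'name': 'test-bucket-with-notifications'},
            'object': {'key': 'testupload/dir1/testfile.txt'}
        }
    }]})
}
records = json.loads(sns_envelope['Message'])['Records']
assert records[0]['s3']['object']['key'] == 'testupload/dir1/testfile.txt'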
Code Example #22
def test_bucket_notifications():

    s3_resource = aws_stack.connect_to_resource('s3')
    s3_client = aws_stack.connect_to_service('s3')
    sqs_client = aws_stack.connect_to_service('sqs')

    # create test bucket and queue
    s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
    queue_info = sqs_client.create_queue(QueueName=TEST_QUEUE_NAME_FOR_S3)

    # create notification on bucket
    queue_url = queue_info['QueueUrl']
    queue_arn = aws_stack.sqs_queue_arn(TEST_QUEUE_NAME_FOR_S3)
    events = ['s3:ObjectCreated:*', 's3:ObjectRemoved:Delete']
    filter_rules = {
        'FilterRules': [{
            'Name': 'prefix',
            'Value': 'testupload/'
        }, {
            'Name': 'suffix',
            'Value': 'testfile.txt'
        }]
    }
    s3_client.put_bucket_notification_configuration(
        Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
        NotificationConfiguration={
            'QueueConfigurations': [{
                'Id': 'id123456',
                'QueueArn': queue_arn,
                'Events': events,
                'Filter': {
                    'Key': filter_rules
                }
            }]
        })

    # retrieve and check notification config
    config = s3_client.get_bucket_notification_configuration(
        Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
    config = config['QueueConfigurations'][0]
    assert events == config['Events']
    assert filter_rules == config['Filter']['Key']

    # upload file to S3 (this should NOT trigger a notification)
    test_key1 = '/testdata'
    test_data1 = b'{"test": "bucket_notification1"}'
    s3_client.upload_fileobj(BytesIO(test_data1),
                             TEST_BUCKET_NAME_WITH_NOTIFICATIONS, test_key1)

    # upload file to S3 (this should trigger a notification)
    test_key2 = 'testupload/dir1/testfile.txt'
    test_data2 = b'{"test": "bucket_notification2"}'
    s3_client.upload_fileobj(BytesIO(test_data2),
                             TEST_BUCKET_NAME_WITH_NOTIFICATIONS, test_key2)

    # receive, assert, and delete message from SQS
    receive_assert_delete(queue_url,
                          [{
                              'key': test_key2
                          }, {
                              'name': TEST_BUCKET_NAME_WITH_NOTIFICATIONS
                          }], sqs_client)

    # delete notification config
    _delete_notification_config()

    # put notification config with a single event type
    event = 's3:ObjectCreated:*'
    s3_client.put_bucket_notification_configuration(
        Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
        NotificationConfiguration={
            'QueueConfigurations': [{
                'Id': 'id123456',
                'QueueArn': queue_arn,
                'Events': [event]
            }]
        })
    config = s3_client.get_bucket_notification_configuration(
        Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
    config = config['QueueConfigurations'][0]
    assert config['Events'] == [event]

    # put notification config with a single event type and a prefix filter rule
    event = 's3:ObjectCreated:*'
    filter_rules = {
        'FilterRules': [{
            'Name': 'prefix',
            'Value': 'testupload/'
        }]
    }
    s3_client.put_bucket_notification_configuration(
        Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS,
        NotificationConfiguration={
            'QueueConfigurations': [{
                'Id': 'id123456',
                'QueueArn': queue_arn,
                'Events': [event],
                'Filter': {
                    'Key': filter_rules
                }
            }]
        })
    config = s3_client.get_bucket_notification_configuration(
        Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
    config = config['QueueConfigurations'][0]
    assert config['Events'] == [event]
    assert filter_rules == config['Filter']['Key']

    # upload file to S3 (this should trigger a notification)
    test_key2 = 'testupload/dir1/testfile.txt'
    test_data2 = b'{"test": "bucket_notification2"}'
    s3_client.upload_fileobj(BytesIO(test_data2),
                             TEST_BUCKET_NAME_WITH_NOTIFICATIONS, test_key2)
    # receive, assert, and delete message from SQS
    receive_assert_delete(queue_url,
                          [{
                              'key': test_key2
                          }, {
                              'name': TEST_BUCKET_NAME_WITH_NOTIFICATIONS
                          }], sqs_client)

    # delete notification config
    _delete_notification_config()
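
The prefix/suffix FilterRules are what make the first upload ('/testdata') silent and the second ('testupload/dir1/testfile.txt') produce a notification. A minimal sketch of the matching semantics, assuming the usual S3 behavior of ANDing all rules within one filter:

def key_matches_filter(key, filter_rules):
    # sketch: every rule in the filter must match for a notification to fire
    for rule in filter_rules['FilterRules']:
        name = rule['Name'].lower()
        if name == 'prefix' and not key.startswith(rule['Value']):
            return False
        if name == 'suffix' and not key.endswith(rule['Value']):
            return False
    return True

rules = {'FilterRules': [{'Name': 'prefix', 'Value': 'testupload/'},
                         {'Name': 'suffix', 'Value': 'testfile.txt'}]}
assert not key_matches_filter('/testdata', rules)
assert key_matches_filter('testupload/dir1/testfile.txt', rules)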
Code Example #23
File: test_integration.py Project: bbc/localstack
def test_kinesis_lambda_sns_ddb_streams():

    ddb_lease_table_suffix = '-kclapp'
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    dynamodb_service = aws_stack.connect_to_service('dynamodb')
    dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
    kinesis = aws_stack.connect_to_service('kinesis')
    sns = aws_stack.connect_to_service('sns')

    LOGGER.info('Creating test streams...')
    run_safe(lambda: dynamodb_service.delete_table(
        TableName=TEST_STREAM_NAME + ddb_lease_table_suffix), print_error=False)
    aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

    # subscribe to inbound Kinesis stream
    def process_records(records, shard_id):
        EVENTS.extend(records)

    # start the KCL client process in the background
    kinesis_connector.listen_to_kinesis(TEST_STREAM_NAME, listener_func=process_records,
        wait_until_started=True, ddb_lease_table_suffix=ddb_lease_table_suffix)

    LOGGER.info('Kinesis consumer initialized.')

    # create table with stream forwarding config
    testutil.create_dynamodb_table(TEST_TABLE_NAME, partition_key=PARTITION_KEY,
        stream_view_type='NEW_AND_OLD_IMAGES')

    # list DDB streams and make sure the table stream is there
    streams = dynamodbstreams.list_streams()
    ddb_event_source_arn = None
    for stream in streams['Streams']:
        if stream['TableName'] == TEST_TABLE_NAME:
            ddb_event_source_arn = stream['StreamArn']
    assert ddb_event_source_arn

    # deploy test lambda connected to DynamoDB Stream
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
        zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)
    # make sure we cannot create Lambda with same name twice
    assert_raises(Exception, testutil.create_lambda_function, func_name=TEST_LAMBDA_NAME_DDB,
        zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

    # deploy test lambda connected to Kinesis Stream
    kinesis_event_source_arn = kinesis.describe_stream(
        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)['StreamDescription']['StreamARN']
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM,
        zip_file=zip_file, event_source_arn=kinesis_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

    # set number of items to update/put to table
    num_events_ddb = 15
    num_put_new_items = 5
    num_put_existing_items = 2
    num_batch_items = 3
    num_updates_ddb = num_events_ddb - num_put_new_items - num_put_existing_items - num_batch_items

    LOGGER.info('Putting %s items to table...' % num_events_ddb)
    table = dynamodb.Table(TEST_TABLE_NAME)
    for i in range(0, num_put_new_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123'
        })
    # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
    for i in range(0, num_put_existing_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123_put_existing'
        })

    # batch write some items containing non-ASCII characters
    dynamodb.batch_write_item(RequestItems={TEST_TABLE_NAME: [
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ✓'}}},
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 £'}}},
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ¢'}}}
    ]})
    # update some items, which also triggers notification events
    for i in range(0, num_updates_ddb):
        dynamodb_service.update_item(TableName=TEST_TABLE_NAME,
            Key={PARTITION_KEY: {'S': 'testId%s' % i}},
            AttributeUpdates={'data': {
                'Action': 'PUT',
                'Value': {'S': 'foobar123_updated'}
            }})

    # put items to stream
    num_events_kinesis = 10
    LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
    kinesis.put_records(
        Records=[
            {
                'Data': '{}',
                'PartitionKey': 'testId%s' % i
            } for i in range(0, num_events_kinesis)
        ], StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
    )

    # put 1 item to stream that will trigger an error in the Lambda
    kinesis.put_record(Data='{"%s": 1}' % lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
        PartitionKey='testIderror', StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # create SNS topic, connect it to the Lambda, publish test message
    num_events_sns = 3
    response = sns.create_topic(Name=TEST_TOPIC_NAME)
    sns.subscribe(TopicArn=response['TopicArn'], Protocol='lambda',
        Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
    for i in range(0, num_events_sns):
        sns.publish(TopicArn=response['TopicArn'], Message='test message %s' % i)

    # get latest records
    latest = aws_stack.kinesis_get_latest_records(TEST_LAMBDA_SOURCE_STREAM_NAME,
        shard_id='shardId-000000000000', count=10)
    assert len(latest) == 10

    LOGGER.info('Waiting some time before finishing test.')
    time.sleep(2)

    num_events = num_events_ddb + num_events_kinesis + num_events_sns

    def check_events():
        if len(EVENTS) != num_events:
            LOGGER.warning(('DynamoDB and Kinesis updates retrieved ' +
                '(actual/expected): %s/%s') % (len(EVENTS), num_events))
        assert len(EVENTS) == num_events
        event_items = [json.loads(base64.b64decode(e['data'])) for e in EVENTS]
        inserts = [e for e in event_items if e.get('__action_type') == 'INSERT']
        modifies = [e for e in event_items if e.get('__action_type') == 'MODIFY']
        assert len(inserts) == num_put_new_items + num_batch_items
        assert len(modifies) == num_put_existing_items + num_updates_ddb

    # this can take a long time in CI, make sure we give it enough time/retries
    retry(check_events, retries=7, sleep=3)

    # note: the INSERT/MODIFY event type counts are asserted inside check_events above

    # check CloudWatch metrics for the Lambda invocations
    stats1 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM)
    assert len(stats1['Datapoints']) == 2 + num_events_sns
    stats2 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM, 'Errors')
    assert len(stats2['Datapoints']) == 1
    stats3 = get_lambda_metrics(TEST_LAMBDA_NAME_DDB)
    assert len(stats3['Datapoints']) == num_events_ddb
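
The test opens with run_safe(...) to drop a possibly nonexistent KCL lease table without failing. A sketch of such a helper, assuming it simply swallows exceptions and optionally prints them:

def run_safe(func, *args, print_error=True, **kwargs):
    # sketch: invoke func and swallow any exception it raises
    try:
        return func(*args, **kwargs)
    except Exception as e:
        if print_error:
            print('Error in run_safe: %s' % e)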
Code Example #24
def start_test(env=ENV_DEV):
    try:
        # setup environment
        if env == ENV_DEV:
            infra.start_infra(asynchronous=True)  # 'async' is a reserved word in Python 3.7+
            time.sleep(6)

        dynamodb = aws_stack.connect_to_resource('dynamodb', env=env)
        dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams', env=env)
        kinesis = aws_stack.connect_to_service('kinesis', env=env)

        print('Creating stream...')
        aws_stack.create_kinesis_stream(TEST_STREAM_NAME)

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            EVENTS.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(TEST_STREAM_NAME, listener_func=process_records,
            wait_until_started=True)

        print("Kinesis consumer initialized.")

        # create table with stream forwarding config
        create_dynamodb_table(TEST_TABLE_NAME, partition_key=PARTITION_KEY,
            env=env, stream_view_type='NEW_AND_OLD_IMAGES')

        # list streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        event_source_arn = None
        for stream in streams['Streams']:
            if stream['TableName'] == TEST_TABLE_NAME:
                event_source_arn = stream['StreamArn']
        assert event_source_arn

        # deploy test lambda
        script = load_file(os.path.join(LOCALSTACK_ROOT_FOLDER, 'tests', 'lambdas', 'lambda_integration.py'))
        zip_file = create_lambda_archive(script, get_content=True)
        create_lambda_function(func_name=TEST_LAMBDA_NAME, zip_file=zip_file, event_source_arn=event_source_arn)

        # put items to table
        num_events = 10
        print('Putting %s items to table...' % num_events)
        table = dynamodb.Table(TEST_TABLE_NAME)
        for i in range(0, num_events):
            table.put_item(Item={
                PARTITION_KEY: 'testId123',
                'data': 'foobar123'
            })

        print("Waiting some time before finishing test.")
        time.sleep(10)

        print('DynamoDB updates retrieved via Kinesis (actual/expected): %s/%s' % (len(EVENTS), num_events))
        if len(EVENTS) != num_events:
            print('ERROR receiving DynamoDB updates. Running processes:')
            print(run("ps aux | grep 'python\|java\|node'"))
        assert len(EVENTS) == num_events

        print("Test finished successfully")
        cleanup(env=env)

    except KeyboardInterrupt:
        infra.KILLED = True
Code Example #25
 def setUpClass(cls):
     cls.dynamodb = aws_stack.connect_to_resource('dynamodb')
Code Example #26
def invoke_rest_api_integration_backend(
        invocation_context: ApiInvocationContext):
    # define local aliases from invocation context
    invocation_path = invocation_context.path_with_query_string
    method = invocation_context.method
    data = invocation_context.data
    headers = invocation_context.headers
    api_id = invocation_context.api_id
    stage = invocation_context.stage
    resource_path = invocation_context.resource_path
    response_templates = invocation_context.response_templates
    integration = invocation_context.integration

    # extract integration type and path parameters
    relative_path, query_string_params = extract_query_string_params(
        path=invocation_path)
    integration_type_orig = integration.get("type") or integration.get(
        "integrationType") or ""
    integration_type = integration_type_orig.upper()
    uri = integration.get("uri") or integration.get("integrationUri") or ""
    try:
        path_params = extract_path_params(path=relative_path,
                                          extracted_path=resource_path)
        invocation_context.path_params = path_params
    except Exception:
        path_params = {}

    if (uri.startswith("arn:aws:apigateway:")
            and ":lambda:path" in uri) or uri.startswith("arn:aws:lambda"):
        if integration_type in ["AWS", "AWS_PROXY"]:
            func_arn = uri
            if ":lambda:path" in uri:
                func_arn = (uri.split(":lambda:path")[1].split("functions/")
                            [1].split("/invocations")[0])

            invocation_context.context = get_event_request_context(
                invocation_context)
            invocation_context.stage_variables = helpers.get_stage_variables(
                invocation_context)
            request_templates = RequestTemplates()
            payload = request_templates.render(invocation_context)

            # TODO: change this signature to InvocationContext as well!
            result = lambda_api.process_apigateway_invocation(
                func_arn,
                relative_path,
                payload,
                stage,
                api_id,
                headers,
                is_base64_encoded=invocation_context.is_data_base64_encoded,
                path_params=path_params,
                query_string_params=query_string_params,
                method=method,
                resource_path=resource_path,
                request_context=invocation_context.context,
                stage_variables=invocation_context.stage_variables,
            )

            if isinstance(result, FlaskResponse):
                response = flask_to_requests_response(result)
            elif isinstance(result, Response):
                response = result
            else:
                response = LambdaResponse()
                parsed_result = (result if isinstance(result, dict) else
                                 json.loads(str(result or "{}")))
                parsed_result = common.json_safe(parsed_result)
                parsed_result = {} if parsed_result is None else parsed_result
                response.status_code = int(parsed_result.get(
                    "statusCode", 200))
                parsed_headers = parsed_result.get("headers", {})
                if parsed_headers is not None:
                    response.headers.update(parsed_headers)
                try:
                    result_body = parsed_result.get("body")
                    if isinstance(result_body, dict):
                        response._content = json.dumps(result_body)
                    else:
                        body_bytes = to_bytes(to_str(result_body or ""))
                        if parsed_result.get("isBase64Encoded", False):
                            body_bytes = base64.b64decode(body_bytes)
                        response._content = body_bytes
                except Exception as e:
                    LOG.warning("Couldn't set Lambda response content: %s", e)
                    response._content = "{}"
                update_content_length(response)
                response.multi_value_headers = parsed_result.get(
                    "multiValueHeaders") or {}

            # apply custom response template
            invocation_context.response = response

            response_templates = ResponseTemplates()
            response_templates.render(invocation_context)
            invocation_context.response.headers["Content-Length"] = str(
                len(response.content or ""))
            return invocation_context.response

        raise Exception(
            f'API Gateway integration type "{integration_type}", action "{uri}", '
            f'method "{method}" not yet implemented'
        )

    elif integration_type == "AWS":
        if "kinesis:action/" in uri:
            if uri.endswith("kinesis:action/PutRecord"):
                target = kinesis_listener.ACTION_PUT_RECORD
            elif uri.endswith("kinesis:action/PutRecords"):
                target = kinesis_listener.ACTION_PUT_RECORDS
            elif uri.endswith("kinesis:action/ListStreams"):
                target = kinesis_listener.ACTION_LIST_STREAMS
            else:
                LOG.info(
                    f"Unexpected API Gateway integration URI '{uri}' for integration type {integration_type}",
                )
                target = ""

            try:
                invocation_context.context = get_event_request_context(
                    invocation_context)
                invocation_context.stage_variables = helpers.get_stage_variables(
                    invocation_context)
                request_templates = RequestTemplates()
                payload = request_templates.render(invocation_context)

            except Exception as e:
                LOG.warning("Unable to convert API Gateway payload to str", e)
                raise

            # forward records to target kinesis stream
            headers = aws_stack.mock_aws_request_headers(
                service="kinesis", region_name=invocation_context.region_name)
            headers["X-Amz-Target"] = target

            result = common.make_http_request(
                url=config.service_url("kineses"),
                data=payload,
                headers=headers,
                method="POST")

            # apply response template
            invocation_context.response = result
            response_templates = ResponseTemplates()
            response_templates.render(invocation_context)
            return invocation_context.response

        elif "states:action/" in uri:
            action = uri.split("/")[-1]

            if APPLICATION_JSON in integration.get("requestTemplates", {}):
                request_templates = RequestTemplates()
                payload = request_templates.render(invocation_context)
                payload = json.loads(payload)
            else:
                # no JSON request template defined - fall back to parsing the
                # raw request body (bytes in py3, hence the decode)
                payload = json.loads(data.decode("utf-8"))
            client = aws_stack.connect_to_service("stepfunctions")

            if isinstance(payload.get("input"), dict):
                payload["input"] = json.dumps(payload["input"])

            # Hot fix since step functions local package responses: Unsupported Operation: 'StartSyncExecution'
            method_name = (camel_to_snake_case(action)
                           if action != "StartSyncExecution" else
                           "start_execution")

            try:
                method = getattr(client, method_name)
            except AttributeError:
                msg = "Invalid step function action: %s" % method_name
                LOG.error(msg)
                return make_error_response(msg, 400)

            result = method(**payload)
            result = json_safe(
                {k: result[k]
                 for k in result if k != "ResponseMetadata"})
            response = requests_response(
                content=result,
                headers=aws_stack.mock_aws_request_headers(),
            )

            if action == "StartSyncExecution":
                # poll for the execution result and return it
                result = await_sfn_execution_result(result["executionArn"])
                result_status = result.get("status")
                if result_status != "SUCCEEDED":
                    return make_error_response(
                        "StepFunctions execution %s failed with status '%s'" %
                        (result["executionArn"], result_status),
                        500,
                    )
                result = json_safe(result)
                response = requests_response(content=result)

            # apply response templates
            invocation_context.response = response
            response_templates = ResponseTemplates()
            response_templates.render(invocation_context)
            # response = apply_request_response_templates(
            #     response, response_templates, content_type=APPLICATION_JSON
            # )
            return response
        # https://docs.aws.amazon.com/apigateway/api-reference/resource/integration/
        elif ("s3:path/" in uri or "s3:action/" in uri) and method == "GET":
            s3 = aws_stack.connect_to_service("s3")
            uri = apply_request_parameters(
                uri,
                integration=integration,
                path_params=path_params,
                query_params=query_string_params,
            )
            uri_match = re.match(TARGET_REGEX_PATH_S3_URI, uri) or re.match(
                TARGET_REGEX_ACTION_S3_URI, uri)
            if uri_match:
                bucket, object_key = uri_match.group("bucket", "object")
                LOG.debug("Getting request for bucket %s object %s", bucket,
                          object_key)
                try:
                    object = s3.get_object(Bucket=bucket, Key=object_key)
                except s3.exceptions.NoSuchKey:
                    msg = "Object %s not found" % object_key
                    LOG.debug(msg)
                    return make_error_response(msg, 404)

                headers = aws_stack.mock_aws_request_headers(service="s3")

                if object.get("ContentType"):
                    headers["Content-Type"] = object["ContentType"]

                # stream used so large files do not fill memory
                response = request_response_stream(stream=object["Body"],
                                                   headers=headers)
                return response
            else:
                msg = "Request URI does not match s3 specifications"
                LOG.warning(msg)
                return make_error_response(msg, 400)

        if method == "POST":
            if uri.startswith("arn:aws:apigateway:") and ":sqs:path" in uri:
                template = integration["requestTemplates"][APPLICATION_JSON]
                account_id, queue = uri.split("/")[-2:]
                region_name = uri.split(":")[3]
                if "GetQueueUrl" in template or "CreateQueue" in template:
                    request_templates = RequestTemplates()
                    payload = request_templates.render(invocation_context)
                    new_request = f"{payload}&QueueName={queue}"
                else:
                    request_templates = RequestTemplates()
                    payload = request_templates.render(invocation_context)
                    queue_url = f"{config.get_edge_url()}/{account_id}/{queue}"
                    new_request = f"{payload}&QueueUrl={queue_url}"
                headers = aws_stack.mock_aws_request_headers(
                    service="sqs", region_name=region_name)

                url = urljoin(config.service_url("sqs"),
                              f"{TEST_AWS_ACCOUNT_ID}/{queue}")
                result = common.make_http_request(url,
                                                  method="POST",
                                                  headers=headers,
                                                  data=new_request)
                return result
            elif uri.startswith("arn:aws:apigateway:") and ":sns:path" in uri:
                invocation_context.context = get_event_request_context(
                    invocation_context)
                invocation_context.stage_variables = helpers.get_stage_variables(
                    invocation_context)

                integration_response = SnsIntegration().invoke(
                    invocation_context)
                return apply_request_response_templates(
                    integration_response,
                    response_templates,
                    content_type=APPLICATION_JSON)

        raise Exception(
            'API Gateway AWS integration action URI "%s", method "%s" not yet implemented'
            % (uri, method))

    elif integration_type == "AWS_PROXY":
        if uri.startswith("arn:aws:apigateway:") and ":dynamodb:action" in uri:
            # arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection
            table_name = uri.split(":dynamodb:action")[1].split("&Table=")[1]
            action = uri.split(":dynamodb:action")[1].split("&Table=")[0]

            if "PutItem" in action and method == "PUT":
                response_template = response_templates.get("application/json")

                if response_template is None:
                    msg = "Invalid response template defined in integration response."
                    LOG.info("%s Existing: %s", msg, response_templates)
                    return make_error_response(msg, 404)

                response_template = json.loads(response_template)
                if response_template["TableName"] != table_name:
                    msg = "Invalid table name specified in integration response template."
                    return make_error_response(msg, 404)

                dynamo_client = aws_stack.connect_to_resource("dynamodb")
                table = dynamo_client.Table(table_name)

                event_data = {}
                data_dict = json.loads(data)
                for key, _ in response_template["Item"].items():
                    event_data[key] = data_dict[key]

                table.put_item(Item=event_data)
                response = requests_response(event_data)
                return response
        else:
            raise Exception(
                'API Gateway action uri "%s", integration type %s not yet implemented'
                % (uri, integration_type))

    elif integration_type in ["HTTP_PROXY", "HTTP"]:

        if ":servicediscovery:" in uri:
            # check if this is a servicediscovery integration URI
            client = aws_stack.connect_to_service("servicediscovery")
            service_id = uri.split("/")[-1]
            instances = client.list_instances(
                ServiceId=service_id)["Instances"]
            instance = (instances or [None])[0]
            if instance and instance.get("Id"):
                uri = "http://%s/%s" % (instance["Id"],
                                        invocation_path.lstrip("/"))

        # apply custom request template
        invocation_context.context = get_event_request_context(
            invocation_context)
        invocation_context.stage_variables = helpers.get_stage_variables(
            invocation_context)
        request_templates = RequestTemplates()
        payload = request_templates.render(invocation_context)

        if isinstance(payload, dict):
            payload = json.dumps(payload)
        uri = apply_request_parameters(uri,
                                       integration=integration,
                                       path_params=path_params,
                                       query_params=query_string_params)
        result = requests.request(method=method,
                                  url=uri,
                                  data=payload,
                                  headers=headers)
        # apply custom response template
        invocation_context.response = result
        response_templates = ResponseTemplates()
        response_templates.render(invocation_context)
        return invocation_context.response

    elif integration_type == "MOCK":

        # TODO: apply tell don't ask principle inside ResponseTemplates or InvocationContext
        invocation_context.stage_variables = helpers.get_stage_variables(
            invocation_context)
        invocation_context.response = requests_response({})

        response_templates = ResponseTemplates()
        response_templates.render(invocation_context)

        return invocation_context.response

    if method == "OPTIONS":
        # fall back to returning CORS headers if this is an OPTIONS request
        return get_cors_response(headers)

    raise Exception(
        'API Gateway integration type "%s", method "%s", URI "%s" not yet implemented'
        % (integration_type, method, uri))
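The Lambda branch above derives the target function ARN by slicing the integration URI. A minimal, self-contained sketch of just that parsing step (the example URI below is hypothetical):

def extract_lambda_arn(uri):
    """Extract the Lambda function ARN from an API Gateway integration URI."""
    if ":lambda:path" in uri:
        # arn:aws:apigateway:<region>:lambda:path/2015-03-31/functions/<func_arn>/invocations
        return uri.split(":lambda:path")[1].split("functions/")[1].split("/invocations")[0]
    return uri

uri = ("arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
       "arn:aws:lambda:us-east-1:000000000000:function:my-func/invocations")
assert extract_lambda_arn(uri) == "arn:aws:lambda:us-east-1:000000000000:function:my-func"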
Code Example #27
0
    def test_create_delete_stack(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        cf_client = aws_stack.connect_to_service('cloudformation')
        s3 = aws_stack.connect_to_service('s3')
        sns = aws_stack.connect_to_service('sns')
        sqs = aws_stack.connect_to_service('sqs')
        apigateway = aws_stack.connect_to_service('apigateway')
        template = template_deployer.template_to_json(
            load_file(TEST_TEMPLATE_1))

        # deploy template
        stack_name = 'stack-%s' % short_uid()
        cloudformation.create_stack(StackName=stack_name,
                                    TemplateBody=template)

        # wait for deployment to finish
        def check_stack():
            stack = get_stack_details(stack_name)
            self.assertEqual(stack['StackStatus'], 'CREATE_COMPLETE')

        retry(check_stack, retries=3, sleep=2)

        # assert that resources have been created
        assert bucket_exists('cf-test-bucket-1')
        queue_url = queue_exists('cf-test-queue-1')
        assert queue_url
        topic_arn = topic_exists('%s-test-topic-1-1' % stack_name)
        assert topic_arn
        assert stream_exists('cf-test-stream-1')
        resource = describe_stack_resource(stack_name,
                                           'SQSQueueNoNameProperty')
        assert queue_exists(resource['PhysicalResourceId'])
        assert ssm_param_exists('cf-test-param-1')

        # assert that tags have been created
        tags = s3.get_bucket_tagging(Bucket='cf-test-bucket-1')['TagSet']
        self.assertEqual(
            tags, [{
                'Key': 'foobar',
                'Value': aws_stack.get_sqs_queue_url('cf-test-queue-1')
            }])
        tags = sns.list_tags_for_resource(ResourceArn=topic_arn)['Tags']
        self.assertEqual(
            tags, [{
                'Key': 'foo',
                'Value': 'cf-test-bucket-1'
            }, {
                'Key': 'bar',
                'Value': aws_stack.s3_bucket_arn('cf-test-bucket-1')
            }])
        queue_tags = sqs.list_queue_tags(QueueUrl=queue_url)
        self.assertIn('Tags', queue_tags)
        self.assertEqual(queue_tags['Tags'], {
            'key1': 'value1',
            'key2': 'value2'
        })

        # assert that bucket notifications have been created
        notifications = s3.get_bucket_notification_configuration(
            Bucket='cf-test-bucket-1')
        self.assertIn('QueueConfigurations', notifications)
        self.assertIn('LambdaFunctionConfigurations', notifications)
        self.assertEqual(notifications['QueueConfigurations'][0]['QueueArn'],
                         'aws:arn:sqs:test:testqueue')
        self.assertEqual(notifications['QueueConfigurations'][0]['Events'],
                         ['s3:ObjectDeleted:*'])
        self.assertEqual(
            notifications['LambdaFunctionConfigurations'][0]
            ['LambdaFunctionArn'], 'aws:arn:lambda:test:testfunc')
        self.assertEqual(
            notifications['LambdaFunctionConfigurations'][0]['Events'],
            ['s3:ObjectCreated:*'])

        # assert that subscriptions have been created
        subs = sns.list_subscriptions()['Subscriptions']
        subs = [
            s for s in subs
            if (':%s:cf-test-queue-1' % TEST_AWS_ACCOUNT_ID) in s['Endpoint']
        ]
        self.assertEqual(len(subs), 1)
        self.assertIn(
            ':%s:%s-test-topic-1-1' % (TEST_AWS_ACCOUNT_ID, stack_name),
            subs[0]['TopicArn'])
        # assert that subscription attributes are added properly
        attrs = sns.get_subscription_attributes(
            SubscriptionArn=subs[0]['SubscriptionArn'])['Attributes']
        self.assertEqual(
            attrs, {
                'Endpoint': subs[0]['Endpoint'],
                'Protocol': 'sqs',
                'SubscriptionArn': subs[0]['SubscriptionArn'],
                'TopicArn': subs[0]['TopicArn'],
                'FilterPolicy': json.dumps({'eventType': ['created']})
            })

        # assert that Gateway responses have been created
        test_api_name = 'test-api'
        api = [
            a for a in apigateway.get_rest_apis()['items']
            if a['name'] == test_api_name
        ][0]
        responses = apigateway.get_gateway_responses(
            restApiId=api['id'])['items']
        self.assertEqual(len(responses), 2)
        types = [r['responseType'] for r in responses]
        self.assertEqual(set(types), set(['UNAUTHORIZED', 'DEFAULT_5XX']))

        # delete the stack
        cf_client.delete_stack(StackName=stack_name)

        # assert that resources have been deleted
        assert not bucket_exists('cf-test-bucket-1')
        assert not queue_exists('cf-test-queue-1')
        assert not topic_exists('%s-test-topic-1-1' % stack_name)
        retry(lambda: self.assertFalse(stream_exists('cf-test-stream-1')))
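The test polls for eventual consistency via a retry(...) helper. A minimal sketch of such a helper using only the standard library (the actual localstack utility may differ in signature and behavior):

import time

def retry(function, retries=3, sleep=1.0, **kwargs):
    """Call `function` repeatedly until it succeeds, sleeping between attempts."""
    for attempt in range(retries):
        try:
            return function(**kwargs)
        except Exception:
            if attempt >= retries - 1:
                raise
            time.sleep(sleep)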
Code Example #28
0
def put_records(stream_name: str, records: List[Dict]) -> Dict:
    """Put a list of records to the firehose stream - either directly from a PutRecord API call, or
    received from an underlying Kinesis stream (if 'KinesisStreamAsSource' is configured)"""
    stream = get_stream(stream_name)
    if not stream:
        return error_not_found(stream_name)

    # preprocess records, add any missing attributes
    add_missing_record_attributes(records)

    for dest in stream.get("Destinations", []):

        # apply processing steps to incoming items
        proc_config = {}
        for child in dest.values():
            if isinstance(child, dict) and child.get("ProcessingConfiguration"):
                proc_config = child["ProcessingConfiguration"]
        if proc_config.get("Enabled") is not False:
            for processor in proc_config.get("Processors", []):
                # TODO: run processors asynchronously, to avoid request timeouts on PutRecord API calls
                records = preprocess_records(processor, records)

        if "ESDestinationDescription" in dest:
            es_dest = dest["ESDestinationDescription"]
            es_index = es_dest["IndexName"]
            es_type = es_dest.get("TypeName")
            es = connect_elasticsearch(endpoint=es_dest.get("ClusterEndpoint"),
                                       domain=es_dest.get("DomainARN"))
            for record in records:
                obj_id = uuid.uuid4()

                data = "{}"
                # DirectPut
                if "Data" in record:
                    data = base64.b64decode(record["Data"])
                # KinesisAsSource
                elif "data" in record:
                    data = base64.b64decode(record["data"])

                body = json.loads(data)

                try:
                    es.create(index=es_index,
                              doc_type=es_type,
                              id=obj_id,
                              body=body)
                except Exception as e:
                    LOG.error("Unable to put record to Elasticsearch: %s %s" %
                              (e, traceback.format_exc()))
                    raise
        if "S3DestinationDescription" in dest:
            s3_dest = dest["S3DestinationDescription"]
            bucket = s3_bucket_name(s3_dest["BucketARN"])
            prefix = s3_dest.get("Prefix", "")

            s3 = connect_to_resource("s3")
            batched_data = b"".join([
                base64.b64decode(r.get("Data") or r["data"]) for r in records
            ])

            obj_path = get_s3_object_path(stream_name, prefix)
            try:
                s3.Object(bucket, obj_path).put(Body=batched_data)
            except Exception as e:
                LOG.error("Unable to put records to S3 bucket: %s %s" %
                          (e, traceback.format_exc()))
                raise
        if "HttpEndpointDestinationDescription" in dest:
            http_dest = dest["HttpEndpointDestinationDescription"]
            end_point = http_dest["EndpointConfiguration"]
            url = end_point["Url"]
            record_to_send = {
                "requestId": str(uuid.uuid4()),
                "timestamp": (int(time.time())),
                "records": [],
            }
            for record in records:
                data = record.get("Data") or record.get("data")
                record_to_send["records"].append({"data": data})
            headers = {
                "Content-Type": "application/json",
            }
            try:
                requests.post(url, json=record_to_send, headers=headers)
            except Exception as e:
                LOG.info(
                    "Unable to put Firehose records to HTTP endpoint %s: %s %s"
                    % (url, e, traceback.format_exc()))
                raise
    return {"RecordId": str(uuid.uuid4())}
Code Example #29
0
def get_client(resource):
    resource_type = get_resource_type(resource)
    service = get_service_name(resource)
    if RESOURCE_TO_FUNCTION[resource_type][ACTION_CREATE].get('boto_client') == 'resource':
        return aws_stack.connect_to_resource(service)
    return aws_stack.connect_to_service(service)
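get_client consults a RESOURCE_TO_FUNCTION registry defined elsewhere in the codebase. A purely hypothetical illustration of the shape such a mapping might take (the resource types, keys, and values below are assumptions, not the actual localstack table):

ACTION_CREATE = 'create'

RESOURCE_TO_FUNCTION = {
    'S3::Bucket': {
        ACTION_CREATE: {
            'boto_client': 'resource',  # created via connect_to_resource
            'function': 'create_bucket',
        },
    },
    'SQS::Queue': {
        ACTION_CREATE: {
            'boto_client': 'client',  # created via connect_to_service
            'function': 'create_queue',
        },
    },
}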
Code Example #30
0
def start_test(env=ENV_DEV):
    try:
        # setup environment
        if env == ENV_DEV:
            infra.start_infra(asynchronous=True)
            time.sleep(6)

        dynamodb = aws_stack.connect_to_resource('dynamodb', env=env)
        dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams',
                                                       env=env)
        kinesis = aws_stack.connect_to_service('kinesis', env=env)

        print('Creating stream...')
        aws_stack.create_kinesis_stream(TEST_STREAM_NAME)

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            EVENTS.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(TEST_STREAM_NAME,
                                            listener_func=process_records,
                                            wait_until_started=True)

        print("Kinesis consumer initialized.")

        # create table with stream forwarding config
        create_dynamodb_table(TEST_TABLE_NAME,
                              partition_key=PARTITION_KEY,
                              env=env,
                              stream_view_type='NEW_AND_OLD_IMAGES')

        # list streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        event_source_arn = None
        for stream in streams['Streams']:
            if stream['TableName'] == TEST_TABLE_NAME:
                event_source_arn = stream['StreamArn']
        assert event_source_arn

        # deploy test lambda
        script = load_file(
            os.path.join(LOCALSTACK_ROOT_FOLDER, 'tests', 'lambdas',
                         'lambda_integration.py'))
        zip_file = create_lambda_archive(script, get_content=True)
        create_lambda_function(func_name=TEST_LAMBDA_NAME,
                               zip_file=zip_file,
                               event_source_arn=event_source_arn)

        # put items to table
        num_events = 10
        print('Putting %s items to table...' % num_events)
        table = dynamodb.Table(TEST_TABLE_NAME)
        for i in range(0, num_events):
            table.put_item(Item={
                PARTITION_KEY: 'testId123',
                'data': 'foobar123'
            })

        print("Waiting some time before finishing test.")
        time.sleep(10)

        print(
            'DynamoDB updates retrieved via Kinesis (actual/expected): %s/%s' %
            (len(EVENTS), num_events))
        if len(EVENTS) != num_events:
            print('ERROR receiving DynamoDB updates. Running processes:')
            print(run("ps aux | grep 'python\|java\|node'"))
        assert len(EVENTS) == num_events

        print("Test finished successfully")
        cleanup(env=env)

    except KeyboardInterrupt:
        infra.KILLED = True
Code Example #31
0
def all_s3_object_keys(bucket):
    s3_resource = aws_stack.connect_to_resource('s3')
    bucket = s3_resource.Bucket(bucket) if isinstance(bucket, str) else bucket
    return list(bucket.objects.all())
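Usage sketch (the bucket name is hypothetical); the returned items are boto3 ObjectSummary instances, so object bodies must be fetched explicitly:

for summary in all_s3_object_keys('my-test-bucket'):
    body = summary.get()['Body'].read()
    print(summary.key, len(body))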
Code Example #32
0
    def test_kinesis_lambda_sns_ddb_sqs_streams(self):
        def create_kinesis_stream(name, delete=False):
            stream = aws_stack.create_kinesis_stream(name, delete=delete)
            stream.wait_for()

        ddb_lease_table_suffix = "-kclapp"
        table_name = TEST_TABLE_NAME + "klsdss" + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        lambda_stream_name = "lambda-stream-%s" % short_uid()
        lambda_queue_name = "lambda-queue-%s" % short_uid()
        lambda_ddb_name = "lambda-ddb-%s" % short_uid()
        queue_name = "queue-%s" % short_uid()
        dynamodb = aws_stack.connect_to_resource("dynamodb")
        dynamodb_service = aws_stack.connect_to_service("dynamodb")
        dynamodbstreams = aws_stack.connect_to_service("dynamodbstreams")
        kinesis = aws_stack.connect_to_service("kinesis")
        sns = aws_stack.connect_to_service("sns")
        sqs = aws_stack.connect_to_service("sqs")

        LOGGER.info("Creating test streams...")
        run_safe(
            lambda: dynamodb_service.delete_table(TableName=stream_name +
                                                  ddb_lease_table_suffix),
            print_error=False,
        )

        create_kinesis_stream(stream_name, delete=True)
        create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(
            stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix=ddb_lease_table_suffix,
        )

        LOGGER.info("Kinesis consumer initialized.")

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(
            table_name,
            partition_key=PARTITION_KEY,
            stream_view_type="NEW_AND_OLD_IMAGES",
        )

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams["Streams"]:
            if stream["TableName"] == table_name:
                ddb_event_source_arn = stream["StreamArn"]
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS)
        testutil.create_lambda_function(
            func_name=lambda_ddb_name,
            zip_file=zip_file,
            event_source_arn=ddb_event_source_arn,
            delete=True,
        )
        # make sure we cannot create Lambda with same name twice
        with self.assertRaises(Exception):
            testutil.create_lambda_function(
                func_name=lambda_ddb_name,
                zip_file=zip_file,
                event_source_arn=ddb_event_source_arn,
            )

        # deploy test lambda connected to Kinesis Stream
        kinesis_event_source_arn = kinesis.describe_stream(
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
        )["StreamDescription"]["StreamARN"]
        testutil.create_lambda_function(
            func_name=lambda_stream_name,
            zip_file=zip_file,
            event_source_arn=kinesis_event_source_arn,
        )

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(queue_name)
        testutil.create_lambda_function(
            func_name=lambda_queue_name,
            zip_file=zip_file,
            event_source_arn=sqs_queue_info["QueueArn"],
        )

        # set number of items to update/put to table
        num_events_ddb = 15
        num_put_new_items = 5
        num_put_existing_items = 2
        num_batch_items = 3
        num_updates_ddb = (num_events_ddb - num_put_new_items -
                           num_put_existing_items - num_batch_items)

        LOGGER.info("Putting %s items to table..." % num_events_ddb)
        table = dynamodb.Table(table_name)
        for i in range(0, num_put_new_items):
            table.put_item(Item={
                PARTITION_KEY: "testId%s" % i,
                "data": "foobar123"
            })
        # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
        for i in range(0, num_put_existing_items):
            table.put_item(Item={
                PARTITION_KEY: "testId%s" % i,
                "data": "foobar123_put_existing"
            })

        # batch write some items containing non-ASCII characters
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobar123 ✓"
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobar123 £"
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobar123 ¢"
                            }
                        }
                    },
                ]
            })
        # update some items, which also triggers notification events
        for i in range(0, num_updates_ddb):
            dynamodb_service.update_item(
                TableName=table_name,
                Key={PARTITION_KEY: {
                    "S": "testId%s" % i
                }},
                AttributeUpdates={
                    "data": {
                        "Action": "PUT",
                        "Value": {
                            "S": "foobar123_updated"
                        }
                    }
                },
            )

        # put items to stream
        num_events_kinesis = 1
        num_kinesis_records = 10
        LOGGER.info("Putting %s records in %s event to stream..." %
                    (num_kinesis_records, num_events_kinesis))
        kinesis.put_records(
            Records=[{
                "Data": "{}",
                "PartitionKey": "testId%s" % i
            } for i in range(0, num_kinesis_records)],
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME,
        )

        # put 1 item to stream that will trigger an error in the Lambda
        num_events_kinesis_err = 1
        for i in range(num_events_kinesis_err):
            kinesis.put_record(
                Data='{"%s": 1}' %
                lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
                PartitionKey="testIdError",
                StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME,
            )

        # create SNS topic, connect it to the Lambda, publish test messages
        num_events_sns = 3
        response = sns.create_topic(Name=TEST_TOPIC_NAME)
        sns.subscribe(
            TopicArn=response["TopicArn"],
            Protocol="lambda",
            Endpoint=aws_stack.lambda_function_arn(lambda_stream_name),
        )
        for i in range(num_events_sns):
            sns.publish(
                TopicArn=response["TopicArn"],
                Subject="test_subject",
                Message="test message %s" % i,
            )

        # get latest records
        latest = aws_stack.kinesis_get_latest_records(
            TEST_LAMBDA_SOURCE_STREAM_NAME,
            shard_id="shardId-000000000000",
            count=10)
        self.assertEqual(10, len(latest))

        # send messages to SQS queue
        num_events_sqs = 4
        for i in range(num_events_sqs):
            sqs.send_message(QueueUrl=sqs_queue_info["QueueUrl"],
                             MessageBody=str(i))

        LOGGER.info("Waiting some time before finishing test.")
        time.sleep(2)

        num_events_lambda = num_events_ddb + num_events_sns + num_events_sqs
        num_events = num_events_lambda + num_kinesis_records

        def check_events():
            if len(events) != num_events:
                msg = "DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s" % (
                    len(events),
                    num_events,
                )
                LOGGER.warning(msg)
            self.assertEqual(num_events, len(events))
            event_items = [
                json.loads(base64.b64decode(e["data"])) for e in events
            ]
            # make sure we have the right number of INSERT/MODIFY event types
            inserts = [
                e for e in event_items if e.get("__action_type") == "INSERT"
            ]
            modifies = [
                e for e in event_items if e.get("__action_type") == "MODIFY"
            ]
            self.assertEqual(num_put_new_items + num_batch_items, len(inserts))
            self.assertEqual(num_put_existing_items + num_updates_ddb,
                             len(modifies))

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=15, sleep=2)

        # check cloudwatch notifications
        def check_cw_invocations():
            num_invocations = get_lambda_invocations_count(lambda_stream_name)
            expected_invocation_count = num_events_kinesis + num_events_kinesis_err + num_events_sns
            self.assertEqual(expected_invocation_count, num_invocations)
            num_error_invocations = get_lambda_invocations_count(
                lambda_stream_name, "Errors")
            self.assertEqual(num_events_kinesis_err, num_error_invocations)

        # Lambda invocations are running asynchronously, hence sleep some time here to wait for results
        retry(check_cw_invocations, retries=7, sleep=2)

        # clean up
        testutil.delete_lambda_function(lambda_stream_name)
        testutil.delete_lambda_function(lambda_ddb_name)
        testutil.delete_lambda_function(lambda_queue_name)
        sqs.delete_queue(QueueUrl=sqs_queue_info["QueueUrl"])
Code Example #33
0
def put_records(stream_name, records):
    stream = get_stream(stream_name)
    if not stream:
        return error_not_found(stream_name)
    for dest in stream.get("Destinations", []):
        if "ESDestinationDescription" in dest:
            es_dest = dest["ESDestinationDescription"]
            es_index = es_dest["IndexName"]
            es_type = es_dest.get("TypeName")
            es = connect_elasticsearch(endpoint=es_dest.get("ClusterEndpoint"),
                                       domain=es_dest.get("DomainARN"))
            for record in records:
                obj_id = uuid.uuid4()

                data = "{}"
                # DirectPut
                if "Data" in record:
                    data = base64.b64decode(record["Data"])
                # KinesisAsSource
                elif "data" in record:
                    data = base64.b64decode(record["data"])

                body = json.loads(data)

                try:
                    es.create(index=es_index,
                              doc_type=es_type,
                              id=obj_id,
                              body=body)
                except Exception as e:
                    LOG.error("Unable to put record to Elasticsearch: %s %s" %
                              (e, traceback.format_exc()))
                    raise
        if "S3DestinationDescription" in dest:
            s3_dest = dest["S3DestinationDescription"]
            bucket = bucket_name(s3_dest["BucketARN"])
            prefix = s3_dest.get("Prefix", "")

            s3 = connect_to_resource("s3")
            batched_data = b"".join([
                base64.b64decode(r.get("Data") or r["data"]) for r in records
            ])

            obj_path = get_s3_object_path(stream_name, prefix)
            try:
                s3.Object(bucket, obj_path).put(Body=batched_data)
            except Exception as e:
                LOG.error("Unable to put records to S3 bucket: %s %s" %
                          (e, traceback.format_exc()))
                raise
        if "HttpEndpointDestinationDescription" in dest:
            http_dest = dest["HttpEndpointDestinationDescription"]
            end_point = http_dest["EndpointConfiguration"]
            url = end_point["Url"]
            record_to_send = {
                "requestId": str(uuid.uuid4()),
                "timestamp": (int(time.time())),
                "records": [],
            }
            for record in records:
                data = record.get("Data") or record.get("data")
                record_to_send["records"].append({"data": data})
            headers = {
                "Content-Type": "application/json",
            }
            try:
                requests.post(url, json=record_to_send, headers=headers)
            except Exception as e:
                LOG.info(
                    "Unable to put Firehose records to HTTP endpoint %s: %s %s"
                    % (url, e, traceback.format_exc()))
                raise
    return {"RecordId": str(uuid.uuid4())}
Code Example #34
0
File: test_dynamodb.py Project: zhiweil/localstack
    def test_time_to_live(self):
        dynamodb = aws_stack.connect_to_resource('dynamodb')
        dynamodb_client = aws_stack.connect_to_service('dynamodb')

        aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME_3,
                                        partition_key=PARTITION_KEY)
        table = dynamodb.Table(TEST_DDB_TABLE_NAME_3)

        # Insert some items to the table
        items = {
            'id1': {
                PARTITION_KEY: 'id1',
                'data': 'IT IS'
            },
            'id2': {
                PARTITION_KEY: 'id2',
                'data': 'TIME'
            },
            'id3': {
                PARTITION_KEY: 'id3',
                'data': 'TO LIVE!'
            }
        }
        for item in items.values():
            table.put_item(Item=item)

        # Describe TTL when still unset.
        response = testutil.send_describe_dynamodb_ttl_request(
            TEST_DDB_TABLE_NAME_3)
        assert response.status_code == 200
        assert json.loads(
            response._content
        )['TimeToLiveDescription']['TimeToLiveStatus'] == 'DISABLED'

        # Enable TTL for given table
        response = testutil.send_update_dynamodb_ttl_request(
            TEST_DDB_TABLE_NAME_3, True)
        assert response.status_code == 200
        assert json.loads(
            response._content)['TimeToLiveSpecification']['Enabled'] is True

        # Describe TTL status after being enabled.
        response = testutil.send_describe_dynamodb_ttl_request(
            TEST_DDB_TABLE_NAME_3)
        assert response.status_code == 200
        assert json.loads(
            response._content
        )['TimeToLiveDescription']['TimeToLiveStatus'] == 'ENABLED'

        # Disable TTL for given table
        response = testutil.send_update_dynamodb_ttl_request(
            TEST_DDB_TABLE_NAME_3, False)
        assert response.status_code == 200
        assert json.loads(
            response._content)['TimeToLiveSpecification']['Enabled'] is False

        # Describe TTL status after being disabled.
        response = testutil.send_describe_dynamodb_ttl_request(
            TEST_DDB_TABLE_NAME_3)
        assert response.status_code == 200
        assert json.loads(
            response._content
        )['TimeToLiveDescription']['TimeToLiveStatus'] == 'DISABLED'

        # Enable TTL for given table again
        response = testutil.send_update_dynamodb_ttl_request(
            TEST_DDB_TABLE_NAME_3, True)
        assert response.status_code == 200
        assert json.loads(
            response._content)['TimeToLiveSpecification']['Enabled'] is True

        # Describe TTL status after being enabled again.
        response = testutil.send_describe_dynamodb_ttl_request(
            TEST_DDB_TABLE_NAME_3)
        assert response.status_code == 200
        assert json.loads(
            response._content
        )['TimeToLiveDescription']['TimeToLiveStatus'] == 'ENABLED'

        # Clean up table
        dynamodb_client.delete_table(TableName=TEST_DDB_TABLE_NAME_3)
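The test drives TTL through localstack's HTTP test helpers; the equivalent calls through the plain DynamoDB client would look roughly like this (the TTL attribute name 'ttl' is hypothetical):

dynamodb_client.update_time_to_live(
    TableName=TEST_DDB_TABLE_NAME_3,
    TimeToLiveSpecification={'Enabled': True, 'AttributeName': 'ttl'},
)
status = dynamodb_client.describe_time_to_live(
    TableName=TEST_DDB_TABLE_NAME_3,
)['TimeToLiveDescription']['TimeToLiveStatus']
assert status == 'ENABLED'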