Example #1
def test_kinesis_lambda_forward_chain():
    kinesis = aws_stack.connect_to_service('kinesis')
    s3 = aws_stack.connect_to_service('s3')

    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM1_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM2_NAME, delete=True)
    s3.create_bucket(Bucket=TEST_BUCKET_NAME)

    # deploy test lambdas connected to Kinesis streams
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_CHAIN_LAMBDA1_NAME, zip_file=zip_file,
        event_source_arn=get_event_source_arn(TEST_CHAIN_STREAM1_NAME), runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_CHAIN_LAMBDA2_NAME, zip_file=zip_file,
        event_source_arn=get_event_source_arn(TEST_CHAIN_STREAM2_NAME), runtime=LAMBDA_RUNTIME_PYTHON27)

    # publish test record
    test_data = {'test_data': 'forward_chain_data_%s' % short_uid()}
    data = clone(test_data)
    data[lambda_integration.MSG_BODY_MESSAGE_TARGET] = 'kinesis:%s' % TEST_CHAIN_STREAM2_NAME
    kinesis.put_record(Data=to_bytes(json.dumps(data)), PartitionKey='testId', StreamName=TEST_CHAIN_STREAM1_NAME)

    # check results
    time.sleep(5)
    all_objects = testutil.list_all_s3_objects()
    testutil.assert_objects(test_data, all_objects)
Example #2
def test_upload_lambda_from_s3():

    s3_client = aws_stack.connect_to_service('s3')
    lambda_client = aws_stack.connect_to_service('lambda')

    lambda_name = 'test_lambda_%s' % short_uid()
    bucket_name = 'test_bucket_lambda'
    bucket_key = 'test_lambda.zip'

    # upload zip file to S3
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    s3_client.create_bucket(Bucket=bucket_name)
    s3_client.upload_fileobj(BytesIO(zip_file), bucket_name, bucket_key)

    # create lambda function
    lambda_client.create_function(
        FunctionName=lambda_name, Handler='handler.handler',
        Runtime=lambda_api.LAMBDA_RUNTIME_PYTHON27, Role='r1',
        Code={
            'S3Bucket': bucket_name,
            'S3Key': bucket_key
        }
    )

    # invoke lambda function
    data_before = b'{"foo": "bar"}'
    result = lambda_client.invoke(FunctionName=lambda_name, Payload=data_before)
    data_after = result['Payload'].read()
    assert json.loads(to_str(data_before)) == json.loads(to_str(data_after))
Example #3
    def return_response(self, method, path, data, headers, response):
        req_data = None
        if method == 'POST' and path == '/':
            req_data = urlparse.parse_qs(to_str(data))
            action = req_data.get('Action')[0]

        if req_data:
            if action == 'DescribeStackResources':
                if response.status_code < 300:
                    response_dict = xmltodict.parse(response.content)['DescribeStackResourcesResponse']
                    resources = response_dict['DescribeStackResourcesResult']['StackResources']
                    if not resources:
                        # Check if stack exists
                        stack_name = req_data.get('StackName')[0]
                        cloudformation_client = aws_stack.connect_to_service('cloudformation')
                        try:
                            cloudformation_client.describe_stacks(StackName=stack_name)
                        except Exception:
                            return error_response('Stack with id %s does not exist' % stack_name, code=404)
            if action == 'DescribeStackResource':
                if response.status_code >= 500:
                    # fix an error in moto where it fails with 500 if the stack does not exist
                    return error_response('Stack resource does not exist', code=404)
            if action == 'ListStackResources':
                response_dict = xmltodict.parse(response.content, force_list=('member',))['ListStackResourcesResponse']
                resources = response_dict['ListStackResourcesResult']['StackResourceSummaries']
                if resources:
                    sqs_client = aws_stack.connect_to_service('sqs')
                    content_str = content_str_original = to_str(response.content)
                    new_response = Response()
                    new_response.status_code = response.status_code
                    new_response.headers = response.headers
                    for resource in resources['member']:
                        if resource['ResourceType'] == 'AWS::SQS::Queue':
                            try:
                                queue_name = resource['PhysicalResourceId']
                                queue_url = sqs_client.get_queue_url(QueueName=queue_name)['QueueUrl']
                            except Exception:
                                stack_name = req_data.get('StackName')[0]
                                return error_response('Stack with id %s does not exist' % stack_name, code=404)
                            content_str = re.sub(resource['PhysicalResourceId'], queue_url, content_str)
                    new_response._content = content_str
                    if content_str_original != new_response._content:
                        # if changes have been made, return patched response
                        new_response.headers['content-length'] = str(len(new_response._content))
                        return new_response
            elif action in ('CreateStack', 'UpdateStack'):
                if response.status_code >= 400:
                    return response
                # run the actual deployment
                template = template_deployer.template_to_json(req_data.get('TemplateBody')[0])
                template_deployer.deploy_template(template, req_data.get('StackName')[0])
Example #4
def _delete_notification_config():
    s3_client = aws_stack.connect_to_service('s3')
    s3_client.put_bucket_notification_configuration(
        Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS, NotificationConfiguration={})
    config = s3_client.get_bucket_notification_configuration(Bucket=TEST_BUCKET_NAME_WITH_NOTIFICATIONS)
    assert not config.get('QueueConfigurations')
    assert not config.get('TopicConfigurations')
Example #5
def find_existing_item(put_item):
    table_name = put_item['TableName']
    ddb_client = aws_stack.connect_to_service('dynamodb')

    search_key = {}
    if 'Key' in put_item:
        search_key = put_item['Key']
    else:
        schema = ddb_client.describe_table(TableName=table_name)
        schemas = [schema['Table']['KeySchema']]
        for index in schema['Table'].get('GlobalSecondaryIndexes', []):
            # TODO: also consider the key schemas of global secondary indexes
            # schemas.append(index['KeySchema'])
            pass
        for schema in schemas:
            for key in schema:
                key_name = key['AttributeName']
                search_key[key_name] = put_item['Item'][key_name]
        if not search_key:
            return

    req = {'TableName': table_name, 'Key': search_key}
    existing_item = aws_stack.dynamodb_get_item_raw(req)
    if 'Item' not in existing_item:
        if 'message' in existing_item:
            table_names = ddb_client.list_tables()['TableNames']
            msg = ('Unable to get item from DynamoDB (existing tables: %s): %s' %
                (table_names, existing_item['message']))
            LOGGER.warning(msg)
        return
    return existing_item.get('Item')
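
This helper is typically used to look up the previous version of an item before a PutItem overwrites it (e.g., to build OLD_IMAGE stream records). A minimal usage sketch, assuming a hypothetical table 'my_table' with partition key 'id':

put_item = {
    'TableName': 'my_table',
    'Item': {'id': {'S': 'id1'}, 'data': {'S': 'new value'}}
}
# returns the stored item dict, or None if the item (or table) does not exist
existing = find_existing_item(put_item)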
Example #6
def check_infra_s3(expect_shutdown=False):
    out = None
    try:
        # check S3
        out = aws_stack.connect_to_service(service_name='s3', client=True, env=ENV_DEV).list_buckets()
    except Exception:
        pass
Example #7
def check_infra_dynamodb(expect_shutdown=False):
    out = None
    try:
        # check DynamoDB
        out = aws_stack.connect_to_service(service_name='dynamodb', client=True, env=ENV_DEV).list_tables()
    except Exception:
        pass
Example #8
def check_infra_kinesis(expect_shutdown=False):
    out = None
    try:
        # check Kinesis
        out = aws_stack.connect_to_service(service_name='kinesis', client=True, env=ENV_DEV).list_streams()
    except Exception:
        pass
Example #9
def stack_exists(stack_name):
    cloudformation = aws_stack.connect_to_service('cloudformation')
    stacks = cloudformation.list_stacks()
    for stack in stacks['StackSummaries']:
        if stack['StackName'] == stack_name:
            return True
    return False
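
Stack creation is asynchronous, so callers typically poll this helper until the stack appears. A minimal polling sketch (retry count and sleep interval are illustrative):

import time

def wait_for_stack(stack_name, retries=10, sleep=2):
    for _ in range(retries):
        if stack_exists(stack_name):
            return True
        time.sleep(sleep)
    return False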
Example #10
    def test_describe_parameters(self):
        ssm_client = aws_stack.connect_to_service('ssm')

        response = ssm_client.describe_parameters()

        assert 'Parameters' in response
        assert isinstance(response['Parameters'], list)
Example #11
def test_kinesis_error_injection():
    if not do_run():
        return

    kinesis = aws_stack.connect_to_service('kinesis')
    aws_stack.create_kinesis_stream(TEST_STREAM_NAME)

    records = [
        {
            'Data': '0',
            'ExplicitHashKey': '0',
            'PartitionKey': '0'
        }
    ]

    # by default, no errors
    test_no_errors = kinesis.put_records(StreamName=TEST_STREAM_NAME, Records=records)
    assert_equal(test_no_errors['FailedRecordCount'], 0)

    # with a probability of 1, always throw errors
    config.KINESIS_ERROR_PROBABILITY = 1.0
    test_all_errors = kinesis.put_records(StreamName=TEST_STREAM_NAME, Records=records)
    assert_equal(test_all_errors['FailedRecordCount'], 1)

    # reset probability to zero
    config.KINESIS_ERROR_PROBABILITY = 0.0
Example #12
def create_lambda_function(func_name, zip_file, event_source_arn=None, handler=LAMBDA_DEFAULT_HANDLER,
        starting_position=LAMBDA_DEFAULT_STARTING_POSITION, runtime=LAMBDA_DEFAULT_RUNTIME,
        envvars=None):
    """Utility method to create a new function via the Lambda API"""

    # avoid a mutable default argument
    envvars = envvars or {}
    client = aws_stack.connect_to_service('lambda')
    # create function
    client.create_function(
        FunctionName=func_name,
        Runtime=runtime,
        Handler=handler,
        Role=LAMBDA_TEST_ROLE,
        Code={
            'ZipFile': zip_file
        },
        Timeout=LAMBDA_DEFAULT_TIMEOUT,
        Environment=dict(Variables=envvars)
    )
    # create event source mapping
    if event_source_arn:
        client.create_event_source_mapping(
            FunctionName=func_name,
            EventSourceArn=event_source_arn,
            StartingPosition=starting_position
        )
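
A minimal usage sketch of this utility, mirroring the tests above (function name, environment, and payload are illustrative):

zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
    libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
create_lambda_function(func_name='my-test-function', zip_file=zip_file,
    runtime=LAMBDA_RUNTIME_PYTHON27, envvars={'STAGE': 'test'})
lambda_client = aws_stack.connect_to_service('lambda')
result = lambda_client.invoke(FunctionName='my-test-function', Payload=b'{}')
assert result['StatusCode'] == 200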
Example #13
def test_bucket_policy():

    s3_resource = aws_stack.connect_to_resource('s3')
    s3_client = aws_stack.connect_to_service('s3')

    # create test bucket
    s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME_WITH_POLICY)

    # put bucket policy
    policy = {
        'Version': '2012-10-17',
        'Statement': {
            'Action': ['s3:GetObject'],
            'Effect': 'Allow',
            'Resource': 'arn:aws:s3:::bucketName/*',
            'Principal': {
                'AWS': ['*']
            }
        }
    }
    response = s3_client.put_bucket_policy(
        Bucket=TEST_BUCKET_NAME_WITH_POLICY,
        Policy=json.dumps(policy)
    )
    assert response['ResponseMetadata']['HTTPStatusCode'] == 204

    # retrieve and check policy config
    saved_policy = s3_client.get_bucket_policy(Bucket=TEST_BUCKET_NAME_WITH_POLICY)['Policy']
    assert json.loads(saved_policy) == policy
Example #14
def create_dynamodb_table(table_name, partition_key, env=None, stream_view_type=None):
    """Utility method to create a DynamoDB table"""

    dynamodb = aws_stack.connect_to_service('dynamodb', env=env, client=True)
    stream_spec = {'StreamEnabled': False}
    key_schema = [{
        'AttributeName': partition_key,
        'KeyType': 'HASH'
    }]
    attr_defs = [{
        'AttributeName': partition_key,
        'AttributeType': 'S'
    }]
    if stream_view_type is not None:
        stream_spec = {
            'StreamEnabled': True,
            'StreamViewType': stream_view_type
        }
    table = None
    try:
        table = dynamodb.create_table(TableName=table_name, KeySchema=key_schema,
            AttributeDefinitions=attr_defs, ProvisionedThroughput={
                'ReadCapacityUnits': 10, 'WriteCapacityUnits': 10
            },
            StreamSpecification=stream_spec
        )
    except Exception as e:
        if 'ResourceInUseException' in str(e):
            # Table already exists -> return table reference
            return aws_stack.connect_to_resource('dynamodb', env=env).Table(table_name)
    time.sleep(2)
    return table
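
A minimal usage sketch (table and key names are illustrative). Note that the helper returns a reference to the existing table rather than failing when the table has already been created:

table = create_dynamodb_table('my-table', partition_key='id',
    stream_view_type='NEW_AND_OLD_IMAGES')
dynamodb = aws_stack.connect_to_service('dynamodb')
assert 'my-table' in dynamodb.list_tables()['TableNames']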
Example #15
def test_firehose_s3():

    s3_resource = aws_stack.connect_to_resource('s3')
    firehose = aws_stack.connect_to_service('firehose')

    s3_prefix = '/testdata'
    test_data = '{"test": "firehose_data_%s"}' % short_uid()
    # create Firehose stream
    stream = firehose.create_delivery_stream(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        S3DestinationConfiguration={
            'RoleARN': aws_stack.iam_resource_arn('firehose'),
            'BucketARN': aws_stack.s3_bucket_arn(TEST_BUCKET_NAME),
            'Prefix': s3_prefix
        }
    )
    assert stream
    assert TEST_FIREHOSE_NAME in firehose.list_delivery_streams()['DeliveryStreamNames']
    # create target S3 bucket
    s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME)

    # put records
    firehose.put_record(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        Record={
            'Data': to_bytes(test_data)
        }
    )
    # check records in target bucket
    all_objects = testutil.list_all_s3_objects()
    testutil.assert_objects(json.loads(to_str(test_data)), all_objects)
Example #16
def test_domain_creation():
    es_client = aws_stack.connect_to_service('es')

    # create ES domain
    es_client.create_elasticsearch_domain(DomainName=TEST_DOMAIN_NAME)
    assert_true(TEST_DOMAIN_NAME in
        [d['DomainName'] for d in es_client.list_domain_names()['DomainNames']])

    # make sure we cannot re-create same domain name
    assert_raises(ClientError, es_client.create_elasticsearch_domain, DomainName=TEST_DOMAIN_NAME)

    # get domain status
    status = es_client.describe_elasticsearch_domain(DomainName=TEST_DOMAIN_NAME)
    assert_equal(status['DomainStatus']['DomainName'], TEST_DOMAIN_NAME)
    assert_true(status['DomainStatus']['Created'])
    assert_false(status['DomainStatus']['Processing'])
    assert_false(status['DomainStatus']['Deleted'])
    assert_equal(status['DomainStatus']['Endpoint'], aws_stack.get_elasticsearch_endpoint())
    assert_true(status['DomainStatus']['EBSOptions']['EBSEnabled'])

    # make sure we can fake adding tags to a domain
    response = es_client.add_tags(ARN='string', TagList=[{'Key': 'SOME_TAG', 'Value': 'SOME_VALUE'}])
    assert_equal(200, response['ResponseMetadata']['HTTPStatusCode'])

    # make sure domain deletion works
    es_client.delete_elasticsearch_domain(DomainName=TEST_DOMAIN_NAME)
    assert_false(TEST_DOMAIN_NAME in
        [d['DomainName'] for d in es_client.list_domain_names()['DomainNames']])
Example #17
def post_request():
    action = request.headers.get('x-amz-target')
    data = json.loads(to_str(request.data))
    result = {}
    kinesis = aws_stack.connect_to_service('kinesis')
    if action == '%s.ListStreams' % ACTION_HEADER_PREFIX:
        result = {
            'Streams': list(DDB_STREAMS.values()),
            'LastEvaluatedStreamArn': 'TODO'
        }
    elif action == '%s.DescribeStream' % ACTION_HEADER_PREFIX:
        for stream in DDB_STREAMS.values():
            if stream['StreamArn'] == data['StreamArn']:
                result = {
                    'StreamDescription': stream
                }
                # get stream details
                dynamodb = aws_stack.connect_to_service('dynamodb')
                table_name = table_name_from_stream_arn(stream['StreamArn'])
                stream_name = get_kinesis_stream_name(table_name)
                stream_details = kinesis.describe_stream(StreamName=stream_name)
                table_details = dynamodb.describe_table(TableName=table_name)
                stream['KeySchema'] = table_details['Table']['KeySchema']

                # Replace Kinesis ShardIDs with ones that mimic actual
                # DynamoDBStream ShardIDs.
                stream_shards = stream_details['StreamDescription']['Shards']
                for shard in stream_shards:
                    shard['ShardId'] = shard_id(stream_name, shard['ShardId'])
                stream['Shards'] = stream_shards
                break
        if not result:
            return error_response('Requested resource not found', error_type='ResourceNotFoundException')
    elif action == '%s.GetShardIterator' % ACTION_HEADER_PREFIX:
        # forward request to Kinesis API
        stream_name = stream_name_from_stream_arn(data['StreamArn'])
        stream_shard_id = kinesis_shard_id(data['ShardId'])
        result = kinesis.get_shard_iterator(StreamName=stream_name,
            ShardId=stream_shard_id, ShardIteratorType=data['ShardIteratorType'])
    elif action == '%s.GetRecords' % ACTION_HEADER_PREFIX:
        kinesis_records = kinesis.get_records(**data)
        result = {'Records': []}
        for record in kinesis_records['Records']:
            result['Records'].append(json.loads(to_str(record['Data'])))
    else:
        print('WARNING: Unknown operation "%s"' % action)
    return jsonify(result)
Example #18
def send_notifications(method, bucket_name, object_path):
    for bucket, config in iteritems(S3_NOTIFICATIONS):
        if bucket == bucket_name:
            action = {'PUT': 'ObjectCreated', 'POST': 'ObjectCreated', 'DELETE': 'ObjectRemoved'}[method]
            # TODO: support more detailed methods, e.g., DeleteMarkerCreated
            # http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
            api_method = {'PUT': 'Put', 'POST': 'Post', 'DELETE': 'Delete'}[method]
            event_name = '%s:%s' % (action, api_method)
            if (event_type_matches(config['Event'], action, api_method) and
                    filter_rules_match(config.get('Filter'), object_path)):
                # send notification
                message = get_event_message(
                    event_name=event_name, bucket_name=bucket_name,
                    file_name=urlparse.urlparse(object_path[1:]).path
                )
                message = json.dumps(message)
                if config.get('Queue'):
                    sqs_client = aws_stack.connect_to_service('sqs')
                    try:
                        queue_url = queue_url_for_arn(config['Queue'])
                        sqs_client.send_message(QueueUrl=queue_url, MessageBody=message)
                    except Exception as e:
                        LOGGER.warning('Unable to send notification for S3 bucket "%s" to SQS queue "%s": %s' %
                            (bucket_name, config['Queue'], e))
                if config.get('Topic'):
                    sns_client = aws_stack.connect_to_service('sns')
                    try:
                        sns_client.publish(TopicArn=config['Topic'], Message=message, Subject='Amazon S3 Notification')
                    except Exception as e:
                        LOGGER.warning('Unable to send notification for S3 bucket "%s" to SNS topic "%s".' %
                            (bucket_name, config['Topic']))
                # CloudFunction and LambdaFunction are semantically identical
                lambda_function_config = config.get('CloudFunction') or config.get('LambdaFunction')
                if lambda_function_config:
                    # make sure we don't run into a socket timeout
                    connection_config = botocore.config.Config(read_timeout=300)
                    lambda_client = aws_stack.connect_to_service('lambda', config=connection_config)
                    try:
                        lambda_client.invoke(FunctionName=lambda_function_config,
                                             InvocationType='Event', Payload=message)
                    except Exception as e:
                        LOGGER.warning('Unable to send notification for S3 bucket "%s" to Lambda function "%s".' %
                            (bucket_name, lambda_function_config))
                # filter() returns a (truthy) iterator in Python 3 - use any() instead
                if not any(config.get(x) for x in NOTIFICATION_DESTINATION_TYPES):
                    LOGGER.warning('None of %s defined for S3 notification.' %
                        '/'.join(NOTIFICATION_DESTINATION_TYPES))
Example #19
def describe_stack_resources(stack_name, logical_resource_id):
    client = aws_stack.connect_to_service('cloudformation')
    resources = client.describe_stack_resources(StackName=stack_name, LogicalResourceId=logical_resource_id)
    result = []
    for res in resources['StackResources']:
        if res.get('LogicalResourceId') == logical_resource_id:
            result.append(res)
    return result
Example #20
def get_rest_api_paths(rest_api_id):
    apigateway = aws_stack.connect_to_service(service_name='apigateway')
    resources = apigateway.get_resources(restApiId=rest_api_id, limit=100)
    resource_map = {}
    for resource in resources['items']:
        path = aws_stack.get_apigateway_path_for_resource(rest_api_id, resource['id'])
        resource_map[path] = resource
    return resource_map
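
A minimal usage sketch (the REST API ID is illustrative); the returned dict maps each resource path to its API Gateway resource object:

paths = get_rest_api_paths('a1b2c3')
for path, resource in sorted(paths.items()):
    print('%s -> %s' % (path, resource['id']))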
Example #21
def receive_assert_delete(queue_url, assertions, sqs_client=None, required_subject=None):
    if not sqs_client:
        sqs_client = aws_stack.connect_to_service('sqs')

    response = sqs_client.receive_message(QueueUrl=queue_url)

    messages = [json.loads(to_str(m['Body'])) for m in response['Messages']]
    testutil.assert_objects(assertions, messages)
    for message in response['Messages']:
        sqs_client.delete_message(QueueUrl=queue_url, ReceiptHandle=message['ReceiptHandle'])
Example #22
    def test_validate_invalid_json_template_should_fail(self):
        cloudformation = aws_stack.connect_to_service('cloudformation')
        invalid_json = '{"this is invalid JSON"="bobbins"}'

        try:
            cloudformation.validate_template(TemplateBody=invalid_json)
            self.fail('Should raise ValidationError')
        except (ClientError, ResponseParserError) as err:
            if isinstance(err, ClientError):
                assert err.response['ResponseMetadata']['HTTPStatusCode'] == 400
                assert err.response['Error']['Message'] == 'Template Validation Error'
Example #23
def test_s3_put_object_notification():

    s3_client = aws_stack.connect_to_service('s3')
    sqs_client = aws_stack.connect_to_service('sqs')

    key_by_path = 'key-by-hostname'
    key_by_host = 'key-by-host'

    # create test queue
    queue_url = sqs_client.create_queue(QueueName=TEST_QUEUE_FOR_BUCKET_WITH_NOTIFICATION)['QueueUrl']
    queue_attributes = sqs_client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['QueueArn'])

    # create test bucket
    s3_client.create_bucket(Bucket=TEST_BUCKET_WITH_NOTIFICATION)
    s3_client.put_bucket_notification_configuration(Bucket=TEST_BUCKET_WITH_NOTIFICATION,
                                                    NotificationConfiguration={'QueueConfigurations': [
                                                        {'QueueArn': queue_attributes['Attributes']['QueueArn'],
                                                         'Events': ['s3:ObjectCreated:*']}]})

    # put an object where the bucket_name is in the path
    s3_client.put_object(Bucket=TEST_BUCKET_WITH_NOTIFICATION, Key=key_by_path, Body='something')

    # put an object where the bucket_name is in the host
    # the content of the authorization header is irrelevant, as long as one is present
    headers = {'Host': '{}.s3.amazonaws.com'.format(TEST_BUCKET_WITH_NOTIFICATION), 'authorization': 'some_token'}
    url = '{}/{}'.format(os.getenv('TEST_S3_URL'), key_by_host)
    # verify=False is required because this test fails on Travis CI with an SSL error that does not occur locally
    response = requests.put(url, data='something else', headers=headers, verify=False)
    assert response.ok

    queue_attributes = sqs_client.get_queue_attributes(QueueUrl=queue_url,
                                                       AttributeNames=['ApproximateNumberOfMessages'])
    message_count = queue_attributes['Attributes']['ApproximateNumberOfMessages']
    # the ApproximateNumberOfMessages attribute is a string
    assert message_count == '2'

    # clean up
    sqs_client.delete_queue(QueueUrl=queue_url)
    s3_client.delete_objects(Bucket=TEST_BUCKET_WITH_NOTIFICATION,
                             Delete={'Objects': [{'Key': key_by_path}, {'Key': key_by_host}]})
    s3_client.delete_bucket(Bucket=TEST_BUCKET_WITH_NOTIFICATION)
Example #24
    def test_time_to_live(self):
        dynamodb = aws_stack.connect_to_resource('dynamodb')
        dynamodb_client = aws_stack.connect_to_service('dynamodb')

        testutil.create_dynamodb_table(TEST_DDB_TABLE_NAME_3, partition_key=PARTITION_KEY)
        table = dynamodb.Table(TEST_DDB_TABLE_NAME_3)

        # Insert some items to the table
        items = {
            'id1': {PARTITION_KEY: 'id1', 'data': 'IT IS'},
            'id2': {PARTITION_KEY: 'id2', 'data': 'TIME'},
            'id3': {PARTITION_KEY: 'id3', 'data': 'TO LIVE!'}
        }
        for k, item in items.items():
            table.put_item(Item=item)

        # Describe TTL when still unset.
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'] == 'DISABLED'

        # Enable TTL for given table
        response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, True)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveSpecification']['Enabled'] is True

        # Describe TTL status after being enabled.
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'] == 'ENABLED'

        # Disable TTL for given table
        response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, False)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveSpecification']['Enabled'] is False

        # Describe TTL status after being disabled.
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'] == 'DISABLED'

        # Enable TTL for given table again
        response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, True)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveSpecification']['Enabled'] is True

        # Describe TTL status after being enabled again.
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'] == 'ENABLED'

        # Clean up table
        dynamodb_client.delete_table(TableName=TEST_DDB_TABLE_NAME_3)
Example #25
def check_kinesis(expect_shutdown=False, print_error=False):
    out = None
    try:
        # check Kinesis
        out = aws_stack.connect_to_service(service_name='kinesis').list_streams()
    except Exception as e:
        if print_error:
            LOGGER.error('Kinesis health check failed: %s %s' % (e, traceback.format_exc()))
    if expect_shutdown:
        assert out is None
    else:
        assert isinstance(out['StreamNames'], list)
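
This check is used in two modes: at startup, to assert that the service responds, and at shutdown, to assert that it no longer does. A minimal sketch of both modes:

check_kinesis(print_error=True)      # asserts list_streams() returned a result
check_kinesis(expect_shutdown=True)  # asserts the Kinesis API is unreachable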
Example #26
def test_lambda_environment():

    lambda_client = aws_stack.connect_to_service('lambda')

    # deploy and invoke lambda without Docker
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_ENV), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_ENV,
        zip_file=zip_file, runtime=LAMBDA_RUNTIME_PYTHON27, envvars={'Hello': 'World'})
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_ENV, Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload']
    assert json.load(result_data) == {'Hello': 'World'}
Example #27
def forward_events(records):
    global SEQUENCE_NUMBER_COUNTER
    kinesis = aws_stack.connect_to_service('kinesis')
    for record in records:
        if 'SequenceNumber' not in record['dynamodb']:
            record['dynamodb']['SequenceNumber'] = str(SEQUENCE_NUMBER_COUNTER)
            SEQUENCE_NUMBER_COUNTER += 1
        table_arn = record['eventSourceARN']
        stream = DDB_STREAMS.get(table_arn)
        if stream:
            table_name = table_name_from_stream_arn(stream['StreamArn'])
            stream_name = get_kinesis_stream_name(table_name)
            kinesis.put_record(StreamName=stream_name, Data=json.dumps(record), PartitionKey='TODO')
Example #28
def publish_lambda_metric(metric, value, kwargs):
    # publish metric only if CloudWatch service is available
    if not config.service_port('cloudwatch'):
        return
    cw_client = aws_stack.connect_to_service('cloudwatch')
    cw_client.put_metric_data(Namespace='AWS/Lambda',
        MetricData=[{
            'MetricName': metric,
            'Dimensions': dimension_lambda(kwargs),
            'Timestamp': datetime.now(),
            'Value': value
        }]
    )
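
A minimal usage sketch (metric name, value, and dimensions are illustrative; the 'func_name' key is assumed to be understood by dimension_lambda()):

# 'func_name' is an assumed key consumed by dimension_lambda()
publish_lambda_metric('Invocations', 1, {'func_name': 'my-test-function'})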
Example #29
def test_sns_to_sqs():
    sqs_client = aws_stack.connect_to_service('sqs')
    sns_client = aws_stack.connect_to_service('sns')

    # create topic and queue
    queue_info = sqs_client.create_queue(QueueName=TEST_QUEUE_NAME_FOR_SNS)
    topic_info = sns_client.create_topic(Name=TEST_TOPIC_NAME)

    # subscribe SQS to SNS, publish message
    sns_client.subscribe(TopicArn=topic_info['TopicArn'], Protocol='sqs',
        Endpoint=aws_stack.sqs_queue_arn(TEST_QUEUE_NAME_FOR_SNS))
    test_value = short_uid()
    sns_client.publish(TopicArn=topic_info['TopicArn'], Message='test message for SQS',
        MessageAttributes={'attr1': {'DataType': 'String', 'StringValue': test_value}})

    # receive, assert, and delete message from SQS
    queue_url = queue_info['QueueUrl']
    assertions = []
    # make sure we receive the correct topic ARN in notifications
    assertions.append({'TopicArn': topic_info['TopicArn']})
    # make sure the notification contains message attributes
    assertions.append({'Value': test_value})
    receive_assert_delete(queue_url, assertions, sqs_client)
Example #30
def check_s3(expect_shutdown=False, print_error=False):
    out = None
    try:
        # wait for port to be opened
        wait_for_port_open(DEFAULT_PORT_S3_BACKEND)
        # check S3
        out = aws_stack.connect_to_service(service_name='s3').list_buckets()
    except Exception as e:
        if print_error:
            LOGGER.error('S3 health check failed: %s %s' % (e, traceback.format_exc()))
    if expect_shutdown:
        assert out is None
    else:
        assert isinstance(out['Buckets'], list)
Example #31
def stream_exists(name):
    kinesis_client = aws_stack.connect_to_service('kinesis')
    streams = kinesis_client.list_streams()
    return name in streams['StreamNames']
Example #32
def forward_event_to_target_stream(record, stream_name):
    kinesis = aws_stack.connect_to_service('kinesis')
    kinesis.put_record(StreamName=stream_name,
                       Data=record['Data'],
                       PartitionKey=record['PartitionKey'])
Example #33
def message_to_subscribers(message_id,
                           message,
                           topic_arn,
                           req_data,
                           headers,
                           subscription_arn=None,
                           skip_checks=False):

    subscriptions = SNS_SUBSCRIPTIONS.get(topic_arn, [])
    for subscriber in list(subscriptions):
        if subscription_arn not in [None, subscriber['SubscriptionArn']]:
            continue

        filter_policy = json.loads(subscriber.get('FilterPolicy') or '{}')
        message_attributes = get_message_attributes(req_data)
        if not skip_checks and not check_filter_policy(filter_policy,
                                                       message_attributes):
            LOG.info('SNS filter policy %s does not match attributes %s' %
                     (filter_policy, message_attributes))
            continue

        if subscriber['Protocol'] == 'sms':
            event = {
                'topic_arn': topic_arn,
                'endpoint': subscriber['Endpoint'],
                'message_content': req_data['Message'][0]
            }
            SMS_MESSAGES.append(event)
            LOG.info('Delivering SMS message to %s: %s',
                     subscriber['Endpoint'], req_data['Message'][0])

        elif subscriber['Protocol'] == 'sqs':
            queue_url = None
            try:
                endpoint = subscriber['Endpoint']
                if 'sqs_queue_url' in subscriber:
                    queue_url = subscriber.get('sqs_queue_url')
                elif '://' in endpoint:
                    queue_url = endpoint
                else:
                    queue_name = endpoint.split(':')[5]
                    queue_url = aws_stack.get_sqs_queue_url(queue_name)
                    subscriber['sqs_queue_url'] = queue_url

                message_group_id = (req_data.get('MessageGroupId') or [''])[0]

                sqs_client = aws_stack.connect_to_service('sqs')

                # TODO remove this kwargs if we stop using ElasticMQ entirely
                kwargs = {
                    'MessageGroupId': message_group_id
                } if SQS_BACKEND_IMPL == 'moto' else {}
                sqs_client.send_message(
                    QueueUrl=queue_url,
                    MessageBody=create_sns_message_body(
                        subscriber, req_data, message_id),
                    MessageAttributes=create_sqs_message_attributes(
                        subscriber, message_attributes),
                    MessageSystemAttributes=create_sqs_system_attributes(
                        headers),
                    **kwargs)
            except Exception as exc:
                LOG.warning('Unable to forward SNS message to SQS: %s %s' %
                            (exc, traceback.format_exc()))
                sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'],
                                               req_data, str(exc))
                if 'NonExistentQueue' in str(exc):
                    LOG.info(
                        'Removing non-existent queue "%s" subscribed to topic "%s"'
                        % (queue_url, topic_arn))
                    subscriptions.remove(subscriber)

        elif subscriber['Protocol'] == 'lambda':
            try:
                external_url = external_service_url('sns')
                unsubscribe_url = '%s/?Action=Unsubscribe&SubscriptionArn=%s' % (
                    external_url, subscriber['SubscriptionArn'])
                response = lambda_api.process_sns_notification(
                    subscriber['Endpoint'],
                    topic_arn,
                    subscriber['SubscriptionArn'],
                    message,
                    message_id,
                    message_attributes,
                    unsubscribe_url,
                    subject=req_data.get('Subject', [None])[0])
                if isinstance(response, Response):
                    response.raise_for_status()
                elif isinstance(response, FlaskResponse):
                    if response.status_code >= 400:
                        raise Exception('Error response (code %s): %s' %
                                        (response.status_code, response.data))
            except Exception as exc:
                LOG.warning(
                    'Unable to run Lambda function on SNS message: %s %s' %
                    (exc, traceback.format_exc()))
                sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'],
                                               req_data, str(exc))

        elif subscriber['Protocol'] in ['http', 'https']:
            msg_type = (req_data.get('Type') or ['Notification'])[0]
            try:
                message_body = create_sns_message_body(subscriber, req_data,
                                                       message_id)
            except Exception:
                continue
            try:
                response = requests.post(
                    subscriber['Endpoint'],
                    headers={
                        'Content-Type': 'text/plain',
                        # AWS headers according to
                        # https://docs.aws.amazon.com/sns/latest/dg/sns-message-and-json-formats.html#http-header
                        'x-amz-sns-message-type': msg_type,
                        'x-amz-sns-topic-arn': subscriber['TopicArn'],
                        'x-amz-sns-subscription-arn':
                        subscriber['SubscriptionArn'],
                        'User-Agent':
                        'Amazon Simple Notification Service Agent'
                    },
                    data=message_body,
                    verify=False)
                response.raise_for_status()
            except Exception as exc:
                LOG.info(
                    'Received error on sending SNS message, putting to DLQ (if configured): %s'
                    % exc)
                sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'],
                                               req_data, str(exc))

        elif subscriber['Protocol'] == 'application':
            try:
                sns_client = aws_stack.connect_to_service('sns')
                sns_client.publish(TargetArn=subscriber['Endpoint'],
                                   Message=message)
            except Exception as exc:
                LOG.warning(
                    'Unable to forward SNS message to SNS platform app: %s %s'
                    % (exc, traceback.format_exc()))
                sns_error_to_dead_letter_queue(subscriber['SubscriptionArn'],
                                               req_data, str(exc))

        elif subscriber['Protocol'] == 'email':
            ses_client = aws_stack.connect_to_service('ses')
            if subscriber.get('Endpoint'):
                ses_client.verify_email_address(
                    EmailAddress=subscriber.get('Endpoint'))
                ses_client.verify_email_address(
                    EmailAddress='*****@*****.**')

                ses_client.send_email(
                    Source='*****@*****.**',
                    Message={
                        'Body': {
                            'Text': {
                                'Data': message
                            }
                        },
                        'Subject': {
                            'Data': 'SNS-Subscriber-Endpoint'
                        }
                    },
                    Destination={'ToAddresses': [subscriber.get('Endpoint')]})
        else:
            LOG.warning('Unexpected protocol "%s" for SNS subscription' %
                        subscriber['Protocol'])
Example #34
def post_request():
    region = DynamoDBStreamsBackend.get()
    action = request.headers.get("x-amz-target", "")
    action = action.split(".")[-1]
    data = json.loads(to_str(request.data))
    result = {}
    kinesis = aws_stack.connect_to_service("kinesis")
    if action == "ListStreams":
        result = {"Streams": list(region.ddb_streams.values())}

    elif action == "DescribeStream":
        for stream in region.ddb_streams.values():
            if stream["StreamArn"] == data["StreamArn"]:
                result = {"StreamDescription": stream}
                # get stream details
                dynamodb = aws_stack.connect_to_service("dynamodb")
                table_name = table_name_from_stream_arn(stream["StreamArn"])
                stream_name = get_kinesis_stream_name(table_name)
                stream_details = kinesis.describe_stream(
                    StreamName=stream_name)
                table_details = dynamodb.describe_table(TableName=table_name)
                stream["KeySchema"] = table_details["Table"]["KeySchema"]

                # Replace Kinesis ShardIDs with ones that mimic actual
                # DynamoDBStream ShardIDs.
                stream_shards = stream_details["StreamDescription"]["Shards"]
                for shard in stream_shards:
                    shard["ShardId"] = shard_id(stream_name, shard["ShardId"])
                stream["Shards"] = stream_shards
                break
        if not result:
            return error_response("Requested resource not found",
                                  error_type="ResourceNotFoundException")

    elif action == "GetShardIterator":
        # forward request to Kinesis API
        stream_name = stream_name_from_stream_arn(data["StreamArn"])
        stream_shard_id = kinesis_shard_id(data["ShardId"])

        kwargs = ({
            "StartingSequenceNumber": data["SequenceNumber"]
        } if data.get("SequenceNumber") else {})
        result = kinesis.get_shard_iterator(
            StreamName=stream_name,
            ShardId=stream_shard_id,
            ShardIteratorType=data["ShardIteratorType"],
            **kwargs,
        )

    elif action == "GetRecords":
        try:
            kinesis_records = kinesis.get_records(**data)
        except kinesis.exceptions.ExpiredIteratorException:
            LOG.debug("Shard iterator for underlying kinesis stream expired")
            return error_response("Shard iterator has expired",
                                  error_type="ExpiredIteratorException",
                                  code=400)
        result = {
            "Records": [],
            "NextShardIterator": kinesis_records.get("NextShardIterator"),
        }
        for record in kinesis_records["Records"]:
            record_data = json.loads(to_str(record["Data"]))
            record_data["dynamodb"]["SequenceNumber"] = record[
                "SequenceNumber"]
            result["Records"].append(record_data)
    else:
        print('WARNING: Unknown operation "%s"' % action)
    return jsonify(result)
Example #35
def queue_url_for_arn(queue_arn):
    sqs_client = aws_stack.connect_to_service('sqs')
    parts = queue_arn.split(':')
    return sqs_client.get_queue_url(
        QueueName=parts[5], QueueOwnerAWSAccountId=parts[4])['QueueUrl']
Example #36
def queue_exists(name):
    sqs_client = aws_stack.connect_to_service('sqs')
    queues = sqs_client.list_queues()
    # 'QueueUrls' is omitted from the response when no queues exist
    for queue_url in queues.get('QueueUrls', []):
        if queue_url.endswith('/%s' % name):
            return True
    return False
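
A minimal usage sketch of both helpers (queue name and ARN are illustrative):

assert queue_exists('my-queue')
queue_url = queue_url_for_arn('arn:aws:sqs:us-east-1:000000000000:my-queue')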
Example #37
    def return_response(self, method, path, data, headers, response):
        data = json.loads(to_str(data))

        # update table definitions
        if data and 'TableName' in data and 'KeySchema' in data:
            TABLE_DEFINITIONS[data['TableName']] = data

        if response._content:
            # fix the table ARN (DynamoDBLocal hardcodes "ddblocal" as the region)
            content_replaced = re.sub(
                r'"TableArn"\s*:\s*"arn:aws:dynamodb:ddblocal:([^"]+)"',
                r'"TableArn": "arn:aws:dynamodb:%s:\1"' %
                aws_stack.get_local_region(), to_str(response._content))
            if content_replaced != response._content:
                response._content = content_replaced
                fix_headers_for_updated_response(response)

        action = headers.get('X-Amz-Target')
        if not action:
            return

        record = {
            'eventID': '1',
            'eventVersion': '1.0',
            'dynamodb': {
                'StreamViewType': 'NEW_AND_OLD_IMAGES',
                'SizeBytes': -1
            },
            'awsRegion': DEFAULT_REGION,
            'eventSource': 'aws:dynamodb'
        }
        records = [record]

        if action == '%s.UpdateItem' % ACTION_PREFIX:
            req = {'TableName': data['TableName'], 'Key': data['Key']}
            new_item = aws_stack.dynamodb_get_item_raw(req)
            if 'Item' not in new_item:
                if 'message' in new_item:
                    ddb_client = aws_stack.connect_to_service('dynamodb')
                    table_names = ddb_client.list_tables()['TableNames']
                    msg = (
                        'Unable to get item from DynamoDB (existing tables: %s): %s'
                        % (table_names, new_item['message']))
                    LOGGER.warning(msg)
                return
            record['eventName'] = 'MODIFY'
            record['dynamodb']['Keys'] = data['Key']
            record['dynamodb']['NewImage'] = new_item['Item']
            record['dynamodb']['SizeBytes'] = len(json.dumps(new_item['Item']))
        elif action == '%s.BatchWriteItem' % ACTION_PREFIX:
            records = []
            for table_name, requests in data['RequestItems'].items():
                for request in requests:
                    put_request = request.get('PutRequest')
                    if put_request:
                        keys = dynamodb_extract_keys(item=put_request['Item'],
                                                     table_name=table_name)
                        if isinstance(keys, Response):
                            return keys
                        new_record = clone(record)
                        new_record['eventName'] = 'INSERT'
                        new_record['dynamodb']['Keys'] = keys
                        new_record['dynamodb']['NewImage'] = put_request[
                            'Item']
                        new_record[
                            'eventSourceARN'] = aws_stack.dynamodb_table_arn(
                                table_name)
                        records.append(new_record)
        elif action == '%s.PutItem' % ACTION_PREFIX:
            record['eventName'] = 'INSERT'
            keys = dynamodb_extract_keys(item=data['Item'],
                                         table_name=data['TableName'])
            if isinstance(keys, Response):
                return keys
            record['dynamodb']['Keys'] = keys
            record['dynamodb']['NewImage'] = data['Item']
            record['dynamodb']['SizeBytes'] = len(json.dumps(data['Item']))
        elif action == '%s.GetItem' % ACTION_PREFIX:
            if response.status_code == 200:
                content = json.loads(to_str(response.content))
                # make sure we append 'ConsumedCapacity', which is properly
                # returned by dynalite, but not by AWS's DynamoDBLocal
                if 'ConsumedCapacity' not in content and data.get(
                        'ReturnConsumedCapacity') in ('TOTAL', 'INDEXES'):
                    content['ConsumedCapacity'] = {
                        'CapacityUnits': 0.5,  # TODO hardcoded
                        'TableName': data['TableName']
                    }
                    response._content = json.dumps(content)
                    fix_headers_for_updated_response(response)
        elif action == '%s.DeleteItem' % ACTION_PREFIX:
            record['eventName'] = 'REMOVE'
            record['dynamodb']['Keys'] = data['Key']
        elif action == '%s.CreateTable' % ACTION_PREFIX:
            if 'StreamSpecification' in data:
                create_dynamodb_stream(data)
            event_publisher.fire_event(
                event_publisher.EVENT_DYNAMODB_CREATE_TABLE,
                payload={'n': event_publisher.get_hash(data['TableName'])})
            return
        elif action == '%s.DeleteTable' % ACTION_PREFIX:
            event_publisher.fire_event(
                event_publisher.EVENT_DYNAMODB_DELETE_TABLE,
                payload={'n': event_publisher.get_hash(data['TableName'])})
            return
        elif action == '%s.UpdateTable' % ACTION_PREFIX:
            if 'StreamSpecification' in data:
                create_dynamodb_stream(data)
            return
        else:
            # nothing to do
            return

        if len(records) > 0 and 'eventName' in records[0]:
            if 'TableName' in data:
                records[0]['eventSourceARN'] = aws_stack.dynamodb_table_arn(
                    data['TableName'])
            forward_to_lambda(records)
            forward_to_ddb_stream(records)
Example #38
    def setUp(self):
        self.logs_client = aws_stack.connect_to_service('logs')
Example #39
    def test_put_subscription_filter_firehose(self):
        log_group = 'lg-%s' % short_uid()
        log_stream = 'ls-%s' % short_uid()
        s3_bucket = 's3-%s' % short_uid()
        s3_bucket_arn = 'arn:aws:s3:::{}'.format(s3_bucket)
        firehose = 'firehose-%s' % short_uid()

        s3_client = aws_stack.connect_to_service('s3')
        firehose_client = aws_stack.connect_to_service('firehose')

        s3_client.create_bucket(Bucket=s3_bucket)
        response = firehose_client.create_delivery_stream(
            DeliveryStreamName=firehose,
            S3DestinationConfiguration={
                'BucketARN': s3_bucket_arn,
                'RoleARN': 'arn:aws:iam::000000000000:role/FirehosetoS3Role'
            })
        firehose_arn = response['DeliveryStreamARN']

        self.create_log_group_and_stream(log_group, log_stream)

        self.logs_client.put_subscription_filter(
            logGroupName=log_group,
            filterName='Destination',
            filterPattern='',
            destinationArn=firehose_arn,
        )

        self.logs_client.put_log_events(logGroupName=log_group,
                                        logStreamName=log_stream,
                                        logEvents=[
                                            {
                                                'timestamp': 0,
                                                'message': 'test'
                                            },
                                            {
                                                'timestamp': 0,
                                                'message': 'test 2'
                                            },
                                        ])

        self.logs_client.put_log_events(logGroupName=log_group,
                                        logStreamName=log_stream,
                                        logEvents=[
                                            {
                                                'timestamp': 0,
                                                'message': 'test'
                                            },
                                            {
                                                'timestamp': 0,
                                                'message': 'test 2'
                                            },
                                        ])

        response = s3_client.list_objects(Bucket=s3_bucket)
        self.assertEqual(2, len(response['Contents']))

        # clean up
        self.cleanup(log_group, log_stream)
        firehose_client.delete_delivery_stream(DeliveryStreamName=firehose,
                                               AllowForceDelete=True)
Example #40
    def test_put_event_with_content_base_rule_in_pattern(self):
        queue_name = 'queue-{}'.format(short_uid())
        rule_name = 'rule-{}'.format(short_uid())
        target_id = 'target-{}'.format(short_uid())

        sqs_client = aws_stack.connect_to_service('sqs')
        queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

        pattern = {
            'Source': [{
                'exists': True
            }],
            'detail-type': [{
                'prefix': 'core.app'
            }],
            'Detail': json.dumps({
                'description': ['this-is-event-details'],
                'amount': [200],
                'salary': [2000, 4000],
                'env': ['dev', 'prod'],
                'user': ['user1', 'user2', 'user3'],
                'admins': ['skyli', {
                    'prefix': 'hey'
                }, {
                    'prefix': 'ad'
                }],
                'test1': [{
                    'anything-but': 200
                }],
                'test2': [{
                    'anything-but': 'test2'
                }],
                'test3': [{
                    'anything-but': ['test3', 'test33']
                }],
                'test4': [{
                    'anything-but': {
                        'prefix': 'test4'
                    }
                }],
                'ip': [{
                    'cidr': '10.102.1.0/24'
                }],
                'num-test1': [{
                    'numeric': ['<', 200]
                }],
                'num-test2': [{
                    'numeric': ['<=', 200]
                }],
                'num-test3': [{
                    'numeric': ['>', 200]
                }],
                'num-test4': [{
                    'numeric': ['>=', 200]
                }],
                'num-test5': [{
                    'numeric': ['>=', 200, '<=', 500]
                }],
                'num-test6': [{
                    'numeric': ['>', 200, '<', 500]
                }],
                'num-test7': [{
                    'numeric': ['>=', 200, '<', 500]
                }]
            })
        }

        event = {
            'EventBusName': TEST_EVENT_BUS_NAME,
            'Source': 'core.update-account-command',
            'DetailType': 'core.app.backend',
            'Detail': json.dumps({
                'description': 'this-is-event-details',
                'amount': 200,
                'salary': 2000,
                'env': 'prod',
                'user': ['user4', 'user3'],
                'admins': 'admin',
                'test1': 300,
                'test2': 'test22',
                'test3': 'test333',
                'test4': 'this test4',
                'ip': '10.102.1.100',
                'num-test1': 100,
                'num-test2': 200,
                'num-test3': 300,
                'num-test4': 200,
                'num-test5': 500,
                'num-test6': 300,
                'num-test7': 300
            })
        }

        self.events_client.create_event_bus(Name=TEST_EVENT_BUS_NAME)

        self.events_client.put_rule(Name=rule_name,
                                    EventBusName=TEST_EVENT_BUS_NAME,
                                    EventPattern=json.dumps(pattern))

        self.events_client.put_targets(Rule=rule_name,
                                       EventBusName=TEST_EVENT_BUS_NAME,
                                       Targets=[{
                                           'Id': target_id,
                                           'Arn': queue_arn,
                                           'InputPath': '$.detail'
                                       }])
        self.events_client.put_events(Entries=[event])

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp.get('Messages')

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(len(messages), 1)
        self.assertEqual(json.loads(messages[0].get('Body')),
                         json.loads(event['Detail']))

        event_details = json.loads(event['Detail'])
        event_details['admins'] = 'not_admin'
        event['Detail'] = json.dumps(event_details)

        self.events_client.put_events(Entries=[event])

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(messages, None)

        # clean up
        sqs_client.delete_queue(QueueUrl=queue_url)

        self.events_client.remove_targets(Rule=rule_name,
                                          EventBusName=TEST_EVENT_BUS_NAME,
                                          Ids=[target_id],
                                          Force=True)
        self.events_client.delete_rule(Name=rule_name,
                                       EventBusName=TEST_EVENT_BUS_NAME,
                                       Force=True)
        self.events_client.delete_event_bus(Name=TEST_EVENT_BUS_NAME)
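# The rule/target/bus teardown above recurs across these tests; a helper one
# might extract (sketch -- the helper name is illustrative, the client calls
# are the same boto3 calls used above):
def cleanup_event_bus(events_client, bus_name, rule_name, target_id):
    # targets must be removed before the rule, and the rule before the bus
    events_client.remove_targets(Rule=rule_name, EventBusName=bus_name,
                                 Ids=[target_id], Force=True)
    events_client.delete_rule(Name=rule_name, EventBusName=bus_name, Force=True)
    events_client.delete_event_bus(Name=bus_name)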
Example #41
0
    def test_put_events_with_input_path_multiple(self):
        queue_name = 'queue-{}'.format(short_uid())
        queue_name_1 = 'queue-{}'.format(short_uid())
        rule_name = 'rule-{}'.format(short_uid())
        target_id = 'target-{}'.format(short_uid())
        target_id_1 = 'target-{}'.format(short_uid())
        bus_name = 'bus-{}'.format(short_uid())

        sqs_client = aws_stack.connect_to_service('sqs')
        queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

        queue_url_1 = sqs_client.create_queue(
            QueueName=queue_name_1)['QueueUrl']
        queue_arn_1 = aws_stack.sqs_queue_arn(queue_name_1)

        self.events_client.create_event_bus(Name=bus_name)

        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=bus_name,
            EventPattern=json.dumps(TEST_EVENT_PATTERN))

        self.events_client.put_targets(Rule=rule_name,
                                       EventBusName=bus_name,
                                       Targets=[{
                                           'Id': target_id,
                                           'Arn': queue_arn,
                                           'InputPath': '$.detail'
                                       }, {
                                           'Id': target_id_1,
                                           'Arn': queue_arn_1,
                                       }])

        self.events_client.put_events(
            Entries=[{
                'EventBusName': bus_name,
                'Source': TEST_EVENT_PATTERN['Source'][0],
                'DetailType': TEST_EVENT_PATTERN['detail-type'][0],
                'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0])
            }])

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp.get('Messages')

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(len(messages), 1)
        self.assertEqual(json.loads(messages[0].get('Body')), EVENT_DETAIL)

        messages = retry(get_message,
                         retries=3,
                         sleep=1,
                         queue_url=queue_url_1)
        self.assertEqual(len(messages), 1)
        self.assertEqual(
            json.loads(messages[0].get('Body')).get('detail'), EVENT_DETAIL)

        self.events_client.put_events(
            Entries=[{
                'EventBusName': bus_name,
                'Source': 'dummySource',
                'DetailType': TEST_EVENT_PATTERN['detail-type'][0],
                'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0])
            }])

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertIsNone(messages)

        # clean up
        sqs_client.delete_queue(QueueUrl=queue_url)

        self.events_client.remove_targets(Rule=rule_name,
                                          EventBusName=bus_name,
                                          Ids=[target_id],
                                          Force=True)
        self.events_client.delete_rule(Name=rule_name,
                                       EventBusName=bus_name,
                                       Force=True)
        self.events_client.delete_event_bus(Name=bus_name)
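# Illustrative note: the target configured with InputPath '$.detail' receives
# only the 'detail' subtree, while the target without InputPath receives the
# full event envelope -- hence the two different assertions above. Sketch:
envelope = {'source': 'src', 'detail-type': 'dt', 'detail': {'key': 'value'}}
assert envelope['detail'] == {'key': 'value'}  # what '$.detail' selects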
Example #42
0
    def test_put_events_with_target_firehose(self):
        s3_bucket = 's3-{}'.format(short_uid())
        s3_prefix = 'testeventdata'
        stream_name = 'firehose-{}'.format(short_uid())
        rule_name = 'rule-{}'.format(short_uid())
        target_id = 'target-{}'.format(short_uid())
        bus_name = 'bus-{}'.format(short_uid())

        # create firehose target bucket
        s3_client = aws_stack.connect_to_service('s3')
        s3_client.create_bucket(Bucket=s3_bucket)

        # create firehose delivery stream to s3
        firehose_client = aws_stack.connect_to_service('firehose')
        stream = firehose_client.create_delivery_stream(
            DeliveryStreamName=stream_name,
            S3DestinationConfiguration={
                'RoleARN': aws_stack.iam_resource_arn('firehose'),
                'BucketARN': aws_stack.s3_bucket_arn(s3_bucket),
                'Prefix': s3_prefix
            })
        stream_arn = stream['DeliveryStreamARN']

        self.events_client.create_event_bus(Name=bus_name)

        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=bus_name,
            EventPattern=json.dumps(TEST_EVENT_PATTERN))

        rs = self.events_client.put_targets(Rule=rule_name,
                                            EventBusName=bus_name,
                                            Targets=[{
                                                'Id': target_id,
                                                'Arn': stream_arn
                                            }])

        self.assertIn('FailedEntryCount', rs)
        self.assertIn('FailedEntries', rs)
        self.assertEqual(rs['FailedEntryCount'], 0)
        self.assertEqual(rs['FailedEntries'], [])

        self.events_client.put_events(
            Entries=[{
                'EventBusName': bus_name,
                'Source': TEST_EVENT_PATTERN['Source'][0],
                'DetailType': TEST_EVENT_PATTERN['detail-type'][0],
                'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0])
            }])

        # run tests
        bucket_contents = s3_client.list_objects(Bucket=s3_bucket)['Contents']
        self.assertEqual(len(bucket_contents), 1)
        key = bucket_contents[0]['Key']
        s3_object = s3_client.get_object(Bucket=s3_bucket, Key=key)
        actual_event = json.loads(s3_object['Body'].read().decode())
        self.assertIsValidEvent(actual_event)
        self.assertEqual(actual_event['detail'],
                         TEST_EVENT_PATTERN['Detail'][0])

        # clean up
        firehose_client.delete_delivery_stream(DeliveryStreamName=stream_name)
        # empty and delete bucket
        s3_client.delete_object(Bucket=s3_bucket, Key=key)
        s3_client.delete_bucket(Bucket=s3_bucket)

        self.events_client.remove_targets(Rule=rule_name,
                                          EventBusName=bus_name,
                                          Ids=[target_id],
                                          Force=True)
        self.events_client.delete_rule(Name=rule_name,
                                       EventBusName=bus_name,
                                       Force=True)
        self.events_client.delete_event_bus(Name=bus_name)
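# The list_objects call above assumes the Firehose delivery has already
# landed in S3; a polling variant may be more robust (sketch, helper name
# illustrative):
import time

def wait_for_s3_object(s3_client, bucket, retries=10, sleep=1.0):
    for _ in range(retries):
        contents = s3_client.list_objects(Bucket=bucket).get('Contents')
        if contents:
            return contents
        time.sleep(sleep)
    raise AssertionError('no object delivered to bucket %s' % bucket)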
Example #43
0
    def setUp(self):
        self.events_client = aws_stack.connect_to_service('events')
        self.iam_client = aws_stack.connect_to_service('iam')
        self.sns_client = aws_stack.connect_to_service('sns')
        self.sfn_client = aws_stack.connect_to_service('stepfunctions')
        self.sqs_client = aws_stack.connect_to_service('sqs')
Example #44
0
def invoke_rest_api_integration(api_id,
                                stage,
                                integration,
                                method,
                                path,
                                invocation_path,
                                data,
                                headers,
                                resource_path,
                                context=None,
                                resource_id=None,
                                response_templates=None):

    # guard against the mutable-default-argument pitfall
    context = context or {}
    response_templates = response_templates or {}

    relative_path, query_string_params = extract_query_string_params(
        path=invocation_path)
    integration_type = integration.get('type') or integration.get(
        'integrationType')
    uri = integration.get('uri') or integration.get('integrationUri')

    if (uri.startswith('arn:aws:apigateway:')
            and ':lambda:path' in uri) or uri.startswith('arn:aws:lambda'):
        if integration_type in ['AWS', 'AWS_PROXY']:
            func_arn = uri
            if ':lambda:path' in uri:
                func_arn = uri.split(':lambda:path')[1].split(
                    'functions/')[1].split('/invocations')[0]
            data_str = (json.dumps(data)
                        if isinstance(data, (dict, list)) else to_str(data))

            try:
                path_params = extract_path_params(path=relative_path,
                                                  extracted_path=resource_path)
            except Exception:
                path_params = {}

            # apply custom request template
            data_str = apply_template(integration,
                                      'request',
                                      data_str,
                                      path_params=path_params,
                                      query_params=query_string_params,
                                      headers=headers)

            # Sample request context:
            # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
            request_context = get_lambda_event_request_context(
                method,
                path,
                data,
                headers,
                integration_uri=uri,
                resource_id=resource_id)
            stage_variables = get_stage_variables(api_id, stage)

            result = lambda_api.process_apigateway_invocation(
                func_arn,
                relative_path,
                data_str,
                stage,
                api_id,
                headers,
                path_params=path_params,
                query_string_params=query_string_params,
                method=method,
                resource_path=path,
                request_context=request_context,
                event_context=context,
                stage_variables=stage_variables)

            if isinstance(result, FlaskResponse):
                response = flask_to_requests_response(result)
            elif isinstance(result, Response):
                response = result
            else:
                response = LambdaResponse()
                parsed_result = result if isinstance(
                    result, dict) else json.loads(str(result or '{}'))
                parsed_result = common.json_safe(parsed_result)
                parsed_result = {} if parsed_result is None else parsed_result
                response.status_code = int(parsed_result.get(
                    'statusCode', 200))
                parsed_headers = parsed_result.get('headers', {})
                if parsed_headers is not None:
                    response.headers.update(parsed_headers)
                try:
                    if isinstance(parsed_result['body'], dict):
                        response._content = json.dumps(parsed_result['body'])
                    else:
                        response._content = to_bytes(parsed_result['body'])
                except Exception:
                    response._content = '{}'
                update_content_length(response)
                response.multi_value_headers = parsed_result.get(
                    'multiValueHeaders') or {}

            # apply custom response template
            response._content = apply_template(integration, 'response',
                                               response._content)
            response.headers['Content-Length'] = str(
                len(response.content or ''))

            return response

        msg = 'API Gateway AWS integration action URI "%s", method "%s" not yet implemented' % (
            uri, method)
        LOGGER.warning(msg)
        return make_error_response(msg, 404)

    elif integration_type == 'AWS':
        if 'kinesis:action/' in uri:
            if uri.endswith('kinesis:action/PutRecords'):
                target = kinesis_listener.ACTION_PUT_RECORDS
            elif uri.endswith('kinesis:action/ListStreams'):
                target = kinesis_listener.ACTION_LIST_STREAMS
            else:
                # avoid an unbound 'target' for unsupported kinesis actions
                target = ''

            template = integration['requestTemplates'][APPLICATION_JSON]
            new_request = aws_stack.render_velocity_template(template, data)
            # forward records to target kinesis stream
            headers = aws_stack.mock_aws_request_headers(service='kinesis')
            headers['X-Amz-Target'] = target
            result = common.make_http_request(url=TEST_KINESIS_URL,
                                              method='POST',
                                              data=new_request,
                                              headers=headers)
            # TODO apply response template..?
            return result

        elif 'states:action/' in uri:
            if uri.endswith('states:action/StartExecution'):
                action = 'StartExecution'
            decoded_data = data.decode()
            if 'stateMachineArn' in decoded_data and 'input' in decoded_data:
                payload = json.loads(decoded_data)
            elif APPLICATION_JSON in integration.get('requestTemplates', {}):
                template = integration['requestTemplates'][APPLICATION_JSON]
                payload = aws_stack.render_velocity_template(template,
                                                             data,
                                                             as_json=True)
            client = aws_stack.connect_to_service('stepfunctions')

            kwargs = {'name': payload['name']} if 'name' in payload else {}
            result = client.start_execution(
                stateMachineArn=payload['stateMachineArn'],
                input=payload['input'],
                **kwargs)
            response = requests_response(
                content={
                    'executionArn': result['executionArn'],
                    'startDate': str(result['startDate'])
                },
                headers=aws_stack.mock_aws_request_headers())
            return response

        if method == 'POST':
            if uri.startswith('arn:aws:apigateway:') and ':sqs:path' in uri:
                template = integration['requestTemplates'][APPLICATION_JSON]
                account_id, queue = uri.split('/')[-2:]
                region_name = uri.split(':')[3]

                new_request = '%s&QueueName=%s' % (
                    aws_stack.render_velocity_template(template, data), queue)
                headers = aws_stack.mock_aws_request_headers(
                    service='sqs', region_name=region_name)

                url = urljoin(TEST_SQS_URL,
                              '%s/%s' % (TEST_AWS_ACCOUNT_ID, queue))
                result = common.make_http_request(url,
                                                  method='POST',
                                                  headers=headers,
                                                  data=new_request)
                return result

        msg = 'API Gateway AWS integration action URI "%s", method "%s" not yet implemented' % (
            uri, method)
        LOGGER.warning(msg)
        return make_error_response(msg, 404)

    elif integration_type == 'AWS_PROXY':
        if uri.startswith('arn:aws:apigateway:') and ':dynamodb:action' in uri:
            # arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection
            table_name = uri.split(':dynamodb:action')[1].split('&Table=')[1]
            action = uri.split(':dynamodb:action')[1].split('&Table=')[0]

            if 'PutItem' in action and method == 'PUT':
                response_template = response_templates.get(
                    'application/json', None)

                if response_template is None:
                    msg = 'Invalid response template defined in integration response.'
                    return make_error_response(msg, 404)

                response_template = json.loads(response_template)
                if response_template['TableName'] != table_name:
                    msg = 'Invalid table name specified in integration response template.'
                    return make_error_response(msg, 404)

                dynamo_client = aws_stack.connect_to_resource('dynamodb')
                table = dynamo_client.Table(table_name)

                event_data = {}
                data_dict = json.loads(data)
                for key, _ in response_template['Item'].items():
                    event_data[key] = data_dict[key]

                table.put_item(Item=event_data)
                response = requests_response(
                    event_data, headers=aws_stack.mock_aws_request_headers())
                return response
        else:
            msg = 'API Gateway action uri "%s", integration type %s not yet implemented' % (
                uri, integration_type)
            LOGGER.warning(msg)
            return make_error_response(msg, 404)

    elif integration_type in ['HTTP_PROXY', 'HTTP']:
        function = getattr(requests, method.lower())

        # apply custom request template
        data = apply_template(integration, 'request', data)

        if isinstance(data, dict):
            data = json.dumps(data)

        result = function(uri, data=data, headers=headers)

        # apply custom response template
        data = apply_template(integration, 'response', data)

        return result

    elif integration_type == 'MOCK':
        # TODO: add logic for MOCK responses
        pass

    if method == 'OPTIONS':
        # fall back to returning CORS headers if this is an OPTIONS request
        return get_cors_response(headers)

    msg = (
        'API Gateway integration type "%s", method "%s", URI "%s" not yet implemented'
        % (integration_type, method, uri))
    LOGGER.warning(msg)
    return make_error_response(msg, 404)
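# extract_query_string_params is a LocalStack helper; a minimal stand-in with
# the assumed behavior (split an invocation path into its path component and
# a dict of query parameters) could look like this sketch:
from urllib.parse import parse_qs, urlparse

def split_path_and_query(invocation_path):
    parsed = urlparse(invocation_path)
    query = {k: v[0] if len(v) == 1 else v
             for k, v in parse_qs(parsed.query).items()}
    return parsed.path, query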
Example #45
0
def authorize_invocation(api_id, headers):
    client = aws_stack.connect_to_service('apigateway')
    authorizers = client.get_authorizers(restApiId=api_id,
                                         limit=100).get('items', [])
    for authorizer in authorizers:
        run_authorizer(api_id, headers, authorizer)
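# get_authorizers pages its results via the 'position' token; if an API had
# more authorizers than one page returns, a paginated variant might look like
# this (sketch):
def get_all_authorizers(client, api_id):
    items, position = [], None
    while True:
        kwargs = {'restApiId': api_id, 'limit': 100}
        if position:
            kwargs['position'] = position
        page = client.get_authorizers(**kwargs)
        items.extend(page.get('items', []))
        position = page.get('position')
        if not position:
            return items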
Example #46
0
    def test_metric_filters(self):
        log_group = 'g-%s' % short_uid()
        log_stream = 's-%s' % short_uid()
        filter_name = 'f-%s' % short_uid()
        metric_ns = 'ns-%s' % short_uid()
        metric_name = 'metric1'
        transforms = {
            'metricNamespace': metric_ns,
            'metricName': metric_name,
            'metricValue': '1',
            'defaultValue': 123
        }
        result = self.logs_client.put_metric_filter(
            logGroupName=log_group,
            filterName=filter_name,
            filterPattern='*',
            metricTransformations=[transforms])
        self.assertEqual(200, result['ResponseMetadata']['HTTPStatusCode'])

        result = self.logs_client.describe_metric_filters(
            logGroupName=log_group, filterNamePrefix='f-')
        self.assertEqual(200, result['ResponseMetadata']['HTTPStatusCode'])
        result = [
            mf for mf in result['metricFilters']
            if mf['filterName'] == filter_name
        ]
        self.assertEqual(1, len(result))

        # put log events and assert metrics being published
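        # note: on real AWS, 'timestamp' must be milliseconds since the epoch
        # (and events older than 14 days are rejected); LocalStack is more
        # lenient with the seconds-style values used below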
        events = [{
            'timestamp': 1585902800,
            'message': 'log message 1'
        }, {
            'timestamp': 1585902961,
            'message': 'log message 2'
        }]
        self.create_log_group_and_stream(log_group, log_stream)
        self.logs_client.put_log_events(logGroupName=log_group,
                                        logStreamName=log_stream,
                                        logEvents=events)

        # Get metric data
        cw_client = aws_stack.connect_to_service('cloudwatch')
        metric_data = cw_client.get_metric_data(
            MetricDataQueries=[{
                'Id': 'q1',
                'MetricStat': {
                    'Metric': {
                        'Namespace': metric_ns,
                        'MetricName': metric_name
                    },
                    'Period': 60,
                    'Stat': 'Sum'
                }
            }],
            StartTime=datetime.utcnow() - timedelta(hours=1),
            EndTime=datetime.utcnow(),
        )['MetricDataResults']
        self.assertEqual(1, len(metric_data))
        self.assertEqual([1], metric_data[0]['Values'])
        self.assertEqual('Complete', metric_data[0]['StatusCode'])

        # delete filters
        result = self.logs_client.delete_metric_filter(logGroupName=log_group,
                                                       filterName=filter_name)
        self.assertEqual(200, result['ResponseMetadata']['HTTPStatusCode'])

        result = self.logs_client.describe_metric_filters(
            logGroupName=log_group, filterNamePrefix='f-')
        self.assertEqual(200, result['ResponseMetadata']['HTTPStatusCode'])
        result = [
            mf for mf in result['metricFilters']
            if mf['filterName'] == filter_name
        ]
        self.assertEqual(0, len(result))
Example #47
0
def get_queue_urls():
    sqs = aws_stack.connect_to_service('sqs')
    response = sqs.list_queues()
    # list_queues omits 'QueueUrls' from the response when no queues exist
    return response.get('QueueUrls', [])
Example #48
0
    def test_put_events_into_event_bus(self):
        queue_name = 'queue-{}'.format(short_uid())
        rule_name = 'rule-{}'.format(short_uid())
        target_id = 'target-{}'.format(short_uid())
        bus_name_1 = 'bus1-{}'.format(short_uid())
        bus_name_2 = 'bus2-{}'.format(short_uid())

        sqs_client = aws_stack.connect_to_service('sqs')
        queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

        self.events_client.create_event_bus(Name=bus_name_1)

        resp = self.events_client.create_event_bus(Name=bus_name_2)

        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=bus_name_1,
        )

        self.events_client.put_targets(Rule=rule_name,
                                       EventBusName=bus_name_1,
                                       Targets=[{
                                           'Id': target_id,
                                           'Arn': resp.get('EventBusArn')
                                       }])

        self.events_client.put_targets(Rule=rule_name,
                                       EventBusName=bus_name_2,
                                       Targets=[{
                                           'Id': target_id,
                                           'Arn': queue_arn
                                       }])

        self.events_client.put_events(
            Entries=[{
                'EventBusName': bus_name_1,
                'Source': TEST_EVENT_PATTERN['Source'][0],
                'DetailType': TEST_EVENT_PATTERN['detail-type'][0],
                'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0])
            }])

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp['Messages']

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(len(messages), 1)

        actual_event = json.loads(messages[0]['Body'])
        self.assertIsValidEvent(actual_event)
        self.assertEqual(actual_event['detail'],
                         TEST_EVENT_PATTERN['Detail'][0])

        # clean up
        sqs_client.delete_queue(QueueUrl=queue_url)

        self.events_client.remove_targets(Rule=rule_name,
                                          EventBusName=bus_name_1,
                                          Ids=[target_id],
                                          Force=True)

        self.events_client.remove_targets(Rule=rule_name,
                                          EventBusName=bus_name_2,
                                          Ids=[target_id],
                                          Force=True)

        self.events_client.delete_rule(Name=rule_name,
                                       EventBusName=bus_name_1,
                                       Force=True)
        self.events_client.delete_event_bus(Name=bus_name_1)
        self.events_client.delete_event_bus(Name=bus_name_2)
Example #49
0
def send_notifications(method, bucket_name, object_path, version_id):
    for bucket, b_cfg in iteritems(S3_NOTIFICATIONS):
        if bucket == bucket_name:
            action = {
                'PUT': 'ObjectCreated',
                'POST': 'ObjectCreated',
                'DELETE': 'ObjectRemoved'
            }[method]
            # TODO: support more detailed methods, e.g., DeleteMarkerCreated
            # http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
            if action == 'ObjectCreated' and method == 'POST':
                api_method = 'CompleteMultipartUpload'
            else:
                api_method = {
                    'PUT': 'Put',
                    'POST': 'Post',
                    'DELETE': 'Delete'
                }[method]

            event_name = '%s:%s' % (action, api_method)
            if (event_type_matches(b_cfg['Event'], action, api_method)
                    and filter_rules_match(b_cfg.get('Filter'), object_path)):
                # send notification
                message = get_event_message(event_name=event_name,
                                            bucket_name=bucket_name,
                                            file_name=urlparse.urlparse(
                                                object_path[1:]).path,
                                            version_id=version_id)
                message = json.dumps(message)
                if b_cfg.get('Queue'):
                    sqs_client = aws_stack.connect_to_service('sqs')
                    try:
                        queue_url = queue_url_for_arn(b_cfg['Queue'])
                        sqs_client.send_message(QueueUrl=queue_url,
                                                MessageBody=message)
                    except Exception as e:
                        LOGGER.warning(
                            'Unable to send notification for S3 bucket "%s" to SQS queue "%s": %s'
                            % (bucket_name, b_cfg['Queue'], e))
                if b_cfg.get('Topic'):
                    sns_client = aws_stack.connect_to_service('sns')
                    try:
                        sns_client.publish(TopicArn=b_cfg['Topic'],
                                           Message=message,
                                           Subject='Amazon S3 Notification')
                    except Exception:
                        LOGGER.warning(
                            'Unable to send notification for S3 bucket "%s" to SNS topic "%s".'
                            % (bucket_name, b_cfg['Topic']))
                # CloudFunction and LambdaFunction are semantically identical
                lambda_function_config = b_cfg.get(
                    'CloudFunction') or b_cfg.get('LambdaFunction')
                if lambda_function_config:
                    # make sure we don't run into a socket timeout
                    connection_config = botocore.config.Config(
                        read_timeout=300)
                    lambda_client = aws_stack.connect_to_service(
                        'lambda', config=connection_config)
                    try:
                        lambda_client.invoke(
                            FunctionName=lambda_function_config,
                            InvocationType='Event',
                            Payload=message)
                    except Exception:
                        LOGGER.warning(
                            'Unable to send notification for S3 bucket "%s" to Lambda function "%s".'
                            % (bucket_name, lambda_function_config))
                # note: in Python 3, filter() returns a lazy iterator that is
                # always truthy, so use any() for the emptiness check
                if not any(b_cfg.get(x) for x in NOTIFICATION_DESTINATION_TYPES):
                    LOGGER.warning(
                        'Neither of %s defined for S3 notification.' %
                        '/'.join(NOTIFICATION_DESTINATION_TYPES))
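# Assumed semantics of event_type_matches (illustrative sketch, not the actual
# implementation): a configured event such as 's3:ObjectCreated:*' matches any
# API method for that action, while 's3:ObjectCreated:Put' matches only Put.
def event_type_matches_sketch(configured, action, api_method):
    configured = configured if isinstance(configured, list) else [configured]
    expected = 's3:%s:%s' % (action, api_method)
    wildcard = 's3:%s:*' % action
    return any(event in (expected, wildcard) for event in configured)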
Example #50
0
def queue_url_exists(queue_url):
    sqs_client = aws_stack.connect_to_service('sqs')
    queues = sqs_client.list_queues()
    # 'QueueUrls' is absent from the response when no queues exist
    return queue_url in queues.get('QueueUrls', [])
Example #51
0
def test_lambda_runtimes():

    lambda_client = aws_stack.connect_to_service('lambda')

    # deploy and invoke lambda - Python 2.7
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON),
                                              get_content=True,
                                              libs=TEST_LAMBDA_LIBS,
                                              runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_PY,
                                    zip_file=zip_file,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY,
                                  Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert to_str(result_data).strip() == '{}'

    if use_docker():
        # deploy and invoke lambda - Python 3.6
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON3),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON36)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_PY3,
                                        zip_file=zip_file,
                                        runtime=LAMBDA_RUNTIME_PYTHON36)
        result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY3,
                                      Payload=b'{}')
        assert result['StatusCode'] == 200
        result_data = result['Payload'].read()
        assert to_str(result_data).strip() == '{}'

    # deploy and invoke lambda - Java
    if not os.path.exists(TEST_LAMBDA_JAVA):
        mkdir(os.path.dirname(TEST_LAMBDA_JAVA))
        download(TEST_LAMBDA_JAR_URL, TEST_LAMBDA_JAVA)
    zip_file = testutil.create_zip_file(TEST_LAMBDA_JAVA, get_content=True)
    testutil.create_lambda_function(
        func_name=TEST_LAMBDA_NAME_JAVA,
        zip_file=zip_file,
        runtime=LAMBDA_RUNTIME_JAVA8,
        handler='cloud.localstack.sample.LambdaHandler')
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA,
                                  Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert 'LinkedHashMap' in to_str(result_data)

    # test SNSEvent
    result = lambda_client.invoke(
        FunctionName=TEST_LAMBDA_NAME_JAVA,
        InvocationType='Event',
        Payload=b'{"Records": [{"Sns": {"Message": "{}"}}]}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert json.loads(to_str(result_data)) == {'async': 'True'}

    # test KinesisEvent
    result = lambda_client.invoke(
        FunctionName=TEST_LAMBDA_NAME_JAVA,
        Payload=b'{"Records": [{"Kinesis": {"Data": "data", "PartitionKey": "partition"}}]}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert 'KinesisEvent' in to_str(result_data)

    # deploy and invoke lambda - Java with stream handler
    testutil.create_lambda_function(
        func_name=TEST_LAMBDA_NAME_JAVA_STREAM,
        zip_file=zip_file,
        runtime=LAMBDA_RUNTIME_JAVA8,
        handler='cloud.localstack.sample.LambdaStreamHandler')
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA_STREAM,
                                  Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert to_str(result_data).strip() == '{}'

    if use_docker():
        # deploy and invoke lambda - Node.js
        zip_file = testutil.create_zip_file(TEST_LAMBDA_NODEJS,
                                            get_content=True)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_JS,
                                        zip_file=zip_file,
                                        handler='lambda_integration.handler',
                                        runtime=LAMBDA_RUNTIME_NODEJS)
        result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JS,
                                      Payload=b'{}')
        assert result['StatusCode'] == 200
        result_data = result['Payload'].read()
        assert to_str(result_data).strip() == '{}'
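# The deploy-and-invoke steps above repeat once per runtime; a helper capturing
# the common pattern (sketch, assuming the same testutil helpers used above):
def deploy_and_invoke(func_name, zip_file, runtime, handler=None, payload=b'{}'):
    kwargs = {'handler': handler} if handler else {}
    testutil.create_lambda_function(func_name=func_name, zip_file=zip_file,
                                    runtime=runtime, **kwargs)
    lambda_client = aws_stack.connect_to_service('lambda')
    return lambda_client.invoke(FunctionName=func_name, Payload=payload)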
Example #52
0
def bucket_exists(name):
    s3_client = aws_stack.connect_to_service('s3')
    buckets = s3_client.list_buckets()
    for bucket in buckets['Buckets']:
        if bucket['Name'] == name:
            return True
    return False
Example #53
0
def list_stack_resources(stack_name):
    cloudformation = aws_stack.connect_to_service('cloudformation')
    response = cloudformation.list_stack_resources(StackName=stack_name)
    return response['StackResourceSummaries']
Example #54
0
def forward_events(records):
    if not records:
        return
    kinesis = aws_stack.connect_to_service('kinesis')
    kinesis.put_records(StreamName=KINESIS_STREAM_NAME, Records=records)
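# PutRecords accepts at most 500 records per request; for larger batches a
# chunked variant of the function above could look like this (sketch):
def forward_events_batched(records, batch_size=500):
    if not records:
        return
    kinesis = aws_stack.connect_to_service('kinesis')
    for i in range(0, len(records), batch_size):
        kinesis.put_records(StreamName=KINESIS_STREAM_NAME,
                            Records=records[i:i + batch_size])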
Example #55
0
def describe_stack_resource(stack_name, resource_logical_id):
    cloudformation = aws_stack.connect_to_service('cloudformation')
    response = cloudformation.describe_stack_resources(StackName=stack_name)
    for resource in response['StackResources']:
        if resource['LogicalResourceId'] == resource_logical_id:
            return resource
Example #56
0
    def test_list_stack_events(self):
        cloudformation = aws_stack.connect_to_service('cloudformation')
        response = cloudformation.describe_stack_events()
        self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
Example #57
0
    def test_validate_template(self):
        cloudformation = aws_stack.connect_to_service('cloudformation')
        template = template_deployer.template_to_json(
            load_file(TEST_TEMPLATE_1))
        response = cloudformation.validate_template(TemplateBody=template)
        self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
Example #58
0
def get_topic_arns():
    sns = aws_stack.connect_to_service('sns')
    response = sns.list_topics()
    return [t['TopicArn'] for t in response['Topics']]
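# list_topics returns at most 100 topics per call and pages via NextToken; a
# variant that walks all pages (sketch):
def get_all_topic_arns():
    sns = aws_stack.connect_to_service('sns')
    arns, token = [], None
    while True:
        kwargs = {'NextToken': token} if token else {}
        response = sns.list_topics(**kwargs)
        arns.extend(t['TopicArn'] for t in response['Topics'])
        token = response.get('NextToken')
        if not token:
            return arns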
Example #59
0
    def test_put_events_with_target_sns(self):
        queue_name = 'test-%s' % short_uid()
        rule_name = 'rule-{}'.format(short_uid())
        target_id = 'target-{}'.format(short_uid())
        bus_name = 'bus-{}'.format(short_uid())

        sns_client = aws_stack.connect_to_service('sns')
        sqs_client = aws_stack.connect_to_service('sqs')
        topic_name = 'topic-{}'.format(short_uid())
        topic_arn = sns_client.create_topic(Name=topic_name)['TopicArn']

        queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

        sns_client.subscribe(TopicArn=topic_arn,
                             Protocol='sqs',
                             Endpoint=queue_arn)

        self.events_client.create_event_bus(Name=bus_name)

        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=bus_name,
            EventPattern=json.dumps(TEST_EVENT_PATTERN))

        rs = self.events_client.put_targets(Rule=rule_name,
                                            EventBusName=bus_name,
                                            Targets=[{
                                                'Id': target_id,
                                                'Arn': topic_arn
                                            }])

        self.assertIn('FailedEntryCount', rs)
        self.assertIn('FailedEntries', rs)
        self.assertEqual(rs['FailedEntryCount'], 0)
        self.assertEqual(rs['FailedEntries'], [])

        self.events_client.put_events(
            Entries=[{
                'EventBusName': bus_name,
                'Source': TEST_EVENT_PATTERN['Source'][0],
                'DetailType': TEST_EVENT_PATTERN['detail-type'][0],
                'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0])
            }])

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp['Messages']

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(len(messages), 1)

        actual_event = json.loads(messages[0]['Body']).get('Message')
        self.assertIsValidEvent(actual_event)
        self.assertEqual(
            json.loads(actual_event).get('detail'),
            TEST_EVENT_PATTERN['Detail'][0])

        # clean up
        sqs_client.delete_queue(QueueUrl=queue_url)
        sns_client.delete_topic(TopicArn=topic_arn)

        self.events_client.remove_targets(Rule=rule_name,
                                          EventBusName=bus_name,
                                          Ids=[target_id],
                                          Force=True)
        self.events_client.delete_rule(Name=rule_name,
                                       EventBusName=bus_name,
                                       Force=True)
        self.events_client.delete_event_bus(Name=bus_name)
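# Messages that arrive in SQS via an SNS subscription are wrapped in an SNS
# envelope, which is why the test reads json.loads(body).get('Message') before
# decoding the event. A small unwrapping helper (sketch, assuming raw message
# delivery is disabled, the default):
def unwrap_sns_envelope(sqs_body):
    envelope = json.loads(sqs_body)         # outer SNS notification document
    return json.loads(envelope['Message'])  # inner EventBridge event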
Example #60
0
def get_stack_details(stack_name):
    cloudformation = aws_stack.connect_to_service('cloudformation')
    stacks = cloudformation.describe_stacks(StackName=stack_name)
    for stack in stacks['Stacks']:
        if stack['StackName'] == stack_name:
            return stack