Example #1
    def checkpoint(self, checkpointer, sequence_number=None, sub_sequence_number=None):
        def do_checkpoint():
            checkpointer.checkpoint(sequence_number, sub_sequence_number)

        try:
            retry(do_checkpoint, retries=CHECKPOINT_RETRIES, sleep=CHECKPOINT_SLEEP_SECS)
        except Exception as e:
            LOGGER.warning('Unable to checkpoint Kinesis after retries: %s' % e)
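
The retry helper used throughout these examples is not shown on this page. Judging from the call sites (a callable, a retry count, a sleep interval in seconds, and optional keyword arguments that are forwarded to the callable, whose return value is passed through), a minimal sketch could look like the following; the actual implementation in the source tree may differ:

import time

def retry(function, retries=3, sleep=1.0, **kwargs):
    # call `function` until it stops raising, for at most `retries` attempts,
    # sleeping `sleep` seconds between failed attempts; re-raise the last error
    last_error = None
    for _ in range(retries):
        try:
            return function(**kwargs)
        except Exception as e:
            last_error = e
            time.sleep(sleep)
    raise last_error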
Example #2
    def test_list_stack_resources_returns_queue_urls(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        template = template_deployer.template_to_json(load_file(TEST_TEMPLATE_2))
        cloudformation.create_stack(StackName=TEST_STACK_NAME_2, TemplateBody=template)

        def check_stack():
            stack = get_stack_details(TEST_STACK_NAME_2)
            assert stack['StackStatus'] == 'CREATE_COMPLETE'

        retry(check_stack, retries=3, sleep=2)

        list_stack_summaries = list_stack_resources(TEST_STACK_NAME_2)
        queue_urls = get_queue_urls()

        for resource in list_stack_summaries:
            assert resource['PhysicalResourceId'] in queue_urls
Example #3
    def test_apply_template(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        template = template_deployer.template_to_json(load_file(TEST_TEMPLATE_1))

        # deploy template
        cloudformation.create_stack(StackName=TEST_STACK_NAME, TemplateBody=template)

        # wait for deployment to finish
        def check_stack():
            stack = get_stack_details(TEST_STACK_NAME)
            assert stack['StackStatus'] == 'CREATE_COMPLETE'

        retry(check_stack, retries=3, sleep=2)

        # assert that bucket has been created
        assert bucket_exists('cf-test-bucket-1')
        # assert that queue has been created
        assert queue_exists('cf-test-queue-1')
        # assert that stream has been created
        assert stream_exists('cf-test-stream-1')
        # assert that queue without an explicit name property has been created
        resource = describe_stack_resource(TEST_STACK_NAME, 'SQSQueueNoNameProperty')
        assert queue_exists(resource['PhysicalResourceId'])
Example #4
    def test_lambda_streams_batch_and_transactions(self):
        ddb_lease_table_suffix = '-kclapp2'
        table_name = TEST_TABLE_NAME + 'lsbat' + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        dynamodb = aws_stack.connect_to_service('dynamodb', client=True)
        dynamodb_service = aws_stack.connect_to_service('dynamodb')
        dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')

        LOGGER.info('Creating test streams...')
        run_safe(lambda: dynamodb_service.delete_table(
            TableName=stream_name + ddb_lease_table_suffix), print_error=False)
        aws_stack.create_kinesis_stream(stream_name, delete=True)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(stream_name, listener_func=process_records,
            wait_until_started=True, ddb_lease_table_suffix=ddb_lease_table_suffix)

        LOGGER.info('Kinesis consumer initialized.')

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(table_name, partition_key=PARTITION_KEY,
            stream_view_type='NEW_AND_OLD_IMAGES')

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams['Streams']:
            if stream['TableName'] == table_name:
                ddb_event_source_arn = stream['StreamArn']
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        testutil.create_lambda_function(
            handler_file=TEST_LAMBDA_PYTHON, libs=TEST_LAMBDA_LIBS, func_name=TEST_LAMBDA_NAME_DDB,
            event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27, delete=True)

        # submit a batch with writes
        dynamodb.batch_write_item(RequestItems={table_name: [
            {'PutRequest': {'Item': {PARTITION_KEY: {'S': 'testId0'}, 'data': {'S': 'foobar123'}}}},
            {'PutRequest': {'Item': {PARTITION_KEY: {'S': 'testId1'}, 'data': {'S': 'foobar123'}}}},
            {'PutRequest': {'Item': {PARTITION_KEY: {'S': 'testId2'}, 'data': {'S': 'foobar123'}}}}
        ]})

        # submit a batch with writes and deletes
        dynamodb.batch_write_item(RequestItems={table_name: [
            {'PutRequest': {'Item': {PARTITION_KEY: {'S': 'testId3'}, 'data': {'S': 'foobar123'}}}},
            {'PutRequest': {'Item': {PARTITION_KEY: {'S': 'testId4'}, 'data': {'S': 'foobar123'}}}},
            {'PutRequest': {'Item': {PARTITION_KEY: {'S': 'testId5'}, 'data': {'S': 'foobar123'}}}},
            {'DeleteRequest': {'Key': {PARTITION_KEY: {'S': 'testId0'}}}},
            {'DeleteRequest': {'Key': {PARTITION_KEY: {'S': 'testId1'}}}},
            {'DeleteRequest': {'Key': {PARTITION_KEY: {'S': 'testId2'}}}},
        ]})

        # submit a transaction with writes and delete
        dynamodb.transact_write_items(TransactItems=[
            {'Put': {'TableName': table_name,
                'Item': {PARTITION_KEY: {'S': 'testId6'}, 'data': {'S': 'foobar123'}}}},
            {'Put': {'TableName': table_name,
                'Item': {PARTITION_KEY: {'S': 'testId7'}, 'data': {'S': 'foobar123'}}}},
            {'Put': {'TableName': table_name,
                'Item': {PARTITION_KEY: {'S': 'testId8'}, 'data': {'S': 'foobar123'}}}},
            {'Delete': {'TableName': table_name, 'Key': {PARTITION_KEY: {'S': 'testId3'}}}},
            {'Delete': {'TableName': table_name, 'Key': {PARTITION_KEY: {'S': 'testId4'}}}},
            {'Delete': {'TableName': table_name, 'Key': {PARTITION_KEY: {'S': 'testId5'}}}},
        ])

        # submit a batch with a put over existing item
        dynamodb.batch_write_item(RequestItems={table_name: [
            {'PutRequest': {'Item': {PARTITION_KEY: {'S': 'testId6'}, 'data': {'S': 'foobar123_updated1'}}}},
        ]})

        # submit a transaction with a put over existing item
        dynamodb.transact_write_items(TransactItems=[
            {'Put': {'TableName': table_name,
                'Item': {PARTITION_KEY: {'S': 'testId7'}, 'data': {'S': 'foobar123_updated1'}}}},
        ])

        # submit a transaction with updates
        dynamodb.transact_write_items(TransactItems=[
            {'Update': {'TableName': table_name, 'Key': {PARTITION_KEY: {'S': 'testId6'}},
                'UpdateExpression': 'SET #0 = :0',
                'ExpressionAttributeNames': {'#0': 'data'},
                'ExpressionAttributeValues': {':0': {'S': 'foobar123_updated2'}}}},
            {'Update': {'TableName': table_name, 'Key': {PARTITION_KEY: {'S': 'testId7'}},
                'UpdateExpression': 'SET #0 = :0',
                'ExpressionAttributeNames': {'#0': 'data'},
                'ExpressionAttributeValues': {':0': {'S': 'foobar123_updated2'}}}},
            {'Update': {'TableName': table_name, 'Key': {PARTITION_KEY: {'S': 'testId8'}},
                'UpdateExpression': 'SET #0 = :0',
                'ExpressionAttributeNames': {'#0': 'data'},
                'ExpressionAttributeValues': {':0': {'S': 'foobar123_updated2'}}}},
        ])

        LOGGER.info('Waiting some time before finishing test.')
        time.sleep(2)

        num_insert = 9
        num_modify = 5
        num_delete = 6
        num_events = num_insert + num_modify + num_delete

        def check_events():
            if len(events) != num_events:
                LOGGER.warning('DynamoDB updates retrieved (actual/expected): %s/%s' %
                    (len(events), num_events))
            self.assertEqual(len(events), num_events)
            event_items = [json.loads(base64.b64decode(e['data'])) for e in events]
            # make sure we have the right number of expected event types
            inserts = [e for e in event_items if e.get('__action_type') == 'INSERT']
            modifies = [e for e in event_items if e.get('__action_type') == 'MODIFY']
            removes = [e for e in event_items if e.get('__action_type') == 'REMOVE']
            self.assertEqual(len(inserts), num_insert)
            self.assertEqual(len(modifies), num_modify)
            self.assertEqual(len(removes), num_delete)

            # assert that all inserts were received

            for i, event in enumerate(inserts):
                self.assertNotIn('old_image', event)
                item_id = 'testId%d' % i
                matching = [ins for ins in inserts if ins['new_image']['id'] == item_id][0]
                self.assertEqual(matching['new_image'], {'id': item_id, 'data': 'foobar123'})

            # assert that all updates were received

            def assert_updates(expected_updates, modifies):
                def found(update):
                    for modif in modifies:
                        if modif['old_image']['id'] == update['id']:
                            self.assertEqual(modif['old_image'], {'id': update['id'], 'data': update['old']})
                            self.assertEqual(modif['new_image'], {'id': update['id'], 'data': update['new']})
                            return True
                for update in expected_updates:
                    self.assertTrue(found(update))

            updates1 = [
                {'id': 'testId6', 'old': 'foobar123', 'new': 'foobar123_updated1'},
                {'id': 'testId7', 'old': 'foobar123', 'new': 'foobar123_updated1'}
            ]
            updates2 = [
                {'id': 'testId6', 'old': 'foobar123_updated1', 'new': 'foobar123_updated2'},
                {'id': 'testId7', 'old': 'foobar123_updated1', 'new': 'foobar123_updated2'},
                {'id': 'testId8', 'old': 'foobar123', 'new': 'foobar123_updated2'}
            ]

            assert_updates(updates1, modifies[:2])
            assert_updates(updates2, modifies[2:])

            # assert that all removes were received

            for i, event in enumerate(removes):
                self.assertNotIn('new_image', event)
                item_id = 'testId%d' % i
                matching = [rm for rm in removes if rm['old_image']['id'] == item_id][0]
                self.assertEqual(matching['old_image'], {'id': item_id, 'data': 'foobar123'})

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=9, sleep=4)

        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_DDB)
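
The check_events function above also documents the shape of the records that arrive on the outbound Kinesis stream: each record's 'data' field is base64-encoded JSON carrying an '__action_type' of INSERT, MODIFY, or REMOVE plus 'new_image'/'old_image' dicts. A small illustration of decoding a single record, with field names taken directly from the assertions above:

import base64
import json

def decode_forwarded_record(record):
    # record['data'] is base64-encoded JSON, exactly as consumed in check_events()
    item = json.loads(base64.b64decode(record['data']))
    # e.g. {'__action_type': 'INSERT',
    #       'new_image': {'id': 'testId0', 'data': 'foobar123'}}
    return item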
Example #5
    def test_put_events_into_event_bus(self):
        queue_name = 'queue-{}'.format(short_uid())
        rule_name = 'rule-{}'.format(short_uid())
        target_id = 'target-{}'.format(short_uid())
        bus_name_1 = 'bus1-{}'.format(short_uid())
        bus_name_2 = 'bus2-{}'.format(short_uid())

        sqs_client = aws_stack.connect_to_service('sqs')
        queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

        self.events_client.create_event_bus(Name=bus_name_1)

        resp = self.events_client.create_event_bus(Name=bus_name_2)

        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=bus_name_1,
        )

        self.events_client.put_targets(Rule=rule_name,
                                       EventBusName=bus_name_1,
                                       Targets=[{
                                           'Id': target_id,
                                           'Arn': resp.get('EventBusArn')
                                       }])

        self.events_client.put_targets(Rule=rule_name,
                                       EventBusName=bus_name_2,
                                       Targets=[{
                                           'Id': target_id,
                                           'Arn': queue_arn
                                       }])

        self.events_client.put_events(
            Entries=[{
                'EventBusName': bus_name_1,
                'Source': TEST_EVENT_PATTERN['Source'][0],
                'DetailType': TEST_EVENT_PATTERN['detail-type'][0],
                'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0])
            }])

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp['Messages']

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(len(messages), 1)

        actual_event = json.loads(messages[0]['Body'])
        self.assertIsValidEvent(actual_event)
        self.assertEqual(actual_event['detail'],
                         TEST_EVENT_PATTERN['Detail'][0])

        # clean up
        sqs_client.delete_queue(QueueUrl=queue_url)

        self.events_client.remove_targets(Rule=rule_name,
                                          EventBusName=bus_name_1,
                                          Ids=[target_id],
                                          Force=True)

        self.events_client.remove_targets(Rule=rule_name,
                                          EventBusName=bus_name_2,
                                          Ids=[target_id],
                                          Force=True)

        self.events_client.delete_rule(Name=rule_name,
                                       EventBusName=bus_name_1,
                                       Force=True)
        self.events_client.delete_event_bus(Name=bus_name_1)
        self.events_client.delete_event_bus(Name=bus_name_2)
Example #6
    def test_scheduled_expression_events(self):
        class HttpEndpointListener(ProxyListener):
            def forward_request(self, method, path, data, headers):
                event = json.loads(to_str(data))
                events.append(event)
                return 200

        local_port = get_free_tcp_port()
        proxy = start_proxy(local_port,
                            backend_url=None,
                            update_listener=HttpEndpointListener())
        wait_for_port_open(local_port)

        topic_name = 'topic-{}'.format(short_uid())
        queue_name = 'queue-{}'.format(short_uid())
        fifo_queue_name = 'queue-{}.fifo'.format(short_uid())
        rule_name = 'rule-{}'.format(short_uid())
        endpoint = '{}://{}:{}'.format(get_service_protocol(),
                                       config.LOCALSTACK_HOSTNAME, local_port)
        sm_role_arn = aws_stack.role_arn('sfn_role')
        sm_name = 'state-machine-{}'.format(short_uid())
        topic_target_id = 'target-{}'.format(short_uid())
        sm_target_id = 'target-{}'.format(short_uid())
        queue_target_id = 'target-{}'.format(short_uid())
        fifo_queue_target_id = 'target-{}'.format(short_uid())

        events = []
        state_machine_definition = """
        {
            "StartAt": "Hello",
            "States": {
                "Hello": {
                    "Type": "Pass",
                    "Result": "World",
                    "End": true
                }
            }
        }
        """

        state_machine_arn = self.sfn_client.create_state_machine(
            name=sm_name,
            definition=state_machine_definition,
            roleArn=sm_role_arn)['stateMachineArn']

        topic_arn = self.sns_client.create_topic(Name=topic_name)['TopicArn']
        self.sns_client.subscribe(TopicArn=topic_arn,
                                  Protocol='http',
                                  Endpoint=endpoint)

        queue_url = self.sqs_client.create_queue(
            QueueName=queue_name)['QueueUrl']
        fifo_queue_url = self.sqs_client.create_queue(
            QueueName=fifo_queue_name,
            Attributes={'FifoQueue': 'true'})['QueueUrl']
        queue_arn = aws_stack.sqs_queue_arn(queue_name)
        fifo_queue_arn = aws_stack.sqs_queue_arn(fifo_queue_name)

        event = {'env': 'testing'}

        self.events_client.put_rule(Name=rule_name,
                                    ScheduleExpression='rate(1 minute)')

        self.events_client.put_targets(Rule=rule_name,
                                       Targets=[{
                                           'Id': topic_target_id,
                                           'Arn': topic_arn,
                                           'Input': json.dumps(event)
                                       }, {
                                           'Id': sm_target_id,
                                           'Arn': state_machine_arn,
                                           'Input': json.dumps(event)
                                       }, {
                                           'Id': queue_target_id,
                                           'Arn': queue_arn,
                                           'Input': json.dumps(event)
                                       }, {
                                           'Id': fifo_queue_target_id,
                                           'Arn': fifo_queue_arn,
                                           'Input': json.dumps(event),
                                           'SqsParameters': {
                                               'MessageGroupId': '123'
                                           }
                                       }])

        def received(q_urls):
            # state machine got executed
            executions = self.sfn_client.list_executions(
                stateMachineArn=state_machine_arn)['executions']
            self.assertGreaterEqual(len(executions), 1)

            # http endpoint got events
            self.assertGreaterEqual(len(events), 2)
            notifications = [
                e['Message'] for e in events
                if e['Type'] == 'Notification'
            ]
            self.assertGreaterEqual(len(notifications), 1)

            # get state machine execution detail
            execution_arn = executions[0]['executionArn']
            execution_input = self.sfn_client.describe_execution(
                executionArn=execution_arn)['input']

            all_msgs = []
            # get message from queue
            for url in q_urls:
                msgs = self.sqs_client.receive_message(QueueUrl=url).get(
                    'Messages', [])
                self.assertGreaterEqual(len(msgs), 1)
                all_msgs.append(msgs[0])

            return execution_input, notifications[0], all_msgs

        execution_input, notification, msgs_received = retry(
            received, retries=5, sleep=15, q_urls=[queue_url, fifo_queue_url])
        self.assertEqual(json.loads(notification), event)
        self.assertEqual(json.loads(execution_input), event)
        for msg_received in msgs_received:
            self.assertEqual(json.loads(msg_received['Body']), event)

        # clean up
        proxy.stop()
        self.cleanup(None,
                     rule_name,
                     target_ids=[topic_target_id, sm_target_id],
                     queue_url=queue_url)
        self.sns_client.delete_topic(TopicArn=topic_arn)
        self.sfn_client.delete_state_machine(stateMachineArn=state_machine_arn)
Example #7
    def test_put_event_with_content_base_rule_in_pattern(self):
        queue_name = 'queue-{}'.format(short_uid())
        rule_name = 'rule-{}'.format(short_uid())
        target_id = 'target-{}'.format(short_uid())

        sqs_client = aws_stack.connect_to_service('sqs')
        queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

        pattern = {
            'Source': [{
                'exists': True
            }],
            'detail-type': [{
                'prefix': 'core.app'
            }],
            'Detail':
            json.dumps({
                'description': ['this-is-event-details'],
                'amount': [200],
                'salary': [2000, 4000],
                'env': ['dev', 'prod'],
                'user': ['user1', 'user2', 'user3'],
                'admins': ['skyli', {
                    'prefix': 'hey'
                }, {
                    'prefix': 'ad'
                }],
                'test1': [{
                    'anything-but': 200
                }],
                'test2': [{
                    'anything-but': 'test2'
                }],
                'test3': [{
                    'anything-but': ['test3', 'test33']
                }],
                'test4': [{
                    'anything-but': {
                        'prefix': 'test4'
                    }
                }],
                'ip': [{
                    'cidr': '10.102.1.0/24'
                }],
                'num-test1': [{
                    'numeric': ['<', 200]
                }],
                'num-test2': [{
                    'numeric': ['<=', 200]
                }],
                'num-test3': [{
                    'numeric': ['>', 200]
                }],
                'num-test4': [{
                    'numeric': ['>=', 200]
                }],
                'num-test5': [{
                    'numeric': ['>=', 200, '<=', 500]
                }],
                'num-test6': [{
                    'numeric': ['>', 200, '<', 500]
                }],
                'num-test7': [{
                    'numeric': ['>=', 200, '<', 500]
                }]
            })
        }

        event = {
            'EventBusName':
            TEST_EVENT_BUS_NAME,
            'Source':
            'core.update-account-command',
            'DetailType':
            'core.app.backend',
            'Detail':
            json.dumps({
                'description': 'this-is-event-details',
                'amount': 200,
                'salary': 2000,
                'env': 'prod',
                'user': ['user4', 'user3'],
                'admins': 'admin',
                'test1': 300,
                'test2': 'test22',
                'test3': 'test333',
                'test4': 'this test4',
                'ip': '10.102.1.100',
                'num-test1': 100,
                'num-test2': 200,
                'num-test3': 300,
                'num-test4': 200,
                'num-test5': 500,
                'num-test6': 300,
                'num-test7': 300
            })
        }

        self.events_client.create_event_bus(Name=TEST_EVENT_BUS_NAME)
        self.events_client.put_rule(Name=rule_name,
                                    EventBusName=TEST_EVENT_BUS_NAME,
                                    EventPattern=json.dumps(pattern))

        self.events_client.put_targets(Rule=rule_name,
                                       EventBusName=TEST_EVENT_BUS_NAME,
                                       Targets=[{
                                           'Id': target_id,
                                           'Arn': queue_arn,
                                           'InputPath': '$.detail'
                                       }])
        self.events_client.put_events(Entries=[event])

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp.get('Messages')

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(len(messages), 1)
        self.assertEqual(json.loads(messages[0].get('Body')),
                         json.loads(event['Detail']))

        event_details = json.loads(event['Detail'])
        event_details['admins'] = 'not_admin'
        event['Detail'] = json.dumps(event_details)

        self.events_client.put_events(Entries=[event])

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(messages, None)

        # clean up
        self.cleanup(TEST_EVENT_BUS_NAME,
                     rule_name,
                     target_id,
                     queue_url=queue_url)
Example #8
    def test_schedule_expression_event_stepfunction(self):
        state_machine_name = 'state-machine-{}'.format(short_uid())
        role_name = 'role-{}'.format(short_uid())
        rule_name = 'rule-{}'.format(short_uid())
        target_id = 'target-{}'.format(short_uid())

        state_machine_definition = """
        {
            "StartAt": "Hello",
            "States": {
                "Hello": {
                    "Type": "Pass",
                    "Result": "World",
                    "End": true
                }
            }
        }
        """

        assume_role_policy_document = """
        {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "events.amazonaws.com"
                    },
                    "Action": "sts:AssumeRole"
                }
            ]
        }
        """

        state_machine_role_arn = self.iam_client.create_role(
            RoleName=role_name,
            AssumeRolePolicyDocument=assume_role_policy_document
        )['Role']['Arn']

        state_machine_arn = self.stepfunctions_client.create_state_machine(
            name=state_machine_name,
            definition=state_machine_definition,
            roleArn=state_machine_role_arn)['stateMachineArn']

        event = {'env': 'testing'}

        self.events_client.put_rule(Name=rule_name,
                                    ScheduleExpression='rate(1 minute)')

        self.events_client.put_targets(Rule=rule_name,
                                       Targets=[{
                                           'Id': target_id,
                                           'Arn': state_machine_arn,
                                           'Input': json.dumps(event)
                                       }])

        def check_executions():
            executions = self.stepfunctions_client.list_executions(
                stateMachineArn=state_machine_arn)['executions']
            self.assertGreaterEqual(len(executions), 1)
            execution_arn = executions[0]['executionArn']
            execution_input = self.stepfunctions_client.describe_execution(
                executionArn=execution_arn)['input']
            self.assertEqual(execution_input, json.dumps(event))

        retry(check_executions, retries=2, sleep=40)

        self.events_client.remove_targets(Rule=rule_name,
                                          Ids=[target_id],
                                          Force=True)
        self.events_client.delete_rule(Name=rule_name, Force=True)

        self.iam_client.delete_role(RoleName=role_name)

        self.stepfunctions_client.delete_state_machine(
            stateMachineArn=state_machine_arn)
Example #9
    def test_create_delete_stack(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        cf_client = aws_stack.connect_to_service('cloudformation')
        s3 = aws_stack.connect_to_service('s3')
        sns = aws_stack.connect_to_service('sns')
        apigateway = aws_stack.connect_to_service('apigateway')
        template = template_deployer.template_to_json(
            load_file(TEST_TEMPLATE_1))

        # deploy template
        stack_name = 'stack-%s' % short_uid()
        cloudformation.create_stack(StackName=stack_name,
                                    TemplateBody=template)

        # wait for deployment to finish
        def check_stack():
            stack = get_stack_details(stack_name)
            self.assertEqual(stack['StackStatus'], 'CREATE_COMPLETE')

        retry(check_stack, retries=3, sleep=2)

        # assert that resources have been created
        assert bucket_exists('cf-test-bucket-1')
        assert queue_exists('cf-test-queue-1')
        topic_arn = topic_exists('%s-test-topic-1-1' % stack_name)
        assert topic_arn
        assert stream_exists('cf-test-stream-1')
        resource = describe_stack_resource(stack_name,
                                           'SQSQueueNoNameProperty')
        assert queue_exists(resource['PhysicalResourceId'])

        # assert that tags have been created
        tags = s3.get_bucket_tagging(Bucket='cf-test-bucket-1')['TagSet']
        self.assertEqual(
            tags, [{
                'Key': 'foobar',
                'Value': aws_stack.get_sqs_queue_url('cf-test-queue-1')
            }])
        tags = sns.list_tags_for_resource(ResourceArn=topic_arn)['Tags']
        self.assertEqual(
            tags, [{
                'Key': 'foo',
                'Value': 'cf-test-bucket-1'
            }, {
                'Key': 'bar',
                'Value': aws_stack.s3_bucket_arn('cf-test-bucket-1')
            }])

        # assert that subscriptions have been created
        subs = sns.list_subscriptions()['Subscriptions']
        subs = [
            s for s in subs
            if (':%s:cf-test-queue-1' % TEST_AWS_ACCOUNT_ID) in s['Endpoint']
        ]
        self.assertEqual(len(subs), 1)
        self.assertIn(
            ':%s:%s-test-topic-1-1' % (TEST_AWS_ACCOUNT_ID, stack_name),
            subs[0]['TopicArn'])
        # assert that subscription attributes are added properly
        attrs = sns.get_subscription_attributes(
            SubscriptionArn=subs[0]['SubscriptionArn'])['Attributes']
        self.assertEqual(
            attrs, {
                'Endpoint': subs[0]['Endpoint'],
                'Protocol': 'sqs',
                'SubscriptionArn': subs[0]['SubscriptionArn'],
                'TopicArn': subs[0]['TopicArn'],
                'FilterPolicy': json.dumps({'eventType': ['created']})
            })

        # assert that Gateway responses have been created
        test_api_name = 'test-api'
        api = [
            a for a in apigateway.get_rest_apis()['items']
            if a['name'] == test_api_name
        ][0]
        responses = apigateway.get_gateway_responses(
            restApiId=api['id'])['items']
        self.assertEqual(len(responses), 2)
        types = [r['responseType'] for r in responses]
        self.assertEqual(set(types), set(['UNAUTHORIZED', 'DEFAULT_5XX']))

        # delete the stack
        cf_client.delete_stack(StackName=stack_name)

        # assert that resources have been deleted
        assert not bucket_exists('cf-test-bucket-1')
        assert not queue_exists('cf-test-queue-1')
        assert not topic_exists('%s-test-topic-1-1' % stack_name)
        retry(lambda: self.assertFalse(stream_exists('cf-test-stream-1')))
Example #10
    def test_put_event_with_content_base_rule_in_pattern(self):
        queue_name = "queue-{}".format(short_uid())
        rule_name = "rule-{}".format(short_uid())
        target_id = "target-{}".format(short_uid())

        sqs_client = aws_stack.create_external_boto_client("sqs")
        queue_url = sqs_client.create_queue(QueueName=queue_name)["QueueUrl"]
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

        pattern = {
            "Source": [{
                "exists": True
            }],
            "detail-type": [{
                "prefix": "core.app"
            }],
            "Detail": {
                "decription": ["this-is-event-details"],
                "amount": [200],
                "salary": [2000, 4000],
                "env": ["dev", "prod"],
                "user": ["user1", "user2", "user3"],
                "admins": ["skyli", {
                    "prefix": "hey"
                }, {
                    "prefix": "ad"
                }],
                "test1": [{
                    "anything-but": 200
                }],
                "test2": [{
                    "anything-but": "test2"
                }],
                "test3": [{
                    "anything-but": ["test3", "test33"]
                }],
                "test4": [{
                    "anything-but": {
                        "prefix": "test4"
                    }
                }],
                "ip": [{
                    "cidr": "10.102.1.0/24"
                }],
                "num-test1": [{
                    "numeric": ["<", 200]
                }],
                "num-test2": [{
                    "numeric": ["<=", 200]
                }],
                "num-test3": [{
                    "numeric": [">", 200]
                }],
                "num-test4": [{
                    "numeric": [">=", 200]
                }],
                "num-test5": [{
                    "numeric": [">=", 200, "<=", 500]
                }],
                "num-test6": [{
                    "numeric": [">", 200, "<", 500]
                }],
                "num-test7": [{
                    "numeric": [">=", 200, "<", 500]
                }],
            },
        }

        event = {
            "EventBusName":
            TEST_EVENT_BUS_NAME,
            "Source":
            "core.update-account-command",
            "DetailType":
            "core.app.backend",
            "Detail":
            json.dumps({
                "decription": "this-is-event-details",
                "amount": 200,
                "salary": 2000,
                "env": "prod",
                "user": "******",
                "admins": "admin",
                "test1": 300,
                "test2": "test22",
                "test3": "test333",
                "test4": "this test4",
                "ip": "10.102.1.100",
                "num-test1": 100,
                "num-test2": 200,
                "num-test3": 300,
                "num-test4": 200,
                "num-test5": 500,
                "num-test6": 300,
                "num-test7": 300,
            }),
        }

        self.events_client.create_event_bus(Name=TEST_EVENT_BUS_NAME)
        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=TEST_EVENT_BUS_NAME,
            EventPattern=json.dumps(pattern),
        )

        self.events_client.put_targets(
            Rule=rule_name,
            EventBusName=TEST_EVENT_BUS_NAME,
            Targets=[{
                "Id": target_id,
                "Arn": queue_arn,
                "InputPath": "$.detail"
            }],
        )
        self.events_client.put_events(Entries=[event])

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp.get("Messages")

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(1, len(messages))
        self.assertEqual(json.loads(event["Detail"]),
                         json.loads(messages[0].get("Body")))
        event_details = json.loads(event["Detail"])
        event_details["admins"] = "no"
        event["Detail"] = json.dumps(event_details)

        self.events_client.put_events(Entries=[event])

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertIsNone(messages)

        # clean up
        self.cleanup(TEST_EVENT_BUS_NAME,
                     rule_name,
                     target_id,
                     queue_url=queue_url)
Example #11
    def test_put_events_with_target_sns(self):
        queue_name = 'test-%s' % short_uid()
        rule_name = 'rule-{}'.format(short_uid())
        target_id = 'target-{}'.format(short_uid())
        bus_name = 'bus-{}'.format(short_uid())

        sns_client = aws_stack.connect_to_service('sns')
        sqs_client = aws_stack.connect_to_service('sqs')
        topic_name = 'topic-{}'.format(short_uid())
        topic_arn = sns_client.create_topic(Name=topic_name)['TopicArn']

        queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

        sns_client.subscribe(TopicArn=topic_arn, Protocol='sqs', Endpoint=queue_arn)

        self.events_client.create_event_bus(Name=bus_name)
        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=bus_name,
            EventPattern=json.dumps(TEST_EVENT_PATTERN)
        )
        rs = self.events_client.put_targets(
            Rule=rule_name,
            EventBusName=bus_name,
            Targets=[
                {
                    'Id': target_id,
                    'Arn': topic_arn
                }
            ]
        )

        self.assertIn('FailedEntryCount', rs)
        self.assertIn('FailedEntries', rs)
        self.assertEqual(rs['FailedEntryCount'], 0)
        self.assertEqual(rs['FailedEntries'], [])

        self.events_client.put_events(
            Entries=[{
                'EventBusName': bus_name,
                'Source': TEST_EVENT_PATTERN['Source'][0],
                'DetailType': TEST_EVENT_PATTERN['detail-type'][0],
                'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0])
            }]
        )

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp['Messages']

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(len(messages), 1)

        actual_event = json.loads(messages[0]['Body']).get('Message')
        self.assertIsValidEvent(actual_event)
        self.assertEqual(json.loads(actual_event).get('detail'), TEST_EVENT_PATTERN['Detail'][0])

        # clean up
        sns_client.delete_topic(TopicArn=topic_arn)
        self.cleanup(bus_name, rule_name, target_id, queue_url=queue_url)
Example #12
def _await_stack_status(stack_name, expected_status, retries=3, sleep=2):
    def check_stack():
        stack = get_stack_details(stack_name)
        assert stack['StackStatus'] == expected_status
        return stack
    return retry(check_stack, retries, sleep)
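
Example #13 below calls an _await_stack_completion helper that is not shown on this page. Given _await_stack_status above, it is plausibly a thin wrapper along these lines (the exact signature is an assumption):

def _await_stack_completion(stack_name, retries=3, sleep=2):
    # hypothetical convenience wrapper: block until the stack reports CREATE_COMPLETE
    return _await_stack_status(stack_name, 'CREATE_COMPLETE', retries, sleep)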
Example #13
    def test_create_delete_stack(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        cf_client = aws_stack.connect_to_service('cloudformation')
        s3 = aws_stack.connect_to_service('s3')
        sns = aws_stack.connect_to_service('sns')
        sqs = aws_stack.connect_to_service('sqs')
        apigateway = aws_stack.connect_to_service('apigateway')
        template = template_deployer.template_to_json(load_file(TEST_TEMPLATE_1))

        # deploy template
        stack_name = 'stack-%s' % short_uid()
        cloudformation.create_stack(StackName=stack_name, TemplateBody=template)

        _await_stack_completion(stack_name)

        # assert that resources have been created
        assert bucket_exists('cf-test-bucket-1')
        queue_url = queue_exists('cf-test-queue-1')
        assert queue_url
        topic_arn = topic_exists('%s-test-topic-1-1' % stack_name)
        assert topic_arn
        assert stream_exists('cf-test-stream-1')
        resource = describe_stack_resource(stack_name, 'SQSQueueNoNameProperty')
        assert queue_exists(resource['PhysicalResourceId'])
        assert ssm_param_exists('cf-test-param-1')

        # assert that tags have been created
        tags = s3.get_bucket_tagging(Bucket='cf-test-bucket-1')['TagSet']
        self.assertEqual(tags, [{'Key': 'foobar', 'Value': aws_stack.get_sqs_queue_url('cf-test-queue-1')}])
        tags = sns.list_tags_for_resource(ResourceArn=topic_arn)['Tags']
        self.assertEqual(tags, [
            {'Key': 'foo', 'Value': 'cf-test-bucket-1'},
            {'Key': 'bar', 'Value': aws_stack.s3_bucket_arn('cf-test-bucket-1')}
        ])
        queue_tags = sqs.list_queue_tags(QueueUrl=queue_url)
        self.assertIn('Tags', queue_tags)
        self.assertEqual(queue_tags['Tags'], {'key1': 'value1', 'key2': 'value2'})

        # assert that bucket notifications have been created
        notifications = s3.get_bucket_notification_configuration(Bucket='cf-test-bucket-1')
        self.assertIn('QueueConfigurations', notifications)
        self.assertIn('LambdaFunctionConfigurations', notifications)
        self.assertEqual(notifications['QueueConfigurations'][0]['QueueArn'], 'aws:arn:sqs:test:testqueue')
        self.assertEqual(notifications['QueueConfigurations'][0]['Events'], ['s3:ObjectDeleted:*'])
        self.assertEqual(
            notifications['LambdaFunctionConfigurations'][0]['LambdaFunctionArn'],
            'aws:arn:lambda:test:testfunc'
        )
        self.assertEqual(notifications['LambdaFunctionConfigurations'][0]['Events'], ['s3:ObjectCreated:*'])

        # assert that subscriptions have been created
        subs = sns.list_subscriptions()['Subscriptions']
        subs = [s for s in subs if (':%s:cf-test-queue-1' % TEST_AWS_ACCOUNT_ID) in s['Endpoint']]
        self.assertEqual(len(subs), 1)
        self.assertIn(':%s:%s-test-topic-1-1' % (TEST_AWS_ACCOUNT_ID, stack_name), subs[0]['TopicArn'])
        # assert that subscription attributes are added properly
        attrs = sns.get_subscription_attributes(SubscriptionArn=subs[0]['SubscriptionArn'])['Attributes']
        self.assertEqual(attrs, {'Endpoint': subs[0]['Endpoint'], 'Protocol': 'sqs',
            'SubscriptionArn': subs[0]['SubscriptionArn'], 'TopicArn': subs[0]['TopicArn'],
            'FilterPolicy': json.dumps({'eventType': ['created']})})

        # assert that Gateway responses have been created
        test_api_name = 'test-api'
        api = [a for a in apigateway.get_rest_apis()['items'] if a['name'] == test_api_name][0]
        responses = apigateway.get_gateway_responses(restApiId=api['id'])['items']
        self.assertEqual(len(responses), 2)
        types = [r['responseType'] for r in responses]
        self.assertEqual(set(types), set(['UNAUTHORIZED', 'DEFAULT_5XX']))

        # delete the stack
        cf_client.delete_stack(StackName=stack_name)

        # assert that resources have been deleted
        assert not bucket_exists('cf-test-bucket-1')
        assert not queue_exists('cf-test-queue-1')
        assert not topic_exists('%s-test-topic-1-1' % stack_name)
        retry(lambda: self.assertFalse(stream_exists('cf-test-stream-1')))
Example #14
    def test_put_events_with_target_sqs(self):
        queue_name = 'queue-{}'.format(short_uid())
        rule_name = 'rule-{}'.format(short_uid())
        target_id = 'target-{}'.format(short_uid())

        sqs_client = aws_stack.connect_to_service('sqs')
        queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

        self.events_client.create_event_bus(
            Name=TEST_EVENT_BUS_NAME
        )

        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=TEST_EVENT_BUS_NAME,
            EventPattern=json.dumps(TEST_EVENT_PATTERN)
        )

        rs = self.events_client.put_targets(
            Rule=rule_name,
            EventBusName=TEST_EVENT_BUS_NAME,
            Targets=[
                {
                    'Id': target_id,
                    'Arn': queue_arn
                }
            ]
        )

        self.assertIn('FailedEntryCount', rs)
        self.assertIn('FailedEntries', rs)
        self.assertEqual(rs['FailedEntryCount'], 0)
        self.assertEqual(rs['FailedEntries'], [])

        self.events_client.put_events(
            Entries=[{
                'EventBusName': TEST_EVENT_BUS_NAME,
                'Source': TEST_EVENT_PATTERN['Source'],
                'DetailType': TEST_EVENT_PATTERN['DetailType'],
                'Detail': TEST_EVENT_PATTERN['Detail']
            }]
        )

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp['Messages']

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(len(messages), 1)
        self.assertEqual(messages[0]['Body'], TEST_EVENT_PATTERN['Detail'])

        # clean up
        sqs_client.delete_queue(QueueUrl=queue_url)

        self.events_client.remove_targets(
            Rule=rule_name,
            EventBusName=TEST_EVENT_BUS_NAME,
            Ids=[target_id],
            Force=True
        )
        self.events_client.delete_rule(
            Name=rule_name,
            EventBusName=TEST_EVENT_BUS_NAME,
            Force=True
        )
        self.events_client.delete_event_bus(
            Name=TEST_EVENT_BUS_NAME
        )
Example #15
    def test_sqs_batch_lambda_forward(self):
        sqs = aws_stack.connect_to_service('sqs')
        lambda_api = aws_stack.connect_to_service('lambda')

        lambda_name_queue_batch = 'lambda_queue_batch-%s' % short_uid()

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(lambda_name_queue_batch)
        queue_url = sqs_queue_info['QueueUrl']
        resp = testutil.create_lambda_function(
            handler_file=TEST_LAMBDA_PYTHON_ECHO,
            func_name=lambda_name_queue_batch,
            event_source_arn=sqs_queue_info['QueueArn'],
            runtime=LAMBDA_RUNTIME_PYTHON27,
            libs=TEST_LAMBDA_LIBS)

        event_source_id = resp['CreateEventSourceMappingResponse']['UUID']
        lambda_api.update_event_source_mapping(UUID=event_source_id,
                                               BatchSize=5)

        messages_to_send = [{
            'Id': 'message{:02d}'.format(i),
            'MessageBody': 'msgBody{:02d}'.format(i),
            'MessageAttributes': {
                'CustomAttribute': {
                    'DataType': 'String',
                    'StringValue': 'CustomAttributeValue{:02d}'.format(i)
                }
            }
        } for i in range(1, 12)]

        # send 11 messages (which should get split into 3 batches)
        sqs.send_message_batch(QueueUrl=queue_url,
                               Entries=messages_to_send[:10])
        sqs.send_message(
            QueueUrl=queue_url,
            MessageBody=messages_to_send[10]['MessageBody'],
            MessageAttributes=messages_to_send[10]['MessageAttributes'])

        def wait_for_done():
            attributes = sqs.get_queue_attributes(
                QueueUrl=queue_url,
                AttributeNames=[
                    'ApproximateNumberOfMessages',
                    'ApproximateNumberOfMessagesDelayed',
                    'ApproximateNumberOfMessagesNotVisible'
                ],
            )['Attributes']
            msg_count = int(attributes.get('ApproximateNumberOfMessages'))
            self.assertEqual(msg_count, 0, 'expecting queue to be empty')

            delayed_count = int(
                attributes.get('ApproximateNumberOfMessagesDelayed'))
            if delayed_count != 0:
                LOGGER.warning(
                    'SQS delayed message count (actual/expected): %s/%s' %
                    (delayed_count, 0))

            not_visible_count = int(
                attributes.get('ApproximateNumberOfMessagesNotVisible'))
            if not_visible_count != 0:
                LOGGER.warning(
                    'SQS messages not visible (actual/expected): %s/%s' %
                    (not_visible_count, 0))

            self.assertEqual(delayed_count, 0, 'no messages waiting for retry')
            self.assertEqual(delayed_count + not_visible_count, 0,
                             'no in flight messages')

        # wait for the queue to drain (max 60s)
        retry(wait_for_done, retries=12, sleep=5.0)

        events = get_lambda_log_events(lambda_name_queue_batch, 10)
        self.assertEqual(len(events), 3, 'expected 3 lambda invocations')

        testutil.delete_lambda_function(lambda_name_queue_batch)
        sqs.delete_queue(QueueUrl=queue_url)
Example #16
def test_sqs_batch_lambda_forward(lambda_client, sqs_client,
                                  create_lambda_function):

    lambda_name_queue_batch = "lambda_queue_batch-%s" % short_uid()

    # deploy test lambda connected to SQS queue
    sqs_queue_info = testutil.create_sqs_queue(lambda_name_queue_batch)
    queue_url = sqs_queue_info["QueueUrl"]
    resp = create_lambda_function(
        handler_file=TEST_LAMBDA_PYTHON_ECHO,
        func_name=lambda_name_queue_batch,
        event_source_arn=sqs_queue_info["QueueArn"],
        libs=TEST_LAMBDA_LIBS,
    )

    event_source_id = resp["CreateEventSourceMappingResponse"]["UUID"]
    lambda_client.update_event_source_mapping(UUID=event_source_id,
                                              BatchSize=5)

    messages_to_send = [{
        "Id": "message{:02d}".format(i),
        "MessageBody": "msgBody{:02d}".format(i),
        "MessageAttributes": {
            "CustomAttribute": {
                "DataType": "String",
                "StringValue": "CustomAttributeValue{:02d}".format(i),
            }
        },
    } for i in range(1, 12)]

    # send 11 messages (which should get split into 3 batches)
    sqs_client.send_message_batch(QueueUrl=queue_url,
                                  Entries=messages_to_send[:10])
    sqs_client.send_message(
        QueueUrl=queue_url,
        MessageBody=messages_to_send[10]["MessageBody"],
        MessageAttributes=messages_to_send[10]["MessageAttributes"],
    )

    def wait_for_done():
        attributes = sqs_client.get_queue_attributes(
            QueueUrl=queue_url,
            AttributeNames=[
                "ApproximateNumberOfMessages",
                "ApproximateNumberOfMessagesDelayed",
                "ApproximateNumberOfMessagesNotVisible",
            ],
        )["Attributes"]
        msg_count = int(attributes.get("ApproximateNumberOfMessages"))
        assert 0 == msg_count, "expecting queue to be empty"

        delayed_count = int(
            attributes.get("ApproximateNumberOfMessagesDelayed"))
        if delayed_count != 0:
            LOGGER.warning(
                "SQS delayed message count (actual/expected): %s/%s" %
                (delayed_count, 0))

        not_visible_count = int(
            attributes.get("ApproximateNumberOfMessagesNotVisible"))
        if not_visible_count != 0:
            LOGGER.warning(
                "SQS messages not visible (actual/expected): %s/%s" %
                (not_visible_count, 0))

        assert 0 == delayed_count, "no messages waiting for retry"
        assert 0 == delayed_count + not_visible_count, "no in flight messages"

    # wait for the queue to drain (max 60s)
    retry(wait_for_done, retries=12, sleep=5.0)

    def check_lambda_logs():
        events = get_lambda_log_events(lambda_name_queue_batch, 10)
        assert 3 == len(events), "expected 3 lambda invocations"

    retry(check_lambda_logs, retries=5, sleep=3)

    sqs_client.delete_queue(QueueUrl=queue_url)
Example #17
    def test_lambda_streams_batch_and_transactions(self):
        ddb_lease_table_suffix = "-kclapp2"
        table_name = TEST_TABLE_NAME + "lsbat" + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        lambda_ddb_name = "lambda-ddb-%s" % short_uid()
        dynamodb = aws_stack.connect_to_service("dynamodb", client=True)
        dynamodb_service = aws_stack.connect_to_service("dynamodb")
        dynamodbstreams = aws_stack.connect_to_service("dynamodbstreams")

        LOGGER.info("Creating test streams...")
        run_safe(
            lambda: dynamodb_service.delete_table(TableName=stream_name +
                                                  ddb_lease_table_suffix),
            print_error=False,
        )
        aws_stack.create_kinesis_stream(stream_name, delete=True)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(
            stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix=ddb_lease_table_suffix,
        )

        LOGGER.info("Kinesis consumer initialized.")

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(
            table_name,
            partition_key=PARTITION_KEY,
            stream_view_type="NEW_AND_OLD_IMAGES",
        )

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams["Streams"]:
            if stream["TableName"] == table_name:
                ddb_event_source_arn = stream["StreamArn"]
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        testutil.create_lambda_function(
            handler_file=TEST_LAMBDA_PYTHON,
            libs=TEST_LAMBDA_LIBS,
            func_name=lambda_ddb_name,
            event_source_arn=ddb_event_source_arn,
            delete=True,
        )

        # submit a batch with writes
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId0"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId1"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId2"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                ]
            })

        # submit a batch with writes and deletes
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId3"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId4"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId5"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "DeleteRequest": {
                            "Key": {
                                PARTITION_KEY: {
                                    "S": "testId0"
                                }
                            }
                        }
                    },
                    {
                        "DeleteRequest": {
                            "Key": {
                                PARTITION_KEY: {
                                    "S": "testId1"
                                }
                            }
                        }
                    },
                    {
                        "DeleteRequest": {
                            "Key": {
                                PARTITION_KEY: {
                                    "S": "testId2"
                                }
                            }
                        }
                    },
                ]
            })

        # submit a transaction with writes and deletes
        dynamodb.transact_write_items(TransactItems=[
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId6"
                        },
                        "data": {
                            "S": "foobar123"
                        },
                    },
                }
            },
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId7"
                        },
                        "data": {
                            "S": "foobar123"
                        },
                    },
                }
            },
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId8"
                        },
                        "data": {
                            "S": "foobar123"
                        },
                    },
                }
            },
            {
                "Delete": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId3"
                        }
                    },
                }
            },
            {
                "Delete": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId4"
                        }
                    },
                }
            },
            {
                "Delete": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId5"
                        }
                    },
                }
            },
        ])

        # submit a batch with a put over an existing item
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId6"
                                },
                                "data": {
                                    "S": "foobar123_updated1"
                                },
                            }
                        }
                    },
                ]
            })

        # submit a transaction with a put over an existing item
        dynamodb.transact_write_items(TransactItems=[
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId7"
                        },
                        "data": {
                            "S": "foobar123_updated1"
                        },
                    },
                }
            },
        ])

        # submit a transaction with updates
        dynamodb.transact_write_items(TransactItems=[
            {
                "Update": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId6"
                        }
                    },
                    "UpdateExpression": "SET #0 = :0",
                    "ExpressionAttributeNames": {
                        "#0": "data"
                    },
                    "ExpressionAttributeValues": {
                        ":0": {
                            "S": "foobar123_updated2"
                        }
                    },
                }
            },
            {
                "Update": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId7"
                        }
                    },
                    "UpdateExpression": "SET #0 = :0",
                    "ExpressionAttributeNames": {
                        "#0": "data"
                    },
                    "ExpressionAttributeValues": {
                        ":0": {
                            "S": "foobar123_updated2"
                        }
                    },
                }
            },
            {
                "Update": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId8"
                        }
                    },
                    "UpdateExpression": "SET #0 = :0",
                    "ExpressionAttributeNames": {
                        "#0": "data"
                    },
                    "ExpressionAttributeValues": {
                        ":0": {
                            "S": "foobar123_updated2"
                        }
                    },
                }
            },
        ])

        LOGGER.info("Waiting some time before finishing test.")
        time.sleep(2)

        num_insert = 9
        num_modify = 5
        num_delete = 6
        num_events = num_insert + num_modify + num_delete

        def check_events():
            if len(events) != num_events:
                msg = "DynamoDB updates retrieved (actual/expected): %s/%s" % (
                    len(events),
                    num_events,
                )
                LOGGER.warning(msg)
            self.assertEqual(num_events, len(events))
            event_items = [
                json.loads(base64.b64decode(e["data"])) for e in events
            ]
            # make sure we have the right number of expected event types
            inserts = [
                e for e in event_items if e.get("__action_type") == "INSERT"
            ]
            modifies = [
                e for e in event_items if e.get("__action_type") == "MODIFY"
            ]
            removes = [
                e for e in event_items if e.get("__action_type") == "REMOVE"
            ]
            self.assertEqual(num_insert, len(inserts))
            self.assertEqual(num_modify, len(modifies))
            self.assertEqual(num_delete, len(removes))

            # assert that all inserts were received

            for i, event in enumerate(inserts):
                self.assertNotIn("old_image", event)
                item_id = "testId%d" % i
                matching = [
                    e for e in inserts if e["new_image"]["id"] == item_id
                ][0]
                self.assertEqual({
                    "id": item_id,
                    "data": "foobar123"
                }, matching["new_image"])

            # assert that all updates were received

            def assert_updates(expected_updates, modifies):
                def found(update):
                    for modif in modifies:
                        if modif["old_image"]["id"] == update["id"]:
                            self.assertEqual(
                                modif["old_image"],
                                {
                                    "id": update["id"],
                                    "data": update["old"]
                                },
                            )
                            self.assertEqual(
                                modif["new_image"],
                                {
                                    "id": update["id"],
                                    "data": update["new"]
                                },
                            )
                            return True
                    return False

                for update in expected_updates:
                    self.assertTrue(found(update))

            updates1 = [
                {
                    "id": "testId6",
                    "old": "foobar123",
                    "new": "foobar123_updated1"
                },
                {
                    "id": "testId7",
                    "old": "foobar123",
                    "new": "foobar123_updated1"
                },
            ]
            updates2 = [
                {
                    "id": "testId6",
                    "old": "foobar123_updated1",
                    "new": "foobar123_updated2",
                },
                {
                    "id": "testId7",
                    "old": "foobar123_updated1",
                    "new": "foobar123_updated2",
                },
                {
                    "id": "testId8",
                    "old": "foobar123",
                    "new": "foobar123_updated2"
                },
            ]

            assert_updates(updates1, modifies[:2])
            assert_updates(updates2, modifies[2:])

            # assert that all removes were received

            for i, event in enumerate(removes):
                self.assertNotIn("new_image", event)
                item_id = "testId%d" % i
                matching = [
                    i for i in removes if i["old_image"]["id"] == item_id
                ][0]
                self.assertEqual({
                    "id": item_id,
                    "data": "foobar123"
                }, matching["old_image"])

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=9, sleep=4)

        # clean up
        testutil.delete_lambda_function(lambda_ddb_name)
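
All of these tests poll asynchronous results through a `retry` helper imported from the LocalStack utilities. Its implementation is not part of these excerpts; a minimal sketch of the semantics the call sites rely on (re-invoke on any exception, forward keyword arguments, optional `sleep_before` delay) could look like this:

import time

def retry(function, retries=3, sleep=1.0, sleep_before=0, **kwargs):
    # Sketch: call function(**kwargs) until it stops raising, sleeping between
    # attempts; re-raise the last error once the attempts are exhausted.
    if sleep_before > 0:
        time.sleep(sleep_before)
    last_error = None
    for _ in range(retries + 1):
        try:
            return function(**kwargs)
        except Exception as e:
            last_error = e
            time.sleep(sleep)
    raise last_error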
Example #18
0
    def test_put_events_with_target_sqs_event_detail_match(self):
        queue_name = "queue-{}".format(short_uid())
        rule_name = "rule-{}".format(short_uid())
        target_id = "target-{}".format(short_uid())
        bus_name = "bus-{}".format(short_uid())

        sqs_client = aws_stack.create_external_boto_client("sqs")
        queue_url = sqs_client.create_queue(QueueName=queue_name)["QueueUrl"]
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

        self.events_client.create_event_bus(Name=bus_name)

        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=bus_name,
            EventPattern=json.dumps({"detail": {
                "EventType": ["0", "1"]
            }}),
        )

        rs = self.events_client.put_targets(
            Rule=rule_name,
            EventBusName=bus_name,
            Targets=[{
                "Id": target_id,
                "Arn": queue_arn,
                "InputPath": "$.detail"
            }],
        )

        self.assertIn("FailedEntryCount", rs)
        self.assertIn("FailedEntries", rs)
        self.assertEqual(0, rs["FailedEntryCount"])
        self.assertEqual([], rs["FailedEntries"])

        self.events_client.put_events(
            Entries=[{
                "EventBusName": bus_name,
                "Source": TEST_EVENT_PATTERN["Source"][0],
                "DetailType": TEST_EVENT_PATTERN["detail-type"][0],
                "Detail": json.dumps({"EventType": "1"}),
            }])

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp.get("Messages")

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(1, len(messages))

        actual_event = json.loads(messages[0]["Body"])
        self.assertEqual({"EventType": "1"}, actual_event)

        self.events_client.put_events(
            Entries=[{
                "EventBusName": bus_name,
                "Source": TEST_EVENT_PATTERN["Source"][0],
                "DetailType": TEST_EVENT_PATTERN["detail-type"][0],
                "Detail": json.dumps({"EventType": "2"}),
            }])

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp.get("Messages", [])

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(0, len(messages))

        # clean up
        self.cleanup(bus_name, rule_name, target_id, queue_url=queue_url)
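
Several of these EventBridge tests finish with `self.cleanup(...)`. The helper is defined on the test class and not shown in these excerpts; judging purely from the call sites, it presumably detaches the targets, deletes the rule and the custom bus, and removes the helper queue, roughly like this:

def cleanup(self, bus_name=None, rule_name=None, target_ids=None, queue_url=None):
    # Hypothetical reconstruction from the call sites, not the original helper.
    kwargs = {"EventBusName": bus_name} if bus_name else {}
    if target_ids:
        ids = target_ids if isinstance(target_ids, list) else [target_ids]
        self.events_client.remove_targets(Rule=rule_name, Ids=ids, Force=True, **kwargs)
    if rule_name:
        self.events_client.delete_rule(Name=rule_name, Force=True, **kwargs)
    if bus_name:
        self.events_client.delete_event_bus(Name=bus_name)
    if queue_url:
        sqs_client = aws_stack.create_external_boto_client("sqs")
        sqs_client.delete_queue(QueueUrl=queue_url)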
Example #19
0
    def test_put_subscription_filter_kinesis(
        self,
        logs_client,
        logs_log_group,
        logs_log_stream,
        kinesis_client,
        iam_client,
        create_iam_role_with_policy,
    ):

        kinesis_name = f"test-kinesis-{short_uid()}"
        filter_name = "Destination"
        kinesis_client.create_stream(StreamName=kinesis_name, ShardCount=1)

        try:
            result = kinesis_client.describe_stream(
                StreamName=kinesis_name)["StreamDescription"]
            kinesis_arn = result["StreamARN"]
            role = f"test-kinesis-role-{short_uid()}"
            policy_name = f"test-kinesis-role-policy-{short_uid()}"
            role_arn = create_iam_role_with_policy(
                RoleName=role,
                PolicyName=policy_name,
                RoleDefinition=logs_role,
                PolicyDefinition=kinesis_permission,
            )

            # wait for stream-status "ACTIVE"
            status = result["StreamStatus"]
            if status != "ACTIVE":

                def check_stream_active():
                    state = kinesis_client.describe_stream(
                        StreamName=kinesis_name
                    )["StreamDescription"]["StreamStatus"]
                    if state != "ACTIVE":
                        raise Exception(f"StreamStatus is {state}")

                retry(check_stream_active,
                      retries=6,
                      sleep=1.0,
                      sleep_before=2.0)

            def put_subscription_filter():
                logs_client.put_subscription_filter(
                    logGroupName=logs_log_group,
                    filterName=filter_name,
                    filterPattern="",
                    destinationArn=kinesis_arn,
                    roleArn=role_arn,
                )

            # for some reason, put_subscription_filter fails on AWS the first time,
            # even though we check for ACTIVE state above...
            retry(put_subscription_filter, retries=6, sleep=3.0)

            def put_event():
                logs_client.put_log_events(
                    logGroupName=logs_log_group,
                    logStreamName=logs_log_stream,
                    logEvents=[
                        {
                            "timestamp": now_utc(millis=True),
                            "message": "test"
                        },
                        {
                            "timestamp": now_utc(millis=True),
                            "message": "test 2"
                        },
                    ],
                )

            retry(put_event, retries=6, sleep=3.0)

            shard_iterator = kinesis_client.get_shard_iterator(
                StreamName=kinesis_name,
                ShardId="shardId-000000000000",
                ShardIteratorType="TRIM_HORIZON",
            )["ShardIterator"]

            response = kinesis_client.get_records(ShardIterator=shard_iterator)
            # AWS may send additional records as health checks, so expect at least one
            assert len(response["Records"]) >= 1
            found = False
            for record in response["Records"]:
                data = record["Data"]
                unzipped_data = gzip.decompress(data)
                json_data = json.loads(unzipped_data)
                if "test" in json.dumps(json_data["logEvents"]):
                    assert len(json_data["logEvents"]) == 2
                    assert json_data["logEvents"][0]["message"] == "test"
                    assert json_data["logEvents"][1]["message"] == "test 2"
                    found = True

            assert found
        # clean up
        finally:
            kinesis_client.delete_stream(StreamName=kinesis_name,
                                         EnforceConsumerDeletion=True)
            logs_client.delete_subscription_filter(logGroupName=logs_log_group,
                                                   filterName=filter_name)
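
The fixture above references two module-level policy documents, `logs_role` and `kinesis_permission`, that are outside this excerpt. Given how they are passed to `create_iam_role_with_policy`, they are presumably a trust policy for CloudWatch Logs plus a Kinesis write permission, roughly:

import json

# Assumed shapes, reconstructed from usage; the real constants live in the test module.
logs_role = json.dumps({
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"Service": "logs.amazonaws.com"},  # let CloudWatch Logs assume the role
        "Action": "sts:AssumeRole",
    }],
})
kinesis_permission = json.dumps({
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Action": "kinesis:PutRecord", "Resource": "*"}],
})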
Example #20
0
    def test_put_events_with_target_sns(self):
        queue_name = "test-%s" % short_uid()
        rule_name = "rule-{}".format(short_uid())
        target_id = "target-{}".format(short_uid())
        bus_name = "bus-{}".format(short_uid())

        sns_client = aws_stack.create_external_boto_client("sns")
        sqs_client = aws_stack.create_external_boto_client("sqs")
        topic_name = "topic-{}".format(short_uid())
        topic_arn = sns_client.create_topic(Name=topic_name)["TopicArn"]

        queue_url = sqs_client.create_queue(QueueName=queue_name)["QueueUrl"]
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

        sns_client.subscribe(TopicArn=topic_arn,
                             Protocol="sqs",
                             Endpoint=queue_arn)

        self.events_client.create_event_bus(Name=bus_name)
        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=bus_name,
            EventPattern=json.dumps(TEST_EVENT_PATTERN),
        )
        rs = self.events_client.put_targets(
            Rule=rule_name,
            EventBusName=bus_name,
            Targets=[{
                "Id": target_id,
                "Arn": topic_arn
            }],
        )

        self.assertIn("FailedEntryCount", rs)
        self.assertIn("FailedEntries", rs)
        self.assertEqual(0, rs["FailedEntryCount"])
        self.assertEqual([], rs["FailedEntries"])

        self.events_client.put_events(
            Entries=[{
                "EventBusName": bus_name,
                "Source": TEST_EVENT_PATTERN["Source"][0],
                "DetailType": TEST_EVENT_PATTERN["detail-type"][0],
                "Detail": json.dumps(TEST_EVENT_PATTERN["Detail"][0]),
            }])

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp["Messages"]

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(1, len(messages))

        actual_event = json.loads(messages[0]["Body"]).get("Message")
        self.assertIsValidEvent(actual_event)
        self.assertEqual(TEST_EVENT_PATTERN["Detail"][0],
                         json.loads(actual_event).get("detail"))

        # clean up
        sns_client.delete_topic(TopicArn=topic_arn)
        self.cleanup(bus_name, rule_name, target_id, queue_url=queue_url)
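
The double `json.loads` above is not accidental: SNS wraps every notification it delivers to an SQS queue in a JSON envelope, and the EventBridge event sits in the envelope's `Message` field. A self-contained illustration (placeholder ARN):

import json

sqs_body = json.dumps({
    "Type": "Notification",
    "TopicArn": "arn:aws:sns:us-east-1:000000000000:topic-example",  # placeholder
    "Message": json.dumps({"detail": {"command": "update-account"}}),
})

envelope = json.loads(sqs_body)          # first parse: the SNS envelope
event = json.loads(envelope["Message"])  # second parse: the EventBridge event
assert event["detail"] == {"command": "update-account"}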
Example #21
0
    def test_set_alarm(self, sns_client, cloudwatch_client, sqs_client,
                       sns_create_topic, sqs_create_queue):
        # create topics for state 'ALARM' and 'OK'
        sns_topic_alarm = sns_create_topic()
        topic_arn_alarm = sns_topic_alarm["TopicArn"]
        sns_topic_ok = sns_create_topic()
        topic_arn_ok = sns_topic_ok["TopicArn"]

        # create queues for 'ALARM' and 'OK' (will receive sns messages)
        uid = short_uid()
        queue_url_alarm = sqs_create_queue(QueueName=f"AlarmQueue-{uid}")
        queue_url_ok = sqs_create_queue(QueueName=f"OKQueue-{uid}")

        arn_queue_alarm = sqs_client.get_queue_attributes(
            QueueUrl=queue_url_alarm,
            AttributeNames=["QueueArn"])["Attributes"]["QueueArn"]
        arn_queue_ok = sqs_client.get_queue_attributes(
            QueueUrl=queue_url_ok,
            AttributeNames=["QueueArn"])["Attributes"]["QueueArn"]
        sqs_client.set_queue_attributes(
            QueueUrl=queue_url_alarm,
            Attributes={
                "Policy": get_sqs_policy(arn_queue_alarm, topic_arn_alarm)
            },
        )
        sqs_client.set_queue_attributes(
            QueueUrl=queue_url_ok,
            Attributes={"Policy": get_sqs_policy(arn_queue_ok, topic_arn_ok)})

        alarm_name = "test-alarm"
        alarm_description = "Test Alarm when CPU exceeds 50 percent"

        expected_trigger = {
            "MetricName": "CPUUtilization",
            "Namespace": "AWS/EC2",
            "Unit": "Percent",
            "Period": 300,
            "EvaluationPeriods": 1,
            "ComparisonOperator": "GreaterThanThreshold",
            "Threshold": 50.0,
            "TreatMissingData": "ignore",
            "EvaluateLowSampleCountPercentile": "",
            "Dimensions": [{
                "value": "i-0317828c84edbe100",
                "name": "InstanceId"
            }],
            "StatisticType": "Statistic",
            "Statistic": "AVERAGE",
        }
        try:
            # subscribe to SQS
            subscription_alarm = sns_client.subscribe(TopicArn=topic_arn_alarm,
                                                      Protocol="sqs",
                                                      Endpoint=arn_queue_alarm)
            subscription_ok = sns_client.subscribe(TopicArn=topic_arn_ok,
                                                   Protocol="sqs",
                                                   Endpoint=arn_queue_ok)

            # create alarm with actions for "OK" and "ALARM"
            cloudwatch_client.put_metric_alarm(
                AlarmName=alarm_name,
                AlarmDescription=alarm_description,
                MetricName=expected_trigger["MetricName"],
                Namespace=expected_trigger["Namespace"],
                ActionsEnabled=True,
                Period=expected_trigger["Period"],
                Threshold=expected_trigger["Threshold"],
                Dimensions=[{
                    "Name": "InstanceId",
                    "Value": "i-0317828c84edbe100"
                }],
                Unit=expected_trigger["Unit"],
                Statistic=expected_trigger["Statistic"].capitalize(),
                OKActions=[topic_arn_ok],
                AlarmActions=[topic_arn_alarm],
                EvaluationPeriods=expected_trigger["EvaluationPeriods"],
                ComparisonOperator=expected_trigger["ComparisonOperator"],
                TreatMissingData=expected_trigger["TreatMissingData"],
            )

            # trigger alarm
            state_value = "ALARM"
            state_reason = "testing alarm"
            cloudwatch_client.set_alarm_state(AlarmName=alarm_name,
                                              StateReason=state_reason,
                                              StateValue=state_value)

            retry(
                check_message,
                retries=PUBLICATION_RETRIES,
                sleep_before=1,
                sqs_client=sqs_client,
                expected_queue_url=queue_url_alarm,
                expected_topic_arn=topic_arn_alarm,
                expected_new=state_value,
                expected_reason=state_reason,
                alarm_name=alarm_name,
                alarm_description=alarm_description,
                expected_trigger=expected_trigger,
            )

            # trigger OK
            state_value = "OK"
            state_reason = "resetting alarm"
            cloudwatch_client.set_alarm_state(AlarmName=alarm_name,
                                              StateReason=state_reason,
                                              StateValue=state_value)

            retry(
                check_message,
                retries=PUBLICATION_RETRIES,
                sleep_before=1,
                sqs_client=sqs_client,
                expected_queue_url=queue_url_ok,
                expected_topic_arn=topic_arn_ok,
                expected_new=state_value,
                expected_reason=state_reason,
                alarm_name=alarm_name,
                alarm_description=alarm_description,
                expected_trigger=expected_trigger,
            )
        finally:
            # cleanup
            sns_client.unsubscribe(
                SubscriptionArn=subscription_alarm["SubscriptionArn"])
            sns_client.unsubscribe(
                SubscriptionArn=subscription_ok["SubscriptionArn"])
            cloudwatch_client.delete_alarms(AlarmNames=[alarm_name])
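
`get_sqs_policy` is defined elsewhere in the test module. From its usage it evidently renders an SQS access policy that lets exactly one SNS topic deliver to the queue; a plausible sketch:

import json

def get_sqs_policy(sqs_queue_arn, sns_topic_arn):
    # Hypothetical reconstruction: allow sqs:SendMessage on the queue, but only
    # for messages whose source is the given SNS topic.
    return json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "sqs:SendMessage",
            "Resource": sqs_queue_arn,
            "Condition": {"ArnEquals": {"aws:SourceArn": sns_topic_arn}},
        }],
    })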
Example #22
0
    def test_scheduled_expression_events(self):
        class HttpEndpointListener(ProxyListener):
            def forward_request(self, method, path, data, headers):
                event = json.loads(to_str(data))
                events.append(event)
                return 200

        local_port = get_free_tcp_port()
        proxy = start_proxy(local_port, update_listener=HttpEndpointListener())
        wait_for_port_open(local_port)

        topic_name = "topic-{}".format(short_uid())
        queue_name = "queue-{}".format(short_uid())
        fifo_queue_name = "queue-{}.fifo".format(short_uid())
        rule_name = "rule-{}".format(short_uid())
        endpoint = "{}://{}:{}".format(get_service_protocol(),
                                       config.LOCALSTACK_HOSTNAME, local_port)
        sm_role_arn = aws_stack.role_arn("sfn_role")
        sm_name = "state-machine-{}".format(short_uid())
        topic_target_id = "target-{}".format(short_uid())
        sm_target_id = "target-{}".format(short_uid())
        queue_target_id = "target-{}".format(short_uid())
        fifo_queue_target_id = "target-{}".format(short_uid())

        events = []
        state_machine_definition = """
        {
            "StartAt": "Hello",
            "States": {
                "Hello": {
                    "Type": "Pass",
                    "Result": "World",
                    "End": true
                }
            }
        }
        """

        state_machine_arn = self.sfn_client.create_state_machine(
            name=sm_name,
            definition=state_machine_definition,
            roleArn=sm_role_arn)["stateMachineArn"]

        topic_arn = self.sns_client.create_topic(Name=topic_name)["TopicArn"]
        self.sns_client.subscribe(TopicArn=topic_arn,
                                  Protocol="http",
                                  Endpoint=endpoint)

        queue_url = self.sqs_client.create_queue(
            QueueName=queue_name)["QueueUrl"]
        fifo_queue_url = self.sqs_client.create_queue(
            QueueName=fifo_queue_name,
            Attributes={
                "FifoQueue": "true",
                "ContentBasedDeduplication": "true"
            },
        )["QueueUrl"]
        queue_arn = aws_stack.sqs_queue_arn(queue_name)
        fifo_queue_arn = aws_stack.sqs_queue_arn(fifo_queue_name)

        event = {"env": "testing"}

        self.events_client.put_rule(Name=rule_name,
                                    ScheduleExpression="rate(1 minute)")

        self.events_client.put_targets(
            Rule=rule_name,
            Targets=[
                {
                    "Id": topic_target_id,
                    "Arn": topic_arn,
                    "Input": json.dumps(event)
                },
                {
                    "Id": sm_target_id,
                    "Arn": state_machine_arn,
                    "Input": json.dumps(event),
                },
                {
                    "Id": queue_target_id,
                    "Arn": queue_arn,
                    "Input": json.dumps(event)
                },
                {
                    "Id": fifo_queue_target_id,
                    "Arn": fifo_queue_arn,
                    "Input": json.dumps(event),
                    "SqsParameters": {
                        "MessageGroupId": "123"
                    },
                },
            ],
        )

        def received(q_urls):
            # state machine got executed
            executions = self.sfn_client.list_executions(
                stateMachineArn=state_machine_arn)["executions"]
            self.assertGreaterEqual(len(executions), 1)

            # http endpoint got events
            self.assertGreaterEqual(len(events), 2)
            notifications = [
                event["Message"] for event in events
                if event["Type"] == "Notification"
            ]
            self.assertGreaterEqual(len(notifications), 1)

            # get state machine execution detail
            execution_arn = executions[0]["executionArn"]
            execution_input = self.sfn_client.describe_execution(
                executionArn=execution_arn)["input"]

            all_msgs = []
            # get message from queue
            for url in q_urls:
                msgs = self.sqs_client.receive_message(QueueUrl=url).get(
                    "Messages", [])
                self.assertGreaterEqual(len(msgs), 1)
                all_msgs.append(msgs[0])

            return execution_input, notifications[0], all_msgs

        execution_input, notification, msgs_received = retry(
            received, retries=5, sleep=15, q_urls=[queue_url, fifo_queue_url])
        self.assertEqual(event, json.loads(notification))
        self.assertEqual(event, json.loads(execution_input))
        for msg_received in msgs_received:
            self.assertEqual(event, json.loads(msg_received["Body"]))

        # clean up
        proxy.stop()
        self.cleanup(
            None,
            rule_name,
            target_ids=[
                topic_target_id,
                sm_target_id,
                queue_target_id,
                fifo_queue_target_id,
            ],
            queue_url=queue_url,
        )
        self.sns_client.delete_topic(TopicArn=topic_arn)
        self.sfn_client.delete_state_machine(stateMachineArn=state_machine_arn)
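
A note on the schedule syntax: AWS requires a singular unit in `rate()` when the value is 1 and a plural unit otherwise, and also accepts six-field `cron()` expressions. For reference (standard boto3 calls against the default bus):

import boto3

events_client = boto3.client("events")  # or the LocalStack client used above

events_client.put_rule(Name="every-minute", ScheduleExpression="rate(1 minute)")
events_client.put_rule(Name="every-five-minutes", ScheduleExpression="rate(5 minutes)")
events_client.put_rule(Name="weekday-mornings", ScheduleExpression="cron(0 9 ? * MON-FRI *)")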
Example #23
0
    def test_sqs_batch_lambda_forward(self):
        sqs = aws_stack.connect_to_service('sqs')
        lambda_api = aws_stack.connect_to_service('lambda')

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(
            TEST_LAMBDA_NAME_QUEUE_BATCH)
        queue_url = sqs_queue_info['QueueUrl']
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON_ECHO),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27)
        resp = testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_QUEUE_BATCH,
            zip_file=zip_file,
            event_source_arn=sqs_queue_info['QueueArn'],
            runtime=LAMBDA_RUNTIME_PYTHON27)

        event_source_id = resp['CreateEventSourceMappingResponse']['UUID']
        lambda_api.update_event_source_mapping(UUID=event_source_id,
                                               BatchSize=5)

        messages_to_send = [{
            'Id': 'message{:02d}'.format(i),
            'MessageBody': 'msgBody{:02d}'.format(i),
            'MessageAttributes': {
                'CustomAttribute': {
                    'DataType': 'String',
                    'StringValue': 'CustomAttributeValue{:02d}'.format(i)
                }
            }
        } for i in range(1, 12)]

        start_time = datetime.now()

        # send 11 messages (which should get split into 3 batches)
        sqs.send_message_batch(QueueUrl=queue_url,
                               Entries=messages_to_send[:10])
        sqs.send_message(
            QueueUrl=queue_url,
            MessageBody=messages_to_send[10]['MessageBody'],
            MessageAttributes=messages_to_send[10]['MessageAttributes'])

        def wait_for_done():
            attributes = sqs.get_queue_attributes(
                QueueUrl=queue_url,
                AttributeNames=[
                    'ApproximateNumberOfMessages',
                    'ApproximateNumberOfMessagesDelayed',
                    'ApproximateNumberOfMessagesNotVisible'
                ],
            )['Attributes']
            msg_count = int(attributes.get('ApproximateNumberOfMessages'))
            self.assertEqual(msg_count, 0, 'expecting queue to be empty')

            delayed_count = int(
                attributes.get('ApproximateNumberOfMessagesDelayed'))
            if delayed_count != 0:
                LOGGER.warning(
                    ('SQS delayed message count (actual/expected): %s/%s') %
                    (delayed_count, 0))

            not_visible_count = int(
                attributes.get('ApproximateNumberOfMessagesNotVisible'))
            if not_visible_count != 0:
                LOGGER.warning(
                    ('SQS messages not visible (actual/expected): %s/%s') %
                    (not_visible_count, 0))

            invocation_count = get_lambda_invocations_count(
                TEST_LAMBDA_NAME_QUEUE_BATCH,
                period=120,
                start_time=start_time,
                end_time=datetime.now())
            if invocation_count != 3:
                LOGGER.warning(
                    ('Lambda invocations (actual/expected): %s/%s') %
                    (invocation_count, 3))

            self.assertEqual(delayed_count, 0, 'no messages waiting for retry')
            self.assertEqual(delayed_count + not_visible_count, 0,
                             'no in flight messages')
            self.assertEqual(invocation_count, 3,
                             'expected 3 lambda invocations')

        # wait for the queue to drain (max 90s)
        retry(wait_for_done, retries=18, sleep=5.0)

        testutil.delete_lambda_function(TEST_LAMBDA_NAME_QUEUE_BATCH)
        sqs.delete_queue(QueueUrl=queue_url)
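
The expected invocation count follows from the batch size alone: 11 queued messages with `BatchSize=5` give ceil(11 / 5) = 3 Lambda invocations, which is what `wait_for_done` asserts:

import math

batch_size = 5
messages_sent = 11
expected_invocations = math.ceil(messages_sent / batch_size)
assert expected_invocations == 3  # matches the assertion in wait_for_done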
Example #24
0
    def test_api_destinations(self):

        token = short_uid()
        bearer = "Bearer %s" % token

        class HttpEndpointListener(ProxyListener):
            def forward_request(self, method, path, data, headers):
                event = json.loads(to_str(data))
                events.append(event)
                paths_list.append(path)
                auth = headers.get("Api") or headers.get("Authorization")
                if auth not in headers_list:
                    headers_list.append(auth)

                return requests_response({
                    "access_token": token,
                    "token_type": "Bearer",
                    "expires_in": 86400,
                })

        events = []
        paths_list = []
        headers_list = []

        local_port = get_free_tcp_port()
        proxy = start_proxy(local_port, update_listener=HttpEndpointListener())
        wait_for_port_open(local_port)
        url = "http://localhost:%s" % local_port

        auth_types = [
            {
                "type": "BASIC",
                "key": "BasicAuthParameters",
                "parameters": {
                    "Username": "******",
                    "Password": "******"
                },
            },
            {
                "type": "API_KEY",
                "key": "ApiKeyAuthParameters",
                "parameters": {
                    "ApiKeyName": "Api",
                    "ApiKeyValue": "apikey_secret"
                },
            },
            {
                "type": "OAUTH_CLIENT_CREDENTIALS",
                "key": "OAuthParameters",
                "parameters": {
                    "AuthorizationEndpoint": url,
                    "ClientParameters": {
                        "ClientID": "id",
                        "ClientSecret": "password"
                    },
                    "HttpMethod": "put",
                },
            },
        ]

        for auth in auth_types:
            connection_name = "c-%s" % short_uid()
            connection_arn = self.events_client.create_connection(
                Name=connection_name,
                AuthorizationType=auth.get("type"),
                AuthParameters={
                    auth.get("key"): auth.get("parameters"),
                    "InvocationHttpParameters": {
                        "BodyParameters": [{
                            "Key": "key",
                            "Value": "value",
                            "IsValueSecret": False
                        }],
                        "HeaderParameters": [{
                            "Key": "key",
                            "Value": "value",
                            "IsValueSecret": False
                        }],
                        "QueryStringParameters": [{
                            "Key": "key",
                            "Value": "value",
                            "IsValueSecret": False
                        }],
                    },
                },
            )["ConnectionArn"]

            # create api destination
            dest_name = "d-%s" % short_uid()
            result = self.events_client.create_api_destination(
                Name=dest_name,
                ConnectionArn=connection_arn,
                InvocationEndpoint=url,
                HttpMethod="POST",
            )

            # create rule and target
            rule_name = "r-%s" % short_uid()
            target_id = "target-{}".format(short_uid())
            pattern = json.dumps({
                "source": ["source-123"],
                "detail-type": ["type-123"]
            })
            self.events_client.put_rule(Name=rule_name, EventPattern=pattern)
            self.events_client.put_targets(
                Rule=rule_name,
                Targets=[{
                    "Id": target_id,
                    "Arn": result["ApiDestinationArn"]
                }],
            )

            entries = [{
                "Source": "source-123",
                "DetailType": "type-123",
                "Detail": '{"i": %s}' % 0,
            }]
            self.events_client.put_events(Entries=entries)

            # cleaning
            self.events_client.delete_connection(Name=connection_name)
            self.events_client.delete_api_destination(Name=dest_name)
            self.events_client.delete_rule(Name=rule_name, Force=True)

        # assert that all events have been received in the HTTP server listener
        def check():
            self.assertGreaterEqual(len(events), len(auth_types))
            self.assertIn("key", paths_list[0])
            self.assertIn("value", paths_list[0])
            self.assertEqual("value", events[0].get("key"))

            # TODO examine behavior difference between LS pro/community
            # Pro seems to (correctly) use base64 for basic authentication instead of plaintext
            user_pass = to_str(base64.b64encode(b"user:pass"))
            self.assertTrue("Basic user:pass" in headers_list
                            or f"Basic {user_pass}" in headers_list)
            self.assertTrue("apikey_secret" in headers_list)
            self.assertTrue(bearer in headers_list)

        retry(check, sleep=0.5, retries=5)

        # clean up
        proxy.stop()
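
Regarding the TODO above: RFC 7617 Basic auth base64-encodes `user:pass`, so the Pro behavior is the spec-compliant one. For reference:

import base64

header = "Basic " + base64.b64encode(b"user:pass").decode()
assert header == "Basic dXNlcjpwYXNz"  # the form real AWS sends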
Example #25
0
    def test_put_events_with_target_lambda(self):
        rule_name = 'rule-{}'.format(short_uid())
        function_name = 'lambda-func-{}'.format(short_uid())
        target_id = 'target-{}'.format(short_uid())

        rs = testutil.create_lambda_function(handler_file=os.path.join(
            THIS_FOLDER, 'lambdas', 'lambda_echo.py'),
                                             func_name=function_name,
                                             runtime=LAMBDA_RUNTIME_PYTHON36)

        func_arn = rs['CreateFunctionResponse']['FunctionArn']

        self.events_client.create_event_bus(Name=TEST_EVENT_BUS_NAME)

        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=TEST_EVENT_BUS_NAME,
            EventPattern=json.dumps(TEST_EVENT_PATTERN))

        rs = self.events_client.put_targets(Rule=rule_name,
                                            EventBusName=TEST_EVENT_BUS_NAME,
                                            Targets=[{
                                                'Id': target_id,
                                                'Arn': func_arn
                                            }])

        self.assertIn('FailedEntryCount', rs)
        self.assertIn('FailedEntries', rs)
        self.assertEqual(rs['FailedEntryCount'], 0)
        self.assertEqual(rs['FailedEntries'], [])

        self.events_client.put_events(
            Entries=[{
                'EventBusName': TEST_EVENT_BUS_NAME,
                'Source': TEST_EVENT_PATTERN['Source'],
                'DetailType': TEST_EVENT_PATTERN['DetailType'],
                'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'])
            }])

        # Get lambda's log events
        events = retry(check_expected_lambda_log_events_length,
                       retries=3,
                       sleep=1,
                       function_name=function_name,
                       expected_length=1)
        actual_event = events[0]
        self.assertIsValidEvent(actual_event)
        self.assertDictEqual(json.loads(actual_event['detail']),
                             json.loads(TEST_EVENT_PATTERN['Detail']))

        # clean up
        testutil.delete_lambda_function(function_name)

        self.events_client.remove_targets(Rule=rule_name,
                                          EventBusName=TEST_EVENT_BUS_NAME,
                                          Ids=[target_id],
                                          Force=True)
        self.events_client.delete_rule(Name=rule_name,
                                       EventBusName=TEST_EVENT_BUS_NAME,
                                       Force=True)
        self.events_client.delete_event_bus(Name=TEST_EVENT_BUS_NAME)
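
`check_expected_lambda_log_events_length` comes from the shared test utilities and is not shown here. Conceptually it fetches the events the echo Lambda logged and raises until the expected count appears, so `retry` keeps polling; a sketch with a stubbed log fetcher:

def get_lambda_log_events(function_name):
    # Stand-in for the real helper, which reads the function's CloudWatch log
    # streams and returns the JSON events the echo Lambda printed.
    raise NotImplementedError

def check_expected_lambda_log_events_length(function_name, expected_length):
    events = get_lambda_log_events(function_name)
    assert len(events) == expected_length  # an AssertionError makes retry() try again
    return events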
Example #26
0
    def test_put_events_with_target_kinesis(self):
        rule_name = "rule-{}".format(short_uid())
        target_id = "target-{}".format(short_uid())
        bus_name = "bus-{}".format(short_uid())
        stream_name = "stream-{}".format(short_uid())
        stream_arn = aws_stack.kinesis_stream_arn(stream_name)

        kinesis_client = aws_stack.create_external_boto_client("kinesis")
        kinesis_client.create_stream(StreamName=stream_name, ShardCount=1)

        self.events_client.create_event_bus(Name=bus_name)

        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=bus_name,
            EventPattern=json.dumps(TEST_EVENT_PATTERN),
        )

        put_response = self.events_client.put_targets(
            Rule=rule_name,
            EventBusName=bus_name,
            Targets=[{
                "Id": target_id,
                "Arn": stream_arn,
                "KinesisParameters": {
                    "PartitionKeyPath": "$.detail-type"
                },
            }],
        )

        self.assertIn("FailedEntryCount", put_response)
        self.assertIn("FailedEntries", put_response)
        self.assertEqual(0, put_response["FailedEntryCount"])
        self.assertEqual([], put_response["FailedEntries"])

        def check_stream_status():
            _stream = kinesis_client.describe_stream(StreamName=stream_name)
            assert _stream["StreamDescription"]["StreamStatus"] == "ACTIVE"

        # wait until stream becomes available
        retry(check_stream_status, retries=7, sleep=0.8)

        self.events_client.put_events(
            Entries=[{
                "EventBusName": bus_name,
                "Source": TEST_EVENT_PATTERN["Source"][0],
                "DetailType": TEST_EVENT_PATTERN["detail-type"][0],
                "Detail": json.dumps(TEST_EVENT_PATTERN["Detail"][0]),
            }])

        stream = kinesis_client.describe_stream(StreamName=stream_name)
        shard_id = stream["StreamDescription"]["Shards"][0]["ShardId"]
        shard_iterator = kinesis_client.get_shard_iterator(
            StreamName=stream_name,
            ShardId=shard_id,
            ShardIteratorType="AT_TIMESTAMP",
            Timestamp=datetime(2020, 1, 1),
        )["ShardIterator"]

        record = kinesis_client.get_records(
            ShardIterator=shard_iterator)["Records"][0]

        partition_key = record["PartitionKey"]
        data = json.loads(record["Data"].decode())

        self.assertEqual(TEST_EVENT_PATTERN["detail-type"][0], partition_key)
        self.assertEqual(EVENT_DETAIL, data["detail"])
        self.assertIsValidEvent(data)
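
`KinesisParameters.PartitionKeyPath` is a JSONPath evaluated against the matched event, so `$.detail-type` makes the event's detail-type the Kinesis partition key; that is exactly what the final `assertEqual` checks. Illustratively (placeholder event values):

event = {
    "detail-type": "core.update-account-command",  # placeholder
    "source": "core.update-account-command",
    "detail": {"command": "update-account"},
}
partition_key = event["detail-type"]  # the value "$.detail-type" resolves to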
Example #27
0
    def test_put_events_with_input_path_multiple(self):
        queue_name = 'queue-{}'.format(short_uid())
        queue_name_1 = 'queue-{}'.format(short_uid())
        rule_name = 'rule-{}'.format(short_uid())
        target_id = 'target-{}'.format(short_uid())
        target_id_1 = 'target-{}'.format(short_uid())
        bus_name = 'bus-{}'.format(short_uid())

        sqs_client = aws_stack.connect_to_service('sqs')
        queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

        queue_url_1 = sqs_client.create_queue(
            QueueName=queue_name_1)['QueueUrl']
        queue_arn_1 = aws_stack.sqs_queue_arn(queue_name_1)

        self.events_client.create_event_bus(Name=bus_name)

        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=bus_name,
            EventPattern=json.dumps(TEST_EVENT_PATTERN))

        self.events_client.put_targets(Rule=rule_name,
                                       EventBusName=bus_name,
                                       Targets=[{
                                           'Id': target_id,
                                           'Arn': queue_arn,
                                           'InputPath': '$.detail'
                                       }, {
                                           'Id': target_id_1,
                                           'Arn': queue_arn_1,
                                       }])

        self.events_client.put_events(
            Entries=[{
                'EventBusName': bus_name,
                'Source': TEST_EVENT_PATTERN['Source'][0],
                'DetailType': TEST_EVENT_PATTERN['detail-type'][0],
                'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0])
            }])

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp.get('Messages')

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(len(messages), 1)
        self.assertEqual(json.loads(messages[0].get('Body')), EVENT_DETAIL)

        messages = retry(get_message,
                         retries=3,
                         sleep=1,
                         queue_url=queue_url_1)
        self.assertEqual(len(messages), 1)
        self.assertEqual(
            json.loads(messages[0].get('Body')).get('detail'), EVENT_DETAIL)

        self.events_client.put_events(
            Entries=[{
                'EventBusName': bus_name,
                'Source': 'dummySource',
                'DetailType': TEST_EVENT_PATTERN['detail-type'][0],
                'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0])
            }])

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(messages, None)

        # clean up
        self.cleanup(bus_name, rule_name, target_id, queue_url=queue_url)
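
The two targets differ only in `InputPath`: with `$.detail` the queue receives just the detail object, while the plain target receives the full event envelope and has to dig `detail` out of it, which is why the two assertions above parse the bodies differently. A minimal illustration (placeholder values):

full_event = {
    "version": "0",
    "detail-type": "core.update-account-command",  # placeholder
    "source": "core.update-account-command",
    "detail": {"command": "update-account"},
}
with_input_path = full_event["detail"]  # target configured with InputPath="$.detail"
without_input_path = full_event         # plain target: full envelope, detail nested
assert with_input_path == without_input_path["detail"]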
Example #28
0
    def test_put_events_with_input_path_multiple(self):
        queue_name = "queue-{}".format(short_uid())
        queue_name_1 = "queue-{}".format(short_uid())
        rule_name = "rule-{}".format(short_uid())
        target_id = "target-{}".format(short_uid())
        target_id_1 = "target-{}".format(short_uid())
        bus_name = "bus-{}".format(short_uid())

        sqs_client = aws_stack.create_external_boto_client("sqs")
        queue_url = sqs_client.create_queue(QueueName=queue_name)["QueueUrl"]
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

        queue_url_1 = sqs_client.create_queue(
            QueueName=queue_name_1)["QueueUrl"]
        queue_arn_1 = aws_stack.sqs_queue_arn(queue_name_1)

        self.events_client.create_event_bus(Name=bus_name)

        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=bus_name,
            EventPattern=json.dumps(TEST_EVENT_PATTERN),
        )

        self.events_client.put_targets(
            Rule=rule_name,
            EventBusName=bus_name,
            Targets=[
                {
                    "Id": target_id,
                    "Arn": queue_arn,
                    "InputPath": "$.detail"
                },
                {
                    "Id": target_id_1,
                    "Arn": queue_arn_1,
                },
            ],
        )

        self.events_client.put_events(
            Entries=[{
                "EventBusName": bus_name,
                "Source": TEST_EVENT_PATTERN["Source"][0],
                "DetailType": TEST_EVENT_PATTERN["detail-type"][0],
                "Detail": json.dumps(TEST_EVENT_PATTERN["Detail"][0]),
            }])

        def get_message(queue_url):
            resp = sqs_client.receive_message(QueueUrl=queue_url)
            return resp.get("Messages")

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertEqual(1, len(messages))
        self.assertEqual(EVENT_DETAIL, json.loads(messages[0].get("Body")))

        messages = retry(get_message,
                         retries=3,
                         sleep=1,
                         queue_url=queue_url_1)
        self.assertEqual(1, len(messages))
        self.assertEqual(EVENT_DETAIL,
                         json.loads(messages[0].get("Body")).get("detail"))

        self.events_client.put_events(
            Entries=[{
                "EventBusName": bus_name,
                "Source": "dummySource",
                "DetailType": TEST_EVENT_PATTERN["detail-type"][0],
                "Detail": json.dumps(TEST_EVENT_PATTERN["Detail"][0]),
            }])

        messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url)
        self.assertIsNone(messages)

        # clean up
        self.cleanup(bus_name, rule_name, target_id, queue_url=queue_url)
Example #29
0
    def test_kinesis_lambda_sns_ddb_sqs_streams(self):
        ddb_lease_table_suffix = '-kclapp'
        table_name = TEST_TABLE_NAME + 'klsdss' + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        dynamodb = aws_stack.connect_to_resource('dynamodb')
        dynamodb_service = aws_stack.connect_to_service('dynamodb')
        dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
        kinesis = aws_stack.connect_to_service('kinesis')
        sns = aws_stack.connect_to_service('sns')
        sqs = aws_stack.connect_to_service('sqs')

        LOGGER.info('Creating test streams...')
        run_safe(lambda: dynamodb_service.delete_table(
            TableName=stream_name + ddb_lease_table_suffix), print_error=False)
        aws_stack.create_kinesis_stream(stream_name, delete=True)
        aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(stream_name, listener_func=process_records,
            wait_until_started=True, ddb_lease_table_suffix=ddb_lease_table_suffix)

        LOGGER.info('Kinesis consumer initialized.')

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(table_name, partition_key=PARTITION_KEY,
            stream_view_type='NEW_AND_OLD_IMAGES')

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams['Streams']:
            if stream['TableName'] == table_name:
                ddb_event_source_arn = stream['StreamArn']
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
            libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
            zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27, delete=True)
        # make sure we cannot create Lambda with same name twice
        assert_raises(Exception, testutil.create_lambda_function, func_name=TEST_LAMBDA_NAME_DDB,
            zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

        # deploy test lambda connected to Kinesis Stream
        kinesis_event_source_arn = kinesis.describe_stream(
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)['StreamDescription']['StreamARN']
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM,
            zip_file=zip_file, event_source_arn=kinesis_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(TEST_LAMBDA_NAME_QUEUE)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_QUEUE,
            zip_file=zip_file, event_source_arn=sqs_queue_info['QueueArn'], runtime=LAMBDA_RUNTIME_PYTHON27)

        # set number of items to update/put to table
        num_events_ddb = 15
        num_put_new_items = 5
        num_put_existing_items = 2
        num_batch_items = 3
        num_updates_ddb = num_events_ddb - num_put_new_items - num_put_existing_items - num_batch_items

        LOGGER.info('Putting %s items to table...' % num_events_ddb)
        table = dynamodb.Table(table_name)
        for i in range(0, num_put_new_items):
            table.put_item(Item={
                PARTITION_KEY: 'testId%s' % i,
                'data': 'foobar123'
            })
        # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
        for i in range(0, num_put_existing_items):
            table.put_item(Item={
                PARTITION_KEY: 'testId%s' % i,
                'data': 'foobar123_put_existing'
            })

        # batch write some items containing non-ASCII characters
        dynamodb.batch_write_item(RequestItems={table_name: [
            {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ✓'}}},
            {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 £'}}},
            {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ¢'}}}
        ]})
        # update some items, which also triggers notification events
        for i in range(0, num_updates_ddb):
            dynamodb_service.update_item(TableName=table_name,
                Key={PARTITION_KEY: {'S': 'testId%s' % i}},
                AttributeUpdates={'data': {
                    'Action': 'PUT',
                    'Value': {'S': 'foobar123_updated'}
                }})

        # put items to stream
        num_events_kinesis = 10
        LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
        kinesis.put_records(
            Records=[
                {
                    'Data': '{}',
                    'PartitionKey': 'testId%s' % i
                } for i in range(0, num_events_kinesis)
            ], StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
        )

        # put 1 item to stream that will trigger an error in the Lambda
        kinesis.put_record(Data='{"%s": 1}' % lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
            PartitionKey='testIdError', StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

        # create SNS topic, connect it to the Lambda, publish test messages
        num_events_sns = 3
        response = sns.create_topic(Name=TEST_TOPIC_NAME)
        sns.subscribe(TopicArn=response['TopicArn'], Protocol='lambda',
            Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
        for i in range(0, num_events_sns):
            sns.publish(TopicArn=response['TopicArn'], Subject='test_subject', Message='test message %s' % i)

        # get latest records
        latest = aws_stack.kinesis_get_latest_records(TEST_LAMBDA_SOURCE_STREAM_NAME,
            shard_id='shardId-000000000000', count=10)
        self.assertEqual(len(latest), 10)

        # send messages to SQS queue
        num_events_sqs = 4
        for i in range(num_events_sqs):
            sqs.send_message(QueueUrl=sqs_queue_info['QueueUrl'], MessageBody=str(i))

        LOGGER.info('Waiting some time before finishing test.')
        time.sleep(2)

        num_events_lambda = num_events_ddb + num_events_sns + num_events_sqs
        num_events = num_events_lambda + num_events_kinesis

        def check_events():
            if len(events) != num_events:
                LOGGER.warning(('DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s') %
                    (len(events), num_events))
            self.assertEqual(len(events), num_events)
            event_items = [json.loads(base64.b64decode(e['data'])) for e in events]
            # make sure we have the right amount of INSERT/MODIFY event types
            inserts = [e for e in event_items if e.get('__action_type') == 'INSERT']
            modifies = [e for e in event_items if e.get('__action_type') == 'MODIFY']
            self.assertEqual(len(inserts), num_put_new_items + num_batch_items)
            self.assertEqual(len(modifies), num_put_existing_items + num_updates_ddb)

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=9, sleep=3)

        # check cloudwatch notifications
        def check_cw_invocations():
            num_invocations = get_lambda_invocations_count(TEST_LAMBDA_NAME_STREAM)
            # TODO: It seems that CloudWatch is currently reporting an incorrect number of
            #   invocations, namely the sum over *all* lambdas, not the single one we're asking for.
            #   Also, we need to bear in mind that Kinesis may perform batch updates, i.e., a single
            #   Lambda invocation may happen with a set of Kinesis records, hence we cannot simply
            #   add num_events_ddb to num_events_lambda above!
            # self.assertEqual(num_invocations, 2 + num_events_lambda)
            self.assertGreater(num_invocations, num_events_sns + num_events_sqs)
            num_error_invocations = get_lambda_invocations_count(TEST_LAMBDA_NAME_STREAM, 'Errors')
            self.assertEqual(num_error_invocations, 1)

        # Lambda invocations are running asynchronously, hence sleep some time here to wait for results
        retry(check_cw_invocations, retries=5, sleep=2)

        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_STREAM)
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_DDB)
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_QUEUE)
        sqs.delete_queue(QueueUrl=sqs_queue_info['QueueUrl'])
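
The tests above lean heavily on a retry(function, retries=N, sleep=S) polling helper to absorb eventual consistency. The following is only a minimal sketch of such a helper, not the actual implementation from the localstack utils (which may differ, e.g. in how attempts are counted); note that extra keyword arguments are forwarded to the polled function, matching calls like the check_expected_lambda_log_events_length usage in the next example.

import time

def retry(function, retries=3, sleep=1.0, **kwargs):
    # poll until the function stops raising; re-raise the last error when exhausted
    error = None
    for _ in range(max(retries, 1)):
        try:
            return function(**kwargs)
        except Exception as e:
            error = e
            time.sleep(sleep)
    raise error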
Example #30
    def test_put_events_with_target_lambda(self):
        rule_name = "rule-{}".format(short_uid())
        function_name = "lambda-func-{}".format(short_uid())
        target_id = "target-{}".format(short_uid())
        bus_name = "bus-{}".format(short_uid())

        handler_file = os.path.join(THIS_FOLDER, "lambdas", "lambda_echo.py")
        rs = testutil.create_lambda_function(
            handler_file=handler_file,
            func_name=function_name,
            runtime=LAMBDA_RUNTIME_PYTHON36,
        )

        func_arn = rs["CreateFunctionResponse"]["FunctionArn"]

        self.events_client.create_event_bus(Name=bus_name)
        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=bus_name,
            EventPattern=json.dumps(TEST_EVENT_PATTERN),
        )
        rs = self.events_client.put_targets(
            Rule=rule_name,
            EventBusName=bus_name,
            Targets=[{"Id": target_id, "Arn": func_arn}],
        )

        self.assertIn("FailedEntryCount", rs)
        self.assertIn("FailedEntries", rs)
        self.assertEqual(0, rs["FailedEntryCount"])
        self.assertEqual([], rs["FailedEntries"])

        self.events_client.put_events(
            Entries=[
                {
                    "EventBusName": bus_name,
                    "Source": TEST_EVENT_PATTERN["Source"][0],
                    "DetailType": TEST_EVENT_PATTERN["detail-type"][0],
                    "Detail": json.dumps(TEST_EVENT_PATTERN["Detail"][0]),
                }
            ]
        )

        # Get lambda's log events
        events = retry(
            check_expected_lambda_log_events_length,
            retries=3,
            sleep=1,
            function_name=function_name,
            expected_length=1,
        )
        actual_event = events[0]
        self.assertIsValidEvent(actual_event)
        self.assertDictEqual(
            json.loads(actual_event["detail"]),
            json.loads(TEST_EVENT_PATTERN["Detail"][0]),
        )

        # clean up
        testutil.delete_lambda_function(function_name)
        self.cleanup(bus_name, rule_name, target_id)
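
For context on why the published entry reaches the Lambda: an EventBridge rule pattern such as TEST_EVENT_PATTERN maps each field to a list of accepted values. Below is a simplified, hypothetical matcher for that core rule only; real EventBridge matching also supports prefix, numeric, and anything-but operators, and the pattern/field names here are placeholders.

def pattern_matches(pattern, event):
    # every pattern key must be present in the event with an accepted value
    for key, accepted in pattern.items():
        value = event.get(key)
        if isinstance(accepted, dict):
            # nested object pattern, e.g. on the event detail
            if not isinstance(value, dict) or not pattern_matches(accepted, value):
                return False
        elif value not in accepted:
            return False
    return True

# usage with placeholder values (TEST_EVENT_PATTERN itself is defined elsewhere):
pattern = {'source': ['my.app'], 'detail-type': ['myDetailType']}
assert pattern_matches(pattern, {'source': 'my.app', 'detail-type': 'myDetailType'})
assert not pattern_matches(pattern, {'source': 'other.app', 'detail-type': 'myDetailType'})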
Example #31
def test_kinesis_lambda_sns_ddb_streams():

    ddb_lease_table_suffix = '-kclapp'
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    dynamodb_service = aws_stack.connect_to_service('dynamodb')
    dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
    kinesis = aws_stack.connect_to_service('kinesis')
    sns = aws_stack.connect_to_service('sns')

    LOGGER.info('Creating test streams...')
    run_safe(lambda: dynamodb_service.delete_table(
        TableName=TEST_STREAM_NAME + ddb_lease_table_suffix), print_error=False)
    aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

    # subscribe to inbound Kinesis stream
    def process_records(records, shard_id):
        EVENTS.extend(records)

    # start the KCL client process in the background
    kinesis_connector.listen_to_kinesis(TEST_STREAM_NAME, listener_func=process_records,
        wait_until_started=True, ddb_lease_table_suffix=ddb_lease_table_suffix)

    LOGGER.info('Kinesis consumer initialized.')

    # create table with stream forwarding config
    testutil.create_dynamodb_table(TEST_TABLE_NAME, partition_key=PARTITION_KEY,
        stream_view_type='NEW_AND_OLD_IMAGES')

    # list DDB streams and make sure the table stream is there
    streams = dynamodbstreams.list_streams()
    ddb_event_source_arn = None
    for stream in streams['Streams']:
        if stream['TableName'] == TEST_TABLE_NAME:
            ddb_event_source_arn = stream['StreamArn']
    assert ddb_event_source_arn

    # deploy test lambda connected to DynamoDB Stream
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
        zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)
    # make sure we cannot create a Lambda with the same name twice
    assert_raises(Exception, testutil.create_lambda_function, func_name=TEST_LAMBDA_NAME_DDB,
        zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

    # deploy test lambda connected to Kinesis Stream
    kinesis_event_source_arn = kinesis.describe_stream(
        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)['StreamDescription']['StreamARN']
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM,
        zip_file=zip_file, event_source_arn=kinesis_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

    # set number of items to update/put to table
    num_events_ddb = 15
    num_put_new_items = 5
    num_put_existing_items = 2
    num_batch_items = 3
    num_updates_ddb = num_events_ddb - num_put_new_items - num_put_existing_items - num_batch_items

    LOGGER.info('Putting %s items to table...' % num_events_ddb)
    table = dynamodb.Table(TEST_TABLE_NAME)
    for i in range(0, num_put_new_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123'
        })
    # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
    for i in range(0, num_put_existing_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123_put_existing'
        })

    # batch write some items containing non-ASCII characters
    dynamodb.batch_write_item(RequestItems={TEST_TABLE_NAME: [
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ✓'}}},
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 £'}}},
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ¢'}}}
    ]})
    # update some items, which also triggers notification events
    for i in range(0, num_updates_ddb):
        dynamodb_service.update_item(TableName=TEST_TABLE_NAME,
            Key={PARTITION_KEY: {'S': 'testId%s' % i}},
            AttributeUpdates={'data': {
                'Action': 'PUT',
                'Value': {'S': 'foobar123_updated'}
            }})

    # put items to stream
    num_events_kinesis = 10
    LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
    kinesis.put_records(
        Records=[
            {
                'Data': '{}',
                'PartitionKey': 'testId%s' % i
            } for i in range(0, num_events_kinesis)
        ], StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
    )

    # put 1 item to stream that will trigger an error in the Lambda
    kinesis.put_record(Data='{"%s": 1}' % lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
        PartitionKey='testIderror', StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # create SNS topic, connect it to the Lambda, publish test messages
    num_events_sns = 3
    response = sns.create_topic(Name=TEST_TOPIC_NAME)
    sns.subscribe(TopicArn=response['TopicArn'], Protocol='lambda',
        Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
    for i in range(0, num_events_sns):
        sns.publish(TopicArn=response['TopicArn'], Message='test message %s' % i)

    # get latest records
    latest = aws_stack.kinesis_get_latest_records(TEST_LAMBDA_SOURCE_STREAM_NAME,
        shard_id='shardId-000000000000', count=10)
    assert len(latest) == 10

    LOGGER.info('Waiting some time before finishing test.')
    time.sleep(2)

    num_events = num_events_ddb + num_events_kinesis + num_events_sns

    def check_events():
        if len(EVENTS) != num_events:
            LOGGER.warning('DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s' %
                (len(EVENTS), num_events))
        assert len(EVENTS) == num_events
        event_items = [json.loads(base64.b64decode(e['data'])) for e in EVENTS]
        # make sure we have the right amount of INSERT/MODIFY event types
        inserts = [e for e in event_items if e.get('__action_type') == 'INSERT']
        modifies = [e for e in event_items if e.get('__action_type') == 'MODIFY']
        assert len(inserts) == num_put_new_items + num_batch_items
        assert len(modifies) == num_put_existing_items + num_updates_ddb

    # this can take a long time in CI, make sure we give it enough time/retries
    retry(check_events, retries=7, sleep=3)

    # check cloudwatch notifications
    stats1 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM)
    assert len(stats1['Datapoints']) == 2 + num_events_sns
    stats2 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM, 'Errors')
    assert len(stats2['Datapoints']) == 1
    stats3 = get_lambda_metrics(TEST_LAMBDA_NAME_DDB)
    assert len(stats3['Datapoints']) == num_events_ddb
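
The check_events assertions in this example hinge on the fact that each record forwarded through Kinesis carries base64-encoded JSON in its 'data' field, with the '__action_type' attribute added by the test's integration Lambda. A self-contained sketch of that decoding step:

import base64
import json

def split_by_action_type(records):
    # decode each record payload and bucket it by the action type marker
    items = [json.loads(base64.b64decode(r['data'])) for r in records]
    inserts = [e for e in items if e.get('__action_type') == 'INSERT']
    modifies = [e for e in items if e.get('__action_type') == 'MODIFY']
    return inserts, modifies

# usage with a fabricated record:
raw = base64.b64encode(json.dumps({'__action_type': 'INSERT'}).encode('utf-8'))
inserts, modifies = split_by_action_type([{'data': raw}])
assert len(inserts) == 1 and len(modifies) == 0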
Example #32
    def test_put_subscription_filter_firehose(
        self,
        logs_client,
        logs_log_group,
        logs_log_stream,
        s3_bucket,
        s3_client,
        firehose_client,
        iam_client,
        create_iam_role_with_policy,
    ):
        try:
            firehose_name = f"test-firehose-{short_uid()}"
            s3_bucket_arn = f"arn:aws:s3:::{s3_bucket}"

            role = f"test-firehose-s3-role-{short_uid()}"
            policy_name = f"test-firehose-s3-role-policy-{short_uid()}"
            role_arn = create_iam_role_with_policy(
                RoleName=role,
                PolicyName=policy_name,
                RoleDefinition=s3_firehose_role,
                PolicyDefinition=s3_firehose_permission,
            )

            # TODO AWS sometimes has trouble creating the delivery stream on the
            # first attempt (the IAM policy is not accepted right away), so we retry
            def create_delivery_stream():
                firehose_client.create_delivery_stream(
                    DeliveryStreamName=firehose_name,
                    S3DestinationConfiguration={
                        "BucketARN": s3_bucket_arn,
                        "RoleARN": role_arn,
                        "BufferingHints": {
                            "SizeInMBs": 1,
                            "IntervalInSeconds": 60
                        },
                    },
                )

            retry(create_delivery_stream, retries=5, sleep=10.0)

            response = firehose_client.describe_delivery_stream(
                DeliveryStreamName=firehose_name)
            firehose_arn = response["DeliveryStreamDescription"][
                "DeliveryStreamARN"]

            role = f"test-firehose-role-{short_uid()}"
            policy_name = f"test-firehose-role-policy-{short_uid()}"
            role_arn_logs = create_iam_role_with_policy(
                RoleName=role,
                PolicyName=policy_name,
                RoleDefinition=logs_role,
                PolicyDefinition=firehose_permission,
            )

            def check_stream_active():
                state = firehose_client.describe_delivery_stream(
                    DeliveryStreamName=firehose_name
                )["DeliveryStreamDescription"]["DeliveryStreamStatus"]
                if state != "ACTIVE":
                    raise Exception(f"DeliveryStreamStatus is {state}")

            retry(check_stream_active, retries=60, sleep=30.0)

            logs_client.put_subscription_filter(
                logGroupName=logs_log_group,
                filterName="Destination",
                filterPattern="",
                destinationArn=firehose_arn,
                roleArn=role_arn_logs,
            )

            logs_client.put_log_events(
                logGroupName=logs_log_group,
                logStreamName=logs_log_stream,
                logEvents=[
                    {
                        "timestamp": now_utc(millis=True),
                        "message": "test"
                    },
                    {
                        "timestamp": now_utc(millis=True),
                        "message": "test 2"
                    },
                ],
            )

            def list_objects():
                response = s3_client.list_objects(Bucket=s3_bucket)
                assert len(response["Contents"]) >= 1

            retry(list_objects, retries=60, sleep=30.0)
            response = s3_client.list_objects(Bucket=s3_bucket)
            key = response["Contents"][-1]["Key"]
            response = s3_client.get_object(Bucket=s3_bucket, Key=key)
            content = gzip.decompress(response["Body"].read()).decode("utf-8")
            assert "DATA_MESSAGE" in content
            assert "test" in content
            assert "test 2" in content

        finally:
            # clean up
            firehose_client.delete_delivery_stream(
                DeliveryStreamName=firehose_name, AllowForceDelete=True)
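
The read-back at the end of the try block works because CloudWatch Logs ships subscription data to Firehose as gzip-compressed batches whose messageType is DATA_MESSAGE. A standalone sketch of that step, assuming plain boto3 client creation rather than the fixtures used above; bucket and key are placeholders:

import gzip

import boto3

def read_delivered_batch(bucket, key):
    # fetch one delivered S3 object and decompress the log batch
    s3 = boto3.client('s3')
    body = s3.get_object(Bucket=bucket, Key=key)['Body'].read()
    content = gzip.decompress(body).decode('utf-8')
    # control records would contain CONTROL_MESSAGE instead
    assert 'DATA_MESSAGE' in content
    return content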
Example #33
    def test_put_subscription_filter_lambda(
        self,
        lambda_client,
        logs_client,
        logs_log_group,
        logs_log_stream,
        create_lambda_function,
        sts_client,
        snapshot,
    ):
        snapshot.add_transformer(snapshot.transform.lambda_api())
        # special replacements for this test case:
        snapshot.add_transformer(snapshot.transform.key_value("logGroupName"))
        snapshot.add_transformer(snapshot.transform.key_value("logStreamName"))
        snapshot.add_transformer(
            KeyValueBasedTransformer(
                lambda k, v: (
                    v if k == "eventId" and isinstance(v, str) and re.match(r"^[0-9]+$", v)
                    else None
                ),
                replacement="event_id",
                replace_reference=False,
            )
        )

        test_lambda_name = f"test-lambda-function-{short_uid()}"
        create_lambda_function(
            handler_file=TEST_LAMBDA_PYTHON3,
            libs=TEST_LAMBDA_LIBS,
            func_name=test_lambda_name,
            runtime=LAMBDA_RUNTIME_PYTHON36,
        )
        lambda_client.invoke(FunctionName=test_lambda_name, Payload=b"{}")
        # get account-id to set the correct policy
        account_id = sts_client.get_caller_identity()["Account"]
        result = lambda_client.add_permission(
            FunctionName=test_lambda_name,
            StatementId=test_lambda_name,
            Principal=f"logs.{config.DEFAULT_REGION}.amazonaws.com",
            Action="lambda:InvokeFunction",
            SourceArn=f"arn:aws:logs:{config.DEFAULT_REGION}:{account_id}:log-group:{logs_log_group}:*",
            SourceAccount=account_id,
        )

        snapshot.match("add_permission", result)

        result = logs_client.put_subscription_filter(
            logGroupName=logs_log_group,
            filterName="test",
            filterPattern="",
            destinationArn=aws_stack.lambda_function_arn(
                test_lambda_name,
                account_id=account_id,
                region_name=config.DEFAULT_REGION),
        )
        snapshot.match("put_subscription_filter", result)

        logs_client.put_log_events(
            logGroupName=logs_log_group,
            logStreamName=logs_log_stream,
            logEvents=[
                {
                    "timestamp": now_utc(millis=True),
                    "message": "test"
                },
                {
                    "timestamp": now_utc(millis=True),
                    "message": "test 2"
                },
            ],
        )

        response = logs_client.describe_subscription_filters(
            logGroupName=logs_log_group)
        assert len(response["subscriptionFilters"]) == 1
        snapshot.match("describe_subscription_filter", response)

        def check_invocation():
            events = testutil.list_all_log_events(
                log_group_name=logs_log_group, logs_client=logs_client)
            assert len(events) == 2
            events.sort(key=lambda k: k.get("message"))
            snapshot.match("list_all_log_events", events)
            assert isinstance(events[0]["eventId"], str)
            assert "test" == events[0]["message"]
            assert "test 2" in events[1]["message"]

        retry(check_invocation, retries=6, sleep=3.0)
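
The add_permission call in this test amounts to attaching a resource policy statement to the function so that the CloudWatch Logs service principal may invoke it. Spelled out as a policy document, the resulting statement looks roughly like the sketch below; region, account id, and names are placeholder values, and the exact condition keys may vary.

statement = {
    "Sid": "test-lambda-function-xyz",  # the StatementId passed above
    "Effect": "Allow",
    "Principal": {"Service": "logs.us-east-1.amazonaws.com"},
    "Action": "lambda:InvokeFunction",
    "Resource": "arn:aws:lambda:us-east-1:000000000000:function:test-lambda-function-xyz",
    "Condition": {
        "ArnLike": {"AWS:SourceArn": "arn:aws:logs:us-east-1:000000000000:log-group:<log-group>:*"},
        "StringEquals": {"AWS:SourceAccount": "000000000000"},
    },
}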
Example #34
    def test_kinesis_lambda_sns_ddb_sqs_streams(self):
        def create_kinesis_stream(name, delete=False):
            stream = aws_stack.create_kinesis_stream(name, delete=delete)
            stream.wait_for()

        ddb_lease_table_suffix = "-kclapp"
        table_name = TEST_TABLE_NAME + "klsdss" + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        lambda_stream_name = "lambda-stream-%s" % short_uid()
        lambda_queue_name = "lambda-queue-%s" % short_uid()
        lambda_ddb_name = "lambda-ddb-%s" % short_uid()
        queue_name = "queue-%s" % short_uid()
        dynamodb = aws_stack.connect_to_resource("dynamodb")
        dynamodb_service = aws_stack.connect_to_service("dynamodb")
        dynamodbstreams = aws_stack.connect_to_service("dynamodbstreams")
        kinesis = aws_stack.connect_to_service("kinesis")
        sns = aws_stack.connect_to_service("sns")
        sqs = aws_stack.connect_to_service("sqs")

        LOGGER.info("Creating test streams...")
        run_safe(
            lambda: dynamodb_service.delete_table(TableName=stream_name +
                                                  ddb_lease_table_suffix),
            print_error=False,
        )

        create_kinesis_stream(stream_name, delete=True)
        create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(
            stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix=ddb_lease_table_suffix,
        )

        LOGGER.info("Kinesis consumer initialized.")

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(
            table_name,
            partition_key=PARTITION_KEY,
            stream_view_type="NEW_AND_OLD_IMAGES",
        )

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams["Streams"]:
            if stream["TableName"] == table_name:
                ddb_event_source_arn = stream["StreamArn"]
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS)
        testutil.create_lambda_function(
            func_name=lambda_ddb_name,
            zip_file=zip_file,
            event_source_arn=ddb_event_source_arn,
            delete=True,
        )
        # make sure we cannot create a Lambda with the same name twice
        with self.assertRaises(Exception):
            testutil.create_lambda_function(
                func_name=lambda_ddb_name,
                zip_file=zip_file,
                event_source_arn=ddb_event_source_arn,
            )

        # deploy test lambda connected to Kinesis Stream
        kinesis_event_source_arn = kinesis.describe_stream(
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
        )["StreamDescription"]["StreamARN"]
        testutil.create_lambda_function(
            func_name=lambda_stream_name,
            zip_file=zip_file,
            event_source_arn=kinesis_event_source_arn,
        )

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(queue_name)
        testutil.create_lambda_function(
            func_name=lambda_queue_name,
            zip_file=zip_file,
            event_source_arn=sqs_queue_info["QueueArn"],
        )

        # set number of items to update/put to table
        num_events_ddb = 15
        num_put_new_items = 5
        num_put_existing_items = 2
        num_batch_items = 3
        num_updates_ddb = (num_events_ddb - num_put_new_items -
                           num_put_existing_items - num_batch_items)

        LOGGER.info("Putting %s items to table..." % num_events_ddb)
        table = dynamodb.Table(table_name)
        for i in range(0, num_put_new_items):
            table.put_item(Item={
                PARTITION_KEY: "testId%s" % i,
                "data": "foobar123"
            })
        # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
        for i in range(0, num_put_existing_items):
            table.put_item(Item={
                PARTITION_KEY: "testId%s" % i,
                "data": "foobar123_put_existing"
            })

        # batch write some items containing non-ASCII characters
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobar123 ✓"
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobar123 £"
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobar123 ¢"
                            }
                        }
                    },
                ]
            })
        # update some items, which also triggers notification events
        for i in range(0, num_updates_ddb):
            dynamodb_service.update_item(
                TableName=table_name,
                Key={PARTITION_KEY: {
                    "S": "testId%s" % i
                }},
                AttributeUpdates={
                    "data": {
                        "Action": "PUT",
                        "Value": {
                            "S": "foobar123_updated"
                        }
                    }
                },
            )

        # put items to stream
        num_events_kinesis = 1
        num_kinesis_records = 10
        LOGGER.info("Putting %s records in %s event to stream..." %
                    (num_kinesis_records, num_events_kinesis))
        kinesis.put_records(
            Records=[{
                "Data": "{}",
                "PartitionKey": "testId%s" % i
            } for i in range(0, num_kinesis_records)],
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME,
        )

        # put 1 item to stream that will trigger an error in the Lambda
        num_events_kinesis_err = 1
        for i in range(num_events_kinesis_err):
            kinesis.put_record(
                Data='{"%s": 1}' %
                lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
                PartitionKey="testIdError",
                StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME,
            )

        # create SNS topic, connect it to the Lambda, publish test messages
        num_events_sns = 3
        response = sns.create_topic(Name=TEST_TOPIC_NAME)
        sns.subscribe(
            TopicArn=response["TopicArn"],
            Protocol="lambda",
            Endpoint=aws_stack.lambda_function_arn(lambda_stream_name),
        )
        for i in range(num_events_sns):
            sns.publish(
                TopicArn=response["TopicArn"],
                Subject="test_subject",
                Message="test message %s" % i,
            )

        # get latest records
        latest = aws_stack.kinesis_get_latest_records(
            TEST_LAMBDA_SOURCE_STREAM_NAME,
            shard_id="shardId-000000000000",
            count=10)
        self.assertEqual(10, len(latest))

        # send messages to SQS queue
        num_events_sqs = 4
        for i in range(num_events_sqs):
            sqs.send_message(QueueUrl=sqs_queue_info["QueueUrl"],
                             MessageBody=str(i))

        LOGGER.info("Waiting some time before finishing test.")
        time.sleep(2)

        num_events_lambda = num_events_ddb + num_events_sns + num_events_sqs
        num_events = num_events_lambda + num_kinesis_records

        def check_events():
            if len(events) != num_events:
                msg = "DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s" % (
                    len(events),
                    num_events,
                )
                LOGGER.warning(msg)
            self.assertEqual(num_events, len(events))
            event_items = [
                json.loads(base64.b64decode(e["data"])) for e in events
            ]
            # make sure we have the right amount of INSERT/MODIFY event types
            inserts = [
                e for e in event_items if e.get("__action_type") == "INSERT"
            ]
            modifies = [
                e for e in event_items if e.get("__action_type") == "MODIFY"
            ]
            self.assertEqual(num_put_new_items + num_batch_items, len(inserts))
            self.assertEqual(num_put_existing_items + num_updates_ddb,
                             len(modifies))

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=15, sleep=2)

        # check cloudwatch notifications
        def check_cw_invocations():
            num_invocations = get_lambda_invocations_count(lambda_stream_name)
            expected_invocation_count = num_events_kinesis + num_events_kinesis_err + num_events_sns
            self.assertEqual(expected_invocation_count, num_invocations)
            num_error_invocations = get_lambda_invocations_count(
                lambda_stream_name, "Errors")
            self.assertEqual(num_events_kinesis_err, num_error_invocations)

        # Lambda invocations are running asynchronously, hence sleep some time here to wait for results
        retry(check_cw_invocations, retries=7, sleep=2)

        # clean up
        testutil.delete_lambda_function(lambda_stream_name)
        testutil.delete_lambda_function(lambda_ddb_name)
        testutil.delete_lambda_function(lambda_queue_name)
        sqs.delete_queue(QueueUrl=sqs_queue_info["QueueUrl"])