Example #1
    def test_kinesis_lambda_sns_ddb_sqs_streams(self):
        ddb_lease_table_suffix = '-kclapp'
        table_name = TEST_TABLE_NAME + 'klsdss' + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        dynamodb = aws_stack.connect_to_resource('dynamodb')
        dynamodb_service = aws_stack.connect_to_service('dynamodb')
        dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
        kinesis = aws_stack.connect_to_service('kinesis')
        sns = aws_stack.connect_to_service('sns')
        sqs = aws_stack.connect_to_service('sqs')

        LOGGER.info('Creating test streams...')
        run_safe(lambda: dynamodb_service.delete_table(TableName=stream_name +
                                                       ddb_lease_table_suffix),
                 print_error=False)
        aws_stack.create_kinesis_stream(stream_name, delete=True)
        aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(
            stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix=ddb_lease_table_suffix)

        LOGGER.info('Kinesis consumer initialized.')

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(table_name,
                                        partition_key=PARTITION_KEY,
                                        stream_view_type='NEW_AND_OLD_IMAGES')

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams['Streams']:
            if stream['TableName'] == table_name:
                ddb_event_source_arn = stream['StreamArn']
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
                                        zip_file=zip_file,
                                        event_source_arn=ddb_event_source_arn,
                                        runtime=LAMBDA_RUNTIME_PYTHON27,
                                        delete=True)
        # make sure we cannot create a Lambda with the same name twice
        assert_raises(Exception,
                      testutil.create_lambda_function,
                      func_name=TEST_LAMBDA_NAME_DDB,
                      zip_file=zip_file,
                      event_source_arn=ddb_event_source_arn,
                      runtime=LAMBDA_RUNTIME_PYTHON27)

        # deploy test lambda connected to Kinesis Stream
        kinesis_event_source_arn = kinesis.describe_stream(
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
        )['StreamDescription']['StreamARN']
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_STREAM,
            zip_file=zip_file,
            event_source_arn=kinesis_event_source_arn,
            runtime=LAMBDA_RUNTIME_PYTHON27)

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(TEST_LAMBDA_NAME_QUEUE)
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_QUEUE,
            zip_file=zip_file,
            event_source_arn=sqs_queue_info['QueueArn'],
            runtime=LAMBDA_RUNTIME_PYTHON27)

        # set number of items to update/put to table
        num_events_ddb = 15
        num_put_new_items = 5
        num_put_existing_items = 2
        num_batch_items = 3
        num_updates_ddb = num_events_ddb - num_put_new_items - num_put_existing_items - num_batch_items
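        # i.e. 15 - 5 - 2 - 3 = 5 update calls; the four operation counts sum to num_events_ddb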

        LOGGER.info('Putting %s items to table...' % num_events_ddb)
        table = dynamodb.Table(table_name)
        for i in range(0, num_put_new_items):
            table.put_item(Item={
                PARTITION_KEY: 'testId%s' % i,
                'data': 'foobar123'
            })
        # Put items with an already existing ID (regression test for https://github.com/localstack/localstack/issues/522)
        for i in range(0, num_put_existing_items):
            table.put_item(Item={
                PARTITION_KEY: 'testId%s' % i,
                'data': 'foobar123_put_existing'
            })

        # batch write some items containing non-ASCII characters
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [{
                    'PutRequest': {
                        'Item': {
                            PARTITION_KEY: short_uid(),
                            'data': 'foobar123 ✓'
                        }
                    }
                }, {
                    'PutRequest': {
                        'Item': {
                            PARTITION_KEY: short_uid(),
                            'data': 'foobar123 £'
                        }
                    }
                }, {
                    'PutRequest': {
                        'Item': {
                            PARTITION_KEY: short_uid(),
                            'data': 'foobar123 ¢'
                        }
                    }
                }]
            })
        # update some items, which also triggers notification events
        for i in range(0, num_updates_ddb):
            dynamodb_service.update_item(
                TableName=table_name,
                Key={PARTITION_KEY: {
                    'S': 'testId%s' % i
                }},
                AttributeUpdates={
                    'data': {
                        'Action': 'PUT',
                        'Value': {
                            'S': 'foobar123_updated'
                        }
                    }
                })

        # put items to stream
        num_events_kinesis = 10
        LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
        kinesis.put_records(Records=[{
            'Data': '{}',
            'PartitionKey': 'testId%s' % i
        } for i in range(0, num_events_kinesis)],
                            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

        # put 1 item to stream that will trigger an error in the Lambda
        kinesis.put_record(Data='{"%s": 1}' %
                           lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
                           PartitionKey='testIderror',
                           StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

        # create SNS topic, connect it to the Lambda, publish test messages
        num_events_sns = 3
        response = sns.create_topic(Name=TEST_TOPIC_NAME)
        sns.subscribe(
            TopicArn=response['TopicArn'],
            Protocol='lambda',
            Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
        for i in range(0, num_events_sns):
            sns.publish(TopicArn=response['TopicArn'],
                        Message='test message %s' % i)

        # get latest records
        latest = aws_stack.kinesis_get_latest_records(
            TEST_LAMBDA_SOURCE_STREAM_NAME,
            shard_id='shardId-000000000000',
            count=10)
        self.assertEqual(len(latest), 10)

        # send messages to SQS queue
        num_events_sqs = 4
        for i in range(num_events_sqs):
            sqs.send_message(QueueUrl=sqs_queue_info['QueueUrl'],
                             MessageBody=str(i))

        LOGGER.info('Waiting some time before finishing test.')
        time.sleep(2)

        num_events_lambda = num_events_ddb + num_events_sns + num_events_sqs
        num_events = num_events_lambda + num_events_kinesis
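        # the deployed Lambdas forward each event they receive back into `stream_name`,
        # so the KCL listener should collect 15 DDB + 3 SNS + 4 SQS + 10 Kinesis = 32 events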

        def check_events():
            if len(events) != num_events:
                LOGGER.warning((
                    'DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s'
                ) % (len(events), num_events))
            self.assertEqual(len(events), num_events)
            event_items = [
                json.loads(base64.b64decode(e['data'])) for e in events
            ]
            # make sure that we have the right number of INSERT/MODIFY event types
            inserts = [
                e for e in event_items if e.get('__action_type') == 'INSERT'
            ]
            modifies = [
                e for e in event_items if e.get('__action_type') == 'MODIFY'
            ]
            self.assertEqual(len(inserts), num_put_new_items + num_batch_items)
            self.assertEqual(len(modifies),
                             num_put_existing_items + num_updates_ddb)

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=9, sleep=3)

        # check cloudwatch notifications
        num_invocations = get_lambda_invocations_count(TEST_LAMBDA_NAME_STREAM)
        # TODO: It seems that CloudWatch is currently reporting an incorrect number of
        #   invocations, namely the sum over *all* lambdas, not the single one we're asking for.
        #   Also, we need to bear in mind that Kinesis may perform batch updates, i.e., a single
        #   Lambda invocation may happen with a set of Kinesis records, hence we cannot simply
        #   add num_events_ddb to num_events_lambda above!
        # self.assertEqual(num_invocations, 2 + num_events_lambda)
        self.assertGreater(num_invocations, num_events_sns + num_events_sqs)
        num_error_invocations = get_lambda_invocations_count(
            TEST_LAMBDA_NAME_STREAM, 'Errors')
        self.assertEqual(num_error_invocations, 1)

        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_STREAM)
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_DDB)
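
The `retry(check_events, retries=9, sleep=3)` call above simply polls the assertion function until it stops raising. A minimal sketch of such a polling helper, assuming semantics along the lines of LocalStack's `retry` utility (the body below is an illustration, not the library's actual implementation):

    import time

    def retry(function, retries=3, sleep=1.0, **kwargs):
        # call `function` up to `retries` times, sleeping between failed
        # attempts; re-raise the last error if no attempt succeeds
        last_error = None
        for _ in range(retries):
            try:
                return function(**kwargs)
            except Exception as e:
                last_error = e
                time.sleep(sleep)
        raise last_error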
Example #2
    def test_sqs_batch_lambda_forward(self):
        sqs = aws_stack.connect_to_service('sqs')
        lambda_api = aws_stack.connect_to_service('lambda')

        lambda_name_queue_batch = 'lambda_queue_batch-%s' % short_uid()

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(lambda_name_queue_batch)
        queue_url = sqs_queue_info['QueueUrl']
        resp = testutil.create_lambda_function(
            handler_file=TEST_LAMBDA_PYTHON_ECHO,
            func_name=lambda_name_queue_batch,
            event_source_arn=sqs_queue_info['QueueArn'],
            runtime=LAMBDA_RUNTIME_PYTHON27,
            libs=TEST_LAMBDA_LIBS)

        event_source_id = resp['CreateEventSourceMappingResponse']['UUID']
        lambda_api.update_event_source_mapping(UUID=event_source_id,
                                               BatchSize=5)
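        # BatchSize caps how many SQS messages are handed to the Lambda per
        # invocation; with 11 messages and BatchSize=5 we expect ceil(11/5) = 3 batches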

        messages_to_send = [{
            'Id': 'message{:02d}'.format(i),
            'MessageBody': 'msgBody{:02d}'.format(i),
            'MessageAttributes': {
                'CustomAttribute': {
                    'DataType': 'String',
                    'StringValue': 'CustomAttributeValue{:02d}'.format(i)
                }
            }
        } for i in range(1, 12)]

        # send 11 messages (which should get split into 3 batches)
        sqs.send_message_batch(QueueUrl=queue_url,
                               Entries=messages_to_send[:10])
        sqs.send_message(
            QueueUrl=queue_url,
            MessageBody=messages_to_send[10]['MessageBody'],
            MessageAttributes=messages_to_send[10]['MessageAttributes'])

        def wait_for_done():
            attributes = sqs.get_queue_attributes(
                QueueUrl=queue_url,
                AttributeNames=[
                    'ApproximateNumberOfMessages',
                    'ApproximateNumberOfMessagesDelayed',
                    'ApproximateNumberOfMessagesNotVisible'
                ],
            )['Attributes']
            msg_count = int(attributes.get('ApproximateNumberOfMessages'))
            self.assertEqual(msg_count, 0, 'expecting queue to be empty')

            delayed_count = int(
                attributes.get('ApproximateNumberOfMessagesDelayed'))
            if delayed_count != 0:
                LOGGER.warning(
                    'SQS delayed message count (actual/expected): %s/%s' %
                    (delayed_count, 0))

            not_visible_count = int(
                attributes.get('ApproximateNumberOfMessagesNotVisible'))
            if not_visible_count != 0:
                LOGGER.warning(
                    'SQS messages not visible (actual/expected): %s/%s' %
                    (not_visible_count, 0))

            self.assertEqual(delayed_count, 0, 'no messages waiting for retry')
            self.assertEqual(delayed_count + not_visible_count, 0,
                             'no in flight messages')

        # wait for the queue to drain (max 60s)
        retry(wait_for_done, retries=12, sleep=5.0)

        events = get_lambda_log_events(lambda_name_queue_batch, 10)
        self.assertEqual(len(events), 3, 'expected 3 lambda invocations')

        testutil.delete_lambda_function(lambda_name_queue_batch)
        sqs.delete_queue(QueueUrl=queue_url)
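
After `update_event_source_mapping`, the new batch size can be read back through the standard `get_event_source_mapping` API; a small usage sketch reusing `lambda_api` and `event_source_id` from the example above:

    mapping = lambda_api.get_event_source_mapping(UUID=event_source_id)
    assert mapping['BatchSize'] == 5  # 11 messages in batches of 5 -> 3 invocations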
Example #3
    def test_kinesis_lambda_sns_ddb_sqs_streams(self):
        def create_kinesis_stream(name, delete=False):
            stream = aws_stack.create_kinesis_stream(name, delete=delete)
            stream.wait_for()

        ddb_lease_table_suffix = "-kclapp"
        table_name = TEST_TABLE_NAME + "klsdss" + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        lambda_stream_name = "lambda-stream-%s" % short_uid()
        lambda_queue_name = "lambda-queue-%s" % short_uid()
        lambda_ddb_name = "lambda-ddb-%s" % short_uid()
        queue_name = "queue-%s" % short_uid()
        dynamodb = aws_stack.connect_to_resource("dynamodb")
        dynamodb_service = aws_stack.create_external_boto_client("dynamodb")
        dynamodbstreams = aws_stack.create_external_boto_client(
            "dynamodbstreams")
        kinesis = aws_stack.create_external_boto_client("kinesis")
        sns = aws_stack.create_external_boto_client("sns")
        sqs = aws_stack.create_external_boto_client("sqs")

        LOGGER.info("Creating test streams...")
        run_safe(
            lambda: dynamodb_service.delete_table(TableName=stream_name +
                                                  ddb_lease_table_suffix),
            print_error=False,
        )

        create_kinesis_stream(stream_name, delete=True)
        create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            records = [
                json.loads(base64.b64decode(r["data"])) if r.get("data") else r
                for r in records
            ]
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(
            stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix=ddb_lease_table_suffix,
        )

        LOGGER.info("Kinesis consumer initialized.")

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(
            table_name,
            partition_key=PARTITION_KEY,
            stream_view_type="NEW_AND_OLD_IMAGES",
        )

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams["Streams"]:
            if stream["TableName"] == table_name:
                ddb_event_source_arn = stream["StreamArn"]
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS)
        testutil.create_lambda_function(
            func_name=lambda_ddb_name,
            zip_file=zip_file,
            event_source_arn=ddb_event_source_arn,
            delete=True,
        )
        # make sure we cannot create a Lambda with the same name twice
        with self.assertRaises(Exception):
            testutil.create_lambda_function(
                func_name=lambda_ddb_name,
                zip_file=zip_file,
                event_source_arn=ddb_event_source_arn,
            )

        # deploy test lambda connected to Kinesis Stream
        kinesis_event_source_arn = kinesis.describe_stream(
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
        )["StreamDescription"]["StreamARN"]
        testutil.create_lambda_function(
            func_name=lambda_stream_name,
            zip_file=zip_file,
            event_source_arn=kinesis_event_source_arn,
        )

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(queue_name)
        testutil.create_lambda_function(
            func_name=lambda_queue_name,
            zip_file=zip_file,
            event_source_arn=sqs_queue_info["QueueArn"],
        )

        # set number of items to update/put to table
        num_events_ddb = 15
        num_put_new_items = 5
        num_put_existing_items = 2
        num_batch_items = 3
        num_updates_ddb = (num_events_ddb - num_put_new_items -
                           num_put_existing_items - num_batch_items)
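        # 15 - 5 - 2 - 3 = 5 update calls, so all four operation types add up to num_events_ddb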

        LOGGER.info("Putting %s items to table...", num_events_ddb)
        table = dynamodb.Table(table_name)
        for i in range(0, num_put_new_items):
            table.put_item(Item={
                PARTITION_KEY: f"testId{i}",
                "data": "foobar123"
            })
        # Put items with an already existing ID (regression test for https://github.com/localstack/localstack/issues/522)
        for i in range(0, num_put_existing_items):
            table.put_item(Item={
                PARTITION_KEY: f"testId{i}",
                "data": "foobar_put_existing"
            })

        # batch write some items containing non-ASCII characters
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobaz123 ✓"
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobaz123 £"
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobaz123 ¢"
                            }
                        }
                    },
                ]
            })
        # update some items, which also triggers notification events
        for i in range(0, num_updates_ddb):
            dynamodb_service.update_item(
                TableName=table_name,
                Key={PARTITION_KEY: {
                    "S": f"testId{i}"
                }},
                AttributeUpdates={
                    "data": {
                        "Action": "PUT",
                        "Value": {
                            "S": "foo_updated"
                        }
                    }
                },
            )

        # put items to stream
        num_events_kinesis = 1
        num_kinesis_records = 10
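        # all 10 records go out in a single put_records call, so the Kinesis
        # event source may deliver them to the Lambda as one batched invocation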
        LOGGER.info("Putting %s records in %s event to stream...",
                    num_kinesis_records, num_events_kinesis)
        kinesis.put_records(
            Records=[{
                "Data": "{}",
                "PartitionKey": f"testId{i}"
            } for i in range(0, num_kinesis_records)],
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME,
        )

        # put 1 item to stream that will trigger an error in the Lambda
        num_events_kinesis_err = 1
        for i in range(num_events_kinesis_err):
            kinesis.put_record(
                Data='{"%s": 1}' %
                lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
                PartitionKey="testIdError",
                StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME,
            )

        # create SNS topic, connect it to the Lambda, publish test messages
        num_events_sns = 3
        response = sns.create_topic(Name=TEST_TOPIC_NAME)
        sns.subscribe(
            TopicArn=response["TopicArn"],
            Protocol="lambda",
            Endpoint=aws_stack.lambda_function_arn(lambda_stream_name),
        )
        for i in range(num_events_sns):
            sns.publish(
                TopicArn=response["TopicArn"],
                Subject="test_subject",
                Message=f"test message {i}",
            )

        # get latest records
        latest = aws_stack.kinesis_get_latest_records(
            TEST_LAMBDA_SOURCE_STREAM_NAME,
            shard_id="shardId-000000000000",
            count=10)
        self.assertEqual(10, len(latest))

        # send messages to SQS queue
        num_events_sqs = 4
        for i in range(num_events_sqs):
            sqs.send_message(QueueUrl=sqs_queue_info["QueueUrl"],
                             MessageBody=str(i))

        LOGGER.info("Waiting some time before finishing test.")
        time.sleep(2)

        num_events_lambda = num_events_ddb + num_events_sns + num_events_sqs
        num_events = num_events_lambda + num_kinesis_records
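        # the Lambdas forward their events into `stream_name`; the listener should
        # see 15 DDB + 3 SNS + 4 SQS + 10 Kinesis records = 32 decoded events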

        def check_events():
            if len(events) != num_events:
                msg = "DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s" % (
                    len(events),
                    num_events,
                )
                LOGGER.warning(msg)
            self.assertEqual(num_events, len(events))
            # make sure that we have the right number of INSERT/MODIFY event types
            inserts = [e for e in events if e.get("__action_type") == "INSERT"]
            modifies = [
                e for e in events if e.get("__action_type") == "MODIFY"
            ]
            self.assertEqual(num_put_new_items + num_batch_items, len(inserts))
            self.assertEqual(num_put_existing_items + num_updates_ddb,
                             len(modifies))

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=15, sleep=2)

        # check cloudwatch notifications
        def check_cw_invocations():
            num_invocations = get_lambda_invocations_count(lambda_stream_name)
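            # expected: one batched invocation for the ten-record put_records call,
            # one for the error record, and one per SNS message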
            expected_invocation_count = num_events_kinesis + num_events_kinesis_err + num_events_sns
            self.assertEqual(expected_invocation_count, num_invocations)
            num_error_invocations = get_lambda_invocations_count(
                lambda_stream_name, "Errors")
            self.assertEqual(num_events_kinesis_err, num_error_invocations)

        # Lambda invocations are running asynchronously, hence sleep some time here to wait for results
        retry(check_cw_invocations, retries=7, sleep=2)

        # clean up
        testutil.delete_lambda_function(lambda_stream_name)
        testutil.delete_lambda_function(lambda_ddb_name)
        testutil.delete_lambda_function(lambda_queue_name)
        sqs.delete_queue(QueueUrl=sqs_queue_info["QueueUrl"])
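
All of these tests rely on the deployed test Lambda forwarding every event it receives into the Kinesis stream consumed by the KCL listener, which is how the DynamoDB, SNS, and SQS events all end up in `events`. A minimal sketch of such a forwarding handler (the stream name here is hypothetical; in these tests the actual handler is `lambda_integration.py`):

    import json

    import boto3

    kinesis = boto3.client("kinesis")
    TARGET_STREAM = "test-stream"  # hypothetical; the tests forward to TEST_STREAM_NAME

    def handler(event, context):
        # forward every incoming record into the stream watched by the KCL listener
        records = event.get("Records", [])
        for record in records:
            kinesis.put_record(
                StreamName=TARGET_STREAM,
                Data=json.dumps(record).encode("utf-8"),
                PartitionKey="forwarded",
            )
        return {"forwarded": len(records)}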
Example #4
def test_sqs_batch_lambda_forward(lambda_client, sqs_client,
                                  create_lambda_function):

    lambda_name_queue_batch = "lambda_queue_batch-%s" % short_uid()

    # deploy test lambda connected to SQS queue
    sqs_queue_info = testutil.create_sqs_queue(lambda_name_queue_batch)
    queue_url = sqs_queue_info["QueueUrl"]
    resp = create_lambda_function(
        handler_file=TEST_LAMBDA_PYTHON_ECHO,
        func_name=lambda_name_queue_batch,
        event_source_arn=sqs_queue_info["QueueArn"],
        libs=TEST_LAMBDA_LIBS,
    )

    event_source_id = resp["CreateEventSourceMappingResponse"]["UUID"]
    lambda_client.update_event_source_mapping(UUID=event_source_id,
                                              BatchSize=5)

    messages_to_send = [{
        "Id": "message{:02d}".format(i),
        "MessageBody": "msgBody{:02d}".format(i),
        "MessageAttributes": {
            "CustomAttribute": {
                "DataType": "String",
                "StringValue": "CustomAttributeValue{:02d}".format(i),
            }
        },
    } for i in range(1, 12)]

    # send 11 messages (which should get split into 3 batches)
    sqs_client.send_message_batch(QueueUrl=queue_url,
                                  Entries=messages_to_send[:10])
    sqs_client.send_message(
        QueueUrl=queue_url,
        MessageBody=messages_to_send[10]["MessageBody"],
        MessageAttributes=messages_to_send[10]["MessageAttributes"],
    )

    def wait_for_done():
        attributes = sqs_client.get_queue_attributes(
            QueueUrl=queue_url,
            AttributeNames=[
                "ApproximateNumberOfMessages",
                "ApproximateNumberOfMessagesDelayed",
                "ApproximateNumberOfMessagesNotVisible",
            ],
        )["Attributes"]
        msg_count = int(attributes.get("ApproximateNumberOfMessages"))
        assert 0 == msg_count, "expecting queue to be empty"

        delayed_count = int(
            attributes.get("ApproximateNumberOfMessagesDelayed"))
        if delayed_count != 0:
            LOGGER.warning(
                "SQS delayed message count (actual/expected): %s/%s",
                delayed_count, 0)

        not_visible_count = int(
            attributes.get("ApproximateNumberOfMessagesNotVisible"))
        if not_visible_count != 0:
            LOGGER.warning("SQS messages not visible (actual/expected): %s/%s",
                           not_visible_count, 0)

        assert 0 == delayed_count, "no messages waiting for retry"
        assert 0 == delayed_count + not_visible_count, "no in flight messages"

    # wait for the queue to drain (max 60s)
    retry(wait_for_done, retries=12, sleep=5.0)

    def check_lambda_logs():
        events = get_lambda_log_events(lambda_name_queue_batch, 10)
        assert 3 == len(events), "expected 3 lambda invocations"

    retry(check_lambda_logs, retries=5, sleep=3)

    sqs_client.delete_queue(QueueUrl=queue_url)
Example #5
    def test_sqs_batch_lambda_forward(self):
        sqs = aws_stack.connect_to_service('sqs')
        lambda_api = aws_stack.connect_to_service('lambda')

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(
            TEST_LAMBDA_NAME_QUEUE_BATCH)
        queue_url = sqs_queue_info['QueueUrl']
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON_ECHO),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27)
        resp = testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_QUEUE_BATCH,
            zip_file=zip_file,
            event_source_arn=sqs_queue_info['QueueArn'],
            runtime=LAMBDA_RUNTIME_PYTHON27)

        event_source_id = resp['CreateEventSourceMappingResponse']['UUID']
        lambda_api.update_event_source_mapping(UUID=event_source_id,
                                               BatchSize=5)

        messages_to_send = [{
            'Id': 'message{:02d}'.format(i),
            'MessageBody': 'msgBody{:02d}'.format(i),
            'MessageAttributes': {
                'CustomAttribute': {
                    'DataType': 'String',
                    'StringValue': 'CustomAttributeValue{:02d}'.format(i)
                }
            }
        } for i in range(1, 12)]

        start_time = datetime.now()
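        # `start_time` scopes the CloudWatch invocation-count query below to this test run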

        # send 11 messages (which should get split into 3 batches)
        sqs.send_message_batch(QueueUrl=queue_url,
                               Entries=messages_to_send[:10])
        sqs.send_message(
            QueueUrl=queue_url,
            MessageBody=messages_to_send[10]['MessageBody'],
            MessageAttributes=messages_to_send[10]['MessageAttributes'])

        def wait_for_done():
            attributes = sqs.get_queue_attributes(
                QueueUrl=queue_url,
                AttributeNames=[
                    'ApproximateNumberOfMessages',
                    'ApproximateNumberOfMessagesDelayed',
                    'ApproximateNumberOfMessagesNotVisible'
                ],
            )['Attributes']
            msg_count = int(attributes.get('ApproximateNumberOfMessages'))
            self.assertEqual(msg_count, 0, 'expecting queue to be empty')

            delayed_count = int(
                attributes.get('ApproximateNumberOfMessagesDelayed'))
            if delayed_count != 0:
                LOGGER.warning(
                    ('SQS delayed message count (actual/expected): %s/%s') %
                    (delayed_count, 0))

            not_visible_count = int(
                attributes.get('ApproximateNumberOfMessagesNotVisible'))
            if not_visible_count != 0:
                LOGGER.warning(
                    ('SQS messages not visible (actual/expected): %s/%s') %
                    (not_visible_count, 0))

            invocation_count = get_lambda_invocations_count(
                TEST_LAMBDA_NAME_QUEUE_BATCH,
                period=120,
                start_time=start_time,
                end_time=datetime.now())
            if invocation_count != 3:
                LOGGER.warning(
                    ('Lambda invocations (actual/expected): %s/%s') %
                    (invocation_count, 3))

            self.assertEqual(delayed_count, 0, 'no messages waiting for retry')
            self.assertEqual(delayed_count + not_visible_count, 0,
                             'no in flight messages')
            self.assertEqual(invocation_count, 3,
                             'expected 3 lambda invocations')

        # wait for the queue to drain (max 90s)
        retry(wait_for_done, retries=18, sleep=5.0)

        testutil.delete_lambda_function(TEST_LAMBDA_NAME_QUEUE_BATCH)
        sqs.delete_queue(QueueUrl=queue_url)