Example #1
    def handle_request(s_src, thread):
        s_dst = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s_dst.connect(ip_to_tuple(dst))

        sockets = [s_src, s_dst]

        try:
            while thread.running:
                # block until one of the two sockets has data to read
                s_read, _, _ = select.select(sockets, [], [])

                for s in s_read:
                    data = s.recv(BUFFER_SIZE)
                    # an empty read means the peer closed the connection
                    if data in [b"", "", None]:
                        return

                    if s == s_src:
                        forward, response = data, None
                        if handler:
                            forward, response = handler(data)
                        if forward is not None:
                            s_dst.sendall(forward)
                        elif response is not None:
                            s_src.sendall(response)
                            return
                    elif s == s_dst:
                        s_src.sendall(data)
        finally:
            run_safe(s_src.close)
            run_safe(s_dst.close)
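
All of these snippets lean on run_safe. Judging from the call sites in this collection (positional argument passthrough, keyword passthrough such as TableName=..., a print_error flag, and a None result on failure), a minimal sketch of such a helper might look as follows; this is an assumption for readability, not the actual LocalStack implementation:

def run_safe(func, *args, print_error=True, **kwargs):
    # Hypothetical sketch: call func, swallow any exception, and return the
    # result -- or None (implicitly) if the call raised.
    try:
        return func(*args, **kwargs)
    except Exception as e:
        if print_error:
            print("Error in run_safe: %s" % e)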
Example #2
def remove_nones(o, **kwargs):
    if isinstance(o, dict):
        # iterate over a copy, since we mutate the dict while looping
        for k, v in dict(o).items():
            if v is None:
                o.pop(k)
    if isinstance(o, list):
        # note: list.remove() drops only the first matching occurrence
        common.run_safe(o.remove, None)
        common.run_safe(o.remove, PLACEHOLDER_AWS_NO_VALUE)
    return o
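
A quick usage sketch (this assumes the module-level common.run_safe and PLACEHOLDER_AWS_NO_VALUE from the excerpt's context). Note that the list branch removes at most one None, because list.remove() only drops the first match:

print(remove_nones({"a": 1, "b": None, "c": 3}))  # {'a': 1, 'c': 3}
print(remove_nones([1, None, 2, None]))           # [1, 2, None] -- the second None survives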
Example #3
def create_kinesis_stream(stream_name, shards=1, env=None, delete=False):
    env = get_environment(env)
    stream = KinesisStream(id=stream_name, num_shards=shards)
    conn = connect_to_service("kinesis", env=env)
    stream.connect(conn)
    if delete:
        run_safe(lambda: stream.destroy(), print_error=False)
    stream.create()
    # Note: Returning the stream without awaiting its creation (via wait_for()) to avoid API call timeouts/retries.
    return stream
Example #4
def create_kinesis_stream(stream_name, shards=1, env=None, delete=False):
    env = get_environment(env)
    # stream
    stream = KinesisStream(id=stream_name, num_shards=shards)
    conn = connect_to_service('kinesis', env=env)
    stream.connect(conn)
    if delete:
        run_safe(lambda: stream.destroy(), print_error=False)
    stream.create()
    stream.wait_for()
    return stream
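
This variant differs from Example #3 only in calling stream.wait_for(), which presumably blocks until the stream is usable. A hedged sketch of such a wait, built on the standard Kinesis DescribeStream API (a hypothetical helper, not the KinesisStream method itself):

import time

def wait_for_stream_active(conn, stream_name, retries=30, sleep=1):
    # Poll DescribeStream until the stream reports ACTIVE status.
    for _ in range(retries):
        description = conn.describe_stream(StreamName=stream_name)
        if description["StreamDescription"]["StreamStatus"] == "ACTIVE":
            return
        time.sleep(sleep)
    raise Exception("Stream %s did not become ACTIVE in time" % stream_name)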
Example #5
def get_template_body(req_data):
    body = req_data.get("TemplateBody")
    if body:
        return body
    url = req_data.get("TemplateURL")
    if url:
        response = run_safe(lambda: safe_requests.get(url, verify=False))
        # check for error codes (including 301 redirects) - fixes https://github.com/localstack/localstack/issues/1884
        status_code = 0 if response is None else response.status_code
        if response is None or status_code == 301 or status_code >= 400:
            # check if this is an S3 URL, then get the file directly from there
            url = convert_s3_to_local_url(url)
            if is_local_service_url(url):
                parsed_path = urlparse.urlparse(url).path.lstrip("/")
                parts = parsed_path.partition("/")
                client = aws_stack.connect_to_service("s3")
                LOG.debug(
                    "Download CloudFormation template content from local S3: %s - %s"
                    % (parts[0], parts[2]))
                result = client.get_object(Bucket=parts[0], Key=parts[2])
                body = to_str(result["Body"].read())
                return body
            raise Exception(
                "Unable to fetch template body (code %s) from URL %s" %
                (status_code, url))
        return response.content
    raise Exception("Unable to get template body from input: %s" % req_data)
Example #6
    def test_adding_fallback_function_name_in_headers(self):
        lambda_client = aws_stack.create_external_boto_client("lambda")
        ddb_client = aws_stack.create_external_boto_client("dynamodb")

        db_table = "lambda-records"
        config.LAMBDA_FALLBACK_URL = "dynamodb://%s" % db_table

        lambda_client.invoke(
            FunctionName="non-existing-lambda",
            Payload=b"{}",
            InvocationType="RequestResponse",
        )

        result = run_safe(ddb_client.scan, TableName=db_table)
        self.assertEqual("non-existing-lambda", result["Items"][0]["function_name"]["S"])
Example #7
    def test_adding_fallback_function_name_in_headers(self):

        lambda_client = aws_stack.connect_to_service('lambda')
        ddb_client = aws_stack.connect_to_service('dynamodb')

        db_table = 'lambda-records'
        config.LAMBDA_FALLBACK_URL = 'dynamodb://%s' % db_table

        lambda_client.invoke(FunctionName='non-existing-lambda',
                             Payload=b'{}',
                             InvocationType='RequestResponse')

        result = run_safe(ddb_client.scan, TableName=db_table)
        self.assertEqual(result['Items'][0]['function_name']['S'],
                         'non-existing-lambda')
Example #8
def get_template_body(req_data):
    body = req_data.get('TemplateBody')
    if body:
        return body
    url = req_data.get('TemplateURL')
    if url:
        response = run_safe(lambda: safe_requests.get(url, verify=False))
        # run_safe returns None if the request itself raised, so derive the
        # status code defensively before it is used in the error message below
        status_code = 0 if response is None else response.status_code
        if response is None or status_code >= 400:
            # check if this is an S3 URL, then get the file directly from there
            if '://localhost' in url or re.match(r'.*s3(\-website)?\.([^\.]+\.)?amazonaws.com.*', url):
                parsed_path = urlparse.urlparse(url).path.lstrip('/')
                parts = parsed_path.partition('/')
                client = aws_stack.connect_to_service('s3')
                result = client.get_object(Bucket=parts[0], Key=parts[2])
                body = to_str(result['Body'].read())
                return body
            raise Exception('Unable to fetch template body (code %s) from URL %s' % (status_code, url))
        return response.content
    raise Exception('Unable to get template body from input: %s' % req_data)
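
The S3-URL detection regex in this older variant is easy to misread; a few illustrative matches:

import re

S3_URL_PATTERN = r'.*s3(\-website)?\.([^\.]+\.)?amazonaws.com.*'
print(bool(re.match(S3_URL_PATTERN, 'https://my-bucket.s3.amazonaws.com/tmpl.yaml')))   # True
print(bool(re.match(S3_URL_PATTERN, 'https://s3.us-east-1.amazonaws.com/bucket/key')))  # True
print(bool(re.match(S3_URL_PATTERN, 'https://example.com/template.yaml')))              # False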
Example #9
def test_kinesis_lambda_sns_ddb_streams():

    ddb_lease_table_suffix = '-kclapp'
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    dynamodb_service = aws_stack.connect_to_service('dynamodb')
    dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
    kinesis = aws_stack.connect_to_service('kinesis')
    sns = aws_stack.connect_to_service('sns')

    LOGGER.info('Creating test streams...')
    run_safe(lambda: dynamodb_service.delete_table(TableName=TEST_STREAM_NAME +
                                                   ddb_lease_table_suffix),
             print_error=False)
    aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

    # subscribe to inbound Kinesis stream
    def process_records(records, shard_id):
        EVENTS.extend(records)

    # start the KCL client process in the background
    kinesis_connector.listen_to_kinesis(
        TEST_STREAM_NAME,
        listener_func=process_records,
        wait_until_started=True,
        ddb_lease_table_suffix=ddb_lease_table_suffix)

    LOGGER.info('Kinesis consumer initialized.')

    # create table with stream forwarding config
    testutil.create_dynamodb_table(TEST_TABLE_NAME,
                                   partition_key=PARTITION_KEY,
                                   stream_view_type='NEW_AND_OLD_IMAGES')

    # list DDB streams and make sure the table stream is there
    streams = dynamodbstreams.list_streams()
    ddb_event_source_arn = None
    for stream in streams['Streams']:
        if stream['TableName'] == TEST_TABLE_NAME:
            ddb_event_source_arn = stream['StreamArn']
    assert ddb_event_source_arn

    # deploy test lambda connected to DynamoDB Stream
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON),
                                              get_content=True,
                                              libs=TEST_LAMBDA_LIBS,
                                              runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
                                    zip_file=zip_file,
                                    event_source_arn=ddb_event_source_arn,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)
    # make sure we cannot create Lambda with same name twice
    assert_raises(Exception,
                  testutil.create_lambda_function,
                  func_name=TEST_LAMBDA_NAME_DDB,
                  zip_file=zip_file,
                  event_source_arn=ddb_event_source_arn,
                  runtime=LAMBDA_RUNTIME_PYTHON27)

    # deploy test lambda connected to Kinesis Stream
    kinesis_event_source_arn = kinesis.describe_stream(
        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
    )['StreamDescription']['StreamARN']
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM,
                                    zip_file=zip_file,
                                    event_source_arn=kinesis_event_source_arn,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)

    # set number of items to update/put to table
    num_events_ddb = 15
    num_put_new_items = 5
    num_put_existing_items = 2
    num_batch_items = 3
    num_updates_ddb = num_events_ddb - num_put_new_items - num_put_existing_items - num_batch_items

    LOGGER.info('Putting %s items to table...' % num_events_ddb)
    table = dynamodb.Table(TEST_TABLE_NAME)
    for i in range(0, num_put_new_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123'
        })
    # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
    for i in range(0, num_put_existing_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123_put_existing'
        })

    # batch write some items containing non-ASCII characters
    dynamodb.batch_write_item(
        RequestItems={
            TEST_TABLE_NAME: [{
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123 ✓'
                    }
                }
            }, {
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123 £'
                    }
                }
            }, {
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123 ¢'
                    }
                }
            }]
        })
    # update some items, which also triggers notification events
    for i in range(0, num_updates_ddb):
        dynamodb_service.update_item(
            TableName=TEST_TABLE_NAME,
            Key={PARTITION_KEY: {
                'S': 'testId%s' % i
            }},
            AttributeUpdates={
                'data': {
                    'Action': 'PUT',
                    'Value': {
                        'S': 'foobar123_updated'
                    }
                }
            })

    # put items to stream
    num_events_kinesis = 10
    LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
    kinesis.put_records(Records=[{
        'Data': '{}',
        'PartitionKey': 'testId%s' % i
    } for i in range(0, num_events_kinesis)],
                        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # put 1 item to stream that will trigger an error in the Lambda
    kinesis.put_record(Data='{"%s": 1}' %
                       lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
                       PartitionKey='testIderror',
                       StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # create SNS topic, connect it to the Lambda, publish test message
    num_events_sns = 3
    response = sns.create_topic(Name=TEST_TOPIC_NAME)
    sns.subscribe(
        TopicArn=response['TopicArn'],
        Protocol='lambda',
        Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
    for i in range(0, num_events_sns):
        sns.publish(TopicArn=response['TopicArn'],
                    Message='test message %s' % i)

    # get latest records
    latest = aws_stack.kinesis_get_latest_records(
        TEST_LAMBDA_SOURCE_STREAM_NAME,
        shard_id='shardId-000000000000',
        count=10)
    assert len(latest) == 10

    LOGGER.info('Waiting some time before finishing test.')
    time.sleep(2)

    num_events = num_events_ddb + num_events_kinesis + num_events_sns

    def check_events():
        if len(EVENTS) != num_events:
            LOGGER.warning(
                ('DynamoDB and Kinesis updates retrieved ' +
                 '(actual/expected): %s/%s') % (len(EVENTS), num_events))
        assert len(EVENTS) == num_events
        event_items = [json.loads(base64.b64decode(e['data'])) for e in EVENTS]
        inserts = [
            e for e in event_items if e.get('__action_type') == 'INSERT'
        ]
        modifies = [
            e for e in event_items if e.get('__action_type') == 'MODIFY'
        ]
        assert len(inserts) == num_put_new_items + num_batch_items
        assert len(modifies) == num_put_existing_items + num_updates_ddb

    # this can take a long time in CI, make sure we give it enough time/retries
    retry(check_events, retries=7, sleep=3)

    # make sure we have the right amount of INSERT/MODIFY event types

    # check cloudwatch notifications
    stats1 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM)
    assert len(stats1['Datapoints']) == 2 + num_events_sns
    stats2 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM, 'Errors')
    assert len(stats2['Datapoints']) == 1
    stats3 = get_lambda_metrics(TEST_LAMBDA_NAME_DDB)
    assert len(stats3['Datapoints']) == num_events_ddb
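
The retry(...) helper is used but not defined in this excerpt. Based on its call sites (retry(check_events, retries=7, sleep=3), with a function that raises on a failed assertion), a minimal sketch could be:

import time

def retry(function, retries=3, sleep=1, **kwargs):
    # Re-run the function until it passes; re-raise after the final attempt.
    for attempt in range(retries):
        try:
            return function(**kwargs)
        except Exception:
            if attempt >= retries - 1:
                raise
            time.sleep(sleep)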
Example #10
    def test_lambda_streams_batch_and_transactions(self):
        ddb_lease_table_suffix = '-kclapp2'
        table_name = TEST_TABLE_NAME + 'lsbat' + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        dynamodb = aws_stack.connect_to_service('dynamodb', client=True)
        dynamodb_service = aws_stack.connect_to_service('dynamodb')
        dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')

        LOGGER.info('Creating test streams...')
        run_safe(lambda: dynamodb_service.delete_table(TableName=stream_name +
                                                       ddb_lease_table_suffix),
                 print_error=False)
        aws_stack.create_kinesis_stream(stream_name, delete=True)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(
            stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix=ddb_lease_table_suffix)

        LOGGER.info('Kinesis consumer initialized.')

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(table_name,
                                        partition_key=PARTITION_KEY,
                                        stream_view_type='NEW_AND_OLD_IMAGES')

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams['Streams']:
            if stream['TableName'] == table_name:
                ddb_event_source_arn = stream['StreamArn']
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
                                        zip_file=zip_file,
                                        event_source_arn=ddb_event_source_arn,
                                        runtime=LAMBDA_RUNTIME_PYTHON27,
                                        delete=True)

        # submit a batch with writes
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [{
                    'PutRequest': {
                        'Item': {
                            PARTITION_KEY: {
                                'S': 'testId0'
                            },
                            'data': {
                                'S': 'foobar123'
                            }
                        }
                    }
                }, {
                    'PutRequest': {
                        'Item': {
                            PARTITION_KEY: {
                                'S': 'testId1'
                            },
                            'data': {
                                'S': 'foobar123'
                            }
                        }
                    }
                }, {
                    'PutRequest': {
                        'Item': {
                            PARTITION_KEY: {
                                'S': 'testId2'
                            },
                            'data': {
                                'S': 'foobar123'
                            }
                        }
                    }
                }]
            })

        # submit a batch with writes and deletes
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        'PutRequest': {
                            'Item': {
                                PARTITION_KEY: {
                                    'S': 'testId3'
                                },
                                'data': {
                                    'S': 'foobar123'
                                }
                            }
                        }
                    },
                    {
                        'PutRequest': {
                            'Item': {
                                PARTITION_KEY: {
                                    'S': 'testId4'
                                },
                                'data': {
                                    'S': 'foobar123'
                                }
                            }
                        }
                    },
                    {
                        'PutRequest': {
                            'Item': {
                                PARTITION_KEY: {
                                    'S': 'testId5'
                                },
                                'data': {
                                    'S': 'foobar123'
                                }
                            }
                        }
                    },
                    {
                        'DeleteRequest': {
                            'Key': {
                                PARTITION_KEY: {
                                    'S': 'testId0'
                                }
                            }
                        }
                    },
                    {
                        'DeleteRequest': {
                            'Key': {
                                PARTITION_KEY: {
                                    'S': 'testId1'
                                }
                            }
                        }
                    },
                    {
                        'DeleteRequest': {
                            'Key': {
                                PARTITION_KEY: {
                                    'S': 'testId2'
                                }
                            }
                        }
                    },
                ]
            })

        # submit a transaction with writes and delete
        dynamodb.transact_write_items(TransactItems=[
            {
                'Put': {
                    'TableName': table_name,
                    'Item': {
                        PARTITION_KEY: {
                            'S': 'testId6'
                        },
                        'data': {
                            'S': 'foobar123'
                        }
                    }
                }
            },
            {
                'Put': {
                    'TableName': table_name,
                    'Item': {
                        PARTITION_KEY: {
                            'S': 'testId7'
                        },
                        'data': {
                            'S': 'foobar123'
                        }
                    }
                }
            },
            {
                'Put': {
                    'TableName': table_name,
                    'Item': {
                        PARTITION_KEY: {
                            'S': 'testId8'
                        },
                        'data': {
                            'S': 'foobar123'
                        }
                    }
                }
            },
            {
                'Delete': {
                    'TableName': table_name,
                    'Key': {
                        PARTITION_KEY: {
                            'S': 'testId3'
                        }
                    }
                }
            },
            {
                'Delete': {
                    'TableName': table_name,
                    'Key': {
                        PARTITION_KEY: {
                            'S': 'testId4'
                        }
                    }
                }
            },
            {
                'Delete': {
                    'TableName': table_name,
                    'Key': {
                        PARTITION_KEY: {
                            'S': 'testId5'
                        }
                    }
                }
            },
        ])

        # submit a batch with a put over existing item
        dynamodb.transact_write_items(TransactItems=[
            {
                'Put': {
                    'TableName': table_name,
                    'Item': {
                        PARTITION_KEY: {
                            'S': 'testId6'
                        },
                        'data': {
                            'S': 'foobar123_updated1'
                        }
                    }
                }
            },
        ])

        # submit a transaction with a put over existing item
        dynamodb.transact_write_items(TransactItems=[
            {
                'Put': {
                    'TableName': table_name,
                    'Item': {
                        PARTITION_KEY: {
                            'S': 'testId7'
                        },
                        'data': {
                            'S': 'foobar123_updated1'
                        }
                    }
                }
            },
        ])

        # submit a transaction with updates
        dynamodb.transact_write_items(TransactItems=[
            {
                'Update': {
                    'TableName': table_name,
                    'Key': {
                        PARTITION_KEY: {
                            'S': 'testId6'
                        }
                    },
                    'UpdateExpression': 'SET #0 = :0',
                    'ExpressionAttributeNames': {
                        '#0': 'data'
                    },
                    'ExpressionAttributeValues': {
                        ':0': {
                            'S': 'foobar123_updated2'
                        }
                    }
                }
            },
            {
                'Update': {
                    'TableName': table_name,
                    'Key': {
                        PARTITION_KEY: {
                            'S': 'testId7'
                        }
                    },
                    'UpdateExpression': 'SET #0 = :0',
                    'ExpressionAttributeNames': {
                        '#0': 'data'
                    },
                    'ExpressionAttributeValues': {
                        ':0': {
                            'S': 'foobar123_updated2'
                        }
                    }
                }
            },
            {
                'Update': {
                    'TableName': table_name,
                    'Key': {
                        PARTITION_KEY: {
                            'S': 'testId8'
                        }
                    },
                    'UpdateExpression': 'SET #0 = :0',
                    'ExpressionAttributeNames': {
                        '#0': 'data'
                    },
                    'ExpressionAttributeValues': {
                        ':0': {
                            'S': 'foobar123_updated2'
                        }
                    }
                }
            },
        ])

        LOGGER.info('Waiting some time before finishing test.')
        time.sleep(2)

        num_insert = 9
        num_modify = 5
        num_delete = 6
        num_events = num_insert + num_modify + num_delete

        def check_events():
            if len(events) != num_events:
                LOGGER.warning(
                    ('DynamoDB updates retrieved (actual/expected): %s/%s') %
                    (len(events), num_events))
            self.assertEqual(len(events), num_events)
            event_items = [
                json.loads(base64.b64decode(e['data'])) for e in events
            ]
            # make sure we have the right amount of expected event types
            inserts = [
                e for e in event_items if e.get('__action_type') == 'INSERT'
            ]
            modifies = [
                e for e in event_items if e.get('__action_type') == 'MODIFY'
            ]
            removes = [
                e for e in event_items if e.get('__action_type') == 'REMOVE'
            ]
            self.assertEqual(len(inserts), num_insert)
            self.assertEqual(len(modifies), num_modify)
            self.assertEqual(len(removes), num_delete)

            for i, event in enumerate(inserts):
                self.assertNotIn('old_image', event)
                self.assertEqual(inserts[i]['new_image'], {
                    'id': 'testId%d' % i,
                    'data': 'foobar123'
                })

            self.assertEqual(modifies[0]['old_image'], {
                'id': 'testId6',
                'data': 'foobar123'
            })
            self.assertEqual(modifies[0]['new_image'], {
                'id': 'testId6',
                'data': 'foobar123_updated1'
            })
            self.assertEqual(modifies[1]['old_image'], {
                'id': 'testId7',
                'data': 'foobar123'
            })
            self.assertEqual(modifies[1]['new_image'], {
                'id': 'testId7',
                'data': 'foobar123_updated1'
            })
            self.assertEqual(modifies[2]['old_image'], {
                'id': 'testId6',
                'data': 'foobar123_updated1'
            })
            self.assertEqual(modifies[2]['new_image'], {
                'id': 'testId6',
                'data': 'foobar123_updated2'
            })
            self.assertEqual(modifies[3]['old_image'], {
                'id': 'testId7',
                'data': 'foobar123_updated1'
            })
            self.assertEqual(modifies[3]['new_image'], {
                'id': 'testId7',
                'data': 'foobar123_updated2'
            })
            self.assertEqual(modifies[4]['old_image'], {
                'id': 'testId8',
                'data': 'foobar123'
            })
            self.assertEqual(modifies[4]['new_image'], {
                'id': 'testId8',
                'data': 'foobar123_updated2'
            })

            for i, event in enumerate(removes):
                self.assertEqual(event['old_image'], {
                    'id': 'testId%d' % i,
                    'data': 'foobar123'
                })
                self.assertNotIn('new_image', event)

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=9, sleep=3)

        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_DDB)
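
Each forwarded record carries a base64-encoded JSON payload, which check_events decodes via json.loads(base64.b64decode(...)). A standalone round-trip with an illustrative record:

import base64
import json

payload = {"__action_type": "INSERT", "new_image": {"id": "testId0", "data": "foobar123"}}
record = {"data": base64.b64encode(json.dumps(payload).encode("utf-8"))}  # illustrative
event = json.loads(base64.b64decode(record["data"]))
print(event["__action_type"])  # INSERT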
Example #11
    def test_kinesis_lambda_sns_ddb_sqs_streams(self):
        ddb_lease_table_suffix = '-kclapp'
        table_name = TEST_TABLE_NAME + 'klsdss' + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        dynamodb = aws_stack.connect_to_resource('dynamodb')
        dynamodb_service = aws_stack.connect_to_service('dynamodb')
        dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
        kinesis = aws_stack.connect_to_service('kinesis')
        sns = aws_stack.connect_to_service('sns')
        sqs = aws_stack.connect_to_service('sqs')

        LOGGER.info('Creating test streams...')
        run_safe(lambda: dynamodb_service.delete_table(TableName=stream_name +
                                                       ddb_lease_table_suffix),
                 print_error=False)
        aws_stack.create_kinesis_stream(stream_name, delete=True)
        aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(
            stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix=ddb_lease_table_suffix)

        LOGGER.info('Kinesis consumer initialized.')

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(table_name,
                                        partition_key=PARTITION_KEY,
                                        stream_view_type='NEW_AND_OLD_IMAGES')

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams['Streams']:
            if stream['TableName'] == table_name:
                ddb_event_source_arn = stream['StreamArn']
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
                                        zip_file=zip_file,
                                        event_source_arn=ddb_event_source_arn,
                                        runtime=LAMBDA_RUNTIME_PYTHON27,
                                        delete=True)
        # make sure we cannot create Lambda with same name twice
        assert_raises(Exception,
                      testutil.create_lambda_function,
                      func_name=TEST_LAMBDA_NAME_DDB,
                      zip_file=zip_file,
                      event_source_arn=ddb_event_source_arn,
                      runtime=LAMBDA_RUNTIME_PYTHON27)

        # deploy test lambda connected to Kinesis Stream
        kinesis_event_source_arn = kinesis.describe_stream(
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
        )['StreamDescription']['StreamARN']
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_STREAM,
            zip_file=zip_file,
            event_source_arn=kinesis_event_source_arn,
            runtime=LAMBDA_RUNTIME_PYTHON27)

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(TEST_LAMBDA_NAME_QUEUE)
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_QUEUE,
            zip_file=zip_file,
            event_source_arn=sqs_queue_info['QueueArn'],
            runtime=LAMBDA_RUNTIME_PYTHON27)

        # set number of items to update/put to table
        num_events_ddb = 15
        num_put_new_items = 5
        num_put_existing_items = 2
        num_batch_items = 3
        num_updates_ddb = num_events_ddb - num_put_new_items - num_put_existing_items - num_batch_items

        LOGGER.info('Putting %s items to table...' % num_events_ddb)
        table = dynamodb.Table(table_name)
        for i in range(0, num_put_new_items):
            table.put_item(Item={
                PARTITION_KEY: 'testId%s' % i,
                'data': 'foobar123'
            })
        # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
        for i in range(0, num_put_existing_items):
            table.put_item(Item={
                PARTITION_KEY: 'testId%s' % i,
                'data': 'foobar123_put_existing'
            })

        # batch write some items containing non-ASCII characters
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [{
                    'PutRequest': {
                        'Item': {
                            PARTITION_KEY: short_uid(),
                            'data': 'foobar123 ✓'
                        }
                    }
                }, {
                    'PutRequest': {
                        'Item': {
                            PARTITION_KEY: short_uid(),
                            'data': 'foobar123 £'
                        }
                    }
                }, {
                    'PutRequest': {
                        'Item': {
                            PARTITION_KEY: short_uid(),
                            'data': 'foobar123 ¢'
                        }
                    }
                }]
            })
        # update some items, which also triggers notification events
        for i in range(0, num_updates_ddb):
            dynamodb_service.update_item(
                TableName=table_name,
                Key={PARTITION_KEY: {
                    'S': 'testId%s' % i
                }},
                AttributeUpdates={
                    'data': {
                        'Action': 'PUT',
                        'Value': {
                            'S': 'foobar123_updated'
                        }
                    }
                })

        # put items to stream
        num_events_kinesis = 10
        LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
        kinesis.put_records(Records=[{
            'Data': '{}',
            'PartitionKey': 'testId%s' % i
        } for i in range(0, num_events_kinesis)],
                            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

        # put 1 item to stream that will trigger an error in the Lambda
        kinesis.put_record(Data='{"%s": 1}' %
                           lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
                           PartitionKey='testIderror',
                           StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

        # create SNS topic, connect it to the Lambda, publish test messages
        num_events_sns = 3
        response = sns.create_topic(Name=TEST_TOPIC_NAME)
        sns.subscribe(
            TopicArn=response['TopicArn'],
            Protocol='lambda',
            Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
        for i in range(0, num_events_sns):
            sns.publish(TopicArn=response['TopicArn'],
                        Message='test message %s' % i)

        # get latest records
        latest = aws_stack.kinesis_get_latest_records(
            TEST_LAMBDA_SOURCE_STREAM_NAME,
            shard_id='shardId-000000000000',
            count=10)
        self.assertEqual(len(latest), 10)

        # send messages to SQS queue
        num_events_sqs = 4
        for i in range(num_events_sqs):
            sqs.send_message(QueueUrl=sqs_queue_info['QueueUrl'],
                             MessageBody=str(i))

        LOGGER.info('Waiting some time before finishing test.')
        time.sleep(2)

        num_events_lambda = num_events_ddb + num_events_sns + num_events_sqs
        num_events = num_events_lambda + num_events_kinesis

        def check_events():
            if len(events) != num_events:
                LOGGER.warning((
                    'DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s'
                ) % (len(events), num_events))
            self.assertEqual(len(events), num_events)
            event_items = [
                json.loads(base64.b64decode(e['data'])) for e in events
            ]
            # make sure we have the right amount of INSERT/MODIFY event types
            inserts = [
                e for e in event_items if e.get('__action_type') == 'INSERT'
            ]
            modifies = [
                e for e in event_items if e.get('__action_type') == 'MODIFY'
            ]
            self.assertEqual(len(inserts), num_put_new_items + num_batch_items)
            self.assertEqual(len(modifies),
                             num_put_existing_items + num_updates_ddb)

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=9, sleep=3)

        # check cloudwatch notifications
        num_invocations = get_lambda_invocations_count(TEST_LAMBDA_NAME_STREAM)
        # TODO: It seems that CloudWatch is currently reporting an incorrect number of
        #   invocations, namely the sum over *all* lambdas, not the single one we're asking for.
        #   Also, we need to bear in mind that Kinesis may perform batch updates, i.e., a single
        #   Lambda invocation may happen with a set of Kinesis records, hence we cannot simply
        #   add num_events_ddb to num_events_lambda above!
        # self.assertEqual(num_invocations, 2 + num_events_lambda)
        self.assertGreater(num_invocations, num_events_sns + num_events_sqs)
        num_error_invocations = get_lambda_invocations_count(
            TEST_LAMBDA_NAME_STREAM, 'Errors')
        self.assertEqual(num_error_invocations, 1)

        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_STREAM)
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_DDB)
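
get_lambda_invocations_count(...) is referenced but not defined in this excerpt. A hedged sketch of how such a helper might sum CloudWatch datapoints (an assumed shape, not the actual test utility):

import datetime

def get_lambda_invocations_count(lambda_name, metric="Invocations"):
    # Sum the datapoints of a Lambda CloudWatch metric over a recent window.
    cloudwatch = aws_stack.connect_to_service("cloudwatch")  # assumes the same helper module
    end_time = datetime.datetime.utcnow()
    start_time = end_time - datetime.timedelta(minutes=10)
    stats = cloudwatch.get_metric_statistics(
        Namespace="AWS/Lambda",
        MetricName=metric,
        Dimensions=[{"Name": "FunctionName", "Value": lambda_name}],
        StartTime=start_time,
        EndTime=end_time,
        Period=60,
        Statistics=["Sum"],
    )
    return sum(dp["Sum"] for dp in stats["Datapoints"])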
Example #12
    def test_lambda_streams_batch_and_transactions(self):
        ddb_lease_table_suffix = "-kclapp2"
        table_name = TEST_TABLE_NAME + "lsbat" + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        lambda_ddb_name = "lambda-ddb-%s" % short_uid()
        dynamodb = aws_stack.create_external_boto_client("dynamodb",
                                                         client=True)
        dynamodb_service = aws_stack.create_external_boto_client("dynamodb")
        dynamodbstreams = aws_stack.create_external_boto_client(
            "dynamodbstreams")

        LOGGER.info("Creating test streams...")
        run_safe(
            lambda: dynamodb_service.delete_table(TableName=stream_name +
                                                  ddb_lease_table_suffix),
            print_error=False,
        )
        aws_stack.create_kinesis_stream(stream_name, delete=True)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(
            stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix=ddb_lease_table_suffix,
        )

        LOGGER.info("Kinesis consumer initialized.")

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(
            table_name,
            partition_key=PARTITION_KEY,
            stream_view_type="NEW_AND_OLD_IMAGES",
        )

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams["Streams"]:
            if stream["TableName"] == table_name:
                ddb_event_source_arn = stream["StreamArn"]
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        testutil.create_lambda_function(
            handler_file=TEST_LAMBDA_PYTHON,
            libs=TEST_LAMBDA_LIBS,
            func_name=lambda_ddb_name,
            event_source_arn=ddb_event_source_arn,
            delete=True,
        )

        # submit a batch with writes
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId0"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId1"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId2"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                ]
            })

        # submit a batch with writes and deletes
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId3"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId4"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId5"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "DeleteRequest": {
                            "Key": {
                                PARTITION_KEY: {
                                    "S": "testId0"
                                }
                            }
                        }
                    },
                    {
                        "DeleteRequest": {
                            "Key": {
                                PARTITION_KEY: {
                                    "S": "testId1"
                                }
                            }
                        }
                    },
                    {
                        "DeleteRequest": {
                            "Key": {
                                PARTITION_KEY: {
                                    "S": "testId2"
                                }
                            }
                        }
                    },
                ]
            })

        # submit a transaction with writes and delete
        dynamodb.transact_write_items(TransactItems=[
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId6"
                        },
                        "data": {
                            "S": "foobar123"
                        },
                    },
                }
            },
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId7"
                        },
                        "data": {
                            "S": "foobar123"
                        },
                    },
                }
            },
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId8"
                        },
                        "data": {
                            "S": "foobar123"
                        },
                    },
                }
            },
            {
                "Delete": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId3"
                        }
                    },
                }
            },
            {
                "Delete": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId4"
                        }
                    },
                }
            },
            {
                "Delete": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId5"
                        }
                    },
                }
            },
        ])

        # submit a batch with a put over existing item
        dynamodb.transact_write_items(TransactItems=[
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId6"
                        },
                        "data": {
                            "S": "foobar123_updated1"
                        },
                    },
                }
            },
        ])

        # submit a transaction with a put over existing item
        dynamodb.transact_write_items(TransactItems=[
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId7"
                        },
                        "data": {
                            "S": "foobar123_updated1"
                        },
                    },
                }
            },
        ])

        # submit a transaction with updates
        dynamodb.transact_write_items(TransactItems=[
            {
                "Update": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId6"
                        }
                    },
                    "UpdateExpression": "SET #0 = :0",
                    "ExpressionAttributeNames": {
                        "#0": "data"
                    },
                    "ExpressionAttributeValues": {
                        ":0": {
                            "S": "foobar123_updated2"
                        }
                    },
                }
            },
            {
                "Update": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId7"
                        }
                    },
                    "UpdateExpression": "SET #0 = :0",
                    "ExpressionAttributeNames": {
                        "#0": "data"
                    },
                    "ExpressionAttributeValues": {
                        ":0": {
                            "S": "foobar123_updated2"
                        }
                    },
                }
            },
            {
                "Update": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId8"
                        }
                    },
                    "UpdateExpression": "SET #0 = :0",
                    "ExpressionAttributeNames": {
                        "#0": "data"
                    },
                    "ExpressionAttributeValues": {
                        ":0": {
                            "S": "foobar123_updated2"
                        }
                    },
                }
            },
        ])

        LOGGER.info("Waiting some time before finishing test.")
        time.sleep(2)

        num_insert = 9
        num_modify = 5
        num_delete = 6
        num_events = num_insert + num_modify + num_delete

        def check_events():
            if len(events) != num_events:
                msg = "DynamoDB updates retrieved (actual/expected): %s/%s" % (
                    len(events),
                    num_events,
                )
                LOGGER.warning(msg)
            self.assertEqual(num_events, len(events))
            event_items = [
                json.loads(base64.b64decode(e["data"])) for e in events
            ]
            # make sure we have the right amount of expected event types
            inserts = [
                e for e in event_items if e.get("__action_type") == "INSERT"
            ]
            modifies = [
                e for e in event_items if e.get("__action_type") == "MODIFY"
            ]
            removes = [
                e for e in event_items if e.get("__action_type") == "REMOVE"
            ]
            self.assertEqual(num_insert, len(inserts))
            self.assertEqual(num_modify, len(modifies))
            self.assertEqual(num_delete, len(removes))

            # assert that all inserts were received

            for i, event in enumerate(inserts):
                self.assertNotIn("old_image", event)
                item_id = "testId%d" % i
                matching = [
                    i for i in inserts if i["new_image"]["id"] == item_id
                ][0]
                self.assertEqual({
                    "id": item_id,
                    "data": "foobar123"
                }, matching["new_image"])

            # assert that all updates were received

            def assert_updates(expected_updates, modifies):
                def found(update):
                    for modif in modifies:
                        if modif["old_image"]["id"] == update["id"]:
                            self.assertEqual(
                                modif["old_image"],
                                {
                                    "id": update["id"],
                                    "data": update["old"]
                                },
                            )
                            self.assertEqual(
                                modif["new_image"],
                                {
                                    "id": update["id"],
                                    "data": update["new"]
                                },
                            )
                            return True

                for update in expected_updates:
                    self.assertTrue(found(update))

            updates1 = [
                {
                    "id": "testId6",
                    "old": "foobar123",
                    "new": "foobar123_updated1"
                },
                {
                    "id": "testId7",
                    "old": "foobar123",
                    "new": "foobar123_updated1"
                },
            ]
            updates2 = [
                {
                    "id": "testId6",
                    "old": "foobar123_updated1",
                    "new": "foobar123_updated2",
                },
                {
                    "id": "testId7",
                    "old": "foobar123_updated1",
                    "new": "foobar123_updated2",
                },
                {
                    "id": "testId8",
                    "old": "foobar123",
                    "new": "foobar123_updated2"
                },
            ]

            assert_updates(updates1, modifies[:2])
            assert_updates(updates2, modifies[2:])

            # assert that all removes were received

            for i, event in enumerate(removes):
                self.assertNotIn("new_image", event)
                item_id = "testId%d" % i
                matching = [
                    i for i in removes if i["old_image"]["id"] == item_id
                ][0]
                self.assertEqual({
                    "id": item_id,
                    "data": "foobar123"
                }, matching["old_image"])

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=9, sleep=4)

        # clean up
        testutil.delete_lambda_function(lambda_ddb_name)
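
short_uid() appears throughout these tests to generate unique resource names; a plausible one-liner sketch (an assumption, not necessarily the real helper):

import uuid

def short_uid():
    # Short random identifier for disposable test resources.
    return str(uuid.uuid4())[0:8]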
Example #13
    def test_kinesis_lambda_sns_ddb_sqs_streams(self):
        def create_kinesis_stream(name, delete=False):
            stream = aws_stack.create_kinesis_stream(name, delete=delete)
            stream.wait_for()

        ddb_lease_table_suffix = "-kclapp"
        table_name = TEST_TABLE_NAME + "klsdss" + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        lambda_stream_name = "lambda-stream-%s" % short_uid()
        lambda_queue_name = "lambda-queue-%s" % short_uid()
        lambda_ddb_name = "lambda-ddb-%s" % short_uid()
        queue_name = "queue-%s" % short_uid()
        dynamodb = aws_stack.connect_to_resource("dynamodb")
        dynamodb_service = aws_stack.create_external_boto_client("dynamodb")
        dynamodbstreams = aws_stack.create_external_boto_client(
            "dynamodbstreams")
        kinesis = aws_stack.create_external_boto_client("kinesis")
        sns = aws_stack.create_external_boto_client("sns")
        sqs = aws_stack.create_external_boto_client("sqs")

        LOGGER.info("Creating test streams...")
        run_safe(
            lambda: dynamodb_service.delete_table(TableName=stream_name +
                                                  ddb_lease_table_suffix),
            print_error=False,
        )

        create_kinesis_stream(stream_name, delete=True)
        create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            records = [
                json.loads(base64.b64decode(r["data"])) if r.get("data") else r
                for r in records
            ]
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(
            stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix=ddb_lease_table_suffix,
        )

        LOGGER.info("Kinesis consumer initialized.")

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(
            table_name,
            partition_key=PARTITION_KEY,
            stream_view_type="NEW_AND_OLD_IMAGES",
        )

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams["Streams"]:
            if stream["TableName"] == table_name:
                ddb_event_source_arn = stream["StreamArn"]
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS)
        testutil.create_lambda_function(
            func_name=lambda_ddb_name,
            zip_file=zip_file,
            event_source_arn=ddb_event_source_arn,
            delete=True,
        )
        # make sure we cannot create Lambda with same name twice
        with self.assertRaises(Exception):
            testutil.create_lambda_function(
                func_name=lambda_ddb_name,
                zip_file=zip_file,
                event_source_arn=ddb_event_source_arn,
            )

        # deploy test lambda connected to Kinesis Stream
        kinesis_event_source_arn = kinesis.describe_stream(
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
        )["StreamDescription"]["StreamARN"]
        testutil.create_lambda_function(
            func_name=lambda_stream_name,
            zip_file=zip_file,
            event_source_arn=kinesis_event_source_arn,
        )

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(queue_name)
        testutil.create_lambda_function(
            func_name=lambda_queue_name,
            zip_file=zip_file,
            event_source_arn=sqs_queue_info["QueueArn"],
        )

        # set number of items to update/put to table
        num_events_ddb = 15
        num_put_new_items = 5
        num_put_existing_items = 2
        num_batch_items = 3
        num_updates_ddb = (num_events_ddb - num_put_new_items -
                           num_put_existing_items - num_batch_items)

        LOGGER.info("Putting %s items to table...", num_events_ddb)
        table = dynamodb.Table(table_name)
        for i in range(0, num_put_new_items):
            table.put_item(Item={
                PARTITION_KEY: f"testId{i}",
                "data": "foobar123"
            })
        # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
        for i in range(0, num_put_existing_items):
            table.put_item(Item={
                PARTITION_KEY: f"testId{i}",
                "data": "foobar_put_existing"
            })

        # batch write some items containing non-ASCII characters
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobaz123 ✓"
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobaz123 £"
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobaz123 ¢"
                            }
                        }
                    },
                ]
            })
        # update some items, which also triggers notification events
        for i in range(0, num_updates_ddb):
            dynamodb_service.update_item(
                TableName=table_name,
                Key={PARTITION_KEY: {
                    "S": f"testId{i}"
                }},
                AttributeUpdates={
                    "data": {
                        "Action": "PUT",
                        "Value": {
                            "S": "foo_updated"
                        }
                    }
                },
            )

        # put items to stream
        num_events_kinesis = 1
        num_kinesis_records = 10
        LOGGER.info("Putting %s records in %s event to stream...",
                    num_kinesis_records, num_events_kinesis)
        kinesis.put_records(
            Records=[{
                "Data": "{}",
                "PartitionKey": f"testId{i}"
            } for i in range(0, num_kinesis_records)],
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME,
        )

        # put 1 item to stream that will trigger an error in the Lambda
        num_events_kinesis_err = 1
        for i in range(num_events_kinesis_err):
            kinesis.put_record(
                Data='{"%s": 1}' %
                lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
                PartitionKey="testIdError",
                StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME,
            )

        # create SNS topic, connect it to the Lambda, publish test messages
        num_events_sns = 3
        response = sns.create_topic(Name=TEST_TOPIC_NAME)
        sns.subscribe(
            TopicArn=response["TopicArn"],
            Protocol="lambda",
            Endpoint=aws_stack.lambda_function_arn(lambda_stream_name),
        )
        for i in range(num_events_sns):
            sns.publish(
                TopicArn=response["TopicArn"],
                Subject="test_subject",
                Message=f"test message {i}",
            )

        # get latest records
        latest = aws_stack.kinesis_get_latest_records(
            TEST_LAMBDA_SOURCE_STREAM_NAME,
            shard_id="shardId-000000000000",
            count=10)
        self.assertEqual(10, len(latest))

        # send messages to SQS queue
        num_events_sqs = 4
        for i in range(num_events_sqs):
            sqs.send_message(QueueUrl=sqs_queue_info["QueueUrl"],
                             MessageBody=str(i))

        LOGGER.info("Waiting some time before finishing test.")
        time.sleep(2)

        num_events_lambda = num_events_ddb + num_events_sns + num_events_sqs
        num_events = num_events_lambda + num_kinesis_records

        def check_events():
            if len(events) != num_events:
                msg = "DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s" % (
                    len(events),
                    num_events,
                )
                LOGGER.warning(msg)
            self.assertEqual(num_events, len(events))
            # make sure we have the right number of INSERT/MODIFY event types
            inserts = [e for e in events if e.get("__action_type") == "INSERT"]
            modifies = [
                e for e in events if e.get("__action_type") == "MODIFY"
            ]
            self.assertEqual(num_put_new_items + num_batch_items, len(inserts))
            self.assertEqual(num_put_existing_items + num_updates_ddb,
                             len(modifies))

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=15, sleep=2)

        # check cloudwatch notifications
        def check_cw_invocations():
            num_invocations = get_lambda_invocations_count(lambda_stream_name)
            expected_invocation_count = num_events_kinesis + num_events_kinesis_err + num_events_sns
            self.assertEqual(expected_invocation_count, num_invocations)
            num_error_invocations = get_lambda_invocations_count(
                lambda_stream_name, "Errors")
            self.assertEqual(num_events_kinesis_err, num_error_invocations)

        # Lambda invocations are running asynchronously, hence sleep some time here to wait for results
        retry(check_cw_invocations, retries=7, sleep=2)

        # clean up
        testutil.delete_lambda_function(lambda_stream_name)
        testutil.delete_lambda_function(lambda_ddb_name)
        testutil.delete_lambda_function(lambda_queue_name)
        sqs.delete_queue(QueueUrl=sqs_queue_info["QueueUrl"])
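
Both check_events and check_cw_invocations above are driven through retry, because stream propagation and Lambda invocations are asynchronous. A minimal sketch of such a helper, matching the retry(function, retries=..., sleep=...) call signature used in these tests (the real utility may differ; this version is an assumption):

import time

def retry(function, retries=3, sleep=1.0, **kwargs):
    # Call `function` until it stops raising, at most `retries` times,
    # sleeping `sleep` seconds between failed attempts; re-raise the last error.
    for attempt in range(retries):
        try:
            return function(**kwargs)
        except Exception:
            if attempt == retries - 1:
                raise
            time.sleep(sleep)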
Example No. 15
 def num_items():
     return len((run_safe(ddb_client.scan, TableName=db_table) or {'Items': []})['Items'])
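
This one-liner works because run_safe returns None when the scan raises (for instance, while the table does not yet exist), so the `or {'Items': []}` fallback takes over. A plausible minimal implementation of run_safe, consistent with the call sites in these examples (an assumption, not the verbatim utility code):

def run_safe(func, *args, print_error=True, **kwargs):
    # Invoke func, swallowing any exception; return None on failure.
    try:
        return func(*args, **kwargs)
    except Exception as e:
        if print_error:
            print("Error calling %s: %s" % (func, e))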
Example No. 16
def test_kinesis_lambda_sns_ddb_streams():

    ddb_lease_table_suffix = '-kclapp'
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    dynamodb_service = aws_stack.connect_to_service('dynamodb')
    dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
    kinesis = aws_stack.connect_to_service('kinesis')
    sns = aws_stack.connect_to_service('sns')

    LOGGER.info('Creating test streams...')
    run_safe(lambda: dynamodb_service.delete_table(
        TableName=TEST_STREAM_NAME + ddb_lease_table_suffix), print_error=False)
    aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

    # subscribe to inbound Kinesis stream
    def process_records(records, shard_id):
        EVENTS.extend(records)

    # start the KCL client process in the background
    kinesis_connector.listen_to_kinesis(TEST_STREAM_NAME, listener_func=process_records,
        wait_until_started=True, ddb_lease_table_suffix=ddb_lease_table_suffix)

    LOGGER.info('Kinesis consumer initialized.')

    # create table with stream forwarding config
    testutil.create_dynamodb_table(TEST_TABLE_NAME, partition_key=PARTITION_KEY,
        stream_view_type='NEW_AND_OLD_IMAGES')

    # list DDB streams and make sure the table stream is there
    streams = dynamodbstreams.list_streams()
    ddb_event_source_arn = None
    for stream in streams['Streams']:
        if stream['TableName'] == TEST_TABLE_NAME:
            ddb_event_source_arn = stream['StreamArn']
    assert ddb_event_source_arn

    # deploy test lambda connected to DynamoDB Stream
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
        zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)
    # make sure we cannot create Lambda with same name twice
    assert_raises(Exception, testutil.create_lambda_function, func_name=TEST_LAMBDA_NAME_DDB,
        zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

    # deploy test lambda connected to Kinesis Stream
    kinesis_event_source_arn = kinesis.describe_stream(
        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)['StreamDescription']['StreamARN']
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM,
        zip_file=zip_file, event_source_arn=kinesis_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

    # set number of items to update/put to table
    num_events_ddb = 15
    num_put_new_items = 5
    num_put_existing_items = 2
    num_batch_items = 3
    num_updates_ddb = num_events_ddb - num_put_new_items - num_put_existing_items - num_batch_items

    LOGGER.info('Putting %s items to table...' % num_events_ddb)
    table = dynamodb.Table(TEST_TABLE_NAME)
    for i in range(0, num_put_new_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123'
        })
    # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
    for i in range(0, num_put_existing_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123_put_existing'
        })

    # batch write some items containing non-ASCII characters
    dynamodb.batch_write_item(RequestItems={TEST_TABLE_NAME: [
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ✓'}}},
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 £'}}},
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ¢'}}}
    ]})
    # update some items, which also triggers notification events
    for i in range(0, num_updates_ddb):
        dynamodb_service.update_item(TableName=TEST_TABLE_NAME,
            Key={PARTITION_KEY: {'S': 'testId%s' % i}},
            AttributeUpdates={'data': {
                'Action': 'PUT',
                'Value': {'S': 'foobar123_updated'}
            }})

    # put items to stream
    num_events_kinesis = 10
    LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
    kinesis.put_records(
        Records=[
            {
                'Data': '{}',
                'PartitionKey': 'testId%s' % i
            } for i in range(0, num_events_kinesis)
        ], StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
    )

    # put 1 item to stream that will trigger an error in the Lambda
    kinesis.put_record(Data='{"%s": 1}' % lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
        PartitionKey='testIderror', StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # create SNS topic, connect it to the Lambda, publish test message
    num_events_sns = 3
    response = sns.create_topic(Name=TEST_TOPIC_NAME)
    sns.subscribe(TopicArn=response['TopicArn'], Protocol='lambda',
        Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
    for i in range(0, num_events_sns):
        sns.publish(TopicArn=response['TopicArn'], Message='test message %s' % i)

    # get latest records
    latest = aws_stack.kinesis_get_latest_records(TEST_LAMBDA_SOURCE_STREAM_NAME,
        shard_id='shardId-000000000000', count=10)
    assert len(latest) == 10

    LOGGER.info('Waiting some time before finishing test.')
    time.sleep(2)

    num_events = num_events_ddb + num_events_kinesis + num_events_sns

    def check_events():
        if len(EVENTS) != num_events:
            LOGGER.warning(('DynamoDB and Kinesis updates retrieved ' +
                '(actual/expected): %s/%s') % (len(EVENTS), num_events))
        assert len(EVENTS) == num_events
        event_items = [json.loads(base64.b64decode(e['data'])) for e in EVENTS]
        inserts = [e for e in event_items if e.get('__action_type') == 'INSERT']
        modifies = [e for e in event_items if e.get('__action_type') == 'MODIFY']
        assert len(inserts) == num_put_new_items + num_batch_items
        assert len(modifies) == num_put_existing_items + num_updates_ddb

    # this can take a long time in CI, make sure we give it enough time/retries
    retry(check_events, retries=7, sleep=3)

    # check cloudwatch notifications
    stats1 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM)
    assert len(stats1['Datapoints']) == 2 + num_events_sns
    stats2 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM, 'Errors')
    assert len(stats2['Datapoints']) == 1
    stats3 = get_lambda_metrics(TEST_LAMBDA_NAME_DDB)
    assert len(stats3['Datapoints']) == num_events_ddb
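
get_lambda_metrics is another test helper not shown here. A hedged sketch of how it could be built on the standard CloudWatch get_metric_statistics API (the namespace and dimension name are the documented CloudWatch conventions for Lambda; the helper's exact time window is an assumption):

from datetime import datetime, timedelta

def get_lambda_metrics(func_name, metric='Invocations'):
    cloudwatch = aws_stack.connect_to_service('cloudwatch')
    end = datetime.utcnow()
    return cloudwatch.get_metric_statistics(
        Namespace='AWS/Lambda',
        MetricName=metric,
        Dimensions=[{'Name': 'FunctionName', 'Value': func_name}],
        StartTime=end - timedelta(minutes=10),
        EndTime=end,
        Period=60,               # one datapoint per minute
        Statistics=['Sum'],
    )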
Example No. 18
def test_kinesis_lambda_sns_ddb_streams():

    ddb_lease_table_suffix = '-kclapp'
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    dynamodb_service = aws_stack.connect_to_service('dynamodb')
    dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
    kinesis = aws_stack.connect_to_service('kinesis')
    sns = aws_stack.connect_to_service('sns')

    LOGGER.info('Creating test streams...')
    run_safe(lambda: dynamodb_service.delete_table(
        TableName=TEST_STREAM_NAME + ddb_lease_table_suffix), print_error=False)
    aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

    # subscribe to inbound Kinesis stream
    def process_records(records, shard_id):
        EVENTS.extend(records)

    # start the KCL client process in the background
    kinesis_connector.listen_to_kinesis(TEST_STREAM_NAME, listener_func=process_records,
        wait_until_started=True, ddb_lease_table_suffix=ddb_lease_table_suffix)

    LOGGER.info("Kinesis consumer initialized.")

    # create table with stream forwarding config
    testutil.create_dynamodb_table(TEST_TABLE_NAME, partition_key=PARTITION_KEY,
        stream_view_type='NEW_AND_OLD_IMAGES')

    # list DDB streams and make sure the table stream is there
    streams = dynamodbstreams.list_streams()
    ddb_event_source_arn = None
    for stream in streams['Streams']:
        if stream['TableName'] == TEST_TABLE_NAME:
            ddb_event_source_arn = stream['StreamArn']
    assert ddb_event_source_arn

    # deploy test lambda connected to DynamoDB Stream
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
        zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)
    # make sure we cannot create Lambda with same name twice
    assert_raises(Exception, testutil.create_lambda_function, func_name=TEST_LAMBDA_NAME_DDB,
        zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

    # deploy test lambda connected to Kinesis Stream
    kinesis_event_source_arn = kinesis.describe_stream(
        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)['StreamDescription']['StreamARN']
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM,
        zip_file=zip_file, event_source_arn=kinesis_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

    # put items to table
    num_events_ddb = 10
    LOGGER.info('Putting %s items to table...' % num_events_ddb)
    table = dynamodb.Table(TEST_TABLE_NAME)
    for i in range(0, num_events_ddb - 3):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123'
        })
    dynamodb.batch_write_item(RequestItems={TEST_TABLE_NAME: [
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123'}}},
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123'}}},
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123'}}}
    ]})

    # put items to stream
    num_events_kinesis = 10
    LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
    kinesis.put_records(
        Records=[
            {
                'Data': '{}',
                'PartitionKey': 'testId%s' % i
            } for i in range(0, num_events_kinesis)
        ], StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
    )

    # put 1 item to stream that will trigger an error in the Lambda
    kinesis.put_record(Data='{"%s": 1}' % lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
        PartitionKey='testIderror', StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # create SNS topic, connect it to the Lambda, publish test message
    num_events_sns = 3
    response = sns.create_topic(Name=TEST_TOPIC_NAME)
    sns.subscribe(TopicArn=response['TopicArn'], Protocol='lambda',
        Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
    for i in range(0, num_events_sns):
        sns.publish(TopicArn=response['TopicArn'], Message='test message %s' % i)

    # get latest records
    latest = aws_stack.kinesis_get_latest_records(TEST_LAMBDA_SOURCE_STREAM_NAME,
        shard_id='shardId-000000000000', count=10)
    assert len(latest) == 10

    LOGGER.info("Waiting some time before finishing test.")
    time.sleep(2)

    num_events = num_events_ddb + num_events_kinesis + num_events_sns
    if len(EVENTS) != num_events:
        LOGGER.warning('DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s' % (len(EVENTS), num_events))
    assert len(EVENTS) == num_events

    # check cloudwatch notifications
    stats1 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM)
    assert len(stats1['Datapoints']) == 2 + num_events_sns
    stats2 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM, 'Errors')
    assert len(stats2['Datapoints']) == 1
    stats3 = get_lambda_metrics(TEST_LAMBDA_NAME_DDB)
    assert len(stats3['Datapoints']) == num_events_ddb
Example No. 19
 def check_item():
     result = run_safe(ddb_client.scan, TableName=db_table)
     self.assertEqual("non-existing-lambda",
                      result["Items"][0]["function_name"]["S"])