Example #1
def test_upload_lambda_from_s3():

    s3_client = aws_stack.connect_to_service('s3')
    lambda_client = aws_stack.connect_to_service('lambda')

    lambda_name = 'test_lambda_%s' % short_uid()
    bucket_name = 'test_bucket_lambda'
    bucket_key = 'test_lambda.zip'

    # upload zip file to S3
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    s3_client.create_bucket(Bucket=bucket_name)
    s3_client.upload_fileobj(BytesIO(zip_file), bucket_name, bucket_key)

    # create lambda function
    lambda_client.create_function(
        FunctionName=lambda_name, Handler='handler.handler',
        Runtime=lambda_api.LAMBDA_RUNTIME_PYTHON27, Role='r1',
        Code={
            'S3Bucket': bucket_name,
            'S3Key': bucket_key
        }
    )

    # invoke lambda function
    data_before = b'{"foo": "bar"}'
    result = lambda_client.invoke(FunctionName=lambda_name, Payload=data_before)
    data_after = result['Payload'].read()
    assert json.loads(to_str(data_before)) == json.loads(to_str(data_after))
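
The closing assertion round-trips the payload, which presumes the deployed handler echoes its event back. A minimal sketch of such a handler, assuming the actual TEST_LAMBDA_PYTHON fixture behaves like an echo (it may do more):

# handler.py -- hypothetical echo handler consistent with the round-trip assertion above
def handler(event, context):
    # returning the event unchanged makes the invoke() Payload equal the input JSON
    return event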
Example #2
def test_kinesis_lambda_forward_chain():
    kinesis = aws_stack.connect_to_service('kinesis')
    s3 = aws_stack.connect_to_service('s3')

    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM1_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM2_NAME, delete=True)
    s3.create_bucket(Bucket=TEST_BUCKET_NAME)

    # deploy test lambdas connected to Kinesis streams
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_CHAIN_LAMBDA1_NAME, zip_file=zip_file,
        event_source_arn=get_event_source_arn(TEST_CHAIN_STREAM1_NAME), runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_CHAIN_LAMBDA2_NAME, zip_file=zip_file,
        event_source_arn=get_event_source_arn(TEST_CHAIN_STREAM2_NAME), runtime=LAMBDA_RUNTIME_PYTHON27)

    # publish test record
    test_data = {'test_data': 'forward_chain_data_%s' % short_uid()}
    data = clone(test_data)
    data[lambda_integration.MSG_BODY_MESSAGE_TARGET] = 'kinesis:%s' % TEST_CHAIN_STREAM2_NAME
    kinesis.put_record(Data=to_bytes(json.dumps(data)), PartitionKey='testId', StreamName=TEST_CHAIN_STREAM1_NAME)

    # check results
    time.sleep(5)
    all_objects = testutil.list_all_s3_objects()
    testutil.assert_objects(test_data, all_objects)
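
The chain works because the record body carries a routing marker that the test lambda consumes. A hedged sketch of that dispatch step (the marker constant's value and the S3 key are stand-ins; the real lambda_integration module may differ):

# hypothetical sketch of the dispatch step inside the chained test lambda
import json

MSG_BODY_MESSAGE_TARGET = '_message_target_'  # assumption: stand-in for the real constant

def dispatch(message, kinesis_client, s3_client, bucket):
    target = message.pop(MSG_BODY_MESSAGE_TARGET, '')
    if target.startswith('kinesis:'):
        # forward the remaining payload to the next stream in the chain
        kinesis_client.put_record(StreamName=target.split(':', 1)[1],
            Data=json.dumps(message).encode('utf-8'), PartitionKey='testId')
    else:
        # end of the chain: persist to S3 so assert_objects() can verify the payload
        s3_client.put_object(Bucket=bucket, Key='result', Body=json.dumps(message))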
Example #3
def get_machine_id():
    global MACHINE_ID
    if MACHINE_ID:
        return MACHINE_ID

    # determine MACHINE_ID from config files
    configs_map = {}
    config_file_tmp = get_config_file_tempdir()
    config_file_home = get_config_file_homedir()
    for config_file in (config_file_home, config_file_tmp):
        if config_file:
            local_configs = load_file(config_file)
            local_configs = json.loads(to_str(local_configs))
            configs_map[config_file] = local_configs
            if 'machine_id' in local_configs:
                MACHINE_ID = local_configs['machine_id']
                break

    # if we can neither find NOR create the config files, fall back to process id
    if not configs_map:
        return PROCESS_ID

    # assign default id if empty
    if not MACHINE_ID:
        MACHINE_ID = short_uid()

    # update MACHINE_ID in all config files
    for config_file, configs in configs_map.items():
        configs['machine_id'] = MACHINE_ID
        save_file(config_file, json.dumps(configs))

    return MACHINE_ID
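
A standalone sketch of the same read-or-create pattern, with a hypothetical config path and uuid4 standing in for short_uid():

import json
import os
import uuid

def read_or_create_machine_id(path):
    # reuse a persisted ID when the config file already holds one
    configs = {}
    if os.path.exists(path):
        with open(path) as f:
            configs = json.load(f)
        if configs.get('machine_id'):
            return configs['machine_id']
    # otherwise mint a short ID and write it back for future runs
    configs['machine_id'] = uuid.uuid4().hex[:8]
    with open(path, 'w') as f:
        json.dump(configs, f)
    return configs['machine_id']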
Example #4
def get_function_code(function):
    """ Get the code of an existing function
        ---
        operationId: 'getFunctionCode'
        parameters:
    """
    arn = func_arn(function)
    lambda_cwd = arn_to_lambda[arn].cwd
    tmp_file = '%s/%s' % (lambda_cwd, LAMBDA_ZIP_FILE_NAME)
    return Response(load_file(tmp_file, mode='rb'),
            mimetype='application/zip',
            headers={'Content-Disposition': 'attachment; filename=lambda_archive.zip'})
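
For context, a self-contained sketch of the same download pattern outside LocalStack (route and archive path are hypothetical):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/function/code')
def function_code():
    with open('/tmp/lambda_archive.zip', 'rb') as f:  # hypothetical archive location
        data = f.read()
    # serve the bytes as a downloadable zip, mirroring get_function_code() above
    return Response(data, mimetype='application/zip',
        headers={'Content-Disposition': 'attachment; filename=lambda_archive.zip'})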
Example #5
def test_lambda_environment():

    lambda_client = aws_stack.connect_to_service('lambda')

    # deploy and invoke lambda without Docker
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_ENV), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_ENV,
        zip_file=zip_file, runtime=LAMBDA_RUNTIME_PYTHON27, envvars={'Hello': 'World'})
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_ENV, Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload']
    assert json.load(result_data) == {'Hello': 'World'}
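
The equality check implies that the TEST_LAMBDA_ENV fixture returns a filtered view of its environment; a minimal sketch under that assumption:

# hypothetical TEST_LAMBDA_ENV-style handler: report the injected variable
import os

def handler(event, context):
    # only the variable set via envvars={'Hello': 'World'} is echoed back
    return {'Hello': os.environ.get('Hello')}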
Example #6
    def test_list_stack_resources_returns_queue_urls(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        template = template_deployer.template_to_json(load_file(TEST_TEMPLATE_2))
        cloudformation.create_stack(StackName=TEST_STACK_NAME_2, TemplateBody=template)

        def check_stack():
            stack = get_stack_details(TEST_STACK_NAME_2)
            assert stack['StackStatus'] == 'CREATE_COMPLETE'

        retry(check_stack, retries=3, sleep=2)

        list_stack_summaries = list_stack_resources(TEST_STACK_NAME_2)
        queue_urls = get_queue_urls()

        for resource in list_stack_summaries:
            assert resource['PhysicalResourceId'] in queue_urls
Example #7
def get_lambda_code(func_name, retries=1, cache_time=None, env=None):
    if MOCK_OBJ:
        return ''
    env = aws_stack.get_environment(env)
    if cache_time is None and env.region != REGION_LOCAL:
        cache_time = AWS_LAMBDA_CODE_CACHE_TIMEOUT
    out = cmd_lambda('get-function --function-name %s' % func_name, env, cache_time)
    out = json.loads(out)
    loc = out['Code']['Location']
    loc_hash = md5(loc)
    folder = TMP_DOWNLOAD_FILE_PATTERN.replace('*', loc_hash)
    filename = 'archive.zip'
    archive = '%s/%s' % (folder, filename)
    try:
        mkdir(folder)
        if not os.path.isfile(archive):
            download(loc, archive, verify_ssl=False)
        if len(os.listdir(folder)) <= 1:
            zip_path = os.path.join(folder, filename)
            unzip(zip_path, folder)
    except Exception as e:
        print('WARN: %s' % e)
        rm_rf(archive)
        if retries > 0:
            return get_lambda_code(func_name, retries=retries - 1, cache_time=1, env=env)
        else:
            print('WARNING: Unable to retrieve lambda code: %s' % e)

    # traverse subdirectories and get script sources
    result = {}
    for root, subdirs, files in os.walk(folder):
        for file in files:
            prefix = root.split(folder)[-1]
            key = '%s/%s' % (prefix, file)
            if re.match(r'.+\.py$', key) or re.match(r'.+\.js$', key):
                codefile = '%s/%s' % (root, file)
                result[key] = load_file(codefile)

    # cleanup cache
    clean_cache(file_pattern=TMP_DOWNLOAD_FILE_PATTERN,
        last_clean_time=last_cache_cleanup_time,
        max_age=TMP_DOWNLOAD_CACHE_MAX_AGE)
    # TODO: delete only if cache_time is over
    rm_rf(folder)

    return result
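
A hedged usage sketch: the return value maps archive-relative paths of .py/.js files to their source text.

# hypothetical caller of get_lambda_code()
sources = get_lambda_code('my-function')
for rel_path, source in sources.items():
    print('%s: %s lines' % (rel_path, len(source.splitlines())))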
Example #8
    def test_apply_template(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        template = template_deployer.template_to_json(load_file(TEST_TEMPLATE_1))

        # deploy template
        cloudformation.create_stack(StackName=TEST_STACK_NAME, TemplateBody=template)

        # wait for deployment to finish
        def check_stack():
            stack = get_stack_details(TEST_STACK_NAME)
            assert stack['StackStatus'] == 'CREATE_COMPLETE'

        retry(check_stack, retries=3, sleep=2)

        # assert that bucket has been created
        assert bucket_exists('cf-test-bucket-1')
        # assert that queue has been created
        assert queue_exists('cf-test-queue-1')
        # assert that stream has been created
        assert stream_exists('cf-test-stream-1')
        # assert that queue has been created
        resource = describe_stack_resource(TEST_STACK_NAME, 'SQSQueueNoNameProperty')
        assert queue_exists(resource['PhysicalResourceId'])
Example #9
def test_destroy_idle_containers():

    # run these tests only for the "reuse containers" Lambda executor
    if not isinstance(lambda_api.LAMBDA_EXECUTOR, lambda_executors.LambdaExecutorReuseContainers):
        return

    executor = lambda_api.LAMBDA_EXECUTOR
    func_name = 'test_destroy_idle_containers'

    # create a new lambda
    lambda_client = aws_stack.connect_to_service('lambda')

    func_arn = lambda_api.func_arn(func_name)

    # make sure existing containers are gone
    executor.destroy_existing_docker_containers()
    assert len(executor.get_all_container_names()) == 0

    # deploy and invoke lambda without Docker
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_ENV), get_content=True,
                                              libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=func_name,
                                    zip_file=zip_file, runtime=LAMBDA_RUNTIME_PYTHON27, envvars={'Hello': 'World'})

    assert len(executor.get_all_container_names()) == 0

    lambda_client.invoke(FunctionName=func_name, Payload=b'{}')
    assert len(executor.get_all_container_names()) == 1

    # try to destroy idle containers.
    executor.idle_container_destroyer()
    assert len(executor.get_all_container_names()) == 1

    # simulate an idle container
    executor.function_invoke_times[func_arn] = time.time() - 610
    executor.idle_container_destroyer()
    assert len(executor.get_all_container_names()) == 0
Example #10
def test_api_gateway_lambda_proxy_integration():
    # create lambda function
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_PROXY_BACKEND,
        zip_file=zip_file, runtime=LAMBDA_RUNTIME_PYTHON27)

    # create API Gateway and connect it to the Lambda proxy backend
    lambda_uri = aws_stack.lambda_function_arn(TEST_LAMBDA_PROXY_BACKEND)
    target_uri = 'arn:aws:apigateway:%s:lambda:path/2015-03-31/functions/%s/invocations' % (DEFAULT_REGION, lambda_uri)
    result = connect_api_gateway_to_http_with_lambda_proxy('test_gateway2', target_uri,
        path=API_PATH_LAMBDA_PROXY_BACKEND)

    # make test request to gateway and check response
    path = API_PATH_LAMBDA_PROXY_BACKEND.replace('{test_param1}', 'foo1')
    url = INBOUND_GATEWAY_URL_PATTERN.format(api_id=result['id'], stage_name=TEST_STAGE_NAME, path=path)
    data = {'return_status_code': 203, 'return_headers': {'foo': 'bar123'}}
    result = requests.post(url, data=json.dumps(data))
    assert result.status_code == 203
    assert result.headers.get('foo') == 'bar123'
    parsed_body = json.loads(to_str(result.content))
    assert parsed_body.get('return_status_code') == 203
    assert parsed_body.get('return_headers') == {'foo': 'bar123'}
    assert parsed_body.get('pathParameters') == {'test_param1': 'foo1'}
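
The assertions encode the Lambda proxy integration response contract (statusCode/headers/body). A sketch of a backend handler that would satisfy them, assuming the real TEST_LAMBDA_PYTHON fixture echoes request details in a similar way:

import json

def handler(event, context):
    # parse the posted JSON body and honor its requested status code and headers
    body = json.loads(event.get('body') or '{}')
    body['pathParameters'] = event.get('pathParameters')
    return {
        'statusCode': body.get('return_status_code', 200),
        'headers': body.get('return_headers', {}),
        'body': json.dumps(body)
    }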
Example #11
    def test_kinesis_lambda_sns_ddb_sqs_streams(self):
        ddb_lease_table_suffix = '-kclapp'
        table_name = TEST_TABLE_NAME + 'klsdss' + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        dynamodb = aws_stack.connect_to_resource('dynamodb')
        dynamodb_service = aws_stack.connect_to_service('dynamodb')
        dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
        kinesis = aws_stack.connect_to_service('kinesis')
        sns = aws_stack.connect_to_service('sns')
        sqs = aws_stack.connect_to_service('sqs')

        LOGGER.info('Creating test streams...')
        run_safe(lambda: dynamodb_service.delete_table(
            TableName=stream_name + ddb_lease_table_suffix), print_error=False)
        aws_stack.create_kinesis_stream(stream_name, delete=True)
        aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(stream_name, listener_func=process_records,
            wait_until_started=True, ddb_lease_table_suffix=ddb_lease_table_suffix)

        LOGGER.info('Kinesis consumer initialized.')

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(table_name, partition_key=PARTITION_KEY,
            stream_view_type='NEW_AND_OLD_IMAGES')

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams['Streams']:
            if stream['TableName'] == table_name:
                ddb_event_source_arn = stream['StreamArn']
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
            libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
            zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27, delete=True)
        # make sure we cannot create Lambda with same name twice
        assert_raises(Exception, testutil.create_lambda_function, func_name=TEST_LAMBDA_NAME_DDB,
            zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

        # deploy test lambda connected to Kinesis Stream
        kinesis_event_source_arn = kinesis.describe_stream(
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)['StreamDescription']['StreamARN']
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM,
            zip_file=zip_file, event_source_arn=kinesis_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(TEST_LAMBDA_NAME_QUEUE)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_QUEUE,
            zip_file=zip_file, event_source_arn=sqs_queue_info['QueueArn'], runtime=LAMBDA_RUNTIME_PYTHON27)

        # set number of items to update/put to table
        num_events_ddb = 15
        num_put_new_items = 5
        num_put_existing_items = 2
        num_batch_items = 3
        num_updates_ddb = num_events_ddb - num_put_new_items - num_put_existing_items - num_batch_items

        LOGGER.info('Putting %s items to table...' % num_events_ddb)
        table = dynamodb.Table(table_name)
        for i in range(0, num_put_new_items):
            table.put_item(Item={
                PARTITION_KEY: 'testId%s' % i,
                'data': 'foobar123'
            })
        # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
        for i in range(0, num_put_existing_items):
            table.put_item(Item={
                PARTITION_KEY: 'testId%s' % i,
                'data': 'foobar123_put_existing'
            })

        # batch write some items containing non-ASCII characters
        dynamodb.batch_write_item(RequestItems={table_name: [
            {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ✓'}}},
            {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 £'}}},
            {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ¢'}}}
        ]})
        # update some items, which also triggers notification events
        for i in range(0, num_updates_ddb):
            dynamodb_service.update_item(TableName=table_name,
                Key={PARTITION_KEY: {'S': 'testId%s' % i}},
                AttributeUpdates={'data': {
                    'Action': 'PUT',
                    'Value': {'S': 'foobar123_updated'}
                }})

        # put items to stream
        num_events_kinesis = 10
        LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
        kinesis.put_records(
            Records=[
                {
                    'Data': '{}',
                    'PartitionKey': 'testId%s' % i
                } for i in range(0, num_events_kinesis)
            ], StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
        )

        # put 1 item to stream that will trigger an error in the Lambda
        kinesis.put_record(Data='{"%s": 1}' % lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
            PartitionKey='testIderror', StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

        # create SNS topic, connect it to the Lambda, publish test messages
        num_events_sns = 3
        response = sns.create_topic(Name=TEST_TOPIC_NAME)
        sns.subscribe(TopicArn=response['TopicArn'], Protocol='lambda',
            Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
        for i in range(0, num_events_sns):
            sns.publish(TopicArn=response['TopicArn'], Message='test message %s' % i)

        # get latest records
        latest = aws_stack.kinesis_get_latest_records(TEST_LAMBDA_SOURCE_STREAM_NAME,
            shard_id='shardId-000000000000', count=10)
        self.assertEqual(len(latest), 10)

        # send messages to SQS queue
        num_events_sqs = 4
        for i in range(num_events_sqs):
            sqs.send_message(QueueUrl=sqs_queue_info['QueueUrl'], MessageBody=str(i))

        LOGGER.info('Waiting some time before finishing test.')
        time.sleep(2)

        num_events_lambda = num_events_ddb + num_events_sns + num_events_sqs
        num_events = num_events_lambda + num_events_kinesis

        def check_events():
            if len(events) != num_events:
                LOGGER.warning('DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s' %
                    (len(events), num_events))
            self.assertEqual(len(events), num_events)
            event_items = [json.loads(base64.b64decode(e['data'])) for e in events]
            # make sure we have the right amount of INSERT/MODIFY event types
            inserts = [e for e in event_items if e.get('__action_type') == 'INSERT']
            modifies = [e for e in event_items if e.get('__action_type') == 'MODIFY']
            self.assertEqual(len(inserts), num_put_new_items + num_batch_items)
            self.assertEqual(len(modifies), num_put_existing_items + num_updates_ddb)

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=9, sleep=3)

        # check cloudwatch notifications
        num_invocations = get_lambda_invocations_count(TEST_LAMBDA_NAME_STREAM)
        # TODO: It seems that CloudWatch is currently reporting an incorrect number of
        #   invocations, namely the sum over *all* lambdas, not the single one we're asking for.
        #   Also, we need to bear in mind that Kinesis may perform batch updates, i.e., a single
        #   Lambda invocation may happen with a set of Kinesis records, hence we cannot simply
        #   add num_events_ddb to num_events_lambda above!
        # self.assertEqual(num_invocations, 2 + num_events_lambda)
        self.assertGreater(num_invocations, num_events_sns + num_events_sqs)
        num_error_invocations = get_lambda_invocations_count(TEST_LAMBDA_NAME_STREAM, 'Errors')
        self.assertEqual(num_error_invocations, 1)

        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_STREAM)
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_DDB)
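
For reference, the decode step used in check_events() in isolation: the KCL listener hands over records whose 'data' field is base64-encoded JSON (Python 3 json.loads accepts the decoded bytes directly).

import base64
import json

record = {'data': base64.b64encode(json.dumps({'__action_type': 'INSERT'}).encode('utf-8'))}
item = json.loads(base64.b64decode(record['data']))
assert item['__action_type'] == 'INSERT'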
Example #12
def set_function_code(code, lambda_name, lambda_cwd=None):
    def generic_handler(event, context):
        raise ClientError((
            'Unable to find executor for Lambda function "%s". Note that ' +
            'Node.js, Golang, and .Net Core Lambdas currently require LAMBDA_EXECUTOR=docker'
        ) % lambda_name)

    arn = func_arn(lambda_name)
    lambda_details = arn_to_lambda[arn]
    runtime = lambda_details.runtime
    lambda_environment = lambda_details.envvars
    handler_name = lambda_details.handler or LAMBDA_DEFAULT_HANDLER
    code_passed = code
    code = code or lambda_details.code
    is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL
    zip_file_content = None

    if code_passed:
        lambda_cwd = lambda_cwd or set_archive_code(code_passed, lambda_name)
        if not is_local_mount:
            # Save the zip file to a temporary file that the lambda executors can reference
            zip_file_content = get_zip_bytes(code_passed)
    else:
        lambda_cwd = lambda_cwd or lambda_details.cwd

    # get local lambda working directory
    tmp_file = '%s/%s' % (lambda_cwd, LAMBDA_ZIP_FILE_NAME)

    if not zip_file_content:
        zip_file_content = load_file(tmp_file, mode='rb')

    # Set the appropriate lambda handler.
    lambda_handler = generic_handler
    if runtime == LAMBDA_RUNTIME_JAVA8:
        # The Lambda executors for Docker subclass LambdaExecutorContainers,
        # which runs Lambda in Docker by passing all *.jar files in the function
        # working directory as part of the classpath. Because of this, we need to
        # save the zip_file_content as a .jar here.
        lambda_handler, zip_file_content = get_java_handler(
            zip_file_content, handler_name, tmp_file)
        if is_jar_archive(zip_file_content):
            jar_tmp_file = '{working_dir}/{file_name}'.format(
                working_dir=lambda_cwd, file_name=LAMBDA_JAR_FILE_NAME)
            save_file(jar_tmp_file, zip_file_content)

    else:
        handler_file = get_handler_file_from_name(handler_name,
                                                  runtime=runtime)
        handler_function = get_handler_function_from_name(handler_name,
                                                          runtime=runtime)

        if not is_local_mount:
            # Lambda code must be uploaded in Zip format
            if not is_zip_file(zip_file_content):
                raise ClientError(
                    'Uploaded Lambda code for runtime ({}) is not in Zip format'
                    .format(runtime))
            unzip(tmp_file, lambda_cwd)

        main_file = '%s/%s' % (lambda_cwd, handler_file)
        if not os.path.exists(main_file):
            # Raise an error if (1) this is not a local mount lambda, or (2) we're
            # running Lambdas locally (not in Docker), or (3) we're using remote Docker.
            # -> We do *not* want to raise an error if we're using local mount in non-remote Docker
            if not is_local_mount or not use_docker() or config.LAMBDA_REMOTE_DOCKER:
                file_list = run('cd "%s"; du -d 3 .' % lambda_cwd)
                config_debug = (
                    'Config for local mount, docker, remote: "%s", "%s", "%s"'
                    % (is_local_mount, use_docker(),
                       config.LAMBDA_REMOTE_DOCKER))
                LOG.debug('Lambda archive content:\n%s' % file_list)
                raise ClientError(
                    error_response(
                        'Unable to find handler script in Lambda archive. %s' %
                        config_debug,
                        400,
                        error_type='ValidationError'))

        if runtime.startswith('python') and not use_docker():
            try:
                # make sure the file is actually readable, then read contents
                ensure_readable(main_file)
                zip_file_content = load_file(main_file, mode='rb')
                # extract handler
                lambda_handler = exec_lambda_code(
                    zip_file_content,
                    handler_function=handler_function,
                    lambda_cwd=lambda_cwd,
                    lambda_env=lambda_environment)
            except Exception as e:
                raise ClientError(
                    'Unable to get handler function from lambda code.', e)

    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)

    return {'FunctionName': lambda_name}
Example #13
def install_elasticsearch(version=None):
    version = get_elasticsearch_install_version(version)
    install_dir = get_elasticsearch_install_dir(version)
    installed_executable = os.path.join(install_dir, 'bin', 'elasticsearch')
    if not os.path.exists(installed_executable):
        log_install_msg('Elasticsearch (%s)' % version)
        es_url = ELASTICSEARCH_URLS.get(version)
        if not es_url:
            raise Exception(
                'Unable to find download URL for Elasticsearch version "%s"' %
                version)
        install_dir_parent = os.path.dirname(install_dir)
        mkdir(install_dir_parent)
        # download and extract archive
        tmp_archive = os.path.join(config.TMP_FOLDER,
                                   'localstack.%s' % os.path.basename(es_url))
        download_and_extract_with_retry(es_url, tmp_archive,
                                        install_dir_parent)
        elasticsearch_dir = glob.glob(
            os.path.join(install_dir_parent, 'elasticsearch*'))
        if not elasticsearch_dir:
            raise Exception('Unable to find Elasticsearch folder in %s' %
                            install_dir_parent)
        shutil.move(elasticsearch_dir[0], install_dir)

        for dir_name in ('data', 'logs', 'modules', 'plugins',
                         'config/scripts'):
            dir_path = os.path.join(install_dir, dir_name)
            mkdir(dir_path)
            chmod_r(dir_path, 0o777)

        # install default plugins
        for plugin in ELASTICSEARCH_PLUGIN_LIST:
            if is_alpine():
                # https://github.com/pires/docker-elasticsearch/issues/56
                os.environ['ES_TMPDIR'] = '/tmp'
            plugin_binary = os.path.join(install_dir, 'bin',
                                         'elasticsearch-plugin')
            plugin_dir = os.path.join(install_dir, 'plugins', plugin)
            if not os.path.exists(plugin_dir):
                LOG.info('Installing Elasticsearch plugin %s' % (plugin))
                run('%s install -b %s' % (plugin_binary, plugin))

    # delete some plugins to free up space
    for plugin in ELASTICSEARCH_DELETE_MODULES:
        module_dir = os.path.join(install_dir, 'modules', plugin)
        rm_rf(module_dir)

    # disable x-pack-ml plugin (not working on Alpine)
    xpack_dir = os.path.join(install_dir, 'modules', 'x-pack-ml', 'platform')
    rm_rf(xpack_dir)

    # patch JVM options file - replace hardcoded heap size settings
    jvm_options_file = os.path.join(install_dir, 'config', 'jvm.options')
    if os.path.exists(jvm_options_file):
        jvm_options = load_file(jvm_options_file)
        jvm_options_replaced = re.sub(r'(^-Xm[sx][a-zA-Z0-9\.]+$)',
                                      r'# \1',
                                      jvm_options,
                                      flags=re.MULTILINE)
        if jvm_options != jvm_options_replaced:
            save_file(jvm_options_file, jvm_options_replaced)
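
The MULTILINE substitution comments out fixed heap flags while leaving other options intact; a worked example:

import re

jvm_options = '-Xms1g\n-Xmx1g\n-XX:+UseConcMarkSweepGC'
patched = re.sub(r'(^-Xm[sx][a-zA-Z0-9\.]+$)', r'# \1', jvm_options, flags=re.MULTILINE)
print(patched)
# -Xms1g and -Xmx1g become '# -Xms1g' / '# -Xmx1g'; the GC flag is untouched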
Example #14
def test_prime_and_destroy_containers():

    # run these tests only for the "reuse containers" Lambda executor
    if not isinstance(lambda_api.LAMBDA_EXECUTOR, lambda_executors.LambdaExecutorReuseContainers):
        return

    executor = lambda_api.LAMBDA_EXECUTOR
    func_name = 'test_prime_and_destroy_containers'

    # create a new lambda
    lambda_client = aws_stack.connect_to_service('lambda')

    func_arn = lambda_api.func_arn(func_name)

    # make sure existing containers are gone
    executor.cleanup()
    assert len(executor.get_all_container_names()) == 0

    # deploy and invoke lambda without Docker
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_ENV), get_content=True,
                                              libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=func_name, zip_file=zip_file,
                                    runtime=LAMBDA_RUNTIME_PYTHON27, envvars={'Hello': 'World'})

    assert len(executor.get_all_container_names()) == 0

    assert executor.function_invoke_times == {}

    # invoke a few times.
    durations = []
    num_iterations = 3

    for i in range(0, num_iterations + 1):
        prev_invoke_time = None
        if i > 0:
            prev_invoke_time = executor.function_invoke_times[func_arn]

        start_time = time.time()
        lambda_client.invoke(FunctionName=func_name, Payload=b'{}')
        duration = time.time() - start_time

        assert len(executor.get_all_container_names()) == 1

        # ensure the last invoke time is being updated properly.
        if i > 0:
            assert executor.function_invoke_times[func_arn] > prev_invoke_time
        else:
            assert executor.function_invoke_times[func_arn] > 0

        durations.append(duration)

    # the first call would have created the container. subsequent calls would reuse and be faster.
    for i in range(1, num_iterations + 1):
        assert durations[i] < durations[0]

    status = executor.get_docker_container_status(func_arn)
    assert status == 1

    executor.cleanup()
    status = executor.get_docker_container_status(func_arn)
    assert status == 0

    assert len(executor.get_all_container_names()) == 0
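
A hedged sketch of the kind of probe get_docker_container_status() presumably performs (the real executor API may differ):

import subprocess

def container_running(container_name):
    # list running containers matching the name; non-empty output means status == 1
    out = subprocess.check_output(
        ['docker', 'ps', '--filter', 'name=%s' % container_name, '--format', '{{.Names}}'])
    return container_name in out.decode('utf-8').split()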
Example #15
    def test_validate_template(self):
        cloudformation = aws_stack.connect_to_service('cloudformation')
        template = template_deployer.template_to_json(load_file(TEST_TEMPLATE_1))
        response = cloudformation.validate_template(TemplateBody=template)
        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
Example #16
def test_lambda_runtimes():

    lambda_client = aws_stack.connect_to_service('lambda')

    # deploy and invoke lambda - Python 2.7
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON),
                                              get_content=True,
                                              libs=TEST_LAMBDA_LIBS,
                                              runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_PY,
                                    zip_file=zip_file,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY,
                                  Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert to_str(result_data).strip() == '{}'

    if use_docker():
        # deploy and invoke lambda - Python 3.6
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON3),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON36)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_PY3,
                                        zip_file=zip_file,
                                        runtime=LAMBDA_RUNTIME_PYTHON36)
        result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY3,
                                      Payload=b'{}')
        assert result['StatusCode'] == 200
        result_data = result['Payload'].read()
        assert to_str(result_data).strip() == '{}'

    # deploy and invoke lambda - Java
    if not os.path.exists(TEST_LAMBDA_JAVA):
        mkdir(os.path.dirname(TEST_LAMBDA_JAVA))
        download(TEST_LAMBDA_JAR_URL, TEST_LAMBDA_JAVA)
    zip_file = testutil.create_zip_file(TEST_LAMBDA_JAVA, get_content=True)
    testutil.create_lambda_function(
        func_name=TEST_LAMBDA_NAME_JAVA,
        zip_file=zip_file,
        runtime=LAMBDA_RUNTIME_JAVA8,
        handler='cloud.localstack.sample.LambdaHandler')
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA,
                                  Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert to_str(result_data).strip() == '{}'

    if use_docker():
        # deploy and invoke lambda - Node.js
        zip_file = testutil.create_zip_file(TEST_LAMBDA_NODEJS,
                                            get_content=True)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_JS,
                                        zip_file=zip_file,
                                        handler='lambda_integration.handler',
                                        runtime=LAMBDA_RUNTIME_NODEJS)
        result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JS,
                                      Payload=b'{}')
        assert result['StatusCode'] == 200
        result_data = result['Payload'].read()
        assert to_str(result_data).strip() == '{}'
Example #17
def create_lambda_function(
    func_name,
    zip_file=None,
    event_source_arn=None,
    handler_file=None,
    handler=None,
    starting_position=None,
    runtime=None,
    envvars={},
    tags={},
    libs=[],
    delete=False,
    layers=None,
    **kwargs,
):
    """Utility method to create a new function via the Lambda API"""

    starting_position = starting_position or LAMBDA_DEFAULT_STARTING_POSITION
    runtime = runtime or LAMBDA_DEFAULT_RUNTIME
    client = aws_stack.connect_to_service("lambda")

    # load zip file content if handler_file is specified
    if not zip_file and handler_file:
        file_content = load_file(handler_file) if os.path.exists(handler_file) else handler_file
        if libs or not handler:
            zip_file = create_lambda_archive(
                file_content,
                libs=libs,
                get_content=True,
                runtime=runtime or LAMBDA_DEFAULT_RUNTIME,
            )
        else:
            zip_file = create_zip_file(handler_file, get_content=True)

    handler = handler or LAMBDA_DEFAULT_HANDLER

    if delete:
        try:
            # Delete function if one already exists
            client.delete_function(FunctionName=func_name)
        except Exception:
            pass

    lambda_code = {"ZipFile": zip_file}
    if len(zip_file) > MAX_LAMBDA_ARCHIVE_UPLOAD_SIZE:
        s3 = aws_stack.connect_to_service("s3")
        aws_stack.get_or_create_bucket(LAMBDA_ASSETS_BUCKET_NAME)
        asset_key = f"{short_uid()}.zip"
        s3.upload_fileobj(Fileobj=io.BytesIO(zip_file),
                          Bucket=LAMBDA_ASSETS_BUCKET_NAME,
                          Key=asset_key)
        lambda_code = {
            "S3Bucket": LAMBDA_ASSETS_BUCKET_NAME,
            "S3Key": asset_key
        }

    # create function
    additional_kwargs = kwargs
    kwargs = {
        "FunctionName": func_name,
        "Runtime": runtime,
        "Handler": handler,
        "Role": LAMBDA_TEST_ROLE,
        "Code": lambda_code,
        "Timeout": LAMBDA_TIMEOUT_SEC,
        "Environment": dict(Variables=envvars),
        "Tags": tags,
    }
    kwargs.update(additional_kwargs)
    if layers:
        kwargs["Layers"] = layers
    create_func_resp = client.create_function(**kwargs)

    resp = {
        "CreateFunctionResponse": create_func_resp,
        "CreateEventSourceMappingResponse": None,
    }

    # create event source mapping
    if event_source_arn:
        resp["CreateEventSourceMappingResponse"] = client.create_event_source_mapping(
            FunctionName=func_name,
            EventSourceArn=event_source_arn,
            StartingPosition=starting_position,
        )

    return resp
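
A usage sketch for the utility above; the handler path and runtime string are hypothetical, and short_uid() comes from the same test utilities:

# hypothetical call: handler_file is zipped automatically, large archives go via S3
resp = create_lambda_function(
    func_name='demo-func-%s' % short_uid(),
    handler_file='/tmp/handler.py',
    runtime='python3.8',
    envvars={'STAGE': 'test'},
)
print(resp['CreateFunctionResponse']['FunctionArn'])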
Example #18
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')
        cls.zip_file_content2 = load_file(TEST_LAMBDA_DOTNETCORE2, mode='rb')
        cls.zip_file_content31 = load_file(TEST_LAMBDA_DOTNETCORE31, mode='rb')
Example #19
    def test_prime_and_destroy_containers(self):
        # run these tests only for the "reuse containers" Lambda executor
        if not isinstance(lambda_api.LAMBDA_EXECUTOR,
                          lambda_executors.LambdaExecutorReuseContainers):
            return

        executor = lambda_api.LAMBDA_EXECUTOR
        func_name = 'test_prime_and_destroy_containers'
        func_arn = lambda_api.func_arn(func_name)

        # make sure existing containers are gone
        executor.cleanup()
        self.assertEqual(len(executor.get_all_container_names()), 0)

        # deploy and invoke lambda without Docker
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_ENV),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )
        testutil.create_lambda_function(
            func_name=func_name,
            zip_file=zip_file,
            runtime=LAMBDA_RUNTIME_PYTHON27,
            envvars={'Hello': 'World'}
        )

        self.assertEqual(len(executor.get_all_container_names()), 0)
        self.assertDictEqual(executor.function_invoke_times, {})

        # invoke a few times.
        durations = []
        num_iterations = 3

        for i in range(0, num_iterations + 1):
            prev_invoke_time = None
            if i > 0:
                prev_invoke_time = executor.function_invoke_times[func_arn]

            start_time = time.time()
            self.lambda_client.invoke(FunctionName=func_name, Payload=b'{}')
            duration = time.time() - start_time

            self.assertEqual(len(executor.get_all_container_names()), 1)

            # ensure the last invoke time is being updated properly.
            if i > 0:
                self.assertGreater(executor.function_invoke_times[func_arn], prev_invoke_time)
            else:
                self.assertGreater(executor.function_invoke_times[func_arn], 0)

            durations.append(duration)

        # the first call would have created the container. subsequent calls would reuse and be faster.
        for i in range(1, num_iterations + 1):
            self.assertLess(durations[i], durations[0])

        status = executor.get_docker_container_status(func_arn)
        self.assertEqual(status, 1)

        container_network = executor.get_docker_container_network(func_arn)
        self.assertEqual(container_network, 'default')

        executor.cleanup()
        status = executor.get_docker_container_status(func_arn)
        self.assertEqual(status, 0)

        self.assertEqual(len(executor.get_all_container_names()), 0)
Example #20
    def test_create_delete_stack(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        cf_client = aws_stack.connect_to_service('cloudformation')
        s3 = aws_stack.connect_to_service('s3')
        sns = aws_stack.connect_to_service('sns')
        apigateway = aws_stack.connect_to_service('apigateway')
        template = template_deployer.template_to_json(
            load_file(TEST_TEMPLATE_1))

        # deploy template
        stack_name = 'stack-%s' % short_uid()
        cloudformation.create_stack(StackName=stack_name,
                                    TemplateBody=template)

        # wait for deployment to finish
        def check_stack():
            stack = get_stack_details(stack_name)
            self.assertEqual(stack['StackStatus'], 'CREATE_COMPLETE')

        retry(check_stack, retries=3, sleep=2)

        # assert that resources have been created
        assert bucket_exists('cf-test-bucket-1')
        assert queue_exists('cf-test-queue-1')
        topic_arn = topic_exists('%s-test-topic-1-1' % stack_name)
        assert topic_arn
        assert stream_exists('cf-test-stream-1')
        resource = describe_stack_resource(stack_name,
                                           'SQSQueueNoNameProperty')
        assert queue_exists(resource['PhysicalResourceId'])

        # assert that tags have been created
        tags = s3.get_bucket_tagging(Bucket='cf-test-bucket-1')['TagSet']
        self.assertEqual(
            tags, [{
                'Key': 'foobar',
                'Value': aws_stack.get_sqs_queue_url('cf-test-queue-1')
            }])
        tags = sns.list_tags_for_resource(ResourceArn=topic_arn)['Tags']
        self.assertEqual(
            tags, [{
                'Key': 'foo',
                'Value': 'cf-test-bucket-1'
            }, {
                'Key': 'bar',
                'Value': aws_stack.s3_bucket_arn('cf-test-bucket-1')
            }])

        # assert that subscriptions have been created
        subs = sns.list_subscriptions()['Subscriptions']
        subs = [
            s for s in subs
            if (':%s:cf-test-queue-1' % TEST_AWS_ACCOUNT_ID) in s['Endpoint']
        ]
        self.assertEqual(len(subs), 1)
        self.assertIn(
            ':%s:%s-test-topic-1-1' % (TEST_AWS_ACCOUNT_ID, stack_name),
            subs[0]['TopicArn'])

        # assert that Gateway responses have been created
        test_api_name = 'test-api'
        api = [
            a for a in apigateway.get_rest_apis()['items']
            if a['name'] == test_api_name
        ][0]
        responses = apigateway.get_gateway_responses(
            restApiId=api['id'])['items']
        self.assertEqual(len(responses), 2)
        types = [r['responseType'] for r in responses]
        self.assertEqual(set(types), set(['UNAUTHORIZED', 'DEFAULT_5XX']))

        # delete the stack
        cf_client.delete_stack(StackName=stack_name)

        # assert that resources have been deleted
        assert not bucket_exists('cf-test-bucket-1')
        assert not queue_exists('cf-test-queue-1')
        assert not topic_exists('%s-test-topic-1-1' % stack_name)
        retry(lambda: self.assertFalse(stream_exists('cf-test-stream-1')))
Example #21
def test_lambda_runtimes():

    lambda_client = aws_stack.connect_to_service('lambda')

    # deploy and invoke lambda - Python 2.7
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON),
                                              get_content=True,
                                              libs=TEST_LAMBDA_LIBS,
                                              runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_PY,
                                    zip_file=zip_file,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY,
                                  Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert to_str(result_data).strip() == '{}'

    if use_docker():
        # deploy and invoke lambda - Python 3.6
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON3),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON36)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_PY3,
                                        zip_file=zip_file,
                                        runtime=LAMBDA_RUNTIME_PYTHON36)
        result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY3,
                                      Payload=b'{}')
        assert result['StatusCode'] == 200
        result_data = result['Payload'].read()
        assert to_str(result_data).strip() == '{}'

    # deploy and invoke lambda - Java
    if not os.path.exists(TEST_LAMBDA_JAVA):
        mkdir(os.path.dirname(TEST_LAMBDA_JAVA))
        download(TEST_LAMBDA_JAR_URL, TEST_LAMBDA_JAVA)
    zip_file = testutil.create_zip_file(TEST_LAMBDA_JAVA, get_content=True)
    testutil.create_lambda_function(
        func_name=TEST_LAMBDA_NAME_JAVA,
        zip_file=zip_file,
        runtime=LAMBDA_RUNTIME_JAVA8,
        handler='cloud.localstack.sample.LambdaHandler')
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA,
                                  Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert 'LinkedHashMap' in to_str(result_data)

    # test SNSEvent
    result = lambda_client.invoke(
        FunctionName=TEST_LAMBDA_NAME_JAVA,
        InvocationType='Event',
        Payload=b'{"Records": [{"Sns": {"Message": "{}"}}]}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert json.loads(to_str(result_data)) == {'async': 'True'}

    # test KinesisEvent
    result = lambda_client.invoke(
        FunctionName=TEST_LAMBDA_NAME_JAVA,
        Payload=b'{"Records": [{"Kinesis": {"Data": "data", "PartitionKey": "partition"}}]}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert 'KinesisEvent' in to_str(result_data)

    # deploy and invoke lambda - Java with stream handler
    testutil.create_lambda_function(
        func_name=TEST_LAMBDA_NAME_JAVA_STREAM,
        zip_file=zip_file,
        runtime=LAMBDA_RUNTIME_JAVA8,
        handler='cloud.localstack.sample.LambdaStreamHandler')
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA_STREAM,
                                  Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert to_str(result_data).strip() == '{}'

    if use_docker():
        # deploy and invoke lambda - Node.js
        zip_file = testutil.create_zip_file(TEST_LAMBDA_NODEJS,
                                            get_content=True)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_JS,
                                        zip_file=zip_file,
                                        handler='lambda_integration.handler',
                                        runtime=LAMBDA_RUNTIME_NODEJS)
        result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JS,
                                      Payload=b'{}')
        assert result['StatusCode'] == 200
        result_data = result['Payload'].read()
        assert to_str(result_data).strip() == '{}'
Example #22
    def _test_api_gateway_lambda_proxy_integration(self, fn_name, path):
        # create lambda function
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )
        testutil.create_lambda_function(
            func_name=fn_name,
            zip_file=zip_file,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )

        # create API Gateway and connect it to the Lambda proxy backend
        lambda_uri = aws_stack.lambda_function_arn(fn_name)
        invocation_uri = 'arn:aws:apigateway:%s:lambda:path/2015-03-31/functions/%s/invocations'
        target_uri = invocation_uri % (DEFAULT_REGION, lambda_uri)

        result = self.connect_api_gateway_to_http_with_lambda_proxy(
            'test_gateway2',
            target_uri,
            path=path
        )

        api_id = result['id']
        path_map = get_rest_api_paths(api_id)
        _, resource = get_resource_for_path('/lambda/foo1', path_map)

        # make test request to gateway and check response
        path = path.replace('{test_param1}', 'foo1')
        path = path + '?foo=foo&bar=bar&bar=baz'

        url = INBOUND_GATEWAY_URL_PATTERN.format(
            api_id=api_id,
            stage_name=self.TEST_STAGE_NAME,
            path=path
        )

        data = {'return_status_code': 203, 'return_headers': {'foo': 'bar123'}}
        result = requests.post(
            url,
            data=json.dumps(data),
            headers={'User-Agent': 'python-requests/testing'}
        )

        self.assertEqual(result.status_code, 203)
        self.assertEqual(result.headers.get('foo'), 'bar123')

        parsed_body = json.loads(to_str(result.content))
        self.assertEqual(parsed_body.get('return_status_code'), 203)
        self.assertDictEqual(parsed_body.get('return_headers'), {'foo': 'bar123'})
        self.assertDictEqual(parsed_body.get('queryStringParameters'), {'foo': 'foo', 'bar': ['bar', 'baz']})

        request_context = parsed_body.get('requestContext')
        source_ip = request_context['identity'].pop('sourceIp')

        self.assertTrue(re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', source_ip))

        self.assertEqual(request_context['path'], '/lambda/foo1')
        self.assertEqual(request_context['accountId'], TEST_AWS_ACCOUNT_ID)
        self.assertEqual(request_context['resourceId'], resource.get('id'))
        self.assertEqual(request_context['stage'], self.TEST_STAGE_NAME)
        self.assertEqual(request_context['identity']['userAgent'], 'python-requests/testing')

        result = requests.delete(url, data=json.dumps(data))
        self.assertEqual(result.status_code, 404)
Example #23
    def test_kinesis_lambda_sns_ddb_sqs_streams(self):
        def create_kinesis_stream(name, delete=False):
            stream = aws_stack.create_kinesis_stream(name, delete=delete)
            stream.wait_for()

        ddb_lease_table_suffix = "-kclapp"
        table_name = TEST_TABLE_NAME + "klsdss" + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        lambda_stream_name = "lambda-stream-%s" % short_uid()
        lambda_queue_name = "lambda-queue-%s" % short_uid()
        lambda_ddb_name = "lambda-ddb-%s" % short_uid()
        queue_name = "queue-%s" % short_uid()
        dynamodb = aws_stack.connect_to_resource("dynamodb")
        dynamodb_service = aws_stack.connect_to_service("dynamodb")
        dynamodbstreams = aws_stack.connect_to_service("dynamodbstreams")
        kinesis = aws_stack.connect_to_service("kinesis")
        sns = aws_stack.connect_to_service("sns")
        sqs = aws_stack.connect_to_service("sqs")

        LOGGER.info("Creating test streams...")
        run_safe(
            lambda: dynamodb_service.delete_table(TableName=stream_name +
                                                  ddb_lease_table_suffix),
            print_error=False,
        )

        create_kinesis_stream(stream_name, delete=True)
        create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(
            stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix=ddb_lease_table_suffix,
        )

        LOGGER.info("Kinesis consumer initialized.")

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(
            table_name,
            partition_key=PARTITION_KEY,
            stream_view_type="NEW_AND_OLD_IMAGES",
        )

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams["Streams"]:
            if stream["TableName"] == table_name:
                ddb_event_source_arn = stream["StreamArn"]
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS)
        testutil.create_lambda_function(
            func_name=lambda_ddb_name,
            zip_file=zip_file,
            event_source_arn=ddb_event_source_arn,
            delete=True,
        )
        # make sure we cannot create Lambda with same name twice
        with self.assertRaises(Exception):
            testutil.create_lambda_function(
                func_name=lambda_ddb_name,
                zip_file=zip_file,
                event_source_arn=ddb_event_source_arn,
            )

        # deploy test lambda connected to Kinesis Stream
        kinesis_event_source_arn = kinesis.describe_stream(
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
        )["StreamDescription"]["StreamARN"]
        testutil.create_lambda_function(
            func_name=lambda_stream_name,
            zip_file=zip_file,
            event_source_arn=kinesis_event_source_arn,
        )

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(queue_name)
        testutil.create_lambda_function(
            func_name=lambda_queue_name,
            zip_file=zip_file,
            event_source_arn=sqs_queue_info["QueueArn"],
        )

        # set number of items to update/put to table
        num_events_ddb = 15
        num_put_new_items = 5
        num_put_existing_items = 2
        num_batch_items = 3
        num_updates_ddb = (num_events_ddb - num_put_new_items -
                           num_put_existing_items - num_batch_items)

        LOGGER.info("Putting %s items to table..." % num_events_ddb)
        table = dynamodb.Table(table_name)
        for i in range(0, num_put_new_items):
            table.put_item(Item={
                PARTITION_KEY: "testId%s" % i,
                "data": "foobar123"
            })
        # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
        for i in range(0, num_put_existing_items):
            table.put_item(Item={
                PARTITION_KEY: "testId%s" % i,
                "data": "foobar123_put_existing"
            })

        # batch write some items containing non-ASCII characters
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobar123 ✓"
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobar123 £"
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobar123 ¢"
                            }
                        }
                    },
                ]
            })
        # update some items, which also triggers notification events
        for i in range(0, num_updates_ddb):
            dynamodb_service.update_item(
                TableName=table_name,
                Key={PARTITION_KEY: {
                    "S": "testId%s" % i
                }},
                AttributeUpdates={
                    "data": {
                        "Action": "PUT",
                        "Value": {
                            "S": "foobar123_updated"
                        }
                    }
                },
            )

        # put items to stream
        num_events_kinesis = 1
        num_kinesis_records = 10
        LOGGER.info("Putting %s records in %s event to stream..." %
                    (num_kinesis_records, num_events_kinesis))
        kinesis.put_records(
            Records=[{
                "Data": "{}",
                "PartitionKey": "testId%s" % i
            } for i in range(0, num_kinesis_records)],
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME,
        )

        # put 1 item to stream that will trigger an error in the Lambda
        num_events_kinesis_err = 1
        for i in range(num_events_kinesis_err):
            kinesis.put_record(
                Data='{"%s": 1}' % lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
                PartitionKey="testIdError",
                StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME,
            )

        # create SNS topic, connect it to the Lambda, publish test messages
        num_events_sns = 3
        response = sns.create_topic(Name=TEST_TOPIC_NAME)
        sns.subscribe(
            TopicArn=response["TopicArn"],
            Protocol="lambda",
            Endpoint=aws_stack.lambda_function_arn(lambda_stream_name),
        )
        for i in range(num_events_sns):
            sns.publish(
                TopicArn=response["TopicArn"],
                Subject="test_subject",
                Message="test message %s" % i,
            )

        # get latest records
        latest = aws_stack.kinesis_get_latest_records(
            TEST_LAMBDA_SOURCE_STREAM_NAME,
            shard_id="shardId-000000000000",
            count=10)
        self.assertEqual(10, len(latest))

        # send messages to SQS queue
        num_events_sqs = 4
        for i in range(num_events_sqs):
            sqs.send_message(QueueUrl=sqs_queue_info["QueueUrl"],
                             MessageBody=str(i))

        LOGGER.info("Waiting some time before finishing test.")
        time.sleep(2)

        num_events_lambda = num_events_ddb + num_events_sns + num_events_sqs
        num_events = num_events_lambda + num_kinesis_records

        def check_events():
            if len(events) != num_events:
                msg = "DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s" % (
                    len(events),
                    num_events,
                )
                LOGGER.warning(msg)
            self.assertEqual(num_events, len(events))
            event_items = [
                json.loads(base64.b64decode(e["data"])) for e in events
            ]
            # make sure we have the right number of INSERT/MODIFY event types
            inserts = [
                e for e in event_items if e.get("__action_type") == "INSERT"
            ]
            modifies = [
                e for e in event_items if e.get("__action_type") == "MODIFY"
            ]
            self.assertEqual(num_put_new_items + num_batch_items, len(inserts))
            self.assertEqual(num_put_existing_items + num_updates_ddb,
                             len(modifies))

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=15, sleep=2)

        # check cloudwatch notifications
        def check_cw_invocations():
            num_invocations = get_lambda_invocations_count(lambda_stream_name)
            expected_invocation_count = num_events_kinesis + num_events_kinesis_err + num_events_sns
            self.assertEqual(expected_invocation_count, num_invocations)
            num_error_invocations = get_lambda_invocations_count(
                lambda_stream_name, "Errors")
            self.assertEqual(num_events_kinesis_err, num_error_invocations)

        # Lambda invocations are running asynchronously, hence sleep some time here to wait for results
        retry(check_cw_invocations, retries=7, sleep=2)

        # clean up
        testutil.delete_lambda_function(lambda_stream_name)
        testutil.delete_lambda_function(lambda_ddb_name)
        testutil.delete_lambda_function(lambda_queue_name)
        sqs.delete_queue(QueueUrl=sqs_queue_info["QueueUrl"])
Ejemplo n.º 24
0
    def test_lambda_streams_batch_and_transactions(self):
        ddb_lease_table_suffix = '-kclapp2'
        table_name = TEST_TABLE_NAME + 'lsbat' + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        dynamodb = aws_stack.connect_to_service('dynamodb', client=True)
        dynamodb_service = aws_stack.connect_to_service('dynamodb')
        dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')

        LOGGER.info('Creating test streams...')
        run_safe(lambda: dynamodb_service.delete_table(
            TableName=stream_name + ddb_lease_table_suffix), print_error=False)
        aws_stack.create_kinesis_stream(stream_name, delete=True)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(stream_name, listener_func=process_records,
            wait_until_started=True, ddb_lease_table_suffix=ddb_lease_table_suffix)

        LOGGER.info('Kinesis consumer initialized.')

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(table_name, partition_key=PARTITION_KEY,
            stream_view_type='NEW_AND_OLD_IMAGES')

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams['Streams']:
            if stream['TableName'] == table_name:
                ddb_event_source_arn = stream['StreamArn']
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
            libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
            zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27, delete=True)

        # submit a batch with writes
        dynamodb.batch_write_item(RequestItems={table_name: [
            {'PutRequest': {'Item': {PARTITION_KEY: {'S': 'testId0'}, 'data': {'S': 'foobar123'}}}},
            {'PutRequest': {'Item': {PARTITION_KEY: {'S': 'testId1'}, 'data': {'S': 'foobar123'}}}},
            {'PutRequest': {'Item': {PARTITION_KEY: {'S': 'testId2'}, 'data': {'S': 'foobar123'}}}}
        ]})

        # submit a batch with writes and deletes
        dynamodb.batch_write_item(RequestItems={table_name: [
            {'PutRequest': {'Item': {PARTITION_KEY: {'S': 'testId3'}, 'data': {'S': 'foobar123'}}}},
            {'PutRequest': {'Item': {PARTITION_KEY: {'S': 'testId4'}, 'data': {'S': 'foobar123'}}}},
            {'PutRequest': {'Item': {PARTITION_KEY: {'S': 'testId5'}, 'data': {'S': 'foobar123'}}}},
            {'DeleteRequest': {'Key': {PARTITION_KEY: {'S': 'testId0'}}}},
            {'DeleteRequest': {'Key': {PARTITION_KEY: {'S': 'testId1'}}}},
            {'DeleteRequest': {'Key': {PARTITION_KEY: {'S': 'testId2'}}}},
        ]})

        # submit a transaction with writes and delete
        dynamodb.transact_write_items(TransactItems=[
            {'Put': {'TableName': table_name,
                'Item': {PARTITION_KEY: {'S': 'testId6'}, 'data': {'S': 'foobar123'}}}},
            {'Put': {'TableName': table_name,
                'Item': {PARTITION_KEY: {'S': 'testId7'}, 'data': {'S': 'foobar123'}}}},
            {'Put': {'TableName': table_name,
                'Item': {PARTITION_KEY: {'S': 'testId8'}, 'data': {'S': 'foobar123'}}}},
            {'Delete': {'TableName': table_name, 'Key': {PARTITION_KEY: {'S': 'testId3'}}}},
            {'Delete': {'TableName': table_name, 'Key': {PARTITION_KEY: {'S': 'testId4'}}}},
            {'Delete': {'TableName': table_name, 'Key': {PARTITION_KEY: {'S': 'testId5'}}}},
        ])

        # submit a batch with a put over existing item
        dynamodb.transact_write_items(TransactItems=[
            {'Put': {'TableName': table_name,
                'Item': {PARTITION_KEY: {'S': 'testId6'}, 'data': {'S': 'foobar123_updated1'}}}},
        ])

        # submit a transaction with a put over existing item
        dynamodb.transact_write_items(TransactItems=[
            {'Put': {'TableName': table_name,
                'Item': {PARTITION_KEY: {'S': 'testId7'}, 'data': {'S': 'foobar123_updated1'}}}},
        ])

        # submit a transaction with updates
        dynamodb.transact_write_items(TransactItems=[
            {'Update': {'TableName': table_name, 'Key': {PARTITION_KEY: {'S': 'testId6'}},
                'UpdateExpression': 'SET #0 = :0',
                'ExpressionAttributeNames': {'#0': 'data'},
                'ExpressionAttributeValues': {':0': {'S': 'foobar123_updated2'}}}},
            {'Update': {'TableName': table_name, 'Key': {PARTITION_KEY: {'S': 'testId7'}},
                'UpdateExpression': 'SET #0 = :0',
                'ExpressionAttributeNames': {'#0': 'data'},
                'ExpressionAttributeValues': {':0': {'S': 'foobar123_updated2'}}}},
            {'Update': {'TableName': table_name, 'Key': {PARTITION_KEY: {'S': 'testId8'}},
                'UpdateExpression': 'SET #0 = :0',
                'ExpressionAttributeNames': {'#0': 'data'},
                'ExpressionAttributeValues': {':0': {'S': 'foobar123_updated2'}}}},
        ])

        LOGGER.info('Waiting some time before finishing test.')
        time.sleep(2)

        num_insert = 9
        num_modify = 5
        num_delete = 6
        num_events = num_insert + num_modify + num_delete

        def check_events():
            if len(events) != num_events:
                LOGGER.warning('DynamoDB updates retrieved (actual/expected): %s/%s' %
                    (len(events), num_events))
            self.assertEqual(len(events), num_events)
            event_items = [json.loads(base64.b64decode(e['data'])) for e in events]
            # make sure we have the right number of expected event types
            inserts = [e for e in event_items if e.get('__action_type') == 'INSERT']
            modifies = [e for e in event_items if e.get('__action_type') == 'MODIFY']
            removes = [e for e in event_items if e.get('__action_type') == 'REMOVE']
            self.assertEqual(len(inserts), num_insert)
            self.assertEqual(len(modifies), num_modify)
            self.assertEqual(len(removes), num_delete)

            for i, event in enumerate(inserts):
                self.assertNotIn('old_image', event)
                self.assertEqual(inserts[i]['new_image'], {'id': 'testId%d' % i, 'data': 'foobar123'})

            self.assertEqual(modifies[0]['old_image'], {'id': 'testId6', 'data': 'foobar123'})
            self.assertEqual(modifies[0]['new_image'], {'id': 'testId6', 'data': 'foobar123_updated1'})
            self.assertEqual(modifies[1]['old_image'], {'id': 'testId7', 'data': 'foobar123'})
            self.assertEqual(modifies[1]['new_image'], {'id': 'testId7', 'data': 'foobar123_updated1'})
            self.assertEqual(modifies[2]['old_image'], {'id': 'testId6', 'data': 'foobar123_updated1'})
            self.assertEqual(modifies[2]['new_image'], {'id': 'testId6', 'data': 'foobar123_updated2'})
            self.assertEqual(modifies[3]['old_image'], {'id': 'testId7', 'data': 'foobar123_updated1'})
            self.assertEqual(modifies[3]['new_image'], {'id': 'testId7', 'data': 'foobar123_updated2'})
            self.assertEqual(modifies[4]['old_image'], {'id': 'testId8', 'data': 'foobar123'})
            self.assertEqual(modifies[4]['new_image'], {'id': 'testId8', 'data': 'foobar123_updated2'})

            for i, event in enumerate(removes):
                self.assertEqual(event['old_image'], {'id': 'testId%d' % i, 'data': 'foobar123'})
                self.assertNotIn('new_image', event)

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=9, sleep=3)

        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_DDB)
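
The '__action_type', 'old_image' and 'new_image' fields asserted above are produced by the deployed test Lambda (TEST_LAMBDA_PYTHON), whose source is not shown in this listing. A rough sketch of a handler that would yield records in that shape, assuming it forwards each DynamoDB Stream record to an outbound Kinesis stream (target stream name and field mapping are assumptions):

import json
import boto3
from boto3.dynamodb.types import TypeDeserializer

deserializer = TypeDeserializer()

def handler(event, context):
    # hypothetical sketch: translate DynamoDB Stream records into the
    # flat JSON events the assertions above expect
    kinesis = boto3.client('kinesis')
    for record in event.get('Records', []):
        ddb = record.get('dynamodb', {})
        item = {'__action_type': record['eventName']}  # INSERT / MODIFY / REMOVE
        if 'NewImage' in ddb:
            item['new_image'] = {k: deserializer.deserialize(v)
                                 for k, v in ddb['NewImage'].items()}
        if 'OldImage' in ddb:
            item['old_image'] = {k: deserializer.deserialize(v)
                                 for k, v in ddb['OldImage'].items()}
        kinesis.put_record(StreamName='test-stream',  # assumed target stream
            Data=json.dumps(item), PartitionKey='1')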
Ejemplo n.º 25
0
def install_elasticsearch(version=None):
    from localstack.services.es import versions

    if not version:
        version = ELASTICSEARCH_DEFAULT_VERSION

    version = get_elasticsearch_install_version(version)
    install_dir = get_elasticsearch_install_dir(version)
    installed_executable = os.path.join(install_dir, "bin", "elasticsearch")
    if not os.path.exists(installed_executable):
        log_install_msg("Elasticsearch (%s)" % version)
        es_url = versions.get_download_url(version)
        install_dir_parent = os.path.dirname(install_dir)
        mkdir(install_dir_parent)
        # download and extract archive
        tmp_archive = os.path.join(config.TMP_FOLDER,
                                   "localstack.%s" % os.path.basename(es_url))
        download_and_extract_with_retry(es_url, tmp_archive,
                                        install_dir_parent)
        elasticsearch_dir = glob.glob(
            os.path.join(install_dir_parent, "elasticsearch*"))
        if not elasticsearch_dir:
            raise Exception("Unable to find Elasticsearch folder in %s" %
                            install_dir_parent)
        shutil.move(elasticsearch_dir[0], install_dir)

        for dir_name in ("data", "logs", "modules", "plugins",
                         "config/scripts"):
            dir_path = os.path.join(install_dir, dir_name)
            mkdir(dir_path)
            chmod_r(dir_path, 0o777)

        # install default plugins
        for plugin in ELASTICSEARCH_PLUGIN_LIST:
            if is_alpine():
                # https://github.com/pires/docker-elasticsearch/issues/56
                os.environ["ES_TMPDIR"] = "/tmp"
            plugin_binary = os.path.join(install_dir, "bin",
                                         "elasticsearch-plugin")
            plugin_dir = os.path.join(install_dir, "plugins", plugin)
            if not os.path.exists(plugin_dir):
                LOG.info("Installing Elasticsearch plugin %s" % plugin)

                def try_install():
                    safe_run([plugin_binary, "install", "-b", plugin])

                # We're occasionally seeing javax.net.ssl.SSLHandshakeException -> add download retries
                download_attempts = 3
                try:
                    retry(try_install, retries=download_attempts - 1, sleep=2)
                except Exception:
                    LOG.warning(
                        "Unable to download Elasticsearch plugin '%s' after %s attempts",
                        plugin,
                        download_attempts,
                    )
                    if not os.environ.get("IGNORE_ES_DOWNLOAD_ERRORS"):
                        raise

    # delete some plugins to free up space
    for plugin in ELASTICSEARCH_DELETE_MODULES:
        module_dir = os.path.join(install_dir, "modules", plugin)
        rm_rf(module_dir)

    # disable x-pack-ml plugin (not working on Alpine)
    xpack_dir = os.path.join(install_dir, "modules", "x-pack-ml", "platform")
    rm_rf(xpack_dir)

    # patch JVM options file - replace hardcoded heap size settings
    jvm_options_file = os.path.join(install_dir, "config", "jvm.options")
    if os.path.exists(jvm_options_file):
        jvm_options = load_file(jvm_options_file)
        jvm_options_replaced = re.sub(r"(^-Xm[sx][a-zA-Z0-9\.]+$)",
                                      r"# \1",
                                      jvm_options,
                                      flags=re.MULTILINE)
        if jvm_options != jvm_options_replaced:
            save_file(jvm_options_file, jvm_options_replaced)
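
For reference, the jvm.options patch above comments out hardcoded heap flags such as '-Xms1g' and '-Xmx1g' while leaving other JVM options untouched. A quick standalone check of the substitution:

import re

jvm_options = '-Xms1g\n-Xmx1g\n-XX:+UseConcMarkSweepGC'
patched = re.sub(r'(^-Xm[sx][a-zA-Z0-9\.]+$)', r'# \1', jvm_options, flags=re.MULTILINE)
print(patched)
# prints:
# # -Xms1g
# # -Xmx1g
# -XX:+UseConcMarkSweepGC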
Ejemplo n.º 26
0
    def test_lambda_subscribe_sns_topic(self):
        function_name = '{}-{}'.format(TEST_LAMBDA_FUNCTION_PREFIX, short_uid())

        zip_file = testutil.create_lambda_archive(
            script=load_file(TEST_LAMBDA_ECHO_FILE),
            get_content=True,
            runtime=LAMBDA_RUNTIME_PYTHON36
        )

        testutil.create_lambda_function(
            zip_file=zip_file,
            func_name=function_name,
            runtime=LAMBDA_RUNTIME_PYTHON36
        )

        topic = self.sns_client.create_topic(
            Name=TEST_SNS_TOPIC_NAME
        )
        topic_arn = topic['TopicArn']

        self.sns_client.subscribe(
            TopicArn=topic_arn,
            Protocol='lambda',
            Endpoint=lambda_api.func_arn(function_name),
        )

        subject = '[Subject] Test subject'
        message = 'Hello world.'
        self.sns_client.publish(
            TopicArn=topic_arn,
            Subject=subject,
            Message=message
        )

        logs = aws_stack.connect_to_service('logs')

        def get_event_message(events):
            for event in events:
                raw_message = event['message']
                if 'START' in raw_message or 'END' in raw_message or 'REPORT' in raw_message:
                    continue

                return json.loads(raw_message)

            return None

        # wait for lambda executing
        def check_log_streams():
            rs = logs.describe_log_streams(
                logGroupName='/aws/lambda/{}'.format(function_name)
            )

            self.assertEqual(len(rs['logStreams']), 1)
            return rs['logStreams'][0]['logStreamName']

        log_stream = retry(check_log_streams, retries=3, sleep=2)
        rs = logs.get_log_events(
            logGroupName='/aws/lambda/{}'.format(function_name),
            logStreamName=log_stream
        )

        message = get_event_message(rs['events'])
        self.assertEqual(len(message['Records']), 1)
        notification = message['Records'][0]['Sns']

        self.assertIn('Subject', notification)
        self.assertEqual(notification['Subject'], subject)
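
The log-scraping logic above assumes the deployed function (TEST_LAMBDA_ECHO_FILE, not shown in this listing) writes the incoming event as a single JSON line to its log stream. A minimal sketch of such an echo handler:

import json

def handler(event, context):
    # sketch of an "echo" handler: anything printed ends up in the
    # function's CloudWatch Logs stream, where the test reads it back
    print(json.dumps(event))
    return event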
Ejemplo n.º 27
0
def test_kinesis_lambda_sns_ddb_streams():

    ddb_lease_table_suffix = '-kclapp'
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    dynamodb_service = aws_stack.connect_to_service('dynamodb')
    dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
    kinesis = aws_stack.connect_to_service('kinesis')
    sns = aws_stack.connect_to_service('sns')

    LOGGER.info('Creating test streams...')
    run_safe(lambda: dynamodb_service.delete_table(
        TableName=TEST_STREAM_NAME + ddb_lease_table_suffix), print_error=False)
    aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

    # subscribe to inbound Kinesis stream
    def process_records(records, shard_id):
        EVENTS.extend(records)

    # start the KCL client process in the background
    kinesis_connector.listen_to_kinesis(TEST_STREAM_NAME, listener_func=process_records,
        wait_until_started=True, ddb_lease_table_suffix=ddb_lease_table_suffix)

    LOGGER.info('Kinesis consumer initialized.')

    # create table with stream forwarding config
    testutil.create_dynamodb_table(TEST_TABLE_NAME, partition_key=PARTITION_KEY,
        stream_view_type='NEW_AND_OLD_IMAGES')

    # list DDB streams and make sure the table stream is there
    streams = dynamodbstreams.list_streams()
    ddb_event_source_arn = None
    for stream in streams['Streams']:
        if stream['TableName'] == TEST_TABLE_NAME:
            ddb_event_source_arn = stream['StreamArn']
    assert ddb_event_source_arn

    # deploy test lambda connected to DynamoDB Stream
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
        zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)
    # make sure we cannot create Lambda with same name twice
    assert_raises(Exception, testutil.create_lambda_function, func_name=TEST_LAMBDA_NAME_DDB,
        zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

    # deploy test lambda connected to Kinesis Stream
    kinesis_event_source_arn = kinesis.describe_stream(
        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)['StreamDescription']['StreamARN']
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM,
        zip_file=zip_file, event_source_arn=kinesis_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

    # set number of items to update/put to table
    num_events_ddb = 15
    num_put_new_items = 5
    num_put_existing_items = 2
    num_batch_items = 3
    num_updates_ddb = num_events_ddb - num_put_new_items - num_put_existing_items - num_batch_items

    LOGGER.info('Putting %s items to table...' % num_events_ddb)
    table = dynamodb.Table(TEST_TABLE_NAME)
    for i in range(0, num_put_new_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123'
        })
    # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
    for i in range(0, num_put_existing_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123_put_existing'
        })

    # batch write some items containing non-ASCII characters
    dynamodb.batch_write_item(RequestItems={TEST_TABLE_NAME: [
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ✓'}}},
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 £'}}},
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ¢'}}}
    ]})
    # update some items, which also triggers notification events
    for i in range(0, num_updates_ddb):
        dynamodb_service.update_item(TableName=TEST_TABLE_NAME,
            Key={PARTITION_KEY: {'S': 'testId%s' % i}},
            AttributeUpdates={'data': {
                'Action': 'PUT',
                'Value': {'S': 'foobar123_updated'}
            }})

    # put items to stream
    num_events_kinesis = 10
    LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
    kinesis.put_records(
        Records=[
            {
                'Data': '{}',
                'PartitionKey': 'testId%s' % i
            } for i in range(0, num_events_kinesis)
        ], StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
    )

    # put 1 item to stream that will trigger an error in the Lambda
    kinesis.put_record(Data='{"%s": 1}' % lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
        PartitionKey='testIderror', StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # create SNS topic, connect it to the Lambda, publish test message
    num_events_sns = 3
    response = sns.create_topic(Name=TEST_TOPIC_NAME)
    sns.subscribe(TopicArn=response['TopicArn'], Protocol='lambda',
        Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
    for i in range(0, num_events_sns):
        sns.publish(TopicArn=response['TopicArn'], Message='test message %s' % i)

    # get latest records
    latest = aws_stack.kinesis_get_latest_records(TEST_LAMBDA_SOURCE_STREAM_NAME,
        shard_id='shardId-000000000000', count=10)
    assert len(latest) == 10

    LOGGER.info('Waiting some time before finishing test.')
    time.sleep(2)

    num_events = num_events_ddb + num_events_kinesis + num_events_sns

    def check_events():
        if len(EVENTS) != num_events:
            LOGGER.warning(('DynamoDB and Kinesis updates retrieved ' +
                '(actual/expected): %s/%s') % (len(EVENTS), num_events))
        assert len(EVENTS) == num_events
        event_items = [json.loads(base64.b64decode(e['data'])) for e in EVENTS]
        inserts = [e for e in event_items if e.get('__action_type') == 'INSERT']
        modifies = [e for e in event_items if e.get('__action_type') == 'MODIFY']
        assert len(inserts) == num_put_new_items + num_batch_items
        assert len(modifies) == num_put_existing_items + num_updates_ddb

    # this can take a long time in CI, make sure we give it enough time/retries
    retry(check_events, retries=7, sleep=3)

    # check cloudwatch notifications
    stats1 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM)
    assert len(stats1['Datapoints']) == 2 + num_events_sns
    stats2 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM, 'Errors')
    assert len(stats2['Datapoints']) == 1
    stats3 = get_lambda_metrics(TEST_LAMBDA_NAME_DDB)
    assert len(stats3['Datapoints']) == num_events_ddb
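
aws_stack.kinesis_get_latest_records is used above to read back what was published. A plausible sketch of such a helper; the iterator type and paging strategy are assumptions:

def kinesis_get_latest_records(stream_name, shard_id, count=10):
    # hypothetical sketch: read the shard from the beginning and
    # return the last `count` records
    kinesis = aws_stack.connect_to_service('kinesis')
    iterator = kinesis.get_shard_iterator(StreamName=stream_name,
        ShardId=shard_id, ShardIteratorType='TRIM_HORIZON')['ShardIterator']
    records = []
    while iterator:
        result = kinesis.get_records(ShardIterator=iterator, Limit=100)
        records.extend(result['Records'])
        iterator = result.get('NextShardIterator')
        if not result['Records']:
            break
    return records[-count:]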
Ejemplo n.º 28
0
def create_lambda_function(func_name,
                           zip_file=None,
                           event_source_arn=None,
                           handler_file=None,
                           handler=None,
                           starting_position=None,
                           runtime=None,
                           envvars=None,
                           tags=None,
                           libs=None,
                           delete=False,
                           layers=None,
                           **kwargs):
    """Utility method to create a new function via the Lambda API"""

    # avoid shared mutable default arguments
    envvars = envvars if envvars is not None else {}
    tags = tags if tags is not None else {}
    libs = libs if libs is not None else []
    starting_position = starting_position or LAMBDA_DEFAULT_STARTING_POSITION
    runtime = runtime or LAMBDA_DEFAULT_RUNTIME
    client = aws_stack.connect_to_service('lambda')

    # load zip file content if handler_file is specified
    if not zip_file and handler_file:
        file_content = load_file(handler_file) if os.path.exists(
            handler_file) else handler_file
        if libs or not handler:
            zip_file = create_lambda_archive(file_content,
                                             libs=libs,
                                             get_content=True,
                                             runtime=runtime
                                             or LAMBDA_DEFAULT_RUNTIME)
        else:
            zip_file = create_zip_file(handler_file, get_content=True)

    handler = handler or LAMBDA_DEFAULT_HANDLER

    if delete:
        try:
            # Delete function if one already exists
            client.delete_function(FunctionName=func_name)
        except Exception:
            pass

    # create function
    additional_kwargs = kwargs
    kwargs = {
        'FunctionName': func_name,
        'Runtime': runtime,
        'Handler': handler,
        'Role': LAMBDA_TEST_ROLE,
        'Code': {
            'ZipFile': zip_file
        },
        'Timeout': LAMBDA_TIMEOUT_SEC,
        'Environment': dict(Variables=envvars),
        'Tags': tags
    }
    kwargs.update(additional_kwargs)
    if layers:
        kwargs['Layers'] = layers
    create_func_resp = client.create_function(**kwargs)

    resp = {
        'CreateFunctionResponse': create_func_resp,
        'CreateEventSourceMappingResponse': None
    }

    # create event source mapping
    if event_source_arn:
        resp[
            'CreateEventSourceMappingResponse'] = client.create_event_source_mapping(
                FunctionName=func_name,
                EventSourceArn=event_source_arn,
                StartingPosition=starting_position)

    return resp
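
A typical call to this utility, deploying a single-file handler that is zipped on the fly (the function name and file path below are hypothetical, chosen only for illustration):

resp = create_lambda_function(
    func_name='my-test-func',            # hypothetical name
    handler_file='/tmp/my_handler.py',   # hypothetical single-file handler
    handler='my_handler.handler',
    runtime=LAMBDA_RUNTIME_PYTHON36,
    envvars={'STAGE': 'test'})
print(resp['CreateFunctionResponse']['FunctionArn'])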
Ejemplo n.º 29
0
def test_kinesis_lambda_sns_ddb_streams():

    ddb_lease_table_suffix = '-kclapp'
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    dynamodb_service = aws_stack.connect_to_service('dynamodb')
    dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
    kinesis = aws_stack.connect_to_service('kinesis')
    sns = aws_stack.connect_to_service('sns')

    LOGGER.info('Creating test streams...')
    run_safe(lambda: dynamodb_service.delete_table(TableName=TEST_STREAM_NAME +
                                                   ddb_lease_table_suffix),
             print_error=False)
    aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

    # subscribe to inbound Kinesis stream
    def process_records(records, shard_id):
        EVENTS.extend(records)

    # start the KCL client process in the background
    kinesis_connector.listen_to_kinesis(
        TEST_STREAM_NAME,
        listener_func=process_records,
        wait_until_started=True,
        ddb_lease_table_suffix=ddb_lease_table_suffix)

    LOGGER.info('Kinesis consumer initialized.')

    # create table with stream forwarding config
    testutil.create_dynamodb_table(TEST_TABLE_NAME,
                                   partition_key=PARTITION_KEY,
                                   stream_view_type='NEW_AND_OLD_IMAGES')

    # list DDB streams and make sure the table stream is there
    streams = dynamodbstreams.list_streams()
    ddb_event_source_arn = None
    for stream in streams['Streams']:
        if stream['TableName'] == TEST_TABLE_NAME:
            ddb_event_source_arn = stream['StreamArn']
    assert ddb_event_source_arn

    # deploy test lambda connected to DynamoDB Stream
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON),
                                              get_content=True,
                                              libs=TEST_LAMBDA_LIBS,
                                              runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
                                    zip_file=zip_file,
                                    event_source_arn=ddb_event_source_arn,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)
    # make sure we cannot create Lambda with same name twice
    assert_raises(Exception,
                  testutil.create_lambda_function,
                  func_name=TEST_LAMBDA_NAME_DDB,
                  zip_file=zip_file,
                  event_source_arn=ddb_event_source_arn,
                  runtime=LAMBDA_RUNTIME_PYTHON27)

    # deploy test lambda connected to Kinesis Stream
    kinesis_event_source_arn = kinesis.describe_stream(
        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
    )['StreamDescription']['StreamARN']
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM,
                                    zip_file=zip_file,
                                    event_source_arn=kinesis_event_source_arn,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)

    # put items to table
    num_events_ddb = 10
    LOGGER.info('Putting %s items to table...' % num_events_ddb)
    table = dynamodb.Table(TEST_TABLE_NAME)
    for i in range(0, num_events_ddb - 3):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123'
        })
    # batch write some items containing non-ASCII characters
    dynamodb.batch_write_item(
        RequestItems={
            TEST_TABLE_NAME: [{
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123 ✓'
                    }
                }
            }, {
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123 £'
                    }
                }
            }, {
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123 ¢'
                    }
                }
            }]
        })

    # put items to stream
    num_events_kinesis = 10
    LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
    kinesis.put_records(Records=[{
        'Data': '{}',
        'PartitionKey': 'testId%s' % i
    } for i in range(0, num_events_kinesis)],
                        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # put 1 item to stream that will trigger an error in the Lambda
    kinesis.put_record(Data='{"%s": 1}' %
                       lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
                       PartitionKey='testIderror',
                       StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # create SNS topic, connect it to the Lambda, publish test message
    num_events_sns = 3
    response = sns.create_topic(Name=TEST_TOPIC_NAME)
    sns.subscribe(
        TopicArn=response['TopicArn'],
        Protocol='lambda',
        Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
    for i in range(0, num_events_sns):
        sns.publish(TopicArn=response['TopicArn'],
                    Message='test message %s' % i)

    # get latest records
    latest = aws_stack.kinesis_get_latest_records(
        TEST_LAMBDA_SOURCE_STREAM_NAME,
        shard_id='shardId-000000000000',
        count=10)
    assert len(latest) == 10

    LOGGER.info('Waiting some time before finishing test.')
    time.sleep(2)

    num_events = num_events_ddb + num_events_kinesis + num_events_sns

    def check_events():
        if len(EVENTS) != num_events:
            LOGGER.warning(
                ('DynamoDB and Kinesis updates retrieved ' +
                 '(actual/expected): %s/%s') % (len(EVENTS), num_events))
        assert len(EVENTS) == num_events

    # this can take a long time in CI, make sure we give it enough time/retries
    retry(check_events, retries=7, sleep=3)

    # check cloudwatch notifications
    stats1 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM)
    assert len(stats1['Datapoints']) == 2 + num_events_sns
    stats2 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM, 'Errors')
    assert len(stats2['Datapoints']) == 1
    stats3 = get_lambda_metrics(TEST_LAMBDA_NAME_DDB)
    assert len(stats3['Datapoints']) == 10
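
The retry(...) helper used throughout these listings repeatedly invokes a function until it stops raising. A minimal sketch of such a helper, with the signature assumed from its call sites (e.g. retry(check_events, retries=7, sleep=3)):

import time

def retry(function, retries=3, sleep=1.0, **kwargs):
    # re-run `function` until it succeeds, sleeping between attempts;
    # re-raise the last error once the retries are exhausted
    for attempt in range(retries + 1):
        try:
            return function(**kwargs)
        except Exception:
            if attempt >= retries:
                raise
            time.sleep(sleep)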
Ejemplo n.º 30
0
def test_prime_and_destroy_containers():

    # run these tests only for the "reuse containers" Lambda executor
    if not isinstance(lambda_api.LAMBDA_EXECUTOR,
                      lambda_executors.LambdaExecutorReuseContainers):
        return

    executor = lambda_api.LAMBDA_EXECUTOR
    func_name = 'test_prime_and_destroy_containers'

    # create a new lambda
    lambda_client = aws_stack.connect_to_service('lambda')

    func_arn = lambda_api.func_arn(func_name)

    # make sure existing containers are gone
    executor.cleanup()
    assert len(executor.get_all_container_names()) == 0

    # deploy and invoke lambda without Docker
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_ENV),
                                              get_content=True,
                                              libs=TEST_LAMBDA_LIBS,
                                              runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=func_name,
                                    zip_file=zip_file,
                                    runtime=LAMBDA_RUNTIME_PYTHON27,
                                    envvars={'Hello': 'World'})

    assert len(executor.get_all_container_names()) == 0

    assert executor.function_invoke_times == {}

    # invoke a few times.
    durations = []
    num_iterations = 3

    for i in range(0, num_iterations + 1):
        prev_invoke_time = None
        if i > 0:
            prev_invoke_time = executor.function_invoke_times[func_arn]

        start_time = time.time()
        lambda_client.invoke(FunctionName=func_name, Payload=b'{}')
        duration = time.time() - start_time

        assert len(executor.get_all_container_names()) == 1

        # ensure the last invoke time is being updated properly.
        if i > 0:
            assert executor.function_invoke_times[func_arn] > prev_invoke_time
        else:
            assert executor.function_invoke_times[func_arn] > 0

        durations.append(duration)

    # The first call creates the container; subsequent calls reuse it and should be faster.
    for i in range(1, num_iterations + 1):
        assert durations[i] < durations[0]

    status = executor.get_docker_container_status(func_arn)
    assert status == 1

    executor.cleanup()
    status = executor.get_docker_container_status(func_arn)
    assert status == 0

    assert len(executor.get_all_container_names()) == 0
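
get_docker_container_status returns 1 for a running container and 0 otherwise. A hedged sketch of how such a check could be implemented with the Docker CLI; the container naming scheme is an assumption, not the executor's actual logic:

import subprocess

def get_docker_container_status(func_arn):
    # hypothetical sketch: derive a container name from the function ARN and
    # ask the Docker CLI whether a container with that name is running
    container_name = 'localstack_lambda_' + func_arn.split(':')[-1]  # assumed scheme
    output = subprocess.check_output(
        ['docker', 'ps', '--filter', 'name=%s' % container_name, '--format', '{{.Names}}'])
    return 1 if container_name in output.decode('utf-8') else 0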
Ejemplo n.º 31
0
    def create_function(cls, file, name, runtime=None, libs=None):
        runtime = runtime or LAMBDA_RUNTIME_PYTHON27
        zip_file = testutil.create_lambda_archive(
            load_file(file), get_content=True, libs=libs, runtime=runtime)
        testutil.create_lambda_function(
            func_name=name, zip_file=zip_file, runtime=runtime)
Ejemplo n.º 32
0
def test_lambda_runtimes():

    lambda_client = aws_stack.connect_to_service('lambda')

    # deploy and invoke lambda - Python 2.7
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON),
                                              get_content=True,
                                              libs=TEST_LAMBDA_LIBS,
                                              runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_PY,
                                    zip_file=zip_file,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)

    # Invocation Type not set
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY,
                                  Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert to_str(result_data).strip() == '{}'

    # Invocation Type - RequestResponse
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY,
                                  Payload=b'{}',
                                  InvocationType='RequestResponse')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert to_str(result_data).strip() == '{}'

    # Invocation Type - Event
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY,
                                  Payload=b'{}',
                                  InvocationType='Event')
    assert result['StatusCode'] == 202

    # Invocation Type - DryRun
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY,
                                  Payload=b'{}',
                                  InvocationType='DryRun')
    assert result['StatusCode'] == 204

    if use_docker():
        # deploy and invoke lambda - Python 3.6
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON3),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON36)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_PY3,
                                        zip_file=zip_file,
                                        runtime=LAMBDA_RUNTIME_PYTHON36)
        result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY3,
                                      Payload=b'{}')
        assert result['StatusCode'] == 200
        result_data = result['Payload'].read()
        assert to_str(result_data).strip() == '{}'

    # deploy and invoke lambda - Java
    if not os.path.exists(TEST_LAMBDA_JAVA):
        mkdir(os.path.dirname(TEST_LAMBDA_JAVA))
        download(TEST_LAMBDA_JAR_URL, TEST_LAMBDA_JAVA)
    # Lambda supports single JAR deployments without the zip, so we upload the JAR directly.
    test_java_jar = load_file(TEST_LAMBDA_JAVA, mode='rb')
    assert test_java_jar is not None
    testutil.create_lambda_function(
        func_name=TEST_LAMBDA_NAME_JAVA,
        zip_file=test_java_jar,
        runtime=LAMBDA_RUNTIME_JAVA8,
        handler='cloud.localstack.sample.LambdaHandler')
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA,
                                  Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert 'LinkedHashMap' in to_str(result_data)

    # test SNSEvent
    result = lambda_client.invoke(
        FunctionName=TEST_LAMBDA_NAME_JAVA,
        InvocationType='Event',
        Payload=b'{"Records": [{"Sns": {"Message": "{}"}}]}')
    assert result['StatusCode'] == 202

    # test DDBEvent
    result = lambda_client.invoke(
        FunctionName=TEST_LAMBDA_NAME_JAVA,
        InvocationType='Event',
        Payload=b'{"Records": [{"dynamodb": {"Message": "{}"}}]}')
    assert result['StatusCode'] == 202

    # test KinesisEvent
    result = lambda_client.invoke(
        FunctionName=TEST_LAMBDA_NAME_JAVA,
        Payload=b'{"Records": [{"Kinesis": {"Data": "data", "PartitionKey": "partition"}}]}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert 'KinesisEvent' in to_str(result_data)

    # deploy and invoke lambda - Java with stream handler
    testutil.create_lambda_function(
        func_name=TEST_LAMBDA_NAME_JAVA_STREAM,
        zip_file=test_java_jar,
        runtime=LAMBDA_RUNTIME_JAVA8,
        handler='cloud.localstack.sample.LambdaStreamHandler')
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA_STREAM,
                                  Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert to_str(result_data).strip() == '{}'

    # deploy and invoke lambda - Java with serializable input object
    testutil.create_lambda_function(
        func_name=TEST_LAMBDA_NAME_JAVA_SERIALIZABLE,
        zip_file=test_java_jar,
        runtime=LAMBDA_RUNTIME_JAVA8,
        handler='cloud.localstack.sample.SerializedInputLambdaHandler')
    result = lambda_client.invoke(
        FunctionName=TEST_LAMBDA_NAME_JAVA_SERIALIZABLE,
        Payload=b'{"bucket": "test_bucket", "key": "test_key"}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert json.loads(to_str(result_data)) == {
        'validated': True,
        'bucket': 'test_bucket',
        'key': 'test_key'
    }

    if use_docker():
        # deploy and invoke lambda - Node.js
        zip_file = testutil.create_zip_file(TEST_LAMBDA_NODEJS,
                                            get_content=True)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_JS,
                                        zip_file=zip_file,
                                        handler='lambda_integration.handler',
                                        runtime=LAMBDA_RUNTIME_NODEJS)
        result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JS,
                                      Payload=b'{}')
        assert result['StatusCode'] == 200
        result_data = result['Payload'].read()
        assert to_str(result_data).strip() == '{}'

        # deploy and invoke - .NET Core 2.0. It's already a zip file.
        zip_file = TEST_LAMBDA_DOTNETCORE2
        zip_file_content = None
        with open(zip_file, 'rb') as file_obj:
            zip_file_content = file_obj.read()
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_DOTNETCORE2,
            zip_file=zip_file_content,
            handler='DotNetCore2::DotNetCore2.Lambda.Function::SimpleFunctionHandler',
            runtime=LAMBDA_RUNTIME_DOTNETCORE2)
        result = lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_DOTNETCORE2, Payload=b'{}')
        assert result['StatusCode'] == 200
        result_data = result['Payload'].read()
        assert to_str(result_data).strip() == '{}'
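
The invocation-type checks earlier in this example follow the Lambda Invoke API: RequestResponse returns HTTP 200, Event 202, and DryRun 204. The same checks, condensed into a loop (reusing the function deployed above):

expected = {'RequestResponse': 200, 'Event': 202, 'DryRun': 204}
for invocation_type, status_code in expected.items():
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY,
                                  Payload=b'{}', InvocationType=invocation_type)
    assert result['StatusCode'] == status_code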
Ejemplo n.º 33
0
    def test_create_delete_stack(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        cf_client = aws_stack.connect_to_service('cloudformation')
        s3 = aws_stack.connect_to_service('s3')
        sns = aws_stack.connect_to_service('sns')
        sqs = aws_stack.connect_to_service('sqs')
        apigateway = aws_stack.connect_to_service('apigateway')
        template = template_deployer.template_to_json(load_file(TEST_TEMPLATE_1))

        # deploy template
        stack_name = 'stack-%s' % short_uid()
        cloudformation.create_stack(StackName=stack_name, TemplateBody=template)

        _await_stack_completion(stack_name)

        # assert that resources have been created
        assert bucket_exists('cf-test-bucket-1')
        queue_url = queue_exists('cf-test-queue-1')
        assert queue_url
        topic_arn = topic_exists('%s-test-topic-1-1' % stack_name)
        assert topic_arn
        assert stream_exists('cf-test-stream-1')
        resource = describe_stack_resource(stack_name, 'SQSQueueNoNameProperty')
        assert queue_exists(resource['PhysicalResourceId'])
        assert ssm_param_exists('cf-test-param-1')

        # assert that tags have been created
        tags = s3.get_bucket_tagging(Bucket='cf-test-bucket-1')['TagSet']
        self.assertEqual(tags, [{'Key': 'foobar', 'Value': aws_stack.get_sqs_queue_url('cf-test-queue-1')}])
        tags = sns.list_tags_for_resource(ResourceArn=topic_arn)['Tags']
        self.assertEqual(tags, [
            {'Key': 'foo', 'Value': 'cf-test-bucket-1'},
            {'Key': 'bar', 'Value': aws_stack.s3_bucket_arn('cf-test-bucket-1')}
        ])
        queue_tags = sqs.list_queue_tags(QueueUrl=queue_url)
        self.assertIn('Tags', queue_tags)
        self.assertEqual(queue_tags['Tags'], {'key1': 'value1', 'key2': 'value2'})

        # assert that bucket notifications have been created
        notifications = s3.get_bucket_notification_configuration(Bucket='cf-test-bucket-1')
        self.assertIn('QueueConfigurations', notifications)
        self.assertIn('LambdaFunctionConfigurations', notifications)
        self.assertEqual(notifications['QueueConfigurations'][0]['QueueArn'], 'aws:arn:sqs:test:testqueue')
        self.assertEqual(notifications['QueueConfigurations'][0]['Events'], ['s3:ObjectDeleted:*'])
        self.assertEqual(
            notifications['LambdaFunctionConfigurations'][0]['LambdaFunctionArn'],
            'aws:arn:lambda:test:testfunc'
        )
        self.assertEqual(notifications['LambdaFunctionConfigurations'][0]['Events'], ['s3:ObjectCreated:*'])

        # assert that subscriptions have been created
        subs = sns.list_subscriptions()['Subscriptions']
        subs = [s for s in subs if (':%s:cf-test-queue-1' % TEST_AWS_ACCOUNT_ID) in s['Endpoint']]
        self.assertEqual(len(subs), 1)
        self.assertIn(':%s:%s-test-topic-1-1' % (TEST_AWS_ACCOUNT_ID, stack_name), subs[0]['TopicArn'])
        # assert that subscription attributes are added properly
        attrs = sns.get_subscription_attributes(SubscriptionArn=subs[0]['SubscriptionArn'])['Attributes']
        self.assertEqual(attrs, {'Endpoint': subs[0]['Endpoint'], 'Protocol': 'sqs',
            'SubscriptionArn': subs[0]['SubscriptionArn'], 'TopicArn': subs[0]['TopicArn'],
            'FilterPolicy': json.dumps({'eventType': ['created']})})

        # assert that Gateway responses have been created
        test_api_name = 'test-api'
        api = [a for a in apigateway.get_rest_apis()['items'] if a['name'] == test_api_name][0]
        responses = apigateway.get_gateway_responses(restApiId=api['id'])['items']
        self.assertEqual(len(responses), 2)
        types = [r['responseType'] for r in responses]
        self.assertEqual(set(types), set(['UNAUTHORIZED', 'DEFAULT_5XX']))

        # delete the stack
        cf_client.delete_stack(StackName=stack_name)

        # assert that resources have been deleted
        assert not bucket_exists('cf-test-bucket-1')
        assert not queue_exists('cf-test-queue-1')
        assert not topic_exists('%s-test-topic-1-1' % stack_name)
        retry(lambda: self.assertFalse(stream_exists('cf-test-stream-1')))
Ejemplo n.º 34
0
def load_template_raw(tmpl_path: str) -> str:
    template = load_file(
        os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "templates", tmpl_path)
    )
    return template
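
Example usage of this helper, which resolves the template relative to the test file's "templates" directory (the template file name below is hypothetical, chosen only for illustration):

template_body = load_template_raw('sqs_queue.yaml')  # hypothetical template file
cf_client = aws_stack.connect_to_service('cloudformation')
cf_client.create_stack(StackName='stack-%s' % short_uid(), TemplateBody=template_body)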
Ejemplo n.º 35
0
def test_lambda_runtimes():

    lambda_client = aws_stack.connect_to_service('lambda')

    # deploy and invoke lambda - Python 2.7
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_PY,
        zip_file=zip_file, runtime=LAMBDA_RUNTIME_PYTHON27)
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY, Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert to_str(result_data).strip() == '{}'

    if use_docker():
        # deploy and invoke lambda - Python 3.6
        zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON3), get_content=True,
            libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON36)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_PY3,
            zip_file=zip_file, runtime=LAMBDA_RUNTIME_PYTHON36)
        result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY3, Payload=b'{}')
        assert result['StatusCode'] == 200
        result_data = result['Payload'].read()
        assert to_str(result_data).strip() == '{}'

    # deploy and invoke lambda - Java
    if not os.path.exists(TEST_LAMBDA_JAVA):
        mkdir(os.path.dirname(TEST_LAMBDA_JAVA))
        download(TEST_LAMBDA_JAR_URL, TEST_LAMBDA_JAVA)
    zip_file = testutil.create_zip_file(TEST_LAMBDA_JAVA, get_content=True)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_JAVA, zip_file=zip_file,
        runtime=LAMBDA_RUNTIME_JAVA8, handler='cloud.localstack.sample.LambdaHandler')
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA, Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert 'LinkedHashMap' in to_str(result_data)

    # test SNSEvent
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA, InvocationType='Event',
                                  Payload=b'{"Records": [{"Sns": {"Message": "{}"}}]}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert json.loads(to_str(result_data)) == {'async': 'True'}

    # test DDBEvent
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA, InvocationType='Event',
                                  Payload=b'{"Records": [{"dynamodb": {"Message": "{}"}}]}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert json.loads(to_str(result_data)) == {'async': 'True'}

    # test KinesisEvent
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA,
                                  Payload=b'{"Records": [{"Kinesis": {"Data": "data", "PartitionKey": "partition"}}]}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert 'KinesisEvent' in to_str(result_data)

    # deploy and invoke lambda - Java with stream handler
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_JAVA_STREAM, zip_file=zip_file,
        runtime=LAMBDA_RUNTIME_JAVA8, handler='cloud.localstack.sample.LambdaStreamHandler')
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA_STREAM, Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert to_str(result_data).strip() == '{}'

    # deploy and invoke lambda - Java with serializable input object
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_JAVA_SERIALIZABLE, zip_file=zip_file,
        runtime=LAMBDA_RUNTIME_JAVA8, handler='cloud.localstack.sample.SerializedInputLambdaHandler')
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA_SERIALIZABLE,
                                  Payload=b'{"bucket": "test_bucket", "key": "test_key"}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert json.loads(to_str(result_data)) == {'validated': True, 'bucket': 'test_bucket', 'key': 'test_key'}

    if use_docker():
        # deploy and invoke lambda - Node.js
        zip_file = testutil.create_zip_file(TEST_LAMBDA_NODEJS, get_content=True)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_JS,
            zip_file=zip_file, handler='lambda_integration.handler', runtime=LAMBDA_RUNTIME_NODEJS)
        result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JS, Payload=b'{}')
        assert result['StatusCode'] == 200
        result_data = result['Payload'].read()
        assert to_str(result_data).strip() == '{}'

        # deploy and invoke - .NET Core 2.0. It's already a zip file.
        zip_file = TEST_LAMBDA_DOTNETCORE2
        zip_file_content = None
        with open(zip_file, 'rb') as file_obj:
            zip_file_content = file_obj.read()
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DOTNETCORE2, zip_file=zip_file_content,
            handler='DotNetCore2::DotNetCore2.Lambda.Function::SimpleFunctionHandler',
            runtime=LAMBDA_RUNTIME_DOTNETCORE2)
        result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_DOTNETCORE2, Payload=b'{}')
        assert result['StatusCode'] == 200
        result_data = result['Payload'].read()
        assert to_str(result_data).strip() == '{}'
Ejemplo n.º 36
0
    def test_validate_template(self):
        cloudformation = aws_stack.connect_to_service('cloudformation')
        template = template_deployer.template_to_json(load_file(TEST_TEMPLATE_1))
        response = cloudformation.validate_template(TemplateBody=template)
        self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)