Example #1
def make_response(op_name, content=''):
    response = Response()
    if not content:
        content = '<MessageId>%s</MessageId>' % short_uid()
    response._content = """<{op_name}Response xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
        <{op_name}Result>
            {content}
        </{op_name}Result>
        <ResponseMetadata><RequestId>{req_id}</RequestId></ResponseMetadata>
        </{op_name}Response>""".format(op_name=op_name, content=content, req_id=short_uid())
    response.status_code = 200
    return response
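A minimal usage sketch (hypothetical values; assumes the same module context as above, i.e. Response and short_uid are imported):

# build a mocked SNS "Publish" response with an auto-generated MessageId
resp = make_response('Publish')
assert resp.status_code == 200
assert '<PublishResponse' in resp._content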
Example #2
def exec_lambda_code(script, handler_function='handler', lambda_cwd=None, lambda_env=None):
    if lambda_cwd or lambda_env:
        exec_mutex.acquire()
        if lambda_cwd:
            previous_cwd = os.getcwd()
            os.chdir(lambda_cwd)
            sys.path = [lambda_cwd] + sys.path
        if lambda_env:
            previous_env = dict(os.environ)
            os.environ.update(lambda_env)
    # generate lambda file name
    lambda_id = 'l_%s' % short_uid()
    lambda_file = LAMBDA_SCRIPT_PATTERN.replace('*', lambda_id)
    save_file(lambda_file, script)
    # delete temporary .py and .pyc files on exit
    TMP_FILES.append(lambda_file)
    TMP_FILES.append('%sc' % lambda_file)
    try:
        handler_module = imp.load_source(lambda_id, lambda_file)
        module_vars = handler_module.__dict__
    except Exception as e:
        LOG.error('Unable to exec: %s %s' % (script, traceback.format_exc()))
        raise e
    finally:
        if lambda_cwd or lambda_env:
            if lambda_cwd:
                os.chdir(previous_cwd)
                sys.path.pop(0)
            if lambda_env:
                os.environ = previous_env
            exec_mutex.release()
    return module_vars[handler_function]
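A usage sketch under the same assumptions as the snippet above (LAMBDA_SCRIPT_PATTERN, TMP_FILES and exec_mutex configured in the surrounding module); the handler code is made up for illustration:

script = 'def handler(event, context):\n    return {"echo": event}'
handler = exec_lambda_code(script)          # loads the generated module and returns its handler
result = handler({'foo': 'bar'}, None)      # -> {'echo': {'foo': 'bar'}}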
Example #3
def get_stream_info(stream_name, log_file=None, shards=None, env=None, endpoint_url=None,
        ddb_lease_table_suffix=None, env_vars={}):
    if not ddb_lease_table_suffix:
        ddb_lease_table_suffix = DEFAULT_DDB_LEASE_TABLE_SUFFIX
    # construct stream info
    env = aws_stack.get_environment(env)
    props_file = os.path.join(tempfile.gettempdir(), 'kclipy.%s.properties' % short_uid())
    app_name = '%s%s' % (stream_name, ddb_lease_table_suffix)
    stream_info = {
        'name': stream_name,
        'region': DEFAULT_REGION,
        'shards': shards,
        'properties_file': props_file,
        'log_file': log_file,
        'app_name': app_name,
        'env_vars': env_vars
    }
    # set local connection
    if env.region == REGION_LOCAL:
        stream_info['conn_kwargs'] = {
            'host': HOSTNAME,
            'port': config.PORT_KINESIS,
            'is_secure': bool(USE_SSL)
        }
    if endpoint_url:
        if 'conn_kwargs' not in stream_info:
            stream_info['conn_kwargs'] = {}
        url = urlparse(endpoint_url)
        stream_info['conn_kwargs']['host'] = url.hostname
        stream_info['conn_kwargs']['port'] = url.port
        stream_info['conn_kwargs']['is_secure'] = url.scheme == 'https'
    return stream_info
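Illustrative call showing how an explicit endpoint_url overrides the connection settings (stream name and URL are made up):

info = get_stream_info('my-stream', endpoint_url='http://localhost:4568')
# info['conn_kwargs'] == {'host': 'localhost', 'port': 4568, 'is_secure': False}
# info['properties_file'] points to a temporary kclipy properties file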
Example #4
def append_cors_headers(bucket_name, request_method, request_headers, response):
    cors = BUCKET_CORS.get(bucket_name)
    if not cors:
        return
    origin = request_headers.get('Origin', '')
    rules = cors['CORSConfiguration']['CORSRule']
    if not isinstance(rules, list):
        rules = [rules]
    for rule in rules:
        # add allow-origin header
        allowed_methods = rule.get('AllowedMethod', [])
        if request_method in allowed_methods:
            allowed_origins = rule.get('AllowedOrigin', [])
            for allowed in allowed_origins:
                if origin in allowed or re.match(allowed.replace('*', '.*'), origin):
                    response.headers['Access-Control-Allow-Origin'] = origin
                    break
        # add additional headers
        exposed_headers = rule.get('ExposeHeader', [])
        for header in exposed_headers:
            if header.lower() == 'date':
                response.headers[header] = timestamp(format='%a, %d %b %Y %H:%M:%S +0000')
            elif header.lower() == 'etag':
                response.headers[header] = md5(response._content)
            elif header.lower() in ('server', 'x-amz-id-2', 'x-amz-request-id'):
                response.headers[header] = short_uid()
            elif header.lower() == 'x-amz-delete-marker':
                response.headers[header] = 'false'
            elif header.lower() == 'x-amz-version-id':
                # TODO: check whether bucket versioning is enabled and return proper version id
                response.headers[header] = 'null'
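The expected shape of a BUCKET_CORS entry can be inferred from the lookups above; a hedged sketch with made-up names (response is assumed to be a requests-style Response object):

BUCKET_CORS['my-bucket'] = {
    'CORSConfiguration': {
        'CORSRule': {
            'AllowedMethod': ['GET'],
            'AllowedOrigin': ['*'],
            'ExposeHeader': ['Date', 'x-amz-request-id']
        }
    }
}
append_cors_headers('my-bucket', 'GET', {'Origin': 'http://example.com'}, response)
# response now carries Access-Control-Allow-Origin plus the exposed headers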
Example #5
def test_kinesis_lambda_forward_chain():
    kinesis = aws_stack.connect_to_service('kinesis')
    s3 = aws_stack.connect_to_service('s3')

    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM1_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM2_NAME, delete=True)
    s3.create_bucket(Bucket=TEST_BUCKET_NAME)

    # deploy test lambdas connected to Kinesis streams
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_CHAIN_LAMBDA1_NAME, zip_file=zip_file,
        event_source_arn=get_event_source_arn(TEST_CHAIN_STREAM1_NAME), runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_CHAIN_LAMBDA2_NAME, zip_file=zip_file,
        event_source_arn=get_event_source_arn(TEST_CHAIN_STREAM2_NAME), runtime=LAMBDA_RUNTIME_PYTHON27)

    # publish test record
    test_data = {'test_data': 'forward_chain_data_%s' % short_uid()}
    data = clone(test_data)
    data[lambda_integration.MSG_BODY_MESSAGE_TARGET] = 'kinesis:%s' % TEST_CHAIN_STREAM2_NAME
    kinesis.put_record(Data=to_bytes(json.dumps(data)), PartitionKey='testId', StreamName=TEST_CHAIN_STREAM1_NAME)

    # check results
    time.sleep(5)
    all_objects = testutil.list_all_s3_objects()
    testutil.assert_objects(test_data, all_objects)
Example #6
def get_machine_id():
    global MACHINE_ID
    if MACHINE_ID:
        return MACHINE_ID

    # determine MACHINE_ID from config files
    configs_map = {}
    config_file_tmp = get_config_file_tempdir()
    config_file_home = get_config_file_homedir()
    for config_file in (config_file_home, config_file_tmp):
        if config_file:
            local_configs = load_file(config_file)
            local_configs = json.loads(to_str(local_configs))
            configs_map[config_file] = local_configs
            if 'machine_id' in local_configs:
                MACHINE_ID = local_configs['machine_id']
                break

    # if we can neither find NOR create the config files, fall back to process id
    if not configs_map:
        return PROCESS_ID

    # assign default id if empty
    if not MACHINE_ID:
        MACHINE_ID = short_uid()

    # update MACHINE_ID in all config files
    for config_file, configs in configs_map.items():
        configs['machine_id'] = MACHINE_ID
        save_file(config_file, json.dumps(configs))

    return MACHINE_ID
Example #7
def test_firehose_s3():

    s3_resource = aws_stack.connect_to_resource('s3')
    firehose = aws_stack.connect_to_service('firehose')

    s3_prefix = '/testdata'
    test_data = '{"test": "firehose_data_%s"}' % short_uid()
    # create Firehose stream
    stream = firehose.create_delivery_stream(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        S3DestinationConfiguration={
            'RoleARN': aws_stack.iam_resource_arn('firehose'),
            'BucketARN': aws_stack.s3_bucket_arn(TEST_BUCKET_NAME),
            'Prefix': s3_prefix
        }
    )
    assert stream
    assert TEST_FIREHOSE_NAME in firehose.list_delivery_streams()['DeliveryStreamNames']
    # create target S3 bucket
    s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME)

    # put records
    firehose.put_record(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        Record={
            'Data': to_bytes(test_data)
        }
    )
    # check records in target bucket
    all_objects = testutil.list_all_s3_objects()
    testutil.assert_objects(json.loads(to_str(test_data)), all_objects)
Example #8
def test_upload_lambda_from_s3():

    s3_client = aws_stack.connect_to_service('s3')
    lambda_client = aws_stack.connect_to_service('lambda')

    lambda_name = 'test_lambda_%s' % short_uid()
    bucket_name = 'test_bucket_lambda'
    bucket_key = 'test_lambda.zip'

    # upload zip file to S3
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    s3_client.create_bucket(Bucket=bucket_name)
    s3_client.upload_fileobj(BytesIO(zip_file), bucket_name, bucket_key)

    # create lambda function
    lambda_client.create_function(
        FunctionName=lambda_name, Handler='handler.handler',
        Runtime=lambda_api.LAMBDA_RUNTIME_PYTHON27, Role='r1',
        Code={
            'S3Bucket': bucket_name,
            'S3Key': bucket_key
        }
    )

    # invoke lambda function
    data_before = b'{"foo": "bar"}'
    result = lambda_client.invoke(FunctionName=lambda_name, Payload=data_before)
    data_after = result['Payload'].read()
    assert json.loads(to_str(data_before)) == json.loads(to_str(data_after))
Example #9
def generate_processor_script(events_file, log_file=None):
    script_file = os.path.join(tempfile.gettempdir(), 'kclipy.%s.processor.py' % short_uid())
    if log_file:
        log_file = "'%s'" % log_file
    else:
        log_file = 'None'
    content = """#!/usr/bin/env python
import os, sys, glob, json, socket, time, logging, tempfile
import subprocess32 as subprocess
logging.basicConfig(level=logging.INFO)
for path in glob.glob('%s/lib/python*/site-packages'):
    sys.path.insert(0, path)
sys.path.insert(0, '%s')
from localstack.config import DEFAULT_ENCODING
from localstack.utils.kinesis import kinesis_connector
from localstack.utils.common import timestamp
events_file = '%s'
log_file = %s
error_log = os.path.join(tempfile.gettempdir(), 'kclipy.error.log')
if __name__ == '__main__':
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

    num_tries = 3
    sleep_time = 2
    error = None
    for i in range(0, num_tries):
        try:
            sock.connect(events_file)
            error = None
            break
        except Exception as e:
            error = e
            if i < num_tries:
                msg = '%%s: Unable to connect to UNIX socket. Retrying.' %% timestamp()
                subprocess.check_output('echo "%%s" >> %%s' %% (msg, error_log), shell=True)
                time.sleep(sleep_time)
    if error:
        print("WARN: Unable to connect to UNIX socket after retrying: %%s" %% error)
        raise error

    def receive_msg(records, checkpointer, shard_id):
        try:
            # records is a list of amazon_kclpy.messages.Record objects -> convert to JSON
            records_dicts = [j._json_dict for j in records]
            message_to_send = {'shard_id': shard_id, 'records': records_dicts}
            string_to_send = '%%s\\n' %% json.dumps(message_to_send)
            bytes_to_send = string_to_send.encode(DEFAULT_ENCODING)
            sock.send(bytes_to_send)
        except Exception as e:
            msg = "WARN: Unable to forward event: %%s" %% e
            print(msg)
            subprocess.check_output('echo "%%s" >> %%s' %% (msg, error_log), shell=True)
    kinesis_connector.KinesisProcessor.run_processor(log_file=log_file, processor_func=receive_msg)
    """ % (LOCALSTACK_VENV_FOLDER, LOCALSTACK_ROOT_FOLDER, events_file, log_file)
    save_file(script_file, content)
    chmod_r(script_file, 0o755)
    TMP_FILES.append(script_file)
    return script_file
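Illustrative usage (paths are made up): the returned script forwards KCL records as JSON lines to the UNIX socket at events_file.

script_path = generate_processor_script('/tmp/kclipy.events.sock', log_file='/tmp/kclipy.log')
# pass script_path as the listener_script / executableName of the KCL client process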
Example #10
def start_kcl_client_process(stream_name, listener_script, log_file=None, env=None, configs={},
        endpoint_url=None, ddb_lease_table_suffix=None, env_vars={},
        kcl_log_level=DEFAULT_KCL_LOG_LEVEL, log_subscribers=[]):
    env = aws_stack.get_environment(env)
    # decide which credentials provider to use
    credentialsProvider = None
    if (('AWS_ASSUME_ROLE_ARN' in os.environ or 'AWS_ASSUME_ROLE_ARN' in env_vars) and
            ('AWS_ASSUME_ROLE_SESSION_NAME' in os.environ or 'AWS_ASSUME_ROLE_SESSION_NAME' in env_vars)):
        # use special credentials provider that can assume IAM roles and handle temporary STS auth tokens
        credentialsProvider = 'com.atlassian.DefaultSTSAssumeRoleSessionCredentialsProvider'
        # pass through env variables to child process
        for var_name in ['AWS_ASSUME_ROLE_ARN', 'AWS_ASSUME_ROLE_SESSION_NAME',
                'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN']:
            if var_name in os.environ and var_name not in env_vars:
                env_vars[var_name] = os.environ[var_name]
    if env.region == REGION_LOCAL:
        # need to disable CBOR protocol, enforce use of plain JSON,
        # see https://github.com/mhart/kinesalite/issues/31
        env_vars['AWS_CBOR_DISABLE'] = 'true'
    if kcl_log_level or (len(log_subscribers) > 0):
        if not log_file:
            log_file = LOG_FILE_PATTERN.replace('*', short_uid())
            TMP_FILES.append(log_file)
        run('touch %s' % log_file)
        # start log output reader thread which will read the KCL log
        # file and print each line to stdout of this process...
        reader_thread = OutputReaderThread({'file': log_file, 'level': kcl_log_level,
            'log_prefix': 'KCL', 'log_subscribers': log_subscribers})
        reader_thread.start()

    # construct stream info
    stream_info = get_stream_info(stream_name, log_file, env=env, endpoint_url=endpoint_url,
        ddb_lease_table_suffix=ddb_lease_table_suffix, env_vars=env_vars)
    props_file = stream_info['properties_file']
    # set kcl config options
    kwargs = {
        'metricsLevel': 'NONE',
        'initialPositionInStream': 'LATEST'
    }
    # set parameters for local connection
    if env.region == REGION_LOCAL:
        kwargs['kinesisEndpoint'] = '%s:%s' % (HOSTNAME, config.PORT_KINESIS)
        kwargs['dynamodbEndpoint'] = '%s:%s' % (HOSTNAME, config.PORT_DYNAMODB)
        kwargs['kinesisProtocol'] = 'http%s' % ('s' if USE_SSL else '')
        kwargs['dynamodbProtocol'] = 'http%s' % ('s' if USE_SSL else '')
        kwargs['disableCertChecking'] = 'true'
    kwargs.update(configs)
    # create config file
    kclipy_helper.create_config_file(config_file=props_file, executableName=listener_script,
        streamName=stream_name, applicationName=stream_info['app_name'],
        credentialsProvider=credentialsProvider, **kwargs)
    TMP_FILES.append(props_file)
    # start stream consumer
    stream = KinesisStream(id=stream_name, params=stream_info)
    thread_consumer = KinesisProcessorThread.start_consumer(stream)
    TMP_THREADS.append(thread_consumer)
    return thread_consumer
Example #11
def create_stream(stream_name, s3_destination=None, elasticsearch_destination=None):
    stream = {
        'HasMoreDestinations': False,
        'VersionId': '1',
        'CreateTimestamp': time.time(),
        'DeliveryStreamARN': firehose_stream_arn(stream_name),
        'DeliveryStreamStatus': 'ACTIVE',
        'DeliveryStreamName': stream_name,
        'Destinations': []
    }
    DELIVERY_STREAMS[stream_name] = stream
    if elasticsearch_destination:
        update_destination(stream_name=stream_name,
            destination_id=short_uid(),
            elasticsearch_update=elasticsearch_destination)
    if s3_destination:
        update_destination(stream_name=stream_name, destination_id=short_uid(), s3_update=s3_destination)
    return stream
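A minimal call without destinations simply registers the stream; destination dicts, when given, are forwarded to update_destination, whose expected shape is not shown here:

stream = create_stream('my-delivery-stream')
assert DELIVERY_STREAMS['my-delivery-stream']['DeliveryStreamStatus'] == 'ACTIVE'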
Example #12
def make_error(message, code=400, code_string='InvalidParameter'):
    response = Response()
    response._content = """<ErrorResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/"><Error>
        <Type>Sender</Type>
        <Code>{code_string}</Code>
        <Message>{message}</Message>
        </Error><RequestId>{req_id}</RequestId>
        </ErrorResponse>""".format(message=message, code_string=code_string, req_id=short_uid())
    response.status_code = code
    return response
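Usage sketch with illustrative values:

err = make_error('Topic does not exist', code=404, code_string='NotFound')
assert err.status_code == 404
assert '<Code>NotFound</Code>' in err._content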
Example #13
def test_dynamodb_error_injection():
    if not do_run():
        return

    dynamodb = aws_stack.connect_to_resource('dynamodb')
    # create table with stream forwarding config
    testutil.create_dynamodb_table(TEST_TABLE_NAME, partition_key=PARTITION_KEY)
    table = dynamodb.Table(TEST_TABLE_NAME)

    # by default, no errors
    test_no_errors = table.put_item(Item={PARTITION_KEY: short_uid(), 'data': 'foobar123'})
    assert_equal(test_no_errors['ResponseMetadata']['HTTPStatusCode'], 200)

    # with a probability of 1, always throw errors
    config.DYNAMODB_ERROR_PROBABILITY = 1.0
    assert_raises(ClientError, table.put_item, Item={PARTITION_KEY: short_uid(), 'data': 'foobar123'})

    # reset probability to zero
    config.DYNAMODB_ERROR_PROBABILITY = 0.0
Example #14
def add_authorizer(path, data):
    api_id = get_api_id_from_path(path)
    result = common.clone(data)
    result['id'] = common.short_uid()
    if '_links' not in result:
        result['_links'] = {}
    result['_links']['self'] = {
        'href': '/restapis/%s/authorizers/%s' % (api_id, result['id'])
    }
    AUTHORIZERS[result['id']] = result
    return result
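Illustrative call (path and payload are made up; get_api_id_from_path is assumed to extract the REST API id from the request path):

authorizer = add_authorizer('/restapis/abc123/authorizers', {'name': 'my-authorizer', 'type': 'TOKEN'})
# authorizer['id'] is a generated short uid and
# authorizer['_links']['self']['href'] == '/restapis/<api_id>/authorizers/<id>'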
Example #15
def listen_to_kinesis(stream_name, listener_func=None, processor_script=None,
        events_file=None, endpoint_url=None, log_file=None, configs={}, env=None,
        ddb_lease_table_suffix=None, env_vars={}, kcl_log_level=DEFAULT_KCL_LOG_LEVEL,
        log_subscribers=[], wait_until_started=False):
    """
    High-level function that subscribes to a Kinesis stream and delivers
    incoming events to a listener function. A KCL client process is
    automatically started in the background.
    """
    env = aws_stack.get_environment(env)
    if not events_file:
        events_file = EVENTS_FILE_PATTERN.replace('*', short_uid())
        TMP_FILES.append(events_file)
    if not processor_script:
        processor_script = generate_processor_script(events_file, log_file=log_file)

    run('rm -f %s' % events_file)
    # start event reader thread (this process)
    ready_mutex = threading.Semaphore(0)
    thread = EventFileReaderThread(events_file, listener_func, ready_mutex=ready_mutex)
    thread.start()
    # Wait until the event reader thread is ready (to avoid 'Connection refused' error on the UNIX socket)
    ready_mutex.acquire()
    # start KCL client (background process)
    if processor_script[-4:] == '.pyc':
        processor_script = processor_script[0:-1]
    # add log listener that notifies when KCL is started
    if wait_until_started:
        listener = KclStartedLogListener()
        log_subscribers.append(listener)

    process = start_kcl_client_process(stream_name, processor_script,
        endpoint_url=endpoint_url, log_file=log_file, configs=configs, env=env,
        ddb_lease_table_suffix=ddb_lease_table_suffix, env_vars=env_vars, kcl_log_level=kcl_log_level,
        log_subscribers=log_subscribers)

    if wait_until_started:
        # Wait at most 90 seconds for initialization. Note that creating the DDB table can take quite a while
        try:
            listener.sync_init.get(block=True, timeout=90)
        except Exception:
            raise Exception('Timeout when waiting for KCL initialization.')
        # wait at most 30 seconds for shard lease notification
        try:
            listener.sync_take_shard.get(block=True, timeout=30)
        except Exception:
            # this merely means that there is no shard available to take. Do nothing.
            pass

    return process
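A usage sketch of the listener contract (the same records/shard_id signature used by the tests further below); the stream name is illustrative:

def print_records(records, shard_id):
    for record in records:
        print('shard %s: %s' % (shard_id, record))

process = listen_to_kinesis('my-stream', listener_func=print_records, wait_until_started=True)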
Example #16
 def execute_java_lambda(self, event, context, handler, main_file):
     event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
     save_file(event_file, json.dumps(event))
     TMP_FILES.append(event_file)
     class_name = handler.split('::')[0]
     classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
     cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
     async = False
     # flip async flag depending on origin
     if 'Records' in event:
         # TODO: add more event types that support async lambda execution
         if 'Sns' in event['Records'][0]:
             async = True
         if 'dynamodb' in event['Records'][0]:
             async = True
     result, log_output = self.run_lambda_executor(cmd, async=async)
Example #17
File: test_s3.py  Project: bbc/localstack
def test_s3_get_response_content_type_same_as_upload():
    bucket_name = 'test-bucket-%s' % short_uid()
    s3_client = aws_stack.connect_to_service('s3')
    s3_client.create_bucket(Bucket=bucket_name)

    # put object
    object_key = 'key-by-hostname'
    s3_client.put_object(Bucket=bucket_name, Key=object_key, Body='something', ContentType='text/html; charset=utf-8')
    url = s3_client.generate_presigned_url(
        'get_object', Params={'Bucket': bucket_name, 'Key': object_key}
    )

    # get object and assert headers
    response = requests.get(url, verify=False)
    assert response.headers['content-type'] == 'text/html; charset=utf-8'
    # clean up
    s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': object_key}]})
    s3_client.delete_bucket(Bucket=bucket_name)
Example #18
File: test_s3.py  Project: bbc/localstack
def test_s3_get_response_default_content_type():
    # When no content type is provided by a PUT request
    # 'binary/octet-stream' should be used
    # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
    bucket_name = 'test-bucket-%s' % short_uid()
    s3_client = aws_stack.connect_to_service('s3')
    s3_client.create_bucket(Bucket=bucket_name)

    # put object
    object_key = 'key-by-hostname'
    s3_client.put_object(Bucket=bucket_name, Key=object_key, Body='something')
    url = s3_client.generate_presigned_url(
        'get_object', Params={'Bucket': bucket_name, 'Key': object_key}
    )

    # get object and assert headers
    response = requests.get(url, verify=False)
    assert response.headers['content-type'] == 'binary/octet-stream'
    # clean up
    s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': object_key}]})
    s3_client.delete_bucket(Bucket=bucket_name)
Example #19
def get_event_message(event_name, bucket_name, file_name='testfile.txt', file_size=1024):
    # Based on: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
    return {
        'Records': [{
            'eventVersion': '2.0',
            'eventSource': 'aws:s3',
            'awsRegion': DEFAULT_REGION,
            'eventTime': timestamp(format=TIMESTAMP_FORMAT_MILLIS),
            'eventName': event_name,
            'userIdentity': {
                'principalId': 'AIDAJDPLRKLG7UEXAMPLE'
            },
            'requestParameters': {
                'sourceIPAddress': '127.0.0.1'  # TODO determine real source IP
            },
            'responseElements': {
                'x-amz-request-id': short_uid(),
                'x-amz-id-2': 'eftixk72aD6Ap51TnqcoF8eFidJG9Z/2'  # Amazon S3 host that processed the request
            },
            's3': {
                's3SchemaVersion': '1.0',
                'configurationId': 'testConfigRule',
                'bucket': {
                    'name': bucket_name,
                    'ownerIdentity': {
                        'principalId': 'A3NL1KOZZKExample'
                    },
                    'arn': 'arn:aws:s3:::%s' % bucket_name
                },
                'object': {
                    'key': file_name,
                    'size': file_size,
                    'eTag': 'd41d8cd98f00b204e9800998ecf8427e',
                    'versionId': '096fKKXTRTtl3on89fVO.nfljtsv6qko',
                    'sequencer': '0055AED6DCD90281E5'
                }
            }
        }]
    }
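Usage sketch with made-up object details:

event = get_event_message('ObjectCreated:Put', 'my-bucket', file_name='uploads/data.csv', file_size=2048)
assert event['Records'][0]['s3']['object']['key'] == 'uploads/data.csv'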
Example #20
def test_sns_to_sqs():
    sqs_client = aws_stack.connect_to_service('sqs')
    sns_client = aws_stack.connect_to_service('sns')

    # create topic and queue
    queue_info = sqs_client.create_queue(QueueName=TEST_QUEUE_NAME_FOR_SNS)
    topic_info = sns_client.create_topic(Name=TEST_TOPIC_NAME)

    # subscribe SQS to SNS, publish message
    sns_client.subscribe(TopicArn=topic_info['TopicArn'], Protocol='sqs',
        Endpoint=aws_stack.sqs_queue_arn(TEST_QUEUE_NAME_FOR_SNS))
    test_value = short_uid()
    sns_client.publish(TopicArn=topic_info['TopicArn'], Message='test message for SQS',
        MessageAttributes={'attr1': {'DataType': 'String', 'StringValue': test_value}})

    # receive, assert, and delete message from SQS
    queue_url = queue_info['QueueUrl']
    assertions = []
    # make sure we receive the correct topic ARN in notifications
    assertions.append({'TopicArn': topic_info['TopicArn']})
    # make sure the notification contains message attributes
    assertions.append({'Value': test_value})
    receive_assert_delete(queue_url, assertions, sqs_client)
Example #21
File: test_s3.py  Project: bbc/localstack
def test_s3_get_response_headers():
    bucket_name = 'test-bucket-%s' % short_uid()
    s3_client = aws_stack.connect_to_service('s3')
    s3_client.create_bucket(Bucket=bucket_name)

    # put object and CORS configuration
    object_key = 'key-by-hostname'
    s3_client.put_object(Bucket=bucket_name, Key=object_key, Body='something')
    url = s3_client.generate_presigned_url(
        'get_object', Params={'Bucket': bucket_name, 'Key': object_key}
    )
    s3_client.put_bucket_cors(Bucket=bucket_name,
        CORSConfiguration={
            'CORSRules': [{
                'AllowedMethods': ['GET', 'PUT', 'POST'],
                'AllowedOrigins': ['*'],
                'ExposeHeaders': [
                    'Date', 'x-amz-delete-marker', 'x-amz-version-id'
                ]
            }]
        },
    )

    # get object and assert headers
    url = s3_client.generate_presigned_url(
        'get_object', Params={'Bucket': bucket_name, 'Key': object_key}
    )
    response = requests.get(url, verify=False)
    assert response.headers['Date']
    assert response.headers['x-amz-delete-marker']
    assert response.headers['x-amz-version-id']
    assert not response.headers.get('x-amz-id-2')
    assert not response.headers.get('x-amz-request-id')
    # clean up
    s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': object_key}]})
    s3_client.delete_bucket(Bucket=bucket_name)
Example #22
    def test_kinesis_lambda_sns_ddb_sqs_streams(self):
        def create_kinesis_stream(name, delete=False):
            stream = aws_stack.create_kinesis_stream(name, delete=delete)
            stream.wait_for()

        ddb_lease_table_suffix = "-kclapp"
        table_name = TEST_TABLE_NAME + "klsdss" + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        lambda_stream_name = "lambda-stream-%s" % short_uid()
        lambda_queue_name = "lambda-queue-%s" % short_uid()
        lambda_ddb_name = "lambda-ddb-%s" % short_uid()
        queue_name = "queue-%s" % short_uid()
        dynamodb = aws_stack.connect_to_resource("dynamodb")
        dynamodb_service = aws_stack.create_external_boto_client("dynamodb")
        dynamodbstreams = aws_stack.create_external_boto_client(
            "dynamodbstreams")
        kinesis = aws_stack.create_external_boto_client("kinesis")
        sns = aws_stack.create_external_boto_client("sns")
        sqs = aws_stack.create_external_boto_client("sqs")

        LOGGER.info("Creating test streams...")
        run_safe(
            lambda: dynamodb_service.delete_table(TableName=stream_name +
                                                  ddb_lease_table_suffix),
            print_error=False,
        )

        create_kinesis_stream(stream_name, delete=True)
        create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(
            stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix=ddb_lease_table_suffix,
        )

        LOGGER.info("Kinesis consumer initialized.")

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(
            table_name,
            partition_key=PARTITION_KEY,
            stream_view_type="NEW_AND_OLD_IMAGES",
        )

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams["Streams"]:
            if stream["TableName"] == table_name:
                ddb_event_source_arn = stream["StreamArn"]
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS)
        testutil.create_lambda_function(
            func_name=lambda_ddb_name,
            zip_file=zip_file,
            event_source_arn=ddb_event_source_arn,
            delete=True,
        )
        # make sure we cannot create Lambda with same name twice
        with self.assertRaises(Exception):
            testutil.create_lambda_function(
                func_name=lambda_ddb_name,
                zip_file=zip_file,
                event_source_arn=ddb_event_source_arn,
            )

        # deploy test lambda connected to Kinesis Stream
        kinesis_event_source_arn = kinesis.describe_stream(
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
        )["StreamDescription"]["StreamARN"]
        testutil.create_lambda_function(
            func_name=lambda_stream_name,
            zip_file=zip_file,
            event_source_arn=kinesis_event_source_arn,
        )

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(queue_name)
        testutil.create_lambda_function(
            func_name=lambda_queue_name,
            zip_file=zip_file,
            event_source_arn=sqs_queue_info["QueueArn"],
        )

        # set number of items to update/put to table
        num_events_ddb = 15
        num_put_new_items = 5
        num_put_existing_items = 2
        num_batch_items = 3
        num_updates_ddb = (num_events_ddb - num_put_new_items -
                           num_put_existing_items - num_batch_items)

        LOGGER.info("Putting %s items to table...", num_events_ddb)
        table = dynamodb.Table(table_name)
        for i in range(0, num_put_new_items):
            table.put_item(Item={
                PARTITION_KEY: "testId%s" % i,
                "data": "foobar123"
            })
        # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
        for i in range(0, num_put_existing_items):
            table.put_item(Item={
                PARTITION_KEY: "testId%s" % i,
                "data": "foobar123_put_existing"
            })

        # batch write some items containing non-ASCII characters
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobar123 ✓"
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobar123 £"
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: short_uid(),
                                "data": "foobar123 ¢"
                            }
                        }
                    },
                ]
            })
        # update some items, which also triggers notification events
        for i in range(0, num_updates_ddb):
            dynamodb_service.update_item(
                TableName=table_name,
                Key={PARTITION_KEY: {
                    "S": "testId%s" % i
                }},
                AttributeUpdates={
                    "data": {
                        "Action": "PUT",
                        "Value": {
                            "S": "foobar123_updated"
                        }
                    }
                },
            )

        # put items to stream
        num_events_kinesis = 1
        num_kinesis_records = 10
        LOGGER.info("Putting %s records in %s event to stream...",
                    num_kinesis_records, num_events_kinesis)
        kinesis.put_records(
            Records=[{
                "Data": "{}",
                "PartitionKey": "testId%s" % i
            } for i in range(0, num_kinesis_records)],
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME,
        )

        # put 1 item to stream that will trigger an error in the Lambda
        num_events_kinesis_err = 1
        for i in range(num_events_kinesis_err):
            kinesis.put_record(
                Data='{"%s": 1}' %
                lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
                PartitionKey="testIdError",
                StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME,
            )

        # create SNS topic, connect it to the Lambda, publish test messages
        num_events_sns = 3
        response = sns.create_topic(Name=TEST_TOPIC_NAME)
        sns.subscribe(
            TopicArn=response["TopicArn"],
            Protocol="lambda",
            Endpoint=aws_stack.lambda_function_arn(lambda_stream_name),
        )
        for i in range(num_events_sns):
            sns.publish(
                TopicArn=response["TopicArn"],
                Subject="test_subject",
                Message="test message %s" % i,
            )

        # get latest records
        latest = aws_stack.kinesis_get_latest_records(
            TEST_LAMBDA_SOURCE_STREAM_NAME,
            shard_id="shardId-000000000000",
            count=10)
        self.assertEqual(10, len(latest))

        # send messages to SQS queue
        num_events_sqs = 4
        for i in range(num_events_sqs):
            sqs.send_message(QueueUrl=sqs_queue_info["QueueUrl"],
                             MessageBody=str(i))

        LOGGER.info("Waiting some time before finishing test.")
        time.sleep(2)

        num_events_lambda = num_events_ddb + num_events_sns + num_events_sqs
        num_events = num_events_lambda + num_kinesis_records

        def check_events():
            if len(events) != num_events:
                msg = "DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s" % (
                    len(events),
                    num_events,
                )
                LOGGER.warning(msg)
            self.assertEqual(num_events, len(events))
            event_items = [
                json.loads(base64.b64decode(e["data"])) for e in events
            ]
            # make sure we have the right amount of INSERT/MODIFY event types
            inserts = [
                e for e in event_items if e.get("__action_type") == "INSERT"
            ]
            modifies = [
                e for e in event_items if e.get("__action_type") == "MODIFY"
            ]
            self.assertEqual(num_put_new_items + num_batch_items, len(inserts))
            self.assertEqual(num_put_existing_items + num_updates_ddb,
                             len(modifies))

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=15, sleep=2)

        # check cloudwatch notifications
        def check_cw_invocations():
            num_invocations = get_lambda_invocations_count(lambda_stream_name)
            expected_invocation_count = num_events_kinesis + num_events_kinesis_err + num_events_sns
            self.assertEqual(expected_invocation_count, num_invocations)
            num_error_invocations = get_lambda_invocations_count(
                lambda_stream_name, "Errors")
            self.assertEqual(num_events_kinesis_err, num_error_invocations)

        # Lambda invocations are running asynchronously, hence sleep some time here to wait for results
        retry(check_cw_invocations, retries=7, sleep=2)

        # clean up
        testutil.delete_lambda_function(lambda_stream_name)
        testutil.delete_lambda_function(lambda_ddb_name)
        testutil.delete_lambda_function(lambda_queue_name)
        sqs.delete_queue(QueueUrl=sqs_queue_info["QueueUrl"])
Example #23
    def test_lambda_streams_batch_and_transactions(self):
        ddb_lease_table_suffix = "-kclapp2"
        table_name = TEST_TABLE_NAME + "lsbat" + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        lambda_ddb_name = "lambda-ddb-%s" % short_uid()
        dynamodb = aws_stack.create_external_boto_client("dynamodb",
                                                         client=True)
        dynamodb_service = aws_stack.create_external_boto_client("dynamodb")
        dynamodbstreams = aws_stack.create_external_boto_client(
            "dynamodbstreams")

        LOGGER.info("Creating test streams...")
        run_safe(
            lambda: dynamodb_service.delete_table(TableName=stream_name +
                                                  ddb_lease_table_suffix),
            print_error=False,
        )
        aws_stack.create_kinesis_stream(stream_name, delete=True)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(
            stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix=ddb_lease_table_suffix,
        )

        LOGGER.info("Kinesis consumer initialized.")

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(
            table_name,
            partition_key=PARTITION_KEY,
            stream_view_type="NEW_AND_OLD_IMAGES",
        )

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams["Streams"]:
            if stream["TableName"] == table_name:
                ddb_event_source_arn = stream["StreamArn"]
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        testutil.create_lambda_function(
            handler_file=TEST_LAMBDA_PYTHON,
            libs=TEST_LAMBDA_LIBS,
            func_name=lambda_ddb_name,
            event_source_arn=ddb_event_source_arn,
            delete=True,
        )

        # submit a batch with writes
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId0"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId1"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId2"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                ]
            })

        # submit a batch with writes and deletes
        dynamodb.batch_write_item(
            RequestItems={
                table_name: [
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId3"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId4"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "PutRequest": {
                            "Item": {
                                PARTITION_KEY: {
                                    "S": "testId5"
                                },
                                "data": {
                                    "S": "foobar123"
                                },
                            }
                        }
                    },
                    {
                        "DeleteRequest": {
                            "Key": {
                                PARTITION_KEY: {
                                    "S": "testId0"
                                }
                            }
                        }
                    },
                    {
                        "DeleteRequest": {
                            "Key": {
                                PARTITION_KEY: {
                                    "S": "testId1"
                                }
                            }
                        }
                    },
                    {
                        "DeleteRequest": {
                            "Key": {
                                PARTITION_KEY: {
                                    "S": "testId2"
                                }
                            }
                        }
                    },
                ]
            })

        # submit a transaction with writes and delete
        dynamodb.transact_write_items(TransactItems=[
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId6"
                        },
                        "data": {
                            "S": "foobar123"
                        },
                    },
                }
            },
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId7"
                        },
                        "data": {
                            "S": "foobar123"
                        },
                    },
                }
            },
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId8"
                        },
                        "data": {
                            "S": "foobar123"
                        },
                    },
                }
            },
            {
                "Delete": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId3"
                        }
                    },
                }
            },
            {
                "Delete": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId4"
                        }
                    },
                }
            },
            {
                "Delete": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId5"
                        }
                    },
                }
            },
        ])

        # submit a batch with a put over existing item
        dynamodb.transact_write_items(TransactItems=[
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId6"
                        },
                        "data": {
                            "S": "foobar123_updated1"
                        },
                    },
                }
            },
        ])

        # submit a transaction with a put over existing item
        dynamodb.transact_write_items(TransactItems=[
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        PARTITION_KEY: {
                            "S": "testId7"
                        },
                        "data": {
                            "S": "foobar123_updated1"
                        },
                    },
                }
            },
        ])

        # submit a transaction with updates
        dynamodb.transact_write_items(TransactItems=[
            {
                "Update": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId6"
                        }
                    },
                    "UpdateExpression": "SET #0 = :0",
                    "ExpressionAttributeNames": {
                        "#0": "data"
                    },
                    "ExpressionAttributeValues": {
                        ":0": {
                            "S": "foobar123_updated2"
                        }
                    },
                }
            },
            {
                "Update": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId7"
                        }
                    },
                    "UpdateExpression": "SET #0 = :0",
                    "ExpressionAttributeNames": {
                        "#0": "data"
                    },
                    "ExpressionAttributeValues": {
                        ":0": {
                            "S": "foobar123_updated2"
                        }
                    },
                }
            },
            {
                "Update": {
                    "TableName": table_name,
                    "Key": {
                        PARTITION_KEY: {
                            "S": "testId8"
                        }
                    },
                    "UpdateExpression": "SET #0 = :0",
                    "ExpressionAttributeNames": {
                        "#0": "data"
                    },
                    "ExpressionAttributeValues": {
                        ":0": {
                            "S": "foobar123_updated2"
                        }
                    },
                }
            },
        ])

        LOGGER.info("Waiting some time before finishing test.")
        time.sleep(2)

        num_insert = 9
        num_modify = 5
        num_delete = 6
        num_events = num_insert + num_modify + num_delete

        def check_events():
            if len(events) != num_events:
                msg = "DynamoDB updates retrieved (actual/expected): %s/%s" % (
                    len(events),
                    num_events,
                )
                LOGGER.warning(msg)
            self.assertEqual(num_events, len(events))
            event_items = [
                json.loads(base64.b64decode(e["data"])) for e in events
            ]
            # make sure we have the right amount of expected event types
            inserts = [
                e for e in event_items if e.get("__action_type") == "INSERT"
            ]
            modifies = [
                e for e in event_items if e.get("__action_type") == "MODIFY"
            ]
            removes = [
                e for e in event_items if e.get("__action_type") == "REMOVE"
            ]
            self.assertEqual(num_insert, len(inserts))
            self.assertEqual(num_modify, len(modifies))
            self.assertEqual(num_delete, len(removes))

            # assert that all inserts were received

            for i, event in enumerate(inserts):
                self.assertNotIn("old_image", event)
                item_id = "testId%d" % i
                matching = [
                    i for i in inserts if i["new_image"]["id"] == item_id
                ][0]
                self.assertEqual({
                    "id": item_id,
                    "data": "foobar123"
                }, matching["new_image"])

            # assert that all updates were received

            def assert_updates(expected_updates, modifies):
                def found(update):
                    for modif in modifies:
                        if modif["old_image"]["id"] == update["id"]:
                            self.assertEqual(
                                modif["old_image"],
                                {
                                    "id": update["id"],
                                    "data": update["old"]
                                },
                            )
                            self.assertEqual(
                                modif["new_image"],
                                {
                                    "id": update["id"],
                                    "data": update["new"]
                                },
                            )
                            return True

                for update in expected_updates:
                    self.assertTrue(found(update))

            updates1 = [
                {
                    "id": "testId6",
                    "old": "foobar123",
                    "new": "foobar123_updated1"
                },
                {
                    "id": "testId7",
                    "old": "foobar123",
                    "new": "foobar123_updated1"
                },
            ]
            updates2 = [
                {
                    "id": "testId6",
                    "old": "foobar123_updated1",
                    "new": "foobar123_updated2",
                },
                {
                    "id": "testId7",
                    "old": "foobar123_updated1",
                    "new": "foobar123_updated2",
                },
                {
                    "id": "testId8",
                    "old": "foobar123",
                    "new": "foobar123_updated2"
                },
            ]

            assert_updates(updates1, modifies[:2])
            assert_updates(updates2, modifies[2:])

            # assert that all removes were received

            for i, event in enumerate(removes):
                self.assertNotIn("new_image", event)
                item_id = "testId%d" % i
                matching = [
                    i for i in removes if i["old_image"]["id"] == item_id
                ][0]
                self.assertEqual({
                    "id": item_id,
                    "data": "foobar123"
                }, matching["old_image"])

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=9, sleep=4)

        # clean up
        testutil.delete_lambda_function(lambda_ddb_name)
Example #24
LOGGER = logging.getLogger(__name__)


def start_sqs(port=PORT_SQS, asynchronous=False, update_listener=None):  # note: 'async' is a reserved keyword in Python 3.7+
    install_elasticmq()
    backend_port = DEFAULT_PORT_SQS_BACKEND
    # create config file
    config = """
    include classpath("application.conf")
    node-address {
        protocol = http
        host = "%s"
        port = %s
        context-path = ""
    }
    rest-sqs {
        enabled = true
        bind-port = %s
        bind-hostname = "0.0.0.0"
        sqs-limits = strict
    }
    """ % (LOCALSTACK_HOSTNAME, port, backend_port)
    config_file = os.path.join(TMP_FOLDER, 'sqs.%s.conf' % short_uid())
    TMP_FILES.append(config_file)
    save_file(config_file, config)
    # start process
    cmd = ('java -Dconfig.file=%s -jar %s/elasticmq-server.jar' % (config_file, INSTALL_DIR_ELASTICMQ))
    print('Starting mock SQS (%s port %s)...' % (get_service_protocol(), port))
    start_proxy_for_service('sqs', port, backend_port, update_listener)
    return do_run(cmd, asynchronous)
Example #25
    def return_response(self, method, path, data, headers, response):
        if response.status_code < 400 or response.status_code >= 500:
            return

        region_details = Route53Backend.get()

        is_associate = path.endswith('/associatevpc')
        if is_associate or path.endswith('/disassociatevpc'):
            path_parts = path.lstrip('/').split('/')
            zone_id = path_parts[2]
            req_data = xmltodict.parse(to_str(data))
            zone_details = region_details.vpc_hosted_zone_associations.get(zone_id) or []
            if is_associate:
                assoc_id = short_uid()
                zone_data = req_data.get('AssociateVPCWithHostedZoneRequest', {})
                zone_data['Id'] = assoc_id
                zone_data['HostedZoneId'] = zone_id
                zone_details.append(zone_data)
                response_entry = {
                    'ChangeInfo': {
                        'Id': assoc_id,
                        'Status': 'INSYNC',
                        'SubmittedAt': timestamp_millis()
                    }
                }
            else:
                def _match(z):
                    return z['HostedZoneId'] == zone_id and z['VPC']['VPCId'] == zone_data['VPC']['VPCId']
                zone_data = req_data.get('DisassociateVPCFromHostedZoneRequest', {})
                response_entry = [z for z in zone_details if _match(z)]
                zone_details = [z for z in zone_details if not _match(z)]
                if not response_entry:
                    return 404
                response_entry = response_entry[0]

            region_details.vpc_hosted_zone_associations[zone_id] = zone_details

            response_tag = '%sVPCWithHostedZoneResponse' % ('Associate' if is_associate else 'Disassociate')
            response = {
                response_tag: response_entry
            }
            body = xmltodict.unparse(response)
            response = requests_response(body)
            return response

        if '/hostedzonesbyvpc' in path and method == 'GET':
            def _zone(z):
                zone_id = z['HostedZoneId']
                hosted_zone = client.get_hosted_zone(Id=zone_id).get('HostedZone', {})
                result = {
                    'HostedZoneId': zone_id,
                    'Name': hosted_zone.get('Name'),
                    'Owner': {'OwningAccount': constants.TEST_AWS_ACCOUNT_ID}
                }
                return result
            client = aws_stack.connect_to_service('route53')
            req_data = parse_request_data(method, path, data)
            vpc_id = req_data.get('vpcid')
            zone_details = region_details.vpc_hosted_zone_associations
            result = [_zone(z) for z_list in zone_details.values() for z in z_list if z['VPC']['VPCId'] == vpc_id]
            response = {'ListHostedZonesByVPCResponse': {'HostedZoneSummaries': {'HostedZoneSummary': result}}}
            body = xmltodict.unparse(response)
            response = requests_response(body)
            return response
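
The listener above relies on `xmltodict` to convert Route53's XML payloads to dicts and back. A self-contained sketch of that round trip, using made-up VPC and zone values:

import xmltodict

request_xml = """
<AssociateVPCWithHostedZoneRequest>
    <VPC><VPCId>vpc-12345678</VPCId><VPCRegion>us-east-1</VPCRegion></VPC>
</AssociateVPCWithHostedZoneRequest>
"""

# parse the request body, as the handler does for `data`
req_data = xmltodict.parse(request_xml)
zone_data = req_data.get('AssociateVPCWithHostedZoneRequest', {})

# build and serialize a response payload with a single root element
response = {'AssociateVPCWithHostedZoneResponse': {'ChangeInfo': {'Id': 'example-id', 'Status': 'INSYNC'}}}
body = xmltodict.unparse(response)
print(body)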
Example #26
0
File: infra.py Project: bbc/localstack
def get_graph(name_filter='.*', env=None):
    result = {
        'nodes': [],
        'edges': []
    }

    pool = {}

    if True:
        result = {
            'nodes': [],
            'edges': []
        }
        node_ids = {}
        # Make sure we load components in the right order:
        # (ES,DynamoDB,S3) -> (Kinesis,Lambda)
        domains = get_elasticsearch_domains(name_filter, pool=pool, env=env)
        dbs = get_dynamo_dbs(name_filter, pool=pool, env=env)
        buckets = get_s3_buckets(name_filter, details=True, pool=pool, env=env)
        streams = get_kinesis_streams(name_filter, pool=pool, env=env)
        firehoses = get_firehose_streams(name_filter, pool=pool, env=env)
        lambdas = get_lambda_functions(name_filter, details=True, pool=pool, env=env)
        queues = get_sqs_queues(name_filter, pool=pool, env=env)

        for es in domains:
            uid = short_uid()
            node_ids[es.id] = uid
            result['nodes'].append({'id': uid, 'arn': es.id, 'name': es.name(), 'type': 'es'})
        for b in buckets:
            uid = short_uid()
            node_ids[b.id] = uid
            result['nodes'].append({'id': uid, 'arn': b.id, 'name': b.name(), 'type': 's3'})
        for db in dbs:
            uid = short_uid()
            node_ids[db.id] = uid
            result['nodes'].append({'id': uid, 'arn': db.id, 'name': db.name(), 'type': 'dynamodb'})
        for s in streams:
            uid = short_uid()
            node_ids[s.id] = uid
            result['nodes'].append({'id': uid, 'arn': s.id, 'name': s.name(), 'type': 'kinesis'})
            for shard in s.shards:
                uid1 = short_uid()
                name = re.sub(r'shardId-0*', '', shard.id) or '0'
                result['nodes'].append({'id': uid1, 'arn': shard.id, 'name': name,
                    'type': 'kinesis_shard', 'streamName': s.name(), 'parent': uid})
        for f in firehoses:
            uid = short_uid()
            node_ids[f.id] = uid
            result['nodes'].append({'id': uid, 'arn': f.id, 'name': f.name(), 'type': 'firehose'})
            for d in f.destinations:
                result['edges'].append({'source': uid, 'target': node_ids[d.id]})
        for q in queues:
            uid = short_uid()
            node_ids[q.id] = uid
            result['nodes'].append({'id': uid, 'arn': q.id, 'name': q.name(), 'type': 'sqs'})
        for l in lambdas:
            uid = short_uid()
            node_ids[l.id] = uid
            result['nodes'].append({'id': uid, 'arn': l.id, 'name': l.name(), 'type': 'lambda'})
            for s in l.event_sources:
                lookup_id = s.id
                if isinstance(s, DynamoDBStream):
                    lookup_id = s.table.id
                result['edges'].append({'source': node_ids.get(lookup_id), 'target': uid})
            for t in l.targets:
                lookup_id = t.id
                result['edges'].append({'source': uid, 'target': node_ids.get(lookup_id)})
        for b in buckets:
            for n in b.notifications:
                src_uid = node_ids[b.id]
                tgt_uid = node_ids[n.target.id]
                result['edges'].append({'source': src_uid, 'target': tgt_uid})

    return result
Example #27
0
def test_kinesis_lambda_sns_ddb_streams():

    ddb_lease_table_suffix = '-kclapp'
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    dynamodb_service = aws_stack.connect_to_service('dynamodb')
    dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
    kinesis = aws_stack.connect_to_service('kinesis')
    sns = aws_stack.connect_to_service('sns')

    LOGGER.info('Creating test streams...')
    run_safe(lambda: dynamodb_service.delete_table(TableName=TEST_STREAM_NAME +
                                                   ddb_lease_table_suffix),
             print_error=False)
    aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

    # subscribe to inbound Kinesis stream
    def process_records(records, shard_id):
        EVENTS.extend(records)

    # start the KCL client process in the background
    kinesis_connector.listen_to_kinesis(
        TEST_STREAM_NAME,
        listener_func=process_records,
        wait_until_started=True,
        ddb_lease_table_suffix=ddb_lease_table_suffix)

    LOGGER.info('Kinesis consumer initialized.')

    # create table with stream forwarding config
    testutil.create_dynamodb_table(TEST_TABLE_NAME,
                                   partition_key=PARTITION_KEY,
                                   stream_view_type='NEW_AND_OLD_IMAGES')

    # list DDB streams and make sure the table stream is there
    streams = dynamodbstreams.list_streams()
    ddb_event_source_arn = None
    for stream in streams['Streams']:
        if stream['TableName'] == TEST_TABLE_NAME:
            ddb_event_source_arn = stream['StreamArn']
    assert ddb_event_source_arn

    # deploy test lambda connected to DynamoDB Stream
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON),
                                              get_content=True,
                                              libs=TEST_LAMBDA_LIBS,
                                              runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
                                    zip_file=zip_file,
                                    event_source_arn=ddb_event_source_arn,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)
    # make sure we cannot create Lambda with same name twice
    assert_raises(Exception,
                  testutil.create_lambda_function,
                  func_name=TEST_LAMBDA_NAME_DDB,
                  zip_file=zip_file,
                  event_source_arn=ddb_event_source_arn,
                  runtime=LAMBDA_RUNTIME_PYTHON27)

    # deploy test lambda connected to Kinesis Stream
    kinesis_event_source_arn = kinesis.describe_stream(
        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
    )['StreamDescription']['StreamARN']
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM,
                                    zip_file=zip_file,
                                    event_source_arn=kinesis_event_source_arn,
                                    runtime=LAMBDA_RUNTIME_PYTHON27)

    # put items to table
    num_events_ddb = 10
    LOGGER.info('Putting %s items to table...' % num_events_ddb)
    table = dynamodb.Table(TEST_TABLE_NAME)
    for i in range(0, num_events_ddb - 3):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123'
        })
    # batch write some items containing non-ASCII characters
    dynamodb.batch_write_item(
        RequestItems={
            TEST_TABLE_NAME: [{
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123 ✓'
                    }
                }
            }, {
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123 £'
                    }
                }
            }, {
                'PutRequest': {
                    'Item': {
                        PARTITION_KEY: short_uid(),
                        'data': 'foobar123 ¢'
                    }
                }
            }]
        })

    # put items to stream
    num_events_kinesis = 10
    LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
    kinesis.put_records(Records=[{
        'Data': '{}',
        'PartitionKey': 'testId%s' % i
    } for i in range(0, num_events_kinesis)],
                        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # put 1 item to stream that will trigger an error in the Lambda
    kinesis.put_record(Data='{"%s": 1}' %
                       lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
                       PartitionKey='testIderror',
                       StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # create SNS topic, connect it to the Lambda, publish test message
    num_events_sns = 3
    response = sns.create_topic(Name=TEST_TOPIC_NAME)
    sns.subscribe(
        TopicArn=response['TopicArn'],
        Protocol='lambda',
        Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
    for i in range(0, num_events_sns):
        sns.publish(TopicArn=response['TopicArn'],
                    Message='test message %s' % i)

    # get latest records
    latest = aws_stack.kinesis_get_latest_records(
        TEST_LAMBDA_SOURCE_STREAM_NAME,
        shard_id='shardId-000000000000',
        count=10)
    assert len(latest) == 10

    LOGGER.info('Waiting some time before finishing test.')
    time.sleep(2)

    num_events = num_events_ddb + num_events_kinesis + num_events_sns

    def check_events():
        if len(EVENTS) != num_events:
            LOGGER.warning(
                ('DynamoDB and Kinesis updates retrieved ' +
                 '(actual/expected): %s/%s') % (len(EVENTS), num_events))
        assert len(EVENTS) == num_events

    # this can take a long time in CI, make sure we give it enough time/retries
    retry(check_events, retries=7, sleep=3)

    # check cloudwatch notifications
    stats1 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM)
    assert len(stats1['Datapoints']) == 2 + num_events_sns
    stats2 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM, 'Errors')
    assert len(stats2['Datapoints']) == 1
    stats3 = get_lambda_metrics(TEST_LAMBDA_NAME_DDB)
    assert len(stats3['Datapoints']) == 10
Example #28
0
    def _store_logs(self, func_details, log_output, invocation_time):
        log_group_name = '/aws/lambda/%s' % func_details.name()
        time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time))
        log_stream_name = '%s/[$LATEST]%s' % (time_str, short_uid())
        return store_cloudwatch_logs(log_group_name, log_stream_name,
                                     log_output, invocation_time)
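
For reference, the naming scheme above produces values like the following (a rough sketch with `short_uid()` replaced by a UUID prefix and a hypothetical function name):

import time
import uuid

invocation_time = time.time()
func_name = 'my-func'  # hypothetical; stands in for func_details.name()
log_group_name = '/aws/lambda/%s' % func_name
time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time))
log_stream_name = '%s/[$LATEST]%s' % (time_str, str(uuid.uuid4())[0:8])
print(log_group_name)   # /aws/lambda/my-func
print(log_stream_name)  # e.g. 2021/06/01/[$LATEST]a1b2c3d4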
Example #29
0
def test_sqs_queue_names():
    sqs_client = aws_stack.connect_to_service('sqs')
    queue_name = '%s.fifo' % short_uid()
    # make sure we can create *.fifo queues
    queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
    sqs_client.delete_queue(QueueUrl=queue_url)
Example #30
0
    def test_dynamodb_stream_records_with_update_item(self):
        table_name = "test-ddb-table-%s" % short_uid()
        dynamodb = aws_stack.create_external_boto_client("dynamodb")
        ddbstreams = aws_stack.create_external_boto_client("dynamodbstreams")

        aws_stack.create_dynamodb_table(
            table_name,
            partition_key=PARTITION_KEY,
            stream_view_type="NEW_AND_OLD_IMAGES",
        )
        table = self.dynamodb.Table(table_name)

        response = ddbstreams.describe_stream(
            StreamArn=table.latest_stream_arn)
        self.assertEqual(200, response["ResponseMetadata"]["HTTPStatusCode"])
        self.assertEqual(1, len(response["StreamDescription"]["Shards"]))
        shard_id = response["StreamDescription"]["Shards"][0]["ShardId"]
        starting_sequence_number = int(
            response["StreamDescription"]["Shards"][0].get(
                "SequenceNumberRange").get("StartingSequenceNumber"))

        response = ddbstreams.get_shard_iterator(
            StreamArn=table.latest_stream_arn,
            ShardId=shard_id,
            ShardIteratorType="LATEST",
        )
        self.assertEqual(200, response["ResponseMetadata"]["HTTPStatusCode"])
        self.assertIn("ShardIterator", response)
        iterator_id = response["ShardIterator"]

        item_id = short_uid()
        for _ in range(2):
            dynamodb.update_item(
                TableName=table_name,
                Key={PARTITION_KEY: {
                    "S": item_id
                }},
                UpdateExpression="SET attr1 = :v1, attr2 = :v2",
                ExpressionAttributeValues={
                    ":v1": {
                        "S": "value1"
                    },
                    ":v2": {
                        "S": "value2"
                    },
                },
                ReturnValues="ALL_NEW",
                ReturnConsumedCapacity="INDEXES",
            )

        records = ddbstreams.get_records(ShardIterator=iterator_id)
        self.assertEqual(200, records["ResponseMetadata"]["HTTPStatusCode"])
        self.assertEqual(2, len(records["Records"]))
        self.assertTrue(
            isinstance(
                records["Records"][0]["dynamodb"]
                ["ApproximateCreationDateTime"],
                datetime,
            ))
        self.assertEqual("1.1", records["Records"][0]["eventVersion"])
        self.assertEqual("INSERT", records["Records"][0]["eventName"])
        self.assertNotIn("OldImage", records["Records"][0]["dynamodb"])
        self.assertGreater(
            int(records["Records"][0]["dynamodb"]["SequenceNumber"]),
            starting_sequence_number,
        )
        self.assertTrue(
            isinstance(
                records["Records"][1]["dynamodb"]
                ["ApproximateCreationDateTime"],
                datetime,
            ))
        self.assertEqual("1.1", records["Records"][1]["eventVersion"])
        self.assertEqual("MODIFY", records["Records"][1]["eventName"])
        self.assertIn("OldImage", records["Records"][1]["dynamodb"])
        self.assertGreater(
            int(records["Records"][1]["dynamodb"]["SequenceNumber"]),
            starting_sequence_number,
        )

        dynamodb.delete_table(TableName=table_name)
Example #31
0
    def test_transaction_write_items(self):
        table_name = "test-ddb-table-%s" % short_uid()
        dynamodb = aws_stack.create_external_boto_client("dynamodb")

        dynamodb.create_table(
            TableName=table_name,
            KeySchema=[{
                "AttributeName": "id",
                "KeyType": "HASH"
            }],
            AttributeDefinitions=[{
                "AttributeName": "id",
                "AttributeType": "S"
            }],
            ProvisionedThroughput={
                "ReadCapacityUnits": 5,
                "WriteCapacityUnits": 5
            },
            Tags=TEST_DDB_TAGS,
        )

        response = dynamodb.transact_write_items(TransactItems=[
            {
                "ConditionCheck": {
                    "TableName": table_name,
                    "ConditionExpression": "attribute_not_exists(id)",
                    "Key": {
                        "id": {
                            "S": "test1"
                        }
                    },
                }
            },
            {
                "Put": {
                    "TableName": table_name,
                    "Item": {
                        "id": {
                            "S": "test2"
                        }
                    }
                }
            },
            {
                "Update": {
                    "TableName": table_name,
                    "Key": {
                        "id": {
                            "S": "test3"
                        }
                    },
                    "UpdateExpression": "SET attr1 = :v1, attr2 = :v2",
                    "ExpressionAttributeValues": {
                        ":v1": {
                            "S": "value1"
                        },
                        ":v2": {
                            "S": "value2"
                        },
                    },
                }
            },
            {
                "Delete": {
                    "TableName": table_name,
                    "Key": {
                        "id": {
                            "S": "test4"
                        }
                    }
                }
            },
        ])

        self.assertEqual(200, response["ResponseMetadata"]["HTTPStatusCode"])

        # clean up
        dynamodb.delete_table(TableName=table_name)
Example #32
0
    def test_dynamodb_with_kinesis_stream(self):
        dynamodb = aws_stack.create_external_boto_client("dynamodb")
        kinesis = aws_stack.create_external_boto_client("kinesis")

        # create kinesis datastream
        kinesis.create_stream(StreamName="kinesis_dest_stream", ShardCount=1)
        # wait for the stream to be created
        sleep(1)
        # Get stream description
        stream_description = kinesis.describe_stream(
            StreamName="kinesis_dest_stream")["StreamDescription"]
        table_name = "table_with_kinesis_stream-%s" % short_uid()
        # create table
        dynamodb.create_table(
            TableName=table_name,
            KeySchema=[{
                "AttributeName": "Username",
                "KeyType": "HASH"
            }],
            AttributeDefinitions=[{
                "AttributeName": "Username",
                "AttributeType": "S"
            }],
            ProvisionedThroughput={
                "ReadCapacityUnits": 5,
                "WriteCapacityUnits": 5
            },
        )

        # Enable kinesis destination for the table
        dynamodb.enable_kinesis_streaming_destination(
            TableName=table_name, StreamArn=stream_description["StreamARN"])

        # put item into table
        dynamodb.put_item(TableName=table_name,
                          Item={"Username": {
                              "S": "Fred"
                          }})

        dynamodb.update_item(
            TableName=table_name,
            Key={"Username": {
                "S": "Fred"
            }},
            UpdateExpression="set S=:r",
            ExpressionAttributeValues={":r": {
                "S": "Fred_Modified"
            }},
            ReturnValues="UPDATED_NEW",
        )

        dynamodb.delete_item(TableName=table_name,
                             Key={"Username": {
                                 "S": "Fred"
                             }})
        # get shard iterator of the stream
        shard_iterator = kinesis.get_shard_iterator(
            StreamName="kinesis_dest_stream",
            ShardId=stream_description["Shards"][0]["ShardId"],
            ShardIteratorType="TRIM_HORIZON",
        )["ShardIterator"]

        # get records from the stream
        records = kinesis.get_records(ShardIterator=shard_iterator)["Records"]
        self.assertEqual(3, len(records))

        for record in records:
            record = json.loads(record["Data"])
            self.assertEqual(record["tableName"], table_name)
            # check eventSourceARN not exists in the stream record
            self.assertNotIn("eventSourceARN", record)
            if record["eventName"] == "INSERT":
                self.assertNotIn("OldImage", record["dynamodb"])
                self.assertIn("NewImage", record["dynamodb"])
            elif record["eventName"] == "MODIFY":
                self.assertIn("NewImage", record["dynamodb"])
                self.assertIn("OldImage", record["dynamodb"])
            elif record["eventName"] == "REMOVE":
                self.assertNotIn("NewImage", record["dynamodb"])
                self.assertIn("OldImage", record["dynamodb"])
        # describe kinesis streaming destination of the table
        describe = dynamodb.describe_kinesis_streaming_destination(
            TableName=table_name)["KinesisDataStreamDestinations"][0]

        # assert kinesis streaming destination status
        self.assertEqual(stream_description["StreamARN"],
                         describe["StreamArn"])
        self.assertEqual("ACTIVE", describe["DestinationStatus"])

        # Disable kinesis destination
        dynamodb.disable_kinesis_streaming_destination(
            TableName=table_name, StreamArn=stream_description["StreamARN"])

        # describe kinesis streaming destination of the table
        describe = dynamodb.describe_kinesis_streaming_destination(
            TableName=table_name)["KinesisDataStreamDestinations"][0]

        # assert kinesis streaming destination status
        self.assertEqual(stream_description["StreamARN"],
                         describe["StreamArn"])
        self.assertEqual("DISABLED", describe["DestinationStatus"])

        # clean up
        delete_table(table_name)
        kinesis.delete_stream(StreamName="kinesis_dest_stream")
Example #33
0
    def test_dynamodb_stream_stream_view_type(self):
        dynamodb = aws_stack.create_external_boto_client("dynamodb")
        ddbstreams = aws_stack.create_external_boto_client("dynamodbstreams")
        table_name = "table_with_stream-%s" % short_uid()
        # create table
        table = dynamodb.create_table(
            TableName=table_name,
            KeySchema=[{
                "AttributeName": "Username",
                "KeyType": "HASH"
            }],
            AttributeDefinitions=[{
                "AttributeName": "Username",
                "AttributeType": "S"
            }],
            StreamSpecification={
                "StreamEnabled": True,
                "StreamViewType": "KEYS_ONLY",
            },
            ProvisionedThroughput={
                "ReadCapacityUnits": 5,
                "WriteCapacityUnits": 5
            },
        )
        stream_arn = table["TableDescription"]["LatestStreamArn"]
        # wait for stream to be created
        sleep(1)
        # put item in table - Insert event
        dynamodb.put_item(TableName=table_name,
                          Item={"Username": {
                              "S": "Fred"
                          }})
        # update item in table - Modify event
        dynamodb.update_item(
            TableName=table_name,
            Key={"Username": {
                "S": "Fred"
            }},
            UpdateExpression="set S=:r",
            ExpressionAttributeValues={":r": {
                "S": "Fred_Modified"
            }},
            ReturnValues="UPDATED_NEW",
        )
        # delete item in table - Delete event
        dynamodb.delete_item(TableName=table_name,
                             Key={"Username": {
                                 "S": "Fred"
                             }})
        result = ddbstreams.describe_stream(StreamArn=stream_arn)
        # assert stream_view_type of the table
        self.assertEqual("KEYS_ONLY",
                         result["StreamDescription"]["StreamViewType"])

        # get shard iterator
        response = ddbstreams.get_shard_iterator(
            StreamArn=stream_arn,
            ShardId=result["StreamDescription"]["Shards"][0]["ShardId"],
            ShardIteratorType="AT_SEQUENCE_NUMBER",
            SequenceNumber=result["StreamDescription"]["Shards"][0].get(
                "SequenceNumberRange").get("StartingSequenceNumber"),
        )

        # get records
        records = ddbstreams.get_records(
            ShardIterator=response["ShardIterator"])["Records"]

        for record in records:
            self.assertIn("SequenceNumber", record["dynamodb"])
            self.assertEqual("KEYS_ONLY", record["dynamodb"]["StreamViewType"])
            self.assertEqual({"Username": {
                "S": "Fred"
            }}, record["dynamodb"]["Keys"])
            self.assertNotIn("OldImage", record["dynamodb"])
            self.assertNotIn("NewImage", record["dynamodb"])
        # clean up
        delete_table(table_name)
Example #34
0
    def test_multiple_update_expressions(self):
        dynamodb = aws_stack.create_external_boto_client("dynamodb")
        aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME,
                                        partition_key=PARTITION_KEY)
        table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)

        item_id = short_uid()
        table.put_item(Item={PARTITION_KEY: item_id, "data": "foobar123 ✓"})
        response = dynamodb.update_item(
            TableName=TEST_DDB_TABLE_NAME,
            Key={PARTITION_KEY: {
                "S": item_id
            }},
            UpdateExpression="SET attr1 = :v1, attr2 = :v2",
            ExpressionAttributeValues={
                ":v1": {
                    "S": "value1"
                },
                ":v2": {
                    "S": "value2"
                }
            },
        )
        self.assertEqual(200, response["ResponseMetadata"]["HTTPStatusCode"])

        item = table.get_item(Key={PARTITION_KEY: item_id})["Item"]
        self.assertEqual(item["attr1"], "value1")
        self.assertEqual(item["attr2"], "value2")
        attributes = [{"AttributeName": "id", "AttributeType": STRING}]

        user_id_idx = [
            {
                "Create": {
                    "IndexName": "id-index",
                    "KeySchema": [{
                        "AttributeName": "id",
                        "KeyType": "HASH"
                    }],
                    "Projection": {
                        "ProjectionType": "INCLUDE",
                        "NonKeyAttributes": ["data"],
                    },
                    "ProvisionedThroughput": {
                        "ReadCapacityUnits": 5,
                        "WriteCapacityUnits": 5,
                    },
                }
            },
        ]

        # for each index
        table.update(AttributeDefinitions=attributes,
                     GlobalSecondaryIndexUpdates=user_id_idx)

        with self.assertRaises(Exception) as ctx:
            table.query(
                TableName=TEST_DDB_TABLE_NAME,
                IndexName="id-index",
                KeyConditionExpression=Key(PARTITION_KEY).eq(item_id),
                Select="ALL_ATTRIBUTES",
            )
        self.assertIn("ValidationException", str(ctx.exception))
Example #35
0
    def test_stream_consumers(self):
        client = aws_stack.connect_to_service('kinesis')
        stream_name = 'test-%s' % short_uid()
        stream_arn = aws_stack.kinesis_stream_arn(stream_name)

        def assert_consumers(count):
            consumers = client.list_stream_consumers(
                StreamARN=stream_arn).get('Consumers')
            self.assertEqual(count, len(consumers))
            return consumers

        # create stream and assert 0 consumers
        client.create_stream(StreamName=stream_name, ShardCount=1)
        sleep(1)
        assert_consumers(0)

        # create consumer and assert 1 consumer
        consumer_name = 'cons1'
        response = client.register_stream_consumer(StreamARN=stream_arn,
                                                   ConsumerName=consumer_name)
        sleep(1)
        self.assertEqual(consumer_name, response['Consumer']['ConsumerName'])
        # boto3 converts the timestamp to datetime
        self.assertTrue(
            isinstance(response['Consumer']['ConsumerCreationTimestamp'],
                       datetime))
        consumers = assert_consumers(1)
        consumer_arn = consumers[0]['ConsumerARN']
        self.assertEqual(consumer_name, consumers[0]['ConsumerName'])
        self.assertIn('/%s' % consumer_name, consumer_arn)
        self.assertTrue(
            isinstance(consumers[0]['ConsumerCreationTimestamp'], datetime))

        # lookup stream consumer by describe calls, assert response
        consumer_description_by_arn = client.describe_stream_consumer(
            StreamARN=stream_arn,
            ConsumerARN=consumer_arn)['ConsumerDescription']
        self.assertEqual(consumer_name,
                         consumer_description_by_arn['ConsumerName'])
        self.assertEqual(consumer_arn,
                         consumer_description_by_arn['ConsumerARN'])
        self.assertEqual(stream_arn, consumer_description_by_arn['StreamARN'])
        self.assertEqual('ACTIVE',
                         consumer_description_by_arn['ConsumerStatus'])
        self.assertTrue(
            isinstance(
                consumer_description_by_arn['ConsumerCreationTimestamp'],
                datetime))
        consumer_description_by_name = client.describe_stream_consumer(
            StreamARN=stream_arn,
            ConsumerName=consumer_name)['ConsumerDescription']
        self.assertEqual(consumer_description_by_arn,
                         consumer_description_by_name)

        # delete existing consumer and assert 0 remaining consumers
        client.deregister_stream_consumer(StreamARN=stream_arn,
                                          ConsumerName=consumer_name)
        sleep(1)
        assert_consumers(0)

        # clean up
        client.delete_stream(StreamName=stream_name)
Example #36
0
    def test_apigateway_with_lambda_integration(self):
        lambda_name = 'test-apigw-lambda-%s' % short_uid()
        self.create_lambda_function(lambda_name)

        lambda_uri = aws_stack.lambda_function_arn(lambda_name)
        target_uri = aws_stack.apigateway_invocations_arn(lambda_uri)

        apigw_client = aws_stack.connect_to_service('apigateway')

        api = apigw_client.create_rest_api(name='test-api', description='')

        api_id = api['id']
        root_res_id = apigw_client.get_resources(restApiId=api_id)['items'][0]['id']
        api_resource = apigw_client.create_resource(restApiId=api_id, parentId=root_res_id, pathPart='test')

        apigw_client.put_method(
            restApiId=api_id,
            resourceId=api_resource['id'],
            httpMethod='GET',
            authorizationType='NONE'
        )

        rs = apigw_client.put_integration(
            restApiId=api_id,
            resourceId=api_resource['id'],
            httpMethod='GET',
            integrationHttpMethod='GET',
            type='AWS',
            uri=target_uri
        )
        self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
        for key in ['httpMethod', 'type', 'passthroughBehavior', 'cacheKeyParameters', 'uri', 'cacheNamespace']:
            self.assertIn(key, rs)

        self.assertNotIn('responseTemplates', rs)

        rs = apigw_client.get_integration(
            restApiId=api_id,
            resourceId=api_resource['id'],
            httpMethod='GET'
        )
        self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)

        self.assertEqual(rs['type'], 'AWS')
        self.assertEqual(rs['httpMethod'], 'GET')
        self.assertEqual(rs['uri'], target_uri)

        rs = apigw_client.delete_integration(
            restApiId=api_id,
            resourceId=api_resource['id'],
            httpMethod='GET',
        )
        self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)

        try:
            # Try to get deleted integration
            apigw_client.get_integration(
                restApiId=api_id,
                resourceId=api_resource['id'],
                httpMethod='GET'
            )
            self.fail('This call should not be successful as the integration is deleted')

        except ClientError as e:
            self.assertEqual(e.response['Error']['Code'], 'BadRequestException')

        # clean up
        lambda_client = aws_stack.connect_to_service('lambda')
        lambda_client.delete_function(
            FunctionName=lambda_name
        )

        apigw_client.delete_rest_api(
            restApiId=api_id
        )
Example #37
0
def generate_default_name_without_stack(logical_resource_id: str):
    random_id_part = short_uid()
    resource_id_part = logical_resource_id[:63 - 1 - len(random_id_part)]
    return f"{resource_id_part}-{random_id_part}"
Example #38
0
def get_nested_stack_name(params, **kwargs):
    stack_name = kwargs.get('stack_name', 'stack')
    return '%s-%s' % (stack_name, common.short_uid())
Example #39
0
    def test_attach_detach_role_policy(self):
        role_name = "s3-role-{}".format(short_uid())
        policy_name = "s3-role-policy-{}".format(short_uid())

        policy_arns = [p["Arn"] for p in ADDITIONAL_MANAGED_POLICIES.values()]

        assume_policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": "sts:AssumeRole",
                    "Principal": {"Service": "s3.amazonaws.com"},
                }
            ],
        }

        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": [
                        "s3:GetReplicationConfiguration",
                        "s3:GetObjectVersion",
                        "s3:ListBucket",
                    ],
                    "Effect": "Allow",
                    "Resource": ["arn:aws:s3:::bucket_name"],
                }
            ],
        }

        self.iam_client.create_role(
            RoleName=role_name,
            AssumeRolePolicyDocument=json.dumps(assume_policy_document),
        )

        policy_arn = self.iam_client.create_policy(
            PolicyName=policy_name, Path="/", PolicyDocument=json.dumps(policy_document)
        )["Policy"]["Arn"]
        policy_arns.append(policy_arn)

        # Attach some polices
        for policy_arn in policy_arns:
            rs = self.iam_client.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn)
            self.assertEqual(200, rs["ResponseMetadata"]["HTTPStatusCode"])

        try:
            # Try to delete role
            self.iam_client.delete_role(RoleName=role_name)
            self.fail("This call should not be successful as the role has policies attached")

        except ClientError as e:
            self.assertEqual("DeleteConflict", e.response["Error"]["Code"])

        for policy_arn in policy_arns:
            rs = self.iam_client.detach_role_policy(RoleName=role_name, PolicyArn=policy_arn)
            self.assertEqual(200, rs["ResponseMetadata"]["HTTPStatusCode"])

        # clean up
        rs = self.iam_client.delete_role(RoleName=role_name)
        self.assertEqual(200, rs["ResponseMetadata"]["HTTPStatusCode"])

        self.iam_client.delete_policy(PolicyArn=policy_arn)
Example #40
0
def get_graph(name_filter='.*', env=None):
    result = {'nodes': [], 'edges': []}

    pool = {}

    if True:
        result = {'nodes': [], 'edges': []}
        node_ids = {}
        # Make sure we load components in the right order:
        # (ES,DynamoDB,S3) -> (Kinesis,Lambda)
        domains = get_elasticsearch_domains(name_filter, pool=pool, env=env)
        dbs = get_dynamo_dbs(name_filter, pool=pool, env=env)
        buckets = get_s3_buckets(name_filter, details=True, pool=pool, env=env)
        streams = get_kinesis_streams(name_filter, pool=pool, env=env)
        firehoses = get_firehose_streams(name_filter, pool=pool, env=env)
        lambdas = get_lambda_functions(name_filter,
                                       details=True,
                                       pool=pool,
                                       env=env)
        queues = get_sqs_queues(name_filter, pool=pool, env=env)

        for es in domains:
            uid = short_uid()
            node_ids[es.id] = uid
            result['nodes'].append({
                'id': uid,
                'arn': es.id,
                'name': es.name(),
                'type': 'es'
            })
        for b in buckets:
            uid = short_uid()
            node_ids[b.id] = uid
            result['nodes'].append({
                'id': uid,
                'arn': b.id,
                'name': b.name(),
                'type': 's3'
            })
        for db in dbs:
            uid = short_uid()
            node_ids[db.id] = uid
            result['nodes'].append({
                'id': uid,
                'arn': db.id,
                'name': db.name(),
                'type': 'dynamodb'
            })
        for s in streams:
            uid = short_uid()
            node_ids[s.id] = uid
            result['nodes'].append({
                'id': uid,
                'arn': s.id,
                'name': s.name(),
                'type': 'kinesis'
            })
            for shard in s.shards:
                uid1 = short_uid()
                name = re.sub(r'shardId-0*', '', shard.id) or '0'
                result['nodes'].append({
                    'id': uid1,
                    'arn': shard.id,
                    'name': name,
                    'type': 'kinesis_shard',
                    'streamName': s.name(),
                    'parent': uid
                })
        for f in firehoses:
            uid = short_uid()
            node_ids[f.id] = uid
            result['nodes'].append({
                'id': uid,
                'arn': f.id,
                'name': f.name(),
                'type': 'firehose'
            })
            for d in f.destinations:
                result['edges'].append({
                    'source': uid,
                    'target': node_ids[d.id]
                })
        for q in queues:
            uid = short_uid()
            node_ids[q.id] = uid
            result['nodes'].append({
                'id': uid,
                'arn': q.id,
                'name': q.name(),
                'type': 'sqs'
            })
        for l in lambdas:
            uid = short_uid()
            node_ids[l.id] = uid
            result['nodes'].append({
                'id': uid,
                'arn': l.id,
                'name': l.name(),
                'type': 'lambda'
            })
            for s in l.event_sources:
                lookup_id = s.id
                if isinstance(s, DynamoDBStream):
                    lookup_id = s.table.id
                result['edges'].append({
                    'source': node_ids.get(lookup_id),
                    'target': uid
                })
            for t in l.targets:
                lookup_id = t.id
                result['edges'].append({
                    'source': uid,
                    'target': node_ids.get(lookup_id)
                })
        for b in buckets:
            for n in b.notifications:
                src_uid = node_ids[b.id]
                tgt_uid = node_ids[n.target.id]
                result['edges'].append({'source': src_uid, 'target': tgt_uid})

    return result
Example #41
0
    def _execute(self, func_arn, func_details, event, context=None, version=None):
        lambda_cwd = func_details.cwd
        runtime = func_details.runtime
        handler = func_details.handler
        environment = self._prepare_environment(func_details)

        # configure USE_SSL in environment
        if config.USE_SSL:
            environment['USE_SSL'] = '1'

        # prepare event body
        if not event:
            LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
            event = {}
        event_body = json.dumps(json_safe(event))
        stdin = self.prepare_event(environment, event_body)

        main_endpoint = get_main_endpoint_from_container()

        environment['LOCALSTACK_HOSTNAME'] = main_endpoint
        environment['_HANDLER'] = handler
        if os.environ.get('HTTP_PROXY'):
            environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']
        if func_details.timeout:
            environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
        if context:
            environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
            environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
            environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
            if hasattr(context, 'client_context'):
                environment['AWS_LAMBDA_CLIENT_CONTEXT'] = json.dumps(to_str(base64.b64decode(
                    bytes(context.client_context, encoding='utf8'))))

        # custom command to execute in the container
        command = ''
        events_file = ''

        # if running a Java Lambda, set up classpath arguments
        if is_java_lambda(runtime):
            java_opts = Util.get_java_opts()
            stdin = None
            # copy executor jar into temp directory
            target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
            if not os.path.exists(target_file):
                cp_r(LAMBDA_EXECUTOR_JAR, target_file)
            # TODO cleanup once we have custom Java Docker image
            taskdir = '/var/task'
            events_file = '_lambda.events.%s.json' % short_uid()
            save_file(os.path.join(lambda_cwd, events_file), event_body)
            classpath = Util.get_java_classpath(target_file)
            command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
                (taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, events_file))

        # accept any self-signed certificates for outgoing calls from the Lambda
        if is_nodejs_runtime(runtime):
            environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'

        # determine the command to be executed (implemented by subclasses)
        cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)

        # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
        LOG.info('Running lambda cmd: %s' % cmd)
        result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)

        # clean up events file
        events_file and os.path.exists(events_file) and rm_rf(events_file)

        return result
Example #42
0
import os
import json
import time
from os.path import expanduser
from six.moves import queue
from localstack.config import TMP_FOLDER
from localstack.constants import API_ENDPOINT
from localstack.utils.common import (JsonObject, to_str, timestamp, short_uid,
                                     save_file, FuncThread, load_file)
from localstack.utils.common import safe_requests as requests

PROCESS_ID = short_uid()
MACHINE_ID = None

# event type constants
EVENT_START_INFRA = 'infra.start'

# sender thread and queue
SENDER_THREAD = None
EVENT_QUEUE = queue.PriorityQueue()


class AnalyticsEvent(JsonObject):
    def __init__(self, **kwargs):
        self.t = kwargs.get('timestamp') or kwargs.get('t') or timestamp()
        self.m_id = kwargs.get('machine_id') or kwargs.get(
            'm_id') or get_machine_id()
        self.p_id = kwargs.get('process_id') or kwargs.get(
            'p_id') or get_process_id()
        self.e_t = kwargs.get('event_type') or kwargs.get('e_t')
        self.p = kwargs.get('payload') or kwargs.get('p')
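
The constructor above accepts both long and short keyword names for each field. A stripped-down sketch of that fallback pattern, independent of the localstack helpers referenced above:

import time

class EventSketch:
    # mirrors the long/short keyword fallback used by AnalyticsEvent
    def __init__(self, **kwargs):
        self.t = kwargs.get('timestamp') or kwargs.get('t') or time.time()
        self.e_t = kwargs.get('event_type') or kwargs.get('e_t')
        self.p = kwargs.get('payload') or kwargs.get('p')

# both spellings populate the same attribute
assert EventSketch(event_type='infra.start').e_t == EventSketch(e_t='infra.start').e_t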
Example #43
0
    def Deployment_create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        props = cloudformation_json['Properties']
        name = props['StageName']
        deployment_id = props.get('Id') or short_uid()
        description = props.get('Description') or ''
        return apigw_models.Deployment(deployment_id, name, description)
Example #44
0
def set_function_code(code, lambda_name):

    def generic_handler(event, context):
        raise Exception(('Unable to find executor for Lambda function "%s". ' +
            'Note that Node.js and .NET Core Lambdas currently require LAMBDA_EXECUTOR=docker') % lambda_name)

    lambda_handler = generic_handler
    lambda_cwd = None
    arn = func_arn(lambda_name)
    runtime = arn_to_lambda[arn].runtime
    handler_name = arn_to_lambda.get(arn).handler
    lambda_environment = arn_to_lambda.get(arn).envvars
    if not handler_name:
        handler_name = LAMBDA_DEFAULT_HANDLER
    handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
    handler_function = get_handler_function_from_name(handler_name, runtime=runtime)

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)

    if 'S3Bucket' in code:
        s3_client = aws_stack.connect_to_service('s3')
        bytes_io = BytesIO()
        try:
            s3_client.download_fileobj(code['S3Bucket'], code['S3Key'], bytes_io)
            zip_file_content = bytes_io.getvalue()
        except Exception as e:
            return error_response('Unable to fetch Lambda archive from S3: %s' % e, 404)
    elif 'ZipFile' in code:
        zip_file_content = code['ZipFile']
        zip_file_content = base64.b64decode(zip_file_content)
    else:
        return error_response('No valid Lambda archive specified.', 400)

    # save tmp file
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    mkdir(tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_cwd = tmp_dir

    # check if this is a ZIP file
    is_zip = is_zip_file(zip_file_content)
    if is_zip:
        unzip(tmp_file, tmp_dir)
        main_file = '%s/%s' % (tmp_dir, handler_file)
        if not os.path.isfile(main_file):
            # check if this is a zip file that contains a single JAR file
            jar_files = glob.glob('%s/*.jar' % tmp_dir)
            if len(jar_files) == 1:
                main_file = jar_files[0]
        if os.path.isfile(main_file):
            # make sure the file is actually readable, then read contents
            ensure_readable(main_file)
            with open(main_file, 'rb') as file_obj:
                zip_file_content = file_obj.read()
        else:
            file_list = run('ls -la %s' % tmp_dir)
            LOG.debug('Lambda archive content:\n%s' % file_list)
            return error_response('Unable to find handler script in Lambda archive.', 400, error_type='ValidationError')

    # it could be a JAR file (regardless of whether wrapped in a ZIP file or not)
    is_jar = is_jar_archive(zip_file_content)
    if is_jar:

        def execute(event, context):
            result, log_output = lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(event, context,
                handler=arn_to_lambda[arn].handler, main_file=main_file)
            return result

        lambda_handler = execute

    elif runtime.startswith('python') and not use_docker():
        try:
            lambda_handler = exec_lambda_code(zip_file_content,
                handler_function=handler_function, lambda_cwd=lambda_cwd,
                lambda_env=lambda_environment)
        except Exception as e:
            raise Exception('Unable to get handler function from lambda code.', e)

    if not is_zip and not is_jar:
        raise Exception('Uploaded Lambda code is neither a ZIP nor JAR file.')

    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)

    return {'FunctionName': lambda_name}
Example #45
0
    def test_deploy_stack_change_set(self):
        cloudformation = aws_stack.connect_to_service('cloudformation')
        stack_name = 'stack-%s' % short_uid()
        change_set_name = 'change-set-%s' % short_uid()

        try:
            cloudformation.describe_stacks(
                StackName=stack_name
            )
            self.fail('This call should not be successful as the stack does not exist')

        except ClientError as e:
            self.assertEqual(e.response['Error']['Code'], 'ValidationError')

        rs = cloudformation.create_change_set(
            StackName=stack_name,
            ChangeSetName=change_set_name,
            TemplateBody=TEST_CHANGE_SET_BODY,
            Parameters=[
                {
                    'ParameterKey': 'EnvironmentType',
                    'ParameterValue': 'stage'
                }
            ],
            Capabilities=['CAPABILITY_IAM'],
        )

        self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)

        change_set_id = rs['Id']

        rs = cloudformation.describe_change_set(
            StackName=stack_name,
            ChangeSetName=change_set_name
        )
        self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
        self.assertEqual(rs['ChangeSetName'], change_set_name)
        self.assertEqual(rs['ChangeSetId'], change_set_id)
        self.assertEqual(rs['Status'], 'CREATE_COMPLETE')

        rs = cloudformation.execute_change_set(
            StackName=stack_name,
            ChangeSetName=change_set_name
        )
        self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)

        rs = cloudformation.describe_stacks(
            StackName=stack_name
        )
        self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)

        stack = rs['Stacks'][0]
        parameters = stack['Parameters']

        self.assertEqual(stack['StackName'], stack_name)
        self.assertEqual(parameters[0]['ParameterKey'], 'EnvironmentType')
        self.assertEqual(parameters[0]['ParameterValue'], 'stage')

        # clean up
        cloudformation.delete_change_set(
            StackName=stack_name,
            ChangeSetName=change_set_name
        )
        cloudformation.delete_stack(
            StackName=stack_name
        )
Example #46
0
def test_sqs_batch_lambda_forward(lambda_client, sqs_client,
                                  create_lambda_function):

    lambda_name_queue_batch = "lambda_queue_batch-%s" % short_uid()

    # deploy test lambda connected to SQS queue
    sqs_queue_info = testutil.create_sqs_queue(lambda_name_queue_batch)
    queue_url = sqs_queue_info["QueueUrl"]
    resp = create_lambda_function(
        handler_file=TEST_LAMBDA_PYTHON_ECHO,
        func_name=lambda_name_queue_batch,
        event_source_arn=sqs_queue_info["QueueArn"],
        libs=TEST_LAMBDA_LIBS,
    )

    event_source_id = resp["CreateEventSourceMappingResponse"]["UUID"]
    lambda_client.update_event_source_mapping(UUID=event_source_id,
                                              BatchSize=5)

    messages_to_send = [{
        "Id": "message{:02d}".format(i),
        "MessageBody": "msgBody{:02d}".format(i),
        "MessageAttributes": {
            "CustomAttribute": {
                "DataType": "String",
                "StringValue": "CustomAttributeValue{:02d}".format(i),
            }
        },
    } for i in range(1, 12)]

    # send 11 messages (which should get split into 3 batches)
    sqs_client.send_message_batch(QueueUrl=queue_url,
                                  Entries=messages_to_send[:10])
    sqs_client.send_message(
        QueueUrl=queue_url,
        MessageBody=messages_to_send[10]["MessageBody"],
        MessageAttributes=messages_to_send[10]["MessageAttributes"],
    )

    def wait_for_done():
        attributes = sqs_client.get_queue_attributes(
            QueueUrl=queue_url,
            AttributeNames=[
                "ApproximateNumberOfMessages",
                "ApproximateNumberOfMessagesDelayed",
                "ApproximateNumberOfMessagesNotVisible",
            ],
        )["Attributes"]
        msg_count = int(attributes.get("ApproximateNumberOfMessages"))
        assert 0 == msg_count, "expecting queue to be empty"

        delayed_count = int(
            attributes.get("ApproximateNumberOfMessagesDelayed"))
        if delayed_count != 0:
            LOGGER.warning(
                "SQS delayed message count (actual/expected): %s/%s",
                delayed_count, 0)

        not_visible_count = int(
            attributes.get("ApproximateNumberOfMessagesNotVisible"))
        if not_visible_count != 0:
            LOGGER.warning("SQS messages not visible (actual/expected): %s/%s",
                           not_visible_count, 0)

        assert 0 == delayed_count, "no messages waiting for retry"
        assert 0 == delayed_count + not_visible_count, "no in flight messages"

    # wait for the queue to drain (max 60s)
    retry(wait_for_done, retries=12, sleep=5.0)

    def check_lambda_logs():
        events = get_lambda_log_events(lambda_name_queue_batch, 10)
        assert 3 == len(events), "expected 3 lambda invocations"

    retry(check_lambda_logs, retries=5, sleep=3)

    sqs_client.delete_queue(QueueUrl=queue_url)
Example #47
0
    def test_create_delete_stack(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        cf_client = aws_stack.connect_to_service('cloudformation')
        s3 = aws_stack.connect_to_service('s3')
        sns = aws_stack.connect_to_service('sns')
        sqs = aws_stack.connect_to_service('sqs')
        apigateway = aws_stack.connect_to_service('apigateway')
        template = template_deployer.template_to_json(load_file(TEST_TEMPLATE_1))

        # deploy template
        stack_name = 'stack-%s' % short_uid()
        cloudformation.create_stack(StackName=stack_name, TemplateBody=template)

        # wait for deployment to finish
        def check_stack():
            stack = get_stack_details(stack_name)
            self.assertEqual(stack['StackStatus'], 'CREATE_COMPLETE')

        retry(check_stack, retries=3, sleep=2)

        # assert that resources have been created
        assert bucket_exists('cf-test-bucket-1')
        queue_url = queue_exists('cf-test-queue-1')
        assert queue_url
        topic_arn = topic_exists('%s-test-topic-1-1' % stack_name)
        assert topic_arn
        assert stream_exists('cf-test-stream-1')
        resource = describe_stack_resource(stack_name, 'SQSQueueNoNameProperty')
        assert queue_exists(resource['PhysicalResourceId'])
        assert ssm_param_exists('cf-test-param-1')

        # assert that tags have been created
        tags = s3.get_bucket_tagging(Bucket='cf-test-bucket-1')['TagSet']
        self.assertEqual(tags, [{'Key': 'foobar', 'Value': aws_stack.get_sqs_queue_url('cf-test-queue-1')}])
        tags = sns.list_tags_for_resource(ResourceArn=topic_arn)['Tags']
        self.assertEqual(tags, [
            {'Key': 'foo', 'Value': 'cf-test-bucket-1'},
            {'Key': 'bar', 'Value': aws_stack.s3_bucket_arn('cf-test-bucket-1')}
        ])
        queue_tags = sqs.list_queue_tags(QueueUrl=queue_url)
        self.assertIn('Tags', queue_tags)
        self.assertEqual(queue_tags['Tags'], {'key1': 'value1', 'key2': 'value2'})

        # assert that bucket notifications have been created
        notifs = s3.get_bucket_notification_configuration(Bucket='cf-test-bucket-1')
        self.assertIn('QueueConfigurations', notifs)
        self.assertIn('LambdaFunctionConfigurations', notifs)
        self.assertEqual(notifs['QueueConfigurations'][0]['QueueArn'], 'aws:arn:sqs:test:testqueue')
        self.assertEqual(notifs['QueueConfigurations'][0]['Events'], ['s3:ObjectDeleted:*'])
        self.assertEqual(notifs['LambdaFunctionConfigurations'][0]['LambdaFunctionArn'], 'aws:arn:lambda:test:testfunc')
        self.assertEqual(notifs['LambdaFunctionConfigurations'][0]['Events'], ['s3:ObjectCreated:*'])

        # assert that subscriptions have been created
        subs = sns.list_subscriptions()['Subscriptions']
        subs = [s for s in subs if (':%s:cf-test-queue-1' % TEST_AWS_ACCOUNT_ID) in s['Endpoint']]
        self.assertEqual(len(subs), 1)
        self.assertIn(':%s:%s-test-topic-1-1' % (TEST_AWS_ACCOUNT_ID, stack_name), subs[0]['TopicArn'])
        # assert that subscription attributes are added properly
        attrs = sns.get_subscription_attributes(SubscriptionArn=subs[0]['SubscriptionArn'])['Attributes']
        self.assertEqual(attrs, {'Endpoint': subs[0]['Endpoint'], 'Protocol': 'sqs',
            'SubscriptionArn': subs[0]['SubscriptionArn'], 'TopicArn': subs[0]['TopicArn'],
            'FilterPolicy': json.dumps({'eventType': ['created']})})

        # assert that Gateway responses have been created
        test_api_name = 'test-api'
        api = [a for a in apigateway.get_rest_apis()['items'] if a['name'] == test_api_name][0]
        responses = apigateway.get_gateway_responses(restApiId=api['id'])['items']
        self.assertEqual(len(responses), 2)
        types = [r['responseType'] for r in responses]
        self.assertEqual(set(types), set(['UNAUTHORIZED', 'DEFAULT_5XX']))

        # delete the stack
        cf_client.delete_stack(StackName=stack_name)

        # assert that resources have been deleted
        assert not bucket_exists('cf-test-bucket-1')
        assert not queue_exists('cf-test-queue-1')
        assert not topic_exists('%s-test-topic-1-1' % stack_name)
        retry(lambda: self.assertFalse(stream_exists('cf-test-stream-1')))
Example #48
0
def test_kinesis_lambda_sns_ddb_streams():

    ddb_lease_table_suffix = '-kclapp'
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    dynamodb_service = aws_stack.connect_to_service('dynamodb')
    dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
    kinesis = aws_stack.connect_to_service('kinesis')
    sns = aws_stack.connect_to_service('sns')

    LOGGER.info('Creating test streams...')
    run_safe(lambda: dynamodb_service.delete_table(
        TableName=TEST_STREAM_NAME + ddb_lease_table_suffix), print_error=False)
    aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

    # subscribe to inbound Kinesis stream
    def process_records(records, shard_id):
        EVENTS.extend(records)

    # start the KCL client process in the background
    kinesis_connector.listen_to_kinesis(TEST_STREAM_NAME, listener_func=process_records,
        wait_until_started=True, ddb_lease_table_suffix=ddb_lease_table_suffix)

    LOGGER.info('Kinesis consumer initialized.')

    # create table with stream forwarding config
    testutil.create_dynamodb_table(TEST_TABLE_NAME, partition_key=PARTITION_KEY,
        stream_view_type='NEW_AND_OLD_IMAGES')

    # list DDB streams and make sure the table stream is there
    streams = dynamodbstreams.list_streams()
    ddb_event_source_arn = None
    for stream in streams['Streams']:
        if stream['TableName'] == TEST_TABLE_NAME:
            ddb_event_source_arn = stream['StreamArn']
    assert ddb_event_source_arn

    # deploy test lambda connected to DynamoDB Stream
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
        zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)
    # make sure we cannot create Lambda with same name twice
    assert_raises(Exception, testutil.create_lambda_function, func_name=TEST_LAMBDA_NAME_DDB,
        zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

    # deploy test lambda connected to Kinesis Stream
    kinesis_event_source_arn = kinesis.describe_stream(
        StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)['StreamDescription']['StreamARN']
    testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM,
        zip_file=zip_file, event_source_arn=kinesis_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27)

    # set number of items to update/put to table
    num_events_ddb = 15
    num_put_new_items = 5
    num_put_existing_items = 2
    num_batch_items = 3
    num_updates_ddb = num_events_ddb - num_put_new_items - num_put_existing_items - num_batch_items
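    # i.e. 15 DynamoDB events = 5 new puts + 2 puts on existing IDs + 3 batch writes + 5 updates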

    LOGGER.info('Putting %s items to table...' % num_events_ddb)
    table = dynamodb.Table(TEST_TABLE_NAME)
    for i in range(0, num_put_new_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123'
        })
    # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
    for i in range(0, num_put_existing_items):
        table.put_item(Item={
            PARTITION_KEY: 'testId%s' % i,
            'data': 'foobar123_put_existing'
        })

    # batch write some items containing non-ASCII characters
    dynamodb.batch_write_item(RequestItems={TEST_TABLE_NAME: [
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ✓'}}},
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 £'}}},
        {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ¢'}}}
    ]})
    # update some items, which also triggers notification events
    for i in range(0, num_updates_ddb):
        dynamodb_service.update_item(TableName=TEST_TABLE_NAME,
            Key={PARTITION_KEY: {'S': 'testId%s' % i}},
            AttributeUpdates={'data': {
                'Action': 'PUT',
                'Value': {'S': 'foobar123_updated'}
            }})

    # put items to stream
    num_events_kinesis = 10
    LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
    kinesis.put_records(
        Records=[
            {
                'Data': '{}',
                'PartitionKey': 'testId%s' % i
            } for i in range(0, num_events_kinesis)
        ], StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
    )

    # put 1 item to stream that will trigger an error in the Lambda
    kinesis.put_record(Data='{"%s": 1}' % lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
        PartitionKey='testIderror', StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

    # create SNS topic, connect it to the Lambda, publish test message
    num_events_sns = 3
    response = sns.create_topic(Name=TEST_TOPIC_NAME)
    sns.subscribe(TopicArn=response['TopicArn'], Protocol='lambda',
        Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
    for i in range(0, num_events_sns):
        sns.publish(TopicArn=response['TopicArn'], Message='test message %s' % i)

    # get latest records
    latest = aws_stack.kinesis_get_latest_records(TEST_LAMBDA_SOURCE_STREAM_NAME,
        shard_id='shardId-000000000000', count=10)
    assert len(latest) == 10

    LOGGER.info('Waiting some time before finishing test.')
    time.sleep(2)

    num_events = num_events_ddb + num_events_kinesis + num_events_sns

    def check_events():
        if len(EVENTS) != num_events:
            LOGGER.warning(('DynamoDB and Kinesis updates retrieved ' +
                '(actual/expected): %s/%s') % (len(EVENTS), num_events))
        assert len(EVENTS) == num_events
        event_items = [json.loads(base64.b64decode(e['data'])) for e in EVENTS]
        inserts = [e for e in event_items if e.get('__action_type') == 'INSERT']
        modifies = [e for e in event_items if e.get('__action_type') == 'MODIFY']
        assert len(inserts) == num_put_new_items + num_batch_items
        assert len(modifies) == num_put_existing_items + num_updates_ddb

    # this can take a long time in CI, make sure we give it enough time/retries
    retry(check_events, retries=7, sleep=3)

    # the check above also verifies that we received the right number of INSERT/MODIFY event types

    # check cloudwatch notifications
    stats1 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM)
    assert len(stats1['Datapoints']) == 2 + num_events_sns
    stats2 = get_lambda_metrics(TEST_LAMBDA_NAME_STREAM, 'Errors')
    assert len(stats2['Datapoints']) == 1
    stats3 = get_lambda_metrics(TEST_LAMBDA_NAME_DDB)
    assert len(stats3['Datapoints']) == num_events_ddb
Example #49
from localstack.utils.common import load_file, retry, short_uid, to_str
from localstack.utils.cloudformation import template_deployer
from botocore.exceptions import ClientError
from botocore.parsers import ResponseParserError

THIS_FOLDER = os.path.dirname(os.path.realpath(__file__))
TEST_TEMPLATE_1 = os.path.join(THIS_FOLDER, 'templates', 'template1.yaml')
TEST_TEMPLATE_2 = os.path.join(THIS_FOLDER, 'templates', 'template2.yaml')
TEST_TEMPLATE_3 = """
AWSTemplateFormatVersion: 2010-09-09
Resources:
  S3Setup:
    Type: AWS::S3::Bucket
    Properties:
      BucketName: test-%s
""" % short_uid()
TEST_TEMPLATE_4 = """
AWSTemplateFormatVersion: 2010-09-09
Transform: AWS::Serverless-2016-10-31
Parameters:
  LambdaRuntime:
    Type: String
    Default: python3.6
Resources:
  MyRole:
    Type: AWS::IAM::Role
    Properties:
      RoleName: test-role-123
      AssumeRolePolicyDocument: {}
  MyFunc:
    Type: AWS::Serverless::Function
Example #50
    def test_dynamodb_stream_records_with_update_item(self):
        table_name = 'test-ddb-table-%s' % short_uid()
        dynamodb = aws_stack.connect_to_service('dynamodb')
        ddbstreams = aws_stack.connect_to_service('dynamodbstreams')

        aws_stack.create_dynamodb_table(table_name,
                                        partition_key=PARTITION_KEY,
                                        stream_view_type='NEW_AND_OLD_IMAGES')
        table = aws_stack.connect_to_resource('dynamodb').Table(table_name)

        response = ddbstreams.describe_stream(
            StreamArn=table.latest_stream_arn)
        self.assertEqual(200, response['ResponseMetadata']['HTTPStatusCode'])
        self.assertEqual(1, len(response['StreamDescription']['Shards']))
        shard_id = response['StreamDescription']['Shards'][0]['ShardId']
        starting_sequence_number = int(
            response['StreamDescription']['Shards'][0].get(
                'SequenceNumberRange').get('StartingSequenceNumber'))

        response = ddbstreams.get_shard_iterator(
            StreamArn=table.latest_stream_arn,
            ShardId=shard_id,
            ShardIteratorType='LATEST')
        self.assertEqual(200, response['ResponseMetadata']['HTTPStatusCode'])
        self.assertIn('ShardIterator', response)
        iterator_id = response['ShardIterator']

        item_id = short_uid()
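        # update the same key twice: the first call creates the item (INSERT), the second modifies it (MODIFY)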
        for _ in range(2):
            dynamodb.update_item(
                TableName=table_name,
                Key={PARTITION_KEY: {
                    'S': item_id
                }},
                UpdateExpression='SET attr1 = :v1, attr2 = :v2',
                ExpressionAttributeValues={
                    ':v1': {
                        'S': 'value1'
                    },
                    ':v2': {
                        'S': 'value2'
                    }
                },
                ReturnValues='ALL_NEW',
                ReturnConsumedCapacity='INDEXES',
            )

        records = ddbstreams.get_records(ShardIterator=iterator_id)
        self.assertEqual(200, records['ResponseMetadata']['HTTPStatusCode'])
        self.assertEqual(2, len(records['Records']))
        self.assertIsInstance(
            records['Records'][0]['dynamodb']['ApproximateCreationDateTime'],
            datetime)
        self.assertEqual('1.1', records['Records'][0]['eventVersion'])
        self.assertEqual('INSERT', records['Records'][0]['eventName'])
        self.assertNotIn('OldImage', records['Records'][0]['dynamodb'])
        self.assertGreater(
            int(records['Records'][0]['dynamodb']['SequenceNumber']),
            starting_sequence_number)
        self.assertIsInstance(
            records['Records'][1]['dynamodb']['ApproximateCreationDateTime'],
            datetime)
        self.assertEqual('1.1', records['Records'][1]['eventVersion'])
        self.assertEqual('MODIFY', records['Records'][1]['eventName'])
        self.assertIn('OldImage', records['Records'][1]['dynamodb'])
        self.assertGreater(
            int(records['Records'][1]['dynamodb']['SequenceNumber']),
            starting_sequence_number)

        dynamodb.delete_table(TableName=table_name)
Example #51
    def test_kinesis_lambda_sns_ddb_sqs_streams(self):
        ddb_lease_table_suffix = '-kclapp'
        table_name = TEST_TABLE_NAME + 'klsdss' + ddb_lease_table_suffix
        stream_name = TEST_STREAM_NAME
        dynamodb = aws_stack.connect_to_resource('dynamodb')
        dynamodb_service = aws_stack.connect_to_service('dynamodb')
        dynamodbstreams = aws_stack.connect_to_service('dynamodbstreams')
        kinesis = aws_stack.connect_to_service('kinesis')
        sns = aws_stack.connect_to_service('sns')
        sqs = aws_stack.connect_to_service('sqs')

        LOGGER.info('Creating test streams...')
        run_safe(lambda: dynamodb_service.delete_table(TableName=stream_name +
                                                       ddb_lease_table_suffix),
                 print_error=False)
        aws_stack.create_kinesis_stream(stream_name, delete=True)
        aws_stack.create_kinesis_stream(TEST_LAMBDA_SOURCE_STREAM_NAME)

        events = []

        # subscribe to inbound Kinesis stream
        def process_records(records, shard_id):
            events.extend(records)

        # start the KCL client process in the background
        kinesis_connector.listen_to_kinesis(
            stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix=ddb_lease_table_suffix)

        LOGGER.info('Kinesis consumer initialized.')

        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(table_name,
                                        partition_key=PARTITION_KEY,
                                        stream_view_type='NEW_AND_OLD_IMAGES')

        # list DDB streams and make sure the table stream is there
        streams = dynamodbstreams.list_streams()
        ddb_event_source_arn = None
        for stream in streams['Streams']:
            if stream['TableName'] == table_name:
                ddb_event_source_arn = stream['StreamArn']
        self.assertTrue(ddb_event_source_arn)

        # deploy test lambda connected to DynamoDB Stream
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB,
                                        zip_file=zip_file,
                                        event_source_arn=ddb_event_source_arn,
                                        runtime=LAMBDA_RUNTIME_PYTHON27,
                                        delete=True)
        # make sure we cannot create Lambda with same name twice
        assert_raises(Exception,
                      testutil.create_lambda_function,
                      func_name=TEST_LAMBDA_NAME_DDB,
                      zip_file=zip_file,
                      event_source_arn=ddb_event_source_arn,
                      runtime=LAMBDA_RUNTIME_PYTHON27)

        # deploy test lambda connected to Kinesis Stream
        kinesis_event_source_arn = kinesis.describe_stream(
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME
        )['StreamDescription']['StreamARN']
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_STREAM,
            zip_file=zip_file,
            event_source_arn=kinesis_event_source_arn,
            runtime=LAMBDA_RUNTIME_PYTHON27)

        # deploy test lambda connected to SQS queue
        sqs_queue_info = testutil.create_sqs_queue(TEST_LAMBDA_NAME_QUEUE)
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_QUEUE,
            zip_file=zip_file,
            event_source_arn=sqs_queue_info['QueueArn'],
            runtime=LAMBDA_RUNTIME_PYTHON27)

        # set number of items to update/put to table
        num_events_ddb = 15
        num_put_new_items = 5
        num_put_existing_items = 2
        num_batch_items = 3
        num_updates_ddb = num_events_ddb - num_put_new_items - num_put_existing_items - num_batch_items
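        # i.e. 15 DynamoDB events = 5 new puts + 2 puts on existing IDs + 3 batch writes + 5 updates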

        LOGGER.info('Putting %s items to table...' % num_events_ddb)
        table = dynamodb.Table(table_name)
        for i in range(0, num_put_new_items):
            table.put_item(Item={
                PARTITION_KEY: 'testId%s' % i,
                'data': 'foobar123'
            })
        # Put items with an already existing ID (fix https://github.com/localstack/localstack/issues/522)
        for i in range(0, num_put_existing_items):
            table.put_item(Item={
                PARTITION_KEY: 'testId%s' % i,
                'data': 'foobar123_put_existing'
            })

        # batch write some items containing non-ASCII characters
        dynamodb.batch_write_item(RequestItems={table_name: [
            {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ✓'}}},
            {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 £'}}},
            {'PutRequest': {'Item': {PARTITION_KEY: short_uid(), 'data': 'foobar123 ¢'}}}
        ]})
        # update some items, which also triggers notification events
        for i in range(0, num_updates_ddb):
            dynamodb_service.update_item(
                TableName=table_name,
                Key={PARTITION_KEY: {
                    'S': 'testId%s' % i
                }},
                AttributeUpdates={
                    'data': {
                        'Action': 'PUT',
                        'Value': {
                            'S': 'foobar123_updated'
                        }
                    }
                })

        # put items to stream
        num_events_kinesis = 10
        LOGGER.info('Putting %s items to stream...' % num_events_kinesis)
        kinesis.put_records(Records=[{
            'Data': '{}',
            'PartitionKey': 'testId%s' % i
        } for i in range(0, num_events_kinesis)],
                            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

        # put 1 item to stream that will trigger an error in the Lambda
        kinesis.put_record(
            Data='{"%s": 1}' % lambda_integration.MSG_BODY_RAISE_ERROR_FLAG,
            PartitionKey='testIderror',
            StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)

        # create SNS topic, connect it to the Lambda, publish test messages
        num_events_sns = 3
        response = sns.create_topic(Name=TEST_TOPIC_NAME)
        sns.subscribe(
            TopicArn=response['TopicArn'],
            Protocol='lambda',
            Endpoint=aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_STREAM))
        for i in range(0, num_events_sns):
            sns.publish(TopicArn=response['TopicArn'],
                        Subject='test_subject',
                        Message='test message %s' % i)

        # get latest records
        latest = aws_stack.kinesis_get_latest_records(
            TEST_LAMBDA_SOURCE_STREAM_NAME,
            shard_id='shardId-000000000000',
            count=10)
        self.assertEqual(len(latest), 10)

        # send messages to SQS queue
        num_events_sqs = 4
        for i in range(num_events_sqs):
            sqs.send_message(QueueUrl=sqs_queue_info['QueueUrl'],
                             MessageBody=str(i))

        LOGGER.info('Waiting some time before finishing test.')
        time.sleep(2)

        num_events_lambda = num_events_ddb + num_events_sns + num_events_sqs
        num_events = num_events_lambda + num_events_kinesis

        def check_events():
            if len(events) != num_events:
                LOGGER.warning((
                    'DynamoDB and Kinesis updates retrieved (actual/expected): %s/%s'
                ) % (len(events), num_events))
            self.assertEqual(len(events), num_events)
            event_items = [
                json.loads(base64.b64decode(e['data'])) for e in events
            ]
            # make sure we have the right number of INSERT/MODIFY event types
            inserts = [
                e for e in event_items if e.get('__action_type') == 'INSERT'
            ]
            modifies = [
                e for e in event_items if e.get('__action_type') == 'MODIFY'
            ]
            self.assertEqual(len(inserts), num_put_new_items + num_batch_items)
            self.assertEqual(len(modifies),
                             num_put_existing_items + num_updates_ddb)

        # this can take a long time in CI, make sure we give it enough time/retries
        retry(check_events, retries=9, sleep=3)

        # check cloudwatch notifications
        num_invocations = get_lambda_invocations_count(TEST_LAMBDA_NAME_STREAM)
        # TODO: It seems that CloudWatch is currently reporting an incorrect number of
        #   invocations, namely the sum over *all* lambdas, not the single one we're asking for.
        #   Also, we need to bear in mind that Kinesis may perform batch updates, i.e., a single
        #   Lambda invocation may happen with a set of Kinesis records, hence we cannot simply
        #   add num_events_ddb to num_events_lambda above!
        # self.assertEqual(num_invocations, 2 + num_events_lambda)
        self.assertGreater(num_invocations, num_events_sns + num_events_sqs)
        num_error_invocations = get_lambda_invocations_count(
            TEST_LAMBDA_NAME_STREAM, 'Errors')
        self.assertEqual(num_error_invocations, 1)

        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_STREAM)
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_DDB)
Example #52
    def test_transaction_write_items(self):
        table_name = 'test-ddb-table-%s' % short_uid()
        dynamodb = aws_stack.connect_to_service('dynamodb')

        dynamodb.create_table(TableName=table_name,
                              KeySchema=[{
                                  'AttributeName': 'id',
                                  'KeyType': 'HASH'
                              }],
                              AttributeDefinitions=[{
                                  'AttributeName': 'id',
                                  'AttributeType': 'S'
                              }],
                              ProvisionedThroughput={
                                  'ReadCapacityUnits': 5,
                                  'WriteCapacityUnits': 5
                              },
                              Tags=TEST_DDB_TAGS)

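        # a single transaction exercising all four operation types: ConditionCheck, Put, Update and Delete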
        response = dynamodb.transact_write_items(TransactItems=[{
            'ConditionCheck': {
                'TableName': table_name,
                'ConditionExpression': 'attribute_not_exists(id)',
                'Key': {
                    'id': {
                        'S': 'test1'
                    }
                }
            }
        }, {
            'Put': {
                'TableName': table_name,
                'Item': {
                    'id': {
                        'S': 'test2'
                    }
                }
            }
        }, {
            'Update': {
                'TableName': table_name,
                'Key': {
                    'id': {
                        'S': 'test3'
                    }
                },
                'UpdateExpression': 'SET attr1 = :v1, attr2 = :v2',
                'ExpressionAttributeValues': {
                    ':v1': {
                        'S': 'value1'
                    },
                    ':v2': {
                        'S': 'value2'
                    }
                }
            }
        }, {
            'Delete': {
                'TableName': table_name,
                'Key': {
                    'id': {
                        'S': 'test4'
                    }
                }
            }
        }])

        self.assertEqual(200, response['ResponseMetadata']['HTTPStatusCode'])

        # clean up
        dynamodb.delete_table(TableName=table_name)
Example #53
def test_firehose_http(lambda_processor_enabled: bool):
    class MyUpdateListener(ProxyListener):
        def forward_request(self, method, path, data, headers):
            data_received = dict(json.loads(data.decode("utf-8")))
            records.append(data_received)
            return 200

    if lambda_processor_enabled:
        # create processor func
        func_name = f"proc-{short_uid()}"
        testutil.create_lambda_function(handler_file=PROCESSOR_LAMBDA,
                                        func_name=func_name)

    # define firehose configs
    local_port = get_free_tcp_port()
    endpoint = "{}://{}:{}".format(get_service_protocol(),
                                   config.LOCALSTACK_HOSTNAME, local_port)
    records = []
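    # populated by MyUpdateListener above; the listener closes over this name, which is bound before any request arrives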
    http_destination_update = {
        "EndpointConfiguration": {
            "Url": endpoint,
            "Name": "test_update"
        }
    }
    http_destination = {
        "EndpointConfiguration": {
            "Url": endpoint
        },
        "S3BackupMode": "FailedDataOnly",
        "S3Configuration": {
            "RoleARN": "arn:.*",
            "BucketARN": "arn:.*",
            "Prefix": "",
            "ErrorOutputPrefix": "",
            "BufferingHints": {
                "SizeInMBs": 1,
                "IntervalInSeconds": 60
            },
        },
    }

    if lambda_processor_enabled:
        http_destination["ProcessingConfiguration"] = {
            "Enabled": True,
            "Processors": [{
                "Type": "Lambda",
                "Parameters": [{
                    "ParameterName": "LambdaArn",
                    "ParameterValue": lambda_function_arn(func_name),
                }],
            }],
        }

    # start proxy server
    start_proxy(local_port,
                backend_url=None,
                update_listener=MyUpdateListener())
    wait_for_port_open(local_port)

    # create firehose stream with http destination
    firehose = aws_stack.create_external_boto_client("firehose")
    stream_name = "firehose_" + short_uid()
    stream = firehose.create_delivery_stream(
        DeliveryStreamName=stream_name,
        HttpEndpointDestinationConfiguration=http_destination,
    )
    assert stream
    stream_description = firehose.describe_delivery_stream(
        DeliveryStreamName=stream_name)
    stream_description = stream_description["DeliveryStreamDescription"]
    destination_description = stream_description["Destinations"][0][
        "HttpEndpointDestinationDescription"]
    assert len(stream_description["Destinations"]) == 1
    assert (destination_description["EndpointConfiguration"]["Url"] ==
            f"http://localhost:{local_port}")

    # put record
    msg_text = "Hello World!"
    firehose.put_record(DeliveryStreamName=stream_name,
                        Record={"Data": msg_text})

    # wait for the result to arrive with proper content
    def _assert_record():
        received_record = records[0]["records"][0]
        received_record_data = to_str(
            base64.b64decode(to_bytes(received_record["data"])))
        assert (
            received_record_data ==
            f"{msg_text}{'-processed' if lambda_processor_enabled else ''}")

    retry(_assert_record, retries=5, sleep=1)

    # update stream destination
    destination_id = stream_description["Destinations"][0]["DestinationId"]
    version_id = stream_description["VersionId"]
    firehose.update_destination(
        DeliveryStreamName=stream_name,
        DestinationId=destination_id,
        CurrentDeliveryStreamVersionId=version_id,
        HttpEndpointDestinationUpdate=http_destination_update,
    )
    stream_description = firehose.describe_delivery_stream(
        DeliveryStreamName=stream_name)
    stream_description = stream_description["DeliveryStreamDescription"]
    destination_description = stream_description["Destinations"][0][
        "HttpEndpointDestinationDescription"]
    assert destination_description["EndpointConfiguration"]["Name"] == "test_update"

    # delete stream
    stream = firehose.delete_delivery_stream(DeliveryStreamName=stream_name)
    assert stream["ResponseMetadata"]["HTTPStatusCode"] == 200
Example #54
from localstack import config
from localstack.services.generic_proxy import ProxyListener
from localstack.services.infra import start_proxy
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
    get_free_tcp_port,
    get_service_protocol,
    retry,
    short_uid,
    to_bytes,
    to_str,
    wait_for_port_open,
)

TEST_STREAM_NAME = "firehose_test_" + short_uid()


class FirehoseTest(unittest.TestCase):
    def test_firehose_http(self):
        class MyUpdateListener(ProxyListener):
            def forward_request(self, method, path, data, headers):
                data_received = dict(json.loads(data.decode("utf-8")))
                records.append(data_received)
                return 200

        firehose = aws_stack.connect_to_service("firehose")
        local_port = get_free_tcp_port()
        endpoint = "{}://{}:{}".format(
            get_service_protocol(), config.LOCALSTACK_HOSTNAME, local_port
        )
Example #55
    def test_publish_get_delete_message_batch(self):
        queue_name = "queue-%s" % short_uid()
        queue_info = self.client.create_queue(QueueName=queue_name)
        queue_url = queue_info["QueueUrl"]
        self.assertIn(queue_name, queue_url)

        def receive_messages(**kwargs):
            kwds = dict(
                QueueUrl=queue_url,
                MaxNumberOfMessages=10,
                MessageAttributeNames=["All"],
            )
            kwds.update(kwargs)
            messages = self.client.receive_message(**kwds)
            return messages

        def get_hashes(messages, outgoing=False):
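            # send responses report 'MD5OfMessageBody', received messages report 'MD5OfBody'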
            body_key = "MD5OfMessageBody" if outgoing else "MD5OfBody"
            return set([(m[body_key], m["MD5OfMessageAttributes"])
                        for m in messages])

        messages_to_send = [{
            "Id": "message{:02d}".format(i),
            "MessageBody": "msgBody{:02d}".format(i),
            "MessageAttributes": {
                "CustomAttribute": {
                    "DataType": "String",
                    "StringValue": "CustomAttributeValue{:02d}".format(i),
                }
            },
        } for i in range(1, 11)]

        resp = self.client.send_message_batch(QueueUrl=queue_url,
                                              Entries=messages_to_send)
        sent_hashes = get_hashes(resp.get("Successful", []), outgoing=True)
        self.assertEqual(len(sent_hashes), len(messages_to_send))

        for i in range(2):
            messages = receive_messages(VisibilityTimeout=0)["Messages"]
            received_hashes = get_hashes(messages)
            self.assertEqual(received_hashes, sent_hashes)

        self.client.delete_message_batch(
            QueueUrl=queue_url,
            Entries=[{
                "Id": "{:02d}".format(i),
                "ReceiptHandle": m["ReceiptHandle"]
            } for i, m in enumerate(messages)],
        )

        response = receive_messages()
        self.assertFalse(response.get("Messages"))

        # publish/receive message with change_message_visibility
        self.client.send_message_batch(QueueUrl=queue_url,
                                       Entries=messages_to_send)
        messages = receive_messages()["Messages"]
        response = receive_messages()
        self.assertFalse(response.get("Messages"))

        reset_hashes = get_hashes(messages[:5])
        self.client.change_message_visibility_batch(
            QueueUrl=queue_url,
            Entries=[{
                "Id": "{:02d}".format(i),
                "ReceiptHandle": msg["ReceiptHandle"],
                "VisibilityTimeout": 0,
            } for i, msg in enumerate(messages[:5])],
        )
        for i in range(2):
            messages = receive_messages(VisibilityTimeout=0)["Messages"]
            received_hashes = get_hashes(messages)
            self.assertEqual(reset_hashes, received_hashes)

        # clean up
        self.client.delete_queue(QueueUrl=queue_url)
Example #56
def generate_default_name(stack_name: str, logical_resource_id: str):
    random_id_part = short_uid()
    resource_id_part = logical_resource_id[:24]
    stack_name_part = stack_name[:63 - 2 - (len(random_id_part) + len(resource_id_part))]
    return f"{stack_name_part}-{resource_id_part}-{random_id_part}"
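
# A minimal usage sketch (added for illustration; the inputs below are made up):
# the slice above sizes the stack-name part so that the joined
# "<stack>-<resource>-<uid>" never exceeds the 63-character limit.
example_name = generate_default_name('a' * 200, 'MyQueueResource')
assert len(example_name) <= 63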
Example #57
import os
import json
import time
from six.moves import queue
from localstack.config import TMP_FOLDER, CONFIG_FILE_PATH
from localstack.constants import API_ENDPOINT, ENV_INTERNAL_TEST_RUN
from localstack.utils.common import (JsonObject, to_str,
    timestamp, short_uid, save_file, FuncThread, load_file)
from localstack.utils.common import safe_requests as requests

PROCESS_ID = short_uid()
MACHINE_ID = None

# event type constants
EVENT_START_INFRA = 'inf.up'
EVENT_STOP_INFRA = 'inf.dn'
EVENT_KINESIS_CREATE_STREAM = 'kns.cs'
EVENT_KINESIS_DELETE_STREAM = 'kns.ds'
EVENT_LAMBDA_CREATE_FUNC = 'lmb.cf'
EVENT_LAMBDA_DELETE_FUNC = 'lmb.df'
EVENT_SQS_CREATE_QUEUE = 'sqs.cq'
EVENT_SQS_DELETE_QUEUE = 'sqs.dq'
EVENT_S3_CREATE_BUCKET = 's3.cb'
EVENT_S3_DELETE_BUCKET = 's3.db'
EVENT_DYNAMODB_CREATE_TABLE = 'ddb.ct'
EVENT_DYNAMODB_DELETE_TABLE = 'ddb.dt'

# sender thread and queue
SENDER_THREAD = None
EVENT_QUEUE = queue.Queue()