Example #1
    def test_firehose_s3(self):
        s3_resource = aws_stack.connect_to_resource("s3")
        firehose = aws_stack.connect_to_service("firehose")

        s3_prefix = "/testdata"
        test_data = '{"test": "firehose_data_%s"}' % short_uid()
        # create Firehose stream
        stream = firehose.create_delivery_stream(
            DeliveryStreamName=TEST_FIREHOSE_NAME,
            S3DestinationConfiguration={
                "RoleARN": aws_stack.iam_resource_arn("firehose"),
                "BucketARN": aws_stack.s3_bucket_arn(TEST_BUCKET_NAME),
                "Prefix": s3_prefix,
            },
            Tags=TEST_TAGS,
        )
        self.assertTrue(stream)
        self.assertIn(TEST_FIREHOSE_NAME, firehose.list_delivery_streams()["DeliveryStreamNames"])
        tags = firehose.list_tags_for_delivery_stream(DeliveryStreamName=TEST_FIREHOSE_NAME)
        self.assertEqual(TEST_TAGS, tags["Tags"])
        # create target S3 bucket
        s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME)

        # put records
        firehose.put_record(
            DeliveryStreamName=TEST_FIREHOSE_NAME, Record={"Data": to_bytes(test_data)}
        )
        # check records in target bucket
        all_objects = testutil.list_all_s3_objects()
        testutil.assert_objects(json.loads(to_str(test_data)), all_objects)
        # check file layout in target bucket
        all_objects = testutil.map_all_s3_objects(buckets=[TEST_BUCKET_NAME])
        for key in all_objects.keys():
            self.assertRegex(key, r".*/\d{4}/\d{2}/\d{2}/\d{2}/.*\-\d{4}\-\d{2}\-\d{2}\-\d{2}.*")
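The layout check above asserts Firehose's default date-partitioned key structure (prefix, then year/month/day/hour folders, then a timestamped object name). A minimal illustration of the pattern with a hypothetical key:

import re

# hypothetical delivered key: <prefix>/<YYYY>/<MM>/<DD>/<HH>/<name>-<YYYY>-<MM>-<DD>-<HH>-...
sample_key = "testdata/2021/06/30/15/test-firehose-2021-06-30-15-48-00-example"
assert re.search(r".*/\d{4}/\d{2}/\d{2}/\d{2}/.*\-\d{4}\-\d{2}\-\d{2}\-\d{2}.*", sample_key)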
Example #2
def test_firehose_s3():

    s3_resource = aws_stack.connect_to_resource('s3')
    firehose = aws_stack.connect_to_service('firehose')

    s3_prefix = '/testdata'
    test_data = '{"test": "firehose_data_%s"}' % short_uid()
    # create Firehose stream
    stream = firehose.create_delivery_stream(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        S3DestinationConfiguration={
            'RoleARN': aws_stack.iam_resource_arn('firehose'),
            'BucketARN': aws_stack.s3_bucket_arn(TEST_BUCKET_NAME),
            'Prefix': s3_prefix
        }
    )
    assert stream
    assert TEST_FIREHOSE_NAME in firehose.list_delivery_streams()['DeliveryStreamNames']
    # create target S3 bucket
    s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME)

    # put records
    firehose.put_record(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        Record={
            'Data': to_bytes(test_data)
        }
    )
    # check records in target bucket
    all_objects = testutil.list_all_s3_objects()
    testutil.assert_objects(json.loads(to_str(test_data)), all_objects)
Example #3
    def test_add_lambda_permission(self):
        iam_client = aws_stack.connect_to_service('iam')

        # create lambda permission
        action = 'lambda:InvokeFunction'
        resp = self.lambda_client.add_permission(
            FunctionName=TEST_LAMBDA_NAME_PY,
            Action=action,
            StatementId='s3',
            Principal='s3.amazonaws.com',
            SourceArn=aws_stack.s3_bucket_arn('test-bucket'))
        self.assertIn('Statement', resp)
        # fetch lambda policy
        policy = self.lambda_client.get_policy(
            FunctionName=TEST_LAMBDA_NAME_PY)['Policy']
        # get_policy returns the policy document as a JSON string
        policy = json.loads(to_str(policy))
        self.assertEqual(policy['Statement'][0]['Action'], action)
        self.assertEqual(policy['Statement'][0]['Resource'],
                         lambda_api.func_arn(TEST_LAMBDA_NAME_PY))
        # fetch IAM policy
        policies = iam_client.list_policies(Scope='Local',
                                            MaxItems=500)['Policies']
        matching = [
            p for p in policies
            if p['PolicyName'] == 'lambda_policy_%s' % TEST_LAMBDA_NAME_PY
        ]
        self.assertEqual(len(matching), 1)
        self.assertIn(':policy/', matching[0]['Arn'])

        # remove permission that we just added
        resp = self.lambda_client.remove_permission(
            FunctionName=TEST_LAMBDA_NAME_PY,
            StatementId=resp['Statement'],
            Qualifier='qual1',
            RevisionId='r1')
        self.assertEqual(resp['ResponseMetadata']['HTTPStatusCode'], 200)
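Note that add_permission returns the created statement as a JSON-encoded string (and get_policy returns the whole policy the same way), which is why the policy is parsed before indexing above. A minimal illustration with hypothetical values:

import json

# both fields arrive as JSON strings rather than dicts (values here are made up)
resp = {"Statement": json.dumps({"Sid": "s3", "Action": "lambda:InvokeFunction"})}
statement = json.loads(resp["Statement"])
assert statement["Sid"] == "s3"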
Example #4
def test_firehose_s3(env=ENV_DEV):

    s3_resource = aws_stack.connect_to_resource('s3', env=env)
    s3_client = aws_stack.connect_to_service('s3', env=env)
    firehose = aws_stack.connect_to_service('firehose', env=env)

    s3_prefix = '/testdata'
    bucket_name = 'test_bucket'
    test_data = b'{"test": "data123"}'
    # create Firehose stream
    stream = firehose.create_delivery_stream(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        S3DestinationConfiguration={
            'RoleARN': aws_stack.iam_resource_arn('firehose'),
            'BucketARN': aws_stack.s3_bucket_arn(bucket_name),
            'Prefix': s3_prefix
        }
    )
    # create target S3 bucket
    s3_resource.create_bucket(Bucket=bucket_name)

    # put records
    firehose.put_record(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        Record={
            'Data': test_data
        }
    )
    # check records in target bucket
    all_objects = testutil.list_all_s3_objects()
    testutil.assert_objects(json.loads(test_data), all_objects)
Example #5
    def test_cfn_handle_events_rule(self):
        stack_name = 'stack-%s' % short_uid()
        bucket_name = 'target-%s' % short_uid()
        rule_prefix = 's3-rule-%s' % short_uid()
        rule_name = '%s-%s' % (rule_prefix, short_uid())

        cfn = aws_stack.connect_to_service('cloudformation')
        events = aws_stack.connect_to_service('events')

        _deploy_stack(stack_name=stack_name, template_body=TEST_TEMPLATE_16 % (bucket_name, rule_name))

        rs = events.list_rules(
            NamePrefix=rule_prefix
        )
        self.assertIn(rule_name, [rule['Name'] for rule in rs['Rules']])

        target_arn = aws_stack.s3_bucket_arn(bucket_name)
        rs = events.list_targets_by_rule(
            Rule=rule_name
        )
        self.assertIn(target_arn, [target['Arn'] for target in rs['Targets']])

        cfn.delete_stack(StackName=stack_name)

        rs = events.list_rules(
            NamePrefix=rule_prefix
        )
        self.assertNotIn(rule_name, [rule['Name'] for rule in rs['Rules']])
Example #6
def test_firehose_s3():

    s3_resource = aws_stack.connect_to_resource('s3')
    firehose = aws_stack.connect_to_service('firehose')

    s3_prefix = '/testdata'
    test_data = '{"test": "firehose_data_%s"}' % short_uid()
    # create Firehose stream
    stream = firehose.create_delivery_stream(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        S3DestinationConfiguration={
            'RoleARN': aws_stack.iam_resource_arn('firehose'),
            'BucketARN': aws_stack.s3_bucket_arn(TEST_BUCKET_NAME),
            'Prefix': s3_prefix
        }
    )
    assert stream
    assert TEST_FIREHOSE_NAME in firehose.list_delivery_streams()['DeliveryStreamNames']
    # create target S3 bucket
    s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME)

    # put records
    firehose.put_record(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        Record={
            'Data': to_bytes(test_data)
        }
    )
    # check records in target bucket
    all_objects = testutil.list_all_s3_objects()
    testutil.assert_objects(json.loads(to_str(test_data)), all_objects)
Example #7
    def test_apply_template(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        s3 = aws_stack.connect_to_service('s3')
        sns = aws_stack.connect_to_service('sns')
        apigateway = aws_stack.connect_to_service('apigateway')
        template = template_deployer.template_to_json(load_file(TEST_TEMPLATE_1))

        # deploy template
        stack_name = 'stack-%s' % short_uid()
        cloudformation.create_stack(StackName=stack_name, TemplateBody=template)

        # wait for deployment to finish
        def check_stack():
            stack = get_stack_details(stack_name)
            self.assertEqual(stack['StackStatus'], 'CREATE_COMPLETE')

        retry(check_stack, retries=3, sleep=2)

        # assert that bucket has been created
        assert bucket_exists('cf-test-bucket-1')
        # assert that queue has been created
        assert queue_exists('cf-test-queue-1')
        # assert that topic has been created
        topic_arn = topic_exists('%s-test-topic-1-1' % stack_name)
        assert topic_arn
        # assert that stream has been created
        assert stream_exists('cf-test-stream-1')
        # assert that queue has been created
        resource = describe_stack_resource(stack_name, 'SQSQueueNoNameProperty')
        assert queue_exists(resource['PhysicalResourceId'])

        # assert that tags have been created
        tags = s3.get_bucket_tagging(Bucket='cf-test-bucket-1')['TagSet']
        self.assertEqual(tags, [{'Key': 'foobar', 'Value': aws_stack.get_sqs_queue_url('cf-test-queue-1')}])
        tags = sns.list_tags_for_resource(ResourceArn=topic_arn)['Tags']
        self.assertEqual(tags, [
            {'Key': 'foo', 'Value': 'cf-test-bucket-1'},
            {'Key': 'bar', 'Value': aws_stack.s3_bucket_arn('cf-test-bucket-1')}
        ])
        # assert that subscriptions have been created
        subs = sns.list_subscriptions()['Subscriptions']
        subs = [s for s in subs if (':%s:cf-test-queue-1' % TEST_AWS_ACCOUNT_ID) in s['Endpoint']]
        self.assertEqual(len(subs), 1)
        self.assertIn(':%s:%s-test-topic-1-1' % (TEST_AWS_ACCOUNT_ID, stack_name), subs[0]['TopicArn'])

        # assert that Gateway responses have been created
        test_api_name = 'test-api'
        api = [a for a in apigateway.get_rest_apis()['items'] if a['name'] == test_api_name][0]
        responses = apigateway.get_gateway_responses(restApiId=api['id'])['items']
        self.assertEqual(len(responses), 2)
        types = [r['responseType'] for r in responses]
        self.assertEqual(set(types), set(['UNAUTHORIZED', 'DEFAULT_5XX']))
Example #8
    def test_firehose_kinesis_to_s3(self):
        kinesis = aws_stack.connect_to_service('kinesis')
        s3_resource = aws_stack.connect_to_resource('s3')
        firehose = aws_stack.connect_to_service('firehose')

        aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)

        s3_prefix = '/testdata'
        test_data = '{"test": "firehose_data_%s"}' % short_uid()

        # create Firehose stream
        stream = firehose.create_delivery_stream(
            DeliveryStreamType='KinesisStreamAsSource',
            KinesisStreamSourceConfiguration={
                'RoleARN': aws_stack.iam_resource_arn('firehose'),
                'KinesisStreamARN': aws_stack.kinesis_stream_arn(TEST_STREAM_NAME)
            },
            DeliveryStreamName=TEST_FIREHOSE_NAME,
            S3DestinationConfiguration={
                'RoleARN': aws_stack.iam_resource_arn('firehose'),
                'BucketARN': aws_stack.s3_bucket_arn(TEST_BUCKET_NAME),
                'Prefix': s3_prefix
            }
        )
        self.assertTrue(stream)
        self.assertIn(TEST_FIREHOSE_NAME, firehose.list_delivery_streams()['DeliveryStreamNames'])

        # create target S3 bucket
        s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME)

        # put records
        kinesis.put_record(
            Data=to_bytes(test_data),
            PartitionKey='testId',
            StreamName=TEST_STREAM_NAME
        )

        time.sleep(3)

        # check records in target bucket
        all_objects = testutil.list_all_s3_objects()
        testutil.assert_objects(json.loads(to_str(test_data)), all_objects)
Example #9
    def test_firehose_kinesis_to_s3(self):
        kinesis = aws_stack.create_external_boto_client("kinesis")
        s3_resource = aws_stack.connect_to_resource("s3")
        firehose = aws_stack.create_external_boto_client("firehose")

        aws_stack.create_kinesis_stream(TEST_STREAM_NAME, delete=True)

        s3_prefix = "/testdata"
        test_data = '{"test": "firehose_data_%s"}' % short_uid()

        # create Firehose stream
        stream = firehose.create_delivery_stream(
            DeliveryStreamType="KinesisStreamAsSource",
            KinesisStreamSourceConfiguration={
                "RoleARN": aws_stack.iam_resource_arn("firehose"),
                "KinesisStreamARN":
                aws_stack.kinesis_stream_arn(TEST_STREAM_NAME),
            },
            DeliveryStreamName=TEST_FIREHOSE_NAME,
            S3DestinationConfiguration={
                "RoleARN": aws_stack.iam_resource_arn("firehose"),
                "BucketARN": aws_stack.s3_bucket_arn(TEST_BUCKET_NAME),
                "Prefix": s3_prefix,
            },
        )
        self.assertTrue(stream)
        self.assertIn(TEST_FIREHOSE_NAME,
                      firehose.list_delivery_streams()["DeliveryStreamNames"])

        # create target S3 bucket
        s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME)

        # put records
        kinesis.put_record(Data=to_bytes(test_data),
                           PartitionKey="testId",
                           StreamName=TEST_STREAM_NAME)

        time.sleep(3)

        # check records in target bucket
        all_objects = testutil.list_all_s3_objects()
        testutil.assert_objects(json.loads(to_str(test_data)), all_objects)
Example #10
    def test_firehose_s3(self):

        s3_resource = aws_stack.connect_to_resource('s3')
        firehose = aws_stack.connect_to_service('firehose')

        s3_prefix = '/testdata'
        test_data = '{"test": "firehose_data_%s"}' % short_uid()
        # create Firehose stream
        stream = firehose.create_delivery_stream(
            DeliveryStreamName=TEST_FIREHOSE_NAME,
            S3DestinationConfiguration={
                'RoleARN': aws_stack.iam_resource_arn('firehose'),
                'BucketARN': aws_stack.s3_bucket_arn(TEST_BUCKET_NAME),
                'Prefix': s3_prefix
            },
            Tags=TEST_TAGS
        )
        self.assertTrue(stream)
        self.assertIn(TEST_FIREHOSE_NAME, firehose.list_delivery_streams()['DeliveryStreamNames'])
        tags = firehose.list_tags_for_delivery_stream(DeliveryStreamName=TEST_FIREHOSE_NAME)
        self.assertEqual(TEST_TAGS, tags['Tags'])
        # create target S3 bucket
        s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME)

        # put records
        firehose.put_record(
            DeliveryStreamName=TEST_FIREHOSE_NAME,
            Record={
                'Data': to_bytes(test_data)
            }
        )
        # check records in target bucket
        all_objects = testutil.list_all_s3_objects()
        testutil.assert_objects(json.loads(to_str(test_data)), all_objects)
        # check file layout in target bucket
        all_objects = testutil.map_all_s3_objects(buckets=[TEST_BUCKET_NAME])
        for key in all_objects.keys():
            self.assertRegex(key, r'.*/\d{4}/\d{2}/\d{2}/\d{2}/.*\-\d{4}\-\d{2}\-\d{2}\-\d{2}.*')
Example #11
def create_sqs_bucket_notification(
    s3_client: "S3Client",
    sqs_client: "SQSClient",
    bucket_name: str,
    queue_url: str,
    events: List["EventType"],
):
    """A NotificationFactory."""
    queue_arn = get_queue_arn(sqs_client, queue_url)
    assert queue_arn
    bucket_arn = aws_stack.s3_bucket_arn(bucket_name)

    policy = {
        "Version":
        "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": "*",
            "Action": "sqs:SendMessage",
            "Resource": queue_arn,
            "Condition": {
                "ArnEquals": {
                    "aws:SourceArn": bucket_arn
                }
            },
        }],
    }
    sqs_client.set_queue_attributes(QueueUrl=queue_url,
                                    Attributes={"Policy": json.dumps(policy)})

    s3_client.put_bucket_notification_configuration(
        Bucket=bucket_name,
        NotificationConfiguration=dict(
            QueueConfigurations=[dict(
                QueueArn=queue_arn,
                Events=events,
            )]),
    )
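A minimal usage sketch for the factory above, assuming a LocalStack-style endpoint and hypothetical bucket/queue names:

import boto3

# hypothetical endpoint and resource names
s3 = boto3.client("s3", endpoint_url="http://localhost:4566")
sqs = boto3.client("sqs", endpoint_url="http://localhost:4566")

s3.create_bucket(Bucket="notif-bucket")
queue_url = sqs.create_queue(QueueName="notif-queue")["QueueUrl"]

# route object-created events from the bucket to the queue
create_sqs_bucket_notification(
    s3_client=s3,
    sqs_client=sqs,
    bucket_name="notif-bucket",
    queue_url=queue_url,
    events=["s3:ObjectCreated:*"],
)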
Example #12
def create_sns_bucket_notification(
    s3_client: "S3Client",
    sns_client: "SNSClient",
    bucket_name: str,
    topic_arn: str,
    events: List["EventType"],
):
    """A NotificationFactory."""
    bucket_arn = aws_stack.s3_bucket_arn(bucket_name)

    policy = {
        "Version":
        "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": "*",
            "Action": "sns:Publish",
            "Resource": topic_arn,
            "Condition": {
                "ArnEquals": {
                    "aws:SourceArn": bucket_arn
                }
            },
        }],
    }
    sns_client.set_topic_attributes(TopicArn=topic_arn,
                                    AttributeName="Policy",
                                    AttributeValue=json.dumps(policy))

    s3_client.put_bucket_notification_configuration(
        Bucket=bucket_name,
        NotificationConfiguration=dict(
            TopicConfigurations=[dict(
                TopicArn=topic_arn,
                Events=events,
            )]),
    )
Example #13
    def test_put_events_with_target_firehose(self):
        s3_bucket = 's3-{}'.format(short_uid())
        s3_prefix = 'testeventdata'
        stream_name = 'firehose-{}'.format(short_uid())
        rule_name = 'rule-{}'.format(short_uid())
        target_id = 'target-{}'.format(short_uid())

        # create firehose target bucket
        s3_client = aws_stack.connect_to_service('s3')
        s3_client.create_bucket(Bucket=s3_bucket)

        # create firehose delivery stream to s3
        firehose_client = aws_stack.connect_to_service('firehose')
        stream = firehose_client.create_delivery_stream(
            DeliveryStreamName=stream_name,
            S3DestinationConfiguration={
                'RoleARN': aws_stack.iam_resource_arn('firehose'),
                'BucketARN': aws_stack.s3_bucket_arn(s3_bucket),
                'Prefix': s3_prefix
            })
        stream_arn = stream['DeliveryStreamARN']

        self.events_client.create_event_bus(Name=TEST_EVENT_BUS_NAME)

        self.events_client.put_rule(
            Name=rule_name,
            EventBusName=TEST_EVENT_BUS_NAME,
            EventPattern=json.dumps(TEST_EVENT_PATTERN))

        rs = self.events_client.put_targets(Rule=rule_name,
                                            EventBusName=TEST_EVENT_BUS_NAME,
                                            Targets=[{
                                                'Id': target_id,
                                                'Arn': stream_arn
                                            }])

        self.assertIn('FailedEntryCount', rs)
        self.assertIn('FailedEntries', rs)
        self.assertEqual(rs['FailedEntryCount'], 0)
        self.assertEqual(rs['FailedEntries'], [])

        self.events_client.put_events(
            Entries=[{
                'EventBusName': TEST_EVENT_BUS_NAME,
                'Source': TEST_EVENT_PATTERN['Source'],
                'DetailType': TEST_EVENT_PATTERN['DetailType'],
                'Detail': TEST_EVENT_PATTERN['Detail']
            }])

        # run tests
        bucket_contents = s3_client.list_objects(Bucket=s3_bucket)['Contents']
        self.assertEqual(len(bucket_contents), 1)
        key = bucket_contents[0]['Key']
        s3_object = s3_client.get_object(Bucket=s3_bucket, Key=key)
        self.assertEqual((s3_object['Body'].read()).decode(),
                         str(TEST_EVENT_PATTERN['Detail']))

        # clean up
        firehose_client.delete_delivery_stream(DeliveryStreamName=stream_name)
        # empty and delete bucket
        s3_client.delete_object(Bucket=s3_bucket, Key=key)
        s3_client.delete_bucket(Bucket=s3_bucket)

        self.events_client.remove_targets(Rule=rule_name,
                                          EventBusName=TEST_EVENT_BUS_NAME,
                                          Ids=[target_id],
                                          Force=True)
        self.events_client.delete_rule(Name=rule_name,
                                       EventBusName=TEST_EVENT_BUS_NAME,
                                       Force=True)
        self.events_client.delete_event_bus(Name=TEST_EVENT_BUS_NAME)
Example #14
    def test_put_events_with_target_firehose(self, events_client, s3_client, firehose_client):
        s3_bucket = "s3-{}".format(short_uid())
        s3_prefix = "testeventdata"
        stream_name = "firehose-{}".format(short_uid())
        rule_name = "rule-{}".format(short_uid())
        target_id = "target-{}".format(short_uid())
        bus_name = "bus-{}".format(short_uid())

        # create firehose target bucket
        s3_client.create_bucket(Bucket=s3_bucket)

        # create firehose delivery stream to s3
        stream = firehose_client.create_delivery_stream(
            DeliveryStreamName=stream_name,
            S3DestinationConfiguration={
                "RoleARN": aws_stack.iam_resource_arn("firehose"),
                "BucketARN": aws_stack.s3_bucket_arn(s3_bucket),
                "Prefix": s3_prefix,
            },
        )
        stream_arn = stream["DeliveryStreamARN"]

        events_client.create_event_bus(Name=bus_name)
        events_client.put_rule(
            Name=rule_name,
            EventBusName=bus_name,
            EventPattern=json.dumps(TEST_EVENT_PATTERN),
        )
        rs = events_client.put_targets(
            Rule=rule_name,
            EventBusName=bus_name,
            Targets=[{"Id": target_id, "Arn": stream_arn}],
        )

        assert "FailedEntryCount" in rs
        assert "FailedEntries" in rs
        assert rs["FailedEntryCount"] == 0
        assert rs["FailedEntries"] == []

        events_client.put_events(
            Entries=[
                {
                    "EventBusName": bus_name,
                    "Source": TEST_EVENT_PATTERN["source"][0],
                    "DetailType": TEST_EVENT_PATTERN["detail-type"][0],
                    "Detail": json.dumps(EVENT_DETAIL),
                }
            ]
        )

        # run tests
        bucket_contents = s3_client.list_objects(Bucket=s3_bucket)["Contents"]
        assert len(bucket_contents) == 1
        key = bucket_contents[0]["Key"]
        s3_object = s3_client.get_object(Bucket=s3_bucket, Key=key)
        actual_event = json.loads(s3_object["Body"].read().decode())
        self.assert_valid_event(actual_event)
        assert actual_event["detail"] == EVENT_DETAIL

        # clean up
        firehose_client.delete_delivery_stream(DeliveryStreamName=stream_name)
        # empty and delete bucket
        s3_client.delete_object(Bucket=s3_bucket, Key=key)
        s3_client.delete_bucket(Bucket=s3_bucket)
        self.cleanup(bus_name, rule_name, target_id)
Example #15
def S3_Bucket_get_cfn_attribute(self, attribute_name):
    if attribute_name in ['Arn']:
        return aws_stack.s3_bucket_arn(self.name)
    return S3_Bucket_get_cfn_attribute_orig(self, attribute_name)
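The override above delegates to S3_Bucket_get_cfn_attribute_orig, which implies a save-then-patch pattern. A sketch of how such a patch is typically wired; the target class is an assumption, not taken from the example:

from moto.s3.models import FakeBucket  # assumed patch target, adjust to the actual model class

# keep a reference to the original resolver, then install the override
S3_Bucket_get_cfn_attribute_orig = FakeBucket.get_cfn_attribute
FakeBucket.get_cfn_attribute = S3_Bucket_get_cfn_attribute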
Example #16
    def test_create_delete_stack(self):
        cloudformation = aws_stack.connect_to_resource('cloudformation')
        cf_client = aws_stack.connect_to_service('cloudformation')
        s3 = aws_stack.connect_to_service('s3')
        sns = aws_stack.connect_to_service('sns')
        sqs = aws_stack.connect_to_service('sqs')
        apigateway = aws_stack.connect_to_service('apigateway')
        template = template_deployer.template_to_json(
            load_file(TEST_TEMPLATE_1))

        # deploy template
        stack_name = 'stack-%s' % short_uid()
        cloudformation.create_stack(StackName=stack_name,
                                    TemplateBody=template)

        # wait for deployment to finish
        def check_stack():
            stack = get_stack_details(stack_name)
            self.assertEqual(stack['StackStatus'], 'CREATE_COMPLETE')

        retry(check_stack, retries=3, sleep=2)

        # assert that resources have been created
        assert bucket_exists('cf-test-bucket-1')
        queue_url = queue_exists('cf-test-queue-1')
        assert queue_url
        topic_arn = topic_exists('%s-test-topic-1-1' % stack_name)
        assert topic_arn
        assert stream_exists('cf-test-stream-1')
        resource = describe_stack_resource(stack_name,
                                           'SQSQueueNoNameProperty')
        assert queue_exists(resource['PhysicalResourceId'])
        assert ssm_param_exists('cf-test-param-1')

        # assert that tags have been created
        tags = s3.get_bucket_tagging(Bucket='cf-test-bucket-1')['TagSet']
        self.assertEqual(
            tags, [{
                'Key': 'foobar',
                'Value': aws_stack.get_sqs_queue_url('cf-test-queue-1')
            }])
        tags = sns.list_tags_for_resource(ResourceArn=topic_arn)['Tags']
        self.assertEqual(
            tags, [{
                'Key': 'foo',
                'Value': 'cf-test-bucket-1'
            }, {
                'Key': 'bar',
                'Value': aws_stack.s3_bucket_arn('cf-test-bucket-1')
            }])
        queue_tags = sqs.list_queue_tags(QueueUrl=queue_url)
        self.assertIn('Tags', queue_tags)
        self.assertEqual(queue_tags['Tags'], {
            'key1': 'value1',
            'key2': 'value2'
        })

        # assert that bucket notifications have been created
        notifs = s3.get_bucket_notification_configuration(
            Bucket='cf-test-bucket-1')
        self.assertIn('QueueConfigurations', notifs)
        self.assertIn('LambdaFunctionConfigurations', notifs)
        self.assertEqual(notifs['QueueConfigurations'][0]['QueueArn'],
                         'aws:arn:sqs:test:testqueue')
        self.assertEqual(notifs['QueueConfigurations'][0]['Events'],
                         ['s3:ObjectDeleted:*'])
        self.assertEqual(
            notifs['LambdaFunctionConfigurations'][0]['LambdaFunctionArn'],
            'aws:arn:lambda:test:testfunc')
        self.assertEqual(notifs['LambdaFunctionConfigurations'][0]['Events'],
                         ['s3:ObjectCreated:*'])

        # assert that subscriptions have been created
        subs = sns.list_subscriptions()['Subscriptions']
        subs = [
            s for s in subs
            if (':%s:cf-test-queue-1' % TEST_AWS_ACCOUNT_ID) in s['Endpoint']
        ]
        self.assertEqual(len(subs), 1)
        self.assertIn(
            ':%s:%s-test-topic-1-1' % (TEST_AWS_ACCOUNT_ID, stack_name),
            subs[0]['TopicArn'])
        # assert that subscription attributes are added properly
        attrs = sns.get_subscription_attributes(
            SubscriptionArn=subs[0]['SubscriptionArn'])['Attributes']
        self.assertEqual(
            attrs, {
                'Endpoint': subs[0]['Endpoint'],
                'Protocol': 'sqs',
                'SubscriptionArn': subs[0]['SubscriptionArn'],
                'TopicArn': subs[0]['TopicArn'],
                'FilterPolicy': json.dumps({'eventType': ['created']})
            })

        # assert that Gateway responses have been created
        test_api_name = 'test-api'
        api = [
            a for a in apigateway.get_rest_apis()['items']
            if a['name'] == test_api_name
        ][0]
        responses = apigateway.get_gateway_responses(
            restApiId=api['id'])['items']
        self.assertEqual(len(responses), 2)
        types = [r['responseType'] for r in responses]
        self.assertEqual(set(types), set(['UNAUTHORIZED', 'DEFAULT_5XX']))

        # delete the stack
        cf_client.delete_stack(StackName=stack_name)

        # assert that resources have been deleted
        assert not bucket_exists('cf-test-bucket-1')
        assert not queue_exists('cf-test-queue-1')
        assert not topic_exists('%s-test-topic-1-1' % stack_name)
        retry(lambda: self.assertFalse(stream_exists('cf-test-stream-1')))
Example #17
    def test_delivery_stream_with_kinesis_as_source(
        self,
        firehose_client,
        kinesis_client,
        s3_client,
        s3_bucket,
        kinesis_create_stream,
    ):

        bucket_arn = aws_stack.s3_bucket_arn(s3_bucket)
        stream_name = f"test-stream-{short_uid()}"
        log_group_name = f"group{short_uid()}"
        role_arn = "arn:aws:iam::000000000000:role/Firehose-Role"
        delivery_stream_name = f"test-delivery-stream-{short_uid()}"

        kinesis_create_stream(StreamName=stream_name, ShardCount=2)
        stream_arn = kinesis_client.describe_stream(
            StreamName=stream_name)["StreamDescription"]["StreamARN"]

        response = firehose_client.create_delivery_stream(
            DeliveryStreamName=delivery_stream_name,
            DeliveryStreamType="KinesisStreamAsSource",
            KinesisStreamSourceConfiguration={
                "KinesisStreamARN": stream_arn,
                "RoleARN": role_arn,
            },
            ExtendedS3DestinationConfiguration={
                "BucketARN": bucket_arn,
                "RoleARN": role_arn,
                "BufferingHints": {
                    "IntervalInSeconds": 60,
                    "SizeInMBs": 64
                },
                "DynamicPartitioningConfiguration": {
                    "Enabled": True
                },
                "ProcessingConfiguration": {
                    "Enabled":
                    True,
                    "Processors": [
                        {
                            "Type":
                            "MetadataExtraction",
                            "Parameters": [
                                {
                                    "ParameterName": "MetadataExtractionQuery",
                                    "ParameterValue": "{s3Prefix: .tableName}",
                                },
                                {
                                    "ParameterName": "JsonParsingEngine",
                                    "ParameterValue": "JQ-1.6"
                                },
                            ],
                        },
                    ],
                },
                "DataFormatConversionConfiguration": {
                    "Enabled": True
                },
                "CompressionFormat": "GZIP",
                "Prefix":
                "firehoseTest/!{partitionKeyFromQuery:s3Prefix}/!{partitionKeyFromLambda:companyId}/!{partitionKeyFromLambda:year}/!{partitionKeyFromLambda:month}/",
                "ErrorOutputPrefix":
                "firehoseTest-errors/!{firehose:error-output-type}/",
                "CloudWatchLoggingOptions": {
                    "Enabled": True,
                    "LogGroupName": log_group_name,
                },
            },
        )

        assert response["ResponseMetadata"]["HTTPStatusCode"] == 200

        firehose_client.delete_delivery_stream(
            DeliveryStreamName=delivery_stream_name)
        kinesis_client.delete_stream(StreamName=stream_name)
        s3_client.delete_bucket(Bucket=s3_bucket)
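In the configuration above, the MetadataExtraction processor derives the s3Prefix partition key from each record's tableName field via the JQ expression {s3Prefix: .tableName}. A hypothetical record shaped for that query:

import json

# hypothetical payload: "orders" would fill !{partitionKeyFromQuery:s3Prefix} in the S3 prefix
record = {"tableName": "orders", "id": 1}
firehose_client.put_record(
    DeliveryStreamName=delivery_stream_name,
    Record={"Data": json.dumps(record).encode("utf-8")},
)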
Example #18
    def test_kinesis_firehose_opensearch_s3_backup(
        self,
        firehose_client,
        kinesis_client,
        opensearch_client,
        s3_client,
        s3_bucket,
        kinesis_create_stream,
        monkeypatch,
        opensearch_endpoint_strategy,
    ):
        domain_name = f"test-domain-{short_uid()}"
        stream_name = f"test-stream-{short_uid()}"
        role_arn = "arn:aws:iam::000000000000:role/Firehose-Role"
        delivery_stream_name = f"test-delivery-stream-{short_uid()}"
        monkeypatch.setattr(config, "OPENSEARCH_ENDPOINT_STRATEGY",
                            opensearch_endpoint_strategy)
        try:
            opensearch_create_response = opensearch_client.create_domain(
                DomainName=domain_name)
            opensearch_url = f"http://{opensearch_create_response['DomainStatus']['Endpoint']}"
            opensearch_arn = opensearch_create_response["DomainStatus"]["ARN"]

            # create s3 backup bucket arn
            bucket_arn = aws_stack.s3_bucket_arn(s3_bucket)

            # create kinesis stream
            kinesis_create_stream(StreamName=stream_name, ShardCount=2)
            stream_arn = kinesis_client.describe_stream(
                StreamName=stream_name)["StreamDescription"]["StreamARN"]

            kinesis_stream_source_def = {
                "KinesisStreamARN": stream_arn,
                "RoleARN": role_arn,
            }
            opensearch_destination_configuration = {
                "RoleARN": role_arn,
                "DomainARN": opensearch_arn,
                "IndexName": "activity",
                "TypeName": "activity",
                "S3BackupMode": "AllDocuments",
                "S3Configuration": {
                    "RoleARN": role_arn,
                    "BucketARN": bucket_arn,
                },
            }
            firehose_client.create_delivery_stream(
                DeliveryStreamName=delivery_stream_name,
                DeliveryStreamType="KinesisStreamAsSource",
                KinesisStreamSourceConfiguration=kinesis_stream_source_def,
                AmazonopensearchserviceDestinationConfiguration=
                opensearch_destination_configuration,
            )

            # wait for opensearch cluster to be ready
            def check_domain_state():
                result = opensearch_client.describe_domain(
                    DomainName=domain_name)["DomainStatus"]["Processing"]
                return not result

            assert poll_condition(check_domain_state, 30, 1)

            # put kinesis stream record
            kinesis_record = {"target": "hello"}
            kinesis_client.put_record(StreamName=stream_name,
                                      Data=to_bytes(
                                          json.dumps(kinesis_record)),
                                      PartitionKey="1")

            firehose_record = {"target": "world"}
            firehose_client.put_record(
                DeliveryStreamName=delivery_stream_name,
                Record={"Data": to_bytes(json.dumps(firehose_record))},
            )

            def assert_opensearch_contents():
                response = requests.get(f"{opensearch_url}/activity/_search")
                response_bod = response.json()
                assert "hits" in response_bod
                response_bod_hits = response_bod["hits"]
                assert "hits" in response_bod_hits
                result = response_bod_hits["hits"]
                assert len(result) == 2
                sources = [item["_source"] for item in result]
                assert firehose_record in sources
                assert kinesis_record in sources

            retry(assert_opensearch_contents)

            def assert_s3_contents():
                result = s3_client.list_objects(Bucket=s3_bucket)
                contents = []
                for o in result.get("Contents"):
                    data = s3_client.get_object(Bucket=s3_bucket,
                                                Key=o.get("Key"))
                    content = data["Body"].read()
                    contents.append(content)
                assert len(contents) == 2
                assert to_bytes(json.dumps(firehose_record)) in contents
                assert to_bytes(json.dumps(kinesis_record)) in contents

            retry(assert_s3_contents)

        finally:
            firehose_client.delete_delivery_stream(
                DeliveryStreamName=delivery_stream_name)
            opensearch_client.delete_domain(DomainName=domain_name)