Example #1
    def kinesis_adder(self, name, shards):
        kinesis_stream = self.template.add_resource(
            kinesis.Stream(name, ShardCount=shards))

        self.template.add_output(
            [Output(
                "kinesisStreamName",
                Value=Ref(kinesis_stream),
            )])
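The helper above assumes it lives on a class that owns a troposphere Template. A minimal, self-contained sketch of how it might be wired up and rendered; the StackBuilder class name and the "EventStream" arguments are assumptions, not part of the original example:

from troposphere import Output, Ref, Template
import troposphere.kinesis as kinesis


class StackBuilder:
    """Hypothetical owner of the template used by kinesis_adder above."""

    def __init__(self):
        self.template = Template()

    def kinesis_adder(self, name, shards):
        kinesis_stream = self.template.add_resource(
            kinesis.Stream(name, ShardCount=shards))
        self.template.add_output(
            [Output("kinesisStreamName", Value=Ref(kinesis_stream))])


builder = StackBuilder()
builder.kinesis_adder("EventStream", 2)
print(builder.template.to_json())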
Example #2
def generate(account_list=None, region_list=None, file_location=None, output_keys=False, dry_run=False):
    """CloudFormation template generator for use in creating the resources required to capture logs in a centrally managed account per UCSD standards."""
    if isinstance(account_list, tuple):
        account_list = list(account_list)

    parameter_groups = []

    region_list = region_list if region_list else ['us-west-1', 'us-west-2', 'us-east-1', 'us-east-2']
    t = Template()
    t.add_version("2010-09-09")
    t.add_description("UCSD Log Target AWS CloudFormation Template - this CFn template configures a given account to receive logs from other accounts so as to aggregate and then optionally forward those logs on to the UCSD Splunk installation.")

    # Create Kinesis and IAM Roles
    log_stream_shard_count = t.add_parameter(Parameter("LogStreamShardCount",
                                             Description="Number of shards to create within the AWS Kinesis stream created to handle CloudWatch Logs.",
                                             Type="Number",
                                             MinValue=1,
                                             MaxValue=64,
                                             Default=1))

    log_stream_retention_period = t.add_parameter(Parameter("LogStreamRetentionPeriod",
                                                  Description = "Number of hours to retain logs in the Kinesis stream.",
                                                  Type="Number",
                                                  MinValue=24,
                                                  MaxValue=120,
                                                  Default=24))

    parameter_groups.append({'Label': {'default': 'Log Stream Inputs'},
                             'Parameters': [log_stream_shard_count.name, log_stream_retention_period.name]})


    log_stream = t.add_resource(k.Stream("LogStream",
                                RetentionPeriodHours=Ref(log_stream_retention_period),
                                ShardCount=Ref(log_stream_shard_count)))

    firehose_bucket = t.add_resource(s3.Bucket('LogS3DeliveryBucket'))

    firehose_delivery_role = t.add_resource(iam.Role('LogS3DeliveryRole',
                                            AssumeRolePolicyDocument=Policy(
                                                Statement=[Statement(
                                                Effect=Allow,
                                                Action=[AssumeRole],
                                                Principal=Principal('Service', 'firehose.amazonaws.com'),
                                                Condition=Condition(StringEquals('sts:ExternalId', AccountId)))])))

    log_s3_delivery_policy = t.add_resource(iam.PolicyType('LogS3DeliveryPolicy',
                                           Roles=[Ref(firehose_delivery_role)],
                                           PolicyName='LogS3DeliveryPolicy',
                                           PolicyDocument=Policy(
                                               Statement=[Statement(
                                                   Effect=Allow,
                                                   Action=[as3.AbortMultipartUpload,
                                                           as3.GetBucketLocation,
                                                           as3.GetObject,
                                                           as3.ListBucket,
                                                           as3.ListBucketMultipartUploads,
                                                           as3.PutObject],
                                                   Resource=[
                                                        Join('', ['arn:aws:s3:::', Ref(firehose_bucket)]),
                                                        Join('', ['arn:aws:s3:::', Ref(firehose_bucket), '/*'])]),
                                                Statement(
                                                    Effect=Allow,
                                                    Action=[akinesis.Action('Get*'), akinesis.DescribeStream, akinesis.ListStreams],
                                                    Resource=[
                                                        GetAtt(log_stream, 'Arn')
                                                    ])])))

    s3_firehose = t.add_resource(fh.DeliveryStream('LogToS3DeliveryStream',
                                 DependsOn=[log_s3_delivery_policy.name],
                                 DeliveryStreamName='LogToS3DeliveryStream',
                                 DeliveryStreamType='KinesisStreamAsSource',
                                 KinesisStreamSourceConfiguration=fh.KinesisStreamSourceConfiguration(
                                    KinesisStreamARN=GetAtt(log_stream, 'Arn'),
                                    RoleARN=GetAtt(firehose_delivery_role, 'Arn')
                                 ),
                                 S3DestinationConfiguration=fh.S3DestinationConfiguration(
                                    BucketARN=GetAtt(firehose_bucket, 'Arn'),
                                    BufferingHints=fh.BufferingHints(
                                        IntervalInSeconds=300,
                                        SizeInMBs=50
                                    ),
                                    CompressionFormat='UNCOMPRESSED',
                                    Prefix='firehose/',
                                    RoleARN=GetAtt(firehose_delivery_role, 'Arn'),
                                 )))

    t.add_output(Output('SplunkKinesisLogStream',
                 Value=GetAtt(log_stream, 'Arn'),
                 Description='ARN of the kinesis stream for log aggregation.'))


    # Generate Bucket with Lifecycle Policies

    ct_s3_key_prefix = t.add_parameter(Parameter('CloudTrailKeyPrefix',
                                       Type='String',
                                       Default='',
                                       Description='Key name prefix for logs being sent to S3'))

    bucket_name = t.add_parameter(Parameter("BucketName",
                                  Description="Name to assign to the central logging retention bucket",
                                  Type="String",
                                  AllowedPattern="([a-z]|[0-9])+",
                                  MinLength=2,
                                  MaxLength=64))

    glacier_migration_days = t.add_parameter(Parameter("LogMoveToGlacierInDays",
                                             Description="Number of days until logs are expired from S3 and transitioned to Glacier",
                                             Type="Number",
                                             Default=365))

    glacier_deletion_days = t.add_parameter(Parameter("LogDeleteFromGlacierInDays",
                                            Description="Number of days until logs are expired from Glacier and deleted",
                                            Type="Number",
                                            Default=365*7))

    parameter_groups.append({'Label': {'default': 'S3 Log Destination Parameters'},
                             'Parameters': [bucket_name.name, ct_s3_key_prefix.name, glacier_migration_days.name, glacier_deletion_days.name]})

    dead_letter_queue = t.add_resource(sqs.Queue('deadLetterQueue'))

    queue = t.add_resource(sqs.Queue('s3DeliveryQueue',
                           MessageRetentionPeriod=14*24*60*60,  # 14 days, in seconds
                           VisibilityTimeout=5*60,  # 5 minutes, per Splunk docs: http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWS#Configure_SQS
                           RedrivePolicy=sqs.RedrivePolicy(
                               deadLetterTargetArn=GetAtt(dead_letter_queue, 'Arn'),
                               maxReceiveCount=10
                           )))

    t.add_output(Output('SplunkS3Queue',
                 Value=GetAtt(queue, 'Arn'),
                 Description='Queue for Splunk SQS S3 ingest'))

    t.add_output(Output('SplunkS3DeadLetterQueue',
                Value=GetAtt(dead_letter_queue, 'Arn'),
                Description="Dead letter queue for Splunk SQS S3 ingest"))


    t.add_resource(sqs.QueuePolicy('s3DeliveryQueuePolicy',
                   PolicyDocument=Policy(
                   Statement=[Statement(
                       Effect=Allow,
                       Principal=Principal("AWS", "*"),
                       Action=[asqs.SendMessage],
                       Resource=[GetAtt(queue, 'Arn')],
                       Condition=Condition(ArnLike("aws:SourceArn", Join('', ["arn:aws:s3:*:*:", Ref(bucket_name)]))))]),
                   Queues=[Ref(queue)]))

    bucket = t.add_resource(s3.Bucket("LogDeliveryBucket",
                            DependsOn=[log_stream.name, queue.name],
                            BucketName=Ref(bucket_name),
                            AccessControl="LogDeliveryWrite",
                            NotificationConfiguration=s3.NotificationConfiguration(
                                QueueConfigurations=[s3.QueueConfigurations(
                                    Event="s3:ObjectCreated:*",
                                    Queue=GetAtt(queue, 'Arn'))]),
                            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                                s3.LifecycleRule(
                                    Id="S3ToGlacierTransition",
                                    Status="Enabled",
                                    ExpirationInDays=Ref(glacier_deletion_days),
                                    Transition=s3.LifecycleRuleTransition(
                                        StorageClass="Glacier",
                                        TransitionInDays=Ref(glacier_migration_days)))])))

    bucket_policy = t.add_resource(s3.BucketPolicy("LogDeliveryBucketPolicy",
                                    Bucket=Ref(bucket),
                                    PolicyDocument=Policy(
                                        Statement=[
                                            Statement(
                                                Effect="Allow",
                                                Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                                                Action=[GetBucketAcl],
                                                Resource=[GetAtt(bucket, 'Arn')]),
                                            Statement(
                                                Effect="Allow",
                                                Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                                                Action=[PutObject],
                                                Condition=Condition(StringEquals({"s3:x-amz-acl": "bucket-owner-full-control"})),
                                                Resource=[Join('', [GetAtt(bucket, "Arn"), Ref(ct_s3_key_prefix), "/AWSLogs/", acct_id, "/*"]) for acct_id in account_list])])))

    splunk_sqs_s3_user = t.add_resource(iam.User('splunkS3SQSUser',
                                        Path='/',
                                        UserName='******'))

    splunk_user_policy = t.add_resource(_generate_splunk_policy(users=[Ref(splunk_sqs_s3_user)]))

    t.add_output(Output('BucketName',
                 Description="Name of the bucket for CloudTrail log delivery",
                 Value=Ref(bucket)))

    # Log destination setup

    cwl_to_kinesis_role = t.add_resource(iam.Role('CWLtoKinesisRole',
                                         AssumeRolePolicyDocument=Policy(
                                            Statement=[Statement(
                                                Effect=Allow,
                                                Action=[AssumeRole],
                                                Principal=Principal("Service", Join('', ["logs.", Region, ".amazonaws.com"])))])))

    cwl_to_kinesis_policy_link = t.add_resource(iam.PolicyType('CWLtoKinesisPolicy',
                                               PolicyName='CWLtoKinesisPolicy',
                                               Roles=[Ref(cwl_to_kinesis_role)],
                                               PolicyDocument=Policy(
                                                 Statement=[
                                                     Statement(
                                                         Effect=Allow,
                                                         Resource=[GetAtt(log_stream, 'Arn')],
                                                         Action=[akinesis.PutRecord]),
                                                     Statement(
                                                         Effect=Allow,
                                                         Resource=[GetAtt(cwl_to_kinesis_role, 'Arn')],
                                                         Action=[IAMPassRole])])))

    log_destination = t.add_resource(cwl.Destination('CWLtoKinesisDestination',
                                     DependsOn=[cwl_to_kinesis_policy_link.name],
                                     DestinationName='CWLtoKinesisDestination',
                                     DestinationPolicy=_generate_log_destination_policy_test('CWLtoKinesisDestination', account_list),
                                     RoleArn=GetAtt(cwl_to_kinesis_role, 'Arn'),
                                     TargetArn=GetAtt(log_stream, 'Arn')))

    t.add_output(Output('childAccountLogDeliveryDestinationArn',
                 Value=GetAtt(log_destination,'Arn'),
                 Description='Log Destination to specify when deploying the source cloudformation template in other accounts.'))

    if output_keys:
        splunk_user_creds = t.add_resource(iam.AccessKey('splunkAccountUserCreds',
                                           UserName=Ref(splunk_sqs_s3_user)))

        t.add_output(Output('splunkUserAccessKey',
                     Description='AWS Access Key for the user created for splunk to use when accessing logs',
                     Value=Ref(splunk_user_creds)))

        t.add_output(Output('splunkUserSecretKey',
                     Description='AWS Secret Access Key ID for the user created for splunk to use when accessing logs',
                     Value=GetAtt(splunk_user_creds, 'SecretAccessKey')))


    t.add_output(Output('splunkCWLRegion',
                 Description="The AWS region that contains the data. In aws_cloudwatch_logs_tasks.conf, enter the region ID.",
                 Value=Region))

    t.add_output(Output("DeploymentAccount",
                 Value=AccountId,
                 Description="Convenience Output for referencing AccountID of the log aggregation account"))

    t.add_metadata({"AWS::CloudFormation::Interface": {"ParameterGroups": parameter_groups}})

    if dry_run:
        print(t.to_json())
    else:
        save_path = file_location if file_location else os.path.join(log_aggregation_cf, 'log_targets.json')
        with open(save_path, 'w') as f:
            f.write(t.to_json())
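A sketch of calling generate() for a quick look at the rendered template; the import path and the account IDs below are placeholders, since the module that contains this function is not shown in the excerpt:

from log_targets import generate  # hypothetical module path

# Dry run: prints the CloudFormation JSON instead of writing log_targets.json.
generate(account_list=('111111111111', '222222222222'),
         region_list=['us-west-2', 'us-east-1'],
         output_keys=False,
         dry_run=True)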
Example #3
import yaml
from pkg_resources import resource_string

from troposphere import Ref, Tags, Template
import troposphere.iam as iam
import troposphere.kinesis as kinesis

# load config
cfg = yaml.safe_load(resource_string('config', 'kinesis_firehose_s3.yml'))

STACK_NAME = cfg['stack_name']

template = Template()
description = 'Stack containing kinesis and firehose writing to S3'
template.add_description(description)
# AWSTemplateFormatVersion
template.add_version('2010-09-09')

kinesis_stream = template.add_resource(
    kinesis.Stream('DevStream',
                   Name=cfg['kinesis_stream_name'],
                   ShardCount=cfg['kinesis_shard_count'],
                   Tags=Tags(StackName=Ref('AWS::StackName'),
                             Name='DevStream')))

firehose_delivery_role = template.add_resource(
    iam.Role('FirehoseRole',
             AssumeRolePolicyDocument={
                 'Statement': [{
                     'Effect': 'Allow',
                     'Principal': {
                         'Service': ['firehose.amazonaws.com']
                     },
                     'Action': ['sts:AssumeRole']
                 }]
             },
             Policies=[
Example #4
# This is an example of a Kinesis Stream

from troposphere import Output
from troposphere import Ref, Template
import troposphere.kinesis as kinesis


template = Template()

kinesis_stream = template.add_resource(kinesis.Stream(
    "TestStream",
    ShardCount=1
))

template.add_output([
    Output(
        "StreamName",
        Description="Stream Name (Physical ID)",
        Value=Ref(kinesis_stream),
    ),
])

print(template.to_json())
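Several of the other examples on this page reference the stream by ARN rather than by name; if that is needed here too, a GetAtt-based output can be appended to the same template before rendering. A small sketch building on the snippet above:

from troposphere import GetAtt

template.add_output([
    Output(
        "StreamArn",
        Description="Stream ARN",
        Value=GetAtt(kinesis_stream, "Arn"),
    ),
])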
Example #5
    def _add_adaptive_memory_streams(self, template, context: ff.Context,
                                     lambda_function, role):
        stream_name = self._stream_resource_name(context.name)
        stream = template.add_resource(
            kinesis.Stream(stream_name, Name=stream_name, ShardCount=1))

        # sql_text = """
        #                 CREATE OR REPLACE STREAM "DESTINATION_STREAM" (
        #                     "rt" TIMESTAMP,
        #                     "message" CHAR(128),
        #                     "up" BIGINT
        #                 );

        #                 CREATE OR REPLACE PUMP "STREAM_PUMP" AS
        #                     INSERT INTO "DESTINATION_STREAM"
        #                         SELECT STREAM
        #                             FLOOR("SOURCE_SQL_STREAM_001".ROWTIME TO HOUR),
        #                             "message",
        #                             MAX(1)
        #                         FROM "SOURCE_SQL_STREAM_001"
        #                         WHERE "event_type" = 'resource-usage'
        #                         GROUP BY FLOOR("SOURCE_SQL_STREAM_001".ROWTIME TO HOUR), "message"
        #                 ;
        #             """

        # """
        #                             CASE WHEN (AVG(memory_usage) + (STDDEV_SAMP(memory_usage) * 2.58)) > (.9 * max_memory) THEN 1 ELSE 0

        #                             AND (
        #                                 (AVG(memory_usage) + (STDDEV_SAMP(memory_usage) * 2.58)) > (.9 * max_memory)
        #                                 OR (AVG(memory_usage) + (STDDEV_SAMP(memory_usage) * 2.58)) < (.8 * COALESCE(prev_memory_tier, 1000000))
        #                             )
        # """

        sql = """
            CREATE OR REPLACE STREAM "DESTINATION_STREAM" (
                "rt" TIMESTAMP,
                "message" VARCHAR(128),
                "up" BIGINT
            );
    
            CREATE OR REPLACE STREAM "METRICS_STREAM" (
                "rt" TIMESTAMP,
                "message" VARCHAR(128),
                "average" DOUBLE,
                "standard_dev" DOUBLE
            );
    
            CREATE OR REPLACE PUMP "METRICS_PUMP" AS
            INSERT INTO "METRICS_STREAM"
                SELECT STREAM
                    FLOOR(s.ROWTIME TO HOUR),
                    "message",
                    AVG("memory_used"),
                    STDDEV_SAMP("memory_used")
                FROM "PwrLabDevIntegrationStream_001" AS s
                GROUP BY FLOOR(s.ROWTIME TO HOUR), "message";
    
    
            CREATE OR REPLACE PUMP "STREAM_PUMP" AS
            INSERT INTO "DESTINATION_STREAM"
                SELECT STREAM
                    m."rt",
                    m."message",
                    MAX(CASE WHEN (m."average" + (m."standard_dev" * 2.58)) > (.9 * s."max_memory") THEN 1 ELSE 0 END)
                FROM "PwrLabDevIntegrationStream_001" AS s
                JOIN "METRICS_STREAM" AS m
                    ON s."message" = m."message" 
                    AND FLOOR(s.ROWTIME TO HOUR) = m."rt"  
                WHERE 
                        ((m."average" + (m."standard_dev" * 2.58)) > (.9 * s."max_memory"))
                        OR ((m."average" + (m."standard_dev" * 2.58)) < (.8 * s."prev_memory_tier"))
                GROUP BY FLOOR(s.ROWTIME TO HOUR), m."rt", m."message", m."average", m."standard_dev", s."max_memory", s."prev_memory_tier";
                            
        """

        analytics_stream = template.add_resource(
            analytics.Application(
                self._analytics_application_resource_name(context.name),
                ApplicationName=self._analytics_application_resource_name(context.name),
                ApplicationConfiguration=analytics.ApplicationConfiguration(
                    ApplicationCodeConfiguration=analytics.ApplicationCodeConfiguration(
                        CodeContent=analytics.CodeContent(TextContent=sql),
                        CodeContentType="PLAINTEXT"),
                    SqlApplicationConfiguration=analytics.SqlApplicationConfiguration(
                        Inputs=[
                            analytics.Input(
                                NamePrefix=stream_name,
                                InputSchema=analytics.InputSchema(
                                    RecordColumns=[
                                        analytics.RecordColumn(
                                            Mapping='event_type',
                                            Name='event_type',
                                            SqlType='CHAR(64)'),
                                        analytics.RecordColumn(
                                            Mapping='message',
                                            Name='message',
                                            SqlType='CHAR(128)'),
                                        analytics.RecordColumn(
                                            Mapping='memory_used',
                                            Name='memory_used',
                                            SqlType='NUMERIC'),
                                        analytics.RecordColumn(
                                            Mapping='run_time',
                                            Name='run_time',
                                            SqlType='NUMERIC'),
                                        analytics.RecordColumn(
                                            Mapping='max_memory',
                                            Name='max_memory',
                                            SqlType='NUMERIC'),
                                        analytics.RecordColumn(
                                            Mapping='prev_memory_tier',
                                            Name='prev_memory_tier',
                                            SqlType='NUMERIC'),
                                    ],
                                    RecordFormat=analytics.RecordFormat(
                                        RecordFormatType="JSON")),
                                KinesisStreamsInput=analytics.KinesisStreamsInput(
                                    ResourceARN=GetAtt(stream, 'Arn')))
                        ])),
                RuntimeEnvironment="SQL-1_0",
                ServiceExecutionRole=GetAtt(role, 'Arn'),
                DependsOn=stream))

        template.add_resource(
            analytics.ApplicationOutput(
                f'{self._analytics_application_resource_name(context.name)}Output',
                ApplicationName=self._analytics_application_resource_name(
                    context.name),
                Output=analytics.Output(
                    DestinationSchema=analytics.DestinationSchema(
                        RecordFormatType="JSON"),
                    LambdaOutput=analytics.LambdaOutput(
                        ResourceARN=GetAtt(lambda_function, 'Arn'))),
                DependsOn=analytics_stream))
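For reference, the JSON records this analytics application consumes mirror the RecordColumn mappings above (event_type, message, memory_used, run_time, max_memory, prev_memory_tier). A hypothetical producer might put records like the following; the stream name is a placeholder and the field values are made up:

import json

import boto3

kinesis_client = boto3.client('kinesis')
kinesis_client.put_record(
    StreamName='my-adaptive-memory-stream',  # placeholder; use the stream created above
    PartitionKey='resource-usage',
    Data=json.dumps({
        'event_type': 'resource-usage',
        'message': 'my-lambda-handler',
        'memory_used': 512,
        'run_time': 1.25,
        'max_memory': 1024,
        'prev_memory_tier': 768,
    }).encode('utf-8'),
)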
Example #6
            "2012-10-17",
            "Statement": [{
                "Sid": "LambdaWriteToQueue",
                "Effect": "Allow",
                "Principal": {
                    "AWS": GetAtt("LambdaExecutionRole", "Arn")
                },
                "Action": "SQS:*",
                "Resource": GetAtt(sqsqueue, "Arn")
            }]
        }))

kinesis_stream = template.add_resource(
    kinesis.Stream(
        "CapturedDataKinesisStream",
        Name=Ref(kinesis_param),
        ShardCount=1,
        RetentionPeriodHours=24))

kinesis_consumer = template.add_resource(
    Function(
        "ReadKinesisAndPutQueueFunction",
        Code=Code(
            S3Bucket=Ref(s3_bucket),
            S3Key=Ref(s3_key),
            S3ObjectVersion=Ref(s3_object_version_id)),
        Handler="lambda_function.lambda_handler",
        Role=GetAtt("LambdaExecutionRole", "Arn"),
        Runtime="python3.6",
        MemorySize=Ref(memory_size),
        Timeout=Ref(timeout)))
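The ReadKinesisAndPutQueueFunction above points at lambda_function.lambda_handler in an S3-hosted package that is not part of this excerpt. A minimal sketch of what such a handler could look like; the QUEUE_URL environment variable is an assumption of this sketch, not something defined in the template:

import base64
import os

import boto3

sqs = boto3.client('sqs')


def lambda_handler(event, context):
    # Kinesis delivers record payloads base64-encoded; decode each one and
    # forward it to the SQS queue named by the (assumed) QUEUE_URL variable.
    for record in event['Records']:
        payload = base64.b64decode(record['kinesis']['data']).decode('utf-8')
        sqs.send_message(QueueUrl=os.environ['QUEUE_URL'], MessageBody=payload)
    return {'records_forwarded': len(event['Records'])}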
Example #7
import yaml
from pkg_resources import resource_string

from awacs.aws import Statement, Allow, Action
from troposphere import Template
import troposphere.iam as iam
import troposphere.kinesis as kinesis

import cloudformation.troposphere.utils as utils

cfg = yaml.safe_load(resource_string('config', 'kinesis_cross_account_cfg.yml'))

STACK_NAME = cfg['stack_name']

template = Template()
description = 'Stack containing kinesis and a lambda writing to another account'
template.add_description(description)
template.add_version('2010-09-09')

kinesis_stream = template.add_resource(
    kinesis.Stream('DevStream',
                   Name=cfg['kinesis']['stream_name'],
                   ShardCount=cfg['kinesis']['shard_count']))

lambda_execution_role = template.add_resource(
    iam.Role(
        'ExecutionRole',
        Path='/',
        Policies=[
            iam.Policy(
                PolicyName='KinesisToFirehosePolicy',
                PolicyDocument={
                    "Version":
                    "2012-10-17",
                    "Statement": [
                        Statement(Sid='Logs',
                                  Effect=Allow,