Example #1
 def cloudwatch_log_adder(self, name, metric_filter=None, lambda_name=None):
     log_group = logs.LogGroup(name, LogGroupName=name)
     if metric_filter:
         self.template.add_resource(
             logs.MetricFilter(name + "filter",
                               DependsOn=name,
                               LogGroupName=name,
                               FilterPattern=metric_filter,
                               MetricTransformations=[
                                   logs.MetricTransformation(
                                       name + "transform",
                                       MetricName=name,
                                       MetricNamespace=name,
                                       MetricValue="1")
                               ]))
     if lambda_name:
         self.template.add_resource(
             logs.SubscriptionFilter(name + "subscribe",
                                     DependsOn=name,
                                     LogGroupName=name,
                                     DestinationArn=GetAtt(
                                         lambda_name, "Arn"),
                                     FilterPattern=metric_filter))
     self.template.add_resource(log_group)
     return log_group
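A note on context: the snippets collected here are fragments, so most of them assume that a troposphere Template instance (t, template, or self.template) and the relevant imports already exist in the surrounding module. A minimal, self-contained sketch of the basic pattern, with an illustrative group name and retention period, might look like this:

from troposphere import Template, logs

template = Template()
log_group = template.add_resource(
    logs.LogGroup(
        "ExampleLogGroup",            # logical ID within the template
        LogGroupName="/example/app",  # physical log group name (illustrative)
        RetentionInDays=14,
    ))
print(template.to_json())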
Example #2
 def create_log_group(self):
     t = self.template
     t.add_resource(logs.LogGroup(RUN_LOGS, Condition="CreateRunLogsGroup"))
     t.add_output(
         Output("RunLogs",
                Value=Ref(RUN_LOGS),
                Condition="CreateRunLogsGroup"))
Example #3
def create_queue_worker_log_group_resource(template, queue_worker_log_group_name_variable):
    return template.add_resource(
        logs.LogGroup(
            'QueueWorkerLogGroup',
            LogGroupName=queue_worker_log_group_name_variable,
            RetentionInDays=7
        )
    )
Example #4
def create_scheduler_log_group_resource(template, scheduler_log_group_name_variable):
    return template.add_resource(
        logs.LogGroup(
            'SchedulerLogGroup',
            LogGroupName=scheduler_log_group_name_variable,
            RetentionInDays=7
        )
    )
Example #5
    def build_group(self, t):

        group = t.add_resource(logs.LogGroup("{}LogGroup".format(self.name)))

        t.add_output(
            [Output("{}LogGroup".format(self.name), Value=Ref(group))])

        return group
Example #6
def create_api_log_group_resource(template, api_log_group_name_variable):
    return template.add_resource(
        logs.LogGroup(
            'ApiLogGroup',
            LogGroupName=api_log_group_name_variable,
            RetentionInDays=7
        )
    )
Example #7
 def create_log_group(self):
     t = self.template
     log_group = t.add_resource(
         logs.LogGroup(
             'LogGroup',
             LogGroupName='-'.join(['FargateLogGroup',
                                    self.vars['Family']]),
             RetentionInDays=self.vars['LogRetention'],
         ))
     return log_group
Example #8
    def resources(self, stack: Stack) -> list[AWSObject]:
        # API logical id
        logical_id = name_to_id(self.name)

        result = []

        # Create a log group for the API
        result.append(
            logs.LogGroup(logical_id + "LogGroup", LogGroupName=self.name))

        # Create the API itself
        api_params = {
            "Description": self.description,
            "ProtocolType": "HTTP",
            "Name": self.name,
            "DisableExecuteApiEndpoint": self.disable_execute_api_endpoint,
        }
        result.append(apigatewayv2.Api(name_to_id(self.name), **api_params))

        # Declare the default stage
        result.append(
            self.declare_stage(stage_name="$default",
                               log_arn=GetAtt(logical_id + "LogGroup", "Arn")))

        # Declare one integration
        result.append(
            apigatewayv2.Integration(
                logical_id + "Integration",
                ApiId=Ref(logical_id),
                IntegrationType="AWS_PROXY",
                IntegrationUri=self.lambda_arn,
                PayloadFormatVersion="2.0",
            ))

        # Declare the routes
        for route in self.route_list:
            result += self.declare_route(route=route,
                                         integration=Ref(logical_id +
                                                         "Integration"))

        # Declare the domain
        if self.domain_name is not None:
            assert self.hosted_zone_id is not None
            result += self.declare_domain(
                domain_name=self.domain_name,
                hosted_zone_id=self.hosted_zone_id,
                stage_name="$default",
            )

        # Declare the authorizers
        for auth_name, auth_params in self.authorizers.items():
            result.append(
                apigatewayv2.Authorizer(name_to_id(auth_name), **auth_params))

        return result
Example #9
    def configure_awslogs(self, t, container_definition):
        logs_group = t.add_resource(logs.LogGroup(self.resource_name_format % ('CloudWatchLogsGroup'), RetentionInDays=90))

        container_definition.LogConfiguration = ecs.LogConfiguration(
            LogDriver="awslogs",
            Options={
                "awslogs-group": Ref(logs_group),
                "awslogs-region": Region,
                "awslogs-datetime-format": self.logs_datetime_format
            }
        )
Example #10
    def create_template(self):
        t = self.template
        variables = self.get_variables()

        self.log_group = t.add_resource(
            logs.LogGroup(
                FLOW_LOG_GROUP_NAME,
                RetentionInDays=variables["Retention"],
            ))

        t.add_output(
            Output("%sName" % FLOW_LOG_GROUP_NAME, Value=Ref(self.log_group)))
        t.add_output(
            Output("%sArn" % FLOW_LOG_GROUP_NAME,
                   Value=GetAtt(self.log_group, "Arn")))

        self.role = t.add_resource(
            iam.Role(CLOUDWATCH_ROLE_NAME,
                     AssumeRolePolicyDocument=flowlogs_assumerole_policy(),
                     Path="/",
                     Policies=[
                         TropoPolicy(
                             PolicyName="vpc_cloudwatch_flowlog_policy",
                             PolicyDocument=vpc_flow_log_cloudwatch_policy(
                                 GetAtt(self.log_group, "Arn")),
                         ),
                     ]))

        t.add_output(
            Output("%sName" % CLOUDWATCH_ROLE_NAME, Value=Ref(self.role)))
        role_arn = GetAtt(self.role, "Arn")
        t.add_output(Output("%sArn" % CLOUDWATCH_ROLE_NAME, Value=role_arn))

        self.log_stream = t.add_resource(
            ec2.FlowLog(
                FLOW_LOG_STREAM_NAME,
                DeliverLogsPermissionArn=role_arn,
                LogGroupName=Ref(FLOW_LOG_GROUP_NAME),
                ResourceId=variables["VpcId"],
                ResourceType="VPC",
                TrafficType=variables["TrafficType"],
            ))

        t.add_output(
            Output("%sName" % FLOW_LOG_STREAM_NAME,
                   Value=Ref(self.log_stream)))
Example #11
def flow_logs(t, vpc_objects):
    vpc_flow_log_role = t.add_resource(
        Role("vpcflowlogrole",
             AssumeRolePolicyDocument=PolicyDocument(Statement=[
                 Statement(Effect=Allow,
                           Action=[Action("sts", "AssumeRole")],
                           Principal=Principal("Service",
                                               "vpc-flow-logs.amazonaws.com"))
             ]),
             Policies=[
                 Policy(PolicyName="vpc_flow_logs_policy",
                        PolicyDocument=PolicyDocument(
                            Id="vpc_flow_logs_policy",
                            Version="2012-10-17",
                            Statement=[
                                Statement(Effect=Allow,
                                          Action=[
                                              Action("logs", "CreateLogGroup"),
                                              Action("logs",
                                                     "CreateLogStream"),
                                              Action("logs", "PutLogEvents"),
                                              Action("logs",
                                                     "DescribeLogGroups"),
                                              Action("logs",
                                                     "DescribeLogStreams")
                                          ],
                                          Resource=["arn:aws:logs:*:*:*"])
                            ]))
             ]))
    t.add_resource(
        logs.LogGroup(
            'VPCLogGroup',
            LogGroupName='VPCFlowLog',
            #DependsOn="vpcflowlogrole",
            RetentionInDays=7))
    t.add_resource(
        ec2.FlowLog(
            'VPCFlowLog',
            DeliverLogsPermissionArn=GetAtt(vpc_flow_log_role, "Arn"),
            LogGroupName='VPCFlowLog',
            ResourceId=Ref(vpc_objects['vpc']),
            ResourceType='VPC',
            #DependsOn="VPCLogGroup",
            TrafficType='ALL'))
    return t, vpc_objects
Example #12
    def add_resources_and_outputs(self):
        """Add resources and outputs to template."""
        template = self.template
        variables = self.get_variables()

        logsloggroup = template.add_resource(
            logs.LogGroup('CloudWatchLogGroup',
                          LogGroupName=variables['LogGroupName'].ref,
                          RetentionInDays=variables['LogRetentionDays'].ref))

        template.add_output(
            Output('{}Arn'.format(logsloggroup.title),
                   Description='CloudWatch Logs log group ARN',
                   Value=GetAtt(logsloggroup, 'Arn'),
                   Export=Export(
                       Sub('${AWS::StackName}-%sArn' % logsloggroup.title))))

        template.add_output(
            Output('{}Name'.format(logsloggroup.title),
                   Description='CloudWatch Logs log group name',
                   Value=Ref(logsloggroup)))
Example #13
    Description="KMS alias ARN for lambda",

))

plain_text = t.add_parameter(Parameter(
    "PlainText",
    Type="String",
    Description="Text that you want to encrypt ( Hello World )",
    Default="Hello World",
    NoEcho=True
))

# Create loggroup
log_group_ssm = t.add_resource(logs.LogGroup(
    "LogGroupSsm",
    LogGroupName=Join("", ["/aws/lambda/", Join("-", [Ref("AWS::StackName"), "ssm"])]),
    RetentionInDays=14
))

log_group_get_ssm_value = t.add_resource(logs.LogGroup(
    "LogGroupGetSsmValue",
    LogGroupName=Join("", ["/aws/lambda/", Join("-", [Ref("AWS::StackName"), "get-ssm-value"])]),
    RetentionInDays=14
))

log_group_simple = t.add_resource(logs.LogGroup(
    "LogGroupSimple",
    LogGroupName=Join("", ["/aws/lambda/", Join("-", [Ref("AWS::StackName"), "simple"])]),
    RetentionInDays=14
))
Example #14
    def add_resources(self):
        """Add resources to template."""
        class EcsServiceWithHealthCheckGracePeriodSeconds(ecs.Service):
            """ECS Service class with HealthCheckGracePeriodSeconds added."""

            props = ecs.Service.props
            props['HealthCheckGracePeriodSeconds'] = (positive_integer, False)

        pkg_version = pkg_resources.get_distribution('troposphere').version
        if LooseVersion(pkg_version) < LooseVersion('2.1.3'):
            ecs_service = EcsServiceWithHealthCheckGracePeriodSeconds
        else:
            ecs_service = ecs.Service

        template = self.template
        variables = self.get_variables()

        ecstaskrole = template.add_resource(
            iam.Role('EcsTaskRole',
                     AssumeRolePolicyDocument=get_ecs_task_assumerole_policy(),
                     RoleName=variables['EcsTaskRoleName'].ref))

        loggroup = template.add_resource(
            logs.LogGroup(
                'CloudWatchLogGroup',
                LogGroupName=Join('', [
                    '/ecs/', variables['ContainerName'].ref, '-',
                    variables['EnvironmentName'].ref
                ]),
                RetentionInDays=variables['EcsCloudWatchLogRetention'].ref))

        ecscontainerdef = ecs.ContainerDefinition(
            Image=Join('', [
                Ref('AWS::AccountId'), '.dkr.ecr.',
                Ref('AWS::Region'), '.amazonaws.com/',
                variables['ContainerName'].ref, '-',
                variables['EnvironmentName'].ref
            ]),
            LogConfiguration=ecs.LogConfiguration(LogDriver='awslogs',
                                                  Options={
                                                      'awslogs-group':
                                                      Ref(loggroup),
                                                      'awslogs-region':
                                                      Ref('AWS::Region'),
                                                      'awslogs-stream-prefix':
                                                      'ecs'
                                                  }),
            Name=Join('-', [
                variables['ContainerName'].ref,
                variables['EnvironmentName'].ref
            ]),
            PortMappings=[
                ecs.PortMapping(ContainerPort=variables['ContainerPort'].ref)
            ])

        ecstaskdef = template.add_resource(
            ecs.TaskDefinition(
                'EcsTaskDef',
                ContainerDefinitions=[ecscontainerdef],
                Cpu=variables['TaskCpu'].ref,
                Memory=variables['TaskMem'].ref,
                ExecutionRoleArn=variables['EcsTaskExecIamRoleArn'].ref,
                TaskRoleArn=Ref(ecstaskrole),
                Family=Join('-', [
                    variables['ContainerName'].ref,
                    variables['EnvironmentName'].ref
                ]),
                NetworkMode='awsvpc',
                RequiresCompatibilities=['FARGATE']))

        ecscluster = template.add_resource(
            ecs.Cluster('EcsCluster',
                        ClusterName=Join('-', [
                            variables['ContainerName'].ref,
                            variables['EnvironmentName'].ref
                        ])))

        ecsservice = template.add_resource(
            ecs_service(
                'EcsService',
                Cluster=Join('-', [
                    variables['ContainerName'].ref,
                    variables['EnvironmentName'].ref
                ]),
                DeploymentConfiguration=ecs.DeploymentConfiguration(
                    MinimumHealthyPercent=variables['MinHealthyPercent'].ref,
                    MaximumPercent=variables['MaxPercent'].ref),
                DesiredCount=variables['NumberOfTasks'].ref,
                HealthCheckGracePeriodSeconds=variables[
                    'HealthCheckGracePeriod'].ref,
                LaunchType='FARGATE',
                LoadBalancers=[
                    ecs.LoadBalancer(
                        ContainerName=Join('-', [
                            variables['ContainerName'].ref,
                            variables['EnvironmentName'].ref
                        ]),
                        ContainerPort=variables['ContainerPort'].ref,
                        TargetGroupArn=variables['TargetGroupArn'].ref)
                ],
                NetworkConfiguration=ecs.NetworkConfiguration(
                    AwsvpcConfiguration=ecs.AwsvpcConfiguration(
                        SecurityGroups=variables['SgIdList'].ref,
                        Subnets=variables['Subnets'].ref)),
                ServiceName=Join('-', [
                    variables['ContainerName'].ref,
                    variables['EnvironmentName'].ref
                ]),
                TaskDefinition=Ref(ecstaskdef)))

        template.add_output(
            Output("{}Arn".format(ecstaskrole.title),
                   Description="ECS Task Role ARN",
                   Value=GetAtt(ecstaskrole, "Arn"),
                   Export=Export(
                       Sub('${AWS::StackName}-%sArn' % ecstaskrole.title))))

        template.add_output(
            Output("{}Name".format(ecstaskrole.title),
                   Description="ECS Task Role Name",
                   Value=Ref(ecstaskrole)))

        template.add_output(
            Output("{}Arn".format(ecsservice.title),
                   Description="ARN of the ECS Service",
                   Value=Ref(ecsservice),
                   Export=Export(
                       Sub('${AWS::StackName}-%sArn' % ecsservice.title))))

        template.add_output(
            Output("{}Name".format(ecsservice.title),
                   Description="Name of the ECS Service",
                   Value=GetAtt(ecsservice, "Name"),
                   Export=Export(
                       Sub('${AWS::StackName}-%sName' % ecsservice.title))))

        template.add_output(
            Output("{}Arn".format(ecscluster.title),
                   Description="ECS Cluster ARN",
                   Value=GetAtt(ecscluster, "Arn"),
                   Export=Export(
                       Sub('${AWS::StackName}-%sArn' % ecscluster.title))))

        template.add_output(
            Output("{}Arn".format(ecstaskdef.title),
                   Description="ARN of the Task Definition",
                   Value=Ref(ecstaskdef),
                   Export=Export(
                       Sub('${AWS::StackName}-%sArn' % ecstaskdef.title))))
Example #15
def generate(dry_run, file_location=None):
    """CloudFormation template generator to apply to all accounts which configures log sources to publish to the centralized log target(s) specified"""
    t = Template()
    t.add_version("2010-09-09")
    t.add_description(
        "UCSD Log Source AWS CloudFormation Template - this template is meant to be applied to pre-approved accounts and configures CloudWatch Logs to forward to the UCSD log aggregation process."
    )

    #
    # CloudWatch Logs setup - Set up shipping to 'centralized' account
    #

    # Parameters
    delivery_stream_arn = t.add_parameter(
        Parameter('LogDeliveryDestinationArn',
                  Type="String",
                  Default="",
                  Description="ARN of the Log Destination to send logs to."))

    # resources
    cwl_group_retention = t.add_parameter(
        Parameter("LogGroupRetentionInDays",
                  Type="Number",
                  Description=
                  "Number of days to retain logs in the CloudWatch Log Group",
                  MinValue=1,
                  MaxValue=14,
                  Default=1))

    cwl_group = t.add_resource(
        cwl.LogGroup('SecurityLogShippingGroup',
                     LogGroupName=security_log_shipping_group_name,
                     RetentionInDays=Ref(cwl_group_retention)))

    cwl_subscription = t.add_resource(
        cwl.SubscriptionFilter('SecurityLogShippingFilter',
                               DestinationArn=Ref(delivery_stream_arn),
                               LogGroupName=Ref(cwl_group),
                               FilterPattern=""))

    cwl_primary_stream = t.add_resource(
        cwl.LogStream('PrimaryLogStream',
                      LogGroupName=Ref(cwl_group),
                      LogStreamName='PrimaryLogStream'))

    # Create IAM role to allow VPC Flow Logs within this account to push data to CloudWatch Logs per https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html#flow-logs-iam
    vpc_flow_log_iam_role = t.add_resource(
        iam.Role('VPCFlowLogToCWLIAMRole',
                 AssumeRolePolicyDocument=Policy(Statement=[
                     Statement(Effect=Allow,
                               Action=[AssumeRole],
                               Principal=Principal(
                                   "Service", "vpc-flow-logs.amazonaws.com"))
                 ])))

    vpc_flow_log_policies = t.add_resource(
        iam.PolicyType(
            'VPCFlowLogToCWLPolicy',
            PolicyName='vpcflowlogtocwlpolicy20180213',
            Roles=[Ref(vpc_flow_log_iam_role)],
            PolicyDocument=Policy(Statement=[
                Statement(Effect=Allow,
                          Action=[
                              CreateLogGroup, CreateLogStream, PutLogEvents,
                              DescribeLogGroups, DescribeLogStreams
                          ],
                          Resource=["*"])
            ])))

    # outputs
    t.add_output(
        Output(
            'CloudWatchLogGroupName',
            Value=Ref(cwl_group),
            Description=
            "Name of the CloudWatch Log Group created to flow logs to the centralized logging stream."
        ))

    t.add_output(
        Output(
            'CloudWatchLogGroupARN',
            Value=GetAtt(cwl_group, "Arn"),
            Description=
            "ARN of the CloudWatch Log Group created to flow logs to the centralized logging stream."
        ))

    t.add_output(
        Output(
            'VPCFlowLogDeliveryLogsPermissionArn',
            Value=GetAtt(vpc_flow_log_iam_role, "Arn"),
            Description=
            "ARN of the IAM role for VPC Flow Logs to use within this account to ship VPC flow logs through."
        ))

    #
    # CloudTrail setup - ship to S3 in 'central account' as well as cloudtrail logs if it'll let us :)
    #

    # parameters
    ct_is_logging = t.add_parameter(
        Parameter(
            'CloudTrailIsLogging',
            Type="String",
            Default="false",
            AllowedValues=["true", "false"],
            Description=
            "Flag indicating that CloudTrail is configured to send logs."))

    ct_include_global = t.add_parameter(
        Parameter(
            'CloudTrailIncludeGlobal',
            Type="String",
            Default="true",
            AllowedValues=["true", "false"],
            Description=
            "Flag indicating that CloudTrail is configured to capture global service events."
        ))

    ct_multi_region = t.add_parameter(
        Parameter(
            'CloudTrailMultiRegion',
            Type="String",
            Default="true",
            AllowedValues=["true", "false"],
            Description=
            "Flag indicating that CloudTrail is to be configured in multi-region mode"
        ))

    ct_s3_key_prefix = t.add_parameter(
        Parameter('CloudTrailKeyPrefix',
                  Type='String',
                  Default='',
                  Description='Key name prefix for logs being sent to S3'))

    ct_bucket_name = t.add_parameter(
        Parameter(
            'CloudTrailBucketName',
            Type='String',
            Default='',
            Description='Name of the S3 Bucket for delivery of CloudTrail logs'
        ))
    # resources

    ct_trail = t.add_resource(
        ct.Trail("SecurityTrail",
                 TrailName=Join("-", ["SecurityTrail", Region]),
                 S3BucketName=Ref(ct_bucket_name),
                 S3KeyPrefix=Ref(ct_s3_key_prefix),
                 IncludeGlobalServiceEvents=Ref(ct_include_global),
                 IsMultiRegionTrail=Ref(ct_multi_region),
                 IsLogging=Ref(ct_is_logging)))

    # outputs
    t.add_output(
        Output(
            'CloudTrailARN',
            Description=
            "ARN of the CloudTrail Trail configured for this log source deployment.",
            Value=GetAtt(ct_trail, "Arn")))

    # Splunk Addon User and Policies per http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWSpermissions
    addon_user = t.add_resource(
        iam.User('SplunkAddonUser', UserName='******'))

    # http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWSpermissions#Configure_CloudTrail_permissions
    ct_splunk_user_policy = t.add_resource(
        iam.PolicyType('cloudtrailSplunkPolicy',
                       PolicyName='cloudtrailsplunkuser20180213',
                       Roles=[Ref(vpc_flow_log_iam_role)],
                       PolicyDocument=Policy(Statement=[
                           Statement(Effect=Allow,
                                     Action=[
                                         asqs.GetQueueAttributes,
                                         asqs.ListQueues, asqs.ReceiveMessage,
                                         asqs.GetQueueUrl, asqs.DeleteMessage,
                                         as3.Action('Get*'),
                                         as3.Action('List*'),
                                         as3.Action('Delete*')
                                     ],
                                     Resource=["*"])
                       ])))

    # http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWSpermissions#Configure_CloudWatch_permissions
    cw_splunk_user_policy = t.add_resource(
        iam.PolicyType('cloudwatchSplunkPolicy',
                       PolicyName='cloudwatchsplunkuser20180213',
                       Roles=[Ref(vpc_flow_log_iam_role)],
                       PolicyDocument=Policy(Statement=[
                           Statement(Effect=Allow,
                                     Action=[
                                         aas.Action("Describe*"),
                                         acw.Action("Describe*"),
                                         acw.Action("Get*"),
                                         acw.Action("List*"),
                                         asns.Action("Get*"),
                                         asns.Action("List*")
                                     ],
                                     Resource=['*'])
                       ])))

    if dry_run:
        print(t.to_json())
    else:
        save_path = file_location if file_location else os.path.join(
            log_aggregation_cf, 'log_sources.json')
        with open(save_path, 'w') as f:
            f.write(t.to_json())
Example #16
from troposphere import Join, iam, logs

from .common import arn_prefix
from .template import template

container_log_group = logs.LogGroup(
    "ContainerLogs",
    template=template,
    RetentionInDays=365,
    DeletionPolicy="Retain",
)


logging_policy = iam.Policy(
    PolicyName="LoggingPolicy",
    PolicyDocument=dict(
        Statement=[dict(
            Effect="Allow",
            Action=[
                "logs:Create*",
                "logs:PutLogEvents",
            ],
            Resource=Join("", [
                arn_prefix,
                ":logs:*:*:*",  # allow logging to any log group
            ]),
        )],
    ),
)
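The logging_policy above is only an inline policy definition; the snippet does not show which role it is attached to. A hedged sketch of attaching it through the Policies property (the role name and assume-role document below are illustrative assumptions):

from troposphere import iam

# Hypothetical role; the real consumer of logging_policy is not shown in the source.
instance_role = iam.Role(
    "ContainerInstanceRole",
    template=template,
    AssumeRolePolicyDocument=dict(
        Statement=[dict(
            Effect="Allow",
            Principal=dict(Service=["ec2.amazonaws.com"]),
            Action=["sts:AssumeRole"],
        )],
    ),
    Policies=[logging_policy],
)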
Example #17
from troposphere import logs
from troposphere import GetAtt, Output, Parameter, Ref, Tags, Template

import cloudformation.utils as utils

STACK_NAME = 'LogsStack'

template = Template()
description = 'Stack containing EMR with conda in all nodes'
template.add_description(description)
template.add_version('2010-09-09')

generic_emr_log_group = template.add_resource(
    logs.LogGroup(
        'GenericEMR',
        LogGroupName='/emr/generic_cluster/apps',
    ))
# Outputs
template.add_output([
    Output("GenericEMRLogGroup",
           Description="Log Group for Generic EMR",
           Value=Ref(generic_emr_log_group)),
])

template_json = template.to_json(indent=4)
print(template_json)

stack_args = {
    'StackName': STACK_NAME,
    'TemplateBody': template_json,
    'Capabilities': [
Example #18
#
# Don't expire condition
#
t.add_condition(
    'NotExpireCondition',
    Equals(Ref(param_retention), -1)
)

#
# Resources
#

log_group = t.add_resource(logs.LogGroup(
    'LogGroup',
    RetentionInDays=If('NotExpireCondition',
                       Ref(AWS_NO_VALUE),
                       Ref(param_retention))
))

log_delivery_role = t.add_resource(iam.Role(
    'LogDeliveryRole',
    AssumeRolePolicyDocument=Policy(
        Statement=[Statement(
            Effect=Allow,
            Action=[awacs.sts.AssumeRole],
            Principal=Principal('Service', ['vpc-flow-logs.amazonaws.com'])
        )]
    ),
    Policies=[
        iam.Policy(
            PolicyName='AllowReadSrcBucket',
Example #19
# Create the Docker repository.
docker_repository = template.add_resource(
    ecr.Repository(
        'DockerRepository',
        RepositoryName=Ref(docker_repository_name),
        LifecyclePolicy=ecr.LifecyclePolicy(
            LifecyclePolicyText='{"rules":[{"rulePriority":1,"description":"Remove untagged images older than 1 week","selection":{"tagStatus":"untagged","countType":"sinceImagePushed","countUnit":"days","countNumber":7},"action":{"type":"expire"}}]}'
        )
    )
)

# Create the ECS task definitions.
api_log_group = template.add_resource(
    logs.LogGroup(
        'ApiLogGroup',
        LogGroupName=Ref(api_log_group_name),
        RetentionInDays=7
    )
)

queue_worker_log_group = template.add_resource(
    logs.LogGroup(
        'QueueWorkerLogGroup',
        LogGroupName=Ref(queue_worker_log_group_name),
        RetentionInDays=7
    )
)

scheduler_log_group = template.add_resource(
    logs.LogGroup(
        'SchedulerLogGroup',
        LogGroupName=Ref(scheduler_log_group_name),
        RetentionInDays=7
    )
)
Example #20
def showeach_log(showeach_function):
  return logs.LogGroup(
      'ShoweachLog',
      LogGroupName=lambda_log_name(showeach_function),
      RetentionInDays=30)
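The lambda_log_name helper is not included in this snippet. Given the CloudWatch Logs convention for Lambda, it presumably builds a "/aws/lambda/<function-name>" group name; a purely illustrative sketch of such a helper:

from troposphere import Join, Ref

def lambda_log_name(function):
    # Assumed implementation: Lambda functions log to /aws/lambda/<function name>.
    return Join("", ["/aws/lambda/", Ref(function)])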
Example #21
deploy_condition = "Deploy"
template.add_condition(deploy_condition, Not(Equals(application_revision, "")))

image = Join("", [
    Ref(AWS_ACCOUNT_ID),
    ".dkr.ecr.",
    Ref(AWS_REGION),
    ".amazonaws.com/",
    Ref(repository),
    ":",
    application_revision,
])

web_log_group = logs.LogGroup(
    "WebLogs",
    template=template,
    RetentionInDays=365,
    DeletionPolicy="Retain",
)

template.add_output(
    Output("WebLogsGroup",
           Description="Web application log group",
           Value=GetAtt(web_log_group, "Arn")))

log_configuration = LogConfiguration(LogDriver="awslogs",
                                     Options={
                                         'awslogs-group': Ref(web_log_group),
                                         'awslogs-region': Ref(AWS_REGION),
                                     })

# ECS task
Example #22
def archiveeach_log(archiveeach_function):
  return logs.LogGroup(
      'ArchiveeachLog',
      LogGroupName=lambda_log_name(archiveeach_function),
      RetentionInDays=30)
Example #23
r_server_user_data['runcmd'].append(
    ['/usr/bin/sudo', '/usr/bin/apt', 'install', '-y', 'libpq-dev', 'r-base']
)


webserver_user_data = copy.deepcopy(user_data)
webserver_user_data['packages'].extend([
    'nginx',
    'uwsgi',
    'uwsgi-plugin-python',
])

stack = stack.StackTemplate()

stack.add_resource(logs.LogGroup(
    'LogGroupConservationIntl',
    LogGroupName='conservationintl',
))

logs_writer_policy_doc = Policy(
    Version="2012-10-17",
    Statement=[
        Statement(
            Action=[
                awacs_logs.CreateLogGroup,
                awacs_logs.CreateLogStream,
                awacs_logs.PutLogEvents,
                awacs_logs.DescribeLogStreams,
            ],
            Effect=Allow,
            Resource=["arn:aws:logs:*:*:*"],
        ),
Example #24
PubSubnet1RouteTableAssociation = t.add_resource(ec2.SubnetRouteTableAssociation(
    "PubSubnet1RouteTableAssociation",
    SubnetId=Ref(PubSubnetAz1),
    RouteTableId=Ref(RouteViaIgw)
))

PubSubnet2RouteTableAssociation = t.add_resource(ec2.SubnetRouteTableAssociation(
    "PubSubnet2RouteTableAssociation",
    SubnetId=Ref(PubSubnetAz2),
    RouteTableId=Ref(RouteViaIgw)
))

# Create CloudWatch Log Group
CWLogGroup = t.add_resource(logs.LogGroup(
    "CWLogGroup",
))

# Create the Task Execution Role
TaskExecutionRole = t.add_resource(iam.Role(
    "TaskExecutionRole",
    AssumeRolePolicyDocument={
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"Service": ["ecs-tasks.amazonaws.com"]},
            "Action": ["sts:AssumeRole"]
        }]},
))

# Create the Fargate Execution Policy (access to ECR and CW Logs)
TaskExecutionPolicy = t.add_resource(iam.PolicyType(
Example #25
 def gen_log_group(self):
     self.log_group_name = "conducto-demo-log-group"
     log_group = logs.LogGroup("LogGroup",
                               LogGroupName=self.log_group_name,
                               RetentionInDays=1)
     self.template.add_resource(log_group)
Example #26
)

docker_repository_resource = template.add_resource(
  ecr.Repository(
    'DockerRepository',
    RepositoryName=docker_repository_name_variable,
    LifecyclePolicy=ecr.LifecyclePolicy(
      LifecyclePolicyText='{"rules":[{"rulePriority":1,"description":"Remove untagged images older than 1 week","selection":{"tagStatus":"untagged","countType":"sinceImagePushed","countUnit":"days","countNumber":7},"action":{"type":"expire"}}]}'
    )
  )
)

api_log_group_resource = template.add_resource(
  logs.LogGroup(
    'ApiLogGroup',
    LogGroupName=api_log_group_name_variable,
    RetentionInDays=7
  )
)

queue_worker_log_group_resource = template.add_resource(
  logs.LogGroup(
    'QueueWorkerLogGroup',
    LogGroupName=queue_worker_log_group_name_variable,
    RetentionInDays=7
  )
)

scheduler_log_group_resource = template.add_resource(
  logs.LogGroup(
    'SchedulerLogGroup',
    LogGroupName=scheduler_log_group_name_variable,
    RetentionInDays=7
  )
)
Example #27
        Type="String",
        Description="KMS alias ARN for lambda",
    ))

plain_text = t.add_parameter(
    Parameter("PlainText",
              Type="String",
              Description="Text that you want to encrypt ( Hello World )",
              Default="Hello World",
              NoEcho=True))

# Create loggroup
log_group_ssm = t.add_resource(
    logs.LogGroup(
        "LogGroupSsm",
        LogGroupName=Join(
            "", ["/aws/lambda/",
                 Join("-", [Ref("AWS::StackName"), "ssm"])]),
        RetentionInDays=14))

log_group_get_ssm_value = t.add_resource(
    logs.LogGroup("LogGroupGetSsmValue",
                  LogGroupName=Join("", [
                      "/aws/lambda/",
                      Join("-", [Ref("AWS::StackName"), "get-ssm-value"])
                  ]),
                  RetentionInDays=14))

log_group_simple = t.add_resource(
    logs.LogGroup(
        "LogGroupSimple",
        LogGroupName=Join(
            "", ["/aws/lambda/",
                 Join("-", [Ref("AWS::StackName"), "simple"])]),
        RetentionInDays=14))
Example #28
              Type="CommaDelimitedList",
              Description=
              "Location of lambda zip file. ie: mybucket,datadog_lambda.zip"))

log_level = t.add_parameter(
    Parameter("LogLevel",
              Type="String",
              AllowedValues=["DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"],
              Default="INFO"))

# Create loggroup
log_group = t.add_resource(
    logs.LogGroup(
        "LogGroup",
        LogGroupName=Join("", [
            "/aws/lambda/",
            Join("-", ["datadoglambda", Ref("AWS::StackName")])
        ]),
        RetentionInDays=14))

kms_key_arn = ImportValue(Sub("${EncryptLambdaStack}-KmsKeyArn"))
lambda_arn = ImportValue(Sub("${EncryptLambdaStack}-EncryptLambdaArn"))

datadog_lambda_role = t.add_resource(
    iam.Role(
        "DatadogLambdaRole",
        AssumeRolePolicyDocument=Policy(
            Version="2012-10-17",
            Statement=[
                Statement(Effect=Allow,
                          Principal=Principal("Service",
Example #29
 awslambdafunction = template.add_resource(
     awslambda.Function(
         "{custom_resource_name}Function".format(
             custom_resource_name=custom_resource_name_cfn),
         Code=awslambda.Code(
             S3Bucket=troposphere.Ref(s3_bucket),
             S3Key=troposphere.Join(
                 '', [troposphere.Ref(s3_path), zip_filename]),
         ),
         Role=GetAtt(role, 'Arn'),
         **custom_resource.troposphere_class.function_settings()))
 template.add_resource(
     logs.LogGroup(
         "{custom_resource_name}Logs".format(
             custom_resource_name=custom_resource_name_cfn),
         LogGroupName=troposphere.Join(
             '', ["/aws/lambda/",
                  troposphere.Ref(awslambdafunction)]),
         RetentionInDays=90,
     ))
 template.add_output(
     Output(
         "{custom_resource_name}ServiceToken".format(
             custom_resource_name=custom_resource_name_cfn),
         Value=GetAtt(awslambdafunction, 'Arn'),
         Description=
         "ServiceToken for the {custom_resource_name} custom resource".
         format(custom_resource_name='.'.join(custom_resource.name)),
         Export=Export(
             Sub("${{AWS::StackName}}-{custom_resource_name}ServiceToken".
                 format(custom_resource_name=custom_resource_name_cfn)))))
 template.add_output(
Example #30
def csvimport_log(csvimport_function):
  return logs.LogGroup(
      'CsvimportLog',
      LogGroupName=lambda_log_name(csvimport_function),
      RetentionInDays=30)