Example #1
def create_log_group(stack, name, custom_name=False, retention_in_days=7):
    """Add a log group."""
    lg = LogGroup('{0}LogGroup'.format(name.replace('-', '')),
                  RetentionInDays=retention_in_days)

    if custom_name is True:
        lg.LogGroupName = '{0}LogGroup'.format(name)

    return stack.stack.add_resource(lg)
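
A minimal usage sketch (not part of the original example): create_log_group expects a wrapper object that exposes a troposphere Template as `.stack`; the `Stack` class below is a hypothetical stand-in.

# Hypothetical usage sketch; the Stack wrapper is an assumption and only needs
# to expose a troposphere Template as `.stack`.
from troposphere import Template


class Stack:
    def __init__(self):
        self.stack = Template()


stack = Stack()
create_log_group(stack, "my-service", custom_name=True, retention_in_days=30)
print(stack.stack.to_json())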
Example #2
    def test_loggroup_retention(self):
        for days in [7, "7"]:
            LogGroup(
                "LogGroupWithDeletionPolicy",
                RetentionInDays=days,
            )

        for days in [6, "6"]:
            with self.assertRaises(ValueError):
                LogGroup(
                    "LogGroupWithDeletionPolicy",
                    RetentionInDays=days,
                )
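
The ValueError asserted above comes from troposphere validating RetentionInDays against the retention periods CloudWatch Logs supports. A minimal sketch of pre-validating input follows; the VALID_RETENTION_DAYS list is a hand-written subset of those periods, not imported from troposphere.

# Sketch only: coerce an arbitrary value to the closest supported retention period
# before building the resource, instead of letting LogGroup raise ValueError.
from troposphere.logs import LogGroup

VALID_RETENTION_DAYS = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]


def log_group_with_closest_retention(title, days):
    closest = min(VALID_RETENTION_DAYS, key=lambda valid: abs(valid - int(days)))
    return LogGroup(title, RetentionInDays=closest)


log_group_with_closest_retention("AppLogGroup", 6)  # builds with 7 instead of raising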
Example #3
def create_log_group_template():
    template = Template(
        Description="Child stack to maintain Lambda@Edge log groups")

    log_group_name = template.add_parameter(
        Parameter("LogGroupName", Type="String"))
    log_retention_days = template.add_parameter(
        Parameter(
            "LogRetentionDays",
            Type="Number",
            Description=
            "Days to keep Lambda@Edge logs. 0 means indefinite retention.",
            AllowedValues=[0] + CLOUDWATCH_LOGS_RETENTION_OPTIONS,
        ))

    retention_defined = add_condition(template, "RetentionDefined",
                                      Not(Equals(Ref(log_retention_days), 0)))

    template.add_resource(
        LogGroup(
            "EdgeLambdaLogGroup",
            LogGroupName=Ref(log_group_name),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    return template
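
A short usage note (not in the original): rendering the child template shows the If/NoValue switch on RetentionInDays in the generated resource; to_yaml() is a standard troposphere Template method.

# Render the Lambda@Edge log group child template defined above.
print(create_log_group_template().to_yaml())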
Example #4
    def set_default_cluster_config(self, root_stack):
        """
        Function to get the default defined ECS Cluster configuration

        :return: cluster
        :rtype: troposphere.ecs.Cluster
        """
        self.log_group = LogGroup(
            "EcsExecLogGroup",
            LogGroupName=Sub(f"ecs/execute-logs/${{{AWS_STACK_NAME}}}"),
            RetentionInDays=120,
        )
        self.cfn_resource = Cluster(
            CLUSTER_T,
            ClusterName=Ref(AWS_STACK_NAME),
            CapacityProviders=FARGATE_PROVIDERS,
            DefaultCapacityProviderStrategy=DEFAULT_STRATEGY,
            Configuration=ClusterConfiguration(
                ExecuteCommandConfiguration=ExecuteCommandConfiguration(
                    Logging="OVERRIDE",
                    LogConfiguration=ExecuteCommandLogConfiguration(
                        CloudWatchLogGroupName=Ref(self.log_group), ),
                )),
            Metadata=metadata,
        )
        root_stack.stack_template.add_resource(self.log_group)
        root_stack.stack_template.add_resource(self.cfn_resource)
        self.capacity_providers = FARGATE_PROVIDERS
        self.default_strategy_providers = [
            cap.CapacityProvider for cap in DEFAULT_STRATEGY
        ]
        self.cluster_identifier = Ref(self.cfn_resource)
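
The module-level constants used above (CLUSTER_T, FARGATE_PROVIDERS, DEFAULT_STRATEGY, metadata) are defined elsewhere in the source project; the definitions below are assumptions sketched for illustration only.

# Assumed definitions for the constants referenced above (illustrative, not the
# project's actual values).
from troposphere.ecs import CapacityProviderStrategyItem

CLUSTER_T = "EcsCluster"
FARGATE_PROVIDERS = ["FARGATE", "FARGATE_SPOT"]
DEFAULT_STRATEGY = [
    CapacityProviderStrategyItem(CapacityProvider="FARGATE", Weight=2, Base=1),
    CapacityProviderStrategyItem(CapacityProvider="FARGATE_SPOT", Weight=1),
]
metadata = {"Source": "compose-x"}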
Example #5
def create_log_group(
    family: ComposeFamily,
    group_name,
    grant_task_role_access: bool = False,
) -> LogGroup:
    """
    Function to create a new Log Group for the services
    :return:
    """
    if LOG_GROUP_T not in family.template.resources:
        svc_log = LogGroup(
            LOG_GROUP_T,
            RetentionInDays=Ref(LOG_GROUP_RETENTION),
            LogGroupName=group_name,
        )
        add_resource(family.template, svc_log)

    else:
        svc_log = family.template.resources[LOG_GROUP_T]
    roles = [family.iam_manager.exec_role.name]
    if grant_task_role_access:
        roles.append(family.iam_manager.task_role.name)
    define_iam_permissions(
        "logs",
        family,
        family.template,
        "CloudWatchLogsAccess",
        LOGGING_IAM_PERMISSIONS_MODEL,
        access_definition="LogGroupOwner",
        resource_arns=[GetAtt(svc_log, "Arn")],
        roles=roles,
    )

    return svc_log
Example #6
 def _create_log_group(self):
     log_group = LogGroup(
         camelcase("{self.env}LogGroup".format(**locals())),
         LogGroupName="{self.env}-logs".format(**locals()),
         RetentionInDays=365)
     self.template.add_resource(log_group)
     return None
Example #7
def add_vpc_flow(template, vpc, boundary=None):
    """
    Function to add VPC Flow Log to log VPC

    :param troposphere.Template template:
    :param vpc: The VPC Object
    :param str boundary:
    """
    if boundary and boundary.startswith("arn:aws"):
        perm_boundary = boundary
    elif boundary and not boundary.startswith("arn:aws"):
        perm_boundary = Sub(
            f"arn:${{{AWS_PARTITION}}}:iam::${{{AWS_ACCOUNT_ID}}}:policy/{boundary}"
        )
    else:
        perm_boundary = Ref(AWS_NO_VALUE)
    log_group = template.add_resource(
        LogGroup(
            "FlowLogsGroup",
            RetentionInDays=14,
            LogGroupName=Sub(f"flowlogs/vpc/${{{vpc.title}}}"),
        ))
    role = template.add_resource(
        Role(
            "FlowLogsRole",
            AssumeRolePolicyDocument=service_role_trust_policy("ec2"),
            PermissionsBoundary=perm_boundary,
            Policies=[
                Policy(
                    PolicyName="CloudWatchAccess",
                    PolicyDocument={
                        "Version":
                        "2012-10-17",
                        "Statement": [{
                            "Sid":
                            "AllowCloudWatchLoggingToSpecificLogGroup",
                            "Effect":
                            "Allow",
                            "Action": [
                                "logs:CreateLogStream",
                                "logs:PutLogEvents",
                            ],
                            "Resource":
                            GetAtt(log_group, "Arn"),
                        }],
                    },
                )
            ],
        ))
    template.add_resource(
        FlowLog(
            "VpcFlowLogs",
            DeliverLogsPermissionArn=GetAtt(role, "Arn"),
            LogGroupName=Ref(log_group),
            LogDestinationType="cloud-watch-logs",
            MaxAggregationInterval=600,
            ResourceId=Ref(vpc),
            ResourceType="VPC",
            TrafficType="ALL",
        ))
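
A hypothetical call site for add_vpc_flow (the VPC title, CIDR, and boundary name are made up for illustration; the imports used inside add_vpc_flow are assumed to be in scope).

# Usage sketch for the flow-log helper above.
from troposphere import Template
from troposphere.ec2 import VPC

template = Template()
vpc = template.add_resource(VPC("AppVpc", CidrBlock="10.0.0.0/16"))
add_vpc_flow(template, vpc, boundary="developer-permissions-boundary")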
Example #8
def create_new_stream(stream: DeliveryStream) -> None:
    """
    Imports the settings from CFN Definitions and define the CFN Resource from properties

    :param DeliveryStream stream:
    """
    props = import_record_properties(
        stream.properties,
        CfnDeliveryStream,
        ignore_missing_required=True,
        ignore_missing_sub_required=True,
    )
    stream.cfn_resource = CfnDeliveryStream(stream.logical_name, **props)
    stream.log_group = LogGroup(
        f"{stream.logical_name}LogGroup",
        LogGroupName=Sub(f"firehose/${{STACK_ID}}/{stream.name}",
                         STACK_ID=STACK_ID_SHORT),
    )
    if (stream.cfn_resource.DeliveryStreamType == "KinesisStreamAsSource"
            and stream.cfn_resource.DeliveryStreamEncryptionConfigurationInput
            != NoValue):
        LOG.error(
            f"{stream.module.res_key}.{stream.name} -"
            " You can only have ServerSide encryption with DirectPut DeliveryStream. Removing."
        )
        stream.cfn_resource.DeliveryStreamEncryptionConfigurationInput = NoValue
    set_replace_iam_role(stream)
    values_validation(stream)
    stream.init_outputs()
    stream.generate_outputs()
Example #9
 def add_engine_log_group(self):
     '''
     Add Anchore Engine log group to template
     '''
     self.cfn_template.add_resource(
         LogGroup(title=constants.ENG_LOG,
                  LogGroupName='demo-anchore-engine',
                  RetentionInDays=int('7')))
     return self.cfn_template
Example #10
 def add_database_log_group(self):
     '''
     Add Anchore Database log group to template
     '''
     self.cfn_template.add_resource(
         LogGroup(title=constants.DB_LOG,
                  LogGroupName='demo-anchore-database',
                  RetentionInDays=int('7')))
     return self.cfn_template
Example #11
    def add_figure_lambda(self):
        ## Now add to a lambda function:
        function = Function(
            'FigLambda',
            CodeUri='../../protocols',
            Runtime='python3.6',
            Handler='log.eventshandler',
            Description='Lambda Function logging start/stop for NCAP',
            MemorySize=128,
            Timeout=90,
            Role=
            'arn:aws:iam::739988523141:role/lambda_dataflow',  ## TODO: Create this in template
            Events={})
        figurelamb = self.template.add_resource(function)
        ## Attach specific permissions to invoke this lambda function as well.
        cwpermission = Permission('CWPermissions',
                                  Action='lambda:InvokeFunction',
                                  Principal='events.amazonaws.com',
                                  FunctionName=Ref(figurelamb))
        self.template.add_resource(cwpermission)

        ## Because this lambda function gets invoked by an unknown target, we need to take care of its log group separately.

        figloggroup = LogGroup('FignameLogGroup',
                               LogGroupName=Sub("/aws/lambda/${FigLambda}"))
        self.template.add_resource(figloggroup)

        ## Now we need to configure this function as a potential target.
        ## Initialize role to send events to cloudwatch
        with open('policies/cloudwatch_events_assume_role_doc.json', 'r') as f:
            cloudwatchassume_role_doc = json.load(f)
        ## Now get the actual policy:
        with open('policies/cloudwatch_events_policy_doc.json', 'r') as f:
            cloudwatch_policy_doc = json.load(f)
        cloudwatchpolicy = ManagedPolicy(
            "CloudwatchBusPolicy",
            Description=Join(" ", [
                "Base Policy for all lambda function roles in",
                Ref(AWS_STACK_NAME)
            ]),
            PolicyDocument=cloudwatch_policy_doc)
        self.template.add_resource(cloudwatchpolicy)
        ## create the role:
        cwrole = Role("CloudWatchBusRole",
                      AssumeRolePolicyDocument=cloudwatchassume_role_doc,
                      ManagedPolicyArns=[Ref(cloudwatchpolicy)])
        cwrole_attached = self.template.add_resource(cwrole)
        self.cwrole = cwrole_attached
        return figurelamb
Example #12
    def link_resources(self):
        ## Attach specific permissions to invoke this lambda function as well.
        cwpermission = Permission('CCPermissions',
                                  Action='lambda:InvokeFunction',
                                  Principal='codecommit.amazonaws.com',
                                  FunctionName=Ref(self.commitlambda),
                                  SourceArn=GetAtt(self.repo, 'Arn'),
                                  SourceAccount=Ref(AWS_ACCOUNT_ID))
        self.template.add_resource(cwpermission)

        ## Because this lambda function gets invoked by an unknown target, we need to take care of its log group separately.

        figloggroup = LogGroup('FignameLogGroup',
                               LogGroupName=Sub("/aws/lambda/${CodeLambda}"))
        self.template.add_resource(figloggroup)
Example #13
 def set_log_group(self, cluster_name, root_stack, log_configuration):
     self.log_group = LogGroup(
         "EcsExecLogGroup",
         LogGroupName=Sub(
             "/ecs/execute-logs/${CLUSTER_NAME}",
             CLUSTER_NAME=cluster_name,
         ),
         RetentionInDays=120
         if not keyisset("LogGroupRetentionInDays", self.parameters) else
         get_closest_valid_log_retention_period(
             self.parameters["LogGroupRetentionInDays"]),
         KmsKeyId=GetAtt(self.log_key.cfn_resource, "Arn") if isinstance(
             self.log_key, KmsKey) else Ref(AWS_NO_VALUE),
         DependsOn=[self.log_key.cfn_resource.title] if isinstance(
             self.log_key, KmsKey) else [],
     )
     root_stack.stack_template.add_resource(self.log_group)
     log_configuration["CloudWatchLogGroupName"] = Ref(self.log_group)
     if isinstance(self.log_key, KmsKey):
         log_configuration["CloudWatchEncryptionEnabled"] = True
Example #14
                       }]
                   })
        ],
        AssumeRolePolicyDocument={
            "Statement": [{
                "Action": ["sts:AssumeRole"],
                "Effect": "Allow",
                "Principal": {
                    "Service": "cloudtrail.amazonaws.com"
                }
            }]
        },
    ))

# CloudWatch Log Group where CloudTrail will log
CloudTrailLogs = t.add_resource(LogGroup("CloudTrailLogs"))

CloudTrail = t.add_resource(
    Trail(
        "CloudTrail",
        DependsOn=["LogPolicy"],
        # ARN of a log group to which CloudTrail logs will be delivered
        CloudWatchLogsLogGroupArn=GetAtt("CloudTrailLogs", "Arn"),
        # Role that Amazon CloudWatch Logs assumes to write logs to a log group
        CloudWatchLogsRoleArn=GetAtt("CloudTrailLoggingRole", "Arn"),
        # Indicates whether CloudTrail validates the integrity of log files
        EnableLogFileValidation=True,
        # Whether the trail is publishing events from global services, such as IAM, to the log files
        IncludeGlobalServiceEvents=True,
        # Indicates whether the CloudTrail trail is currently logging AWS API calls
        IsLogging=True,
Example #15
 def test_loggroup_deletionpolicy_is_preserved(self):
     log_group = LogGroup("LogGroupWithDeletionPolicy",
                          DeletionPolicy=Retain)
     self.assertIn("DeletionPolicy", log_group.to_dict())
Example #16
    def attach(self):
        """Attached an IAM Role, IAM Policy, and EC2 Instance Profile to a
        CloudFormation template and returns the template."
        """
        self.template.add_resource(
            iam.Role(
                'RoleResource',
                AssumeRolePolicyDocument={
                    "Version":
                    "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Principal": {
                            "Service":
                            ["ec2.amazonaws.com", "opsworks.amazonaws.com"]
                        },
                        "Action": ["sts:AssumeRole"]
                    }]
                },
                ManagedPolicyArns=[
                    "arn:aws:iam::aws:policy/AmazonS3FullAccess",
                ],
                Path="/"))

        # Inline policy for the given role defined in the Roles attribute.
        self.template.add_resource(
            iam.PolicyType(
                'LogPolicyResource',
                PolicyName=Ref(self.template.parameters['LogPolicyName']),
                PolicyDocument={
                    "Version":
                    "2012-10-17",
                    "Statement": [{
                        "Effect":
                        "Allow",
                        "Resource": ["*"],
                        "Action": [
                            "logs:CreateLogGroup", "logs:CreateLogStream",
                            "logs:PutLogEvents", "logs:DescribeLogStreams"
                        ]
                    }]
                },
                Roles=[Ref(self.template.resources['RoleResource'])]))

        # Inline policy for the given role defined in the Roles attribute.
        self.template.add_resource(
            iam.PolicyType(
                'DefaultPolicyResource',
                PolicyName='DefaultPolicyName',
                PolicyDocument={
                    "Version":
                    "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Resource": ["*"],
                        "Action": ["cloudformation:*"]
                    }]
                },
                Roles=[Ref(self.template.resources['RoleResource'])]))

        self.template.add_resource(
            iam.InstanceProfile(
                'InstanceProfileResource',
                Path="/",
                Roles=[Ref(self.template.resources['RoleResource'])]))

        self.template.add_resource(
            LogGroup('LogGroupResource',
                     RetentionInDays=Ref(
                         self.template.parameters['LogRetentionDays']),
                     DeletionPolicy='Delete'))

        self.template.add_output(
            Output("LogGroupName",
                   Description="LogGroupName (Physical ID)",
                   Value=Ref(self.template.resources['LogGroupResource'])))

        return self.template
Example #17
def build_template(sierrafile):
    template = Template()

    template.add_version('2010-09-09')

    template.add_metadata(build_interface(sierrafile.extra_params))

    parameters = AttrDict(

        # Network Parameters

        vpc_cidr=template.add_parameter(Parameter(
            'VpcCidr',
            Type='String',
            Default='192.172.0.0/16',
        )),
        subnet1_cidr=template.add_parameter(Parameter(
            'Subnet1Cidr',
            Type='String',
            Default='192.172.1.0/24',
        )),
        subnet2_cidr=template.add_parameter(Parameter(
            'Subnet2Cidr',
            Type='String',
            Default='192.172.2.0/24',
        )),

        # ECS Parameters

        cluster_size=template.add_parameter(Parameter(
            'ClusterSize',
            Type='Number',
            Default=2,
        )),
        instance_type=template.add_parameter(Parameter(
            'InstanceType',
            Type='String',
            Default='t2.medium'
        )),
        key_name=template.add_parameter(Parameter(
            'KeyName',
            Type='AWS::EC2::KeyPair::KeyName',
        )),
        image_id=template.add_parameter(Parameter(
            'ImageId',
            Type='AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>',
            Default=(
                '/aws/service/ecs/optimized-ami'
                '/amazon-linux/recommended/image_id'
            ),
            Description=(
              'An SSM parameter that resolves to a valid AMI ID.'
              ' This is the AMI that will be used to create ECS hosts.'
              ' The default is the current recommended ECS-optimized AMI.'
            )
        )),

        # Other Parameters

        github_token=template.add_parameter(Parameter(
            'GitHubToken',
            Type='String',
            NoEcho=True,
        )),
    )

    # Environment Variable Parameters

    for env_var_param, env_var_name in sierrafile.extra_params:
        template.add_parameter(Parameter(
            env_var_param,
            Type='String',
            NoEcho=True,
        ))

    # Resource Declarations

    # # Network

    network_vpc = template.add_resource(VPC(
        'NetworkVpc',
        CidrBlock=Ref(parameters.vpc_cidr),
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    network_ig = template.add_resource(InternetGateway(
        'NetworkInternetGateway',
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    vpc_attach = template.add_resource(VPCGatewayAttachment(
        'NetworkInternetGatewayAttachment',
        InternetGatewayId=Ref(network_ig),
        VpcId=Ref(network_vpc),
    ))

    route_table = template.add_resource(RouteTable(
        'NetworkRouteTable',
        VpcId=Ref(network_vpc),
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    template.add_resource(Route(
        'NetworkDefaultRoute',
        DependsOn=[vpc_attach.title],
        RouteTableId=Ref(route_table),
        DestinationCidrBlock='0.0.0.0/0',
        GatewayId=Ref(network_ig),
    ))

    subnet1 = template.add_resource(Subnet(
        'NetworkSubnet1',
        VpcId=Ref(network_vpc),
        AvailabilityZone=Select(0, GetAZs()),
        MapPublicIpOnLaunch=True,
        CidrBlock=Ref(parameters.subnet1_cidr),
        Tags=Tags(Name=Sub('${AWS::StackName} (Public)')),
    ))

    subnet2 = template.add_resource(Subnet(
        'NetworkSubnet2',
        VpcId=Ref(network_vpc),
        AvailabilityZone=Select(1, GetAZs()),
        MapPublicIpOnLaunch=True,
        CidrBlock=Ref(parameters.subnet2_cidr),
        Tags=Tags(Name=Sub('${AWS::StackName} (Public)')),
    ))

    template.add_resource(SubnetRouteTableAssociation(
        'NetworkSubnet1RouteTableAssociation',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet1),
    ))

    template.add_resource(SubnetRouteTableAssociation(
        'NetworkSubnet2RouteTableAssociation',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet2),
    ))

    elb = template.add_resource(LoadBalancer(
        ELB_NAME,
        Name=Sub('${AWS::StackName}-elb'),
        Type='network',
        Subnets=[Ref(subnet1), Ref(subnet2)],
    ))

    # # Cluster

    ecs_host_role = template.add_resource(Role(
        'EcsHostRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal('Service', 'ec2.amazonaws.com'),
                Action=[awacs.sts.AssumeRole]
            )],
        ),
        ManagedPolicyArns=[
            'arn:aws:iam::aws:policy/'
            'service-role/AmazonEC2ContainerServiceforEC2Role'
        ]
    ))

    ecs_host_profile = template.add_resource(InstanceProfile(
        'EcsHostInstanceProfile',
        Roles=[Ref(ecs_host_role)]
    ))

    ecs_host_sg = template.add_resource(SecurityGroup(
        'EcsHostSecurityGroup',
        GroupDescription=Sub('${AWS::StackName}-hosts'),
        VpcId=Ref(network_vpc),
        SecurityGroupIngress=[SecurityGroupRule(
            CidrIp='0.0.0.0/0',
            IpProtocol='-1'
        )]
    ))

    cluster = template.add_resource(Cluster(
        'EcsCluster',
        ClusterName=Ref('AWS::StackName')
    ))

    autoscaling_name = 'EcsHostAutoScalingGroup'
    launch_conf_name = 'EcsHostLaunchConfiguration'

    launch_conf = template.add_resource(LaunchConfiguration(
        launch_conf_name,
        ImageId=Ref(parameters.image_id),
        InstanceType=Ref(parameters.instance_type),
        IamInstanceProfile=Ref(ecs_host_profile),
        KeyName=Ref(parameters.key_name),
        SecurityGroups=[Ref(ecs_host_sg)],
        UserData=Base64(Sub(
            '#!/bin/bash\n'
            'yum install -y aws-cfn-bootstrap\n'
            '/opt/aws/bin/cfn-init -v'
            ' --region ${AWS::Region}'
            ' --stack ${AWS::StackName}'
            f' --resource {launch_conf_name}\n'
            '/opt/aws/bin/cfn-signal -e $?'
            ' --region ${AWS::Region}'
            ' --stack ${AWS::StackName}'
            f' --resource {autoscaling_name}\n'
        )),
        Metadata={
            'AWS::CloudFormation::Init': {
                'config': {
                    'commands': {
                        '01_add_instance_to_cluster': {
                            'command': Sub(
                                f'echo ECS_CLUSTER=${{{cluster.title}}}'
                                f' > /etc/ecs/ecs.config'
                            ),
                        }
                    },
                    'files': {
                        '/etc/cfn/cfn-hup.conf': {
                            'mode': 0o400,
                            'owner': 'root',
                            'group': 'root',
                            'content': Sub(
                                '[main]\n'
                                'stack=${AWS::StackId}\n'
                                'region=${AWS::Region}\n'
                            ),
                        },
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf': {
                            'content': Sub(
                                '[cfn-auto-reloader-hook]\n'
                                'triggers=post.update\n'
                                'path=Resources.ContainerInstances.Metadata'
                                '.AWS::CloudFormation::Init\n'
                                'action=/opt/aws/bin/cfn-init -v'
                                ' --region ${AWS::Region}'
                                ' --stack ${AWS::StackName}'
                                f' --resource {launch_conf_name}\n'
                            ),
                        },
                    },
                    'services': {
                        'sysvinit': {
                            'cfn-hup': {
                                'enabled': True,
                                'ensureRunning': True,
                                'files': [
                                    '/etc/cfn/cfn-hup.conf',
                                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                                ]
                            }
                        }
                    }
                }
            }
        }
    ))

    autoscaling_group = template.add_resource(AutoScalingGroup(
        autoscaling_name,
        VPCZoneIdentifier=[Ref(subnet1), Ref(subnet2)],
        LaunchConfigurationName=Ref(launch_conf),
        DesiredCapacity=Ref(parameters.cluster_size),
        MinSize=Ref(parameters.cluster_size),
        MaxSize=Ref(parameters.cluster_size),
        Tags=[{
            'Key': 'Name',
            'Value': Sub('${AWS::StackName} - ECS Host'),
            'PropagateAtLaunch': True,
        }],
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M'),
        ),
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                MinInstancesInService=1,
                MaxBatchSize=1,
                PauseTime='PT5M',
                WaitOnResourceSignals=True,
            ),
        ),
    ))

    # # Services

    task_role = template.add_resource(Role(
        'TaskExecutionRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal('Service', 'ecs-tasks.amazonaws.com'),
                Action=[awacs.sts.AssumeRole],
            )],
        ),
        ManagedPolicyArns=[
            'arn:aws:iam::aws:policy/'
            'service-role/AmazonECSTaskExecutionRolePolicy'
        ],
    ))

    artifact_bucket = template.add_resource(Bucket(
        'ArtifactBucket',
        DeletionPolicy='Retain',
    ))

    codebuild_role = template.add_resource(Role(
        'CodeBuildServiceRole',
        Path='/',
        AssumeRolePolicyDocument=PolicyDocument(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal(
                        'Service', 'codebuild.amazonaws.com'
                    ),
                    Action=[
                        awacs.sts.AssumeRole,
                    ],
                ),
            ],
        ),
        Policies=[Policy(
            PolicyName='root',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.ssm.GetParameters,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.s3.GetObject,
                            awacs.s3.PutObject,
                            awacs.s3.GetObjectVersion,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.logs.CreateLogGroup,
                            awacs.logs.CreateLogStream,
                            awacs.logs.PutLogEvents,
                        ],
                    ),
                ],
            ),
        )],
    ))

    codepipeline_role = template.add_resource(Role(
        'CodePipelineServiceRole',
        Path='/',
        AssumeRolePolicyDocument=PolicyDocument(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal(
                        'Service', 'codepipeline.amazonaws.com'
                    ),
                    Action=[
                        awacs.sts.AssumeRole,
                    ],
                ),
            ],
        ),
        Policies=[Policy(
            PolicyName='root',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Resource=[
                            Sub(f'${{{artifact_bucket.title}.Arn}}/*')
                        ],
                        Effect=Allow,
                        Action=[
                            awacs.s3.GetBucketVersioning,
                            awacs.s3.GetObject,
                            awacs.s3.GetObjectVersion,
                            awacs.s3.PutObject,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.ecs.DescribeServices,
                            awacs.ecs.DescribeTaskDefinition,
                            awacs.ecs.DescribeTasks,
                            awacs.ecs.ListTasks,
                            awacs.ecs.RegisterTaskDefinition,
                            awacs.ecs.UpdateService,
                            awacs.codebuild.StartBuild,
                            awacs.codebuild.BatchGetBuilds,
                            awacs.iam.PassRole,
                        ],
                    ),
                ],
            ),
        )],
    ))

    log_group = template.add_resource(LogGroup(
        'LogGroup',
        LogGroupName=Sub('/ecs/${AWS::StackName}'),
    ))

    if any(conf.pipeline.enable for conf in sierrafile.services.values()):
        project = template.add_resource(Project(
            'CodeBuildProject',
            Name=Sub('${AWS::StackName}-build'),
            ServiceRole=Ref(codebuild_role),
            Artifacts=Artifacts(Type='CODEPIPELINE'),
            Source=Source(Type='CODEPIPELINE'),
            Environment=Environment(
                ComputeType='BUILD_GENERAL1_SMALL',
                Image='aws/codebuild/docker:17.09.0',
                Type='LINUX_CONTAINER',
            ),
        ))

    for name, settings in sierrafile.services.items():
        task_definition = template.add_resource(TaskDefinition(
            f'{name}TaskDefinition',
            RequiresCompatibilities=['EC2'],
            Cpu=str(settings.container.cpu),
            Memory=str(settings.container.memory),
            NetworkMode='bridge',
            ExecutionRoleArn=Ref(task_role.title),
            ContainerDefinitions=[
                ContainerDefinition(
                    Name=f'{name}',
                    Image=settings.container.image,
                    Memory=str(settings.container.memory),
                    Essential=True,
                    PortMappings=[
                        PortMapping(
                            ContainerPort=settings.container.port,
                            Protocol='tcp',
                        ),
                    ],
                    Environment=[
                        troposphere.ecs.Environment(Name=k, Value=v)
                        for k, v in sierrafile.env_vars.items()
                        if k in settings.get('environment', [])
                    ],
                    LogConfiguration=LogConfiguration(
                        LogDriver='awslogs',
                        Options={
                            'awslogs-region': Ref('AWS::Region'),
                            'awslogs-group': Ref(log_group.title),
                            'awslogs-stream-prefix': Ref('AWS::StackName'),
                        },
                    ),
                ),
            ],
        ))

        target_group = template.add_resource(TargetGroup(
            f'{name}TargetGroup',
            Port=settings.container.port,
            Protocol='TCP',
            VpcId=Ref(network_vpc),
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-{name}')),
        ))

        listener = template.add_resource(Listener(
            f'{name}ElbListener',
            LoadBalancerArn=Ref(elb),
            Port=settings.container.port,
            Protocol='TCP',
            DefaultActions=[
                Action(TargetGroupArn=Ref(target_group), Type='forward')
            ],
        ))

        service = template.add_resource(Service(
            f'{name}Service',
            Cluster=Ref(cluster),
            ServiceName=f'{name}-service',
            DependsOn=[autoscaling_group.title, listener.title],
            DesiredCount=settings.container.count,
            TaskDefinition=Ref(task_definition),
            LaunchType='EC2',
            LoadBalancers=[
                troposphere.ecs.LoadBalancer(
                    ContainerName=f'{name}',
                    ContainerPort=settings.container.port,
                    TargetGroupArn=Ref(target_group),
                ),
            ],
        ))

        if settings.pipeline.enable:
            pipeline = template.add_resource(Pipeline(
                f'{name}Pipeline',
                RoleArn=GetAtt(codepipeline_role, 'Arn'),
                ArtifactStore=ArtifactStore(
                    Type='S3',
                    Location=Ref(artifact_bucket),
                ),
                Stages=[
                    Stages(
                        Name='Source',
                        Actions=[Actions(
                            Name='Source',
                            ActionTypeId=ActionTypeId(
                                Category='Source',
                                Owner='ThirdParty',
                                Version='1',
                                Provider='GitHub',
                            ),
                            OutputArtifacts=[
                                OutputArtifacts(Name=f'{name}Source'),
                            ],
                            RunOrder='1',
                            Configuration={
                                'Owner': settings.pipeline.user,
                                'Repo': settings.pipeline.repo,
                                'Branch': settings.pipeline.branch,
                                'OAuthToken': Ref(parameters.github_token),
                            },
                        )],
                    ),
                    Stages(
                        Name='Build',
                        Actions=[Actions(
                            Name='Build',
                            ActionTypeId=ActionTypeId(
                                Category='Build',
                                Owner='AWS',
                                Version='1',
                                Provider='CodeBuild',
                            ),
                            InputArtifacts=[
                                InputArtifacts(Name=f'{name}Source'),
                            ],
                            OutputArtifacts=[
                                OutputArtifacts(Name=f'{name}Build'),
                            ],
                            RunOrder='1',
                            Configuration={
                                'ProjectName': Ref(project),
                            },
                        )],
                    ),
                    Stages(
                        Name='Deploy',
                        Actions=[Actions(
                            Name='Deploy',
                            ActionTypeId=ActionTypeId(
                                Category='Deploy',
                                Owner='AWS',
                                Version='1',
                                Provider='ECS',
                            ),
                            InputArtifacts=[
                                InputArtifacts(Name=f'{name}Build')
                            ],
                            RunOrder='1',
                            Configuration={
                                'ClusterName': Ref(cluster),
                                'ServiceName': Ref(service),
                                'FileName': 'image.json',
                            },
                        )],
                    ),
                ],
            ))

            template.add_resource(Webhook(
                f'{name}CodePipelineWebhook',
                Name=Sub(f'${{AWS::StackName}}-{name}-webhook'),
                Authentication='GITHUB_HMAC',
                AuthenticationConfiguration=AuthenticationConfiguration(
                    SecretToken=Ref(parameters.github_token),
                ),
                Filters=[FilterRule(
                    JsonPath='$.ref',
                    MatchEquals=f'refs/heads/{settings.pipeline.branch}'
                )],
                TargetAction='Source',
                TargetPipeline=Ref(pipeline),
                TargetPipelineVersion=1,
                RegisterWithThirdParty=True,
            ))

    return template
Example #18
            S3Bucket=ImportValue(
                Join('-', [Ref(core_stack), 'LambdaCodeBucket-Ref'])),
            S3Key=Ref(request_encoding_lambda_code_key),
        ),
        Environment=Environment(
            Variables={
                'VIDEO_EVENTS_TABLE': _video_events_table,
                'PIPELINE_ID_PARAMETER': _pipeline_id_parameter,
            }),
        TracingConfig=TracingConfig(Mode='Active', ),
    ))

template.add_resource(
    LogGroup(
        "RequestEncodingLambdaLogGroup",
        LogGroupName=Join(
            '/', ['/aws/lambda', Ref(request_encoding_function)]),
        RetentionInDays=7,
    ))

request_encoding_topic = template.add_resource(
    Topic(
        'RequestEncodingTopic',
        Subscription=[
            Subscription(
                Protocol='sqs',
                Endpoint=GetAtt(request_encoding_queue, 'Arn'),
            ),
            Subscription(
                Protocol='lambda',
                Endpoint=GetAtt(request_encoding_function, 'Arn'),
            ),
Example #19
def create_template():
    template = Template(Description="User-defined code")

    deployment_id = template.add_parameter(
        Parameter(
            "DeploymentId",
            Type="String",
        )
    )

    vpc_id = template.add_parameter(
        Parameter(
            "VpcId",
            Type="String",
        )
    )

    subnet_ids = template.add_parameter(
        Parameter(
            "SubnetIds",
            Type="CommaDelimitedList",
        )
    )

    file_system_access_point_arn = template.add_parameter(
        Parameter(
            "FileSystemAccessPointArn",
            Type="String",
        )
    )

    image_uri = template.add_parameter(
        Parameter(
            "ImageUri",
            Type="String",
        )
    )

    security_group = template.add_resource(
        SecurityGroup(
            "SecurityGroup",
            GroupDescription=StackName,
            VpcId=Ref(vpc_id),
        )
    )

    role = template.add_resource(
        Role(
            "Role",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[sts.AssumeRole],
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                    ),
                ],
            ),
            Policies=[
                Policy(
                    PolicyName="vpc-access",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[
                                    ec2.CreateNetworkInterface,
                                    ec2.DescribeNetworkInterfaces,
                                    ec2.DeleteNetworkInterface,
                                    ec2.AssignPrivateIpAddresses,
                                    ec2.UnassignPrivateIpAddresses,
                                ],
                                Resource=["*"],
                            ),
                        ],
                    ),
                ),
            ],
        )
    )

    function, alias = common.add_versioned_lambda(
        template,
        Ref(deployment_id),
        Function(
            "Function",
            MemorySize=256,
            Role=GetAtt(role, "Arn"),
            VpcConfig=VPCConfig(
                SecurityGroupIds=[Ref(security_group)],
                SubnetIds=Ref(subnet_ids),
            ),
            FileSystemConfigs=[
                FileSystemConfig(
                    Arn=Ref(file_system_access_point_arn),
                    LocalMountPath="/mnt/storage",
                ),
            ],
            PackageType="Image",
            Code=Code(
                ImageUri=Ref(image_uri),
            ),
            ImageConfig=ImageConfig(
                Command=[
                    Join(":", (handler.__module__, handler.__name__)),
                ],
            ),
        ),
    )

    log_group = template.add_resource(
        LogGroup(
            "LogGroup",
            LogGroupName=Join("/", ["/aws/lambda", Ref(function)]),
            RetentionInDays=7,
        )
    )

    policy = template.add_resource(
        PolicyType(
            "Policy",
            PolicyName=Ref(function),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Resource=GetAtt(log_group, "Arn"),
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                    ),
                ],
            ),
            Roles=[Ref(role)],
        )
    )

    template.add_output(
        Output(
            "FunctionAliasArn",
            Value=Ref(alias),
        )
    )

    return template
Example #20
from troposphere import Template, Parameter, Ref, Join
from troposphere.logs import LogGroup

t = Template()

t.set_description("Log group for GoURMET gap fill evaluation tool")

environment = t.add_parameter(
    Parameter(
        "Environment",
        Description="The name of the environment.",
        Type="String",
    ))

logGroup = t.add_resource(
    LogGroup("logGroup",
             RetentionInDays=14,
             LogGroupName=Join(
                 "-", ["gourmet-gap-fill-evaluation",
                       Ref(environment)])))

print(t.to_json())
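
A possible follow-up, not part of the original script: deploying the rendered template with boto3 (stack name and parameter value are illustrative).

# Deploy the template with boto3 (sketch only).
import boto3

cloudformation = boto3.client("cloudformation")
cloudformation.create_stack(
    StackName="gourmet-gap-fill-evaluation-logs",
    TemplateBody=t.to_json(),
    Parameters=[{"ParameterKey": "Environment", "ParameterValue": "staging"}],
)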
Example #21
                          Action=[Action("rds", "DescribeDBInstances")],
                          Resource=["*"]),
                Statement(Effect=Allow,
                          Action=[Action("rds", "DescribeDBSnapshots")],
                          Resource=["arn:aws:rds:*:*:*"]),
                Statement(
                    Effect=Allow,
                    Action=[Action("rds", "RestoreDBInstanceToPointInTime")],
                    Resource=["*"]),
                Statement(Effect=Allow,
                          Action=[Action("rds", "CreateDBSnapshot")],
                          Resource=["*"])
            ]))
    ])

lg = LogGroup("LogGroupForRDSMacro", DeletionPolicy=Retain)

t.add_resource(lg)
t.add_resource(role)

log_destination = Destination('MyLogDestination',
                              DestinationName='destination-name',
                              RoleArn='role-arn',
                              TargetArn='target-arn',
                              DestinationPolicy='destination-policy')

dbInstanceMacro = Macro(
    title="DBInstanceManipulator",
    template=t,
    validation=False,
    Description=
Example #22
def __create_ecs():
    template = Template()

    desired_count = template.add_parameter(
        parameter=Parameter(title='DesiredCount', Default=1, Type='Number'))

    cpu = template.add_parameter(
        parameter=Parameter(title='Cpu', Default=256, Type='Number'))

    memory = template.add_parameter(
        parameter=Parameter(title='Memory', Default=512, Type='Number'))

    cluster = template.add_resource(resource=Cluster(title='SampleCluster', ))

    log_group = template.add_resource(resource=LogGroup(
        title='SampleLogGroup', LogGroupName='/aws/ecs/sample'))

    container_name = 'sample-nginx'

    task_definition = template.add_resource(resource=TaskDefinition(
        title='SampleTaskDefinition',
        Cpu=Ref(cpu),
        Family='sample-fargate-task',
        RequiresCompatibilities=['FARGATE'],
        Memory=Ref(memory),
        NetworkMode='awsvpc',
        ExecutionRoleArn=Sub(
            'arn:aws:iam::${AWS::AccountId}:role/ecsTaskExecutionRole'),
        ContainerDefinitions=[
            ContainerDefinition(
                Image='nginx:latest',
                Name=container_name,
                PortMappings=[
                    PortMapping(ContainerPort=80, HostPort=80, Protocol='tcp')
                ],
                LogConfiguration=LogConfiguration(
                    LogDriver='awslogs',
                    Options={
                        'awslogs-region': Ref('AWS::Region'),
                        'awslogs-group': Ref(log_group),
                        'awslogs-stream-prefix': 'nginx'
                    }))
        ]))

    template.add_resource(resource=Service(
        title='SampleService',
        ServiceName='sample-fargate',
        Cluster=Ref(cluster),
        DesiredCount=Ref(desired_count),
        TaskDefinition=Ref(task_definition),
        LaunchType='FARGATE',
        NetworkConfiguration=NetworkConfiguration(
            AwsvpcConfiguration=AwsvpcConfiguration(
                AssignPublicIp='ENABLED',
                SecurityGroups=[
                    ImportValue(ExportName.TASK_SECURITY_GROUP.value)
                ],
                Subnets=[
                    ImportValue(
                        CommonResource.ExportName.PUBLIC_SUBNET_A_ID.value),
                    ImportValue(
                        CommonResource.ExportName.PUBLIC_SUBNET_B_ID.value),
                ])),
        LoadBalancers=[
            EcsLoadBalancer(ContainerName=container_name,
                            ContainerPort=80,
                            TargetGroupArn=ImportValue(
                                ExportName.TARGET_GROUP.value))
        ]))
    output_template_file(template, 'ecs.yml')
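
output_template_file is a project-specific helper that is not shown in this example; one plausible implementation (an assumption, not the project's code) follows.

# Hypothetical implementation of the output_template_file helper used above.
def output_template_file(template, file_name):
    with open(file_name, 'w') as output:
        output.write(template.to_yaml())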
Example #23
            Effect=Allow,
            Principal=Principal("Service", FindInMap("Region2Principal", Ref("AWS::Region"), "EC2Principal")),
            Action=[
                Action("logs", "CreateLogGroup"),
                Action("logs", "CreateLogStream"),
                Action("logs", "PutLogEvents"),
                Action("logs", "DescribeLogStreams"),
            ],
            Resource=[Join("", [FindInMap("Region2ARNPrefix", Ref("AWS::Region"), "ARNPrefix"), "logs:*:*:*"])],
        ),
    ],
)

logrole = t.add_resource(Role(
    "logrole",
    AssumeRolePolicyDocument=policy,
  )
)

cfninstanceprofile = t.add_resource(InstanceProfile(
    "InstanceProfile",
    Roles=[Ref(logrole)]
))

t.add_resource(LogGroup(
    "CloudFormationLogs",
    RetentionInDays=7,
))

print(t.to_json())
Example #24
def create_template():
    t = Template(Description="Infrastructure for routezero")
    api_key = t.add_parameter(Parameter("ZerotierApiKey", Type="String", NoEcho=True))
    network_id = t.add_parameter(Parameter("ZerotierNetworkId", Type="String"))
    role = t.add_resource(
        Role(
            "Role",
            AssumeRolePolicyDocument=get_lambda_assumerole_policy(),
            Policies=[
                Policy(
                    PolicyName="cloudformation-route53-update",
                    PolicyDocument=PolicyDocument(
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.Action("*"),
                                    route53.Action("*"),
                                ],
                                Resource=["*"],
                            )
                        ]
                    ),
                )
            ],
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
            ],
        )
    )
    function = t.add_resource(
        CLIFunction(
            "Function",
            MemorySize=256,
            Timeout=60 * 15,
            Handler=".".join([routezero.__name__, routezero.handler.__name__]),
            Runtime="python3.6",
            Code=create_bundle(),
            Role=GetAtt(role, "Arn"),
            Environment=Environment(
                Variables={
                    "ZEROTIER_API_KEY": Ref(api_key),
                    "ZEROTIER_NETWORK_ID": Ref(network_id),
                    "ROUTE53_RECORD_STACK_NAME": Sub("${AWS::StackName}Records"),
                }
            ),
        )
    )
    log_group = t.add_resource(
        LogGroup(
            "LogGroup", LogGroupName=Sub("/aws/lambda/${Function}"), RetentionInDays=30
        )
    )
    permission = t.add_resource(
        Permission(
            "Permission",
            FunctionName=GetAtt(function, "Arn"),
            Principal="events.amazonaws.com",
            Action="lambda:InvokeFunction",
            SourceArn=Sub(
                "arn:${AWS::Partition}:events:${AWS::Region}:${AWS::AccountId}:rule/*"
            ),
            DependsOn=[log_group],
        )
    )
    rule = t.add_resource(
        Rule(
            "Rule",
            ScheduleExpression="rate(15 minutes)",
            Targets=[Target(Id=Ref(function), Arn=GetAtt(function, "Arn"))],
            DependsOn=[permission],
        )
    )
    return t
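
A one-line usage sketch (not in the original): the returned template renders directly to JSON.

# Print the routezero infrastructure template.
print(create_template().to_json())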
Example #25
from tropicalnest import TemplateStack
from troposphere import Parameter, Ref, Template
from troposphere.logs import LogGroup

master = Template("A test template with nested templates")

logs = Template("cloudwatch log groups")
log_group_name = Parameter("Name", Type="String")
log_group = LogGroup(
    "TestLogGroup",
    LogGroupName=Ref(log_group_name),
    RetentionInDays=30,
)
logs.add_parameter(log_group_name)
logs.add_resource(log_group)

ts = TemplateStack("master", master)
ts.add_template("Logs", logs, Parameters={'Name': 'test'})
ts.save('mybucket')
Example #26
 def test_loggroup_deletionpolicy_is_preserved(self):
     log_group = LogGroup(
         "LogGroupWithDeletionPolicy",
         DeletionPolicy=Retain
     )
     self.assertIn('DeletionPolicy', log_group.to_dict())
Example #27
def create_log_groups(domain, stack, props):
    """

    :param ecs_composex.opensearch.opensearch_stack.OpenSearchDomain domain:
    :param ecs_composex.common.stacks.ComposeXStack stack:
    :param dict props:
    :return:
    """
    opts = {}
    all_opts = [
        "SEARCH_SLOW_LOGS",
        "ES_APPLICATION_LOGS",
        "INDEX_SLOW_LOGS",
        "AUDIT_LOGS",
    ]
    opts_to_add = (domain.parameters["CreateLogGroups"] if isinstance(
        domain.parameters["CreateLogGroups"], list) else all_opts)
    groups = []
    for option in opts_to_add:
        group_name = Sub(
            f"opensearch/${{STACK_NAME}}/{domain.logical_name}/{option}",
            STACK_NAME=define_stack_name(stack.stack_template),
        )
        log_group = LogGroup(
            f"{domain.logical_name}{NONALPHANUM.sub('', option)}LogGroup",
            LogGroupName=group_name,
            RetentionInDays=30
            if not keyisset("RetentionInDays", domain.parameters) else
            get_closest_valid_log_retention_period(
                domain.parameters["RetentionInDays"]),
        )
        stack.stack_template.add_resource(log_group)
        groups.append(log_group)
        opts[option] = {
            "Enabled":
            True,
            "CloudWatchLogsLogGroupArn":
            Sub(f"arn:${{{AWS_PARTITION}}}:logs:${{{AWS_REGION}}}:${{{AWS_ACCOUNT_ID}}}:"
                f"log-group:${{{log_group.title}}}"),
        }
    if keyisset("CreateLogGroupsResourcePolicy", domain.parameters):
        logs_policy = ResourcePolicy(
            "OpenSearchLogGroupResourcePolicy",
            DeletionPolicy="Retain",
            PolicyName="ComposeXOpenSearchAccessToCWLogs",
            PolicyDocument=Sub(
                json.dumps({
                    "Version":
                    "2012-10-17",
                    "Statement": [{
                        "Sid":
                        "AllowESDomainsToAccessLogGroupsInAllRegions",
                        "Effect":
                        "Allow",
                        "Principal": {
                            "Service": f"es.${{{AWS_URL_SUFFIX}}}"
                        },
                        "Action":
                        ["logs:PutLogEvents", "logs:CreateLogStream"],
                        "Resource": [
                            f"arn:${{{AWS_PARTITION}}}:logs:*:${{{AWS_ACCOUNT_ID}}}:log-group:opensearch/*"
                        ],
                    }],
                })),
        )
        stack.stack_template.add_resource(logs_policy)
    props["LogPublishingOptions"] = opts
Example #28
        Tags=[
            Tag("Environment", Ref(environment), True),
            Tag("Cluster", "zookeeper", True),
            Tag("Name", "zookeeper", True)
        ],
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                PauseTime='PT5M',
                MinInstancesInService="1",
                MaxBatchSize='1',
                WaitOnResourceSignals=True))))

# LogGroup
log_group = template.add_resource(
    LogGroup(
        "ZkLogGroup",
        LogGroupName="/zookeeper/instances",
    ))

### Outputs

template.add_output([
    Output("ZkSecurityGroup", Value=Ref(security_group)),
    Output("ZkAutoscalingGroup", Value=Ref(autoscaling_group)),
    Output("ZkLaunchConfig", Value=Ref(launch_config)),
    Output("ZkLogGroup", Value=Ref(log_group))
])

### Program to deploy the CloudFormation Stack


def _stack_exists(stack_name):
Example #29
 def test_loggroup_deletionpolicy_is_preserved(self):
     log_group = LogGroup("LogGroupWithDeletionPolicy",
                          DeletionPolicy=Retain)
     self.assertIn('DeletionPolicy', log_group.JSONrepr())
Example #30
    def __init__(self,
                 prefix: str,
                 aws_region: str,
                 cpu: str,
                 ram: str,
                 environment: Dict[str, AwsRef],
                 container_name: str,
                 container_port: int,
                 target_group: TargetGroup,
                 security_groups: List[SecurityGroup],
                 subnets: List[Subnet],
                 depends_on_loadbalancers: List[LoadBalancer] = [],
                 depends_on_target_groups: List[TargetGroup] = [],
                 depends_on_listeners: List[Listener] = []) -> None:
        """
        Constructor.

        :param prefix: A prefix for newly created resources.
        :param aws_region: A region in which resources are put.
        :param cpu: Cpu points for the deployed container. 1 CPU = 1024 Cpu points.
        :param ram: Memory for the deployed container. 1 GB Ram = 1024.
        :param environment: Environment that will be passed to a running container.
        :param container_name: The name that will be given to a newly deployed container.
        :param container_port: An open container port through which a loadbalancer can communicate.
        :param target_group: A main target group to which a loadbalancer will forward traffic. Also, the newly
        created container will be associated with this group.
        :param security_groups: Container security groups restricting network traffic.
        :param subnets: Subnets in which the newly created container can be placed.
        :param depends_on_loadbalancers: Before creating ecs service, these loadbalancers must be created.
        :param depends_on_target_groups: Before creating ecs service, these target groups must be created.
        :param depends_on_listeners: Before creating ecs service, these listeners must be created.
        """
        self.prefix = prefix
        self.aws_region = aws_region
        self.environment = environment
        self.cpu = cpu
        self.ram = ram
        self.container_name = container_name
        self.container_port = container_port

        self.task_execution_role = Role(
            prefix + 'FargateEcsTaskExecutionRole',
            Path='/',
            Policies=[
                Policy(PolicyName=prefix + 'FargateEcsTaskExecutionPolicy',
                       PolicyDocument={
                           'Version':
                           '2012-10-17',
                           'Statement': [{
                               'Action': [
                                   "ecr:GetAuthorizationToken",
                                   "ecr:BatchCheckLayerAvailability",
                                   "ecr:GetDownloadUrlForLayer",
                                   "ecr:BatchGetImage", "logs:CreateLogStream",
                                   "logs:PutLogEvents"
                               ],
                               "Resource":
                               "*",
                               "Effect":
                               "Allow"
                           }]
                       })
            ],
            AssumeRolePolicyDocument={
                'Version':
                '2012-10-17',
                'Statement': [{
                    'Action': ['sts:AssumeRole'],
                    'Effect': 'Allow',
                    'Principal': {
                        'Service': [
                            'ecs-tasks.amazonaws.com',
                        ]
                    }
                }]
            },
        )

        self.log_group = LogGroup(prefix + 'FargateEcsLogGroup',
                                  LogGroupName=f'/aws/ecs/fargate/{prefix}')

        self.cluster = Cluster(prefix + 'FargateEcsCluster',
                               ClusterName=prefix + 'FargateEcsCluster')

        self.task = TaskDefinition(
            prefix + 'FargateEcsTaskDefinition',
            RequiresCompatibilities=['FARGATE'],
            ExecutionRoleArn=GetAtt(self.task_execution_role, 'Arn'),
            ContainerDefinitions=[
                ContainerDefinition(
                    Name=container_name,
                    # Create dummy image, since container definitions list can not be empty.
                    Image='nginx:latest',
                    # For task definitions that use the awsvpc network mode, you should only specify the containerPort.
                    # The hostPort can be left blank or it must be the same value as the containerPort.
                    PortMappings=[PortMapping(ContainerPort=80)],
                    LogConfiguration=LogConfiguration(
                        LogDriver='awslogs',
                        Options={
                            # Use Ref to set a dependency to a log group.
                            # Or use "depends on" attribute.
                            'awslogs-group': Ref(self.log_group),
                            'awslogs-region': aws_region,
                            'awslogs-stream-prefix': prefix
                        }))
            ],
            Cpu=cpu,
            Memory=ram,
            # For ECS Fargate - awsvpc is the only available option.
            NetworkMode='awsvpc',
            Family=prefix.lower())

        self.service = CustomEcsService(
            prefix + 'FargateEcsService',
            ServiceToken=EcsServiceService().service_token(),
            Cluster=Ref(self.cluster),
            ServiceName=prefix + 'FargateEcsService',
            TaskDefinition=Ref(self.task),
            LoadBalancers=[
                {
                    'targetGroupArn': Ref(target_group),
                    'containerName': container_name,
                    'containerPort': container_port
                },
            ],
            DesiredCount=1,
            LaunchType='FARGATE',
            NetworkConfiguration={
                'awsvpcConfiguration': {
                    'subnets': [Ref(sub) for sub in subnets],
                    'securityGroups': [Ref(sub) for sub in security_groups],
                    'assignPublicIp': 'ENABLED'
                }
            },
            DeploymentController={'type': 'CODE_DEPLOY'},
            # Target groups must have an associated load balancer before creating an ecs service.
            DependsOn=([lb.title for lb in depends_on_loadbalancers] +
                       [tg.title for tg in depends_on_target_groups] +
                       [l.title for l in depends_on_listeners]))
Example #31
                    Join('-', [Ref(core_stack), 'VideoEventsTable', 'Ref'])),
                'REKOGNITION_UPDATES_TOPIC':
                Ref(rekognition_updates_topic),
                'REKOGNITION_ROLE_ARN':
                GetAtt(rekognition_publish_role, 'Arn'),
                'INPUT_BUCKET':
                ImportValue(
                    Join('-', [Ref(encoding_stack), 'UploadBucket', 'Ref'])),
            }),
        TracingConfig=TracingConfig(Mode='Active', ),
    ))

template.add_resource(
    LogGroup(
        "RekognitionFunctionLogGroup",
        LogGroupName=Join(
            '/', ['/aws/lambda', Ref(rekognition_function)]),
        RetentionInDays=7,
    ))

video_metadata_event_role = template.add_resource(
    Role(
        'VideoMetadataEventRole',
        Path="/",
        AssumeRolePolicyDocument={
            "Version":
            "2012-10-17",
            "Statement": [{
                "Action": ["sts:AssumeRole"],
                "Effect": "Allow",
                "Principal": {
                    "Service": ["lambda.amazonaws.com"]