def add_bucket_policy(self):
    """Create a BucketPolicy resource and a matching Output for every
    policy described in this stack's sceptre_user_data.

    Each sceptre_user_data entry maps a policy logical name to the keyword
    properties of a troposphere BucketPolicy; the ``Bucket`` property is
    filled in automatically with a Ref to this stack's bucket resource.
    """
    # .items() instead of the Python-2-only .iteritems(): dicts have no
    # iteritems() on Python 3, so the original raised AttributeError there.
    for policy_name, policy_properties in self.sceptre_user_data.items():
        # Point the policy at this template's bucket.
        # NOTE: this mutates the dict stored in sceptre_user_data in place.
        policy_properties.update({"Bucket": Ref(self.bucket_name)})
        bucket_policy = self.template.add_resource(
            BucketPolicy(policy_name, **policy_properties))
        # Export the policy's logical reference under the same name.
        self.template.add_output(
            Output(policy_name, Value=Ref(bucket_policy)))
def policy_build(bucket, **kwargs):
    """Build the CloudTrail bucket policy resource for *bucket*.

    Args:
        bucket: the S3 bucket resource the policy is attached to.
        kwargs:
            OrganizationUnit: str() of the OU name or Path
            OuAsRoot: bool() to define whether all accounts in OU and
                sub OU should be used

    Returns:
        bucket_policy BucketPolicy()
    """
    # Delegate the policy document generation, then wrap it in the resource.
    document = _cloudtrail_bucket_policy(bucket, **kwargs)
    return BucketPolicy(
        'BucketPolicy',
        Bucket=Ref(bucket),
        PolicyDocument=document,
    )
def build_policy_bucket(self, bucket, name, statements):
    """
    Generate bucket policy for S3 bucket
    :param bucket: The bucket to attach policy to
    :param name: The name of the bucket (to generate policy name from it)
    :param statements: The "rules" the policy should have
    :return: Ref to new policy
    """
    policy = self.__template.add_resource(
        BucketPolicy(
            self.name_strip(name, True, False),
            Bucket=troposphere.Ref(bucket),
            # DependsOn takes logical resource names. troposphere exposes no
            # `Name` helper (the original `troposphere.Name(bucket)` raised
            # AttributeError at runtime), so use the bucket's title directly.
            DependsOn=[bucket.title],
            PolicyDocument=Policy(Version=self.VERSION_IAM,
                                  Statement=statements)))
    return policy
def __init__(self, unit_title, template, s3_access, bucket_policy):
    """
    Create an S3 bucket (retained on stack delete), a descriptive stack
    output for it, and — when a policy document is supplied — an attached
    bucket policy.

    AWS - http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html
    AWS - http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-policy.html
    Troposphere - https://github.com/cloudtools/troposphere/blob/master/troposphere/s3.py
    s3_access = [Private, PublicRead, PublicReadWrite, AuthenticatedRead,
    BucketOwnerRead, BucketOwnerFullControl, LogDeliveryWrite]
    :param unit_title: Title of the s3 bucket unit
    :param template: The troposphere template to add the Elastic Loadbalancer to.
    :param s3_access: A canned access control list (ACL) that grants predefined permissions to the bucket.
    For more information about canned ACLs http://docs.aws.amazon.com/AmazonS3/latest/dev/CannedACL.html
    Valid values: AuthenticatedRead | AwsExecRead | BucketOwnerRead | BucketOwnerFullControl |
    LogDeliveryWrite | Private | PublicRead | PublicReadWrite
    :param bucket_policy: A dictionary object containing s3 bucket policy
    """
    # Lowercased title doubles as the physical BucketName (S3 names must be lowercase).
    title = (unit_title + 'AmzS3').lower()

    # Add S3 Bucket
    self.s3_b = template.add_resource(
        Bucket(title,
               BucketName=title,
               AccessControl=s3_access,
               DeletionPolicy='Retain'))
    template.add_output(Output(
        title,
        Value=Join('', [Ref(self.s3_b),
                        ' is a managed AWS S3 bucket, created with Amazonia as part of stack name - ',
                        Ref('AWS::StackName'),
                        ' at ',
                        GetAtt(self.s3_b, 'WebsiteURL')
                        ]),
        Description='Amazonia S3 Bucket'
    ))

    # Add S3 Bucket Policy (only when a policy document was supplied)
    s3_b_policy_name = ''.join([title, 'policy'])
    if bucket_policy:
        # Bucket= takes the bucket name string; DependsOn references the
        # bucket resource's logical title so the bucket exists first.
        self.s3_b_policy = template.add_resource(
            BucketPolicy(s3_b_policy_name,
                         Bucket=title,
                         PolicyDocument=bucket_policy,
                         DependsOn=[self.s3_b.title]))
def create_cloud_front_template(output_path='./cloudfront.yml'):
    """Build a sample CloudFront + S3 origin stack with a Lambda@Edge
    viewer-request association, and write it out as YAML.

    :param output_path: where the rendered template is written
        (default './cloudfront.yml', preserving the original behavior).
    """
    template = Template()
    template.set_transform('AWS::Serverless-2016-10-31')
    # Origin bucket; account id suffix keeps the name globally unique.
    bucket = template.add_resource(
        resource=Bucket(
            title='SampleOriginBucket',
            BucketName=Sub('sample-origin-bucket-${AWS::AccountId}')
        )
    )
    identity = template.add_resource(
        resource=CloudFrontOriginAccessIdentity(
            title='SampleOriginAccessIdentity',
            CloudFrontOriginAccessIdentityConfig=CloudFrontOriginAccessIdentityConfig(
                Comment='sample-lambda-edge'
            )
        )
    )
    # Let only the origin access identity read objects from the bucket.
    template.add_resource(
        resource=BucketPolicy(
            title='SampleBucketPolicy',
            Bucket=Ref(bucket),
            PolicyDocument={
                'Statement': [{
                    'Action': 's3:GetObject',
                    'Effect': 'Allow',
                    'Resource': Join(delimiter='/',
                                     values=[GetAtt(bucket, 'Arn'), '*']),
                    'Principal': {
                        'CanonicalUser': GetAtt(logicalName=identity,
                                                attrName='S3CanonicalUserId')
                    }
                }]
            }
        )
    )
    template.add_resource(
        resource=Distribution(
            title='SampleDistribution',
            DistributionConfig=DistributionConfig(
                DefaultCacheBehavior=DefaultCacheBehavior(
                    ForwardedValues=ForwardedValues(
                        QueryString=True,
                    ),
                    LambdaFunctionAssociations=[
                        LambdaFunctionAssociation(
                            EventType='viewer-request',
                            # Pinned to function version :8 of the imported ARN.
                            LambdaFunctionARN=Sub([
                                '${FUNCTION_ARN}:8',
                                {'FUNCTION_ARN': ImportValue(get_export_name())}
                            ]),
                        )
                    ],
                    TargetOriginId=Sub('S3-${' + bucket.title + '}'),
                    ViewerProtocolPolicy='redirect-to-https',
                ),
                Enabled=True,
                Origins=[
                    Origin(
                        Id=Sub('S3-${' + bucket.title + '}'),
                        DomainName=Sub('${' + bucket.title + '}.s3.amazonaws.com'),
                        S3OriginConfig=S3OriginConfig(
                            OriginAccessIdentity=Sub(
                                'origin-access-identity/cloudfront/${' + identity.title + '}')
                        )
                    )
                ],
            )
        )
    )
    # `stream` avoids shadowing the builtin `file` name used originally.
    with open(output_path, mode='w') as stream:
        stream.write(template.to_yaml())
def generate_template(d):
    """Build a CloudFormation template for a private S3 bucket fronted by a
    CloudFront distribution with an Origin Access Identity.

    :param d: config dict; keys read here: cf_template_description,
        project_name, env, tags, default_root_object.
    :return: the populated troposphere Template.
    """
    t = Template()
    t.set_description(d["cf_template_description"])

    # Private bucket; all public access blocked — only CloudFront (via the
    # OAI below) can read content.
    S3bucket = t.add_resource(
        Bucket(
            "S3Bucket",
            BucketName=Join("-", [d["project_name"], d["env"]]),
            AccessControl=Private,
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            Tags=Tags(d["tags"], {"Name": d["project_name"]}),
        )
    )
    CFOriginAccessIdentity = t.add_resource(
        CloudFrontOriginAccessIdentity(
            "CFOriginAccessIdentity",
            CloudFrontOriginAccessIdentityConfig=CloudFrontOriginAccessIdentityConfig(
                Comment=Join(" ", ["Cloudfront Origin Access Identity",
                                   d["project_name"], d["env"]])
            ),
        )
    )
    # Grant the OAI's canonical user read/list access to the bucket.
    t.add_resource(
        BucketPolicy(
            "BucketPolicy",
            Bucket=Ref("S3Bucket"),
            PolicyDocument=dict(
                Statement=[
                    dict(
                        Sid="Allow-cf",
                        Effect="Allow",
                        Action=[
                            "s3:GetObject",
                            "s3:ListBucket"
                        ],
                        Principal=Principal(
                            "CanonicalUser",
                            GetAtt(CFOriginAccessIdentity, "S3CanonicalUserId")
                        ),
                        Resource=[
                            Join("", ["arn:aws:s3:::", Ref("S3Bucket"), "/*"]),
                            Join("", ["arn:aws:s3:::", Ref("S3Bucket")]),
                        ]
                    )
                ]
            )
        )
    )
    myDistribution = t.add_resource(
        Distribution(
            "myDistribution",
            DistributionConfig=DistributionConfig(
                Enabled=True,
                HttpVersion='http2',
                DefaultRootObject=d['default_root_object'],
                Origins=[Origin(
                    Id=Join("-", [d["project_name"], d["env"]]),
                    DomainName=GetAtt(S3bucket, "DomainName"),
                    S3OriginConfig=S3OriginConfig(
                        OriginAccessIdentity=Join("/", ["origin-access-identity",
                                                        "cloudfront",
                                                        Ref(CFOriginAccessIdentity)])
                    )
                )],
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId=Join("-", [d["project_name"], d["env"]]),
                    ForwardedValues=ForwardedValues(
                        QueryString=False
                    ),
                    ViewerProtocolPolicy="allow-all",
                    # All TTLs zero: effectively disables CloudFront caching.
                    MaxTTL=0,
                    MinTTL=0,
                    DefaultTTL=0,
                ),
                # SPA-style fallback: serve the root object with a 200 on 404s.
                CustomErrorResponses=[CustomErrorResponse(
                    ErrorCachingMinTTL=0,
                    ErrorCode=404,
                    ResponsePagePath=Join("", ["/", d['default_root_object']]),
                    ResponseCode=200,
                )],
                ViewerCertificate=ViewerCertificate(
                    CloudFrontDefaultCertificate=True
                ),
            ),
            Tags=Tags(d["tags"], {"Name": d["project_name"]}),
        )
    )
    t.add_output(Output(
        "BucketName",
        Value=Ref(S3bucket),
        Description="Name of S3 bucket to hold website content"
    ))
    t.add_output([
        Output("DistributionId", Value=Ref(myDistribution)),
        Output(
            "DistributionName",
            Value=Join("", ["http://", GetAtt(myDistribution, "DomainName")])),
    ])
    return t
# Policy for the shared cell bucket: three statements, each scoped by a
# Condition (VPC endpoint or source IP) rather than by a specific principal.
SharedBucketPolicy = BucketPolicy(
    "SharedBucketPolicy",
    Bucket=Ref(BucketName),
    PolicyDocument={
        "Version": "2008-10-17",
        "Statement": [
            # Read only HTTP access for S3 /shared/http
            # NOTE(review): Action is "s3:*" (any S3 action, restricted only
            # to requests via the VPC endpoint) — broader than "read only";
            # confirm that write access via the endpoint is intended.
            {
                "Effect": "Allow",
                "Principal": "*",
                "Action": "s3:*",
                "Resource": [
                    Join("", ["arn:aws:s3:::", Ref(BucketName), "/cell-os--",
                              Ref("CellName"), "/shared/http/*"]),
                ],
                "Condition": {
                    "StringEquals": {
                        "aws:sourceVpce": Ref(VpcEndpointS3)
                    }
                }
            },
            # public list needed by the status page
            # (only from egress_nets, and only under the status prefix)
            {
                "Effect": "Allow",
                "Principal": {"AWS": "*"},
                "Action": [
                    "s3:ListBucket"
                ],
                "Resource": [
                    Join("", ["arn:aws:s3:::", Ref(BucketName)]),
                ],
                "Condition": {
                    "StringLike": {
                        # S3 lists the entire bucket recursively, so we need to filter
                        "s3:prefix": [Join("", ["cell-os--", Ref("CellName"),
                                                "/shared/status/*"])],
                    },
                    "IpAddress": {
                        "aws:SourceIp": egress_nets
                    }
                }
            },
            # public read needed by the status page
            {
                "Effect": "Allow",
                "Principal": {"AWS": "*"},
                "Action": [
                    "s3:GetObject",
                ],
                "Resource": [
                    Join("", ["arn:aws:s3:::", Ref(BucketName), "/cell-os--",
                              Ref("CellName"), "/shared/status/*"]),
                ],
                "Condition": {
                    "IpAddress": {
                        "aws:SourceIp": egress_nets
                    }
                }
            }
        ]
    }
)
def create_template():
    """Build the full static-website stack: S3 content bucket + log bucket,
    a log-ingesting Lambda, an optional DNS-validated ACM certificate, a
    Lambda@Edge hook (with multi-region log groups via a StackSet), and the
    CloudFront distribution tying it together.

    Returns the populated troposphere Template.
    """
    template = Template(Description=(
        "Static website hosted with S3 and CloudFront. "
        "https://github.com/schlarpc/overengineered-cloudfront-s3-static-website"
    ))

    # ---- partition-specific configuration -------------------------------
    partition_config = add_mapping(
        template,
        "PartitionConfig",
        {
            "aws": {
                # the region with the control plane for CloudFront, IAM, Route 53, etc
                "PrimaryRegion": "us-east-1",
                # assume that Lambda@Edge replicates to all default enabled regions, and that
                # future regions will be opt-in. generated with AWS CLI:
                # aws ec2 describe-regions --all-regions --query "Regions[?OptInStatus=='opt-in-not-required'].RegionName|sort(@)"
                "DefaultRegions": [
                    "ap-northeast-1",
                    "ap-northeast-2",
                    "ap-northeast-3",
                    "ap-south-1",
                    "ap-southeast-1",
                    "ap-southeast-2",
                    "ca-central-1",
                    "eu-central-1",
                    "eu-north-1",
                    "eu-west-1",
                    "eu-west-2",
                    "eu-west-3",
                    "sa-east-1",
                    "us-east-1",
                    "us-east-2",
                    "us-west-1",
                    "us-west-2",
                ],
            },
            # this doesn't actually work, because Lambda@Edge isn't supported in aws-cn
            "aws-cn": {
                "PrimaryRegion": "cn-north-1",
                "DefaultRegions": ["cn-north-1", "cn-northwest-1"],
            },
        },
    )

    # ---- parameters ------------------------------------------------------
    acm_certificate_arn = template.add_parameter(
        Parameter(
            "AcmCertificateArn",
            Description=
            "Existing ACM certificate to use for serving TLS. Overrides HostedZoneId.",
            Type="String",
            AllowedPattern="(arn:[^:]+:acm:[^:]+:[^:]+:certificate/.+|)",
            Default="",
        ))
    hosted_zone_id = template.add_parameter(
        Parameter(
            "HostedZoneId",
            Description=
            "Existing Route 53 zone to use for validating a new TLS certificate.",
            Type="String",
            AllowedPattern="(Z[A-Z0-9]+|)",
            Default="",
        ))
    dns_names = template.add_parameter(
        Parameter(
            "DomainNames",
            Description=
            "Comma-separated list of additional domain names to serve.",
            Type="CommaDelimitedList",
            Default="",
        ))
    tls_protocol_version = template.add_parameter(
        Parameter(
            "TlsProtocolVersion",
            Description=
            "CloudFront TLS security policy; see https://amzn.to/2DR91Xq for details.",
            Type="String",
            Default="TLSv1.2_2019",
        ))
    log_retention_days = template.add_parameter(
        Parameter(
            "LogRetentionDays",
            Description=
            "Days to keep CloudFront, S3, and Lambda logs. 0 means indefinite retention.",
            Type="Number",
            AllowedValues=[0] + CLOUDWATCH_LOGS_RETENTION_OPTIONS,
            Default=365,
        ))
    default_ttl_seconds = template.add_parameter(
        Parameter(
            "DefaultTtlSeconds",
            Description="Cache time-to-live when not set by S3 object headers.",
            Type="Number",
            Default=int(datetime.timedelta(minutes=5).total_seconds()),
        ))
    enable_price_class_hack = template.add_parameter(
        Parameter(
            "EnablePriceClassHack",
            Description="Cut your bill in half with this one weird trick.",
            Type="String",
            Default="false",
            AllowedValues=["true", "false"],
        ))

    # ---- conditions ------------------------------------------------------
    retention_defined = add_condition(template, "RetentionDefined",
                                      Not(Equals(Ref(log_retention_days), 0)))
    using_price_class_hack = add_condition(
        template, "UsingPriceClassHack",
        Equals(Ref(enable_price_class_hack), "true"))
    using_acm_certificate = add_condition(
        template, "UsingAcmCertificate",
        Not(Equals(Ref(acm_certificate_arn), "")))
    using_hosted_zone = add_condition(template, "UsingHostedZone",
                                      Not(Equals(Ref(hosted_zone_id), "")))
    using_certificate = add_condition(
        template,
        "UsingCertificate",
        Or(Condition(using_acm_certificate), Condition(using_hosted_zone)),
    )
    should_create_certificate = add_condition(
        template,
        "ShouldCreateCertificate",
        And(Condition(using_hosted_zone), Not(Condition(using_acm_certificate))),
    )
    using_dns_names = add_condition(template, "UsingDnsNames",
                                    Not(Equals(Select(0, Ref(dns_names)), "")))
    is_primary_region = "IsPrimaryRegion"
    template.add_condition(
        is_primary_region,
        Equals(Region, FindInMap(partition_config, Partition, "PrimaryRegion")),
    )
    # WaitConditionHandle gated on the region check: stack creation fails fast
    # when deployed outside the partition's primary region.
    precondition_region_is_primary = template.add_resource(
        WaitConditionHandle(
            "PreconditionIsPrimaryRegionForPartition",
            Condition=is_primary_region,
        ))

    # ---- log ingestion pipeline (S3 -> Lambda -> CloudWatch Logs) --------
    log_ingester_dlq = template.add_resource(
        Queue(
            "LogIngesterDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
        ))
    log_ingester_role = template.add_resource(
        Role(
            "LogIngesterRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[GetAtt(log_ingester_dlq, "Arn")],
                            )
                        ],
                    ),
                )
            ],
        ))
    log_ingester = template.add_resource(
        Function(
            "LogIngester",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(log_ingest.handler.__name__),
            # Lambda source is inlined from the local log_ingest module.
            Code=Code(ZipFile=inspect.getsource(log_ingest)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(log_ingester_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(log_ingester_dlq, "Arn")),
        ))
    log_ingester_permission = template.add_resource(
        Permission(
            "LogIngesterPermission",
            FunctionName=GetAtt(log_ingester, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="s3.amazonaws.com",
            SourceAccount=AccountId,
        ))
    log_bucket = template.add_resource(
        Bucket(
            "LogBucket",
            # S3 requires this ACL (regardless of bucket policy) or s3:PutBucketLogging fails.
            # When the CloudFront distribution is created, it adds an additional bucket ACL.
            # That ACL is not possible to model in CloudFormation.
            AccessControl="LogDeliveryWrite",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                LifecycleRule(ExpirationInDays=1, Status="Enabled"),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=1),
                    Status="Enabled",
                ),
            ]),
            NotificationConfiguration=NotificationConfiguration(
                LambdaConfigurations=[
                    LambdaConfigurations(Event="s3:ObjectCreated:*",
                                         Function=GetAtt(log_ingester, "Arn"))
                ]),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # if we use KMS, we can't read the logs
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            DependsOn=[log_ingester_permission],
        ))
    log_ingester_log_group = template.add_resource(
        LogGroup(
            "LogIngesterLogGroup",
            LogGroupName=Join("", ["/aws/lambda/", Ref(log_ingester)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))
    log_ingester_policy = template.add_resource(
        PolicyType(
            "LogIngesterPolicy",
            Roles=[Ref(log_ingester_role)],
            PolicyName="IngestLogPolicy",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn", Partition, "logs", Region,
                                    AccountId, "log-group",
                                    "/aws/cloudfront/*",
                                ],
                            ),
                            Join(
                                ":",
                                [
                                    "arn", Partition, "logs", Region,
                                    AccountId, "log-group", "/aws/s3/*",
                                ],
                            ),
                            GetAtt(log_ingester_log_group, "Arn"),
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(log_bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))

    # ---- content bucket + origin access identity -------------------------
    bucket = template.add_resource(
        Bucket(
            "ContentBucket",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                # not supported by CFN yet:
                # LifecycleRule(
                #     Transitions=[
                #         LifecycleRuleTransition(
                #             StorageClass='INTELLIGENT_TIERING',
                #             TransitionInDays=1,
                #         ),
                #     ],
                #     Status="Enabled",
                # ),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=7),
                    Status="Enabled",
                )
            ]),
            LoggingConfiguration=LoggingConfiguration(
                DestinationBucketName=Ref(log_bucket), LogFilePrefix="s3/"),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # Origin Access Identities can't use KMS
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
        ))
    origin_access_identity = template.add_resource(
        CloudFrontOriginAccessIdentity(
            "CloudFrontIdentity",
            CloudFrontOriginAccessIdentityConfig=
            CloudFrontOriginAccessIdentityConfig(
                Comment=GetAtt(bucket, "Arn")),
        ))
    bucket_policy = template.add_resource(
        BucketPolicy(
            "ContentBucketPolicy",
            Bucket=Ref(bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "CanonicalUser",
                            GetAtt(origin_access_identity,
                                   "S3CanonicalUserId"),
                        ),
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))
    # Not strictly necessary, as ACLs should take care of this access. However, CloudFront docs
    # state "In some circumstances [...] S3 resets permissions on the bucket to the default value",
    # and this allows logging to work without any ACLs in place.
    log_bucket_policy = template.add_resource(
        BucketPolicy(
            "LogBucketPolicy",
            Bucket=Ref(log_bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join(
                                "/",
                                [GetAtt(log_bucket, "Arn"), "cloudfront", "*"])
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.ListBucket],
                        Resource=[Join("/", [GetAtt(log_bucket, "Arn")])],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service", "s3.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join("/", [GetAtt(log_bucket, "Arn"), "s3", "*"])
                        ],
                    ),
                ],
            ),
        ))

    # ---- optional DNS-validated certificate (only with a hosted zone) ----
    certificate_validator_dlq = template.add_resource(
        Queue(
            "CertificateValidatorDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
            Condition=should_create_certificate,
        ))
    certificate_validator_role = template.add_resource(
        Role(
            "CertificateValidatorRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[
                                    GetAtt(certificate_validator_dlq, "Arn")
                                ],
                            )
                        ],
                    ),
                )
            ],
            # TODO scope down
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
                "arn:aws:iam::aws:policy/AmazonRoute53FullAccess",
                "arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly",
            ],
            Condition=should_create_certificate,
        ))
    certificate_validator_function = template.add_resource(
        Function(
            "CertificateValidatorFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(certificate_validator.handler.__name__),
            Code=Code(ZipFile=inspect.getsource(certificate_validator)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(certificate_validator_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(certificate_validator_dlq, "Arn")),
            Environment=Environment(
                Variables={
                    certificate_validator.EnvVars.HOSTED_ZONE_ID.name:
                    Ref(hosted_zone_id)
                }),
            Condition=should_create_certificate,
        ))
    certificate_validator_log_group = template.add_resource(
        LogGroup(
            "CertificateValidatorLogGroup",
            LogGroupName=Join(
                "", ["/aws/lambda/", Ref(certificate_validator_function)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
            Condition=should_create_certificate,
        ))
    # The validator is triggered via CloudTrail when the certificate resource
    # below tags itself with the validator function's ARN.
    certificate_validator_rule = template.add_resource(
        Rule(
            "CertificateValidatorRule",
            EventPattern={
                "detail-type": ["AWS API Call via CloudTrail"],
                "detail": {
                    "eventSource": ["acm.amazonaws.com"],
                    "eventName": ["AddTagsToCertificate"],
                    "requestParameters": {
                        "tags": {
                            "key": [certificate_validator_function.title],
                            "value":
                            [GetAtt(certificate_validator_function, "Arn")],
                        }
                    },
                },
            },
            Targets=[
                Target(
                    Id="certificate-validator-lambda",
                    Arn=GetAtt(certificate_validator_function, "Arn"),
                )
            ],
            DependsOn=[certificate_validator_log_group],
            Condition=should_create_certificate,
        ))
    certificate_validator_permission = template.add_resource(
        Permission(
            "CertificateValidatorPermission",
            FunctionName=GetAtt(certificate_validator_function, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="events.amazonaws.com",
            SourceArn=GetAtt(certificate_validator_rule, "Arn"),
            Condition=should_create_certificate,
        ))
    certificate = template.add_resource(
        Certificate(
            "Certificate",
            DomainName=Select(0, Ref(dns_names)),
            SubjectAlternativeNames=Ref(
                dns_names),  # duplicate first name works fine
            ValidationMethod="DNS",
            Tags=Tags(
                **{
                    certificate_validator_function.title:
                    GetAtt(certificate_validator_function, "Arn")
                }),
            DependsOn=[certificate_validator_permission],
            Condition=should_create_certificate,
        ))

    # ---- Lambda@Edge hook -------------------------------------------------
    edge_hook_role = template.add_resource(
        Role(
            "EdgeHookRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal(
                            "Service",
                            [
                                "lambda.amazonaws.com",
                                "edgelambda.amazonaws.com"
                            ],
                        ),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
        ))
    edge_hook_function = template.add_resource(
        Function(
            "EdgeHookFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.handler",
            Code=Code(ZipFile=inspect.getsource(edge_hook)),
            MemorySize=128,
            Timeout=3,
            Role=GetAtt(edge_hook_role, "Arn"),
        ))
    # Content-addressed suffix: a new Version resource is created whenever the
    # function definition changes (Lambda versions are immutable).
    edge_hook_function_hash = (hashlib.sha256(
        json.dumps(edge_hook_function.to_dict(),
                   sort_keys=True).encode("utf-8")).hexdigest()[:10].upper())
    edge_hook_version = template.add_resource(
        Version(
            "EdgeHookVersion" + edge_hook_function_hash,
            FunctionName=GetAtt(edge_hook_function, "Arn"),
        ))
    # Log group name that Lambda@Edge replicas use in every region.
    replica_log_group_name = Join(
        "/",
        [
            "/aws/lambda",
            Join(
                ".",
                [
                    FindInMap(partition_config, Partition, "PrimaryRegion"),
                    Ref(edge_hook_function),
                ],
            ),
        ],
    )
    edge_hook_role_policy = template.add_resource(
        PolicyType(
            "EdgeHookRolePolicy",
            PolicyName="write-logs",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn", Partition, "logs", "*", AccountId,
                                    "log-group", replica_log_group_name,
                                    "log-stream", "*",
                                ],
                            ),
                        ],
                    ),
                ],
            ),
            Roles=[Ref(edge_hook_role)],
        ))

    # ---- StackSet roles + multi-region edge log groups --------------------
    stack_set_administration_role = template.add_resource(
        Role(
            "StackSetAdministrationRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "cloudformation.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
        ))
    stack_set_execution_role = template.add_resource(
        Role(
            "StackSetExecutionRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "AWS",
                            GetAtt(stack_set_administration_role, "Arn")),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="create-stackset-instances",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.DescribeStacks,
                                    logs.DescribeLogGroups,
                                ],
                                Resource=["*"],
                            ),
                            # stack instances communicate with the CFN service via SNS
                            Statement(
                                Effect=Allow,
                                Action=[sns.Publish],
                                NotResource=[
                                    Join(
                                        ":",
                                        [
                                            "arn", Partition, "sns", "*",
                                            AccountId, "*"
                                        ],
                                    )
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    logs.CreateLogGroup,
                                    logs.DeleteLogGroup,
                                    logs.PutRetentionPolicy,
                                    logs.DeleteRetentionPolicy,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn", Partition, "logs", "*",
                                            AccountId, "log-group",
                                            replica_log_group_name,
                                            "log-stream", "",
                                        ],
                                    ),
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.CreateStack,
                                    cloudformation.DeleteStack,
                                    cloudformation.UpdateStack,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn", Partition,
                                            "cloudformation", "*", AccountId,
                                            Join(
                                                "/",
                                                [
                                                    "stack",
                                                    Join(
                                                        "-",
                                                        [
                                                            "StackSet",
                                                            StackName, "*"
                                                        ],
                                                    ),
                                                ],
                                            ),
                                        ],
                                    )
                                ],
                            ),
                        ],
                    ),
                ),
            ],
        ))
    stack_set_administration_role_policy = template.add_resource(
        PolicyType(
            "StackSetAdministrationRolePolicy",
            PolicyName="assume-execution-role",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[sts.AssumeRole],
                        Resource=[GetAtt(stack_set_execution_role, "Arn")],
                    ),
                ],
            ),
            Roles=[Ref(stack_set_administration_role)],
        ))
    edge_log_groups = template.add_resource(
        StackSet(
            "EdgeLambdaLogGroupStackSet",
            AdministrationRoleARN=GetAtt(stack_set_administration_role,
                                         "Arn"),
            ExecutionRoleName=Ref(stack_set_execution_role),
            StackSetName=Join("-", [StackName, "EdgeLambdaLogGroup"]),
            PermissionModel="SELF_MANAGED",
            Description="Multi-region log groups for Lambda@Edge replicas",
            Parameters=[
                StackSetParameter(
                    ParameterKey="LogGroupName",
                    ParameterValue=replica_log_group_name,
                ),
                StackSetParameter(
                    ParameterKey="LogRetentionDays",
                    ParameterValue=Ref(log_retention_days),
                ),
            ],
            OperationPreferences=OperationPreferences(
                FailureToleranceCount=0,
                MaxConcurrentPercentage=100,
            ),
            StackInstancesGroup=[
                StackInstances(
                    DeploymentTargets=DeploymentTargets(Accounts=[AccountId]),
                    Regions=FindInMap(partition_config, Partition,
                                      "DefaultRegions"),
                )
            ],
            TemplateBody=create_log_group_template().to_json(indent=None),
            DependsOn=[stack_set_administration_role_policy],
        ))

    # ---- CloudFront distributions -----------------------------------------
    price_class_distribution = template.add_resource(
        Distribution(
            "PriceClassDistribution",
            DistributionConfig=DistributionConfig(
                Comment="Dummy distribution used for price class hack",
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    ViewerProtocolPolicy="allow-all",
                    ForwardedValues=ForwardedValues(QueryString=False),
                ),
                Enabled=True,
                Origins=[
                    Origin(Id="default",
                           DomainName=GetAtt(bucket, "DomainName"))
                ],
                IPV6Enabled=True,
                ViewerCertificate=ViewerCertificate(
                    CloudFrontDefaultCertificate=True),
                PriceClass="PriceClass_All",
            ),
            Condition=using_price_class_hack,
        ))
    distribution = template.add_resource(
        Distribution(
            "ContentDistribution",
            DistributionConfig=DistributionConfig(
                Enabled=True,
                Aliases=If(using_dns_names, Ref(dns_names), NoValue),
                Logging=Logging(Bucket=GetAtt(log_bucket, "DomainName"),
                                Prefix="cloudfront/"),
                DefaultRootObject="index.html",
                Origins=[
                    Origin(
                        Id="default",
                        DomainName=GetAtt(bucket, "DomainName"),
                        S3OriginConfig=S3OriginConfig(
                            OriginAccessIdentity=Join(
                                "",
                                [
                                    "origin-access-identity/cloudfront/",
                                    Ref(origin_access_identity),
                                ],
                            )),
                    )
                ],
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    Compress=True,
                    ForwardedValues=ForwardedValues(QueryString=False),
                    ViewerProtocolPolicy="redirect-to-https",
                    DefaultTTL=Ref(default_ttl_seconds),
                    LambdaFunctionAssociations=[
                        LambdaFunctionAssociation(
                            EventType="origin-request",
                            LambdaFunctionARN=Ref(edge_hook_version),
                        )
                    ],
                ),
                HttpVersion="http2",
                IPV6Enabled=True,
                ViewerCertificate=ViewerCertificate(
                    # Prefer the explicit ACM ARN; else the self-managed
                    # certificate; else fall back to the default CF cert.
                    AcmCertificateArn=If(
                        using_acm_certificate,
                        Ref(acm_certificate_arn),
                        If(using_hosted_zone, Ref(certificate), NoValue),
                    ),
                    SslSupportMethod=If(using_certificate, "sni-only",
                                        NoValue),
                    CloudFrontDefaultCertificate=If(using_certificate,
                                                    NoValue, True),
                    MinimumProtocolVersion=Ref(tls_protocol_version),
                ),
                PriceClass=If(using_price_class_hack, "PriceClass_100",
                              "PriceClass_All"),
            ),
            DependsOn=[
                bucket_policy,
                log_ingester_policy,
                edge_log_groups,
                precondition_region_is_primary,
            ],
        ))
    distribution_log_group = template.add_resource(
        LogGroup(
            "DistributionLogGroup",
            LogGroupName=Join("", ["/aws/cloudfront/", Ref(distribution)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))
    bucket_log_group = template.add_resource(
        LogGroup(
            "BucketLogGroup",
            LogGroupName=Join("", ["/aws/s3/", Ref(bucket)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    # ---- outputs -----------------------------------------------------------
    template.add_output(Output("DistributionId", Value=Ref(distribution)))
    template.add_output(
        Output("DistributionDomain", Value=GetAtt(distribution,
                                                  "DomainName")))
    template.add_output(
        Output(
            "DistributionDnsTarget",
            Value=If(
                using_price_class_hack,
                GetAtt(price_class_distribution, "DomainName"),
                GetAtt(distribution, "DomainName"),
            ),
        ))
    template.add_output(
        Output(
            "DistributionUrl",
            Value=Join("",
                       ["https://",
                        GetAtt(distribution, "DomainName"), "/"]),
        ))
    template.add_output(Output("ContentBucketArn", Value=GetAtt(bucket,
                                                                "Arn")))
    return template
# CloudTrail log-delivery policy: lets CloudTrail check the bucket ACL and
# write log objects under /AWSLogs/<account-id>/, requiring full owner control.
# NOTE(review): this assignment rebinds the name `BucketPolicy` (the
# troposphere class) to the resource instance, shadowing the class for the
# rest of the module — confirm nothing later needs the class again.
BucketPolicy = t.add_resource(BucketPolicy(
    "BucketPolicy",
    PolicyDocument={
        "Version": "2012-10-17",
        "Statement": [{
            "Action": "s3:GetBucketAcl",
            "Principal": {
                "Service": "cloudtrail.amazonaws.com"
            },
            "Resource": Join("", [
                "arn:aws:s3:::", Ref(S3Bucket)
            ]),
            "Effect": "Allow",
            "Sid": "AWSCloudTrailAclCheck"
        }, {
            "Action": "s3:PutObject",
            "Principal": {
                "Service": "cloudtrail.amazonaws.com"
            },
            "Resource": Join("", [
                "arn:aws:s3:::", Ref(S3Bucket), "/AWSLogs/",
                Ref("AWS::AccountId"), "/*"
            ]),
            "Effect": "Allow",
            "Sid": "AWSCloudTrailWrite",
            "Condition": {
                "StringEquals": {"s3:x-amz-acl": "bucket-owner-full-control"}
            }
        }]
    },
    Bucket=Ref(S3Bucket),
))
Bucket(WEBSITE_BUCKET,
       AccessControl="PublicRead",
       WebsiteConfiguration=WebsiteConfiguration(
           IndexDocument="index.html", ErrorDocument="404.html")))

# Generate the bucketpolicy to allow user to access your static website
# (anonymous, world-readable GetObject on every object in the bucket)
BucketPolicyStaticWebsite = t.add_resource(
    BucketPolicy(
        "BucketPolicyStaticWebsite",
        PolicyDocument={
            "Version": IAM_VERSION,
            "Statement": [{
                "Sid": "ReadOnly",
                "Effect": "Allow",
                "Principal": "*",
                "Action": ["s3:GetObject"],
                "Resource": [Join("", [GetAtt(WebsiteBucket, "Arn"), "/*"])]
            }]
        },
        Bucket=Ref(WebsiteBucket),
    ))

# Generate the bucket for storing build artifacts
BuildArtifacts = t.add_resource(Bucket(ARTIFACT_BUCKET))

# Generate the codecommit repository
CodeCommit = t.add_resource(Repository(GIT_REPO, RepositoryName=GIT_REPO))
OriginProtocolPolicy="http-only"), ) ], Enabled=True, PriceClass="PriceClass_100"), DependsOn=["StaticSiteBucket", "wwwStaticSiteBucket"])) StaticSiteBucketPolicy = t.add_resource( BucketPolicy( "StaticSiteBucketPolicy", Bucket=Ref(StaticSiteBucket), PolicyDocument={ "Statement": [{ "Action": ["s3:GetObject"], "Effect": "Allow", "Resource": { "Fn::Join": ["", ["arn:aws:s3:::", { "Ref": "StaticSiteBucket" }, "/*"]] }, "Principal": "*" }] })) wwwStaticSiteBucketPolicy = t.add_resource( BucketPolicy("wwwStaticSiteBucketPolicy", Bucket=Ref(wwwStaticSiteBucket), PolicyDocument={ "Statement": [{ "Action": ["s3:GetObject"], "Effect": "Allow",
# generate_template(service_name): builds the "AWS Exploitation Lab"
# CloudFormation template -- parameters (keypair, SSH CIDR, instance
# type), an EC2 role with ReadOnlyAccess plus a private-instance role
# that additionally gets ec2:CreateSnapshot/ModifySnapshotAttribute,
# a 10.0.0.0/16 VPC with one public and one private subnet, an internet
# gateway + default route for the public subnet only, a shared security
# group, one public and one private instance, and a multi-region
# CloudTrail trail logging to a retained S3 bucket. Returns the rendered
# template JSON string.
# NOTE(review): AllowedPattern uses \d escapes in a non-raw string
# (invalid-escape DeprecationWarning); SSHLocation default 0.0.0.0/32
# matches no host -- confirm whether /0 was intended.
def generate_template(service_name): t = Template() t.add_version('2010-09-09') t.add_description("""\ AWS CloudFormation Template for AWS Exploitation Lab """) t.add_mapping("PublicRegionMap", ami_public_mapping) t.add_mapping("PrivateRegionMap", ami_private_mapping) keyname_param = t.add_parameter( Parameter( 'KeyName', ConstraintDescription= 'must be the name of an existing EC2 KeyPair.', Description= 'Name of an existing EC2 KeyPair to enable SSH access to \ the instance', Type='AWS::EC2::KeyPair::KeyName', )) sshlocation_param = t.add_parameter( Parameter( 'SSHLocation', Description= ' The IP address range that can be used to SSH to the EC2 \ instances', Type='String', MinLength='9', MaxLength='18', Default='0.0.0.0/32', AllowedPattern= "(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})", ConstraintDescription=( "must be a valid IP CIDR range of the form x.x.x.x/x."), )) instanceType_param = t.add_parameter( Parameter( 'InstanceType', Type='String', Description='WebServer EC2 instance type', Default='t2.micro', AllowedValues=[ 't2.micro', 't2.small', 't2.medium', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', ], ConstraintDescription='must be a valid EC2 instance type.', )) ref_stack_id = Ref('AWS::StackId') ec2_role = t.add_resource( Role("%sEC2Role" % service_name, AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[ awacs.aws.Statement( Effect=awacs.aws.Allow, Action=[awacs.aws.Action("sts", "AssumeRole")], Principal=awacs.aws.Principal("Service", ["ec2.amazonaws.com"])) ]))) ec2_role.ManagedPolicyArns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] ec2_snapshot_policy_document = awacs.aws.Policy(Statement=[ awacs.aws.Statement(Sid="PermitEC2Snapshots", Effect=awacs.aws.Allow, Action=[ awacs.aws.Action("ec2", "CreateSnapshot"), awacs.aws.Action("ec2", "ModifySnapshotAttribute"), ], Resource=["*"]) ]) ec2_snapshot_policy = Policy(PolicyName="EC2SnapshotPermissions", PolicyDocument=ec2_snapshot_policy_document) priv_ec2_role = t.add_resource( 
# Private-instance role: read-only access plus the snapshot policy above.
Role("%sPrivEC2Role" % service_name, AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[ awacs.aws.Statement( Effect=awacs.aws.Allow, Action=[awacs.aws.Action("sts", "AssumeRole")], Principal=awacs.aws.Principal("Service", ["ec2.amazonaws.com"])) ]), Policies=[ec2_snapshot_policy])) priv_ec2_role.ManagedPolicyArns = [ "arn:aws:iam::aws:policy/ReadOnlyAccess" ] VPC_ref = t.add_resource( VPC('VPC', CidrBlock='10.0.0.0/16', Tags=Tags(Application=ref_stack_id))) instanceProfile = t.add_resource( InstanceProfile("InstanceProfile", InstanceProfileName="%sInstanceRole" % (service_name), Roles=[Ref(ec2_role)])) privInstanceProfile = t.add_resource( InstanceProfile("PrivInstanceProfile", InstanceProfileName="%sPrivInstanceRole" % (service_name), Roles=[Ref(priv_ec2_role)])) public_subnet = t.add_resource( Subnet('%sSubnetPublic' % service_name, MapPublicIpOnLaunch=True, CidrBlock='10.0.1.0/24', VpcId=Ref(VPC_ref), Tags=Tags(Application=ref_stack_id, Name="%sSubnet_public" % (service_name)))) private_subnet = t.add_resource( Subnet('%sSubnetPrivate' % service_name, MapPublicIpOnLaunch=False, CidrBlock='10.0.2.0/24', VpcId=Ref(VPC_ref), Tags=Tags(Application=ref_stack_id, Name="%sSubnet_private" % (service_name)))) internetGateway = t.add_resource( InternetGateway('InternetGateway', Tags=Tags(Application=ref_stack_id, Name="%sInternetGateway" % service_name))) gatewayAttachment = t.add_resource( VPCGatewayAttachment('AttachGateway', VpcId=Ref(VPC_ref), InternetGatewayId=Ref(internetGateway))) routeTable = t.add_resource( RouteTable('RouteTable', VpcId=Ref(VPC_ref), Tags=Tags(Application=ref_stack_id, Name="%sRouteTable" % service_name))) route = t.add_resource( Route( 'Route', DependsOn='AttachGateway', GatewayId=Ref('InternetGateway'), DestinationCidrBlock='0.0.0.0/0', RouteTableId=Ref(routeTable), )) # Only associate this Route Table with the public subnet subnetRouteTableAssociation = t.add_resource( SubnetRouteTableAssociation( 'SubnetRouteTableAssociation', 
# Public subnet only; the private subnet stays on the main route table.
SubnetId=Ref(public_subnet), RouteTableId=Ref(routeTable), )) instanceSecurityGroup = t.add_resource( SecurityGroup( 'InstanceSecurityGroup', GroupDescription='%sSecurityGroup' % service_name, SecurityGroupIngress=[ SecurityGroupRule(IpProtocol='tcp', FromPort='22', ToPort='22', CidrIp=Ref(sshlocation_param)), SecurityGroupRule(IpProtocol='tcp', FromPort='80', ToPort='80', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='443', ToPort='443', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='0', ToPort='65535', CidrIp="10.0.0.0/8"), ], VpcId=Ref(VPC_ref), )) public_instance = t.add_resource( Instance( "Public%sInstance" % service_name, ImageId=FindInMap("PublicRegionMap", Ref("AWS::Region"), "AMI"), InstanceType=Ref(instanceType_param), KeyName=Ref(keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty(GroupSet=[Ref(instanceSecurityGroup)], AssociatePublicIpAddress='true', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(public_subnet)) ], UserData=Base64(public_instance_userdata), Tags=Tags(Application=ref_stack_id, Name='%sPublicInstance' % (service_name)))) private_instance = t.add_resource( Instance( "Private%sInstance" % service_name, ImageId=FindInMap("PrivateRegionMap", Ref("AWS::Region"), "AMI"), InstanceType=Ref(instanceType_param), KeyName=Ref(keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty(GroupSet=[Ref(instanceSecurityGroup)], DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(private_subnet)) ], UserData=Base64(private_instance_userdata), Tags=Tags(Application=ref_stack_id, Name='%sPrivateInstance' % (service_name)), IamInstanceProfile="%sPrivInstanceRole" % (service_name))) outputs = [] outputs.append( Output( "PublicIP", Description="IP Address of Public Instance", Value=GetAtt(public_instance, "PublicIp"), )) t.add_output(outputs) # Set up S3 Bucket and CloudTrail S3Bucket = t.add_resource(Bucket("S3Bucket", DeletionPolicy="Retain")) S3PolicyDocument = awacs.aws.PolicyDocument( 
# CloudTrail delivery policy: ACL check + writes under /AWSLogs/<account>/*.
Id='EnforceServersideEncryption', Version='2012-10-17', Statement=[ awacs.aws.Statement( Sid='PermitCTBucketPut', Action=[s3.PutObject], Effect=awacs.aws.Allow, Principal=awacs.aws.Principal("Service", ["cloudtrail.amazonaws.com"]), Resource=[Join('', [s3.ARN(''), Ref(S3Bucket), "/*"])], ), awacs.aws.Statement( Sid='PermitCTBucketACLRead', Action=[s3.GetBucketAcl], Effect=awacs.aws.Allow, Principal=awacs.aws.Principal("Service", ["cloudtrail.amazonaws.com"]), Resource=[Join('', [s3.ARN(''), Ref(S3Bucket)])], ) ]) S3BucketPolicy = t.add_resource( BucketPolicy("BucketPolicy", PolicyDocument=S3PolicyDocument, Bucket=Ref(S3Bucket), DependsOn=[S3Bucket])) myTrail = t.add_resource( Trail( "CloudTrail", IsLogging=True, S3BucketName=Ref(S3Bucket), DependsOn=["BucketPolicy"], )) myTrail.IsMultiRegionTrail = True myTrail.IncludeGlobalServiceEvents = True return t.to_json()
# create_cloudfront_template(): writes ./cloudfront.yml containing an
# account-scoped sample bucket, an Origin Access Identity, a bucket policy
# restricting s3:GetObject to that OAI's canonical user, a CloudFront
# distribution with a custom CNAME + ACM certificate (SNI-only), and a
# Route53 A-record alias pointing the CNAME at the distribution.
def create_cloudfront_template(): template = Template() cname = template.add_parameter( parameter=Parameter(title='Cname', Type='String')) acm_certificate_arn = template.add_parameter( parameter=Parameter(title='AcmCertificateArn', Type='String')) host_zone_id = template.add_parameter( parameter=Parameter(title='HostZoneId', Type='String')) bucket = template.add_resource( resource=Bucket(title='SampleBucket', BucketName=Sub('sample-bucket-${AWS::AccountId}'))) identity = template.add_resource(resource=CloudFrontOriginAccessIdentity( title='SampleOriginAccessIdentity', CloudFrontOriginAccessIdentityConfig= CloudFrontOriginAccessIdentityConfig(Comment='sample'))) template.add_resource(resource=BucketPolicy( title='SampleBucketPolicy', Bucket=Ref(bucket), PolicyDocument={ 'Statement': [{ 'Action': 's3:GetObject', 'Effect': 'Allow', 'Resource': Join(delimiter='/', values=[GetAtt(bucket, 'Arn'), '*']), 'Principal': { 'CanonicalUser': GetAtt(logicalName=identity, attrName='S3CanonicalUserId') } }] })) distribution = template.add_resource(resource=Distribution( title='SampleDistribution', DistributionConfig=DistributionConfig( Aliases=[Ref(cname)], # CustomErrorResponses=[ # CustomErrorResponse( # ErrorCode=403, # ResponseCode=200, # ResponsePagePath='/404.html', # ErrorCachingMinTTL=30 # ) # ], DefaultCacheBehavior=DefaultCacheBehavior( ForwardedValues=ForwardedValues(QueryString=True, ), TargetOriginId=Sub('S3-${' + bucket.title + '}'), ViewerProtocolPolicy='redirect-to-https', ), # DefaultRootObject='index.html', Enabled=True, Origins=[ Origin(Id=Sub('S3-${' + bucket.title + '}'), DomainName=Sub('${' + bucket.title + '}.s3.amazonaws.com'), S3OriginConfig=S3OriginConfig(OriginAccessIdentity=Sub( 'origin-access-identity/cloudfront/${' + identity.title + '}'))) ], ViewerCertificate=ViewerCertificate(AcmCertificateArn=Ref( acm_certificate_arn), SslSupportMethod='sni-only')))) template.add_resource(resource=RecordSetType( title='SampleRecordSet', 
# Alias record; Z2FDTNDATAQYW2 is CloudFront's fixed hosted-zone ID.
AliasTarget=AliasTarget(HostedZoneId='Z2FDTNDATAQYW2', DNSName=GetAtt(logicalName=distribution, attrName='DomainName')), HostedZoneId=Ref(host_zone_id), Name=Ref(cname), Type='A')) with open('./cloudfront.yml', mode='w') as file: file.write(template.to_yaml())
# -- Replica-bucket fragment (truncated at both ends) --
# Conditional (REPLICA_REGION_CON) bucket policy letting this account's
# root principal manage versioning and replicate objects/deletes into the
# replica bucket; the trailing TPL.add_output( call is cut off.
TPL.add_resource(BUCKET) BUCKET_POLICY = BucketPolicy( 'ReplicaBucketPolicy', Condition=REPLICA_REGION_CON, Bucket=Ref(BUCKET), PolicyDocument={ "Version": "2008-10-17", "Id": "S3ReplicationPolicy", "Statement": [{ "Sid": "S3ReplicationPolicy", "Effect": "Allow", "Principal": { "AWS": Sub("arn:aws:iam::${AWS::AccountId}:root") }, "Action": [ "s3:GetBucketVersioning", "s3:PutBucketVersioning", "s3:ReplicateObject", "s3:ReplicateDelete" ], "Resource": [ Sub(f"arn:aws:s3:::${{{BUCKET.title}}}"), Sub(f"arn:aws:s3:::${{{BUCKET.title}}}/*") ] }] }) TPL.add_resource(BUCKET_POLICY) TPL.add_output(
# -- ELB access-logs stack fragment (truncated at the end) --
# Retained S3 bucket plus a policy allowing ELB log-delivery account
# 054676820928 to PutObject under a hard-coded /AWSLogs/762605676694/*
# prefix; the trailing SecurityGroup is cut off mid-ImportValue.
# NOTE(review): the account ID in the prefix is hard-coded rather than
# Ref("AWS::AccountId") -- confirm it matches the deploying account.
t.add_resource(Bucket( "S3Bucket", DeletionPolicy="Retain", )) t.add_resource( BucketPolicy('BucketPolicy', Bucket=Ref("S3Bucket"), PolicyDocument=Policy( Version='2012-10-17', Statement=[ Statement( Action=[PutObject], Effect=Allow, Principal=Principal("AWS", ["054676820928"]), Resource=[ Join('', [ ARN(''), Ref("S3Bucket"), "/AWSLogs/762605676694/*" ]) ], ) ]))) t.add_resource( ec2.SecurityGroup( "LoadBalancerSecurityGroup", GroupDescription="Web load balancer security group.", VpcId=ImportValue( Join("-", [
# create_wordpress_environment(self): emits modules/template_wordpress.yaml.
# Builds a WordPress hosting stack: webdmz + private RDS security groups,
# code/media S3 buckets, an RDS instance, a CloudFront distribution fed by
# an Origin Access Identity (only its canonical user may read media), a
# spot-fleet "write" node whose userdata installs WordPress and crontab-
# syncs /var/www/html out to the S3 buckets, a launch configuration + ASG
# of "read" nodes syncing the same content back in, and an ALB in front.
def create_wordpress_environment(self): template = Template() template.add_version('2010-09-09') # Wordpress preparation: format vpc name and split private and public subnets in two lists vpc_name_formatted = ''.join( e for e in self.private_vpc_name if e.isalnum()).capitalize() filter_private_subnets = filter(lambda x : x["type"] == "private", self.private_vpc_subnets) filter_public_subnets = filter(lambda x : x["type"] == "public", self.private_vpc_subnets) private_subnets = [] for subnet in filter_private_subnets: subnet_name_formatted = ''.join(e for e in subnet["name"] if e.isalnum()).capitalize() private_subnets.append(ImportValue("{}{}{}SubnetId".format(self.stage, vpc_name_formatted, subnet_name_formatted))) public_subnets = [] for subnet in filter_public_subnets: subnet_name_formatted = ''.join(e for e in subnet["name"] if e.isalnum()).capitalize() public_subnets.append(ImportValue("{}{}{}SubnetId".format(self.stage, vpc_name_formatted, subnet_name_formatted))) # Instances Security Groups web_dmz_security_group = template.add_resource( SecurityGroup( "{}WebDMZSecurityGroup".format(self.stage), GroupName="{}webdmz-sg".format(self.stage), VpcId=ImportValue("{}{}VpcId".format(self.stage,vpc_name_formatted)), GroupDescription="Enables external http access to EC2 instance(s) that host the webpages", SecurityGroupIngress=[ SecurityGroupRule( IpProtocol="tcp", FromPort="80", ToPort="80", CidrIp="0.0.0.0/0", ), SecurityGroupRule( IpProtocol="tcp", FromPort="22", ToPort="22", SourceSecurityGroupId=ImportValue("{}BastionHostSecurityGroupID".format(self.stage)) ) ] ) ) rds_private_security_group = template.add_resource( SecurityGroup( "{}RdsPrivateSecurityGroup".format(self.stage), GroupName="{}rds-private-sg".format(self.stage), VpcId=ImportValue("{}{}VpcId".format(self.stage,vpc_name_formatted)), GroupDescription="Allow access to the mysql port from the webservers", SecurityGroupIngress=[ SecurityGroupRule( IpProtocol="tcp", FromPort=self.database_port, 
# Database port reachable only from the webdmz security group.
ToPort=self.database_port, SourceSecurityGroupId=Ref(web_dmz_security_group) ) ] ) ) # S3 Buckets for wordpress content bucket_wordpress_code = template.add_resource( Bucket( "{}BucketWordpressCode".format(self.stage), BucketName="{}-wordpress-code".format(self.stage), AccessControl=Private ) ) bucket_wordpress_media_assets = template.add_resource( Bucket( "{}BucketWordpressMediaAssets".format(self.stage), BucketName="{}-wordpress-media-assets".format(self.stage), AccessControl=Private ) ) # Database Instance to store wordpress data rds_subnet_group = template.add_resource( DBSubnetGroup( "{}PrivateRDSSubnetGroup".format(self.stage), DBSubnetGroupName="{}private-rds-subnet-group".format(self.stage), DBSubnetGroupDescription="Subnets available for the RDS DB Instance", SubnetIds=private_subnets ) ) template.add_resource( DBInstance( "{}RdsInstance".format(self.stage), DBInstanceIdentifier="{}RdsInstance".format(self.stage), DBName=self.database_name, AllocatedStorage="20", DBInstanceClass=self.database_instance_class, Engine=self.database_engine, EngineVersion=self.database_engine_version, MasterUsername=self.database_username, MasterUserPassword=self.database_password, Port=self.database_port, BackupRetentionPeriod=0, MultiAZ=self.database_multiaz, DBSubnetGroupName=Ref(rds_subnet_group), VPCSecurityGroups=[Ref(rds_private_security_group)], Tags=Tags( Name=self.database_name_tag ) ) ) # Cloudfront Distribution to load images cloudfront_origin_access_identity = template.add_resource( CloudFrontOriginAccessIdentity( "{}CloudfrontOriginAccessIdentity".format(self.stage), CloudFrontOriginAccessIdentityConfig=CloudFrontOriginAccessIdentityConfig( "{}CloudFrontOriginAccessIdentityConfig".format(self.stage), Comment="WordPress Origin Access Identity" ) ) ) template.add_resource(BucketPolicy( "{}BucketWordpressMediaAssetsPolicy".format(self.stage), Bucket=Ref(bucket_wordpress_media_assets), PolicyDocument={ "Version": "2008-10-17", "Id": 
# Media bucket readable only by the CloudFront OAI's canonical user.
"PolicyForCloudFrontPrivateContent", "Statement": [ { "Sid": "1", "Effect": "Allow", "Principal": { "CanonicalUser": GetAtt(cloudfront_origin_access_identity, 'S3CanonicalUserId') }, "Action": "s3:GetObject", "Resource": "arn:aws:s3:::{}-wordpress-media-assets/*".format(self.stage) } ] } )) cloudfront_distribution = template.add_resource( Distribution( "{}CloudfrontDistribution".format(self.stage), DistributionConfig=DistributionConfig( Origins=[ Origin( Id="MediaAssetsOrigin", DomainName=GetAtt(bucket_wordpress_media_assets, 'DomainName'), S3OriginConfig=S3Origin( OriginAccessIdentity=Join("", [ "origin-access-identity/cloudfront/", Ref(cloudfront_origin_access_identity) ]) ) ) ], DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="MediaAssetsOrigin", ForwardedValues=ForwardedValues( QueryString=False ), ViewerProtocolPolicy="allow-all" ), Enabled=True, HttpVersion='http2' ) ) ) # Wordpress EC2 Instances ''' EC2 Instances types: Write node = To make changes to your blog. E.g: add new posts Read Nodes = Instances open to the internet for blog reading ''' wordpress_ec2_role = template.add_resource( Role( "{}WordPressEC2InstanceRole".format(self.stage), RoleName="{}WordPressEC2InstanceRole".format(self.stage), Path="/", AssumeRolePolicyDocument={"Statement": [{ "Effect": "Allow", "Principal": { "Service": ["ec2.amazonaws.com"] }, "Action": ["sts:AssumeRole"] }]}, Policies=[ Policy( PolicyName="S3FullAccess", PolicyDocument={ "Statement": [{ "Effect": "Allow", "Action": "s3:*", "Resource": "*" }], } ) ] ) ) spotfleetrole = template.add_resource( Role( "{}spotfleetrole".format(self.stage), AssumeRolePolicyDocument={ "Statement": [ { "Action": "sts:AssumeRole", "Principal": { "Service": "spotfleet.amazonaws.com" }, "Effect": "Allow", "Sid": "" } ], "Version": "2012-10-17" }, ManagedPolicyArns=[ "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetRole" ] ) ) ec2_instance_profile = template.add_resource( InstanceProfile( 
# Instance profile shared by the write (spot) and read (ASG) nodes.
"{}WriteWordpressEc2InstanceProfile".format(self.stage), Roles=[Ref(wordpress_ec2_role)] ) ) template.add_resource( SpotFleet( "{}WriteWordpressEc2Instance".format(self.stage), SpotFleetRequestConfigData=SpotFleetRequestConfigData( AllocationStrategy="lowestPrice", IamFleetRole=GetAtt(spotfleetrole,"Arn"), LaunchSpecifications=[LaunchSpecifications( IamInstanceProfile=IamInstanceProfile( Arn=GetAtt(ec2_instance_profile, "Arn") ), ImageId=self.write_instance_image_id, InstanceType=self.write_instance_type, KeyName=self.write_instance_key_name, SecurityGroups=[SecurityGroups(GroupId=Ref(web_dmz_security_group))], SubnetId=next(iter(public_subnets)), UserData=Base64( Join("", [ """ #!/bin/bash yum install httpd php php-mysql -y cd /var/www/html echo \"healthy\" > healthy.html wget https://wordpress.org/latest.tar.gz tar -xzf latest.tar.gz cp -r wordpress/* /var/www/html/ rm -rf wordpress rm -rf latest.tar.gz chmod -R 755 wp-content chown -R apache:apache wp-content echo -e 'Options +FollowSymlinks \nRewriteEngine on \nrewriterule ^wp-content/uploads/(.*)$ http://""", GetAtt(cloudfront_distribution, 'DomainName'), """/$1 [r=301,nc]' > .htaccess chkconfig httpd on cd /var/www sudo chown -R apache /var/www/html cd html/ sudo find . -type d -exec chmod 0755 {} \; sudo find . 
-type f -exec chmod 0644 {} \; sed -i 's/AllowOverride None/AllowOverride All/g' /etc/httpd/conf/httpd.conf sed -i 's/AllowOverride none/AllowOverride All/g' /etc/httpd/conf/httpd.conf echo -e "*/1 * * * * root aws s3 sync --delete /var/www/html s3://""", Ref(bucket_wordpress_code), """">> /etc/crontab echo -e "*/1 * * * * root aws s3 sync --delete /var/www/html/wp-content/uploads s3://""", Ref(bucket_wordpress_media_assets), """">> /etc/crontab service httpd start """ ]) ) )], TargetCapacity=1, Type="request" ) ) ) template.add_resource( LaunchConfiguration( "{}WordPressReadLaunchConfiguration".format(self.stage), InstanceType=self.read_instance_type, ImageId=self.read_instance_image_id, KeyName=self.read_instance_key_name, LaunchConfigurationName="{}-wordpress-launch-config".format(self.stage), SecurityGroups=[Ref(web_dmz_security_group)], IamInstanceProfile=Ref(ec2_instance_profile), SpotPrice="0.5", UserData=Base64( Join("", [ """ #!/bin/bash yum install httpd php php-mysql -y cd /var/www/html echo \"healthy\" > healthy.html wget https://wordpress.org/latest.tar.gz tar -xzf latest.tar.gz cp -r wordpress/* /var/www/html/ rm -rf wordpress rm -rf latest.tar.gz chmod -R 755 wp-content chown -R apache:apache wp-content echo -e 'Options +FollowSymlinks \nRewriteEngine on \nrewriterule ^wp-content/uploads/(.*)$ http://""", GetAtt(cloudfront_distribution, 'DomainName'), """/$1 [r=301,nc]' > .htaccess chkconfig httpd on cd /var/www sudo chown -R apache /var/www/html cd html/ sudo find . -type d -exec chmod 0755 {} \; sudo find . 
-type f -exec chmod 0644 {} \; sed -i 's/AllowOverride None/AllowOverride All/g' /etc/httpd/conf/httpd.conf sed -i 's/AllowOverride none/AllowOverride All/g' /etc/httpd/conf/httpd.conf echo -e "*/1 * * * * root aws s3 sync --delete s3://""", Ref(bucket_wordpress_code), """ /var/www/html">> /etc/crontab echo -e "*/1 * * * * root aws s3 sync --delete s3://""", Ref(bucket_wordpress_media_assets), """/var/www/html/wp-content/uploads">> /etc/crontab service httpd start """ ]) ) ) ) alb = template.add_resource( LoadBalancer( "{}ApplicationLoadBalancer".format(self.stage), Name="{}-wordpress-alb".format(self.stage), SecurityGroups=[Ref(web_dmz_security_group)], Subnets=public_subnets, Type="application" ) ) target_group = template.add_resource( TargetGroup( "{}TargetGroup".format(self.stage), Name="{}-wordpress-target-group".format(self.stage), Port=80, Protocol="HTTP", VpcId=ImportValue("{}{}VpcId".format(self.stage,vpc_name_formatted)), HealthCheckPort=8080 ) ) template.add_resource( AutoScalingGroup( "{}AutoScalingGroup".format(self.stage), DependsOn="{}WordPressReadLaunchConfiguration".format(self.stage), AutoScalingGroupName="{}-wordpress-auto-scaling".format(self.stage), LaunchConfigurationName="{}-wordpress-launch-config".format(self.stage), TargetGroupARNs=[Ref(target_group)], MaxSize="3", MinSize="1", VPCZoneIdentifier=public_subnets, Tags=[ Tag("Name", "{}-wordpress-read-node".format(self.stage), True) ] ) ) template.add_resource( Listener( "ALBListener", DefaultActions=[ Action( TargetGroupArn=Ref(target_group), Type="forward" ) ], LoadBalancerArn=Ref(alb), Port=80, Protocol="HTTP" ) ) f = open("modules/template_wordpress.yaml", 'w') print(template.to_yaml(), file=f)
# Attach the standard CloudTrail delivery policy to AWSCloudTrailBucket:
# ACL-read for cloudtrail.amazonaws.com plus PutObject under
# /AWSLogs/<account-id>/* with the bucket-owner-full-control ACL enforced.
# FIX: the original rebound the name `BucketPolicy`, shadowing the
# troposphere BucketPolicy class; renamed the module-level binding.
cloudtrail_bucket_policy = t.add_resource(BucketPolicy(
    "BucketPolicy",
    PolicyDocument={
        "Statement": [
            {
                "Sid": "AWSCloudTrailAclCheck",
                "Effect": "Allow",
                "Principal": {"Service": ["cloudtrail.amazonaws.com"]},
                "Action": "s3:GetBucketAcl",
                "Resource": {"Fn::GetAtt": ["AWSCloudTrailBucket", "Arn"]},
            },
            {
                "Sid": "AWSCloudTrailWrite",
                "Effect": "Allow",
                "Principal": {"Service": ["cloudtrail.amazonaws.com"]},
                "Action": "s3:PutObject",
                "Resource": {
                    "Fn::Join": ["", [
                        {"Fn::GetAtt": ["AWSCloudTrailBucket", "Arn"]},
                        "/AWSLogs/",
                        {"Ref": "AWS::AccountId"},
                        "/*",
                    ]]
                },
                "Condition": {
                    "StringEquals": {
                        "s3:x-amz-acl": "bucket-owner-full-control"
                    }
                },
            },
        ]
    },
    Bucket=Ref(AWSCloudTrailBucket),
))
def create_cloud_front_template():
    """Generate ./cloudfront.yml: a private S3 bucket served through a
    CloudFront distribution via an Origin Access Identity (OAI).

    The bucket policy grants s3:GetObject solely to the OAI's canonical
    user, so objects are only reachable through CloudFront.
    """
    template = Template()

    # Content bucket; the name is account-scoped to stay globally unique.
    content_bucket = template.add_resource(Bucket(
        'SampleBucket',
        BucketName=Sub('sample-bucket-${AWS::AccountId}'),
    ))

    # Identity CloudFront presents when fetching objects from S3.
    oai = template.add_resource(CloudFrontOriginAccessIdentity(
        'SampleOriginAccessIdentity',
        CloudFrontOriginAccessIdentityConfig=CloudFrontOriginAccessIdentityConfig(
            Comment='sample',
        ),
    ))

    # Only the OAI's canonical user may read objects from the bucket.
    read_only_statement = {
        'Action': 's3:GetObject',
        'Effect': 'Allow',
        'Resource': Join(delimiter='/',
                         values=[GetAtt(content_bucket, 'Arn'), '*']),
        'Principal': {
            'CanonicalUser': GetAtt(logicalName=oai,
                                    attrName='S3CanonicalUserId')
        },
    }
    template.add_resource(BucketPolicy(
        'SampleBucketPolicy',
        Bucket=Ref(content_bucket),
        PolicyDocument={'Statement': [read_only_statement]},
    ))

    # Distribution in front of the bucket; 403s from S3 (missing keys)
    # are rewritten to the custom 404 page with a 200 status.
    template.add_resource(Distribution(
        'SampleDistribution',
        DistributionConfig=DistributionConfig(
            Enabled=True,
            Origins=[
                Origin(
                    Id=Sub('S3-${' + content_bucket.title + '}'),
                    DomainName=Sub('${' + content_bucket.title + '}.s3.amazonaws.com'),
                    S3OriginConfig=S3OriginConfig(
                        OriginAccessIdentity=Sub(
                            'origin-access-identity/cloudfront/${' + oai.title + '}'
                        ),
                    ),
                ),
            ],
            DefaultCacheBehavior=DefaultCacheBehavior(
                ForwardedValues=ForwardedValues(QueryString=True),
                TargetOriginId=Sub('S3-${' + content_bucket.title + '}'),
                ViewerProtocolPolicy='redirect-to-https',
            ),
            DefaultRootObject='index.html',
            CustomErrorResponses=[
                CustomErrorResponse(
                    ErrorCode=403,
                    ResponseCode=200,
                    ResponsePagePath='/404.html',
                    ErrorCachingMinTTL=30,
                ),
            ],
        ),
    ))

    with open('./cloudfront.yml', mode='w') as file:
        file.write(template.to_yaml())
# -- Notification-bucket fragment (truncated at the start) --
# Tail of a retained bucket that publishes s3:ObjectCreated:* events to an
# imported SNS topic, a policy granting account 195048873603 ListBucket /
# GetObject / GetObjectVersion on the bucket and its objects, and an
# exported JsonBucketArn output; the template JSON is printed at the end.
DeletionPolicy="Retain", NotificationConfiguration=NotificationConfiguration( TopicConfigurations=[ TopicConfigurations(Event="s3:ObjectCreated:*", Topic=ImportValue( Sub("${LambdaEnv}-JsonTopicArn"))) ]))) t.add_resource( BucketPolicy( "LiveBucketTopicPolicy", Bucket=Ref("NotificationsToBeIngested"), PolicyDocument=Policy(Statement=[ Statement( Effect=Allow, Action=[ListBucket, GetObject, GetObjectVersion], Principal=AWSPrincipal("arn:aws:iam::195048873603:root"), Resource=[ Join("/", [GetAtt("NotificationsToBeIngested", "Arn"), "*"]), GetAtt("NotificationsToBeIngested", "Arn") ]) ]))) t.add_output( Output("JsonBucketArn", Export=Export(Sub("${LambdaEnv}-JsonBucketArn")), Description="ARN of the bucket receiving clips data", Value=GetAtt("NotificationsToBeIngested", "Arn"))) print(t.to_json(indent=2))
# -- Video-distribution fragment (truncated at both ends) --
# Tail of an Origin Access Identity declaration, then a bucket policy
# letting that OAI's canonical user s3:GetObject any object in the video
# bucket; the QueuePolicy for the encoder destination queues is cut off.
['OAI for ', Ref(AWS_STACK_NAME), ' public videos.']), ), )) template.add_resource( BucketPolicy( "VideoBucketPolicy", Bucket=Ref(video_bucket), PolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Sid": "OriginAccessIdentity", "Effect": "Allow", "Principal": { "CanonicalUser": GetAtt(video_oai, 'S3CanonicalUserId') }, "Action": ["s3:GetObject"], "Resource": Join('', ["arn:aws:s3:::", Ref(video_bucket), "/*"]), }, ], }, )) template.add_resource( QueuePolicy( "StartEncodingDestinationQueuesPolicy",
# -- Teller-responses bucket policy fragment (truncated at the start) --
# Grants the dump-teller-response Lambda role PutObject under
# /teller-responses/*, and the diff-alert Lambda role GetObject on all
# objects plus ListBucket on the bucket itself. The enclosing
# add_resource(...) call begins before this fragment (note the extra
# closing paren at the end).
BucketPolicy( 'BucketPolicy', PolicyDocument={ 'Version': '2012-10-17', 'Statement': [{ 'Action': 's3:PutObject', 'Effect': 'Allow', 'Resource': Join('', [ 'arn:aws:s3:::', Ref(s3_bucket), '/teller-responses/*' ]), 'Principal': { 'AWS': GetAtt(lambda_role_dump_teller_response, 'Arn') } }, { 'Action': 's3:GetObject', 'Effect': 'Allow', 'Resource': Join('', ['arn:aws:s3:::', Ref(s3_bucket), '/*']), 'Principal': { 'AWS': GetAtt(lambda_role_diff_alert, 'Arn') } }, { 'Action': 's3:ListBucket', 'Effect': 'Allow', 'Resource': Join('', ['arn:aws:s3:::', Ref(s3_bucket)]), 'Principal': { 'AWS': GetAtt(lambda_role_diff_alert, 'Arn') } }] }, Bucket=Ref(s3_bucket)))
# -- Website CloudFront fragment (truncated at both ends) --
# Origin Access Identity for the website origin bucket, an awacs-built
# policy allowing only that OAI's canonical user to GetObject site files,
# and the start of the robkenis.com CloudFront distribution (cut off
# mid-DistributionConfig).
CloudFrontOriginAccessIdentity( 'WebsiteOriginAccessIdentity', CloudFrontOriginAccessIdentityConfig= CloudFrontOriginAccessIdentityConfig(Comment=Ref(AWS_STACK_NAME), ), )) template.add_resource( BucketPolicy( 'WebsiteOriginPolicy', Bucket=Ref(s3_website_origin), PolicyDocument=Policy(Statement=[ Statement( Effect=Allow, Action=[s3.GetObject], Resource=[ Join('', ['arn:aws:s3:::', Ref(s3_website_origin), '/*']) ], Principal=Principal( 'CanonicalUser', GetAtt(origin_access_identity, 'S3CanonicalUserId')), ) ], ), )) cloudfront = template.add_resource( Distribution( 'CloudFront', DistributionConfig=DistributionConfig( Aliases=['robkenis.com', 'www.robkenis.com'], Comment=Ref(AWS_STACK_NAME),
# Per-region management-bucket policy: lets the eu-west-1 ELB log-delivery
# account (156460612806) and delivery.logs.amazonaws.com write logs under
# /logs/AWSLogs/<account-id>/* (service writes must set the
# bucket-owner-full-control ACL), and lets the delivery service read the
# bucket ACL.
# NOTE(review): the Resource ARNs hard-code mgmt.eu-west-1.weblox.io while
# Bucket=Ref(management_bucket) -- confirm they refer to the same bucket
# for every region this is instantiated in.
management_bucket_policy = BucketPolicy( region.replace("-", "") + "managementbucketpolicy", Bucket=Ref(management_bucket), PolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Principal": { "AWS": "arn:aws:iam::156460612806:root" }, "Action": "s3:PutObject", "Resource": Sub("arn:aws:s3:::mgmt.eu-west-1.weblox.io/logs/AWSLogs/${AWS::AccountId}/*" ), }, { "Effect": "Allow", "Principal": { "Service": "delivery.logs.amazonaws.com" }, "Action": "s3:PutObject", "Resource": Sub("arn:aws:s3:::mgmt.eu-west-1.weblox.io/logs/AWSLogs/${AWS::AccountId}/*" ), "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } } }, { "Effect": "Allow", "Principal": { "Service": "delivery.logs.amazonaws.com" }, "Action": "s3:GetBucketAcl", "Resource": "arn:aws:s3:::mgmt.eu-west-1.weblox.io" }] })
# -- ELB access-logs stack fragment (truncated at the end) --
# Retained S3 bucket plus a policy allowing the us-east-1 ELB log-delivery
# account (127311923021) to PutObject under a hard-coded
# /AWSLogs/511912822958/* prefix; the trailing SecurityGroup is cut off
# mid-ImportValue.
# NOTE(review): the account ID in the prefix is hard-coded rather than
# Ref("AWS::AccountId") -- confirm it matches the deploying account.
t.add_resource(Bucket( "S3Bucket", DeletionPolicy="Retain", )) t.add_resource( BucketPolicy('BucketPolicy', Bucket=Ref("S3Bucket"), PolicyDocument=Policy( Version='2012-10-17', Statement=[ Statement( Action=[PutObject], Effect=Allow, Principal=Principal("AWS", ["127311923021"]), Resource=[ Join('', [ ARN(''), Ref("S3Bucket"), "/AWSLogs/511912822958/*" ]) ], ) ]))) t.add_resource( ec2.SecurityGroup( "LoadBalancerSecurityGroup", GroupDescription="Web load balancer security group.", VpcId=ImportValue( Join("-", [
# -- CloudTrail bucket policy fragment (truncated at the start) --
# Standard CloudTrail delivery policy on "S3Bucket": GetBucketAcl for the
# service plus PutObject under /AWSLogs/<account-id>/* requiring the
# bucket-owner-full-control ACL. The enclosing add_resource(...) call
# begins before this fragment (note the extra closing paren).
BucketPolicy( "BucketPolicy", Bucket=Ref("S3Bucket"), PolicyDocument={ "Statement": [{ "Action": "s3:GetBucketAcl", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Resource": Join("", ["arn:aws:s3:::", Ref("S3Bucket")]) }, { "Action": "s3:PutObject", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Resource": Join("", [ "arn:aws:s3:::", Ref("S3Bucket"), "/AWSLogs/", Ref("AWS::AccountId"), "/*" ]), "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } } }] }, ))
# -- CloudTrail bucket policy fragment (truncated at both ends) --
# awacs-built policy on the trail bucket: AWSCloudTrailAclCheck
# (GetBucketAcl) and AWSCloudTrailWrite (PutObject under
# /AWSLogs/<account-id>/* conditioned on the bucket-owner-full-control
# ACL); a Role declaration is cut off at the end.
DeletionPolicy="Retain")) bucket_policy = t.add_resource( BucketPolicy( "BucketPolicy", Bucket=Ref(bucket), PolicyDocument=Policy(Statement=[ Statement( Sid="AWSCloudTrailAclCheck", Effect=Allow, Action=[Action("s3", "GetBucketAcl")], Principal=Principal("Service", ["cloudtrail.amazonaws.com"]), Resource=[Join("", ["arn:aws:s3:::", Ref(bucket)])]), Statement( Sid="AWSCloudTrailWrite", Effect=Allow, Action=[Action("s3", "PutObject")], Principal=Principal("Service", ["cloudtrail.amazonaws.com"]), Resource=[ Join("", [ "arn:aws:s3:::", Ref(bucket), "/AWSLogs/", Ref("AWS::AccountId"), "/*" ]) ], Condition=Condition( StringEquals('s3:x-amz-acl', 'bucket-owner-full-control'))) ]))) lambda_role = t.add_resource( Role(
# -- Log-bucket policy fragment (truncated at the start) --
# "LogPolicy" on LogBucket combines the documented CloudTrail delivery
# statements with AWSELBLogWrite, which lets the region's ELB account
# (looked up in the "Principals" mapping by region) write under
# /ELBLogs/*. The enclosing add_resource(...) call begins before this
# fragment.
BucketPolicy( "LogPolicy", Bucket=Ref("LogBucket"), PolicyDocument={ "Version": "2012-10-17", "Statement": [ { # https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-s3-bucket-policy-for-cloudtrail.html "Sid": "AWSCloudTrailAclCheck", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Action": "s3:GetBucketAcl", "Resource": { "Fn::Join": ["", ["arn:aws:s3:::", Ref("LogBucket")]] } }, { # CloudTrail automatically writes to the bucket_name/AWSLogs/account_ID/ folder, # so the bucket policy grants write privileges for that prefix # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudtrail-trail.html#w1ab2c19c12d143c15 "Sid": "AWSCloudTrailWrite", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Action": "s3:PutObject", "Resource": { "Fn::Join": [ "", [ "arn:aws:s3:::", Ref("LogBucket"), "/AWSLogs/", Ref("AWS::AccountId"), "/*" ] ] }, "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } } }, { # https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html "Sid": "AWSELBLogWrite", "Effect": "Allow", "Action": "s3:PutObject", "Resource": { "Fn::Join": [ "", ["arn:aws:s3:::", Ref("LogBucket"), "/ELBLogs/*"] ] }, "Principal": { "AWS": [{ "Fn::FindInMap": ["Principals", Ref("AWS::Region"), "ELB"] }] } } ] }))
def bucketPolicy(self, bucket: Bucket, pol: awacs.aws.Policy) -> BucketPolicy:
    """Build a BucketPolicy resource attaching *pol* to *bucket*.

    The logical ID is derived from the bucket's title with a
    "BucketPolicy" suffix.
    """
    logical_id = "{}BucketPolicy".format(bucket.title)
    return BucketPolicy(logical_id, Bucket=Ref(bucket), PolicyDocument=pol)
def elb_asg_lc_template(app, env, nameSGRDS, rdsPort, instanceType, ami,
                        subnets, elbPort, elbCidrBlock, ec2Port,
                        desiredCapacity, minSize, maxSize, region, nameBucket,
                        officeIP):
    """Render a CloudFormation template for a classic ELB + AutoScaling tier.

    The template contains:
      * an ELB security group (inbound ``elbPort`` from ``elbCidrBlock``),
      * an EC2 security group (inbound ``ec2Port`` from the ELB SG, plus
        SSH from ``officeIP``),
      * an ingress rule opening ``rdsPort`` on an imported RDS security
        group for the EC2 instances,
      * a launch configuration, a classic load balancer logging to an
        imported S3 bucket, the bucket policy allowing the ELB to write
        those logs, and an auto-scaling group wired to the ELB.

    :param app: application name, used to build logical IDs and tags
    :param env: environment name, used in logical IDs and import names
    :param nameSGRDS: name fragment of the exported RDS security group
    :param rdsPort: database port to open from the EC2 instances
    :param instanceType: EC2 instance type for the launch configuration
    :param ami: AMI id for the launch configuration
    :param subnets: subnet ids for the ELB and the ASG
    :param elbPort: listener port exposed by the load balancer
    :param elbCidrBlock: CIDR allowed to reach the ELB
    :param ec2Port: instance port the ELB forwards to and health-checks
    :param desiredCapacity: ASG desired capacity
    :param minSize: ASG minimum size
    :param maxSize: ASG maximum size
    :param region: AWS region, used to enumerate availability zones
    :param nameBucket: name fragment of the exported logging bucket
    :param officeIP: CIDR allowed to SSH to the instances
    :return: the rendered template as a JSON string
    """
    template = Template()

    # Egress rule used by both security groups: allow all outbound.
    # BUG FIX: FromPort/ToPort were swapped (FromPort=65535, ToPort=0);
    # a port range must run low -> high.
    allow_all_egress = SecurityGroupRule(IpProtocol="-1",
                                         FromPort=0,
                                         ToPort=65535,
                                         CidrIp="0.0.0.0/0")

    sgELB = template.add_resource(
        SecurityGroup(
            "SecurityGroupELB" + app + env,
            GroupDescription="Security group for " + app + "-" + env,
            VpcId=ImportValue("VPC" + env),
            SecurityGroupIngress=[
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=elbPort,
                    ToPort=elbPort,
                    CidrIp=elbCidrBlock,
                )
            ],
            SecurityGroupEgress=[allow_all_egress],
            Tags=Tags(
                env=env,
                Name="sg-ELB" + app + "-" + env,
                app=app,
            ),
        ))

    sgEC2 = template.add_resource(
        SecurityGroup(
            "SecurityGroupEC2" + app + env,
            GroupDescription="Security group for EC2 " + app + "-" + env,
            VpcId=ImportValue("VPC" + env),
            DependsOn="SecurityGroupELB" + app + env,
            SecurityGroupIngress=[
                # Application traffic is accepted only from the ELB.
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=ec2Port,
                    ToPort=ec2Port,
                    SourceSecurityGroupId=Ref(sgELB),
                ),
                # SSH restricted to the office IP.
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=22,
                    ToPort=22,
                    CidrIp=officeIP,
                ),
            ],
            SecurityGroupEgress=[allow_all_egress],
            Tags=Tags(
                env=env,
                Name="sg-EC2-" + app + "-" + env,
                app=app,
            ),
        ))

    # Open the database port on the (already existing, exported) RDS
    # security group for instances in sgEC2.
    template.add_resource(
        SecurityGroupIngress(
            "ingressSGRDS" + app + env,
            SourceSecurityGroupId=Ref(sgEC2),
            Description="From EC2 instances",
            GroupId=ImportValue("SG-" + nameSGRDS + "-" + app + "-" + env),
            IpProtocol="tcp",
            FromPort=rdsPort,
            ToPort=rdsPort,
            DependsOn="SecurityGroupEC2" + app + env,
        ))

    launchConfig = template.add_resource(
        LaunchConfiguration(
            "LaunchConfiguration" + app + env,
            InstanceType=instanceType,
            ImageId=ami,
            SecurityGroups=[Ref(sgEC2)],
            IamInstanceProfile=ImportValue("Role-" + app + "-" + env)))

    # Allow the ELB log-delivery account to write access logs into the
    # imported bucket.
    # NOTE(review): 156460612806 looks like a regional ELB log-delivery
    # account; if this stack is deployed to another region the principal
    # must be the matching regional account — confirm against the ELB
    # access-log documentation.
    template.add_resource(
        BucketPolicy(
            "BucketPolicy" + nameBucket + app + env,
            Bucket=ImportValue("Bucket" + nameBucket + app + env),
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Action": ["s3:PutObject"],
                    "Effect": "Allow",
                    "Resource": Join("", [
                        "arn:aws:s3:::",
                        ImportValue("Bucket" + nameBucket + app + env),
                        "/AWSLogs/",
                        Ref("AWS::AccountId"), "/*"
                    ]),
                    "Principal": {
                        "AWS": ["156460612806"]
                    }
                }]
            }))

    lb = template.add_resource(
        LoadBalancer(
            "LoadBalancer" + app + env,
            ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                Enabled=True,
                Timeout=120,
            ),
            Subnets=subnets,
            # TCP health check against the instance port.
            HealthCheck=elb.HealthCheck(
                "HealthCheck",
                Target="TCP:" + str(ec2Port),
                HealthyThreshold="5",
                UnhealthyThreshold="5",
                Interval="30",
                Timeout="15",
            ),
            Listeners=[
                elb.Listener(
                    LoadBalancerPort=elbPort,
                    InstancePort=ec2Port,
                    Protocol="HTTP",
                    InstanceProtocol="HTTP",
                ),
            ],
            CrossZone=True,
            SecurityGroups=[Ref(sgELB)],
            LoadBalancerName="lb-" + app + "-" + env,
            Scheme="internet-facing",
            AccessLoggingPolicy=AccessLoggingPolicy(
                "LoggingELB" + app + env,
                EmitInterval=5,
                Enabled=True,
                S3BucketName=ImportValue("Bucket" + nameBucket + app + env),
            )))

    template.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup" + app + env,
            DesiredCapacity=desiredCapacity,
            Tags=[Tag("Environment", env, True)],
            LaunchConfigurationName=Ref(launchConfig),
            MinSize=minSize,
            MaxSize=maxSize,
            LoadBalancerNames=[Ref(lb)],
            AvailabilityZones=GetAZs(region),
            VPCZoneIdentifier=subnets,
            HealthCheckType="ELB",
            HealthCheckGracePeriod=300,
            # Roll instances one at a time (waiting for cfn signals) and
            # replace the whole group on a breaking change.
            UpdatePolicy=UpdatePolicy(
                AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
                    WillReplace=True, ),
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    PauseTime='PT5M',
                    MinInstancesInService="1",
                    MaxBatchSize='1',
                    WaitOnResourceSignals=True,
                ))))

    return template.to_json()
], DependsOn=Ref(alb_security_group)) template.add_resource(alb_security_group) template.add_resource(ec2_security_group) logs_bucket = Bucket("LogsBucket", DeletionPolicy="Retain") logs_bucket_policy = BucketPolicy( "LogsBucketPolicy", Bucket=Ref(logs_bucket), PolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Sid": "Stmt1429136633762", "Action": ["s3:PutObject"], "Effect": "Allow", "Resource": Join("", ['s3:::', Ref(logs_bucket), "/alb/*"]), "Principal": { "AWS": Ref("AWS::AccountId") } }] }) template.add_resource(logs_bucket) template.add_resource(logs_bucket_policy) # Create Application Load Balancer load_balancer = LoadBalancer( "exampleloadbalancer",