def add_bucket(self):  # type: () -> s3.Bucket
    """Add the bucket resource along with an output of its name / website URL.

    Returns:
        dict: The bucket resource.
    """
    bucket = self.template.add_resource(
        s3.Bucket(
            'Bucket',
            # Private when CloudFront (with an OAI) fronts the bucket;
            # public-read when the bucket serves the site directly.
            AccessControl=(s3.Private if self.cf_enabled else s3.PublicRead),
            # Expire noncurrent object versions so the versioned bucket
            # does not grow without bound.
            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                 Status='Enabled')
            ]),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled')))
    self.template.add_output(
        Output('BucketName',
               Description='Name of website bucket',
               Value=bucket.ref()))
    if not self.cf_enabled:
        # bucket cannot be configured with WebsiteConfiguration when using
        # an OAI S3Origin, so only enable website hosting (and emit the
        # website URL output) when CloudFront is disabled.
        bucket['WebsiteConfiguration'] = s3.WebsiteConfiguration(
            IndexDocument='index.html',
            ErrorDocument='error.html')
        self.template.add_output(
            Output('BucketWebsiteURL',
                   Description='URL of the bucket website',
                   Value=bucket.get_att('WebsiteURL')))
    return bucket
def add_bucket(self):  # type: () -> s3.Bucket
    """Add the website bucket plus outputs for its name / website URL.

    Returns:
        The bucket resource.
    """
    # Private when CloudFront fronts the bucket; public for direct hosting.
    access_control = s3.Private if self.cf_enabled else s3.PublicRead
    # Keep noncurrent object versions for at most 90 days.
    expire_old_versions = s3.LifecycleRule(
        NoncurrentVersionExpirationInDays=90, Status="Enabled"
    )
    site_bucket = self.template.add_resource(
        s3.Bucket(
            "Bucket",
            AccessControl=access_control,
            LifecycleConfiguration=s3.LifecycleConfiguration(
                Rules=[expire_old_versions]
            ),
            VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
        )
    )
    self.template.add_output(
        Output(
            "BucketName",
            Description="Name of website bucket",
            Value=site_bucket.ref(),
        )
    )
    if not self.cf_enabled:
        # bucket cannot be configured with WebsiteConfiguration when using OAI S3Origin
        site_bucket["WebsiteConfiguration"] = s3.WebsiteConfiguration(
            IndexDocument="index.html", ErrorDocument="error.html"
        )
        self.template.add_output(
            Output(
                "BucketWebsiteURL",
                Description="URL of the bucket website",
                Value=site_bucket.get_att("WebsiteURL"),
            )
        )
    return site_bucket
def create_template(self):
    """Create template (main function called by Stacker).

    Builds the static-website stack: a private S3 bucket fronted by a
    CloudFront distribution (via an Origin Access Identity), plus an
    optional Lambda@Edge function that rewrites directory requests to a
    default index document.
    """
    template = self.template
    variables = self.get_variables()
    template.set_version('2010-09-09')
    template.set_description('Static Website - Bucket and Distribution')

    # Conditions
    # Each "*Specified" condition is true only when the corresponding
    # variable holds a real value (not blank and not the literal string
    # 'undefined').
    template.add_condition(
        'AcmCertSpecified',
        And(Not(Equals(variables['AcmCertificateArn'].ref, '')),
            Not(Equals(variables['AcmCertificateArn'].ref, 'undefined')))
    )
    template.add_condition(
        'AliasesSpecified',
        # Aliases is a list parameter; only its first element is inspected.
        And(Not(Equals(Select(0, variables['Aliases'].ref), '')),
            Not(Equals(Select(0, variables['Aliases'].ref), 'undefined')))
    )
    template.add_condition(
        'CFLoggingEnabled',
        And(Not(Equals(variables['LogBucketName'].ref, '')),
            Not(Equals(variables['LogBucketName'].ref, 'undefined')))
    )
    template.add_condition(
        'DirectoryIndexSpecified',
        And(Not(Equals(variables['RewriteDirectoryIndex'].ref, '')),
            Not(Equals(variables['RewriteDirectoryIndex'].ref, 'undefined')))  # noqa
    )
    template.add_condition(
        'WAFNameSpecified',
        And(Not(Equals(variables['WAFWebACL'].ref, '')),
            Not(Equals(variables['WAFWebACL'].ref, 'undefined')))
    )

    # Resources
    # Origin Access Identity through which CloudFront reads the private
    # bucket.
    oai = template.add_resource(
        cloudfront.CloudFrontOriginAccessIdentity(
            'OAI',
            CloudFrontOriginAccessIdentityConfig=cloudfront.CloudFrontOriginAccessIdentityConfig(  # noqa pylint: disable=line-too-long
                Comment='CF access to website'
            )
        )
    )

    bucket = template.add_resource(
        s3.Bucket(
            'Bucket',
            AccessControl=s3.Private,
            # Expire noncurrent object versions after 90 days so the
            # versioned bucket does not grow without bound.
            LifecycleConfiguration=s3.LifecycleConfiguration(
                Rules=[
                    s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                     Status='Enabled')
                ]
            ),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled'
            ),
            WebsiteConfiguration=s3.WebsiteConfiguration(
                IndexDocument='index.html',
                ErrorDocument='error.html'
            )
        )
    )
    template.add_output(Output(
        'BucketName',
        Description='Name of website bucket',
        Value=bucket.ref()
    ))

    # Bucket policy granting the OAI read access to every object.
    allowcfaccess = template.add_resource(
        s3.BucketPolicy(
            'AllowCFAccess',
            Bucket=bucket.ref(),
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Action=[awacs.s3.GetObject],
                        Effect=Allow,
                        Principal=Principal(
                            'CanonicalUser',
                            oai.get_att('S3CanonicalUserId')
                        ),
                        Resource=[
                            Join('', [bucket.get_att('Arn'), '/*'])
                        ]
                    )
                ]
            )
        )
    )

    # Execution role for the directory-index rewrite function; created
    # only when the DirectoryIndexSpecified condition holds.
    cfdirectoryindexrewriterole = template.add_resource(
        iam.Role(
            'CFDirectoryIndexRewriteRole',
            Condition='DirectoryIndexSpecified',
            AssumeRolePolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[awacs.sts.AssumeRole],
                        # Both principals are needed for Lambda@Edge
                        # replication.
                        Principal=Principal('Service',
                                            ['lambda.amazonaws.com',
                                             'edgelambda.amazonaws.com'])
                    )
                ]
            ),
            ManagedPolicyArns=[
                IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole'
            ]
        )
    )

    # Inline Lambda@Edge source; the configured directory index name is
    # spliced into the JavaScript via Join at deploy time.
    cfdirectoryindexrewrite = template.add_resource(
        awslambda.Function(
            'CFDirectoryIndexRewrite',
            Condition='DirectoryIndexSpecified',
            Code=awslambda.Code(
                ZipFile=Join(
                    '',
                    ["'use strict';\n",
                     "exports.handler = (event, context, callback) => {\n",
                     "\n",
                     " // Extract the request from the CloudFront event that is sent to Lambda@Edge\n",  # noqa pylint: disable=line-too-long
                     " var request = event.Records[0].cf.request;\n",
                     " // Extract the URI from the request\n",
                     " var olduri = request.uri;\n",
                     " // Match any '/' that occurs at the end of a URI. Replace it with a default index\n",  # noqa pylint: disable=line-too-long
                     " var newuri = olduri.replace(/\\/$/, '\\/",
                     variables['RewriteDirectoryIndex'].ref,
                     "');\n",  # noqa
                     " // Log the URI as received by CloudFront and the new URI to be used to fetch from origin\n",  # noqa pylint: disable=line-too-long
                     " console.log(\"Old URI: \" + olduri);\n",
                     " console.log(\"New URI: \" + newuri);\n",
                     " // Replace the received URI with the URI that includes the index page\n",  # noqa pylint: disable=line-too-long
                     " request.uri = newuri;\n",
                     " // Return to CloudFront\n",
                     " return callback(null, request);\n",
                     "\n",
                     "};\n"]
                )
            ),
            Description='Rewrites CF directory HTTP requests to default page',  # noqa
            Handler='index.handler',
            Role=cfdirectoryindexrewriterole.get_att('Arn'),
            # NOTE(review): nodejs8.10 is a retired Lambda runtime; new
            # deployments will need a supported runtime — confirm before
            # reuse.
            Runtime='nodejs8.10'
        )
    )

    # Generating a unique resource name here for the Lambda version, so it
    # updates automatically if the lambda code changes
    code_hash = hashlib.md5(
        str(cfdirectoryindexrewrite.properties['Code'].properties['ZipFile'].to_dict()).encode()  # noqa pylint: disable=line-too-long
    ).hexdigest()

    cfdirectoryindexrewritever = template.add_resource(
        awslambda.Version(
            'CFDirectoryIndexRewriteVer' + code_hash,
            Condition='DirectoryIndexSpecified',
            FunctionName=cfdirectoryindexrewrite.ref()
        )
    )

    # If custom associations defined, use them
    if variables['lambda_function_associations']:
        lambda_function_associations = [
            cloudfront.LambdaFunctionAssociation(
                EventType=x['type'],
                LambdaFunctionARN=x['arn']
            )
            for x in variables['lambda_function_associations']
        ]
    else:  # otherwise fallback to pure CFN condition
        lambda_function_associations = If(
            'DirectoryIndexSpecified',
            [cloudfront.LambdaFunctionAssociation(
                EventType='origin-request',
                LambdaFunctionARN=cfdirectoryindexrewritever.ref()
            )],
            NoValue
        )

    cfdistribution = template.add_resource(
        get_cf_distribution_class()(
            'CFDistribution',
            # Policy must exist before the distribution can read the bucket.
            DependsOn=allowcfaccess.title,
            DistributionConfig=get_cf_distro_conf_class()(
                Aliases=If(
                    'AliasesSpecified',
                    variables['Aliases'].ref,
                    NoValue
                ),
                Origins=[
                    get_cf_origin_class()(
                        DomainName=Join(
                            '.',
                            [bucket.ref(), 's3.amazonaws.com']),
                        S3OriginConfig=get_s3_origin_conf_class()(
                            OriginAccessIdentity=Join(
                                '',
                                ['origin-access-identity/cloudfront/',
                                 oai.ref()])
                        ),
                        Id='S3Origin'
                    )
                ],
                DefaultCacheBehavior=cloudfront.DefaultCacheBehavior(
                    AllowedMethods=['GET', 'HEAD'],
                    Compress=False,
                    DefaultTTL='86400',
                    ForwardedValues=cloudfront.ForwardedValues(
                        Cookies=cloudfront.Cookies(Forward='none'),
                        QueryString=False,
                    ),
                    LambdaFunctionAssociations=lambda_function_associations,  # noqa
                    TargetOriginId='S3Origin',
                    ViewerProtocolPolicy='redirect-to-https'
                ),
                DefaultRootObject='index.html',
                Logging=If(
                    'CFLoggingEnabled',
                    cloudfront.Logging(
                        Bucket=Join('.', [variables['LogBucketName'].ref,
                                          's3.amazonaws.com'])
                    ),
                    NoValue
                ),
                PriceClass=variables['PriceClass'].ref,
                Enabled=True,
                WebACLId=If(
                    'WAFNameSpecified',
                    variables['WAFWebACL'].ref,
                    NoValue
                ),
                ViewerCertificate=If(
                    'AcmCertSpecified',
                    cloudfront.ViewerCertificate(
                        AcmCertificateArn=variables['AcmCertificateArn'].ref,  # noqa
                        SslSupportMethod='sni-only'
                    ),
                    NoValue
                )
            )
        )
    )
    template.add_output(Output(
        'CFDistributionId',
        Description='CloudFront distribution ID',
        Value=cfdistribution.ref()
    ))
    template.add_output(
        Output(
            'CFDistributionDomainName',
            Description='CloudFront distribution domain name',
            Value=cfdistribution.get_att('DomainName')
        )
    )
def create_template(self) -> None:
    """Create template (main function called by Stacker).

    Provisions the Terraform backend: a DynamoDB lock table, a versioned
    state bucket, and a managed policy granting the permissions Terraform
    needs to use them.
    """
    self.template.set_version("2010-09-09")
    self.template.set_description("Terraform State Resources")

    # Conditions
    # A name is "omitted" when its variable is blank or the literal
    # string "undefined"; CloudFormation then auto-generates the name.
    for name in ("BucketName", "TableName"):
        self.template.add_condition(
            name + "Omitted",
            Or(
                Equals(self.variables[name].ref, ""),
                Equals(self.variables[name].ref, "undefined"),
            ),
        )

    # Resources
    # DynamoDB table used by the Terraform S3 backend for state locking.
    lock_table = self.template.add_resource(
        dynamodb.Table(
            "TerraformStateTable",
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(
                    AttributeName="LockID", AttributeType="S"
                )
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName="LockID", KeyType="HASH")
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=2, WriteCapacityUnits=2
            ),
            TableName=If(
                "TableNameOmitted", NoValue, self.variables["TableName"].ref
            ),
        )
    )
    self.template.add_output(
        Output(
            "%sName" % lock_table.title,
            Description="Name of DynamoDB table for Terraform state",
            Value=lock_table.ref(),
        )
    )

    # Versioned bucket holding the Terraform state files; noncurrent
    # versions are expired after 90 days.
    state_bucket = self.template.add_resource(
        s3.Bucket(
            "TerraformStateBucket",
            DeletionPolicy=self.variables["BucketDeletionPolicy"],
            AccessControl=s3.Private,
            BucketName=If(
                "BucketNameOmitted", NoValue, self.variables["BucketName"].ref
            ),
            LifecycleConfiguration=s3.LifecycleConfiguration(
                Rules=[
                    s3.LifecycleRule(
                        NoncurrentVersionExpirationInDays=90, Status="Enabled"
                    )
                ]
            ),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status="Enabled"
            ),
        )
    )
    self.template.add_output(
        Output(
            "%sName" % state_bucket.title,
            Description="Name of bucket storing Terraform state",
            Value=state_bucket.ref(),
        )
    )
    self.template.add_output(
        Output(
            "%sArn" % state_bucket.title,
            Description="Arn of bucket storing Terraform state",
            Value=state_bucket.get_att("Arn"),
        )
    )

    # Managed policy matching the permission set Terraform documents for
    # its S3 backend.
    management_policy = self.template.add_resource(
        iam.ManagedPolicy(
            "ManagementPolicy",
            Description="Managed policy for Terraform state management.",
            Path="/",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    # https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions
                    Statement(
                        Action=[awacs.s3.ListBucket],
                        Effect=Allow,
                        Resource=[state_bucket.get_att("Arn")],
                    ),
                    Statement(
                        Action=[awacs.s3.GetObject, awacs.s3.PutObject],
                        Effect=Allow,
                        Resource=[
                            Join("", [state_bucket.get_att("Arn"), "/*"])
                        ],
                    ),
                    Statement(
                        Action=[
                            awacs.dynamodb.GetItem,
                            awacs.dynamodb.PutItem,
                            awacs.dynamodb.DeleteItem,
                        ],
                        Effect=Allow,
                        Resource=[lock_table.get_att("Arn")],
                    ),
                ],
            ),
        )
    )
    self.template.add_output(
        Output(
            "PolicyArn",
            Description="Managed policy Arn",
            Value=management_policy.ref(),
        )
    )
def create_template(self):
    """Create template (main function called by Stacker).

    Provisions shared dependencies for the static-website stack: an AWS
    log bucket (with a policy letting the account root write logs), an
    artifacts bucket, and — when Auth@Edge is enabled — the Cognito user
    pool and/or app client.
    """
    template = self.template
    variables = self.get_variables()
    template.set_version('2010-09-09')
    template.set_description('Static Website - Dependencies')

    # Resources
    # Versioned bucket receiving AWS access logs.
    awslogbucket = template.add_resource(
        s3.Bucket('AWSLogBucket',
                  AccessControl=s3.Private,
                  VersioningConfiguration=s3.VersioningConfiguration(
                      Status='Enabled')))
    template.add_output(
        Output('AWSLogBucketName',
               Description='Name of bucket storing AWS logs',
               Value=awslogbucket.ref()))

    # Allow the account root principal to put log objects in the bucket.
    template.add_resource(
        s3.BucketPolicy(
            'AllowAWSLogWriting',
            Bucket=awslogbucket.ref(),
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Action=[awacs.s3.PutObject],
                        Effect=Allow,
                        Principal=AWSPrincipal(
                            Join(':',
                                 ['arn:aws:iam:', AccountId, 'root'])),
                        Resource=[
                            Join('', [
                                'arn:aws:s3:::', awslogbucket.ref(), '/*'
                            ])
                        ])
                ])))
    # Artifacts bucket; noncurrent versions expire after 90 days.
    artifacts = template.add_resource(
        s3.Bucket(
            'Artifacts',
            AccessControl=s3.Private,
            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                 Status='Enabled')
            ]),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled')))
    template.add_output(
        Output('ArtifactsBucketName',
               Description='Name of bucket storing artifacts',
               Value=artifacts.ref()))

    if variables['AuthAtEdge']:
        # Callback URLs are supplied by a pre-build hook.
        callbacks = self.context.hook_data['aae_callback_url_retriever'][
            'callback_urls']

        if variables['CreateUserPool']:
            user_pool = template.add_resource(
                cognito.UserPool("AuthAtEdgeUserPool"))
            user_pool_id = user_pool.ref()
            template.add_output(
                Output('AuthAtEdgeUserPoolId',
                       # Fixed copy-pasted description: this output is the
                       # user pool itself, not the app client.
                       Description='Cognito User Pool for Auth @ Edge',
                       Value=user_pool_id))
        else:
            # Reuse an existing pool id provided by a hook.
            user_pool_id = self.context.hook_data[
                'aae_user_pool_id_retriever']['id']

        client = template.add_resource(
            cognito.UserPoolClient(
                "AuthAtEdgeClient",
                AllowedOAuthFlows=['code'],
                CallbackURLs=callbacks,
                UserPoolId=user_pool_id,
                AllowedOAuthScopes=variables['OAuthScopes']))
        template.add_output(
            Output('AuthAtEdgeClient',
                   Description='Cognito User Pool App Client for Auth @ Edge',
                   Value=client.ref()))
def create_template(self) -> None:
    """Create template (main function called by Stacker).

    Provisions shared dependencies for the static-website stack: an AWS
    log bucket (with a policy letting the account root write logs), an
    artifacts bucket, and — when Auth@Edge is enabled — the Cognito user
    pool and/or app client, with OAuth redirect URLs derived from the
    site aliases when any are configured.
    """
    template = self.template
    template.set_version("2010-09-09")
    template.set_description("Static Website - Dependencies")

    # Resources
    # Versioned bucket receiving AWS access logs.
    awslogbucket = template.add_resource(
        s3.Bucket(
            "AWSLogBucket",
            AccessControl=s3.Private,
            VersioningConfiguration=s3.VersioningConfiguration(
                Status="Enabled"),
        ))
    template.add_output(
        Output(
            "AWSLogBucketName",
            Description="Name of bucket storing AWS logs",
            Value=awslogbucket.ref(),
        ))

    # Allow the account root principal to put log objects in the bucket.
    template.add_resource(
        s3.BucketPolicy(
            "AllowAWSLogWriting",
            Bucket=awslogbucket.ref(),
            PolicyDocument=Policy(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Action=[awacs.s3.PutObject],
                        Effect=Allow,
                        Principal=AWSPrincipal(
                            Join(":", ["arn:aws:iam:", AccountId, "root"])),
                        Resource=[
                            Join("", [
                                "arn:aws:s3:::", awslogbucket.ref(), "/*"
                            ])
                        ],
                    )
                ],
            ),
        ))
    # Artifacts bucket; noncurrent versions expire after 90 days.
    artifacts = template.add_resource(
        s3.Bucket(
            "Artifacts",
            AccessControl=s3.Private,
            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                 Status="Enabled")
            ]),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status="Enabled"),
        ))
    template.add_output(
        Output(
            "ArtifactsBucketName",
            Description="Name of bucket storing artifacts",
            Value=artifacts.ref(),
        ))

    if self.variables["AuthAtEdge"]:
        userpool_client_params = {
            "AllowedOAuthFlows": ["code"],
            "AllowedOAuthScopes": self.variables["OAuthScopes"],
        }
        if self.variables["Aliases"]:
            # With aliases configured, derive sign-in/sign-out redirect
            # URLs from the alias (and any additional redirect) domains.
            userpool_client_params[
                "AllowedOAuthFlowsUserPoolClient"] = True
            userpool_client_params[
                "SupportedIdentityProviders"] = self.variables[
                    "SupportedIdentityProviders"]

            redirect_domains = [
                add_url_scheme(x) for x in self.variables["Aliases"]
            ] + [
                add_url_scheme(x)
                for x in self.variables["AdditionalRedirectDomains"]
            ]
            redirect_uris = get_redirect_uris(
                redirect_domains,
                self.variables["RedirectPathSignIn"],
                self.variables["RedirectPathSignOut"],
            )
            userpool_client_params["CallbackURLs"] = redirect_uris[
                "sign_in"]
            userpool_client_params["LogoutURLs"] = redirect_uris[
                "sign_out"]
        else:
            # Otherwise use callback URLs supplied by a pre-build hook.
            userpool_client_params[
                "CallbackURLs"] = self.context.hook_data[
                    "aae_callback_url_retriever"]["callback_urls"]

        if self.variables["CreateUserPool"]:
            user_pool = template.add_resource(
                cognito.UserPool("AuthAtEdgeUserPool"))
            user_pool_id = user_pool.ref()
            template.add_output(
                Output(
                    "AuthAtEdgeUserPoolId",
                    # Fixed copy-pasted description: this output is the
                    # user pool itself, not the app client.
                    Description="Cognito User Pool for Auth @ Edge",
                    Value=user_pool_id,
                ))
        else:
            # Reuse an existing pool id provided by a hook.
            user_pool_id = self.context.hook_data[
                "aae_user_pool_id_retriever"]["id"]
        userpool_client_params["UserPoolId"] = user_pool_id

        client = template.add_resource(
            cognito.UserPoolClient("AuthAtEdgeClient",
                                   **userpool_client_params))
        template.add_output(
            Output(
                "AuthAtEdgeClient",
                Description="Cognito User Pool App Client for Auth @ Edge",
                Value=client.ref(),
            ))
def create_template(self):
    """Create template (main function called by Stacker).

    Provisions shared dependencies for the static-website stack: an AWS
    log bucket (with a policy letting the account root write logs), an
    artifacts bucket, and — when Auth@Edge is enabled — the Cognito user
    pool and/or app client.
    """
    template = self.template
    variables = self.get_variables()
    template.set_version("2010-09-09")
    template.set_description("Static Website - Dependencies")

    # Resources
    # Versioned bucket receiving AWS access logs.
    awslogbucket = template.add_resource(
        s3.Bucket(
            "AWSLogBucket",
            AccessControl=s3.Private,
            VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
        )
    )
    template.add_output(
        Output(
            "AWSLogBucketName",
            Description="Name of bucket storing AWS logs",
            Value=awslogbucket.ref(),
        )
    )
    # Allow the account root principal to put log objects in the bucket.
    template.add_resource(
        s3.BucketPolicy(
            "AllowAWSLogWriting",
            Bucket=awslogbucket.ref(),
            PolicyDocument=Policy(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Action=[awacs.s3.PutObject],
                        Effect=Allow,
                        Principal=AWSPrincipal(
                            Join(":", ["arn:aws:iam:", AccountId, "root"])
                        ),
                        Resource=[
                            Join("", ["arn:aws:s3:::", awslogbucket.ref(), "/*"])
                        ],
                    )
                ],
            ),
        )
    )
    # Artifacts bucket; noncurrent object versions expire after 90 days.
    artifacts = template.add_resource(
        s3.Bucket(
            "Artifacts",
            AccessControl=s3.Private,
            LifecycleConfiguration=s3.LifecycleConfiguration(
                Rules=[
                    s3.LifecycleRule(
                        NoncurrentVersionExpirationInDays=90, Status="Enabled"
                    )
                ]
            ),
            VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
        )
    )
    template.add_output(
        Output(
            "ArtifactsBucketName",
            Description="Name of bucket storing artifacts",
            Value=artifacts.ref(),
        )
    )
    if variables["AuthAtEdge"]:
        # Callback URLs are supplied by a pre-build hook.
        callbacks = self.context.hook_data["aae_callback_url_retriever"][
            "callback_urls"
        ]
        if variables["CreateUserPool"]:
            user_pool = template.add_resource(
                cognito.UserPool("AuthAtEdgeUserPool")
            )
            user_pool_id = user_pool.ref()
            template.add_output(
                Output(
                    "AuthAtEdgeUserPoolId",
                    # NOTE(review): description looks copy-pasted from the
                    # client output below — this value is the user pool
                    # itself, not the app client. Confirm before changing
                    # the deployed template text.
                    Description="Cognito User Pool App Client for Auth @ Edge",
                    Value=user_pool_id,
                )
            )
        else:
            # Reuse an existing pool id provided by a hook.
            user_pool_id = self.context.hook_data["aae_user_pool_id_retriever"][
                "id"
            ]
        client = template.add_resource(
            cognito.UserPoolClient(
                "AuthAtEdgeClient",
                AllowedOAuthFlows=["code"],
                CallbackURLs=callbacks,
                UserPoolId=user_pool_id,
                AllowedOAuthScopes=variables["OAuthScopes"],
            )
        )
        template.add_output(
            Output(
                "AuthAtEdgeClient",
                Description="Cognito User Pool App Client for Auth @ Edge",
                Value=client.ref(),
            )
        )
def create_template(self):
    """Create template (main function called by Stacker).

    Builds a container build pipeline: a scheduled Lambda that prunes
    stale ECR images, a CodeCommit source repo, an artifact bucket, a
    CodeBuild project (with its IAM role), and a two-stage CodePipeline
    (Source -> Build) wiring them together.
    """
    template = self.template
    variables = self.get_variables()
    template.set_version("2010-09-09")
    template.set_description("App - Build Pipeline")

    # Resources
    # Permissions boundary attached to every role created below.
    boundary_arn = Join(
        "",
        [
            "arn:",
            Partition,
            ":iam::",
            AccountId,
            ":policy/",
            variables["RolePermissionsBoundaryName"].ref,
        ],
    )

    # Repo image limit is 1000 by default; this lambda function will prune
    # old images
    image_param_path = Join("", ["/", variables["AppPrefix"].ref, "/current-hash"])
    image_param_arn = Join(
        "",
        [
            "arn:",
            Partition,
            ":ssm:",
            Region,
            ":",
            AccountId,
            ":parameter",
            image_param_path,
        ],
    )
    ecr_repo_arn = Join(
        "",
        [
            "arn:",
            Partition,
            ":ecr:",
            Region,
            ":",
            AccountId,
            ":repository/",
            variables["EcrRepoName"].ref,
        ],
    )
    # Role for the cleanup Lambda: read the current-hash SSM parameter and
    # list/delete images in the app's ECR repository.
    cleanuplambdarole = template.add_resource(
        iam.Role(
            "CleanupLambdaRole",
            AssumeRolePolicyDocument=make_simple_assume_policy(
                "lambda.amazonaws.com"
            ),
            ManagedPolicyArns=[IAM_ARN_PREFIX + "AWSLambdaBasicExecutionRole"],
            PermissionsBoundary=boundary_arn,
            Policies=[
                iam.Policy(
                    PolicyName=Join(
                        "", [variables["AppPrefix"].ref, "-ecrcleanup"]
                    ),
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Action=[awacs.ssm.GetParameter],
                                Effect=Allow,
                                Resource=[image_param_arn],
                            ),
                            Statement(
                                Action=[
                                    awacs.ecr.DescribeImages,
                                    awacs.ecr.BatchDeleteImage,
                                ],
                                Effect=Allow,
                                Resource=[ecr_repo_arn],
                            ),
                        ],
                    ),
                )
            ],
        )
    )
    # Lambda whose source is supplied via the ECRCleanupLambdaFunction
    # variable; configured via environment variables.
    cleanupfunction = template.add_resource(
        awslambda.Function(
            "CleanupFunction",
            Description="Cleanup stale ECR images",
            Code=awslambda.Code(ZipFile=variables["ECRCleanupLambdaFunction"]),
            Environment=awslambda.Environment(
                Variables={
                    "ECR_REPO_NAME": variables["EcrRepoName"].ref,
                    "SSM_PARAM": image_param_path,
                }
            ),
            Handler="index.handler",
            Role=cleanuplambdarole.get_att("Arn"),
            # NOTE(review): python3.6 is a retired Lambda runtime; new
            # deployments will need a supported runtime — confirm.
            Runtime="python3.6",
            Timeout=120,
        )
    )
    # Invoke the cleanup function weekly via an EventBridge/CW Events rule.
    cleanuprule = template.add_resource(
        events.Rule(
            "CleanupRule",
            Description="Regularly invoke CleanupFunction",
            ScheduleExpression="rate(7 days)",
            State="ENABLED",
            Targets=[
                events.Target(
                    Arn=cleanupfunction.get_att("Arn"), Id="CleanupFunction"
                )
            ],
        )
    )
    # Grant the events service permission to invoke the Lambda.
    template.add_resource(
        awslambda.Permission(
            "AllowCWLambdaInvocation",
            FunctionName=cleanupfunction.ref(),
            Action=awacs.awslambda.InvokeFunction.JSONrepr(),
            Principal="events.amazonaws.com",
            SourceArn=cleanuprule.get_att("Arn"),
        )
    )
    # CodeCommit repository holding the application source.
    appsource = template.add_resource(
        codecommit.Repository(
            "AppSource",
            RepositoryName=Join("-", [variables["AppPrefix"].ref, "source"]),
        )
    )
    for i in ["Name", "Arn"]:
        template.add_output(
            Output(
                "AppRepo%s" % i,
                Description="%s of app source repo" % i,
                Value=appsource.get_att(i),
            )
        )
    # Versioned pipeline artifact bucket; noncurrent versions expire
    # after 90 days.
    bucket = template.add_resource(
        s3.Bucket(
            "Bucket",
            AccessControl=s3.Private,
            LifecycleConfiguration=s3.LifecycleConfiguration(
                Rules=[
                    s3.LifecycleRule(
                        NoncurrentVersionExpirationInDays=90, Status="Enabled"
                    )
                ]
            ),
            VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
        )
    )
    template.add_output(
        Output(
            "PipelineBucketName",
            Description="Name of pipeline bucket",
            Value=bucket.ref(),
        )
    )
    # This list must be kept in sync between the CodeBuild project and its
    # role
    build_name = Join("", [variables["AppPrefix"].ref, "-build"])
    # Role for CodeBuild: read artifacts, push images to ECR, update the
    # current-hash parameter, and write build logs.
    build_role = template.add_resource(
        iam.Role(
            "BuildRole",
            AssumeRolePolicyDocument=make_simple_assume_policy(
                "codebuild.amazonaws.com"
            ),
            PermissionsBoundary=boundary_arn,
            Policies=[
                iam.Policy(
                    PolicyName=Join("", [build_name, "-policy"]),
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Action=[awacs.s3.GetObject],
                                Effect=Allow,
                                Resource=[Join("", [bucket.get_att("Arn"), "/*"])],
                            ),
                            Statement(
                                # GetAuthorizationToken is not resource-scoped.
                                Action=[awacs.ecr.GetAuthorizationToken],
                                Effect=Allow,
                                Resource=["*"],
                            ),
                            Statement(
                                Action=[
                                    awacs.ecr.BatchCheckLayerAvailability,
                                    awacs.ecr.BatchGetImage,
                                    awacs.ecr.CompleteLayerUpload,
                                    awacs.ecr.DescribeImages,
                                    awacs.ecr.GetDownloadUrlForLayer,
                                    awacs.ecr.InitiateLayerUpload,
                                    awacs.ecr.PutImage,
                                    awacs.ecr.UploadLayerPart,
                                ],
                                Effect=Allow,
                                Resource=[ecr_repo_arn],
                            ),
                            Statement(
                                Action=[
                                    awacs.ssm.GetParameter,
                                    awacs.ssm.PutParameter,
                                ],
                                Effect=Allow,
                                Resource=[image_param_arn],
                            ),
                            Statement(
                                Action=[
                                    awacs.logs.CreateLogGroup,
                                    awacs.logs.CreateLogStream,
                                    awacs.logs.PutLogEvents,
                                ],
                                Effect=Allow,
                                # Cover both the log group and its streams.
                                Resource=[
                                    Join(
                                        "",
                                        [
                                            "arn:",
                                            Partition,
                                            ":logs:",
                                            Region,
                                            ":",
                                            AccountId,
                                            ":log-group:/aws/codebuild/",
                                            build_name,
                                        ]
                                        + x,
                                    )
                                    for x in [[":*"], [":*/*"]]
                                ],
                            ),
                        ],
                    ),
                )
            ],
        )
    )
    # CodeBuild project that builds the container image per the supplied
    # buildspec.
    buildproject = template.add_resource(
        codebuild.Project(
            "BuildProject",
            Artifacts=codebuild.Artifacts(Type="CODEPIPELINE"),
            Environment=codebuild.Environment(
                ComputeType="BUILD_GENERAL1_SMALL",
                EnvironmentVariables=[
                    codebuild.EnvironmentVariable(
                        Name="AWS_DEFAULT_REGION", Type="PLAINTEXT", Value=Region
                    ),
                    codebuild.EnvironmentVariable(
                        Name="AWS_ACCOUNT_ID", Type="PLAINTEXT", Value=AccountId
                    ),
                    codebuild.EnvironmentVariable(
                        Name="IMAGE_REPO_NAME",
                        Type="PLAINTEXT",
                        Value=variables["EcrRepoName"].ref,
                    ),
                ],
                Image="aws/codebuild/docker:18.09.0",
                Type="LINUX_CONTAINER",
            ),
            Name=build_name,
            ServiceRole=build_role.get_att("Arn"),
            Source=codebuild.Source(
                Type="CODEPIPELINE", BuildSpec=variables["BuildProjectBuildSpec"]
            ),
        )
    )
    # Role for CodePipeline: pull from CodeCommit, move artifacts through
    # the bucket, and start/inspect CodeBuild builds.
    pipelinerole = template.add_resource(
        iam.Role(
            "PipelineRole",
            AssumeRolePolicyDocument=make_simple_assume_policy(
                "codepipeline.amazonaws.com"
            ),
            PermissionsBoundary=boundary_arn,
            Policies=[
                iam.Policy(
                    PolicyName=Join("", [build_name, "-pipeline-policy"]),
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Action=[
                                    awacs.codecommit.GetBranch,
                                    awacs.codecommit.GetCommit,
                                    awacs.codecommit.UploadArchive,
                                    awacs.codecommit.GetUploadArchiveStatus,  # noqa
                                    awacs.codecommit.CancelUploadArchive,
                                ],  # noqa
                                Effect=Allow,
                                Resource=[appsource.get_att("Arn")],
                            ),
                            Statement(
                                Action=[awacs.s3.GetBucketVersioning],
                                Effect=Allow,
                                Resource=[bucket.get_att("Arn")],
                            ),
                            Statement(
                                Action=[awacs.s3.GetObject, awacs.s3.PutObject],
                                Effect=Allow,
                                Resource=[Join("", [bucket.get_att("Arn"), "/*"])],
                            ),
                            Statement(
                                Action=[
                                    awacs.codebuild.BatchGetBuilds,
                                    awacs.codebuild.StartBuild,
                                ],
                                Effect=Allow,
                                Resource=[buildproject.get_att("Arn")],
                            ),
                        ],
                    ),
                )
            ],
        )
    )
    # Two-stage pipeline: CodeCommit source on master -> CodeBuild build.
    template.add_resource(
        codepipeline.Pipeline(
            "Pipeline",
            ArtifactStore=codepipeline.ArtifactStore(
                Location=bucket.ref(), Type="S3"
            ),
            Name=build_name,
            RoleArn=pipelinerole.get_att("Arn"),
            Stages=[
                codepipeline.Stages(
                    Name="Source",
                    Actions=[
                        codepipeline.Actions(
                            Name="CodeCommit",
                            ActionTypeId=codepipeline.ActionTypeId(
                                Category="Source",
                                Owner="AWS",
                                Provider="CodeCommit",
                                Version="1",
                            ),
                            Configuration={
                                "RepositoryName": appsource.get_att("Name"),  # noqa
                                "BranchName": "master",
                            },
                            OutputArtifacts=[
                                codepipeline.OutputArtifacts(Name="CodeCommitRepo")
                            ],
                        ),
                    ],
                ),
                codepipeline.Stages(
                    Name="Build",
                    Actions=[
                        codepipeline.Actions(
                            Name="Build",
                            ActionTypeId=codepipeline.ActionTypeId(
                                Category="Build",
                                Owner="AWS",
                                Provider="CodeBuild",
                                Version="1",
                            ),
                            Configuration={"ProjectName": buildproject.ref()},
                            InputArtifacts=[
                                codepipeline.InputArtifacts(Name="CodeCommitRepo")
                            ],
                        )
                    ],
                ),
            ],
        )
    )