def add_s3_buckets(self):
    """Add one S3 bucket per entry in ``sceptre_user_data``.

    Each key is used as the bucket's logical name (and as ``BucketName``
    unless one is supplied); the value dict holds Bucket keyword args.
    Known sub-configurations (Tags, Lifecycle, CORS, Logging, Versioning)
    are popped out and converted to their troposphere property objects
    before the Bucket resource is created.  A ``<name>BucketName`` and
    ``<name>BucketArn`` output is registered for every bucket.
    """
    # fix: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; .items() is correct on both.
    for bucket_name, bucket_kwargs in self.sceptre_user_data.items():
        if "BucketName" not in bucket_kwargs:
            # Default the physical bucket name to the logical name.
            bucket_kwargs.update({"BucketName": bucket_name})
        # Tags are mandatory here: tag_resource() converts the raw tag
        # mapping into the keyword args troposphere expects.
        bucket_kwargs.update(self.tag_resource(bucket_kwargs.pop("Tags")))
        if "LifecycleConfiguration" in bucket_kwargs:
            bucket_kwargs.update(
                self.add_lifecycle_config(
                    bucket_kwargs.pop("LifecycleConfiguration")))
        if "CorsConfiguration" in bucket_kwargs:
            bucket_kwargs.update(
                self.add_cors_config(
                    bucket_kwargs.pop("CorsConfiguration")))
        if "LoggingConfiguration" in bucket_kwargs:
            logging_config = LoggingConfiguration(
                **bucket_kwargs.pop("LoggingConfiguration"))
            bucket_kwargs.update({"LoggingConfiguration": logging_config})
        if "VersioningConfiguration" in bucket_kwargs:
            versioning_config = VersioningConfiguration(
                **bucket_kwargs.pop("VersioningConfiguration"))
            bucket_kwargs.update(
                {"VersioningConfiguration": versioning_config})
        # Finally create the bucket and expose its name and ARN as outputs.
        bucket = self.template.add_resource(
            Bucket(bucket_name, **bucket_kwargs))
        self.template.add_output(
            Output(bucket_name + "BucketName", Value=Ref(bucket)))
        self.template.add_output(
            Output(bucket_name + "BucketArn", Value=GetAtt(bucket, "Arn")))
def add_bucket(tags, template, versioning):
    """Add a retained, KMS-encrypted, fully private S3 bucket to *template*.

    The bucket is tagged with *tags*, has versioning set to *versioning*,
    and its name and ARN are exported as template outputs.
    """
    logical_id = "GetMeABucket"

    # Default server-side encryption: every object encrypted with aws:kms.
    default_encryption = BucketEncryption(
        ServerSideEncryptionConfiguration=[
            ServerSideEncryptionRule(
                ServerSideEncryptionByDefault=ServerSideEncryptionByDefault(
                    SSEAlgorithm="aws:kms",
                )
            )
        ]
    )

    # Block every form of public access.
    block_all_public = PublicAccessBlockConfiguration(
        BlockPublicAcls=True,
        BlockPublicPolicy=True,
        IgnorePublicAcls=True,
        RestrictPublicBuckets=True,
    )

    template.add_resource(
        Bucket(
            logical_id,
            BucketEncryption=default_encryption,
            DeletionPolicy="Retain",
            PublicAccessBlockConfiguration=block_all_public,
            Tags=tags,
            VersioningConfiguration=VersioningConfiguration(
                Status=versioning,
            ),
        )
    )

    template.add_output(
        Output("BucketName",
               Description="Bucket name",
               Value=Ref(logical_id)))
    template.add_output(
        Output("BucketNameARN",
               Description="Bucket name Arn",
               Value=GetAtt(logical_id, "Arn")))
def add_bucket(tags, template, versioning):
    """Add a retained, non-public S3 bucket plus name/ARN outputs.

    *tags* are applied to the bucket, *versioning* sets the
    VersioningConfiguration status, and both the bucket name and ARN
    are registered as outputs on *template*.
    """
    # Disallow all public ACLs and policies on the bucket.
    public_access = PublicAccessBlockConfiguration(
        BlockPublicAcls=True,
        BlockPublicPolicy=True,
        IgnorePublicAcls=True,
        RestrictPublicBuckets=True,
    )

    bucket = Bucket(
        "GetMeABucket",
        DeletionPolicy="Retain",
        PublicAccessBlockConfiguration=public_access,
        Tags=tags,
        VersioningConfiguration=VersioningConfiguration(Status=versioning),
    )
    template.add_resource(bucket)

    outputs = (
        Output("BucketName",
               Description="Bucket name",
               Value=Ref("GetMeABucket")),
        Output("BucketNameARN",
               Description="Bucket name Arn",
               Value=GetAtt("GetMeABucket", "Arn")),
    )
    for output in outputs:
        template.add_output(output)
def addResources():
    """Add the S3 bucket (and its ARN output) to the global template.

    Uses the module-level template ``t`` and ``applicationTags``; the
    physical bucket name comes from the 'BucketName' template parameter.
    """
    global t
    global applicationTags

    # Private, versioned bucket named by the 'BucketName' parameter.
    # (All the other Bucket sub-configurations — CORS, lifecycle, logging,
    # replication, website, etc. — are intentionally unset.)
    bucket = t.add_resource(
        Bucket(
            "Bucket",
            AccessControl=Private,
            BucketName=Ref('BucketName'),
            Tags=applicationTags,
            VersioningConfiguration=VersioningConfiguration(Status='Enabled'),
        ))

    # NOTE(review): a BucketPolicy denying unencrypted PutObject uploads
    # (DenyIncorrectEncryptionHeader / DenyUnEncryptedObjectUploads) was
    # previously sketched here as commented-out code; reintroduce it if
    # server-side-encryption enforcement is required.

    t.add_output(Output('BucketArn', Value=GetAtt(bucket, 'Arn')))
def add_bucket(self):
    """Create the retained, versioned bucket and register name/ARN outputs.

    Tag values come from ``sceptre_user_data`` (app/env/team) and the git
    remote URL; the versioning status comes from the 'bucket_versioning'
    user-data key.  The created resource is stored on ``self.bucket``.
    """
    user_data = self.sceptre_user_data
    resource_tags = Tags(
        app=user_data["app"],
        env=user_data["env"],
        team=user_data["team"],
        repo=self.repo.remotes.origin.url,
    )

    self.bucket = self.template.add_resource(
        Bucket(
            "GetMeABucket",
            DeletionPolicy="Retain",
            Tags=resource_tags,
            VersioningConfiguration=VersioningConfiguration(
                Status=user_data["bucket_versioning"],
            ),
        )
    )

    # Expose both the bucket name and its ARN as stack outputs.
    for title, description, value in (
        ("BucketName", "Bucket name", Ref(self.bucket)),
        ("BucketNameARN", "Bucket name Arn", GetAtt(self.bucket, "Arn")),
    ):
        self.template.add_output(
            Output(title, Description=description, Value=value)
        )
def __init__(self, title, bucket_name, **kwargs):
    """
    Args:
        title: logical resource name passed through to the base class.
        bucket_name: physical bucket name (optionally region-suffixed).
        kwargs:
            AppendRegion: boolean
            UseVersioning: boolean
            UseLifecycle: boolean
            UseReplication: boolean
            ReplicationRole: str() role name of full ARN
            ReplicaBucket: Name of the bucket for replication
            UseEncryptionReplication: boolean
            ReplicateEncryptedObjects: boolean (default true)
            ReplicaKmsKeyID: alias or arn of the KMS Key
            UseEncryption: boolean
            KMSMasterKeyID: Default KMS Key ID for encryption
    returns:
        S3Bucket
    """
    super().__init__(title)
    if kwargs.get('AppendRegion'):
        # Suffix the region so the name is unique across regions.
        self.BucketName = Sub(f'{bucket_name}-${{AWS::Region}}')
    else:
        self.BucketName = bucket_name
    if kwargs.get('UseEncryption'):
        self.set_bucket_encryption(**kwargs)
    if kwargs.get('UseLifecycle'):
        self.set_bucket_lifecycle()
        if not hasattr(self, 'VersioningConfiguration'):
            # Lifecycle rules here rely on versioning being enabled.
            self.VersioningConfiguration = VersioningConfiguration(
                Status='Enabled')
    if kwargs.get('UseReplication'):
        self.set_bucket_replication(**kwargs)
        if not hasattr(self, 'VersioningConfiguration'):
            # fix: the original wrote `setattr(bucket, ...)` on the
            # undefined name `bucket`, raising NameError at runtime;
            # the attribute belongs on self (S3 replication requires
            # versioning on the source bucket).
            self.VersioningConfiguration = VersioningConfiguration(
                Status='Enabled')
def create_s3_bucket(template=None):
    """Build (or extend) a template with the contact-file bucket.

    If *template* is falsy a fresh Template is created.  The resulting
    template is written out to 'bucket.json'.
    """
    template = template or Template()
    template.add_description(
        'This cloudformation template creates an s3 bucket for storing'
        'all of uploaded contact files.')
    template.add_version('2010-09-09')
    # Versioned bucket; resource_name / bucket_name come from module scope.
    template.add_resource(
        Bucket(
            resource_name,
            BucketName=bucket_name,
            VersioningConfiguration=VersioningConfiguration(Status='Enabled'),
        ))
    write_json_to_file('bucket.json', template)
def handle(self, chain_context):
    """
    This step adds in the shell of a pipeline.
    * s3 bucket
    * policies for the bucket and pipeline
    * your next step in the chain MUST be a source stage

    :param chain_context:
    :return:
    """
    # Versioned artifact bucket; versioning is required by CodePipeline.
    artifact_bucket = Bucket(
        self.logical_name,
        BucketName=self.bucket_name,
        VersioningConfiguration=VersioningConfiguration(Status="Enabled"),
    )
    chain_context.template.add_resource(artifact_bucket)
    print("Added bucket: " + self.logical_name)
def handle(self, chain_context):
    """
    This step adds in the shell of a pipeline.
    * s3 bucket
    * policies for the bucket and pipeline
    * your next step in the chain MUST be a source stage

    :param chain_context:
    :return:
    """
    # Optionally create the versioned artifact bucket (versioning is
    # required for CodePipeline artifact stores).
    if self.create_bucket:
        pipeline_bucket = Bucket(
            "PipelineBucket%s" % chain_context.instance_name,
            BucketName=self.bucket_name,
            VersioningConfiguration=VersioningConfiguration(
                Status="Enabled"))
        chain_context.template.add_resource(pipeline_bucket)

    default_bucket_policies = self.get_default_bucket_policy_statements(
        self.bucket_name)

    # Caller-supplied bucket policy statements become a separate
    # BucketPolicy resource.
    if self.bucket_policy_statements:
        bucket_access_policy = self.get_bucket_policy(
            pipeline_bucket=self.bucket_name,
            bucket_policy_statements=self.bucket_policy_statements,
        )
        chain_context.template.add_resource(bucket_access_policy)

    # Managed policy granting the default bucket access; added to the
    # template later (see the add_resource calls at the bottom).
    pipeline_bucket_access_policy = iam.ManagedPolicy(
        "PipelineBucketAccessPolicy",
        Path='/managed/',
        PolicyDocument=awacs.aws.PolicyDocument(
            Version="2012-10-17",
            Id="bucket-access-policy%s" % chain_context.instance_name,
            Statement=default_bucket_policies))

    # Record the bucket name and policy Ref in chain metadata so later
    # steps in the chain can find them.
    chain_context.metadata[cumulus.steps.dev_tools.
                           META_PIPELINE_BUCKET_NAME] = self.bucket_name
    chain_context.metadata[
        cumulus.steps.dev_tools.META_PIPELINE_BUCKET_POLICY_REF] = Ref(
            pipeline_bucket_access_policy)

    # TODO: this can be cleaned up by using a policytype and passing in
    # the pipeline role it should add itself to.
    # Broad inline policy for the pipeline service role (s3/kms/
    # cloudformation/codebuild/codecommit/iam:PassRole/lambda).
    pipeline_policy = iam.Policy(
        PolicyName="%sPolicy" % self.name,
        PolicyDocument=awacs.aws.PolicyDocument(
            Version="2012-10-17",
            Id="PipelinePolicy",
            Statement=[
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    # TODO: actions here could be limited more
                    Action=[awacs.aws.Action("s3", "*")],
                    Resource=[
                        # Objects in the bucket and the bucket itself.
                        troposphere.Join(
                            '', [awacs.s3.ARN(), self.bucket_name, "/*"]),
                        troposphere.Join('', [
                            awacs.s3.ARN(),
                            self.bucket_name,
                        ]),
                    ],
                ),
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[awacs.aws.Action("kms", "*")],
                    Resource=['*'],
                ),
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[
                        awacs.aws.Action("cloudformation", "*"),
                        awacs.aws.Action("codebuild", "*"),
                    ],
                    # TODO: restrict more accurately
                    Resource=["*"]),
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[
                        awacs.codecommit.GetBranch,
                        awacs.codecommit.GetCommit,
                        awacs.codecommit.UploadArchive,
                        awacs.codecommit.GetUploadArchiveStatus,
                        awacs.codecommit.CancelUploadArchive
                    ],
                    Resource=["*"]),
                awacs.aws.Statement(Effect=awacs.aws.Allow,
                                    Action=[awacs.iam.PassRole],
                                    Resource=["*"]),
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[awacs.aws.Action("lambda", "*")],
                    Resource=["*"])
            ],
        ))

    # Service role assumable by CodePipeline, carrying the policy above
    # plus any extra policies supplied on the step.
    pipeline_service_role = iam.Role(
        "PipelineServiceRole",
        Path="/",
        AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[
            awacs.aws.Statement(Effect=awacs.aws.Allow,
                                Action=[awacs.sts.AssumeRole],
                                Principal=awacs.aws.Principal(
                                    'Service', "codepipeline.amazonaws.com"))
        ]),
        Policies=[pipeline_policy] + self.pipeline_policies)

    # Pipeline shell with no stages; a source stage must be added next.
    generic_pipeline = codepipeline.Pipeline(
        "Pipeline",
        RoleArn=troposphere.GetAtt(pipeline_service_role, "Arn"),
        Stages=[],
        ArtifactStore=codepipeline.ArtifactStore(
            Type="S3",
            Location=self.bucket_name,
        )
        # TODO: optionally add kms key here
    )

    # Optional customer-managed KMS key for the artifact store.
    if self.bucket_kms_key_arn:
        encryption_config = codepipeline.EncryptionKey(
            "ArtifactBucketKmsKey",
            Id=self.bucket_kms_key_arn,
            Type='KMS',
        )
        generic_pipeline.ArtifactStore.EncryptionKey = encryption_config

    pipeline_output = troposphere.Output(
        "PipelineName",
        Description="Code Pipeline",
        Value=Ref(generic_pipeline),
    )
    pipeline_bucket_output = troposphere.Output(
        "PipelineBucket",
        Description="Name of the input artifact bucket for the pipeline",
        Value=self.bucket_name,
    )

    chain_context.template.add_resource(pipeline_bucket_access_policy)
    chain_context.template.add_resource(pipeline_service_role)
    chain_context.template.add_resource(generic_pipeline)
    chain_context.template.add_output(pipeline_output)
    chain_context.template.add_output(pipeline_bucket_output)
) from troposphere.codepipeline import (Actions, ActionTypeID, ArtifactStore, InputArtifacts, OutputArtifacts, Pipeline, Stages) from troposphere.iam import Role from troposphere.iam import Policy as IAMPolicy from troposphere.s3 import Bucket, VersioningConfiguration t = Template() t.add_description("CodePipeLine Template") t.add_resource( Bucket("S3Bucket", VersioningConfiguration=VersioningConfiguration( Status="Enabled", ))) t.add_resource( Role("PipelineRole", AssumeRolePolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["codepipeline.amazonaws.com"])) ]), Path="/", Policies=[ IAMPolicy(PolicyName="CodePipelinePolicy", PolicyDocument={ "Statement": [ {
def __init__(self,
             utils,
             templatePath='./cloudformation/api.json',
             description='Top Level API Gateway Template for {App}',
             version='2010-09-09'):
    """Build the top-level API Gateway template.

    Reads App/Domain/Tags from ``utils.config``, creates the ACM
    certificate, deployment bucket, RestApi, custom domain and base-path
    mapping, registers the exports, and writes the rendered JSON to
    *templatePath*.

    :param utils: helper object exposing a ``config`` mapping.
    :param templatePath: where the rendered template JSON is written.
    :param description: template description; '{App}' is substituted.
    :param version: CloudFormation template format version.
    """
    # fix: super(self.__class__, self) recurses infinitely if this class
    # is ever subclassed; bare super() is the correct Python 3 form.
    super().__init__()
    self.utils = utils
    self.templatePath = templatePath
    appName = self.utils.config['App']
    domainName = self.utils.config['Domain']
    tags = self.utils.config['Tags']
    self.add_version(version)
    self.add_description(description.format(App=appName))

    ###################
    # ACM Certificate #
    ###################
    # Covers the apex domain and a wildcard for all subdomains.
    self.certificate = self.add_resource(
        Certificate(
            '{App}Certificate'.format(App=appName),
            DomainName=domainName,
            SubjectAlternativeNames=[
                '*.{Domain}'.format(Domain=domainName)
            ],
            Tags=Tags(tags),
        ))

    #####################
    # Deployment Bucket #
    #####################
    self.bucket = self.add_resource(
        Bucket(
            '{App}DeploymentBucket'.format(App=appName),
            DeletionPolicy='Retain',
            Tags=Tags(tags),
            VersioningConfiguration=VersioningConfiguration(
                Status='Enabled',
            ),
        ))

    ###########
    # RestApi #
    ###########
    self.api = self.add_resource(
        RestApi(
            '{App}Api'.format(App=appName),
            Name=appName + 'Api',
            Description='API for {App} AWS SAML Login Redirection'.format(
                App=appName),
        ))

    ##################
    # RestApi Domain #
    ##################
    self.apiDomain = self.add_resource(
        DomainName(
            '{App}ApiDomainName'.format(App=appName),
            CertificateArn=Ref(self.certificate),
            DomainName=domainName,
        ))
    self.apiDomainMapping = self.add_resource(
        BasePathMapping(
            '{App}ApiDomainNameMapping'.format(App=appName),
            DomainName=Ref(self.apiDomain),
            RestApiId=Ref(self.api),
        ))

    ###########
    # Outputs #
    ###########
    # Every output is exported so sibling stacks can import it.
    self.add_output(
        Output(
            '{App}Api'.format(App=appName),
            Value=Ref(self.api),
            Export=Export('{App}Api'.format(App=appName), ),
        ))
    self.add_output(
        Output(
            '{App}ApiDomainName'.format(App=appName),
            Value=Ref(self.apiDomain),
            Export=Export('{App}ApiDomainName'.format(App=appName), ),
        ))
    self.add_output(
        Output(
            '{App}ApiDomainDistribution'.format(App=appName),
            Value=GetAtt(self.apiDomain, 'DistributionDomainName'),
            Export=Export(
                '{App}ApiDomainDistribution'.format(App=appName), ),
        ))
    self.add_output(
        Output(
            '{App}ApiRoot'.format(App=appName),
            Value=GetAtt(self.api, 'RootResourceId'),
            Export=Export('{App}ApiRoot'.format(App=appName), ),
        ))
    self.add_output(
        Output(
            '{App}Certificate'.format(App=appName),
            Value=Ref(self.certificate),
            Export=Export('{App}Certificate'.format(App=appName), ),
        ))
    self.add_output(
        Output(
            '{App}DeploymentBucket'.format(App=appName),
            Value=Ref(self.bucket),
            Export=Export('{App}DeploymentBucket'.format(App=appName), ),
        ))

    ##################
    # Write Template #
    ##################
    with open(templatePath, 'w') as templateFile:
        templateFile.write(self.to_json())
SSEAlgorithm='AES256') cp_bucket_encryption_rule = ServerSideEncryptionRule( ServerSideEncryptionByDefault=cp_bucket_encryption_config) cp_bucket_encryption = BucketEncryption( ServerSideEncryptionConfiguration=[cp_bucket_encryption_rule]) input_bucket = t.add_resource( Bucket( 'InputBucket', AccessControl='Private', BucketName=Join("", [Ref("accountparameter"), Ref("inputbucketparameter")]), #BucketEncryption=cp_bucket_encryption, VersioningConfiguration=VersioningConfiguration(Status='Enabled'))) output_bucket = t.add_resource( Bucket( 'OutputBucket', AccessControl='Private', BucketName=Join( "", [Ref("accountparameter"), Ref("outputbucketparameter")]) #BucketEncryption=cp_bucket_encryption, #DependsOn='CodePipelineBucket' )) codepipeline_artifact_store_bucket = t.add_resource( Bucket('CodePipelineBucket', AccessControl='Private',
def add_resources(self):
    """Add all CloudFormation resources: the client VPC, its public and
    private subnets, IGW, NAT gateways, route tables and associations,
    the peering connection to the shared-services VPC (with routes on
    both sides), an environment-artifacts S3 bucket and an SSM parameter
    recording the bootstrap repository name.

    NOTE(review): availability zones are hard-coded to us-east-1a/b —
    confirm this stack is only ever deployed to us-east-1.
    """
    self.vpc = self.template.add_resource(
        ec2.VPC(
            "VPC",
            CidrBlock=Ref(self.VpcCidr),
            EnableDnsSupport=True,
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-VPC"),
        ))

    # Two public subnets, one per AZ.
    self.RESTPubSubnet1 = self.template.add_resource(
        ec2.Subnet(
            "RESTPubSubnet1",
            CidrBlock=Ref(self.RESTPubSub1Cidr),
            VpcId=Ref(self.vpc),
            AvailabilityZone="us-east-1a",
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-RESTPubSubnet1"),
        ))
    self.RESTPubSubnet2 = self.template.add_resource(
        ec2.Subnet(
            "RESTPubSubnet2",
            VpcId=Ref(self.vpc),
            CidrBlock=Ref(self.RESTPubSub2Cidr),
            AvailabilityZone="us-east-1b",
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-RESTPubSubnet2"),
        ))

    # Two private subnets, one per AZ.
    self.RESTPrivSubnet1 = self.template.add_resource(
        ec2.Subnet(
            "RESTPrivSubnet1",
            VpcId=Ref(self.vpc),
            CidrBlock=Ref(self.RESTPrivSub1Cidr),
            AvailabilityZone="us-east-1a",
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-RESTPrivSubnet1"),
        ))
    self.RESTPrivSubnet2 = self.template.add_resource(
        ec2.Subnet(
            "RESTPrivSubnet2",
            CidrBlock=Ref(self.RESTPrivSub2Cidr),
            VpcId=Ref(self.vpc),
            AvailabilityZone="us-east-1b",
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-RESTPrivSubnet2"),
        ))

    # Internet gateway for the public subnets.
    self.RESTIGW = self.template.add_resource(
        ec2.InternetGateway(
            "RESTIGW",
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-RESTIGW"),
        ))
    self.RESTIGWAttachment = self.template.add_resource(
        ec2.VPCGatewayAttachment(
            "RESTIGWAttachment",
            VpcId=Ref(self.vpc),
            InternetGatewayId=Ref(self.RESTIGW),
        ))

    # One EIP + NAT gateway per public subnet for private-subnet egress.
    self.RESTEIP1 = self.template.add_resource(
        ec2.EIP(
            "RESTEIP1",
            Domain="vpc",
        ))
    self.RESTEIP2 = self.template.add_resource(
        ec2.EIP(
            "RESTEIP2",
            Domain="vpc",
        ))
    self.RESTNAT1 = self.template.add_resource(
        ec2.NatGateway(
            "NAT",
            AllocationId=GetAtt(self.RESTEIP1, "AllocationId"),
            SubnetId=Ref(self.RESTPubSubnet1),
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-RESTNAT1"),
        ))
    self.RESTNAT2 = self.template.add_resource(
        ec2.NatGateway(
            "NAT2",
            AllocationId=GetAtt(self.RESTEIP2, "AllocationId"),
            SubnetId=Ref(self.RESTPubSubnet2),
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-RESTNAT2"),
        ))

    # Private route tables: default route via the AZ-local NAT gateway.
    self.RESTPrivRT1 = self.template.add_resource(
        ec2.RouteTable(
            "RESTPrivRT1",
            VpcId=Ref(self.vpc),
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-RESTPRIVRT1"),
        ))
    self.RESTPrivRT2 = self.template.add_resource(
        ec2.RouteTable(
            "RESTPrivRT2",
            VpcId=Ref(self.vpc),
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-RESTPRIVRT2"),
        ))
    self.RESTNatRoute = self.template.add_resource(
        ec2.Route(
            "RESTNatRoute",
            RouteTableId=Ref(self.RESTPrivRT1),
            DestinationCidrBlock="0.0.0.0/0",
            NatGatewayId=Ref(self.RESTNAT1),
        ))
    self.RESTNat2Route = self.template.add_resource(
        ec2.Route(
            "RESTNatRoute2",
            RouteTableId=Ref(self.RESTPrivRT2),
            DestinationCidrBlock="0.0.0.0/0",
            NatGatewayId=Ref(self.RESTNAT2),
        ))
    self.RESTPrivRT1Association = self.template.add_resource(
        ec2.SubnetRouteTableAssociation(
            "RESTPrivRT1Association",
            SubnetId=Ref(self.RESTPrivSubnet1),
            RouteTableId=Ref(self.RESTPrivRT1),
        ))
    self.RESTPrivRT2Association = self.template.add_resource(
        ec2.SubnetRouteTableAssociation(
            "RESTPrivRT2Association",
            SubnetId=Ref(self.RESTPrivSubnet2),
            RouteTableId=Ref(self.RESTPrivRT2),
        ))

    # Public route tables: default route via the IGW (waits for the
    # gateway attachment to exist).
    self.RESTPubRT1 = self.template.add_resource(
        ec2.RouteTable(
            "RESTPubRT1",
            VpcId=Ref(self.vpc),
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-RESTPUBRT1"),
        ))
    self.RESTPubRT2 = self.template.add_resource(
        ec2.RouteTable(
            "RESTPubRT2",
            VpcId=Ref(self.vpc),
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-RESTPUBRT2"),
        ))
    self.RESTPubRT1IGWattachment = self.template.add_resource(
        ec2.Route(
            "RESTPubRT1IGWAttachment",
            DependsOn=["RESTIGWAttachment"],
            RouteTableId=Ref(self.RESTPubRT1),
            DestinationCidrBlock="0.0.0.0/0",
            GatewayId=Ref(self.RESTIGW),
        ))
    self.RESTPubRT2IGWattachment = self.template.add_resource(
        ec2.Route(
            "RESTPubRT2IGWAttachment",
            DependsOn=["RESTIGWAttachment"],
            RouteTableId=Ref(self.RESTPubRT2),
            DestinationCidrBlock="0.0.0.0/0",
            GatewayId=Ref(self.RESTIGW),
        ))
    self.RESTPubRT1Association = self.template.add_resource(
        ec2.SubnetRouteTableAssociation(
            "RESTPubRT1Associate",
            SubnetId=Ref(self.RESTPubSubnet1),
            RouteTableId=Ref(self.RESTPubRT1),
        ))
    self.RESTPubRT2Asocation = self.template.add_resource(
        ec2.SubnetRouteTableAssociation(
            "RESTPubR2Associate",
            SubnetId=Ref(self.RESTPubSubnet2),
            RouteTableId=Ref(self.RESTPubRT2),
        ))

    # Peering between the shared-services VPC and this client VPC, plus
    # routes in both directions (client private RTs and shared RTs).
    self.VPCPeeringBetweenSharedVPCAndClientVPC = self.template.add_resource(
        ec2.VPCPeeringConnection(
            "VPCPeeringBetweenSharedVPCAndClientVPC",
            DependsOn=["RESTPrivRT1", "RESTPrivRT2"],
            VpcId=Ref(self.SharedServicesVpcId),
            PeerVpcId=Ref(self.vpc),
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-SSTOCLIENTVPCPEER"),
        ))
    self.PeeringRouteForClientVPCPriv1 = self.template.add_resource(
        ec2.Route(
            "PeeringRouteForClientVPCPriv1",
            DependsOn=["VPCPeeringBetweenSharedVPCAndClientVPC"],
            RouteTableId=Ref(self.RESTPrivRT1),
            DestinationCidrBlock=Ref(self.SharedServicesVpcCidrBlock),
            VpcPeeringConnectionId=Ref(
                self.VPCPeeringBetweenSharedVPCAndClientVPC),
        ))
    self.PeeringRouteForClientVPCPriv2 = self.template.add_resource(
        ec2.Route(
            "PeeringRouteForClientVPCPriv2",
            DependsOn=["VPCPeeringBetweenSharedVPCAndClientVPC"],
            RouteTableId=Ref(self.RESTPrivRT2),
            DestinationCidrBlock=Ref(self.SharedServicesVpcCidrBlock),
            VpcPeeringConnectionId=Ref(
                self.VPCPeeringBetweenSharedVPCAndClientVPC),
        ))
    self.PeeringRouteForSharedServicesVPCPriv1 = \
        self.template.add_resource(
            ec2.Route(
                "PeeringRouteForSharedServicesVPCPriv1",
                DependsOn=["VPCPeeringBetweenSharedVPCAndClientVPC"],
                RouteTableId=Ref(self.SharedServicesPrivateRouteTable1),
                DestinationCidrBlock=Ref(self.VpcCidr),
                VpcPeeringConnectionId=Ref(
                    self.VPCPeeringBetweenSharedVPCAndClientVPC),
            ))
    self.PeeringRouteForSharedServicesVPCPriv2 = self.template.add_resource(
        ec2.Route(
            "PeeringRouteForSharedServicesVPCPriv2",
            DependsOn=["VPCPeeringBetweenSharedVPCAndClientVPC"],
            RouteTableId=Ref(self.SharedServicesPrivateRouteTable2),
            DestinationCidrBlock=Ref(self.VpcCidr),
            VpcPeeringConnectionId=Ref(
                self.VPCPeeringBetweenSharedVPCAndClientVPC),
        ))

    # Versioned bucket for environment artifacts; name must be lowercase
    # to satisfy S3 naming rules.
    self.EnvironmentArtifactsBucket = self.template.add_resource(
        Bucket(
            "EnvironmentArtifactsBucket",
            BucketName=(
                self.environment_parameters["ClientEnvironmentKey"] +
                "-environment-artifacts").lower(),
            AccessControl="BucketOwnerRead",
            VersioningConfiguration=VersioningConfiguration(
                Status="Enabled", ),
        ))

    # SSM parameter pointing at the bootstrap repository (same name as
    # the artifacts bucket).
    self.BootstrapRepositorySSMParameter = self.template.add_resource(
        SSMParameter(
            "BootstrapRepositorySSMParameter",
            Description="The Bootstrap Repository",
            Name=self.environment_parameters["ClientEnvironmentKey"] +
            "-bootstrapRepository",
            Type="String",
            Value=(self.environment_parameters["ClientEnvironmentKey"] +
                   "-environment-artifacts").lower(),
        ))
AccelerateConfiguration, CorsConfiguration, CorsRules) BUCKET_NAME_SUFFIX = 'MockDatalake' BUCKETS = [["LandingZone", "Raw"], ["WorkZone", "Partially Processed"], ["GoldZone", "Final Processed"]] BUCKET_CORS_CONFIG = CorsConfiguration(CorsRules=[CorsRules( AllowedOrigins=["*"], AllowedMethods=["POST", "PUT", "HEAD", "GET"], AllowedHeaders=["*"], )]) BUCKET_VERSIONING_CONFIG = VersioningConfiguration(Status="Enabled") BUCKET_ACCELERATION_CONFIG = AccelerateConfiguration( AccelerationStatus="Enabled") def init(): load_dotenv(find_dotenv()) init() DEBUG = os.getenv('DEBUG', False) if not DEBUG: print('---------------------------------------------') print('Loading .env ....')
def handle(self, chain_context):
    """
    This step adds in the shell of a pipeline.
    * s3 bucket
    * policies for the bucket and pipeline
    * your next step in the chain MUST be a source stage

    :param chain_context:
    :return:
    """
    # Optionally create the versioned artifact bucket (versioning is
    # required for CodePipeline artifact stores).
    if self.create_bucket:
        pipeline_bucket = Bucket(
            "PipelineBucket%s" % self.name,
            BucketName=self.bucket_name,
            VersioningConfiguration=VersioningConfiguration(
                Status="Enabled"
            )
        )
        chain_context.template.add_resource(pipeline_bucket)

    default_bucket_policies = self.get_default_bucket_policy_statements(self.bucket_name)

    # Caller-supplied bucket policy statements become a separate
    # BucketPolicy resource.
    if self.bucket_policy_statements:
        bucket_access_policy = self.get_bucket_policy(
            pipeline_bucket=self.bucket_name,
            bucket_policy_statements=self.bucket_policy_statements,
        )
        chain_context.template.add_resource(bucket_access_policy)

    # Managed policy granting default access to the artifact bucket;
    # added to the template at the bottom of this method.
    pipeline_bucket_access_policy = iam.ManagedPolicy(
        "PipelineBucketAccessPolicy",
        Path='/managed/',
        PolicyDocument=awacs.aws.PolicyDocument(
            Version="2012-10-17",
            Id="bucket-access-policy%s" % chain_context.instance_name,
            Statement=default_bucket_policies
        )
    )

    # Record the bucket name and policy Ref in chain metadata so later
    # steps in the chain can find them.
    chain_context.metadata[cumulus.steps.dev_tools.META_PIPELINE_BUCKET_NAME] = self.bucket_name
    chain_context.metadata[cumulus.steps.dev_tools.META_PIPELINE_BUCKET_POLICY_REF] = Ref(
        pipeline_bucket_access_policy)

    # Use the caller-supplied service role ARN if given, otherwise fall
    # back to the default role built by this step (the default role
    # resource is only added to the template in that fallback case).
    default_pipeline_role = self.get_default_pipeline_role()
    pipeline_service_role_arn = self.pipeline_service_role_arn or troposphere.GetAtt(default_pipeline_role, "Arn")

    # Pipeline shell with no stages; a source stage must be added next.
    generic_pipeline = codepipeline.Pipeline(
        "Pipeline",
        RoleArn=pipeline_service_role_arn,
        Stages=[],
        ArtifactStore=codepipeline.ArtifactStore(
            Type="S3",
            Location=self.bucket_name,
        )
    )

    # Optional customer-managed KMS key for the artifact store.
    if self.bucket_kms_key_arn:
        encryption_config = codepipeline.EncryptionKey(
            "ArtifactBucketKmsKey",
            Id=self.bucket_kms_key_arn,
            Type='KMS',
        )
        generic_pipeline.ArtifactStore.EncryptionKey = encryption_config

    pipeline_output = troposphere.Output(
        "PipelineName",
        Description="Code Pipeline",
        Value=Ref(generic_pipeline),
    )
    pipeline_bucket_output = troposphere.Output(
        "PipelineBucket",
        Description="Name of the input artifact bucket for the pipeline",
        Value=self.bucket_name,
    )

    if not self.pipeline_service_role_arn:
        chain_context.template.add_resource(default_pipeline_role)

    chain_context.template.add_resource(pipeline_bucket_access_policy)
    chain_context.template.add_resource(generic_pipeline)
    chain_context.template.add_output(pipeline_output)
    chain_context.template.add_output(pipeline_bucket_output)