def _add_regional_bucket(builder: Template) -> (s3.Bucket, s3.BucketPolicy):
    name = "RegionalBucket"
    bucket = builder.add_resource(s3.Bucket(name, Tags=DEFAULT_TAGS))
    policy = s3.BucketPolicy(
        f"{name}Policy",
        Bucket=bucket.ref(),
        PolicyDocument=dict(
            Version="2012-10-17",
            Statement=[
                dict(
                    Sid="CloudTrailAclCheck",
                    Effect="Allow",
                    Principal=dict(Service="cloudtrail.amazonaws.com"),
                    Action=S3.GetBucketAcl,
                    Resource=Sub(f"arn:${{{AWS_PARTITION}}}:s3:::${{{bucket.title}}}"),
                ),
                dict(
                    Sid="CloudTrailWrite",
                    Effect="Allow",
                    Principal=dict(Service="cloudtrail.amazonaws.com"),
                    Action=S3.PutObject,
                    Resource=Sub(
                        f"arn:${{{AWS_PARTITION}}}:s3:::${{{bucket.title}}}/accretion/cloudtrail/*"
                    ),
                    Condition=dict(
                        StringEquals={"s3:x-amz-acl": "bucket-owner-full-control"}
                    ),
                ),
            ],
        ),
    )
    builder.add_resource(policy)
    return bucket, policy
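# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal wiring of _add_regional_bucket into a template. The imports and the
# DEFAULT_TAGS value below are assumptions made for illustration; the real
# module supplies its own.
from troposphere import AWS_PARTITION, Sub, Tags, Template
from troposphere import s3
from awacs import s3 as S3  # GetBucketAcl / PutObject action constants

DEFAULT_TAGS = Tags(Project="example")

builder = Template()
bucket, bucket_policy = _add_regional_bucket(builder)
print(builder.to_json())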
def register_type_project_template(cls, project, template):
    """Register in the project stack an S3 bucket where all Lambda code
    will be stored, as well as an output so any subsequent template can
    reference this resource."""
    bucket_name = troposphere.Join("-", [
        utils.validate_code_bucket(project.settings['code-bucket']),
        troposphere.Ref(troposphere.AWS_REGION),
        troposphere.Ref('Stage')
    ])
    code_bucket = s3.Bucket(
        "CodeBucket",
        BucketName=bucket_name,
        AccessControl=s3.Private,
        VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled')
    )
    template.add_resource(code_bucket)
    template.add_output([
        troposphere.Output(
            "CodeBucket",
            Description="CodeBucket name",
            Value=bucket_name,
        )
    ])
"AWS": Join('', ['', GetAtt(federated_auth_user, 'Arn'), '']) }, "Action": "sts:AssumeRole" }] } federated_auth_api_keys = t.add_resource( iam.AccessKey('federatedAuthAccessKey', UserName=Ref(federated_auth_user))) if arguments.get('--existing_bucket', None): logging_bucket = arguments.get('--existing_bucket', None) else: logging_bucket_resource = t.add_resource( s3.Bucket('cloudTrailBucket', DependsOn=federated_auth_user.title)) logging_bucket = Ref(logging_bucket_resource) t.add_output( Output('bucketName', Value=logging_bucket, Description='Name of the S3 bucket created for logging')) role_base = { 'DependsOn': logging_bucket_resource.title, 'AssumeRolePolicyDocument': assume_role_policy_document, 'Path': '/userroles/', 'Policies': [] } admin_args = role_base.copy()
def generate_codepipeline_template( codepipeline_role_name: str, codepipeline_role_path: str, codebuild_role_name: str, codebuild_role_path: str, ssm_parameter_prefix: str, scm_provider: str, scm_connection_arn: str, scm_full_repository_id: str, scm_branch_name: str, scm_bucket_name: str, scm_object_key: str, scm_skip_creation_of_repo: str, migrate_role_arn: str, ) -> troposphere.Template: version = pkg_resources.get_distribution("aws-organized").version t = troposphere.Template() t.set_description( "CICD template that runs aws organized migrate for the given branch of the given repo" ) project_name = "AWSOrganized-Migrate" bucket_name = scm_bucket_name if scm_provider.lower( ) == "codecommit" and scm_skip_creation_of_repo is False: t.add_resource( codecommit.Repository("Repository", RepositoryName=scm_full_repository_id)) if scm_provider.lower() == "s3" and scm_skip_creation_of_repo is False: bucket_name = ( scm_bucket_name if scm_bucket_name else troposphere.Sub("aws-organized-pipeline-source-${AWS::AccountId}")) t.add_resource( s3.Bucket( "Source", BucketName=bucket_name, VersioningConfiguration=s3.VersioningConfiguration( Status="Enabled"), BucketEncryption=s3.BucketEncryption( ServerSideEncryptionConfiguration=[ s3.ServerSideEncryptionRule( ServerSideEncryptionByDefault=s3. ServerSideEncryptionByDefault( SSEAlgorithm="AES256")) ]), )) artifact_store = t.add_resource( s3.Bucket( "ArtifactStore", VersioningConfiguration=s3.VersioningConfiguration( Status="Enabled"), BucketEncryption=s3.BucketEncryption( ServerSideEncryptionConfiguration=[ s3.ServerSideEncryptionRule( ServerSideEncryptionByDefault=s3. ServerSideEncryptionByDefault(SSEAlgorithm="AES256")) ]), )) codepipeline_role = t.add_resource( iam.Role( "CodePipelineRole", RoleName=codepipeline_role_name, Path=codepipeline_role_path, ManagedPolicyArns=["arn:aws:iam::aws:policy/AdministratorAccess"], AssumeRolePolicyDocument=aws.PolicyDocument( Version="2012-10-17", Statement=[ aws.Statement( Effect=aws.Allow, Action=[awacs_sts.AssumeRole], Principal=aws.Principal( "Service", ["codepipeline.amazonaws.com"]), ) ], ), )) codebuild_role = t.add_resource( iam.Role( "CodeBuildRole", RoleName=codebuild_role_name, Path=codebuild_role_path, ManagedPolicyArns=["arn:aws:iam::aws:policy/AdministratorAccess"], AssumeRolePolicyDocument=aws.PolicyDocument( Version="2012-10-17", Statement=[ aws.Statement( Effect=aws.Allow, Action=[awacs_sts.AssumeRole], Principal=aws.Principal("Service", ["codebuild.amazonaws.com"]), ) ], ), )) version_parameter = ssm.Parameter( "versionparameter", Name=f"{ssm_parameter_prefix}/version", Type="String", Value=version, ) t.add_resource(version_parameter) project = t.add_resource( codebuild.Project( "AWSOrganizedMigrate", Artifacts=codebuild.Artifacts(Type="CODEPIPELINE"), Environment=codebuild.Environment( ComputeType="BUILD_GENERAL1_SMALL", Image="aws/codebuild/standard:4.0", Type="LINUX_CONTAINER", EnvironmentVariables=[ { "Name": "MIGRATE_ROLE_ARN", "Type": "PLAINTEXT", "Value": migrate_role_arn, }, { "Name": "Version", "Type": "PARAMETER_STORE", "Value": troposphere.Ref(version_parameter), }, { "Name": "SSM_PARAMETER_PREFIX", "Type": "PLAINTEXT", "Value": ssm_parameter_prefix, }, ], ), Name=project_name, ServiceRole=troposphere.GetAtt(codebuild_role, "Arn"), Source=codebuild.Source( Type="CODEPIPELINE", BuildSpec=yaml.safe_dump( dict( version="0.2", phases=dict( install={ "runtime-versions": dict(python="3.8"), "commands": ["pip install aws-organized==${Version}"], }, build={ "commands": [ "aws-organized migrate 
--ssm-parameter-prefix $SSM_PARAMETER_PREFIX $MIGRATE_ROLE_ARN" ] }, ), artifacts=dict(files=["environment"]), )), ), )) source_actions = dict( codecommit=codepipeline.Actions( Name="SourceAction", ActionTypeId=codepipeline.ActionTypeId(Category="Source", Owner="AWS", Version="1", Provider="CodeCommit"), OutputArtifacts=[ codepipeline.OutputArtifacts(Name="SourceOutput") ], Configuration={ "RepositoryName": scm_full_repository_id, "BranchName": scm_branch_name, "PollForSourceChanges": "true", }, RunOrder="1", ), codestarsourceconnection=codepipeline.Actions( Name="SourceAction", ActionTypeId=codepipeline.ActionTypeId( Category="Source", Owner="AWS", Version="1", Provider="CodeStarSourceConnection", ), OutputArtifacts=[ codepipeline.OutputArtifacts(Name="SourceOutput") ], Configuration={ "ConnectionArn": scm_connection_arn, "FullRepositoryId": scm_full_repository_id, "BranchName": scm_branch_name, "OutputArtifactFormat": "CODE_ZIP", }, RunOrder="1", ), s3=codepipeline.Actions( Name="SourceAction", ActionTypeId=codepipeline.ActionTypeId(Category="Source", Owner="AWS", Version="1", Provider="S3"), OutputArtifacts=[ codepipeline.OutputArtifacts(Name="SourceOutput") ], Configuration={ "S3Bucket": bucket_name, "S3ObjectKey": scm_object_key, "PollForSourceChanges": True, }, RunOrder="1", ), ).get(scm_provider.lower()) t.add_resource( codepipeline.Pipeline( "Pipeline", RoleArn=troposphere.GetAtt(codepipeline_role, "Arn"), Stages=[ codepipeline.Stages(Name="Source", Actions=[source_actions]), codepipeline.Stages( Name="Migrate", Actions=[ codepipeline.Actions( Name="Migrate", InputArtifacts=[ codepipeline.InputArtifacts( Name="SourceOutput") ], ActionTypeId=codepipeline.ActionTypeId( Category="Build", Owner="AWS", Version="1", Provider="CodeBuild", ), Configuration={ "ProjectName": troposphere.Ref(project), "PrimarySource": "SourceAction", }, RunOrder="1", ) ], ), ], ArtifactStore=codepipeline.ArtifactStore( Type="S3", Location=troposphere.Ref(artifact_store)), )) return t
from troposphere.cloudformation import AWSCustomObject
from troposphere.glue import ExecutionProperty, JobCommand, Job, Database, \
    DatabaseInput, Crawler, SchemaChangePolicy, S3Target, Targets
from troposphere.stepfunctions import StateMachine

template = Template()
template.set_version("2010-09-09")
template.set_transform('AWS::Serverless-2016-10-31')
template.set_description("Example")

#### Internal S3 Bucket ####
internal_s3_bucket = template.add_resource(s3.Bucket(
    "DatalakeS3Bucket",
    BucketName=Sub(f'${{{AWS_ACCOUNT_ID}}}-test-stepfunctions-troposphere-glue'),
    PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
        BlockPublicAcls=True,
        BlockPublicPolicy=True,
        IgnorePublicAcls=True,
        RestrictPublicBuckets=True
    )
))

custom_resource_empty_on_delete_execution_role = template.add_resource(
    iam.Role(
        "CustomResourceEmptyDatalakeBucketOnDeleteExecutionRole",
        RoleName='test-stepfunctions-troposphere-glue-empty-bucket-cr-role',
        AssumeRolePolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {
# Create an entry in the group-table
template.add_resource(
    custom_resources.dynamodb.Item(
        "AuthorizedDomainExampleGroup",
        TableName=ImportValue(
            Join('-', [Ref(param_authorizer_param_stack), "GroupTable"])),
        ItemKey={"group": {"S": "Example Group"}},
        ItemValue={"domains": {"SS": [domain_name]}},
    ))

# Create a bucket with example content (only needed for this example, obviously)
example_bucket = template.add_resource(s3.Bucket("ExampleBucket"))
example_bucket_content = template.add_resource(
    custom_resources.s3.Object(
        "ExampleBucketContent",
        Bucket=Ref(example_bucket),
        Key="index.html",
        ContentType='text/html',
        Body=Sub("""<html>
<head><title>Protected content</title></head>
<body>
<h1>Protected Content</h1>
<p>You've reached the protected part of this example site.</p>
<p>The secret is: ${ExampleBucket}</p>
</body>
</html>
"""),
from troposphere import Template, ecr, s3, iam, codebuild

t = Template()

# Create the nginx Repository
Repository = t.add_resource(
    ecr.Repository(
        "Repository",
        RepositoryName="nginx"
    )
)

# Create the S3 Bucket for Output
NginxBuildOutputBucket = t.add_resource(
    s3.Bucket(
        "NginxBuildOutputBucket"
    )
)

# CodeBuild Service Role
CodeBuildServiceRole = t.add_resource(iam.Role(
    "CodeBuildServiceRole",
    AssumeRolePolicyDocument={
        "Statement": [
            {
                'Effect': 'Allow',
                'Principal': {'Service': 'codebuild.amazonaws.com'},
                "Action": "sts:AssumeRole"
            }
        ]
    }
def generate(account_list=None, region_list=None, file_location=None, output_keys=False, dry_run=False): """CloudFormation template generator for use in creating the resources required to capture logs in a centrally managed account per UCSD standards.""" if type(account_list) == tuple: account_list = list(account_list) parameter_groups = [] region_list = region_list if region_list else ['us-west-1', 'us-west-2', 'us-east-1', 'us-east-2'] t = Template() t.add_version("2010-09-09") t.add_description("UCSD Log Target AWS CloudFormation Template - this CFn template configures a given account to receive logs from other accounts so as to aggregate and then optionally forward those logs on to the UCSD Splunk installation.") # Create Kinesis and IAM Roles log_stream_shard_count = t.add_parameter(Parameter("LogStreamShardCount", Description="Number of shards to create within the AWS Kinesis stream created to handle CloudWatch Logs.", Type="Number", MinValue=1, MaxValue=64, Default=1)) log_stream_retention_period = t.add_parameter(Parameter("LogStreamRetentionPeriod", Description = "Number of hours to retain logs in the Kinesis stream.", Type="Number", MinValue=24, MaxValue=120, Default=24)) parameter_groups.append({'Label': {'default': 'Log Stream Inputs'}, 'Parameters': [log_stream_shard_count.name, log_stream_retention_period.name]}) log_stream = t.add_resource(k.Stream("LogStream", RetentionPeriodHours=Ref(log_stream_retention_period), ShardCount=Ref(log_stream_shard_count))) firehose_bucket = t.add_resource(s3.Bucket('LogS3DeliveryBucket')) firehose_delivery_role = t.add_resource(iam.Role('LogS3DeliveryRole', AssumeRolePolicyDocument=Policy( Statement=[Statement( Effect=Allow, Action=[AssumeRole], Principal=Principal('Service', 'firehose.amazonaws.com'), Condition=Condition(StringEquals('sts:ExternalId', AccountId)))]))) log_s3_delivery_policy = t.add_resource(iam.PolicyType('LogS3DeliveryPolicy', Roles=[Ref(firehose_delivery_role)], PolicyName='LogS3DeliveryPolicy', PolicyDocument=Policy( Statement=[Statement( Effect=Allow, Action=[as3.AbortMultipartUpload, as3.GetBucketLocation, as3.GetObject, as3.ListBucket, as3.ListBucketMultipartUploads, as3.PutObject], Resource=[ Join('', ['arn:aws:s3:::', Ref(firehose_bucket)]), Join('', ['arn:aws:s3:::', Ref(firehose_bucket), '*'])]), Statement( Effect=Allow, Action=[akinesis.Action('Get*'), akinesis.DescribeStream, akinesis.ListStreams], Resource=[ GetAtt(log_stream, 'Arn') ])]))) s3_firehose = t.add_resource(fh.DeliveryStream('LogToS3DeliveryStream', DependsOn=[log_s3_delivery_policy.name], DeliveryStreamName='LogToS3DeliveryStream', DeliveryStreamType='KinesisStreamAsSource', KinesisStreamSourceConfiguration=fh.KinesisStreamSourceConfiguration( KinesisStreamARN=GetAtt(log_stream, 'Arn'), RoleARN=GetAtt(firehose_delivery_role, 'Arn') ), S3DestinationConfiguration=fh.S3DestinationConfiguration( BucketARN=GetAtt(firehose_bucket, 'Arn'), BufferingHints=fh.BufferingHints( IntervalInSeconds=300, SizeInMBs=50 ) , CompressionFormat='UNCOMPRESSED', Prefix='firehose/' , RoleARN=GetAtt(firehose_delivery_role, 'Arn'), ))) t.add_output(Output('SplunkKinesisLogStream', Value=GetAtt(log_stream, 'Arn'), Description='ARN of the kinesis stream for log aggregation.')) # Generate Bucket with Lifecycle Policies ct_s3_key_prefix = t.add_parameter(Parameter('CloudTrailKeyPrefix', Type='String', Default='', Description='Key name prefix for logs being sent to S3')) bucket_name = t.add_parameter(Parameter("BucketName", Description="Name to assign to the central logging 
retention bucket", Type="String", AllowedPattern="([a-z]|[0-9])+", MinLength=2, MaxLength=64)) glacier_migration_days = t.add_parameter(Parameter("LogMoveToGlacierInDays", Description="Number of days until logs are expired from S3 and transitioned to Glacier", Type="Number", Default=365)) glacier_deletion_days = t.add_parameter(Parameter("LogDeleteFromGlacierInDays", Description="Number of days until logs are expired from Glacier and deleted", Type="Number", Default=365*7)) parameter_groups.append({'Label': {'default': 'S3 Log Destination Parameters'}, 'Parameters': [bucket_name.name, ct_s3_key_prefix.name, glacier_migration_days.name, glacier_deletion_days.name]}) dead_letter_queue = t.add_resource(sqs.Queue('deadLetterQueue')) queue = t.add_resource(sqs.Queue('s3DeliveryQueue', MessageRetentionPeriod=14*24*60*60, # 14 d * 24 h * 60 m * 60 s VisibilityTimeout=5*60, RedrivePolicy=sqs.RedrivePolicy( deadLetterTargetArn=GetAtt(dead_letter_queue, 'Arn'), maxReceiveCount=10 ))) # 5 m * 60 s per Splunk docs here: http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWS#Configure_SQS t.add_output(Output('SplunkS3Queue', Value=GetAtt(queue, 'Arn'), Description='Queue for Splunk SQS S3 ingest')) t.add_output(Output('SplunkS3DeadLetterQueue', Value=GetAtt(dead_letter_queue, 'Arn'), Description="Dead letter queue for Splunk SQS S3 ingest")) t.add_resource(sqs.QueuePolicy('s3DeliveryQueuePolicy', PolicyDocument=Policy( Statement=[Statement( Effect=Allow, Principal=Principal("AWS", "*"), Action=[asqs.SendMessage], Resource=[GetAtt(queue, 'Arn')], Condition=Condition(ArnLike("aws:SourceArn", Join('', ["arn:aws:s3:*:*:", Ref(bucket_name)]))))]), Queues=[Ref(queue)])) bucket = t.add_resource(s3.Bucket("LogDeliveryBucket", DependsOn=[log_stream.name, queue.name], BucketName=Ref(bucket_name), AccessControl="LogDeliveryWrite", NotificationConfiguration=s3.NotificationConfiguration( QueueConfigurations=[s3.QueueConfigurations( Event="s3:ObjectCreated:*", Queue=GetAtt(queue, 'Arn'))]), LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[ s3.LifecycleRule( Id="S3ToGlacierTransition", Status="Enabled", ExpirationInDays=Ref(glacier_deletion_days), Transition=s3.LifecycleRuleTransition( StorageClass="Glacier", TransitionInDays=Ref(glacier_migration_days)))]))) bucket_policy = t.add_resource(s3.BucketPolicy("LogDeliveryBucketPolicy", Bucket=Ref(bucket), PolicyDocument=Policy( Statement=[ Statement( Effect="Allow", Principal=Principal("Service", "cloudtrail.amazonaws.com"), Action=[GetBucketAcl], Resource=[GetAtt(bucket, 'Arn')]), Statement( Effect="Allow", Principal=Principal("Service", "cloudtrail.amazonaws.com"), Action=[PutObject], Condition=Condition(StringEquals({"s3:x-amz-acl": "bucket-owner-full-control"})), Resource=[Join('', [GetAtt(bucket, "Arn"), Ref(ct_s3_key_prefix), "/AWSLogs/", acct_id, "/*"]) for acct_id in account_list])]))) splunk_sqs_s3_user = t.add_resource(iam.User('splunkS3SQSUser', Path='/', UserName='******')) splunk_user_policy = t.add_resource(_generate_splunk_policy(users=[Ref(splunk_sqs_s3_user)])) t.add_output(Output('BucketName', Description="Name of the bucket for CloudTrail log delivery", Value=Ref(bucket))) # Log destination setup cwl_to_kinesis_role = t.add_resource(iam.Role('CWLtoKinesisRole', AssumeRolePolicyDocument=Policy( Statement=[Statement( Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", Join('', ["logs.", Region, ".amazonaws.com"])))]))) cwl_to_kinesis_policy_link = t.add_resource(iam.PolicyType('CWLtoKinesisPolicy', 
PolicyName='CWLtoKinesisPolicy', Roles=[Ref(cwl_to_kinesis_role)], PolicyDocument=Policy( Statement=[ Statement( Effect=Allow, Resource=[GetAtt(log_stream, 'Arn')], Action=[akinesis.PutRecord]), Statement( Effect=Allow, Resource=[GetAtt(cwl_to_kinesis_role, 'Arn')], Action=[IAMPassRole])]))) log_destination = t.add_resource(cwl.Destination('CWLtoKinesisDestination', DependsOn=[cwl_to_kinesis_policy_link.name], DestinationName='CWLtoKinesisDestination', DestinationPolicy=_generate_log_destination_policy_test('CWLtoKinesisDestination', account_list), RoleArn=GetAtt(cwl_to_kinesis_role, 'Arn'), TargetArn=GetAtt(log_stream, 'Arn'))) t.add_output(Output('childAccountLogDeliveryDestinationArn', Value=GetAtt(log_destination,'Arn'), Description='Log Destination to specify when deploying the source cloudformation template in other accounts.')) if output_keys: splunk_user_creds = t.add_resource(iam.AccessKey('splunkAccountUserCreds', UserName=Ref(splunk_sqs_s3_user))) t.add_output(Output('splunkUserAccessKey', Description='AWS Access Key for the user created for splunk to use when accessing logs', Value=Ref(splunk_user_creds))) t.add_output(Output('splunkUserSecretKey', Description='AWS Secret Access Key ID for the user created for splunk to use when accessing logs', Value=GetAtt(splunk_user_creds, 'SecretAccessKey'))) t.add_output(Output('splunkCWLRegion', Description="The AWS region that contains the data. In aws_cloudwatch_logs_tasks.conf, enter the region ID.", Value=Region)) t.add_output(Output("DeploymentAccount", Value=AccountId, Description="Convenience Output for referencing AccountID of the log aggregation account")) t.add_metadata({"AWS::CloudFormation::Interface": {"ParameterGroups": parameter_groups}}) if dry_run: print(t.to_json()) else: save_path = file_location if file_location else os.path.join(log_aggregation_cf, 'log_targets.json') with open (save_path, 'w') as f: f.write(t.to_json())
def create_bucket() -> s3.Bucket:
    bucket = s3.Bucket("mybucket")
    bucket.BucketName = "dnvriend-foobar-bucket"
    return bucket
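# --- Usage sketch (illustrative) ---
# Wires create_bucket into a fresh template and emits JSON; assumes the usual
# troposphere imports that the helper itself relies on.
from troposphere import Template
from troposphere import s3

template = Template()
template.add_resource(create_bucket())
print(template.to_json())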
def main(): t = Template("A template to create a load balanced autoscaled Web flask deployment using ansible.") addMapping(t) ### VPC CONFIGURATION ### vpc = ec2.VPC( "MainVPC", CidrBlock="10.1.0.0/16" ) t.add_resource(vpc) vpc_id = Ref(vpc) subnet_1 = ec2.Subnet( "WebAppSubnet1", t, AvailabilityZone="us-east-1a", CidrBlock="10.1.0.0/24", MapPublicIpOnLaunch=True, VpcId=vpc_id, ) subnet_1_id = Ref(subnet_1) subnet_2 = ec2.Subnet( "WebAppSubnet2", t, AvailabilityZone="us-east-1b", CidrBlock="10.1.1.0/24", MapPublicIpOnLaunch=True, VpcId=vpc_id, ) subnet_2_id = Ref(subnet_2) ### NETWORKING ### igw = ec2.InternetGateway("internetGateway", t) gateway_to_internet = ec2.VPCGatewayAttachment( "GatewayToInternet", t, VpcId=vpc_id, InternetGatewayId=Ref(igw) ) route_table = ec2.RouteTable( "subnetRouteTable", t, VpcId=vpc_id ) route_table_id = Ref(route_table) internet_route = ec2.Route( "routeToInternet", t, DependsOn=gateway_to_internet, DestinationCidrBlock="0.0.0.0/0", GatewayId=Ref(igw), RouteTableId=route_table_id ) subnet_1_route_assoc = ec2.SubnetRouteTableAssociation( "Subnet1RouteAssociation", t, RouteTableId=route_table_id, SubnetId=Ref(subnet_1) ) subnet_2_route_assoc = ec2.SubnetRouteTableAssociation( "Subnet2RouteAssociation", t, RouteTableId=route_table_id, SubnetId=Ref(subnet_2) ) http_ingress = { "CidrIp": "0.0.0.0/0", "Description": "Allow HTTP traffic in from internet.", "IpProtocol": "tcp", "FromPort": 80, "ToPort": 80, } ssh_ingress = { "CidrIp": "0.0.0.0/0", "Description": "Allow SSH traffic in from internet.", "IpProtocol": "tcp", "FromPort": 22, "ToPort": 22, } elb_sg = ec2.SecurityGroup( "elbSecurityGroup", t, GroupName="WebGroup", GroupDescription="Allow web traffic in from internet to ELB", VpcId=vpc_id, SecurityGroupIngress=[ http_ingress ]) ssh_sg = ec2.SecurityGroup( "sshSecurityGroup", t, GroupName="SSHGroup", GroupDescription="Allow SSH traffic in from internet", VpcId=vpc_id, SecurityGroupIngress=[ ssh_ingress ] ) elb_sg_id = Ref(elb_sg) ssh_sg_id = Ref(ssh_sg) autoscale_ingress = { "SourceSecurityGroupId": elb_sg_id, "Description": "Allow web traffic in from ELB", "IpProtocol": "tcp", "FromPort": 80, "ToPort": 80 } autoscale_sg = ec2.SecurityGroup( "WebAutoscaleSG", t, GroupName="AutoscaleGroup", GroupDescription="Allow web traffic in from elb on port 80", VpcId=vpc_id, SecurityGroupIngress=[ autoscale_ingress ] ) autoscale_sg_id = Ref(autoscale_sg) # BUCKETS app_bucket = s3.Bucket( "CodeDeployApplicationBucket", t, ) ### LOAD BALANCING ### Web_elb = elb.LoadBalancer( "WebElb", t, Name="WebElb", # TODO: Fix for name conflict Subnets=[subnet_1_id, subnet_2_id], SecurityGroups=[elb_sg_id] ) Web_target_group = elb.TargetGroup( "WebTargetGroup", t, DependsOn=Web_elb, HealthCheckPath="/health", HealthCheckPort=80, HealthCheckProtocol="HTTP", Matcher=elb.Matcher(HttpCode="200"), Name="NginxTargetGroup", Port=80, Protocol="HTTP", VpcId=vpc_id ) Web_listener = elb.Listener( "WebListener", t, LoadBalancerArn=Ref(Web_elb), DefaultActions=[ elb.Action("forwardAction", TargetGroupArn=Ref(Web_target_group), Type="forward" ) ], Port=80, Protocol="HTTP" ) ### AUTOSCALING ### # Everything after sudo -u ubuntu is one command # The sudo command is required to properly set file permissions when # running the ansible script as it assumes running from non root user lc_user_data = Base64(Join("\n", [ "#!/bin/bash", "apt-add-repository -y ppa:ansible/ansible", "apt-get update && sudo apt-get -y upgrade", "apt-get -y install git", "apt-get -y install ansible", "cd /home/ubuntu/", "sudo -H 
-u ubuntu bash -c '" "export LC_ALL=C.UTF-8 && " "export LANG=C.UTF-8 && " "ansible-pull -U https://github.com/DameonSmith/aws-meetup-ansible.git --extra-vars \"user=ubuntu\"'" ])) web_instance_role = iam.Role( "webInstanceCodeDeployRole", t, AssumeRolePolicyDocument={ 'Statement': [{ 'Effect': 'Allow', 'Principal': { 'Service': 'ec2.amazonaws.com' }, 'Action': 'sts:AssumeRole' }] }, Policies=[ iam.Policy( PolicyName="CodeDeployS3Policy", PolicyDocument=aws.Policy( Version='2012-10-17', Statement=[ aws.Statement( Sid='CodeDeployS3', Effect=aws.Allow, Action=[ aws_s3.PutObject, aws_s3.GetObject, aws_s3.GetObjectVersion, aws_s3.DeleteObject, aws_s3.ListObjects, aws_s3.ListBucket, aws_s3.ListBucketVersions, aws_s3.ListAllMyBuckets, aws_s3.ListMultipartUploadParts, aws_s3.ListBucketMultipartUploads, aws_s3.ListBucketByTags, ], Resource=[ GetAtt(app_bucket, 'Arn'), Join('', [ GetAtt(app_bucket, 'Arn'), '/*', ]), "arn:aws:s3:::aws-codedeploy-us-east-2/*", "arn:aws:s3:::aws-codedeploy-us-east-1/*", "arn:aws:s3:::aws-codedeploy-us-west-1/*", "arn:aws:s3:::aws-codedeploy-us-west-2/*", "arn:aws:s3:::aws-codedeploy-ca-central-1/*", "arn:aws:s3:::aws-codedeploy-eu-west-1/*", "arn:aws:s3:::aws-codedeploy-eu-west-2/*", "arn:aws:s3:::aws-codedeploy-eu-west-3/*", "arn:aws:s3:::aws-codedeploy-eu-central-1/*", "arn:aws:s3:::aws-codedeploy-ap-northeast-1/*", "arn:aws:s3:::aws-codedeploy-ap-northeast-2/*", "arn:aws:s3:::aws-codedeploy-ap-southeast-1/*", "arn:aws:s3:::aws-codedeploy-ap-southeast-2/*", "arn:aws:s3:::aws-codedeploy-ap-south-1/*", "arn:aws:s3:::aws-codedeploy-sa-east-1/*", ] ) ] ) ) ] ) web_instance_profile = iam.InstanceProfile( "webInstanceProfile", t, Path='/', Roles=[Ref(web_instance_role)], ) Web_launch_config = autoscaling.LaunchConfiguration( "webLaunchConfig", t, ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), # TODO: Remove magic string SecurityGroups=[ssh_sg_id, autoscale_sg_id], IamInstanceProfile=Ref(web_instance_profile), InstanceType="t2.micro", BlockDeviceMappings= [{ "DeviceName": "/dev/sdk", "Ebs": {"VolumeSize": "10"} }], UserData= lc_user_data, KeyName="advanced-cfn", ) Web_autoscaler = autoscaling.AutoScalingGroup( "WebAutoScaler", t, LaunchConfigurationName=Ref(Web_launch_config), MinSize="2", # TODO: Change to parameter MaxSize="2", VPCZoneIdentifier=[subnet_2_id, subnet_1_id], TargetGroupARNs= [Ref(Web_target_group)] ) t.add_output([ Output( "ALBDNS", Description="The DNS name for the application load balancer.", Value=GetAtt(Web_elb, "DNSName") ) ]) # DEVTOOLS CONFIG codebuild_service_role = iam.Role( "CMSCodeBuildServiceRole", t, AssumeRolePolicyDocument={ 'Statement': [{ 'Effect': 'Allow', 'Principal': { 'Service': ['codebuild.amazonaws.com'] }, 'Action': ['sts:AssumeRole'] }] }, Policies=[ iam.Policy( PolicyName="CloudWatchLogsPolicy", PolicyDocument=aws.Policy( Version="2012-10-17", Statement=[ aws.Statement( Sid='logs', Effect=aws.Allow, Action=[ aws_logs.CreateLogGroup, aws_logs.CreateLogStream, aws_logs.PutLogEvents ], Resource=['*'] ) ] ) ), iam.Policy( PolicyName="s3AccessPolicy", PolicyDocument=aws.Policy( Version="2012-10-17", Statement=[ aws.Statement( Sid='codebuilder', Effect=aws.Allow, Action=[ aws_s3.PutObject, aws_s3.GetObject, aws_s3.GetObjectVersion, aws_s3.DeleteObject ], Resource=[ GetAtt(app_bucket, 'Arn'), Join('', [ GetAtt(app_bucket, 'Arn'), '/*', ]) ] ) ] ) ) ] ) github_repo = Parameter( "GithubRepoLink", Description="Name of the repository you wish to connect to codebuild.", Type="String" ) artifact_key = Parameter( "ArtifactKey", 
Description="The key for the artifact that codebuild creates.", Type="String" ) t.add_parameter(github_repo) t.add_parameter(artifact_key) cms_code_build_project = codebuild.Project( "CMSBuild", t, Name="CMS-Build", Artifacts=codebuild.Artifacts( Location=Ref(app_bucket), Name=Ref(artifact_key), NamespaceType="BUILD_ID", Type="S3", Packaging="ZIP" ), Description="Code build for CMS", Environment=codebuild.Environment( ComputeType="BUILD_GENERAL1_SMALL", Image="aws/codebuild/python:3.6.5", Type="LINUX_CONTAINER", ), ServiceRole=GetAtt(codebuild_service_role, 'Arn'), Source=codebuild.Source( "CMSSourceCode", Auth=codebuild.SourceAuth( "GitHubAuth", Type="OAUTH" ), Location=Ref(github_repo), Type="GITHUB" ), Triggers=codebuild.ProjectTriggers( Webhook=True ) ) codedeploy_service_role = iam.Role( "CMSDeploymentGroupServiceRole", t, AssumeRolePolicyDocument={ 'Statement': [{ 'Effect': 'Allow', 'Principal': { 'Service': ['codedeploy.amazonaws.com'] }, 'Action': ['sts:AssumeRole'] }] }, Policies=[ iam.Policy( PolicyName="CloudWatchLogsPolicy", PolicyDocument=aws.Policy( Version="2012-10-17", Statement=[ aws.Statement( Sid='logs', Effect=aws.Allow, Action=[ aws_logs.CreateLogGroup, aws_logs.CreateLogStream, aws_logs.PutLogEvents ], Resource=['*'] ) ] ) ), iam.Policy( PolicyName="s3AccessPolicy", PolicyDocument=aws.Policy( Version="2012-10-17", Statement=[ aws.Statement( Sid='codebuilder', Effect=aws.Allow, Action=[ aws_s3.PutObject, aws_s3.GetObject, aws_s3.GetObjectVersion, aws_s3.DeleteObject ], Resource=[ GetAtt(app_bucket, 'Arn'), Join('', [ GetAtt(app_bucket, 'Arn'), '/*' ]) ] ) ] ) ), iam.Policy( PolicyName="autoscalingAccess", PolicyDocument=aws.Policy( Version="2012-10-17", Statement=[ aws.Statement( Sid='codebuilder', Effect=aws.Allow, Action=[ aws.Action('autoscaling', '*'), aws.Action('elasticloadbalancing', '*') ], Resource=[ '*' ] ) ] ) ) ] ) cms_codedeploy_application = codedeploy.Application( "CMSCodeDeployApplication", t, ) cms_deployment_group = codedeploy.DeploymentGroup( "CMSDeploymentGroup", t, DependsOn=[cms_codedeploy_application], ApplicationName=Ref(cms_codedeploy_application), AutoScalingGroups=[Ref(Web_autoscaler)], LoadBalancerInfo=codedeploy.LoadBalancerInfo( "CodeDeployLBInfo", TargetGroupInfoList=[ codedeploy.TargetGroupInfoList( "WebTargetGroup", Name=GetAtt(Web_target_group, "TargetGroupName") ) ] ), ServiceRoleArn=GetAtt(codedeploy_service_role, 'Arn') ) print(t.to_yaml())
param_error_doc = t.add_parameter(
    Parameter('ErrorDocument',
              Type='String',
              Default='error.html',
              Description='The name of the error document for the website.'))

#
# Resource
#
bucket = t.add_resource(
    s3.Bucket(
        'Bucket',
        BucketName=Join('.', [Ref(param_domain_name), Ref(param_hosted_domain)]),
        WebsiteConfiguration=s3.WebsiteConfiguration(
            IndexDocument=Ref(param_index_doc),
            ErrorDocument=Ref(param_error_doc),
        ),
    ))

bucket_policy = t.add_resource(
    s3.BucketPolicy(
        'BucketPolicy',
        Bucket=Ref(bucket),
        PolicyDocument={
            'Version': '2012-10-17',
            'Statement': [{
                'Sid': 'PublicReadGetObject',
                'Effect': 'Allow',
def buildInfrastructure(t, args):
    if not args.recovery:
        t.add_resource(
            kms.Key(
                'OpenEMRKey',
                DeletionPolicy='Retain' if args.recovery else 'Delete' if args.dev else 'Retain',
                KeyPolicy={
                    "Version": "2012-10-17",
                    "Id": "key-default-1",
                    "Statement": [{
                        "Sid": "1",
                        "Effect": "Allow",
                        "Principal": {
                            "AWS": [Join(':', ['arn:aws:iam:', ref_account, 'root'])]
                        },
                        "Action": "kms:*",
                        "Resource": "*"
                    }]
                }))

    t.add_resource(
        s3.Bucket(
            'S3Bucket',
            DeletionPolicy='Retain',
            BucketName=Join(
                '-', ['openemr', Select('2', Split('/', ref_stack_id))])))

    t.add_resource(
        s3.BucketPolicy(
            'BucketPolicy',
            Bucket=Ref('S3Bucket'),
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Sid": "AWSCloudTrailAclCheck",
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "cloudtrail.amazonaws.com"
                    },
                    "Action": "s3:GetBucketAcl",
                    "Resource": {
                        "Fn::Join": ["", ["arn:aws:s3:::", {"Ref": "S3Bucket"}]]
                    }
                }, {
                    "Sid": "AWSCloudTrailWrite",
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "cloudtrail.amazonaws.com"
                    },
                    "Action": "s3:PutObject",
                    "Resource": {
                        "Fn::Join": [
                            "",
                            [
                                "arn:aws:s3:::", {"Ref": "S3Bucket"},
                                "/AWSLogs/", {"Ref": "AWS::AccountId"}, "/*"
                            ]
                        ]
                    },
                    "Condition": {
                        "StringEquals": {
                            "s3:x-amz-acl": "bucket-owner-full-control"
                        }
                    }
                }]
            }))

    t.add_resource(
        cloudtrail.Trail('CloudTrail',
                         DependsOn='BucketPolicy',
                         IsLogging=True,
                         IncludeGlobalServiceEvents=True,
                         IsMultiRegionTrail=True,
                         S3BucketName=Ref('S3Bucket')))

    t.add_resource(
        ec2.SecurityGroup('ApplicationSecurityGroup',
                          GroupDescription='Application Security Group',
                          VpcId=Ref('VPC'),
                          Tags=Tags(Name='Application')))

    return t
        QueueName=Ref(sqs_default_queue_name)
    )
)
notifications_queue = template.add_resource(
    sqs.Queue(
        'NotificationsQueue',
        QueueName=Ref(sqs_notifications_queue_name)
    )
)

# Create the S3 buckets.
uploads_bucket = template.add_resource(
    s3.Bucket(
        'UploadsBucket',
        BucketName=Ref(s3_uploads_bucket_name),
        AccessControl='Private'
    )
)
frontend_bucket = template.add_resource(
    s3.Bucket(
        'FrontendBucket',
        BucketName=Ref(s3_frontend_bucket_name),
        AccessControl='PublicRead'
    )
)
backend_bucket = template.add_resource(
    s3.Bucket(
        'BackendBucket',
src_bucket = t.add_resource(s3.Bucket(
    'SourceBucket',
    NotificationConfiguration=If(
        'BucketNotificationEnabled',
        s3.NotificationConfiguration(
            LambdaConfigurations=[
                s3.LambdaConfigurations(
                    Function=Sub(
                        'arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:%s'
                        % lambda_function_name),
                    Event='s3:ObjectCreated:*',
                    Filter=s3.Filter(
                        S3Key=s3.S3Key(
                            Rules=[
                                s3.Rules(Name='prefix', Value=''),
                                s3.Rules(Name='suffix', Value='.zip')
                            ]
                        )
                    ),
                ),
                s3.LambdaConfigurations(
                    Function=Sub(
                        'arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:%s'
                        % lambda_function_name),
                    Event='s3:ObjectCreated:*',
                    Filter=s3.Filter(
                        S3Key=s3.S3Key(
                            Rules=[
                                s3.Rules(Name='prefix', Value=''),
                                s3.Rules(Name='suffix', Value='.gz')
                            ]
                        )
                    ),
                )
            ]
        ),
        Ref(AWS_NO_VALUE)),
))
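# --- Condition sketch (illustrative) ---
# The If() above relies on a 'BucketNotificationEnabled' condition defined
# elsewhere in the stack. One minimal way such a condition could be declared,
# assuming a hypothetical EnableBucketNotification parameter:
from troposphere import Equals, Parameter, Ref

enable_notification = t.add_parameter(Parameter(
    'EnableBucketNotification',
    Type='String',
    AllowedValues=['true', 'false'],
    Default='false',
))
t.add_condition('BucketNotificationEnabled',
                Equals(Ref(enable_notification), 'true'))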
from troposphere import s3, Template
import boto3
import time

t = Template()

s3_bucket = s3.Bucket("davidsbucket", BucketName="davidsawesomebucket")
t.add_resource(s3_bucket)

s3_bucket2 = s3.Bucket("davidsbucket2", BucketName="davidsawesomebucket2")
t.add_resource(s3_bucket2)

print(t.to_yaml())

with open("cfn.yaml", "w+") as temp:
    temp.write(t.to_yaml())
""" Troposphere template responsible for the product bucket Resources ~~~~~~~~~ * **S3 Bucket:** Where all the hyp3 products are stored * **SSM Parameter:** Stores the name of the products bucket """ from troposphere import s3, Sub, Ref from troposphere.ssm import Parameter as SSMParameter from template import t products_bucket = t.add_resource( s3.Bucket("S3Bucket", BucketName=Sub("${StackName}-products-bucket", StackName=Ref("AWS::StackName")))) ssm_products_bucket_name = t.add_resource( SSMParameter("HyP3SSMParameterProductsBucket", Name=Sub("/${StackName}/ProductsS3Bucket", StackName=Ref("AWS::StackName")), Type="String", Value=Ref(products_bucket)))
from troposphere import Template
import troposphere.s3 as s3

t = Template()
t.add_resource(s3.Bucket('S3Bucket', DeletionPolicy='Retain'))
print(t.to_yaml())
def get_template( puppet_version, all_regions, source, is_caching_enabled, is_manual_approvals: bool, scm_skip_creation_of_repo: bool, should_validate: bool, ) -> t.Template: is_codecommit = source.get("Provider", "").lower() == "codecommit" is_github = source.get("Provider", "").lower() == "github" is_codestarsourceconnection = (source.get( "Provider", "").lower() == "codestarsourceconnection") is_custom = (source.get("Provider", "").lower() == "custom") is_s3 = source.get("Provider", "").lower() == "s3" description = f"""Bootstrap template used to bring up the main ServiceCatalog-Puppet AWS CodePipeline with dependencies {{"version": "{puppet_version}", "framework": "servicecatalog-puppet", "role": "bootstrap-master"}}""" template = t.Template(Description=description) version_parameter = template.add_parameter( t.Parameter("Version", Default=puppet_version, Type="String")) org_iam_role_arn_parameter = template.add_parameter( t.Parameter("OrgIamRoleArn", Default="None", Type="String")) with_manual_approvals_parameter = template.add_parameter( t.Parameter( "WithManualApprovals", Type="String", AllowedValues=["Yes", "No"], Default="No", )) puppet_code_pipeline_role_permission_boundary_parameter = template.add_parameter( t.Parameter( "PuppetCodePipelineRolePermissionBoundary", Type="String", Description= "IAM Permission Boundary to apply to the PuppetCodePipelineRole", Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data, )) source_role_permissions_boundary_parameter = template.add_parameter( t.Parameter( "SourceRolePermissionsBoundary", Type="String", Description="IAM Permission Boundary to apply to the SourceRole", Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data, )) puppet_generate_role_permission_boundary_parameter = template.add_parameter( t.Parameter( "PuppetGenerateRolePermissionBoundary", Type="String", Description= "IAM Permission Boundary to apply to the PuppetGenerateRole", Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data, )) puppet_deploy_role_permission_boundary_parameter = template.add_parameter( t.Parameter( "PuppetDeployRolePermissionBoundary", Type="String", Description= "IAM Permission Boundary to apply to the PuppetDeployRole", Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data, )) puppet_provisioning_role_permissions_boundary_parameter = template.add_parameter( t.Parameter( "PuppetProvisioningRolePermissionsBoundary", Type="String", Description= "IAM Permission Boundary to apply to the PuppetProvisioningRole", Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data, )) cloud_formation_deploy_role_permissions_boundary_parameter = template.add_parameter( t.Parameter( "CloudFormationDeployRolePermissionsBoundary", Type="String", Description= "IAM Permission Boundary to apply to the CloudFormationDeployRole", Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data, )) deploy_environment_compute_type_parameter = template.add_parameter( t.Parameter( "DeployEnvironmentComputeType", Type="String", Description="The AWS CodeBuild Environment Compute Type", Default="BUILD_GENERAL1_SMALL", )) spoke_deploy_environment_compute_type_parameter = template.add_parameter( t.Parameter( "SpokeDeployEnvironmentComputeType", Type="String", Description= "The AWS CodeBuild Environment Compute Type for spoke execution mode", Default="BUILD_GENERAL1_SMALL", )) deploy_num_workers_parameter = template.add_parameter( t.Parameter( "DeployNumWorkers", Type="Number", Description= "Number of workers that should be used when 
running a deploy", Default=10, )) puppet_role_name_parameter = template.add_parameter( t.Parameter("PuppetRoleName", Type="String", Default="PuppetRole")) puppet_role_path_template_parameter = template.add_parameter( t.Parameter("PuppetRolePath", Type="String", Default="/servicecatalog-puppet/")) template.add_condition( "ShouldUseOrgs", t.Not(t.Equals(t.Ref(org_iam_role_arn_parameter), "None"))) template.add_condition( "HasManualApprovals", t.Equals(t.Ref(with_manual_approvals_parameter), "Yes")) template.add_resource( s3.Bucket( "StacksRepository", BucketName=t.Sub("sc-puppet-stacks-repository-${AWS::AccountId}"), VersioningConfiguration=s3.VersioningConfiguration( Status="Enabled"), BucketEncryption=s3.BucketEncryption( ServerSideEncryptionConfiguration=[ s3.ServerSideEncryptionRule( ServerSideEncryptionByDefault=s3. ServerSideEncryptionByDefault(SSEAlgorithm="AES256")) ]), PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration( BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True, ), Tags=t.Tags({"ServiceCatalogPuppet:Actor": "Framework"}), )) manual_approvals_param = template.add_resource( ssm.Parameter( "ManualApprovalsParam", Type="String", Name="/servicecatalog-puppet/manual-approvals", Value=t.Ref(with_manual_approvals_parameter), )) template.add_resource( ssm.Parameter( "SpokeDeployEnvParameter", Type="String", Name=constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME, Value=t.Ref(spoke_deploy_environment_compute_type_parameter), )) param = template.add_resource( ssm.Parameter( "Param", Type="String", Name="service-catalog-puppet-version", Value=t.Ref(version_parameter), )) partition_parameter = template.add_resource( ssm.Parameter( "PartitionParameter", Type="String", Name="/servicecatalog-puppet/partition", Value=t.Ref("AWS::Partition"), )) puppet_role_name_parameter = template.add_resource( ssm.Parameter( "PuppetRoleNameParameter", Type="String", Name="/servicecatalog-puppet/puppet-role/name", Value=t.Ref(puppet_role_name_parameter), )) puppet_role_path_parameter = template.add_resource( ssm.Parameter( "PuppetRolePathParameter", Type="String", Name="/servicecatalog-puppet/puppet-role/path", Value=t.Ref(puppet_role_path_template_parameter), )) share_accept_function_role = template.add_resource( iam.Role( "ShareAcceptFunctionRole", RoleName="ShareAcceptFunctionRole", ManagedPolicyArns=[ t.Sub( "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" ) ], Path=t.Ref(puppet_role_path_template_parameter), Policies=[ iam.Policy( PolicyName="ServiceCatalogActions", PolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["sts:AssumeRole"], "Resource": { "Fn::Sub": "arn:${AWS::Partition}:iam::*:role${PuppetRolePath}${PuppetRoleName}" }, "Effect": "Allow", }], }, ) ], AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["lambda.amazonaws.com"] }, }], }, )) provisioning_role = template.add_resource( iam.Role( "ProvisioningRole", RoleName="PuppetProvisioningRole", AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["codebuild.amazonaws.com"] }, }, { "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "AWS": { "Fn::Sub": "${AWS::AccountId}" } }, }, ], }, ManagedPolicyArns=[ t.Sub( "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess" ) ], PermissionsBoundary=t.Ref( 
puppet_provisioning_role_permissions_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter), )) cloud_formation_deploy_role = template.add_resource( iam.Role( "CloudFormationDeployRole", RoleName="CloudFormationDeployRole", AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["cloudformation.amazonaws.com"] }, }, { "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "AWS": { "Fn::Sub": "${AWS::AccountId}" } }, }, ], }, ManagedPolicyArns=[ t.Sub( "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess" ) ], PermissionsBoundary=t.Ref( cloud_formation_deploy_role_permissions_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter), )) pipeline_role = template.add_resource( iam.Role( "PipelineRole", RoleName="PuppetCodePipelineRole", AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["codepipeline.amazonaws.com"] }, }], }, ManagedPolicyArns=[ t.Sub( "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess" ) ], PermissionsBoundary=t.Ref( puppet_code_pipeline_role_permission_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter), )) source_role = template.add_resource( iam.Role( "SourceRole", RoleName="PuppetSourceRole", AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["codepipeline.amazonaws.com"] }, }, { "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "AWS": { "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:root" } }, }, ], }, ManagedPolicyArns=[ t.Sub( "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess" ) ], PermissionsBoundary=t.Ref( source_role_permissions_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter), )) dry_run_notification_topic = template.add_resource( sns.Topic( "DryRunNotificationTopic", DisplayName="service-catalog-puppet-dry-run-approvals", TopicName="service-catalog-puppet-dry-run-approvals", Condition="HasManualApprovals", )) deploy_role = template.add_resource( iam.Role( "DeployRole", RoleName="PuppetDeployRole", AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["codebuild.amazonaws.com"] }, }], }, ManagedPolicyArns=[ t.Sub( "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess" ) ], PermissionsBoundary=t.Ref( puppet_deploy_role_permission_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter), )) num_workers_ssm_parameter = template.add_resource( ssm.Parameter( "NumWorkersSSMParameter", Type="String", Name="/servicecatalog-puppet/deploy/num-workers", Value=t.Sub("${DeployNumWorkers}"), )) parameterised_source_bucket = template.add_resource( s3.Bucket( "ParameterisedSourceBucket", PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration( IgnorePublicAcls=True, BlockPublicPolicy=True, BlockPublicAcls=True, RestrictPublicBuckets=True, ), BucketEncryption=s3.BucketEncryption( ServerSideEncryptionConfiguration=[ s3.ServerSideEncryptionRule( ServerSideEncryptionByDefault=s3. 
ServerSideEncryptionByDefault(SSEAlgorithm="AES256")) ]), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), BucketName=t.Sub("sc-puppet-parameterised-runs-${AWS::AccountId}"), VersioningConfiguration=s3.VersioningConfiguration( Status="Enabled"), )) source_stage = codepipeline.Stages( Name="Source", Actions=[ codepipeline.Actions( RunOrder=1, RoleArn=t.GetAtt("SourceRole", "Arn"), ActionTypeId=codepipeline.ActionTypeId( Category="Source", Owner="AWS", Version="1", Provider="S3", ), OutputArtifacts=[ codepipeline.OutputArtifacts(Name="ParameterisedSource") ], Configuration={ "S3Bucket": t.Ref(parameterised_source_bucket), "S3ObjectKey": "parameters.zip", "PollForSourceChanges": True, }, Name="ParameterisedSource", ) ], ) install_spec = { "runtime-versions": dict(python="3.7"), "commands": [ f"pip install {puppet_version}" if "http" in puppet_version else f"pip install aws-service-catalog-puppet=={puppet_version}", ], } deploy_env_vars = [ { "Type": "PLAINTEXT", "Name": "PUPPET_ACCOUNT_ID", "Value": t.Ref("AWS::AccountId"), }, { "Type": "PLAINTEXT", "Name": "PUPPET_REGION", "Value": t.Ref("AWS::Region"), }, { "Type": "PARAMETER_STORE", "Name": "PARTITION", "Value": t.Ref(partition_parameter), }, { "Type": "PARAMETER_STORE", "Name": "PUPPET_ROLE_NAME", "Value": t.Ref(puppet_role_name_parameter), }, { "Type": "PARAMETER_STORE", "Name": "PUPPET_ROLE_PATH", "Value": t.Ref(puppet_role_path_parameter), }, ] if is_codecommit: template.add_resource( codecommit.Repository( "CodeRepo", RepositoryName=source.get("Configuration").get( "RepositoryName"), RepositoryDescription= "Repo to store the servicecatalog puppet solution", DeletionPolicy="Retain", )) source_stage.Actions.append( codepipeline.Actions( RunOrder=1, RoleArn=t.GetAtt("SourceRole", "Arn"), ActionTypeId=codepipeline.ActionTypeId( Category="Source", Owner="AWS", Version="1", Provider="CodeCommit", ), OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")], Configuration={ "RepositoryName": source.get("Configuration").get("RepositoryName"), "BranchName": source.get("Configuration").get("BranchName"), "PollForSourceChanges": source.get("Configuration").get("PollForSourceChanges", True), }, Name="Source", )) if is_github: source_stage.Actions.append( codepipeline.Actions( RunOrder=1, ActionTypeId=codepipeline.ActionTypeId( Category="Source", Owner="ThirdParty", Version="1", Provider="GitHub", ), OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")], Configuration={ "Owner": source.get("Configuration").get("Owner"), "Repo": source.get("Configuration").get("Repo"), "Branch": source.get("Configuration").get("Branch"), "OAuthToken": t.Join( "", [ "{{resolve:secretsmanager:", source.get("Configuration").get( "SecretsManagerSecret"), ":SecretString:OAuthToken}}", ], ), "PollForSourceChanges": source.get("Configuration").get("PollForSourceChanges"), }, Name="Source", )) if is_custom: source_stage.Actions.append( codepipeline.Actions( RunOrder=1, ActionTypeId=codepipeline.ActionTypeId( Category="Source", Owner="Custom", Version=source.get("Configuration").get( "CustomActionTypeVersion"), Provider=source.get("Configuration").get( "CustomActionTypeProvider"), ), OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")], Configuration={ "GitUrl": source.get("Configuration").get("GitUrl"), "Branch": source.get("Configuration").get("Branch"), "PipelineName": t.Sub("${AWS::StackName}-pipeline"), }, Name="Source", )) webhook = codepipeline.Webhook( "Webhook", Authentication="IP", TargetAction="Source", 
AuthenticationConfiguration=codepipeline.WebhookAuthConfiguration( AllowedIPRange=source.get("Configuration").get( "GitWebHookIpAddress")), Filters=[ codepipeline.WebhookFilterRule( JsonPath="$.changes[0].ref.id", MatchEquals="refs/heads/{Branch}") ], TargetPipelineVersion=1, TargetPipeline=t.Sub("${AWS::StackName}-pipeline"), ) template.add_resource(webhook) values_for_sub = { "GitUrl": source.get("Configuration").get("GitUrl"), "WebhookUrl": t.GetAtt(webhook, "Url"), } output_to_add = t.Output("WebhookUrl") output_to_add.Value = t.Sub("${GitUrl}||${WebhookUrl}", **values_for_sub) output_to_add.Export = t.Export(t.Sub("${AWS::StackName}-pipeline")) template.add_output(output_to_add) if is_codestarsourceconnection: source_stage.Actions.append( codepipeline.Actions( RunOrder=1, RoleArn=t.GetAtt("SourceRole", "Arn"), ActionTypeId=codepipeline.ActionTypeId( Category="Source", Owner="AWS", Version="1", Provider="CodeStarSourceConnection", ), OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")], Configuration={ "ConnectionArn": source.get("Configuration").get("ConnectionArn"), "FullRepositoryId": source.get("Configuration").get("FullRepositoryId"), "BranchName": source.get("Configuration").get("BranchName"), "OutputArtifactFormat": source.get("Configuration").get("OutputArtifactFormat"), }, Name="Source", )) if is_s3: bucket_name = source.get("Configuration").get("S3Bucket") if not scm_skip_creation_of_repo: template.add_resource( s3.Bucket( bucket_name, PublicAccessBlockConfiguration=s3. PublicAccessBlockConfiguration( IgnorePublicAcls=True, BlockPublicPolicy=True, BlockPublicAcls=True, RestrictPublicBuckets=True, ), BucketEncryption=s3.BucketEncryption( ServerSideEncryptionConfiguration=[ s3.ServerSideEncryptionRule( ServerSideEncryptionByDefault=s3. 
ServerSideEncryptionByDefault( SSEAlgorithm="AES256")) ]), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), BucketName=bucket_name, VersioningConfiguration=s3.VersioningConfiguration( Status="Enabled"), )) source_stage.Actions.append( codepipeline.Actions( RunOrder=1, ActionTypeId=codepipeline.ActionTypeId( Category="Source", Owner="AWS", Version="1", Provider="S3", ), OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")], Configuration={ "S3Bucket": bucket_name, "S3ObjectKey": source.get("Configuration").get("S3ObjectKey"), "PollForSourceChanges": source.get("Configuration").get("PollForSourceChanges"), }, Name="Source", )) single_account_run_project_build_spec = dict( version=0.2, phases=dict( install=install_spec, build={ "commands": [ 'echo "single_account: \\"${SINGLE_ACCOUNT_ID}\\"" > parameters.yaml', "cat parameters.yaml", "zip parameters.zip parameters.yaml", "aws s3 cp parameters.zip s3://sc-puppet-parameterised-runs-${PUPPET_ACCOUNT_ID}/parameters.zip", ] }, post_build={ "commands": [ "servicecatalog-puppet wait-for-parameterised-run-to-complete", ] }, ), artifacts=dict( name="DeployProject", files=[ "ServiceCatalogPuppet/manifest.yaml", "ServiceCatalogPuppet/manifest-expanded.yaml", "results/*/*", "output/*/*", "exploded_results/*/*", "tasks.log", ], ), ) single_account_run_project_args = dict( Name="servicecatalog-puppet-single-account-run", Description="Runs puppet for a single account - SINGLE_ACCOUNT_ID", ServiceRole=t.GetAtt(deploy_role, "Arn"), Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}), Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS", ), TimeoutInMinutes=480, Environment=codebuild.Environment( ComputeType=t.Ref(deploy_environment_compute_type_parameter), Image="aws/codebuild/standard:4.0", Type="LINUX_CONTAINER", EnvironmentVariables=[ { "Type": "PLAINTEXT", "Name": "SINGLE_ACCOUNT_ID", "Value": "CHANGE_ME", }, ] + deploy_env_vars, ), Source=codebuild.Source( Type="NO_SOURCE", BuildSpec=yaml.safe_dump(single_account_run_project_build_spec), ), ) single_account_run_project = template.add_resource( codebuild.Project("SingleAccountRunProject", **single_account_run_project_args)) single_account_run_project_build_spec["phases"]["post_build"]["commands"] = [ "servicecatalog-puppet wait-for-parameterised-run-to-complete --on-complete-url $CALLBACK_URL" ] single_account_run_project_args[ "Name"] = "servicecatalog-puppet-single-account-run-with-callback" single_account_run_project_args[ "Description"] = "Runs puppet for a single account - SINGLE_ACCOUNT_ID and then does a http put" single_account_run_project_args.get( "Environment").EnvironmentVariables.append({ "Type": "PLAINTEXT", "Name": "CALLBACK_URL", "Value": "CHANGE_ME", }) single_account_run_project_args["Source"] = codebuild.Source( Type="NO_SOURCE", BuildSpec=yaml.safe_dump(single_account_run_project_build_spec), ) single_account_run_project_with_callback = template.add_resource( codebuild.Project("SingleAccountRunWithCallbackProject", **single_account_run_project_args)) stages = [source_stage] if should_validate: template.add_resource( codebuild.Project( "ValidateProject", Name="servicecatalog-puppet-validate", ServiceRole=t.GetAtt("DeployRole", "Arn"), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), Artifacts=codebuild.Artifacts(Type="CODEPIPELINE"), TimeoutInMinutes=60, Environment=codebuild.Environment( ComputeType="BUILD_GENERAL1_SMALL", Image="aws/codebuild/standard:4.0", Type="LINUX_CONTAINER", ), Source=codebuild.Source( 
BuildSpec=yaml.safe_dump( dict( version="0.2", phases={ "install": { "runtime-versions": { "python": "3.7", }, "commands": [ f"pip install {puppet_version}" if "http" in puppet_version else f"pip install aws-service-catalog-puppet=={puppet_version}", ], }, "build": { "commands": [ "servicecatalog-puppet validate manifest.yaml" ] }, }, )), Type="CODEPIPELINE", ), Description="Validate the manifest.yaml file", )) stages.append( codepipeline.Stages( Name="Validate", Actions=[ codepipeline.Actions( InputArtifacts=[ codepipeline.InputArtifacts(Name="Source"), ], Name="Validate", ActionTypeId=codepipeline.ActionTypeId( Category="Build", Owner="AWS", Version="1", Provider="CodeBuild", ), OutputArtifacts=[ codepipeline.OutputArtifacts( Name="ValidateProject") ], Configuration={ "ProjectName": t.Ref("ValidateProject"), "PrimarySource": "Source", }, RunOrder=1, ), ], )) if is_manual_approvals: deploy_stage = codepipeline.Stages( Name="Deploy", Actions=[ codepipeline.Actions( InputArtifacts=[ codepipeline.InputArtifacts(Name="Source"), codepipeline.InputArtifacts( Name="ParameterisedSource"), ], Name="DryRun", ActionTypeId=codepipeline.ActionTypeId( Category="Build", Owner="AWS", Version="1", Provider="CodeBuild", ), OutputArtifacts=[ codepipeline.OutputArtifacts(Name="DryRunProject") ], Configuration={ "ProjectName": t.Ref("DryRunProject"), "PrimarySource": "Source", }, RunOrder=1, ), codepipeline.Actions( ActionTypeId=codepipeline.ActionTypeId( Category="Approval", Owner="AWS", Version="1", Provider="Manual", ), Configuration={ "NotificationArn": t.Ref("DryRunNotificationTopic"), "CustomData": "Approve when you are happy with the dry run.", }, Name="DryRunApproval", RunOrder=2, ), codepipeline.Actions( InputArtifacts=[ codepipeline.InputArtifacts(Name="Source"), codepipeline.InputArtifacts( Name="ParameterisedSource"), ], Name="Deploy", ActionTypeId=codepipeline.ActionTypeId( Category="Build", Owner="AWS", Version="1", Provider="CodeBuild", ), OutputArtifacts=[ codepipeline.OutputArtifacts(Name="DeployProject") ], Configuration={ "ProjectName": t.Ref("DeployProject"), "PrimarySource": "Source", }, RunOrder=3, ), ], ) else: deploy_stage = codepipeline.Stages( Name="Deploy", Actions=[ codepipeline.Actions( InputArtifacts=[ codepipeline.InputArtifacts(Name="Source"), codepipeline.InputArtifacts( Name="ParameterisedSource"), ], Name="Deploy", ActionTypeId=codepipeline.ActionTypeId( Category="Build", Owner="AWS", Version="1", Provider="CodeBuild", ), OutputArtifacts=[ codepipeline.OutputArtifacts(Name="DeployProject") ], Configuration={ "ProjectName": t.Ref("DeployProject"), "PrimarySource": "Source", "EnvironmentVariables": '[{"name":"EXECUTION_ID","value":"#{codepipeline.PipelineExecutionId}","type":"PLAINTEXT"}]', }, RunOrder=1, ), ], ) stages.append(deploy_stage) pipeline = template.add_resource( codepipeline.Pipeline( "Pipeline", RoleArn=t.GetAtt("PipelineRole", "Arn"), Stages=stages, Name=t.Sub("${AWS::StackName}-pipeline"), ArtifactStore=codepipeline.ArtifactStore( Type="S3", Location=t.Sub( "sc-puppet-pipeline-artifacts-${AWS::AccountId}-${AWS::Region}" ), ), RestartExecutionOnUpdate=True, )) if is_github: template.add_resource( codepipeline.Webhook( "Webhook", AuthenticationConfiguration=codepipeline. 
WebhookAuthConfiguration(SecretToken=t.Join( "", [ "{{resolve:secretsmanager:", source.get("Configuration").get( "SecretsManagerSecret"), ":SecretString:SecretToken}}", ], )), Filters=[ codepipeline.WebhookFilterRule( JsonPath="$.ref", MatchEquals="refs/heads/" + source.get("Configuration").get("Branch"), ) ], Authentication="GITHUB_HMAC", TargetPipeline=t.Ref(pipeline), TargetAction="Source", Name=t.Sub("${AWS::StackName}-webhook"), TargetPipelineVersion=t.GetAtt(pipeline, "Version"), RegisterWithThirdParty="true", )) deploy_project_build_spec = dict( version=0.2, phases=dict( install={ "runtime-versions": dict(python="3.7"), "commands": [ f"pip install {puppet_version}" if "http" in puppet_version else f"pip install aws-service-catalog-puppet=={puppet_version}", ], }, pre_build={ "commands": [ "servicecatalog-puppet --info expand --parameter-override-file $CODEBUILD_SRC_DIR_ParameterisedSource/parameters.yaml manifest.yaml", ] }, build={ "commands": [ "servicecatalog-puppet --info deploy --num-workers ${NUM_WORKERS} manifest-expanded.yaml", ] }, ), artifacts=dict( name="DeployProject", files=[ "manifest-expanded.yaml", "results/*/*", "output/*/*", "exploded_results/*/*", "tasks.log", ], ), ) deploy_project_args = dict( Name="servicecatalog-puppet-deploy", ServiceRole=t.GetAtt(deploy_role, "Arn"), Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}), Artifacts=codebuild.Artifacts(Type="CODEPIPELINE", ), TimeoutInMinutes=480, Environment=codebuild.Environment( ComputeType=t.Ref(deploy_environment_compute_type_parameter), Image="aws/codebuild/standard:4.0", Type="LINUX_CONTAINER", EnvironmentVariables=[ { "Type": "PARAMETER_STORE", "Name": "NUM_WORKERS", "Value": t.Ref(num_workers_ssm_parameter), }, { "Type": "PARAMETER_STORE", "Name": "SPOKE_EXECUTION_MODE_DEPLOY_ENV", "Value": constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME, }, ] + deploy_env_vars, ), Source=codebuild.Source( Type="CODEPIPELINE", BuildSpec=yaml.safe_dump(deploy_project_build_spec), ), Description="deploys out the products to be deployed", ) deploy_project = template.add_resource( codebuild.Project("DeployProject", **deploy_project_args)) deploy_project_build_spec["phases"]["build"]["commands"] = [ "servicecatalog-puppet --info dry-run manifest-expanded.yaml" ] deploy_project_build_spec["artifacts"]["name"] = "DryRunProject" deploy_project_args["Name"] = "servicecatalog-puppet-dryrun" deploy_project_args[ "Description"] = "dry run of servicecatalog-puppet-dryrun" deploy_project_args["Source"] = codebuild.Source( Type="CODEPIPELINE", BuildSpec=yaml.safe_dump(deploy_project_build_spec), ) dry_run_project = template.add_resource( codebuild.Project("DryRunProject", **deploy_project_args)) bootstrap_project = template.add_resource( codebuild.Project( "BootstrapProject", Name="servicecatalog-puppet-bootstrap-spokes-in-ou", ServiceRole=t.GetAtt("DeployRole", "Arn"), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS"), TimeoutInMinutes=60, Environment=codebuild.Environment( ComputeType="BUILD_GENERAL1_SMALL", Image="aws/codebuild/standard:4.0", Type="LINUX_CONTAINER", EnvironmentVariables=[ { "Type": "PLAINTEXT", "Name": "OU_OR_PATH", "Value": "CHANGE_ME" }, { "Type": "PLAINTEXT", "Name": "IAM_ROLE_NAME", "Value": "OrganizationAccountAccessRole", }, { "Type": "PLAINTEXT", "Name": "IAM_ROLE_ARNS", "Value": "" }, ], ), Source=codebuild.Source( BuildSpec= "version: 0.2\nphases:\n install:\n runtime-versions:\n python: 3.7\n commands:\n - 
pip install aws-service-catalog-puppet\n build:\n commands:\n - servicecatalog-puppet bootstrap-spokes-in-ou $OU_OR_PATH $IAM_ROLE_NAME $IAM_ROLE_ARNS\nartifacts:\n files:\n - results/*/*\n - output/*/*\n name: BootstrapProject\n", Type="NO_SOURCE", ), Description="Bootstrap all the accounts in an OU", )) template.add_resource( codebuild.Project( "BootstrapASpokeProject", Name="servicecatalog-puppet-bootstrap-spoke", ServiceRole=t.GetAtt("DeployRole", "Arn"), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS"), TimeoutInMinutes=60, Environment=codebuild.Environment( ComputeType="BUILD_GENERAL1_SMALL", Image="aws/codebuild/standard:4.0", Type="LINUX_CONTAINER", EnvironmentVariables=[ { "Type": "PLAINTEXT", "Name": "PUPPET_ACCOUNT_ID", "Value": t.Sub("${AWS::AccountId}"), }, { "Type": "PLAINTEXT", "Name": "ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN", "Value": "CHANGE_ME", }, { "Type": "PLAINTEXT", "Name": "ASSUMABLE_ROLE_IN_ROOT_ACCOUNT", "Value": "CHANGE_ME", }, ], ), Source=codebuild.Source( BuildSpec=yaml.safe_dump( dict( version=0.2, phases=dict( install=install_spec, build={ "commands": [ "servicecatalog-puppet bootstrap-spoke-as ${PUPPET_ACCOUNT_ID} ${ASSUMABLE_ROLE_IN_ROOT_ACCOUNT} ${ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN}" ] }, ), )), Type="NO_SOURCE", ), Description="Bootstrap given account as a spoke", )) cloud_formation_events_queue = template.add_resource( sqs.Queue( "CloudFormationEventsQueue", QueueName="servicecatalog-puppet-cloudformation-events", Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), )) cloud_formation_events_queue_policy = template.add_resource( sqs.QueuePolicy( "CloudFormationEventsQueuePolicy", Queues=[t.Ref(cloud_formation_events_queue)], PolicyDocument={ "Id": "AllowSNS", "Version": "2012-10-17", "Statement": [{ "Sid": "allow-send-message", "Effect": "Allow", "Principal": { "AWS": "*" }, "Action": ["sqs:SendMessage"], "Resource": "*", "Condition": { "ArnEquals": { "aws:SourceArn": t.Sub( "arn:${AWS::Partition}:sns:*:${AWS::AccountId}:servicecatalog-puppet-cloudformation-regional-events" ) } }, }], }, )) spoke_deploy_bucket = template.add_resource( s3.Bucket( "SpokeDeployBucket", PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration( IgnorePublicAcls=True, BlockPublicPolicy=True, BlockPublicAcls=True, RestrictPublicBuckets=True, ), BucketEncryption=s3.BucketEncryption( ServerSideEncryptionConfiguration=[ s3.ServerSideEncryptionRule( ServerSideEncryptionByDefault=s3. ServerSideEncryptionByDefault(SSEAlgorithm="AES256")) ]), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), BucketName=t.Sub("sc-puppet-spoke-deploy-${AWS::AccountId}"), VersioningConfiguration=s3.VersioningConfiguration( Status="Enabled"), )) caching_bucket = template.add_resource( s3.Bucket( "CachingBucket", PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration( BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True, ), BucketEncryption=s3.BucketEncryption( ServerSideEncryptionConfiguration=[ s3.ServerSideEncryptionRule( ServerSideEncryptionByDefault=s3. 
ServerSideEncryptionByDefault(SSEAlgorithm="AES256")) ]), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), BucketName=t.Sub( "sc-puppet-caching-bucket-${AWS::AccountId}-${AWS::Region}"), VersioningConfiguration=s3.VersioningConfiguration( Status="Enabled"), )) template.add_output( t.Output( "CloudFormationEventsQueueArn", Value=t.GetAtt(cloud_formation_events_queue, "Arn"), )) template.add_output(t.Output("Version", Value=t.GetAtt(param, "Value"))) template.add_output( t.Output("ManualApprovalsParam", Value=t.GetAtt(manual_approvals_param, "Value"))) template.add_resource( ssm.Parameter( "DefaultTerraformVersion", Type="String", Name=constants.DEFAULT_TERRAFORM_VERSION_PARAMETER_NAME, Value=constants.DEFAULT_TERRAFORM_VERSION_VALUE, )) return template
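The BootstrapASpokeProject buildspec above reuses an install_spec that is defined earlier in the original module; a minimal sketch of its likely shape, mirroring the install phase that appears verbatim in the other CodeBuild projects in this snippet (the version value is illustrative):

# Sketch only: mirrors the install phase used by the other CodeBuild projects above.
# puppet_version is supplied by the surrounding module; the value here is illustrative.
puppet_version = "0.100.0"
install_spec = {
    "runtime-versions": {"python": "3.7"},
    "commands": [
        f"pip install {puppet_version}"
        if "http" in puppet_version
        else f"pip install aws-service-catalog-puppet=={puppet_version}",
    ],
}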
idp_duo_integrationKey = t.add_parameter( Parameter("duointegrationKey", Description="Integration Key from Duo", Type="String")) # Create the ECR Repository Repository = t.add_resource( ecr.Repository("Repository", RepositoryName="shibboleth")) # Create the Redirect Repository RedirectRepository = t.add_resource( ecr.Repository("RedirectRepository", RepositoryName="shibboleth-redirect")) # Create the S3 Bucket for the Configuration S3Bucket = t.add_resource(s3.Bucket("ConfigBucket")) # Create instance/task roles # Instance Role InstanceRole = t.add_resource( iam.Role("InstanceRole", AssumeRolePolicyDocument={ "Statement": [{ 'Effect': 'Allow', 'Principal': { 'Service': ['ec2.amazonaws.com'] }, 'Action': ["sts:AssumeRole"] }, { 'Effect': 'Allow', 'Principal': {
def generate_cf_template(): """ Returns an entire CloudFormation stack by using troposphere to construct each piece """ # Header of CloudFormation template t = Template() t.add_version("2010-09-09") t.add_description("Lambda Chat AWS Resources") # Parameters description = "should match [0-9]+-[a-z0-9]+.apps.googleusercontent.com" google_oauth_client_id = t.add_parameter(Parameter( "GoogleOAuthClientID", AllowedPattern="[0-9]+-[a-z0-9]+.apps.googleusercontent.com", Type="String", Description="The Client ID of your Google project", ConstraintDescription=description )) website_s3_bucket_name = t.add_parameter(Parameter( "WebsiteS3BucketName", AllowedPattern=r"[a-zA-Z0-9\-]*", Type="String", Description="Name of S3 bucket to store the website in", ConstraintDescription="can contain only alphanumeric characters and dashes.", )) # The SNS topic the website will publish chat messages to website_sns_topic = t.add_resource(sns.Topic( 'WebsiteSnsTopic', TopicName='lambda-chat', DisplayName='Lambda Chat' )) t.add_output(Output( "WebsiteSnsTopic", Description="sns_topic_arn", Value=Ref(website_sns_topic), )) # The IAM Role and Policy the website will assume to publish to SNS website_role = t.add_resource(iam.Role( "WebsiteRole", Path="/", AssumeRolePolicyDocument=Policy( Statement=[ Statement( Effect=Allow, Action=[Action("sts", "AssumeRoleWithWebIdentity")], Principal=Principal("Federated", "accounts.google.com"), Condition=Condition( StringEquals( "accounts.google.com:aud", Ref(google_oauth_client_id) ) ), ), ], ), )) t.add_resource(iam.PolicyType( "WebsitePolicy", PolicyName="lambda-chat-website-policy", Roles=[Ref(website_role)], PolicyDocument=Policy( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[Action("sns", "Publish")], Resource=[ Ref(website_sns_topic) ], ), ], ) )) t.add_output(Output( "WebsiteRole", Description="website_iam_role_arn", Value=GetAtt(website_role, "Arn"), )) website_bucket = t.add_resource(s3.Bucket( 'WebsiteS3Bucket', BucketName=Ref(website_s3_bucket_name), WebsiteConfiguration=s3.WebsiteConfiguration( ErrorDocument="error.html", IndexDocument="index.html" ) )) t.add_output(Output( "S3Bucket", Description="s3_bucket", Value=Ref(website_bucket), )) t.add_resource(s3.BucketPolicy( 'WebsiteS3BucketPolicy', Bucket=Ref(website_bucket), PolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Sid": "PublicAccess", "Effect": "Allow", "Principal": "*", "Action": ["s3:GetObject"], "Resource": [{ "Fn::Join": [ "", [ "arn:aws:s3:::", { "Ref": "WebsiteS3Bucket", }, "/*" ] ] }] } ] } )) return t
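A minimal usage sketch (not part of the original source): render the assembled template so it can be handed to CloudFormation.

# Usage sketch (assumption): emit the stack definition as JSON on stdout.
if __name__ == "__main__":
    print(generate_cf_template().to_json())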
def r_bucket(self): return s3.Bucket( 'MyBucketResource', BucketName=Sub(self.conf['bucket_name']) )
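For context, r_bucket reads self.conf['bucket_name'] and wraps it in Sub, so the configured name may carry ${AWS::...} placeholders; a minimal sketch of a surrounding class (class and config names are hypothetical):

from troposphere import Sub, Template
from troposphere import s3

class SiteStack:  # hypothetical wrapper, for illustration only
    def __init__(self, conf):
        self.conf = conf

    def r_bucket(self):
        return s3.Bucket('MyBucketResource', BucketName=Sub(self.conf['bucket_name']))

t = Template()
t.add_resource(SiteStack({'bucket_name': 'my-site-${AWS::Region}'}).r_bucket())
print(t.to_yaml())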
Description='Index page for your site', )) error_page = template.add_parameter( Parameter( 'WebsitePageError', Type=c.STRING, Default='error.html', Description='Error page for your site', )) root_bucket = template.add_resource( s3.Bucket('RootBucket', AccessControl=s3.PublicRead, BucketName=Ref(domain), WebsiteConfiguration=s3.WebsiteConfiguration( IndexDocument=Ref(index_page), ErrorDocument=Ref(error_page), ))) root_bucket_arn = Join('', ['arn:aws:s3:::', Ref(root_bucket), '/*']) template.add_resource( s3.BucketPolicy('RootBucketPolicy', Bucket=Ref(root_bucket), PolicyDocument={ 'Statement': [{ 'Action': ['s3:GetObject'], 'Effect': 'Allow', 'Resource': root_bucket_arn, 'Principal': '*', }]
def create_template_bucket(self): t = self.template t.add_resource(s3.Bucket("TemplateBucket"))
def create_template(self): """Create template (main function called by Stacker).""" template = self.template variables = self.get_variables() self.template.add_version('2010-09-09') self.template.add_description('Terraform State Resources') # Conditions for i in ['BucketName', 'TableName']: template.add_condition( "%sOmitted" % i, Or(Equals(variables[i].ref, ''), Equals(variables[i].ref, 'undefined'))) # Resources terraformlocktable = template.add_resource( dynamodb.Table( 'TerraformStateTable', AttributeDefinitions=[ dynamodb.AttributeDefinition(AttributeName='LockID', AttributeType='S') ], KeySchema=[ dynamodb.KeySchema(AttributeName='LockID', KeyType='HASH') ], ProvisionedThroughput=dynamodb.ProvisionedThroughput( ReadCapacityUnits=2, WriteCapacityUnits=2), TableName=If('TableNameOmitted', NoValue, variables['TableName'].ref))) template.add_output( Output('%sName' % terraformlocktable.title, Description='Name of DynamoDB table for Terraform state', Value=terraformlocktable.ref())) terraformstatebucket = template.add_resource( s3.Bucket( 'TerraformStateBucket', AccessControl=s3.Private, BucketName=If('BucketNameOmitted', NoValue, variables['BucketName'].ref), LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[ s3.LifecycleRule(NoncurrentVersionExpirationInDays=90, Status='Enabled') ]), VersioningConfiguration=s3.VersioningConfiguration( Status='Enabled'))) template.add_output( Output('%sName' % terraformstatebucket.title, Description='Name of bucket storing Terraform state', Value=terraformstatebucket.ref())) template.add_output( Output('%sArn' % terraformstatebucket.title, Description='Arn of bucket storing Terraform state', Value=terraformstatebucket.get_att('Arn'))) managementpolicy = template.add_resource( iam.ManagedPolicy( 'ManagementPolicy', Description='Managed policy for Terraform state management.', Path='/', PolicyDocument=PolicyDocument( Version='2012-10-17', Statement=[ # https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions Statement( Action=[awacs.s3.ListBucket], Effect=Allow, Resource=[terraformstatebucket.get_att('Arn')]), Statement( Action=[awacs.s3.GetObject, awacs.s3.PutObject], Effect=Allow, Resource=[ Join('', [ terraformstatebucket.get_att('Arn'), '/*' ]) ]), Statement(Action=[ awacs.dynamodb.GetItem, awacs.dynamodb.PutItem, awacs.dynamodb.DeleteItem ], Effect=Allow, Resource=[terraformlocktable.get_att('Arn')]) ]))) template.add_output( Output('PolicyArn', Description='Managed policy Arn', Value=managementpolicy.ref()))
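The blueprint above reads variables['BucketName'] and variables['TableName'] via .ref; a minimal sketch of the class-level variable declarations a Stacker blueprint like this typically carries (class name, defaults, and descriptions are illustrative):

from stacker.blueprints.base import Blueprint
from stacker.blueprints.variables.types import CFNString

class TfState(Blueprint):  # illustrative name
    VARIABLES = {
        'BucketName': {'type': CFNString, 'default': '',
                       'description': 'Name of the Terraform state bucket'},
        'TableName': {'type': CFNString, 'default': '',
                      'description': 'Name of the Terraform lock table'},
    }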
def add_bucket(bucket_name): # UNUSED return s3.Bucket('WebsiteS3Bucket', BucketName=bucket_name)
# Troposphere to create CloudFormation template to build the init-db-lambda.zip bundle # By Jason Umiker ([email protected]) from troposphere import Ref, Template, Output from troposphere import s3, iam, codebuild t = Template() # Create the S3 Bucket for Output S3Bucket = t.add_resource(s3.Bucket("InitDBBuildOutput")) # CodeBuild Service Role ServiceRole = t.add_resource( iam.Role("InstanceRole", AssumeRolePolicyDocument={ "Statement": [{ 'Effect': 'Allow', 'Principal': { 'Service': 'codebuild.amazonaws.com' }, "Action": "sts:AssumeRole" }] })) # CodeBuild Service Policy CodeBuildServiceRolePolicy = t.add_resource( iam.PolicyType( "CodeBuildServiceRolePolicy", PolicyName="CodeBuildServiceRolePolicy", PolicyDocument={ "Version":
# By Jason Umiker ([email protected]) from troposphere import Output, Join, Ref, Template from troposphere import AWS_ACCOUNT_ID, AWS_REGION from troposphere import ecr, s3, iam, codebuild t = Template() t.add_description("Template to set up a CodeBuild for the Clair container") # Create the clair Repository Repository = t.add_resource( ecr.Repository("Repository", RepositoryName="clair")) # Create the S3 Bucket for Output S3Bucket = t.add_resource(s3.Bucket("ClairBuildOutput")) # CodeBuild Service Role ServiceRole = t.add_resource( iam.Role("InstanceRole", AssumeRolePolicyDocument={ "Statement": [{ 'Effect': 'Allow', 'Principal': { 'Service': 'codebuild.amazonaws.com' }, "Action": "sts:AssumeRole" }] })) # CodeBuild Service Policy
from troposphere import Template, Output, GetAtt import troposphere.s3 as s3 t = Template() s3bucket = t.add_resource(s3.Bucket( 'S3Bucket', AccessControl=s3.PublicRead, WebsiteConfiguration=s3.WebsiteConfiguration( IndexDocument='index.html', ErrorDocument='error.html' ) )) t.add_output( Output( 'WebsiteURL', Value=GetAtt(s3bucket, 'WebsiteURL'), Description='URL for website hosted on S3' ) ) print(t.to_yaml())
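A possible follow-up (not in the original snippet): create the stack from the rendered template with boto3; the stack name is illustrative and credentials/region are assumed to be configured.

import boto3

cfn = boto3.client('cloudformation')
cfn.create_stack(
    StackName='static-website-example',  # illustrative name
    TemplateBody=t.to_yaml(),            # template built above
)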
bucket = t.add_resource( s3.Bucket( 'Bucket', BucketName=If('HasBucketName', Ref(param_bucket_name), Ref(AWS_NO_VALUE)), LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[ # Add a rule to transition objects to STANDARD_IA and age out noncurrent versions s3.LifecycleRule( # Rule attributes Id='S3BucketRule1', Prefix='', Status='Enabled', # Applies to current objects # ExpirationInDays=3650, Transitions=[ s3.LifecycleRuleTransition( StorageClass='STANDARD_IA', TransitionInDays=365, ), ], # Applies to noncurrent objects NoncurrentVersionExpirationInDays=365, NoncurrentVersionTransitions=[ s3.NoncurrentVersionTransition( StorageClass='STANDARD_IA', TransitionInDays=30, ), ], ), ]), ))
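The snippet references param_bucket_name and a HasBucketName condition that are defined elsewhere in the original file; a minimal sketch of the definitions it appears to assume (parameter name and default are illustrative):

from troposphere import Template, Parameter, Ref, Equals, Not

t = Template()
param_bucket_name = t.add_parameter(Parameter(
    'BucketName', Type='String', Default='',
    Description='Optional explicit bucket name; leave blank to let CloudFormation name the bucket',
))
t.add_condition('HasBucketName', Not(Equals(Ref(param_bucket_name), '')))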
def buildInfrastructure(t, args): t.add_resource( ec2.VPC('VPC', CidrBlock='10.0.0.0/16', EnableDnsSupport='true', EnableDnsHostnames='true')) t.add_resource( ec2.Subnet('PublicSubnet1', VpcId=Ref('VPC'), CidrBlock='10.0.1.0/24', AvailabilityZone=Select("0", GetAZs("")))) t.add_resource(ec2.InternetGateway('ig')) t.add_resource( ec2.VPCGatewayAttachment('igAttach', VpcId=Ref('VPC'), InternetGatewayId=Ref('ig'))) t.add_resource(ec2.RouteTable('rtTablePublic', VpcId=Ref('VPC'))) t.add_resource( ec2.Route('rtPublic', RouteTableId=Ref('rtTablePublic'), DestinationCidrBlock='0.0.0.0/0', GatewayId=Ref('ig'), DependsOn='igAttach')) t.add_resource( ec2.SubnetRouteTableAssociation('rtPublic1Attach', SubnetId=Ref('PublicSubnet1'), RouteTableId=Ref('rtTablePublic'))) t.add_resource( kms.Key('OpenEMRKey', DeletionPolicy='Delete', KeyPolicy={ "Version": "2012-10-17", "Id": "key-default-1", "Statement": [{ "Sid": "1", "Effect": "Allow", "Principal": { "AWS": [Join(':', ['arn:aws:iam:', ref_account, 'root'])] }, "Action": "kms:*", "Resource": "*" }] })) t.add_resource( s3.Bucket( 'S3Bucket', DeletionPolicy='Retain', BucketName=Join( '-', ['openemr', Select('2', Split('/', ref_stack_id))]))) t.add_resource( s3.BucketPolicy( 'BucketPolicy', Bucket=Ref('S3Bucket'), PolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Sid": "AWSCloudTrailAclCheck", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Action": "s3:GetBucketAcl", "Resource": { "Fn::Join": ["", ["arn:aws:s3:::", { "Ref": "S3Bucket" }]] } }, { "Sid": "AWSCloudTrailWrite", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Action": "s3:PutObject", "Resource": { "Fn::Join": [ "", [ "arn:aws:s3:::", { "Ref": "S3Bucket" }, "/AWSLogs/", { "Ref": "AWS::AccountId" }, "/*" ] ] }, "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } } }] })) t.add_resource( cloudtrail.Trail('CloudTrail', DependsOn='BucketPolicy', IsLogging=True, IncludeGlobalServiceEvents=True, IsMultiRegionTrail=True, S3BucketName=Ref('S3Bucket'))) return t
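buildInfrastructure uses ref_account and ref_stack_id without defining them; a minimal sketch of the module-level helpers it appears to assume, plus an illustrative invocation (the module's other imports such as ec2, kms, s3, cloudtrail and the intrinsic functions are assumed to be in place):

from troposphere import Ref, Template

# Assumed module-level helpers, consistent with how the function uses them.
ref_account = Ref('AWS::AccountId')
ref_stack_id = Ref('AWS::StackId')

t = buildInfrastructure(Template(), args=None)  # args is unused in the portion shown
print(t.to_json())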