def handle(self, chain_context):
    """
    A sample handle event, showing how parameters should be implemented.

    :type chain_context: chaincontext.ChainContext
    """
    parameter_names = ("NumberOfMinions", "NumberOfEyeballs")
    if chain_context.auto_param_creation:
        # Auto mode: create the parameters directly on the template.
        for parameter_name in parameter_names:
            chain_context.template.add_parameter(
                troposphere.Parameter(parameter_name, Type="String"))
    else:
        # Manual mode: only record the names; an external caller is expected
        # to add the actual parameters to the template.
        for parameter_name in parameter_names:
            chain_context.required_params.add(parameter_name)
def _build_template(github_owner: str, github_branch: str) -> Template:
    """Build and return the pipeline template.

    :param github_owner: owner of the GitHub repository the pipeline watches
    :param github_branch: branch the pipeline builds from
    """
    template = Template(
        Description="CI/CD pipeline for Decrypt Oracle powered by the AWS Encryption SDK for Python"
    )
    # NoEcho keeps the token out of console/API output.
    github_access_token = template.add_parameter(
        troposphere.Parameter(
            "GithubPersonalToken",
            Type="String",
            Description="Personal access token for the github repo.",
            NoEcho=True,
        ))
    # One bucket hosts the deployed application, the other pipeline artifacts.
    application_bucket = template.add_resource(s3.Bucket("ApplicationBucket"))
    artifact_bucket = template.add_resource(s3.Bucket("ArtifactBucketStore"))
    # CodeBuild project plus the role it runs under.
    builder_role = template.add_resource(_codebuild_role())
    builder = template.add_resource(
        _codebuild_builder(builder_role, application_bucket))
    # Roles for CodePipeline itself and for CloudFormation deployments.
    pipeline_role = template.add_resource(
        _pipeline_role(buckets=[application_bucket, artifact_bucket]))
    cfn_role = template.add_resource(_cloudformation_role())
    # Finally, wire everything together into the pipeline resource.
    template.add_resource(
        _pipeline(
            pipeline_role=pipeline_role,
            cfn_role=cfn_role,
            codebuild_builder=builder,
            artifact_bucket=artifact_bucket,
            github_owner=github_owner,
            github_branch=github_branch,
            github_access_token=github_access_token,
        ))
    return template
def _base_troposphere_template(self):
    """Return the most basic troposphere template possible.

    The template carries only the two parameters every stack starts with:
    ``Stage`` (defaulting to ``dev``) and ``Region``.
    """
    template = troposphere.Template()
    base_parameters = (
        ("Stage", {"Default": "dev", "Description": "Name of the Stage",
                   "Type": "String"}),
        ("Region", {"Description": "AWS Region", "Type": "String"}),
    )
    for title, spec in base_parameters:
        template.add_parameter(troposphere.Parameter(title, **spec))
    return template
def test_should_build_template_with_required_parameters_added_externally(
        self):
    """Chain should run when required params are supplied by the caller
    instead of being auto-created."""
    the_chain = chain.Chain()
    the_chain.add(MockStepWithRequiredParam())
    self.context = chaincontext.ChainContext(
        template=troposphere.Template(),
        instance_name='wont_generate_parameters',
        auto_param_creation=False)
    # auto_param_creation is off, so satisfy the step's requirements by hand.
    for required_name in ("NumberOfMinions", "NumberOfEyeballs"):
        self.context.template.add_parameter(
            troposphere.Parameter(required_name, Type="String"))
    the_chain.run(self.context)
def register_type_resources_template(cls, project, template):
    """Registers into the resources stack ``CodeBucket`` as parameter
    so any resource in the template can use it."""
    code_bucket_parameter = troposphere.Parameter(
        "CodeBucket",
        Description="Bucket where the code is located.",
        Type="String",
    )
    template.add_parameter(code_bucket_parameter)
def test_tropo_to_string(self):
    """Smoke-test tropo_to_string against several troposphere object kinds."""
    simple_objects = [
        tropo.Template(),
        tropo.Base64('efsdfsdf'),
        tropo.Output('efsdfsdf', Value='dsfsdfs'),
        tropo.Parameter('efsdfsdf', Type='dsfsdfs'),
    ]
    for obj in simple_objects:
        utility.tropo_to_string(obj)
    # These constructors recursively call themselves for some reason
    # Don't instantiate directly
    # utility.tropo_to_string(tropo.AWSProperty())
    # utility.tropo_to_string(tropo.AWSAttribute())
    utility.tropo_to_string(
        ec2.Instance("ec2instance",
                     InstanceType="m3.medium",
                     ImageId="ami-951945d0"))
def _fix_references(value):
    """Recursively walk *value*; for any Ref to a name that is neither a
    known parameter/resource nor an ``AWS::`` pseudo-parameter, create a
    String parameter for it on the enclosing ``template``."""
    if isinstance(value, troposphere.Ref):
        name = value.data['Ref']
        known_names = (list(template.parameters.keys()) +
                       list(template.resources.keys()))
        if name not in known_names and not name.startswith('AWS::'):
            template.add_parameter(
                troposphere.Parameter(
                    name,
                    Type="String",
                ))
    elif isinstance(value, troposphere.Join):
        # Recurse into each joined element.
        for element in value.data['Fn::Join'][1]:
            _fix_references(element)
    elif isinstance(value, troposphere.BaseAWSObject):
        # Recurse into every property of a nested AWS object.
        for _, prop in six.iteritems(value.properties):
            _fix_references(prop)
def test_string_parameters():
    """A skies StringParameter subclass should render the same JSON as an
    explicitly constructed troposphere.Parameter with identical fields."""
    class InstanceType(skies.StringParameter):
        """Instance type"""
        AllowedValues = [
            't1.micro', 'm1.medium', 'm1.large', 'c1.medium'
        ]
        Default = 'm1.large'

    expected = troposphere.Parameter(
        'InstanceType',
        Description='Instance type',
        Type='String',
        AllowedValues=['t1.micro', 'm1.medium', 'm1.large', 'c1.medium'],
        Default='m1.large',
    )
    # Bug fix: the original used the Python-2-only statement form
    # ``print x.JSONrepr()``; the call form below is valid on both 2 and 3.
    print(expected.JSONrepr())
    assert InstanceType().JSONrepr() == expected.JSONrepr()
def use_custom_resources_stack_name_parameter(
    template,
    parameter_title="CustomResourcesStack",
    parameter_kwargs_dict=None,
):
    """Add the custom-resources stack-name parameter to *template*, record a
    Ref to it in the module-level ``CUSTOM_RESOURCES_STACK_NAME``, and return
    the parameter.  Caller-supplied kwargs override the defaults."""
    merged_kwargs = {
        # defaults
        'Type': troposphere.constants.STRING,
        'Default': "custom-resources",
        'Description': "Name of the custom resources stack",
    }
    merged_kwargs.update(parameter_kwargs_dict if parameter_kwargs_dict
                         is not None else {})
    p = template.add_parameter(
        troposphere.Parameter(parameter_title, **merged_kwargs))
    global CUSTOM_RESOURCES_STACK_NAME
    CUSTOM_RESOURCES_STACK_NAME = troposphere.Ref(p)
    return p
def generate_cft():
    """Build the 'HaaS Stack' CloudFormation template for an HPCC cluster.

    Creates a single-subnet VPC with internet access, an IAM instance role
    with a wildcard inline policy, a wide-open security group, and two
    auto-scaling groups (THOR master and slaves) whose instances bootstrap
    HPCC via a user-data script.  Prints the template JSON and returns the
    template as a dict.
    """
    # CIDR ranges for the VPC and its single subnet.
    VPC_NETWORK = "10.0.0.0/16"
    VPC_PRIVATE = "10.0.0.0/24"
    t = troposphere.Template()
    t.add_description("HaaS Stack")
    # --- Input parameters -------------------------------------------------
    key_name = t.add_parameter(
        troposphere.Parameter(
            "KeyName",
            Type="AWS::EC2::KeyPair::KeyName",
            ConstraintDescription="must be a valid keypair Id",
        ))
    username_and_password = t.add_parameter(
        troposphere.Parameter(
            "UserNameAndPassword",
            Type="String",
            Default="",
            Description=
            "(Optional) Enter like: username/password Used to log into ECL Watch and ECL IDE."
        ))
    cluster_size = t.add_parameter(
        troposphere.Parameter(
            "ClusterSize",
            Type="Number",
            Default="1",
            Description="Number of slave instances to be launched"))
    num_slaves = t.add_parameter(
        troposphere.Parameter(
            "NumberOfSlavesPerNode",
            Type="Number",
            Default="1",
            Description="Number of THOR slave nodes per slave instance"))
    master_instance_type = t.add_parameter(
        troposphere.Parameter(
            "MasterInstanceType",
            Type="String",
            AllowedValues=[
                't2.micro', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'm4.large',
                'm4.xlarge', 'm4.2xlarge', 'r4.large', 'r4.xlarge',
                'r4.2xlarge'
            ],
            Default="c4.large",
            Description="HPCC Thor Master EC2 instance type"))
    slave_instance_type = t.add_parameter(
        troposphere.Parameter("SlaveInstanceType",
                              Type="String",
                              AllowedValues=[
                                  't2.micro', 'c4.large', 'c4.xlarge',
                                  'c4.2xlarge', 'm4.large', 'm4.xlarge',
                                  'm4.2xlarge', 'r4.large', 'r4.xlarge',
                                  'r4.2xlarge'
                              ],
                              Default="c4.large",
                              Description="HPCC Thor Slave EC2 instance type"))
    # NOTE(review): only 'us-east-1d' is allowed, and this parameter is never
    # referenced below (the AvailabilityZones lines in both ASGs are
    # commented out) — confirm whether it is still needed.
    vpc_availability_zone = t.add_parameter(
        troposphere.Parameter(
            "AvailabilityZone",
            Type="String",
            AllowedValues=['us-east-1d'],
            Default="us-east-1d",
            Description="Availability zone",
        ))
    # Single hard-coded AMI, us-east-1 only.
    t.add_mapping('RegionMap', {'us-east-1': {'64': 'ami-24c2ee32'}})
    # --- IAM: EC2-assumable role with an allow-everything inline policy ---
    instance_role = t.add_resource(
        troposphere.iam.Role(
            "HPCCInstanceRoles",
            AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[
                awacs.aws.Statement(Effect=awacs.aws.Allow,
                                    Action=[awacs.sts.AssumeRole],
                                    Principal=awacs.aws.Principal(
                                        "Service", ["ec2.amazonaws.com"]))
            ]),
            Policies=[
                troposphere.iam.Policy(
                    PolicyName="root",
                    # NOTE(review): Action "*" on Resource "*" grants full
                    # admin to every instance — consider narrowing.
                    PolicyDocument=awacs.aws.Policy(Statement=[
                        awacs.aws.Statement(Effect=awacs.aws.Allow,
                                            Action=[awacs.aws.Action("*")],
                                            Resource=["*"])
                    ]))
            ],
            Path="/"))
    instance_profile = t.add_resource(
        troposphere.iam.InstanceProfile("HPCCInstanceProfile",
                                        Path="/",
                                        Roles=[troposphere.Ref(instance_role)
                                               ]))
    # --- Networking: VPC, internet gateway, public route table, subnet ----
    vpc = t.add_resource(
        troposphere.ec2.VPC(
            "HPCCVpc",
            CidrBlock=VPC_NETWORK,
            InstanceTenancy="default",
            EnableDnsSupport=True,
            EnableDnsHostnames=False,
            Tags=troposphere.Tags(Name=troposphere.Ref("AWS::StackName"))))
    internetGateway = t.add_resource(
        troposphere.ec2.InternetGateway(
            "InternetGateway",
            Tags=troposphere.Tags(Name=troposphere.Join(
                "-", [troposphere.Ref("AWS::StackName"), "gateway"]), ),
        ))
    gatewayAttachment = t.add_resource(
        troposphere.ec2.VPCGatewayAttachment(
            "InternetGatewayAttachment",
            InternetGatewayId=troposphere.Ref(internetGateway),
            VpcId=troposphere.Ref(vpc)))
    # public routing table
    publicRouteTable = t.add_resource(
        troposphere.ec2.RouteTable(
            "PublicRouteTable",
            VpcId=troposphere.Ref(vpc),
            Tags=troposphere.Tags(Name=troposphere.Join(
                "-", [troposphere.Ref("AWS::StackName"), "public-rt"]), ),
        ))
    # Default route to the internet; DependsOn ensures the gateway is
    # attached before the route is created.
    internetRoute = t.add_resource(
        troposphere.ec2.Route("RouteToInternet",
                              DestinationCidrBlock="0.0.0.0/0",
                              GatewayId=troposphere.Ref(internetGateway),
                              RouteTableId=troposphere.Ref(publicRouteTable),
                              DependsOn=gatewayAttachment.title))
    subnet = t.add_resource(
        troposphere.ec2.Subnet(
            "Subnet",
            CidrBlock=VPC_PRIVATE,
            Tags=troposphere.Tags(Name=troposphere.Join(
                "-", [troposphere.Ref("AWS::StackName"), "subnet"]), ),
            VpcId=troposphere.Ref(vpc)))
    t.add_resource(
        troposphere.ec2.SubnetRouteTableAssociation(
            "SubnetRouteTableAssociation",
            RouteTableId=troposphere.Ref(publicRouteTable),
            SubnetId=troposphere.Ref(subnet)))
    placement_group = t.add_resource(
        troposphere.ec2.PlacementGroup("HPCCPlacementGroup",
                                       Strategy="cluster"))
    # --- Security group: all egress allowed; ingress open to 0.0.0.0/0 on
    # many specific ports plus blanket tcp/udp 0-65535 and ICMP.
    # NOTE(review): the blanket 0-65535 rules make the specific port rules
    # redundant and expose everything — confirm this is intentional.
    security_groups = t.add_resource(
        troposphere.ec2.SecurityGroup(
            "HPCCSecurityGroups",
            GroupDescription="Enable SSH and HTTP access on the inbound port",
            SecurityGroupEgress=[
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="-1",
                    CidrIp="0.0.0.0/0",
                ),
            ],
            SecurityGroupIngress=[
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8888,
                    ToPort=8888,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=9042,
                    ToPort=9042,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=7000,
                    ToPort=7000,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=7001,
                    ToPort=7001,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=7199,
                    ToPort=7199,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=9160,
                    ToPort=9160,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=61620,
                    ToPort=61620,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=61621,
                    ToPort=61621,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8002,
                    ToPort=8002,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8010,
                    ToPort=8010,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8015,
                    ToPort=8015,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8145,
                    ToPort=8145,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=22,
                    ToPort=22,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=0,
                    ToPort=65535,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="udp",
                    FromPort=0,
                    ToPort=65535,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8050,
                    ToPort=8050,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8008,
                    ToPort=8008,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=9876,
                    ToPort=9876,
                    CidrIp="0.0.0.0/0",
                ),
                troposphere.ec2.SecurityGroupRule(
                    IpProtocol="icmp",
                    FromPort=-1,
                    ToPort=-1,
                    CidrIp="0.0.0.0/0",
                ),
            ],
            VpcId=troposphere.Ref(vpc)))
    # AutoScaling
    # Slave launch config: user-data script runs the HPCC bootstrap as user
    # 'osr', then signals the SlaveASG via cfn-signal.
    slave_launch_config = t.add_resource(
        troposphere.autoscaling.LaunchConfiguration(
            "SlaveLaunchCfg",
            ImageId=troposphere.FindInMap("RegionMap",
                                          troposphere.Ref("AWS::Region"),
                                          "64"),
            InstanceType=troposphere.Ref(slave_instance_type),
            AssociatePublicIpAddress="True",
            KeyName=troposphere.Ref(key_name),
            SecurityGroups=[troposphere.Ref(security_groups)],
            IamInstanceProfile=troposphere.Ref(instance_profile),
            UserData=troposphere.Base64(
                troposphere.Join('\n', [
                    "#!/bin/bash",
                    "exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1",
                    "echo [Initialization] starting the slave node",
                    troposphere.Join(" ", [
                        "su - osr /bin/bash -c 'cd /home/osr/project-aws; git pull; /bin/bash scripts/auto_hpcc.sh",
                        troposphere.Ref("AWS::StackName"),
                        troposphere.Ref("AWS::Region"),
                        "'",
                    ]),
                    "echo [Initialization] completed the slave node",
                    "echo SCRIPT: 'Signal stack that setup of HPCC System is complete.'",
                    troposphere.Join(" ", [
                        "/usr/local/bin/cfn-signal -e 0 --stack ",
                        troposphere.Ref("AWS::StackName"),
                        "--resource SlaveASG ", "--region ",
                        troposphere.Ref("AWS::Region")
                    ]),
                    "echo SCRIPT: 'Done signaling stack that setup of HPCC System has completed.'"
                ])),
        ))
    slave_autoscaling_group = t.add_resource(
        troposphere.autoscaling.AutoScalingGroup(
            "SlaveASG",
            DesiredCapacity=troposphere.Ref(cluster_size),
            # @TODO: disable here to support t2.micro for cheap testing
            #PlacementGroup=troposphere.Ref(placement_group),
            LaunchConfigurationName=troposphere.Ref(slave_launch_config),
            MinSize=troposphere.Ref(cluster_size),
            MaxSize=troposphere.Ref(cluster_size),
            HealthCheckType="EC2",
            HealthCheckGracePeriod="300",
            VPCZoneIdentifier=[troposphere.Ref(subnet)],
            #AvailabilityZones=[troposphere.Ref(vpc_availability_zone)],
            # Third Tag argument is presumably PropagateAtLaunch — confirm.
            Tags=[
                troposphere.autoscaling.Tag("StackName",
                                            troposphere.Ref("AWS::StackName"),
                                            True),
                troposphere.autoscaling.Tag("slavesPerNode",
                                            troposphere.Ref(num_slaves),
                                            True),
                troposphere.autoscaling.Tag(
                    "UserNameAndPassword",
                    troposphere.Ref(username_and_password), True),
                troposphere.autoscaling.Tag(
                    "Name",
                    troposphere.Join(
                        "-", [troposphere.Ref("AWS::StackName"), "Slave"]),
                    True),
            ],
        ))
    # Master launch config: identical shape to the slave one, but signals
    # MasterASG instead.
    master_launch_config = t.add_resource(
        troposphere.autoscaling.LaunchConfiguration(
            "MasterLaunchCfg",
            ImageId=troposphere.FindInMap("RegionMap",
                                          troposphere.Ref("AWS::Region"),
                                          "64"),
            InstanceType=troposphere.Ref(master_instance_type),
            AssociatePublicIpAddress="True",
            KeyName=troposphere.Ref(key_name),
            SecurityGroups=[troposphere.Ref(security_groups)],
            IamInstanceProfile=troposphere.Ref(instance_profile),
            UserData=troposphere.Base64(
                troposphere.Join('\n', [
                    "#!/bin/bash",
                    "exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1",
                    "echo [Initialization] starting the master node",
                    troposphere.Join(" ", [
                        "su - osr /bin/bash -c 'cd /home/osr/project-aws; git pull; /bin/bash scripts/auto_hpcc.sh",
                        troposphere.Ref("AWS::StackName"),
                        troposphere.Ref("AWS::Region"),
                        "'",
                    ]),
                    "echo [Initialization] completed the master node",
                    "echo SCRIPT: 'Signal stack that setup of HPCC System is complete.'",
                    troposphere.Join(" ", [
                        "/usr/local/bin/cfn-signal -e 0 --stack ",
                        troposphere.Ref("AWS::StackName"),
                        "--resource MasterASG ", "--region ",
                        troposphere.Ref("AWS::Region")
                    ]),
                    "echo SCRIPT: 'Done signaling stack that setup of HPCC System has completed.'"
                ])),
        ))
    # Exactly one master node.
    master_autoscaling_group = t.add_resource(
        troposphere.autoscaling.AutoScalingGroup(
            "MasterASG",
            DesiredCapacity="1",  # need to update x -> N+x
            # @TODO: disable here to support t2.micro for cheap testing
            #PlacementGroup=troposphere.Ref(placement_group),
            LaunchConfigurationName=troposphere.Ref(master_launch_config),
            MinSize="1",
            MaxSize="1",
            HealthCheckType="EC2",
            HealthCheckGracePeriod="300",
            VPCZoneIdentifier=[troposphere.Ref(subnet)],
            #AvailabilityZones=[troposphere.Ref(vpc_availability_zone)],
            Tags=[
                troposphere.autoscaling.Tag("StackName",
                                            troposphere.Ref("AWS::StackName"),
                                            True),
                troposphere.autoscaling.Tag("slavesPerNode",
                                            troposphere.Ref(num_slaves),
                                            True),
                troposphere.autoscaling.Tag(
                    "UserNameAndPassword",
                    troposphere.Ref(username_and_password), True),
                troposphere.autoscaling.Tag(
                    "Name",
                    troposphere.Join(
                        "-", [troposphere.Ref("AWS::StackName"), "Master"]),
                    True),
            ],
        ))
    print(t.to_json())
    return t.to_dict()
def password(self):
    """Return the ``DatabasePassword`` String parameter."""
    logical_id = self._get_logical_id('DatabasePassword')
    return ts.Parameter(
        logical_id,
        Type='String',
        NoEcho=True,  # do not print in aws console
    )
import troposphere.cloudformation
import troposphere.sqs
import troposphere.logs
import troposphere.iam
import troposphere.ecs
import ipaddress

# Module-level template for the s3bundler stack.
t = troposphere.Template()
t.add_version("2010-09-09")
# Archive destination bucket; empty means "create one" (see the
# NeedsArchiveBucket condition below).
S3ArchiveBucket = t.add_parameter(
    troposphere.Parameter(
        "S3ArchiveBucket",
        Default="",
        Description=
        "S3 Bucket where you will store the output archives. If empty, it will be created. An existing bucket needs to notify S3BundlerQueueName on new manifests written by s3grouper.",
        Type="String",
    ))
# True when the caller did not supply a bucket name.
NeedsArchiveBucket = t.add_condition(
    "NeedsArchiveBucket",
    troposphere.Equals(troposphere.Ref(S3ArchiveBucket), ""))
# NOTE(review): this statement is truncated in the visible chunk.
S3ArchivePrefix = t.add_parameter(
    troposphere.Parameter(
        "S3ArchivePrefix",
        Default="archive",
        Description=
        "Prefix within S3 Bucket where you will store the output archives",
        Type="String",
def add_parameter(template, name, description, type='String'):
    """Create a parameter named *name* on *template* and return it.

    ``type`` defaults to the CloudFormation ``String`` type.
    """
    new_parameter = troposphere.Parameter(name,
                                          Type=type,
                                          Description=description)
    return template.add_parameter(new_parameter)
def image(self):
    """Return the ``Image`` String parameter."""
    logical_id = self._get_logical_id('Image')
    return ts.Parameter(logical_id, Type='String')
    "AWS CloudFormation Sample Template S3_Bucket: template showing "
    "how to create a publicly accessible S3 bucket.")
# NOTE(review): the add_description( call opening this chunk lies outside
# the visible region.
s3bucket1 = t.add_resource(Bucket(
    "S3Bucket1",
    AccessControl=PublicRead,
))
t.add_output(
    Output("BucketName",
           Value=Ref(s3bucket1),
           Description="Name of S3 bucket"))
param_foo = t.add_parameter(
    troposphere.Parameter(
        'InstanceType',
        Description='Type of EC2 instance',
        Type='String',
    ))


def generate_template():
    """Render the module-level template as JSON."""
    return t.to_json()


def post_hook(awsclient, config, parameters, stack_outputs, stack_state):
    """Post-deployment hook: sanity-check the arguments it receives."""
    # do validations on arguments
    print('hi from hook')
    assert awsclient is not None
    #assert type(awsclient) in [AWSClient, PlaceboAWSClient] is not None
    #assert config == ConfigTree([('cloudformation', ConfigTree([('StackName', 'infra-dev-kumo-sample-stack-with-hooks'), ('InstanceType', 't2.medium')]))])
from troposphere.rds import DBInstance, DBParameterGroup
from troposphere.ec2 import SecurityGroupRule as SGR

from gcdt_kumo.iam import IAMRoleAndPolicies

# Stack name; overridable via the SERVICE_NAME environment variable.
SERVICE_NAME = os.getenv('SERVICE_NAME', 'gcdtSampleStackWithEc2Instance')

# Module-level template for the sample EC2 stack.
t = Template()
t.add_description(
    "AWS CloudFormation Sample Template S3_Bucket: template showing "
    "how to create a publicly accessible S3 bucket.")
param_vpc_id = t.add_parameter(
    troposphere.Parameter(
        'VPCId',
        Type="String",
        Description="ID of glomex default VPC",
    ))
param_instance_type = t.add_parameter(
    troposphere.Parameter(
        'InstanceType',
        Description='Type of EC2 instance',
        Type='String',
        Default='t2.micro',
    ))
# NOTE(review): this statement is truncated in the visible chunk.
param_hosted_zone = t.add_parameter(
    troposphere.Parameter(
        'HostedZone',
        Description='Name of the hosted Zone (without trailing dot)',
def get_template( puppet_version, all_regions, source, is_caching_enabled, is_manual_approvals: bool, scm_skip_creation_of_repo: bool, should_validate: bool, ) -> t.Template: is_codecommit = source.get("Provider", "").lower() == "codecommit" is_github = source.get("Provider", "").lower() == "github" is_codestarsourceconnection = (source.get( "Provider", "").lower() == "codestarsourceconnection") is_custom = (source.get("Provider", "").lower() == "custom") is_s3 = source.get("Provider", "").lower() == "s3" description = f"""Bootstrap template used to bring up the main ServiceCatalog-Puppet AWS CodePipeline with dependencies {{"version": "{puppet_version}", "framework": "servicecatalog-puppet", "role": "bootstrap-master"}}""" template = t.Template(Description=description) version_parameter = template.add_parameter( t.Parameter("Version", Default=puppet_version, Type="String")) org_iam_role_arn_parameter = template.add_parameter( t.Parameter("OrgIamRoleArn", Default="None", Type="String")) with_manual_approvals_parameter = template.add_parameter( t.Parameter( "WithManualApprovals", Type="String", AllowedValues=["Yes", "No"], Default="No", )) puppet_code_pipeline_role_permission_boundary_parameter = template.add_parameter( t.Parameter( "PuppetCodePipelineRolePermissionBoundary", Type="String", Description= "IAM Permission Boundary to apply to the PuppetCodePipelineRole", Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data, )) source_role_permissions_boundary_parameter = template.add_parameter( t.Parameter( "SourceRolePermissionsBoundary", Type="String", Description="IAM Permission Boundary to apply to the SourceRole", Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data, )) puppet_generate_role_permission_boundary_parameter = template.add_parameter( t.Parameter( "PuppetGenerateRolePermissionBoundary", Type="String", Description= "IAM Permission Boundary to apply to the PuppetGenerateRole", 
Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data, )) puppet_deploy_role_permission_boundary_parameter = template.add_parameter( t.Parameter( "PuppetDeployRolePermissionBoundary", Type="String", Description= "IAM Permission Boundary to apply to the PuppetDeployRole", Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data, )) puppet_provisioning_role_permissions_boundary_parameter = template.add_parameter( t.Parameter( "PuppetProvisioningRolePermissionsBoundary", Type="String", Description= "IAM Permission Boundary to apply to the PuppetProvisioningRole", Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data, )) cloud_formation_deploy_role_permissions_boundary_parameter = template.add_parameter( t.Parameter( "CloudFormationDeployRolePermissionsBoundary", Type="String", Description= "IAM Permission Boundary to apply to the CloudFormationDeployRole", Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data, )) deploy_environment_compute_type_parameter = template.add_parameter( t.Parameter( "DeployEnvironmentComputeType", Type="String", Description="The AWS CodeBuild Environment Compute Type", Default="BUILD_GENERAL1_SMALL", )) spoke_deploy_environment_compute_type_parameter = template.add_parameter( t.Parameter( "SpokeDeployEnvironmentComputeType", Type="String", Description= "The AWS CodeBuild Environment Compute Type for spoke execution mode", Default="BUILD_GENERAL1_SMALL", )) deploy_num_workers_parameter = template.add_parameter( t.Parameter( "DeployNumWorkers", Type="Number", Description= "Number of workers that should be used when running a deploy", Default=10, )) puppet_role_name_parameter = template.add_parameter( t.Parameter("PuppetRoleName", Type="String", Default="PuppetRole")) puppet_role_path_template_parameter = template.add_parameter( t.Parameter("PuppetRolePath", Type="String", Default="/servicecatalog-puppet/")) template.add_condition( "ShouldUseOrgs", t.Not(t.Equals(t.Ref(org_iam_role_arn_parameter), 
"None"))) template.add_condition( "HasManualApprovals", t.Equals(t.Ref(with_manual_approvals_parameter), "Yes")) template.add_resource( s3.Bucket( "StacksRepository", BucketName=t.Sub("sc-puppet-stacks-repository-${AWS::AccountId}"), VersioningConfiguration=s3.VersioningConfiguration( Status="Enabled"), BucketEncryption=s3.BucketEncryption( ServerSideEncryptionConfiguration=[ s3.ServerSideEncryptionRule( ServerSideEncryptionByDefault=s3. ServerSideEncryptionByDefault(SSEAlgorithm="AES256")) ]), PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration( BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True, ), Tags=t.Tags({"ServiceCatalogPuppet:Actor": "Framework"}), )) manual_approvals_param = template.add_resource( ssm.Parameter( "ManualApprovalsParam", Type="String", Name="/servicecatalog-puppet/manual-approvals", Value=t.Ref(with_manual_approvals_parameter), )) template.add_resource( ssm.Parameter( "SpokeDeployEnvParameter", Type="String", Name=constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME, Value=t.Ref(spoke_deploy_environment_compute_type_parameter), )) param = template.add_resource( ssm.Parameter( "Param", Type="String", Name="service-catalog-puppet-version", Value=t.Ref(version_parameter), )) partition_parameter = template.add_resource( ssm.Parameter( "PartitionParameter", Type="String", Name="/servicecatalog-puppet/partition", Value=t.Ref("AWS::Partition"), )) puppet_role_name_parameter = template.add_resource( ssm.Parameter( "PuppetRoleNameParameter", Type="String", Name="/servicecatalog-puppet/puppet-role/name", Value=t.Ref(puppet_role_name_parameter), )) puppet_role_path_parameter = template.add_resource( ssm.Parameter( "PuppetRolePathParameter", Type="String", Name="/servicecatalog-puppet/puppet-role/path", Value=t.Ref(puppet_role_path_template_parameter), )) share_accept_function_role = template.add_resource( iam.Role( "ShareAcceptFunctionRole", RoleName="ShareAcceptFunctionRole", 
ManagedPolicyArns=[ t.Sub( "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" ) ], Path=t.Ref(puppet_role_path_template_parameter), Policies=[ iam.Policy( PolicyName="ServiceCatalogActions", PolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["sts:AssumeRole"], "Resource": { "Fn::Sub": "arn:${AWS::Partition}:iam::*:role${PuppetRolePath}${PuppetRoleName}" }, "Effect": "Allow", }], }, ) ], AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["lambda.amazonaws.com"] }, }], }, )) provisioning_role = template.add_resource( iam.Role( "ProvisioningRole", RoleName="PuppetProvisioningRole", AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["codebuild.amazonaws.com"] }, }, { "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "AWS": { "Fn::Sub": "${AWS::AccountId}" } }, }, ], }, ManagedPolicyArns=[ t.Sub( "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess" ) ], PermissionsBoundary=t.Ref( puppet_provisioning_role_permissions_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter), )) cloud_formation_deploy_role = template.add_resource( iam.Role( "CloudFormationDeployRole", RoleName="CloudFormationDeployRole", AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["cloudformation.amazonaws.com"] }, }, { "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "AWS": { "Fn::Sub": "${AWS::AccountId}" } }, }, ], }, ManagedPolicyArns=[ t.Sub( "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess" ) ], PermissionsBoundary=t.Ref( cloud_formation_deploy_role_permissions_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter), )) pipeline_role = template.add_resource( iam.Role( "PipelineRole", 
RoleName="PuppetCodePipelineRole", AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["codepipeline.amazonaws.com"] }, }], }, ManagedPolicyArns=[ t.Sub( "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess" ) ], PermissionsBoundary=t.Ref( puppet_code_pipeline_role_permission_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter), )) source_role = template.add_resource( iam.Role( "SourceRole", RoleName="PuppetSourceRole", AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["codepipeline.amazonaws.com"] }, }, { "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "AWS": { "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:root" } }, }, ], }, ManagedPolicyArns=[ t.Sub( "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess" ) ], PermissionsBoundary=t.Ref( source_role_permissions_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter), )) dry_run_notification_topic = template.add_resource( sns.Topic( "DryRunNotificationTopic", DisplayName="service-catalog-puppet-dry-run-approvals", TopicName="service-catalog-puppet-dry-run-approvals", Condition="HasManualApprovals", )) deploy_role = template.add_resource( iam.Role( "DeployRole", RoleName="PuppetDeployRole", AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["codebuild.amazonaws.com"] }, }], }, ManagedPolicyArns=[ t.Sub( "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess" ) ], PermissionsBoundary=t.Ref( puppet_deploy_role_permission_boundary_parameter), Path=t.Ref(puppet_role_path_template_parameter), )) num_workers_ssm_parameter = template.add_resource( ssm.Parameter( "NumWorkersSSMParameter", Type="String", Name="/servicecatalog-puppet/deploy/num-workers", 
Value=t.Sub("${DeployNumWorkers}"), )) parameterised_source_bucket = template.add_resource( s3.Bucket( "ParameterisedSourceBucket", PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration( IgnorePublicAcls=True, BlockPublicPolicy=True, BlockPublicAcls=True, RestrictPublicBuckets=True, ), BucketEncryption=s3.BucketEncryption( ServerSideEncryptionConfiguration=[ s3.ServerSideEncryptionRule( ServerSideEncryptionByDefault=s3. ServerSideEncryptionByDefault(SSEAlgorithm="AES256")) ]), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), BucketName=t.Sub("sc-puppet-parameterised-runs-${AWS::AccountId}"), VersioningConfiguration=s3.VersioningConfiguration( Status="Enabled"), )) source_stage = codepipeline.Stages( Name="Source", Actions=[ codepipeline.Actions( RunOrder=1, RoleArn=t.GetAtt("SourceRole", "Arn"), ActionTypeId=codepipeline.ActionTypeId( Category="Source", Owner="AWS", Version="1", Provider="S3", ), OutputArtifacts=[ codepipeline.OutputArtifacts(Name="ParameterisedSource") ], Configuration={ "S3Bucket": t.Ref(parameterised_source_bucket), "S3ObjectKey": "parameters.zip", "PollForSourceChanges": True, }, Name="ParameterisedSource", ) ], ) install_spec = { "runtime-versions": dict(python="3.7"), "commands": [ f"pip install {puppet_version}" if "http" in puppet_version else f"pip install aws-service-catalog-puppet=={puppet_version}", ], } deploy_env_vars = [ { "Type": "PLAINTEXT", "Name": "PUPPET_ACCOUNT_ID", "Value": t.Ref("AWS::AccountId"), }, { "Type": "PLAINTEXT", "Name": "PUPPET_REGION", "Value": t.Ref("AWS::Region"), }, { "Type": "PARAMETER_STORE", "Name": "PARTITION", "Value": t.Ref(partition_parameter), }, { "Type": "PARAMETER_STORE", "Name": "PUPPET_ROLE_NAME", "Value": t.Ref(puppet_role_name_parameter), }, { "Type": "PARAMETER_STORE", "Name": "PUPPET_ROLE_PATH", "Value": t.Ref(puppet_role_path_parameter), }, ] if is_codecommit: template.add_resource( codecommit.Repository( "CodeRepo", 
RepositoryName=source.get("Configuration").get( "RepositoryName"), RepositoryDescription= "Repo to store the servicecatalog puppet solution", DeletionPolicy="Retain", )) source_stage.Actions.append( codepipeline.Actions( RunOrder=1, RoleArn=t.GetAtt("SourceRole", "Arn"), ActionTypeId=codepipeline.ActionTypeId( Category="Source", Owner="AWS", Version="1", Provider="CodeCommit", ), OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")], Configuration={ "RepositoryName": source.get("Configuration").get("RepositoryName"), "BranchName": source.get("Configuration").get("BranchName"), "PollForSourceChanges": source.get("Configuration").get("PollForSourceChanges", True), }, Name="Source", )) if is_github: source_stage.Actions.append( codepipeline.Actions( RunOrder=1, ActionTypeId=codepipeline.ActionTypeId( Category="Source", Owner="ThirdParty", Version="1", Provider="GitHub", ), OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")], Configuration={ "Owner": source.get("Configuration").get("Owner"), "Repo": source.get("Configuration").get("Repo"), "Branch": source.get("Configuration").get("Branch"), "OAuthToken": t.Join( "", [ "{{resolve:secretsmanager:", source.get("Configuration").get( "SecretsManagerSecret"), ":SecretString:OAuthToken}}", ], ), "PollForSourceChanges": source.get("Configuration").get("PollForSourceChanges"), }, Name="Source", )) if is_custom: source_stage.Actions.append( codepipeline.Actions( RunOrder=1, ActionTypeId=codepipeline.ActionTypeId( Category="Source", Owner="Custom", Version=source.get("Configuration").get( "CustomActionTypeVersion"), Provider=source.get("Configuration").get( "CustomActionTypeProvider"), ), OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")], Configuration={ "GitUrl": source.get("Configuration").get("GitUrl"), "Branch": source.get("Configuration").get("Branch"), "PipelineName": t.Sub("${AWS::StackName}-pipeline"), }, Name="Source", )) webhook = codepipeline.Webhook( "Webhook", Authentication="IP", 
TargetAction="Source", AuthenticationConfiguration=codepipeline.WebhookAuthConfiguration( AllowedIPRange=source.get("Configuration").get( "GitWebHookIpAddress")), Filters=[ codepipeline.WebhookFilterRule( JsonPath="$.changes[0].ref.id", MatchEquals="refs/heads/{Branch}") ], TargetPipelineVersion=1, TargetPipeline=t.Sub("${AWS::StackName}-pipeline"), ) template.add_resource(webhook) values_for_sub = { "GitUrl": source.get("Configuration").get("GitUrl"), "WebhookUrl": t.GetAtt(webhook, "Url"), } output_to_add = t.Output("WebhookUrl") output_to_add.Value = t.Sub("${GitUrl}||${WebhookUrl}", **values_for_sub) output_to_add.Export = t.Export(t.Sub("${AWS::StackName}-pipeline")) template.add_output(output_to_add) if is_codestarsourceconnection: source_stage.Actions.append( codepipeline.Actions( RunOrder=1, RoleArn=t.GetAtt("SourceRole", "Arn"), ActionTypeId=codepipeline.ActionTypeId( Category="Source", Owner="AWS", Version="1", Provider="CodeStarSourceConnection", ), OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")], Configuration={ "ConnectionArn": source.get("Configuration").get("ConnectionArn"), "FullRepositoryId": source.get("Configuration").get("FullRepositoryId"), "BranchName": source.get("Configuration").get("BranchName"), "OutputArtifactFormat": source.get("Configuration").get("OutputArtifactFormat"), }, Name="Source", )) if is_s3: bucket_name = source.get("Configuration").get("S3Bucket") if not scm_skip_creation_of_repo: template.add_resource( s3.Bucket( bucket_name, PublicAccessBlockConfiguration=s3. PublicAccessBlockConfiguration( IgnorePublicAcls=True, BlockPublicPolicy=True, BlockPublicAcls=True, RestrictPublicBuckets=True, ), BucketEncryption=s3.BucketEncryption( ServerSideEncryptionConfiguration=[ s3.ServerSideEncryptionRule( ServerSideEncryptionByDefault=s3. 
ServerSideEncryptionByDefault( SSEAlgorithm="AES256")) ]), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), BucketName=bucket_name, VersioningConfiguration=s3.VersioningConfiguration( Status="Enabled"), )) source_stage.Actions.append( codepipeline.Actions( RunOrder=1, ActionTypeId=codepipeline.ActionTypeId( Category="Source", Owner="AWS", Version="1", Provider="S3", ), OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")], Configuration={ "S3Bucket": bucket_name, "S3ObjectKey": source.get("Configuration").get("S3ObjectKey"), "PollForSourceChanges": source.get("Configuration").get("PollForSourceChanges"), }, Name="Source", )) single_account_run_project_build_spec = dict( version=0.2, phases=dict( install=install_spec, build={ "commands": [ 'echo "single_account: \\"${SINGLE_ACCOUNT_ID}\\"" > parameters.yaml', "cat parameters.yaml", "zip parameters.zip parameters.yaml", "aws s3 cp parameters.zip s3://sc-puppet-parameterised-runs-${PUPPET_ACCOUNT_ID}/parameters.zip", ] }, post_build={ "commands": [ "servicecatalog-puppet wait-for-parameterised-run-to-complete", ] }, ), artifacts=dict( name="DeployProject", files=[ "ServiceCatalogPuppet/manifest.yaml", "ServiceCatalogPuppet/manifest-expanded.yaml", "results/*/*", "output/*/*", "exploded_results/*/*", "tasks.log", ], ), ) single_account_run_project_args = dict( Name="servicecatalog-puppet-single-account-run", Description="Runs puppet for a single account - SINGLE_ACCOUNT_ID", ServiceRole=t.GetAtt(deploy_role, "Arn"), Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}), Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS", ), TimeoutInMinutes=480, Environment=codebuild.Environment( ComputeType=t.Ref(deploy_environment_compute_type_parameter), Image="aws/codebuild/standard:4.0", Type="LINUX_CONTAINER", EnvironmentVariables=[ { "Type": "PLAINTEXT", "Name": "SINGLE_ACCOUNT_ID", "Value": "CHANGE_ME", }, ] + deploy_env_vars, ), Source=codebuild.Source( Type="NO_SOURCE", 
BuildSpec=yaml.safe_dump(single_account_run_project_build_spec), ), ) single_account_run_project = template.add_resource( codebuild.Project("SingleAccountRunProject", **single_account_run_project_args)) single_account_run_project_build_spec["phases"]["post_build"]["commands"] = [ "servicecatalog-puppet wait-for-parameterised-run-to-complete --on-complete-url $CALLBACK_URL" ] single_account_run_project_args[ "Name"] = "servicecatalog-puppet-single-account-run-with-callback" single_account_run_project_args[ "Description"] = "Runs puppet for a single account - SINGLE_ACCOUNT_ID and then does a http put" single_account_run_project_args.get( "Environment").EnvironmentVariables.append({ "Type": "PLAINTEXT", "Name": "CALLBACK_URL", "Value": "CHANGE_ME", }) single_account_run_project_args["Source"] = codebuild.Source( Type="NO_SOURCE", BuildSpec=yaml.safe_dump(single_account_run_project_build_spec), ) single_account_run_project_with_callback = template.add_resource( codebuild.Project("SingleAccountRunWithCallbackProject", **single_account_run_project_args)) stages = [source_stage] if should_validate: template.add_resource( codebuild.Project( "ValidateProject", Name="servicecatalog-puppet-validate", ServiceRole=t.GetAtt("DeployRole", "Arn"), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), Artifacts=codebuild.Artifacts(Type="CODEPIPELINE"), TimeoutInMinutes=60, Environment=codebuild.Environment( ComputeType="BUILD_GENERAL1_SMALL", Image="aws/codebuild/standard:4.0", Type="LINUX_CONTAINER", ), Source=codebuild.Source( BuildSpec=yaml.safe_dump( dict( version="0.2", phases={ "install": { "runtime-versions": { "python": "3.7", }, "commands": [ f"pip install {puppet_version}" if "http" in puppet_version else f"pip install aws-service-catalog-puppet=={puppet_version}", ], }, "build": { "commands": [ "servicecatalog-puppet validate manifest.yaml" ] }, }, )), Type="CODEPIPELINE", ), Description="Validate the manifest.yaml file", )) stages.append( 
codepipeline.Stages( Name="Validate", Actions=[ codepipeline.Actions( InputArtifacts=[ codepipeline.InputArtifacts(Name="Source"), ], Name="Validate", ActionTypeId=codepipeline.ActionTypeId( Category="Build", Owner="AWS", Version="1", Provider="CodeBuild", ), OutputArtifacts=[ codepipeline.OutputArtifacts( Name="ValidateProject") ], Configuration={ "ProjectName": t.Ref("ValidateProject"), "PrimarySource": "Source", }, RunOrder=1, ), ], )) if is_manual_approvals: deploy_stage = codepipeline.Stages( Name="Deploy", Actions=[ codepipeline.Actions( InputArtifacts=[ codepipeline.InputArtifacts(Name="Source"), codepipeline.InputArtifacts( Name="ParameterisedSource"), ], Name="DryRun", ActionTypeId=codepipeline.ActionTypeId( Category="Build", Owner="AWS", Version="1", Provider="CodeBuild", ), OutputArtifacts=[ codepipeline.OutputArtifacts(Name="DryRunProject") ], Configuration={ "ProjectName": t.Ref("DryRunProject"), "PrimarySource": "Source", }, RunOrder=1, ), codepipeline.Actions( ActionTypeId=codepipeline.ActionTypeId( Category="Approval", Owner="AWS", Version="1", Provider="Manual", ), Configuration={ "NotificationArn": t.Ref("DryRunNotificationTopic"), "CustomData": "Approve when you are happy with the dry run.", }, Name="DryRunApproval", RunOrder=2, ), codepipeline.Actions( InputArtifacts=[ codepipeline.InputArtifacts(Name="Source"), codepipeline.InputArtifacts( Name="ParameterisedSource"), ], Name="Deploy", ActionTypeId=codepipeline.ActionTypeId( Category="Build", Owner="AWS", Version="1", Provider="CodeBuild", ), OutputArtifacts=[ codepipeline.OutputArtifacts(Name="DeployProject") ], Configuration={ "ProjectName": t.Ref("DeployProject"), "PrimarySource": "Source", }, RunOrder=3, ), ], ) else: deploy_stage = codepipeline.Stages( Name="Deploy", Actions=[ codepipeline.Actions( InputArtifacts=[ codepipeline.InputArtifacts(Name="Source"), codepipeline.InputArtifacts( Name="ParameterisedSource"), ], Name="Deploy", ActionTypeId=codepipeline.ActionTypeId( Category="Build", 
Owner="AWS", Version="1", Provider="CodeBuild", ), OutputArtifacts=[ codepipeline.OutputArtifacts(Name="DeployProject") ], Configuration={ "ProjectName": t.Ref("DeployProject"), "PrimarySource": "Source", "EnvironmentVariables": '[{"name":"EXECUTION_ID","value":"#{codepipeline.PipelineExecutionId}","type":"PLAINTEXT"}]', }, RunOrder=1, ), ], ) stages.append(deploy_stage) pipeline = template.add_resource( codepipeline.Pipeline( "Pipeline", RoleArn=t.GetAtt("PipelineRole", "Arn"), Stages=stages, Name=t.Sub("${AWS::StackName}-pipeline"), ArtifactStore=codepipeline.ArtifactStore( Type="S3", Location=t.Sub( "sc-puppet-pipeline-artifacts-${AWS::AccountId}-${AWS::Region}" ), ), RestartExecutionOnUpdate=True, )) if is_github: template.add_resource( codepipeline.Webhook( "Webhook", AuthenticationConfiguration=codepipeline. WebhookAuthConfiguration(SecretToken=t.Join( "", [ "{{resolve:secretsmanager:", source.get("Configuration").get( "SecretsManagerSecret"), ":SecretString:SecretToken}}", ], )), Filters=[ codepipeline.WebhookFilterRule( JsonPath="$.ref", MatchEquals="refs/heads/" + source.get("Configuration").get("Branch"), ) ], Authentication="GITHUB_HMAC", TargetPipeline=t.Ref(pipeline), TargetAction="Source", Name=t.Sub("${AWS::StackName}-webhook"), TargetPipelineVersion=t.GetAtt(pipeline, "Version"), RegisterWithThirdParty="true", )) deploy_project_build_spec = dict( version=0.2, phases=dict( install={ "runtime-versions": dict(python="3.7"), "commands": [ f"pip install {puppet_version}" if "http" in puppet_version else f"pip install aws-service-catalog-puppet=={puppet_version}", ], }, pre_build={ "commands": [ "servicecatalog-puppet --info expand --parameter-override-file $CODEBUILD_SRC_DIR_ParameterisedSource/parameters.yaml manifest.yaml", ] }, build={ "commands": [ "servicecatalog-puppet --info deploy --num-workers ${NUM_WORKERS} manifest-expanded.yaml", ] }, ), artifacts=dict( name="DeployProject", files=[ "manifest-expanded.yaml", "results/*/*", "output/*/*", 
"exploded_results/*/*", "tasks.log", ], ), ) deploy_project_args = dict( Name="servicecatalog-puppet-deploy", ServiceRole=t.GetAtt(deploy_role, "Arn"), Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}), Artifacts=codebuild.Artifacts(Type="CODEPIPELINE", ), TimeoutInMinutes=480, Environment=codebuild.Environment( ComputeType=t.Ref(deploy_environment_compute_type_parameter), Image="aws/codebuild/standard:4.0", Type="LINUX_CONTAINER", EnvironmentVariables=[ { "Type": "PARAMETER_STORE", "Name": "NUM_WORKERS", "Value": t.Ref(num_workers_ssm_parameter), }, { "Type": "PARAMETER_STORE", "Name": "SPOKE_EXECUTION_MODE_DEPLOY_ENV", "Value": constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME, }, ] + deploy_env_vars, ), Source=codebuild.Source( Type="CODEPIPELINE", BuildSpec=yaml.safe_dump(deploy_project_build_spec), ), Description="deploys out the products to be deployed", ) deploy_project = template.add_resource( codebuild.Project("DeployProject", **deploy_project_args)) deploy_project_build_spec["phases"]["build"]["commands"] = [ "servicecatalog-puppet --info dry-run manifest-expanded.yaml" ] deploy_project_build_spec["artifacts"]["name"] = "DryRunProject" deploy_project_args["Name"] = "servicecatalog-puppet-dryrun" deploy_project_args[ "Description"] = "dry run of servicecatalog-puppet-dryrun" deploy_project_args["Source"] = codebuild.Source( Type="CODEPIPELINE", BuildSpec=yaml.safe_dump(deploy_project_build_spec), ) dry_run_project = template.add_resource( codebuild.Project("DryRunProject", **deploy_project_args)) bootstrap_project = template.add_resource( codebuild.Project( "BootstrapProject", Name="servicecatalog-puppet-bootstrap-spokes-in-ou", ServiceRole=t.GetAtt("DeployRole", "Arn"), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS"), TimeoutInMinutes=60, Environment=codebuild.Environment( ComputeType="BUILD_GENERAL1_SMALL", Image="aws/codebuild/standard:4.0", 
Type="LINUX_CONTAINER", EnvironmentVariables=[ { "Type": "PLAINTEXT", "Name": "OU_OR_PATH", "Value": "CHANGE_ME" }, { "Type": "PLAINTEXT", "Name": "IAM_ROLE_NAME", "Value": "OrganizationAccountAccessRole", }, { "Type": "PLAINTEXT", "Name": "IAM_ROLE_ARNS", "Value": "" }, ], ), Source=codebuild.Source( BuildSpec= "version: 0.2\nphases:\n install:\n runtime-versions:\n python: 3.7\n commands:\n - pip install aws-service-catalog-puppet\n build:\n commands:\n - servicecatalog-puppet bootstrap-spokes-in-ou $OU_OR_PATH $IAM_ROLE_NAME $IAM_ROLE_ARNS\nartifacts:\n files:\n - results/*/*\n - output/*/*\n name: BootstrapProject\n", Type="NO_SOURCE", ), Description="Bootstrap all the accounts in an OU", )) template.add_resource( codebuild.Project( "BootstrapASpokeProject", Name="servicecatalog-puppet-bootstrap-spoke", ServiceRole=t.GetAtt("DeployRole", "Arn"), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS"), TimeoutInMinutes=60, Environment=codebuild.Environment( ComputeType="BUILD_GENERAL1_SMALL", Image="aws/codebuild/standard:4.0", Type="LINUX_CONTAINER", EnvironmentVariables=[ { "Type": "PLAINTEXT", "Name": "PUPPET_ACCOUNT_ID", "Value": t.Sub("${AWS::AccountId}"), }, { "Type": "PLAINTEXT", "Name": "ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN", "Value": "CHANGE_ME", }, { "Type": "PLAINTEXT", "Name": "ASSUMABLE_ROLE_IN_ROOT_ACCOUNT", "Value": "CHANGE_ME", }, ], ), Source=codebuild.Source( BuildSpec=yaml.safe_dump( dict( version=0.2, phases=dict( install=install_spec, build={ "commands": [ "servicecatalog-puppet bootstrap-spoke-as ${PUPPET_ACCOUNT_ID} ${ASSUMABLE_ROLE_IN_ROOT_ACCOUNT} ${ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN}" ] }, ), )), Type="NO_SOURCE", ), Description="Bootstrap given account as a spoke", )) cloud_formation_events_queue = template.add_resource( sqs.Queue( "CloudFormationEventsQueue", QueueName="servicecatalog-puppet-cloudformation-events", Tags=t.Tags.from_dict( 
**{"ServiceCatalogPuppet:Actor": "Framework"}), )) cloud_formation_events_queue_policy = template.add_resource( sqs.QueuePolicy( "CloudFormationEventsQueuePolicy", Queues=[t.Ref(cloud_formation_events_queue)], PolicyDocument={ "Id": "AllowSNS", "Version": "2012-10-17", "Statement": [{ "Sid": "allow-send-message", "Effect": "Allow", "Principal": { "AWS": "*" }, "Action": ["sqs:SendMessage"], "Resource": "*", "Condition": { "ArnEquals": { "aws:SourceArn": t.Sub( "arn:${AWS::Partition}:sns:*:${AWS::AccountId}:servicecatalog-puppet-cloudformation-regional-events" ) } }, }], }, )) spoke_deploy_bucket = template.add_resource( s3.Bucket( "SpokeDeployBucket", PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration( IgnorePublicAcls=True, BlockPublicPolicy=True, BlockPublicAcls=True, RestrictPublicBuckets=True, ), BucketEncryption=s3.BucketEncryption( ServerSideEncryptionConfiguration=[ s3.ServerSideEncryptionRule( ServerSideEncryptionByDefault=s3. ServerSideEncryptionByDefault(SSEAlgorithm="AES256")) ]), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), BucketName=t.Sub("sc-puppet-spoke-deploy-${AWS::AccountId}"), VersioningConfiguration=s3.VersioningConfiguration( Status="Enabled"), )) caching_bucket = template.add_resource( s3.Bucket( "CachingBucket", PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration( BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True, ), BucketEncryption=s3.BucketEncryption( ServerSideEncryptionConfiguration=[ s3.ServerSideEncryptionRule( ServerSideEncryptionByDefault=s3. 
ServerSideEncryptionByDefault(SSEAlgorithm="AES256")) ]), Tags=t.Tags.from_dict( **{"ServiceCatalogPuppet:Actor": "Framework"}), BucketName=t.Sub( "sc-puppet-caching-bucket-${AWS::AccountId}-${AWS::Region}"), VersioningConfiguration=s3.VersioningConfiguration( Status="Enabled"), )) template.add_output( t.Output( "CloudFormationEventsQueueArn", Value=t.GetAtt(cloud_formation_events_queue, "Arn"), )) template.add_output(t.Output("Version", Value=t.GetAtt(param, "Value"))) template.add_output( t.Output("ManualApprovalsParam", Value=t.GetAtt(manual_approvals_param, "Value"))) template.add_resource( ssm.Parameter( "DefaultTerraformVersion", Type="String", Name=constants.DEFAULT_TERRAFORM_VERSION_PARAMETER_NAME, Value=constants.DEFAULT_TERRAFORM_VERSION_VALUE, )) return template
def create_cdk_pipeline(name, version, product_name, product_version, template_config, p) -> t.Template:
    """Build the CloudFormation template that deploys a CDK app as a product.

    The generated template contains:
      * parameters controlling how ``cdk deploy`` runs (compute type, image,
        toolkit stack name, extra args, IAM role path/name, helper Lambda ARNs);
      * one parameter per CloudFormation parameter found in the synthesized CDK
        artifact templates, so callers can pass values straight through;
      * two CodeBuild projects ("CDKDeploy" / "CDKDestroy") that fetch the
        zipped CDK source from S3 and run ``cdk deploy`` / ``cdk destroy``;
      * two ``Custom::DeployDetails`` custom resources that trigger the deploy
        and then read the stack outputs back out of S3.

    :param name: provisioner type name, embedded in the template description.
    :param version: provisioner version, embedded in the template description.
    :param product_name: Service Catalog product name (becomes env var NAME).
    :param product_version: product version (becomes env var VERSION).
    :param template_config: dict whose "Configuration" key may override
        runtime-versions and add extra install commands.
    :param p: directory containing the ``{PREFIX}`` folder with the CDK
        ``manifest.json`` and synthesized artifact templates
        (presumably the output of ``cdk synth`` — TODO confirm).
    :return: the assembled troposphere Template.
    """
    # NOTE: description deliberately embeds a JSON blob so other tooling can
    # recognise what produced this stack.
    description = f"""Builds a cdk pipeline {{"version": "{constants.VERSION}", "framework": "servicecatalog-factory", "role": "product-pipeline", "type": "{name}", "version": "{version}"}}"""
    configuration = template_config.get("Configuration")
    template = t.Template(Description=description)

    # Fixed inputs: account wiring plus knobs for how CDK deploy should run.
    template.add_parameter(t.Parameter("PuppetAccountId", Type="String"))
    template.add_parameter(
        t.Parameter("CDKSupportCDKDeployRequireApproval",
                    Type="String",
                    Default="never"))
    template.add_parameter(
        t.Parameter("CDKSupportCDKComputeType",
                    Type="String",
                    Default="BUILD_GENERAL1_SMALL"))
    template.add_parameter(
        t.Parameter("CDKSupportCDKDeployImage",
                    Type="String",
                    Default="aws/codebuild/standard:4.0"))
    template.add_parameter(
        t.Parameter("CDKSupportCDKToolkitStackName",
                    Type="String",
                    Default="CDKToolKit"))
    template.add_parameter(
        t.Parameter(
            "CDKSupportCDKDeployExtraArgs",
            Type="String",
            Default="",
            Description="Extra args to pass to CDK deploy",
        ))
    # ARNs of the helper Lambdas backing the custom resources below.
    template.add_parameter(
        t.Parameter(
            "CDKSupportStartCDKDeployFunctionArn",
            Type="String",
        ))
    template.add_parameter(
        t.Parameter(
            "CDKSupportGetOutputsForGivenCodebuildIdFunctionArn",
            Type="String",
        ))
    template.add_parameter(
        t.Parameter("CDKSupportIAMRolePaths",
                    Type="String",
                    Default="/servicecatalog-factory-cdk-support/"))
    template.add_parameter(
        t.Parameter("CDKSupportCDKDeployRoleName",
                    Type="String",
                    Default="CDKDeployRoleName"))

    # Mirror every parameter/output of the synthesized CDK stacks onto this
    # template so values flow in and stack outputs flow back out.
    manifest = json.loads(open(f"{p}/{PREFIX}/manifest.json", "r").read())
    cdk_deploy_parameter_args = list()
    for artifact_name, artifact in manifest.get("artifacts", {}).items():
        if artifact.get("type") == "aws:cloudformation:stack":
            artifact_template_file_path = artifact.get("properties",
                                                       {}).get("templateFile")
            assert (
                artifact_template_file_path
            ), f"Could not find template file in manifest.json for {artifact_name}"
            artifact_template = json.loads(
                open(f"{p}/{PREFIX}/{artifact_template_file_path}", "r").read())
            for parameter_name, parameter_details in artifact_template.get(
                    "Parameters", {}).items():
                # Parameters may repeat across artifacts; only add each once,
                # but always forward it on the cdk deploy command line.
                if template.parameters.get(parameter_name) is None:
                    template.add_parameter(
                        t.Parameter(parameter_name, **parameter_details))
                cdk_deploy_parameter_args.append(
                    f"--parameters {artifact_name}:{parameter_name}=${{{parameter_name}}}"
                )
            for output_name, output_details in artifact_template.get(
                    "Outputs", {}).items():
                if template.outputs.get(output_name) is None:
                    # Re-point each CDK stack output at the custom resource
                    # that fetches the real values after deployment.
                    new_output = dict(**output_details)
                    new_output["Value"] = t.GetAtt("GetOutputsCode", output_name)
                    template.add_output(t.Output(output_name, **new_output))
    cdk_deploy_parameter_args = " ".join(cdk_deploy_parameter_args)

    # Minimal custom-resource wrapper; props intentionally open-ended because
    # arbitrary keyword properties are passed straight through to the Lambda.
    class DeployDetailsCustomResource(cloudformation.AWSCustomObject):
        resource_type = "Custom::DeployDetails"
        props = dict()

    # Build-time runtimes/commands, overridable via template_config.
    runtime_versions = dict(
        nodejs=constants.BUILDSPEC_RUNTIME_VERSIONS_NODEJS_DEFAULT, )
    if configuration.get("runtime-versions"):
        runtime_versions.update(configuration.get("runtime-versions"))
    extra_commands = list(configuration.get("install", {}).get("commands", []))

    # CodeBuild project that downloads the product zip and runs `cdk deploy`.
    # All CHANGE_ME env vars are overridden per-build by the StartCDKDeploy
    # custom resource / its Lambda.
    template.add_resource(
        codebuild.Project(
            "CDKDeploy",
            Name=t.Sub("${AWS::StackName}-deploy"),
            Description='Run CDK deploy for given source code',
            ServiceRole=t.Sub(
                "arn:aws:iam::${AWS::AccountId}:role${CDKSupportIAMRolePaths}${CDKSupportCDKDeployRoleName}"
            ),
            Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS", ),
            Environment=codebuild.Environment(
                ComputeType=t.Ref('CDKSupportCDKComputeType'),
                EnvironmentVariables=[
                    codebuild.EnvironmentVariable(
                        Name="CDK_DEPLOY_REQUIRE_APPROVAL",
                        Type="PLAINTEXT",
                        Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="CDK_DEPLOY_EXTRA_ARGS",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(
                        Name="CDK_TOOLKIT_STACK_NAME",
                        Type="PLAINTEXT",
                        Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="UId",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="PUPPET_ACCOUNT_ID",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="REGION",
                                                  Type="PLAINTEXT",
                                                  Value=t.Ref("AWS::Region")),
                    codebuild.EnvironmentVariable(
                        Name="CDK_DEPLOY_PARAMETER_ARGS",
                        Type="PLAINTEXT",
                        Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="ON_COMPLETE_URL",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="NAME",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="VERSION",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                ],
                Image=t.Ref('CDKSupportCDKDeployImage'),
                Type="LINUX_CONTAINER",
            ),
            Source=codebuild.Source(
                Type="NO_SOURCE",
                # t.Sub is needed because the buildspec itself references
                # ${AWS::AccountId} for the outputs bucket.
                BuildSpec=t.Sub(
                    yaml.safe_dump(
                        dict(
                            version=0.2,
                            phases=dict(
                                install={
                                    "runtime-versions": runtime_versions,
                                    "commands": [
                                        "aws s3 cp s3://sc-factory-artifacts-$PUPPET_ACCOUNT_ID-$REGION/CDK/1.0.0/$NAME/$VERSION/$NAME-$VERSION.zip $NAME-$VERSION.zip",
                                        "unzip $NAME-$VERSION.zip",
                                        "npm install",
                                    ] + extra_commands
                                },
                                build={
                                    "commands": [
                                        "npm run cdk deploy -- --toolkit-stack-name $CDK_TOOLKIT_STACK_NAME --require-approval $CDK_DEPLOY_REQUIRE_APPROVAL --outputs-file scf_outputs.json $CDK_DEPLOY_EXTRA_ARGS $CDK_DEPLOY_PARAMETER_ARGS '*'",
                                        "aws s3 cp scf_outputs.json s3://sc-cdk-artifacts-${AWS::AccountId}/CDK/1.0.0/$NAME/$VERSION/scf_outputs-$CODEBUILD_BUILD_ID.json",
                                    ]
                                },
                            ),
                            artifacts={
                                "name": "CDKDeploy",
                                "files": ["*", "**/*"],
                            },
                        ))),
            ),
            TimeoutInMinutes=480,
        ))

    # Mirror project that runs `cdk destroy` on stack deletion.
    template.add_resource(
        codebuild.Project(
            "CDKDestroy",
            Name=t.Sub("${AWS::StackName}-destroy"),
            Description='Run CDK destroy for given source code',
            ServiceRole=t.Sub(
                "arn:aws:iam::${AWS::AccountId}:role${CDKSupportIAMRolePaths}${CDKSupportCDKDeployRoleName}"
            ),
            Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS", ),
            Environment=codebuild.Environment(
                ComputeType=t.Ref('CDKSupportCDKComputeType'),
                EnvironmentVariables=[
                    codebuild.EnvironmentVariable(
                        Name="CDK_DEPLOY_REQUIRE_APPROVAL",
                        Type="PLAINTEXT",
                        Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="CDK_DEPLOY_EXTRA_ARGS",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(
                        Name="CDK_TOOLKIT_STACK_NAME",
                        Type="PLAINTEXT",
                        Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="UId",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="PUPPET_ACCOUNT_ID",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="REGION",
                                                  Type="PLAINTEXT",
                                                  Value=t.Ref("AWS::Region")),
                    codebuild.EnvironmentVariable(
                        Name="CDK_DEPLOY_PARAMETER_ARGS",
                        Type="PLAINTEXT",
                        Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="ON_COMPLETE_URL",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="NAME",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="VERSION",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                ],
                Image=t.Ref('CDKSupportCDKDeployImage'),
                Type="LINUX_CONTAINER",
            ),
            Source=codebuild.Source(
                Type="NO_SOURCE",
                BuildSpec=t.Sub(
                    yaml.safe_dump(
                        dict(
                            version=0.2,
                            phases=dict(
                                install={
                                    "runtime-versions": runtime_versions,
                                    "commands": [
                                        "aws s3 cp s3://sc-factory-artifacts-$PUPPET_ACCOUNT_ID-$REGION/CDK/1.0.0/$NAME/$VERSION/$NAME-$VERSION.zip $NAME-$VERSION.zip",
                                        "unzip $NAME-$VERSION.zip",
                                        "npm install",
                                    ] + extra_commands
                                },
                                build={
                                    "commands": [
                                        "npm run cdk destroy -- --toolkit-stack-name $CDK_TOOLKIT_STACK_NAME --force --ignore-errors '*'"
                                    ]
                                },
                            ),
                            artifacts={
                                "name": "CDKDeploy",
                                "files": ["*", "**/*"],
                            },
                        ))),
            ),
            TimeoutInMinutes=480,
        ))

    # Custom resource that actually kicks off the deploy/destroy builds; the
    # uppercase property names become env var overrides for the builds above.
    template.add_resource(
        DeployDetailsCustomResource(
            "StartCDKDeploy",
            DependsOn=["CDKDeploy", "CDKDestroy"],
            ServiceToken=t.Ref("CDKSupportStartCDKDeployFunctionArn"),
            CreateUpdateProject=t.Ref("CDKDeploy"),
            DeleteProject=t.Ref("CDKDestroy"),
            CDK_DEPLOY_EXTRA_ARGS=t.Ref("CDKSupportCDKDeployExtraArgs"),
            CDK_TOOLKIT_STACK_NAME=t.Ref("CDKSupportCDKToolkitStackName"),
            PUPPET_ACCOUNT_ID=t.Ref("PuppetAccountId"),
            # t.Sub resolves the ${ParameterName} placeholders assembled above.
            CDK_DEPLOY_PARAMETER_ARGS=t.Sub(cdk_deploy_parameter_args),
            CDK_DEPLOY_REQUIRE_APPROVAL=t.Ref(
                "CDKSupportCDKDeployRequireApproval"),
            NAME=product_name,
            VERSION=product_version,
        ))

    # Second custom resource: reads the scf_outputs JSON written by the deploy
    # build, feeding the mirrored Outputs via GetAtt("GetOutputsCode", ...).
    template.add_resource(
        DeployDetailsCustomResource(
            "GetOutputsCode",
            DependsOn=[
                "StartCDKDeploy",
            ],
            ServiceToken=t.Ref(
                "CDKSupportGetOutputsForGivenCodebuildIdFunctionArn"),
            CodeBuildBuildId=t.GetAtt("StartCDKDeploy", "BuildId"),
            BucketName=t.Sub("sc-cdk-artifacts-${AWS::AccountId}"),
            ObjectKeyPrefix=t.Sub(
                f"CDK/1.0.0/{product_name}/{product_version}"),
        ))
    return template
# Discover which availability zones are currently usable in the target region;
# the rest of the script spreads subnets/instances across them.
ec2 = boto3.client('ec2', region_name=args.region)
response = ec2.describe_availability_zones(Filters=[{
    'Name': 'state',
    'Values': ['available']
}])
azs = [az['ZoneName'] for az in response['AvailabilityZones']]

# Parse the requested VPC CIDR up front so an invalid value fails fast.
vpccidrblock = ipaddress.ip_network(args.cidr)

# Start the CloudFormation template and declare its input parameters.
t = troposphere.Template()
t.add_version("2010-09-09")

_ami_parameter = troposphere.Parameter(
    "ECSOptimizedAMI",
    Description="EC2 AMI for use with ECS",
    Type="String",
    Default="ami-275ffe31",
)
ami = t.add_parameter(_ami_parameter)

_cluster_parameter = troposphere.Parameter(
    "ECSClusterName",
    Description="ECS Cluster name for spot fleet instances to join",
    Type="String",
)
ECSCluster = t.add_parameter(_cluster_parameter)

_ssh_key_parameter = troposphere.Parameter(
    "SSHKeyPair",
    Description="SSH Key registered in this region",
    Type="AWS::EC2::KeyPair::KeyName",
)
SSHKeyPair = t.add_parameter(_ssh_key_parameter)
def register_resources_template(self, template):
    """Register the lambda Function into the troposphere template.

    If this function requires a custom Role, register it too.  Also registers
    the supporting resources: an S3 object-version parameter for the code
    bundle, a Version custom resource, and a "current" Alias pointing at it.
    """
    role = self.get_role()
    depends_on = []
    if isinstance(role, iam.Role):
        # Custom role: add it to the template and reference it by Arn;
        # otherwise `role` is presumably already an ARN/Ref usable directly.
        template.add_resource(role)
        depends_on.append(role.name)
        role = troposphere.GetAtt(role, 'Arn')

    # Parameter carrying the S3 object version of the uploaded code zip.
    template.add_parameter(
        troposphere.Parameter(
            utils.valid_cloudformation_name(self.name, "s3version"),
            Type="String",
        ))

    extra = {}
    if self.settings.get('vpc'):
        vpc = self.project.get_resource('vpc::{}'.format(
            self.settings.get('vpc')))
        # HACK: mutate the private `_type` of the Refs so the referenced
        # parameters are typed as lists, as VPCConfig requires.
        if isinstance(vpc.settings['security-groups'], troposphere.Ref):
            vpc.settings[
                'security-groups']._type = 'List<AWS::EC2::SecurityGroup::Id>'
        if isinstance(vpc.settings['subnet-ids'], troposphere.Ref):
            vpc.settings['subnet-ids']._type = 'List<AWS::EC2::Subnet::Id>'
        extra['VpcConfig'] = awslambda.VPCConfig(
            SecurityGroupIds=vpc.settings['security-groups'],
            SubnetIds=vpc.settings['subnet-ids'])

    # The Lambda function itself; code comes from the shared CodeBucket.
    function = template.add_resource(
        awslambda.Function(self.in_project_cf_name,
                           DependsOn=depends_on,
                           Code=awslambda.Code(
                               S3Bucket=troposphere.Ref("CodeBucket"),
                               S3Key=self.get_bucket_key(),
                               S3ObjectVersion=troposphere.Ref(
                                   utils.valid_cloudformation_name(
                                       self.name, "s3version")),
                           ),
                           Description=self.settings.get(
                               'description', ''),
                           Handler=self.get_handler(),
                           MemorySize=self.get_memory(),
                           Role=role,
                           Runtime=self.get_runtime(),
                           Timeout=self.get_timeout(),
                           **extra))

    # Pick the version-publishing helper lambda: contrib lambdas use the raw
    # version resource by Arn; everything else goes through its ":current"
    # alias by Ref.
    lambda_version = 'lambda:contrib_lambdas:version'
    lambda_ref = troposphere.GetAtt(self.project.reference(lambda_version),
                                    'Arn')
    if not self.in_project_name.startswith('lambda:contrib_lambdas:'):
        lambda_version = '{}:current'.format(lambda_version)
        lambda_ref = troposphere.Ref(
            self.project.reference(lambda_version))

    # Custom resource that publishes a new Lambda version for this code blob.
    version = template.add_resource(
        LambdaVersion.create_with(
            utils.valid_cloudformation_name(self.name, "Version"),
            DependsOn=[
                self.project.reference(lambda_version), function.name
            ],
            lambda_arn=lambda_ref,
            FunctionName=troposphere.Ref(function),
            S3ObjectVersion=troposphere.Ref(
                utils.valid_cloudformation_name(self.name, "s3version")),
        ))

    # Stable "current" alias tracking the freshly published version.
    alias = template.add_resource(
        awslambda.Alias(
            self.current_alias_cf_name,
            DependsOn=[version.name],
            FunctionName=troposphere.Ref(function),
            FunctionVersion=troposphere.GetAtt(version, "Version"),
            Name="current",
        ))

    # Optionally expose the alias ARN as a stack output for the CLI.
    if self._get_true_false('cli-output', 't'):
        template.add_output([
            troposphere.Output(
                utils.valid_cloudformation_name("Clioutput",
                                                self.in_project_name),
                Value=troposphere.Ref(alias),
            )
        ])
# CLI options: where the Lambda sources live and where build artefacts go.
parser.add_argument('--lambda-dir',
                    help='Where to look for defined Lambda functions',
                    default='lambda_code')
parser.add_argument(
    '--output-dir',
    help='Where to place the Zip-files and the CloudFormation template',
    default='output')
args = parser.parse_args()

template = Template("Custom Resources")

# Parameters describing the S3 location holding the pre-built ZIP files.
_bucket_parameter = troposphere.Parameter(
    "S3Bucket",
    Type=constants.STRING,
    Description="S3 bucket where the ZIP files are located",
)
s3_bucket = template.add_parameter(_bucket_parameter)
template.set_parameter_label(s3_bucket, "S3 bucket")
lambda_code_location = template.add_parameter_to_group(
    s3_bucket, "Lambda code location")

_path_parameter = troposphere.Parameter(
    "S3Path",
    Type=constants.STRING,
    Default='',
    Description=
    "Path prefix where the ZIP files are located (should probably end with a '/')",
)
s3_path = template.add_parameter(_path_parameter)
template.set_parameter_label(s3_path, "S3 path")
if is_mail_record(record): mail_records.append(record) elif is_serverless_website(record): serverless_website_records.append(record) elif is_miscellaneous(record): miscellaneous_records.append(record) else: standard_records.append(record) template_name = "".format(zone_name_without_dot) deletion_change_record_set_name = "{}-deletions-change-record-set.json".format( zone_name_without_dot) creations_change_record_set_name = "{}-creations-change-record-set.json".format( zone_name_without_dot) zone_name = troposphere.Parameter('HostedZoneName') zone_name.Type = 'String' zone_name.Default = zone_name_with_dot with open("{}-{}-template.yaml".format(zone_name_without_dot, "mail"), 'w') as f: f.write( create_template_yaml(zone_name, get_simple_resources(mail_records, zone_name))) with open( "{}-{}-template.yaml".format(zone_name_without_dot, "serverless-website"), 'w') as f: f.write( create_template_yaml(
def get_template(version: str, default_region_value) -> t.Template:
    """Build the per-region bootstrap template for the puppet master account.

    Creates the home-region and version SSM parameters, the pipeline artifact
    bucket, and the regional SNS topic that forwards CloudFormation events to
    the central SQS queue in the home region.

    :param version: framework version recorded in the regional SSM parameter.
    :param default_region_value: the home region of the puppet installation.
    :return: the assembled troposphere Template.
    """
    description = f"""Bootstrap template used to bootstrap a region of ServiceCatalog-Puppet master {{"version": "{version}", "framework": "servicecatalog-puppet", "role": "bootstrap-master-region"}}"""
    template = t.Template(Description=description)

    # Inputs, defaulted so the template can also be deployed stand-alone.
    version_param = template.add_parameter(
        t.Parameter("Version", Default=version, Type="String"))
    home_region_param = template.add_parameter(
        t.Parameter("DefaultRegionValue",
                    Default=default_region_value,
                    Type="String"))

    # Record the home region and the regional framework version in SSM.
    template.add_resource(
        ssm.Parameter(
            "DefaultRegionParam",
            Name="/servicecatalog-puppet/home-region",
            Type="String",
            Value=t.Ref(home_region_param),
            Tags={"ServiceCatalogPuppet:Actor": "Framework"},
        ))
    regional_version_ssm = template.add_resource(
        ssm.Parameter(
            "Param",
            Name="service-catalog-puppet-regional-version",
            Type="String",
            Value=t.Ref(version_param),
            Tags={"ServiceCatalogPuppet:Actor": "Framework"},
        ))

    # Artifact bucket: versioned, SSE-AES256 encrypted, all public access off.
    sse_aes256 = s3.BucketEncryption(ServerSideEncryptionConfiguration=[
        s3.ServerSideEncryptionRule(
            ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
                SSEAlgorithm="AES256"))
    ])
    block_all_public_access = s3.PublicAccessBlockConfiguration(
        BlockPublicAcls=True,
        BlockPublicPolicy=True,
        IgnorePublicAcls=True,
        RestrictPublicBuckets=True,
    )
    template.add_resource(
        s3.Bucket(
            "PipelineArtifactBucket",
            BucketName=t.Sub(
                "sc-puppet-pipeline-artifacts-${AWS::AccountId}-${AWS::Region}"
            ),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status="Enabled"),
            BucketEncryption=sse_aes256,
            PublicAccessBlockConfiguration=block_all_public_access,
            Tags=t.Tags({"ServiceCatalogPuppet:Actor": "Framework"}),
        ))

    # Regional topic relaying CloudFormation events to the home-region queue.
    events_topic = template.add_resource(
        sns.Topic(
            "RegionalProductTopic",
            DisplayName="servicecatalog-puppet-cloudformation-regional-events",
            TopicName="servicecatalog-puppet-cloudformation-regional-events",
            Subscription=[
                sns.Subscription(
                    Endpoint=t.Sub(
                        "arn:${AWS::Partition}:sqs:${DefaultRegionValue}:${AWS::AccountId}:servicecatalog-puppet-cloudformation-events"
                    ),
                    Protocol="sqs",
                )
            ],
        ))

    template.add_output(
        t.Output("Version", Value=t.GetAtt(regional_version_ssm, "Value")))
    template.add_output(
        t.Output("RegionalProductTopic", Value=t.Ref(events_topic)))
    return template