def init_template(description=None):
    """Create a fresh Template stamped with its generation timestamp.

    Parameters:
        description: optional description forwarded to ``Template``.

    Returns:
        The newly constructed ``Template`` instance.
    """
    generated_on = dt.utcnow().isoformat()
    tpl = Template(description)
    tpl.add_metadata({'GeneratedOn': generated_on})
    return tpl
def _generate_template(tms=1, within_vpc=False):
    """Build the Flink cluster CloudFormation template and return it as JSON.

    Creates one JobManager instance plus ``tms`` TaskManager instances, the
    SSH/JobManager/TaskManager security groups, and SSH/web-UI outputs.

    :param tms: number of TaskManager instances to create.
    :param within_vpc: when True, also create VPC/subnet resources and place
        instances inside them.
    :return: the template serialized via ``Template.to_json()``.
    """
    t = Template()
    t.add_description(FLINK_TEMPLATE_DESCRIPTION)
    t.add_version(FLINK_TEMPLATE_VERSION)
    # Record build time in the template metadata (locale-dependent format).
    t.add_metadata({'LastUpdated': datetime.datetime.now().strftime('%c')})
    # mappings
    mappings.add_mappings(t)
    # parameters
    parameters.add_parameters(t)
    # Networking handles stay None unless a VPC is requested; the helpers
    # below accept None and fall back to EC2-classic style placement.
    vpc = None
    subnet_pri = None
    subnet_pub = None
    if within_vpc:
        # networking resources
        vpc, subnet_pri, subnet_pub = _define_vpc(t)
    # security groups
    sg_ssh = t.add_resource(securitygroups.ssh(
        parameters.ssh_location, vpc))
    sg_jobmanager = t.add_resource(securitygroups.jobmanager(
        parameters.http_location, vpc))
    sg_taskmanager = t.add_resource(securitygroups.taskmanager(None, vpc))
    # Single JobManager (index 0) on the public subnet so it is reachable.
    jobmanager = t.add_resource(instances.jobmanager(
        0, [Ref(sg_ssh), Ref(sg_jobmanager)], within_vpc, subnet_pub))
    prefix = "JobManager00"
    t.add_output(outputs.ssh_to(jobmanager, prefix))
    # Flink dashboard listens on port 8081 of the JobManager.
    t.add_output(Output(
        "FlinkWebGui",
        Description="Flink web interface",
        Value=Join("", [
            'http://', GetAtt(jobmanager, "PublicDnsName"), ':8081'
        ])))
    # TaskManagers live on the private subnet; SSH access is proxied through
    # the JobManager acting as a bastion.
    for index in range(0, tms):
        i = t.add_resource(instances.taskmanager(
            index, jobmanager,
            [Ref(sg_ssh), Ref(sg_taskmanager)], within_vpc, subnet_pri))
        prefix = "TaskManager%2.2d" % index
        t.add_output(outputs.ssh_to(i, prefix, bastion=jobmanager))
    return t.to_json()
class Stack(object):
    """CloudFormation stack definition for an Ethereum node (VPN + VPC)."""

    def __init__(self):
        # Template skeleton with the standard format version.
        self.template = Template()
        self.template.add_version("2010-09-09")
        self.template.add_description("Creates resources for a Ethereum node")
        parameters = Parameters()
        vpn = Vpn(parameters=parameters)
        # FIX: dict.iteritems() is Python 2 only; dict.items() behaves the
        # same here and works on both Python 2 and 3.
        for key, value in Mappings().mappings.items():
            self.template.add_mapping(key, value)
        for param in parameters.values():
            self.template.add_parameter(param)
        for res in vpn.values():
            self.template.add_resource(res)
        # Console-only parameter grouping for the CloudFormation UI.
        self.template.add_metadata({
            "AWS::CloudFormation::Interface": {
                "ParameterGroups": [
                    {
                        "Label": {
                            "default": "EC2"
                        },
                        "Parameters": [
                            "InstanceImage", "InstanceKeyPair",
                            "InstanceStorageData", "InstanceStorageOS",
                            "InstanceType"
                        ]
                    },
                    {
                        "Label": {
                            "default": "VPC"
                        },
                        "Parameters": ["VPC"]
                    },
                ]
            }
        })
def generate(env, output):
    """Cloud Platform, Viaplay \n
    Simple program that generates the cloudformation to cloud environment context. \n
    Written by Felipe Ribeiro <*****@*****.**>, April 2019 \n
    Github: https://github.com/gohackfelipe/v-play
    """
    logging.info('Initial configurations to create the cloudformation file.')
    template = Template()
    template.add_description("Service VPC")
    logging.info('Adding description on template')
    template.add_metadata({
        "DependsOn": [],
        "Environment": env,
        "StackName": '{}-{}'.format(env, 'VPC'),
    })
    logging.info('Adding metadata on template')
    internet_gateway = template.add_resource(
        InternetGateway("InternetGateway",
                        Tags=Tags(Environment=env,
                                  Name='{}-{}'.format(env, 'InternetGateway'))))
    logging.info('Adding InternetGateway on template')
    vpc = template.add_resource(
        VPC('VPC',
            CidrBlock='10.0.0.0/16',
            EnableDnsHostnames="true",
            EnableDnsSupport="true",
            InstanceTenancy="default",
            Tags=Tags(Environment=env,
                      Name='{}-{}'.format(env, 'ServiceVPC'))))
    logging.info('Adding VPC on template')
    template.add_resource(
        VPCGatewayAttachment(
            "VpcGatewayAttachment",
            VpcId=Ref("VPC"),
            InternetGatewayId=Ref("InternetGateway"),
        ))
    logging.info('Adding VpcGatewayAttachment on template')
    network_acl = template.add_resource(
        NetworkAcl(
            'VpcNetworkAcl',
            VpcId=Ref(vpc),
            Tags=Tags(Environment=env,
                      Name='{}-{}'.format(env, 'NetworkAcl')),
        ))
    # FIX: log messages below previously misspelled "ACL" as "ALC".
    logging.info('Creating Network ACL on template')
    # Inbound: allow HTTPS (tcp/443) from anywhere.
    template.add_resource(
        NetworkAclEntry(
            'VpcNetworkAclInboundRule',
            NetworkAclId=Ref(network_acl),
            RuleNumber=100,
            Protocol='6',
            PortRange=PortRange(To='443', From='443'),
            Egress='false',
            RuleAction='allow',
            CidrBlock='0.0.0.0/0',
        ))
    logging.info('Adding Network ACL Inbound Rule on template')
    # Outbound: allow all TCP to anywhere.
    # NOTE(review): no PortRange is set here, so all TCP ports are allowed —
    # confirm this is intentional.
    template.add_resource(
        NetworkAclEntry(
            'VpcNetworkAclOutboundRule',
            NetworkAclId=Ref(network_acl),
            RuleNumber=200,
            Protocol='6',
            Egress='true',
            RuleAction='allow',
            CidrBlock='0.0.0.0/0',
        ))
    logging.info('Adding Network ACL Outbound Rule on template')
    # Outputs
    template.add_output([
        Output('InternetGateway', Value=Ref(internet_gateway)),
        Output('VPCID', Value=Ref(vpc))
    ])
    logging.info('Adding Output on template')
    # Print to stdout when no output path was supplied, otherwise write file.
    if not output:
        print(template.to_json())
        logging.info('Printing the cloudformation content on screen.')
    else:
        createFile(output, template.to_json())
def main():
    """Build and print the CloudFormation template for a ZFS-backed NFS server.

    Creates an IAM role/profile, NFS and SSH security groups, one EC2
    instance with conditional EBS volumes, a user-data driven wait
    condition, and the related parameters/outputs. Prints JSON to stdout.
    """
    t = Template()
    t.add_version("2010-09-09")
    # NOTE(review): the description mentions 10 EBS volumes but only six
    # block-device mappings (/dev/sdh../dev/sdm) are defined below — confirm.
    t.add_description(
        "Currently supporting RHEL/CentOS 7.5. Setup IAM role and security groups, "
        "launch instance, create/attach 10 EBS volumes, install/fix ZFS "
        "(http://download.zfsonlinux.org/epel/zfs-release.el7_5.noarch.rpm), "
        "create zfs RAID6 pool, setup NFS server, export NFS share")

    # Shell preamble for the instance user data; the wait-handle URL lets the
    # boot script signal NFSInstanceWaitCondition when setup finishes.
    # (Removed dead "InstUserData = list()" that was immediately overwritten.)
    InstUserData = [
        '#!/usr/bin/env bash\n',
        '\n',
        'set -x\n',
        '\n',
        '##exit 0\n',  # use this to disable all user-data and bring up files
        '\n',
        'zfs_pool_name="', Ref('ZfsPool'), '"\n',
        'zfs_mount_point="', Ref('ZfsMountPoint'), '"\n',
        'nfs_cidr_block="', Ref('NFSCidr'), '"\n',
        'nfs_opts="', Ref('NFSOpts'), '"\n',
        'my_wait_handle="', Ref('NFSInstanceWaitHandle'), '"\n',
        '\n',
    ]
    # Append the build script verbatim to the user data.
    with open('_include/Tropo_build_zfs_export_nfs.sh', 'r') as ud_file:
        InstUserData.extend(ud_file.readlines())

    # Console-only parameter grouping/labels for the CloudFormation UI.
    t.add_metadata({
        'AWS::CloudFormation::Interface': {
            'ParameterGroups': [{
                'Label': {'default': 'Instance Configuration'},
                'Parameters': [
                    "OperatingSystem", "VPCId", "Subnet", "UsePublicIp",
                    "CreateElasticIP", "EC2KeyName", "NFSInstanceType",
                    "SshAccessCidr", "ExistingSecurityGroup",
                    "ExistingPlacementGroup", "S3BucketName"
                ]
            }, {
                'Label': {'default': 'Storage Options - Required'},
                'Parameters': [
                    "RAIDLevel", "VolumeSize", "VolumeType",
                    "EBSVolumeType", "VolumeIops"
                ]
            }, {
                'Label': {'default': 'ZFS Pool and FS Options - Required'},
                'Parameters': ["ZfsPool", "ZfsMountPoint"]
            }, {
                'Label': {'default': 'NFS Options - Required'},
                'Parameters': ["NFSCidr", "NFSOpts"]
            }],
            'ParameterLabels': {
                'OperatingSystem': {'default': 'Operating System of AMI'},
                'VPCId': {'default': 'VPC ID'},
                'Subnet': {'default': 'Subnet ID'},
                'UsePublicIp': {'default': 'Assign a Public IP '},
                'CreateElasticIP': {'default': 'Create and use an EIP '},
                'EC2KeyName': {'default': 'EC2 Key Name'},
                'NFSInstanceType': {'default': 'Instance Type'},
                'SshAccessCidr': {'default': 'SSH Access CIDR Block'},
                'ExistingSecurityGroup': {
                    'default': 'OPTIONAL: Existing Security Group'},
                'ExistingPlacementGroup': {
                    'default': 'OPTIONAL: Existing Placement Group'},
                'S3BucketName': {'default': 'Optional S3 Bucket Name'},
                'RAIDLevel': {'default': 'RAID Level'},
                'VolumeSize': {'default': 'Volume size of the EBS vol'},
                'VolumeType': {'default': 'Volume type of the EBS vol'},
                'EBSVolumeType': {'default': 'Volume type of the EBS vol'},
                'VolumeIops': {'default': 'IOPS for each EBS vol (only for io1)'},
                'ZfsPool': {'default': 'ZFS pool name'},
                'ZfsMountPoint': {'default': 'Mount Point'},
                'NFSCidr': {'default': 'NFS CIDR block for mounts'},
                'NFSOpts': {'default': 'NFS options'},
            }
        }
    })

    # ---- Parameters (typos in user-facing strings fixed) ----
    EC2KeyName = t.add_parameter(
        Parameter(
            'EC2KeyName',
            Type="AWS::EC2::KeyPair::KeyName",
            Description="Name of an existing EC2 KeyPair to enable SSH access to the instance.",
            # FIX: was "valud"
            ConstraintDescription="REQUIRED: Must be a valid EC2 key pair"))
    OperatingSystem = t.add_parameter(
        Parameter('OperatingSystem',
                  Type="String",
                  Description="Operating System",
                  Default="centos7",
                  AllowedValues=["alinux2", "centos7", "rhel7"],
                  ConstraintDescription="Must be: alinux2, centos7, rhel7"))
    NFSInstanceType = t.add_parameter(
        Parameter(
            'NFSInstanceType',
            Type="String",
            Description="NFS instance type",
            Default="r4.16xlarge",
            # TODO(review): "c8.8xlarge" is not a known EC2 instance type —
            # confirm the intended value (c4.8xlarge?).
            AllowedValues=[
                "m4.16xlarge", "m4.10xlarge", "r4.16xlarge", "c8.8xlarge"
            ],
            # FIX: was "Must an EC2 ..."
            ConstraintDescription="Must be an EC2 instance type from the list"))
    VolumeType = t.add_parameter(
        Parameter(
            'VolumeType',
            Type="String",
            Description="Type of EBS volume",
            Default="EBS",
            AllowedValues=["EBS", "InstanceStore"],
            # FIX: was "has to EBS"
            ConstraintDescription="Volume type has to be EBS or InstanceStore"))
    EBSVolumeType = t.add_parameter(
        Parameter('EBSVolumeType',
                  Description="Type of EBS volumes to create",
                  Type="String",
                  Default="io1",
                  # FIX: was "a either"
                  ConstraintDescription="Must be either: io1, gp2, st1",
                  AllowedValues=["io1", "gp2", "st1"]))
    # Local renamed from misspelled "VolumelSize"; logical name is unchanged.
    VolumeSize = t.add_parameter(
        Parameter('VolumeSize',
                  Type="Number",
                  Default="500",
                  Description="Volume size in GB"))
    VolumeIops = t.add_parameter(
        Parameter('VolumeIops',
                  Type="Number",
                  Default="20000",
                  Description="IOPS for the EBS volume"))
    RAIDLevel = t.add_parameter(
        Parameter(
            'RAIDLevel',
            Description="RAID Level, currently only 6 (8+2p) is supported",
            Type="String",
            Default="0",
            AllowedValues=["0"],
            ConstraintDescription="Must be 0"))
    ZfsPool = t.add_parameter(
        Parameter('ZfsPool',
                  Description="ZFS pool name",
                  Type="String",
                  Default="v01"))
    ZfsMountPoint = t.add_parameter(
        Parameter(
            'ZfsMountPoint',
            Description="ZFS mount point, absolute path will be /pool_name/mount_point (e.g. /v01/testzfs)",
            Type="String",
            Default="testzfs"))
    VPCId = t.add_parameter(
        Parameter('VPCId',
                  Type="AWS::EC2::VPC::Id",
                  Description="VPC Id for this instance"))
    ExistingPlacementGroup = t.add_parameter(
        Parameter('ExistingPlacementGroup',
                  Type="String",
                  Description="OPTIONAL: Existing placement group"))
    Subnet = t.add_parameter(
        Parameter('Subnet',
                  Type="AWS::EC2::Subnet::Id",
                  Description="Subnet IDs"))
    ExistingSecurityGroup = t.add_parameter(
        Parameter(
            'ExistingSecurityGroup',
            Type="AWS::EC2::SecurityGroup::Id",
            Description="OPTIONAL: Choose an existing Security Group ID, e.g. sg-abcd1234"))
    UsePublicIp = t.add_parameter(
        Parameter(
            'UsePublicIp',
            Type="String",
            Description="Should a public IP address be given to the instance",
            Default="true",
            # FIX: was "true/talse"
            ConstraintDescription="true/false",
            AllowedValues=["true", "false"]))
    CreateElasticIP = t.add_parameter(
        Parameter(
            'CreateElasticIP',
            Type="String",
            # FIX: was "Elasic ... assinged"
            Description="Create an Elastic IP address, that will be assigned to an instance",
            Default="true",
            ConstraintDescription="true/false",
            AllowedValues=["true", "false"]))
    S3BucketName = t.add_parameter(
        Parameter('S3BucketName',
                  Type="String",
                  Description="S3 bucket to allow this instance read access."))
    SshAccessCidr = t.add_parameter(
        Parameter(
            'SshAccessCidr',
            Type="String",
            Description="CIDR Block for SSH access, default 0.0.0.0/0",
            Default="0.0.0.0/0",
            AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))
    NFSCidr = t.add_parameter(
        Parameter(
            'NFSCidr',
            Type="String",
            Description="CIDR for NFS Security Group and NFS clients, to allow all access use 0.0.0.0/0",
            Default="10.0.0.0/16",
            AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))
    NFSOpts = t.add_parameter(
        Parameter(
            'NFSOpts',
            Description="NFS export options",
            Type="String",
            Default="(rw,async,no_root_squash,wdelay,no_subtree_check,no_acl)"))
    # NOTE(review): this parameter is never referenced by any resource below.
    VarLogMessagesFile = t.add_parameter(
        Parameter(
            'VarLogMessagesFile',
            Type="String",
            Description="S3 bucket and file name for log CloudWatch config (e.g. s3://jouser-logs/var-log-message.config)"))

    # ---- IAM: role granting read access to the optional S3 bucket ----
    RootRole = t.add_resource(
        iam.Role("RootRole",
                 AssumeRolePolicyDocument={
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 },
                 Policies=[
                     iam.Policy(PolicyName="s3bucketaccess",
                                PolicyDocument={
                                    "Statement": [{
                                        "Effect": "Allow",
                                        "Action": ["s3:GetObject"],
                                        "Resource": {
                                            "Fn::Join": [
                                                "",
                                                [
                                                    "arn:aws:s3:::",
                                                    {"Ref": "S3BucketName"},
                                                    "/*"
                                                ]
                                            ]
                                        }
                                    }, {
                                        "Effect": "Allow",
                                        "Action": ["s3:ListBucket"],
                                        "Resource": {
                                            "Fn::Join": [
                                                "",
                                                [
                                                    "arn:aws:s3:::",
                                                    {"Ref": "S3BucketName"}
                                                ]
                                            ]
                                        }
                                    }],
                                }),
                 ]))

    # ---- Security groups (FIX: "Secuirty" typos in descriptions) ----
    NFSSecurityGroup = t.add_resource(
        SecurityGroup("NFSSecurityGroup",
                      VpcId=Ref(VPCId),
                      GroupDescription="NFS Security group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="2049",
                              ToPort="2049",
                              CidrIp=Ref(NFSCidr),
                          ),
                      ]))
    SshSecurityGroup = t.add_resource(
        SecurityGroup("SshSecurityGroup",
                      VpcId=Ref(VPCId),
                      GroupDescription="SSH Security group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="22",
                              ToPort="22",
                              CidrIp=Ref(SshAccessCidr),
                          ),
                      ]))
    RootInstanceProfile = t.add_resource(
        InstanceProfile("RootInstanceProfile", Roles=[Ref(RootRole)]))
    EIPAddress = t.add_resource(
        EIP('EIPAddress', Domain='vpc', Condition="create_elastic_ip"))

    # NOTE(review): defined but never attached to any resource — confirm.
    tags = Tags(Name=Ref("AWS::StackName"))

    NFSInstance = t.add_resource(
        ec2.Instance(
            'NFSInstance',
            ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"),
                              Ref(OperatingSystem)),
            KeyName=Ref(EC2KeyName),
            InstanceType=Ref(NFSInstanceType),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=If("not_existing_sg",
                                [Ref(NFSSecurityGroup), Ref(SshSecurityGroup)],
                                [
                                    Ref(NFSSecurityGroup),
                                    Ref(SshSecurityGroup),
                                    Ref(ExistingSecurityGroup)
                                ]),
                    AssociatePublicIpAddress=Ref(UsePublicIp),
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(Subnet))
            ],
            IamInstanceProfile=Ref(RootInstanceProfile),
            PlacementGroupName=Ref(ExistingPlacementGroup),
            # Six identical EBS mappings (/dev/sdh../dev/sdm) generated in a
            # loop instead of six copy-pasted literals.
            BlockDeviceMappings=If(
                'vol_type_ebs',
                [
                    ec2.BlockDeviceMapping(
                        DeviceName="/dev/sd%s" % letter,
                        Ebs=ec2.EBSBlockDevice(
                            VolumeSize=Ref(VolumeSize),
                            DeleteOnTermination="True",
                            Iops=Ref(VolumeIops),
                            VolumeType=Ref(EBSVolumeType)))
                    for letter in "hijklm"
                ],
                {"Ref": "AWS::NoValue"},
            ),
            UserData=Base64(Join('', InstUserData)),
        ))
    # End of NFSInstance

    # NOTE(review): no "alinux2" AMI entries exist although OperatingSystem
    # allows "alinux2" — FindInMap would fail for that choice; confirm.
    t.add_mapping(
        'AWSRegionAMI', {
            "ap-northeast-1": {"centos7": "ami-8e8847f1", "rhel7": "ami-6b0d5f0d"},
            "ap-northeast-2": {"centos7": "ami-bf9c36d1", "rhel7": "ami-3eee4150"},
            "ap-south-1": {"centos7": "ami-1780a878", "rhel7": "ami-5b673c34"},
            "ap-southeast-1": {"centos7": "ami-8e0205f2", "rhel7": "ami-76144b0a"},
            "ap-southeast-2": {"centos7": "ami-d8c21dba", "rhel7": "ami-67589505"},
            "ca-central-1": {"centos7": "ami-e802818c", "rhel7": "ami-49f0762d"},
            "eu-central-1": {"centos7": "ami-dd3c0f36", "rhel7": "ami-c86c3f23"},
            "eu-west-1": {"centos7": "ami-3548444c", "rhel7": "ami-7c491f05"},
            "eu-west-2": {"centos7": "ami-00846a67", "rhel7": "ami-7c1bfd1b"},
            "eu-west-3": {"centos7": "ami-262e9f5b", "rhel7": "ami-5026902d"},
            "sa-east-1": {"centos7": "ami-cb5803a7", "rhel7": "ami-b0b7e3dc"},
            "us-east-1": {"centos7": "ami-9887c6e7", "rhel7": "ami-6871a115"},
            "us-east-2": {"centos7": "ami-9c0638f9", "rhel7": "ami-03291866"},
            "us-west-1": {"centos7": "ami-4826c22b", "rhel7": "ami-18726478"},
            "us-west-2": {"centos7": "ami-3ecc8f46", "rhel7": "ami-28e07e50"}
        })

    t.add_condition("not_existing_sg", Equals(Ref(ExistingSecurityGroup), ""))
    t.add_condition("vol_type_ebs", Equals(Ref(VolumeType), "EBS"))
    # FIX: UsePublicIp/CreateElasticIP AllowedValues are lowercase
    # "true"/"false", but these conditions compared against "True" — they
    # could never evaluate true, so the EIP and public-IP outputs never
    # materialized.
    t.add_condition("Has_Public_Ip", Equals(Ref(UsePublicIp), "true"))
    t.add_condition("Has_Bucket", Not(Equals(Ref(S3BucketName), "")))
    t.add_condition("create_elastic_ip", Equals(Ref(CreateElasticIP), "true"))

    # Wait condition signalled by the user-data script once setup completes.
    nfswaithandle = t.add_resource(
        WaitConditionHandle('NFSInstanceWaitHandle'))
    nfswaitcondition = t.add_resource(
        WaitCondition("NFSInstanceWaitCondition",
                      Handle=Ref(nfswaithandle),
                      Timeout="1500",
                      DependsOn="NFSInstance"))

    # ---- Outputs ----
    t.add_output([
        Output("ElasticIP",
               Description="Elastic IP address for the instance",
               Value=Ref(EIPAddress),
               Condition="create_elastic_ip")
    ])
    t.add_output([
        Output("InstanceID", Description="Instance ID", Value=Ref(NFSInstance))
    ])
    t.add_output([
        Output("InstancePrivateIP", Value=GetAtt('NFSInstance', 'PrivateIp'))
    ])
    t.add_output([
        Output("InstancePublicIP",
               Value=GetAtt('NFSInstance', 'PublicIp'),
               Condition="Has_Public_Ip")
    ])
    t.add_output([
        Output("ElasticPublicIP",
               Value=GetAtt('NFSInstance', 'PublicIp'),
               Condition="create_elastic_ip")
    ])
    t.add_output([
        Output("PrivateMountPoint",
               Description="Mount point on private network",
               Value=Join("", [GetAtt('NFSInstance', 'PrivateIp'), ":/fs1"]))
    ])
    t.add_output([
        Output("ExampleClientMountCommands",
               Description="Example commands to mount NFS on the clients",
               Value=Join("", [
                   "sudo mkdir /nfs1; sudo mount ",
                   GetAtt('NFSInstance', 'PrivateIp'), ":/",
                   Ref("ZfsPool"), "/",
                   Ref("ZfsMountPoint"), " /nfs1"
               ]))
    ])
    t.add_output([
        Output("S3BucketName",
               Value=Ref("S3BucketName"),
               Condition="Has_Bucket")
    ])

    print(t.to_json(indent=2))
Default="ebs-snapshots.zip", Description="Name of the zip file inside the S3 bucket", )) template.add_metadata({ "AWS::CloudFormation::Interface": { "ParameterGroups": [ { "Label": { "default": "Basic configuration" }, "Parameters": [ "S3BucketParameter", "SourceZipParameter", ] }, ], "ParameterLabels": { "S3BucketParameter": { "default": "Name of S3 bucket" }, "SourceZipParameter": { "default": "Name of ZIP file" }, } } }) # Role for Lambda lambda_role = template.add_resource( iam.Role(
from troposphere import (Tags, Template) # Common tags to tag AWS Resources with common_tags = dict(Environment='ApiDev', Owner='Foo industries', Service='ServiceVPC', VPC='Dev') # The CloudFormation template for "stack_dev" template = Template(Description="Service VPC - used for services") template.add_metadata({ "Build": "development", "DependsOn": [], "Environment": "ApiDev", "Revision": "develop", "StackName": "ApiDev-Dev-VPC", "StackType": "InfrastructureResource", "TemplateBucket": "cfn-apidev", "TemplateName": "VPC", "TemplatePath": "ApiDev/Dev/VPC" }) def tags(resource_name): """ Returns common tags along with custom name tag for each kind of resource type. Parameters: resource_name: str Name of resource
from troposphere.ec2 import VPCGatewayAttachment, NetworkInterfaceProperty, SecurityGroup, SubnetRouteTableAssociation, RouteTable, Route, Subnet, InternetGateway, VPC, EIP
from troposphere.iam import AccessKey, User, Policy
from troposphere.route53 import RecordSetType
import awacs.aws

t = Template()

# Header
t.add_description("Perforce Helix Deployment for EC2")
# Header End

# Metadata
t.add_metadata({
    "Comments": "Perforce Helix Deployment for EC2",
    "LastUpdated": "Sep 14th 2016",
    "UpdatedBy": "Graeme Rich",
    "Version": "2016.1",
})
# Metadata End

# Conditions
# FIX: the original assignments ended with a trailing comma, which wrapped
# each add_condition() return value in a 1-tuple instead of binding the
# condition name itself.
ProdNotify = t.add_condition("ProdNotify",
                             Equals(Ref("EnvironmentType"), "Production"))
# FIX: "Evaluationn" typo meant this condition could never match
# (assumes the EnvironmentType parameter uses "Evaluation" — confirm
# against the parameter's AllowedValues).
EvalNotify = t.add_condition("EvalNotify",
                             Equals(Ref("EnvironmentType"), "Evaluation"))
DevNotify = t.add_condition("DevNotify",
                            Equals(Ref("EnvironmentType"), "Development"))
# Export json
template.add_metadata({ 'AWS::CloudFormation::Interface': { 'ParameterLabels': { # Project email.title: { 'default': 'Notifications' }, # Django CDN hosted_zone_id.title: { 'default': 'Hosted Zone ID' }, domain.title: { 'default': 'Domain' }, certificate.title: { 'default': 'ACM certificate' }, # Static assets CDN static_domain.title: { 'default': 'Static Domain Name' }, static_path.title: { 'default': 'Static Path' }, # Media assets CDN media_domain.title: { 'default': 'Media Domain Name' }, media_path.title: { 'default': 'Media Path' }, media_pattern.title: { 'default': 'Media Pattern' }, }, 'ParameterGroups': [ { 'Label': { 'default': 'Project' }, 'Parameters': [ email.title, ] }, { 'Label': { 'default': 'Django CDN' }, 'Parameters': [ hosted_zone_id.title, domain.title, certificate.title, ] }, { 'Label': { 'default': 'Static Assets CDN' }, 'Parameters': [ static_domain.title, static_path.title, ] }, { 'Label': { 'default': 'Media Assets CDN' }, 'Parameters': [ media_domain.title, media_path.title, media_pattern.title, ] }, ] } })
class Stack(object):
    """CloudFormation stack for the AWS account environment (VPC, shared
    services, network elements)."""

    def __init__(self):
        # Template skeleton.
        self.template = Template()
        self.template.add_version("2010-09-09")
        self.template.add_description("Create resources for the \
AWS account environment. Includes VPC, shared services and network elements")
        # Project-defined collections of parameters, VPC resources and
        # outputs; each behaves like a mapping (has .values()).
        parameters = Parameters()
        vpc = Vpc(parameters=parameters)
        outputs = Outputs(vpc=vpc, parameters=parameters)
        for resource in parameters.values():
            self.template.add_parameter(resource)
        # Condition to specify whether NAT gateways should be deployed (NAT GWs cost $$$)
        self.template.add_condition(
            "DeployNATGateways",
            Equals(Ref(parameters.DeployNATGateways), "true"))
        # Condition to specify whether to create a production route53 zone
        self.template.add_condition(
            "CreateProductionZone",
            Equals(Ref(parameters.Environment), "prod"))
        for resource in vpc.values():
            self.template.add_resource(resource)
        for res in outputs.values():
            self.template.add_output(res)
        # Console-only parameter grouping for the CloudFormation UI.
        self.template.add_metadata({
            "AWS::CloudFormation::Interface": {
                "ParameterGroups": [
                    {
                        "Label": {
                            "default": "Availability Zones"
                        },
                        "Parameters": ["AvailabilityZoneA", "AvailabilityZoneB"]
                    },
                    {
                        "Label": {
                            "default": "Environment"
                        },
                        "Parameters": ["Environment"]
                    },
                    {
                        "Label": {
                            "default": "Domain"
                        },
                        "Parameters": ["DomainName"]
                    },
                    {
                        "Label": {
                            "default": "NAT Gateway"
                        },
                        "Parameters": ["DeployNATGateways"]
                    },
                    {
                        "Label": {
                            "default": "VPC"
                        },
                        "Parameters": ["VPCCIDR"]
                    },
                    {
                        "Label": {
                            "default": "Private Subnets"
                        },
                        "Parameters": [
                            "GeneralPrivateSubnetACIDR",
                            "GeneralPrivateSubnetBCIDR"
                        ]
                    },
                    {
                        "Label": {
                            "default": "Shared Services Public Subnets"
                        },
                        "Parameters": [
                            "SharedServicesPublicSubnetACIDR",
                            "SharedServicesPublicSubnetBCIDR"
                        ]
                    },
                    {
                        "Label": {
                            "default": "Shared Services Private Subnets"
                        },
                        "Parameters": [
                            "SharedServicesPrivateSubnetACIDR",
                            "SharedServicesPrivateSubnetBCIDR"
                        ]
                    },
                    {
                        "Label": {
                            "default": "Load Balancer Subnets"
                        },
                        "Parameters": ["LBSubnetACIDR", "LBSubnetBCIDR"]
                    },
                ]
            }
        })
revision = env else: print( "Invalid environment, exiting. Please use any of these: {envs}".format( envs=environments)) sys.exit() # Description template.add_description("Service VPC - used for services") template.add_metadata({ "Build": h.getBuildAndRevision(env)[0], "DependsOn": [], "Environment": "Api{e}".format(e=env), "Revision": h.getBuildAndRevision(env)[1], "StackName": "Api{e}-{e}-VPC".format(e=env), "StackType": "InfrastructureResource", "TemplateBucket": "cfn-api{el}".format(el=envl), "TemplateName": "VPC", "TemplatePath": "Api{e}/{e}/VPC".format(e=env) }) # Outputs template.add_output([ Output( "BastionSG", Value=Ref("BastionSG"), ), Output( "CloudWatchAlarmTopic", Value=Ref("CloudWatchAlarmTopic"),
from troposphere import Template


def stack_name():
    """Return the fixed CloudFormation stack name for the backup manager."""
    # Simplified: the original bound a local variable only to return it.
    return "backup-manager-stack"


template = Template()
template.add_version('2010-09-09')
template.add_description("Automated ec2 backups with lambda")
template.add_metadata({
    "Comments": "",
    # FIX: was the malformed date "Sep 17st 2017".
    "LastUpdated": "Sep 17th 2017",
    "UpdatedBy": "Matej Ferenc",
    "Version": "V1.0",
})
from troposphere.constants import ( IMAGE_ID, SECURITY_GROUP_ID, SUBNET_ID, ) import troposphere.iam as iam import troposphere.policies as policies import troposphere.s3 as s3 OUTPUTFILE = "CloudFormation.json" t = Template() t.add_description("CloudFormation template for Strongarm Cronjob setup") t.add_metadata({ "Comments": "Made with troposphere", "LastUpdated": datetime.now().strftime('%Y-%m-%d'), "Version": "V0.1", }) # Parameters securityGroup = t.add_parameter( Parameter( "SecurityGroup", Description= "The security group for the region this stack is running in", Type=SECURITY_GROUP_ID, ConstraintDescription="The id of the default security group in this " "region to enable communication between instances", Default="sg-51530134")) imageId = t.add_parameter( Parameter(
def __setup_template(self): """ Produces a valid template instance which can then be print as json or yaml """ template = Template() template.add_description("Service VPC - used for services") template.add_metadata({ "Build": "development", "DependsOn": [], "Environment": "ApiDev", "Revision": "develop", "StackName": "ApiDev-Dev-VPC", "StackType": "InfrastructureResource", "TemplateBucket": "cfn-apidev", "TemplateName": "VPC", "TemplatePath": "ApiDev/Dev/VPC" }) vpc = template.add_resource( ec2.VPC( "VPC", CidrBlock="10.0.0.0/16", EnableDnsHostnames="true", EnableDnsSupport="true", InstanceTenancy="default", Tags=self.__get_tags("ServiceVPC"), ) ) instance_sg = template.add_resource( ec2.SecurityGroup( "BastionSG", GroupDescription="Used for source/dest rules", Tags=self.__get_tags("VPC-Bastion-SG"), VpcId=Ref( vpc ) ), ) cw_alarm_topic = template.add_resource( Topic( "CloudWatchAlarmTopic", TopicName="ApiDev-Dev-CloudWatchAlarms", ) ) dhcp_options = template.add_resource( ec2.DHCPOptions( "DhcpOptions", DomainName=Join( "", [ Ref("AWS::Region"), ".compute.internal" ] ), DomainNameServers=["AmazonProvidedDNS"], Tags=self.__get_tags("DhcpOptions"), ) ) gateway = template.add_resource( ec2.InternetGateway( "InternetGateway", Tags=self.__get_tags("InternetGateway") ) ) nat_emergency_topic = template.add_resource( Topic( "NatEmergencyTopic", TopicName="ApiDev-Dev-NatEmergencyTopic", ) ) vpc_dhcp_options_assoc = template.add_resource( ec2.VPCDHCPOptionsAssociation( "VpcDhcpOptionsAssociation", DhcpOptionsId=Ref( dhcp_options ), VpcId=Ref( vpc ) ) ) vpc_gw_attachment = template.add_resource( ec2.VPCGatewayAttachment( "VpcGatewayAttachment", InternetGatewayId=Ref( gateway ), VpcId=Ref( vpc ) ) ) vpc_network_acl = template.add_resource( ec2.NetworkAcl( "VpcNetworkAcl", Tags=self.__get_tags("NetworkAcl"), VpcId=Ref( vpc ) ) ) vpc_network_acl_rules = template.add_resource([ ec2.NetworkAclEntry( "VpcNetworkAclInboundRulePublic443", CidrBlock="0.0.0.0/0", Egress="false", 
NetworkAclId=Ref( vpc_network_acl ), PortRange=ec2.PortRange( From="443", To="443", ), Protocol="6", RuleAction="allow", RuleNumber=20001 ), ec2.NetworkAclEntry( "VpcNetworkAclInboundRulePublic80", CidrBlock="0.0.0.0/0", Egress="false", NetworkAclId=Ref( vpc_network_acl ), PortRange=ec2.PortRange( From="80", To="80", ), Protocol="6", RuleAction="allow", RuleNumber=20000 ), ec2.NetworkAclEntry( "VpcNetworkAclOutboundRule", CidrBlock="0.0.0.0/0", Egress="true", NetworkAclId=Ref( vpc_network_acl ), Protocol="-1", RuleAction="allow", RuleNumber=30000 ), ec2.NetworkAclEntry( "VpcNetworkAclSsh", CidrBlock="127.0.0.1/32", Egress="false", NetworkAclId=Ref( vpc_network_acl ), PortRange=ec2.PortRange( From="22", To="22", ), Protocol="6", RuleAction="allow", RuleNumber=10000 ) ]) template.add_output([ Output( "BastionSG", Value=Ref(instance_sg) ), Output( "CloudWatchAlarmTopic", Value=Ref(cw_alarm_topic) ), Output( "InternetGateway", Value=Ref(gateway) ), Output( "NatEmergencyTopicARN", Value=Ref(nat_emergency_topic) ), Output( "VPCID", Value=Ref(vpc) ), Output( "VPCName", Value=Ref("AWS::StackName") ), Output( "VpcNetworkAcl", Value=Ref(vpc_network_acl) ) ]) return template
def main():
    """Assemble the "Example Server" template (VPC, ELB, DB, EC2) and print
    its JSON to stdout."""
    template = Template()
    template.add_description("Example Server")
    # FIX: dict.iteritems() is Python 2 only; dict.items() behaves the same
    # here and also works on Python 3.
    for key, value in Mappings().mappings.items():
        template.add_mapping(key, value)
    parameters = Parameters()
    for param in parameters.values():
        template.add_parameter(param)
    # Console-only parameter grouping/labels for the CloudFormation UI.
    template.add_metadata({
        "AWS::CloudFormation::Interface": {
            "ParameterGroups": [
                {
                    "Label": {
                        "default": "Required parameters."
                    },
                    "Parameters": [
                        "DBPassword",
                        "KeyPair",
                    ]
                },
                {
                    "Label": {
                        "default": "Advanced: Database and instance"
                    },
                    "Parameters": [
                        "DBInstanceType", "DBStorageSize",
                        "DBBackupRetention", "EC2InstanceType"
                    ]
                },
            ],
            "ParameterLabels": {
                "DBPassword": {
                    "default": "Choose a database password"
                },
                "DBStorageSize": {
                    "default": "Database storage (advanced)"
                },
                "DBBackupRetention": {
                    "default": "How long to keep backups (advanced)"
                },
                "DBInstanceType": {
                    "default": "Database instance class (advanced)"
                },
                "KeyPair": {
                    "default": "Choose a key pair"
                },
                "EC2InstanceType": {
                    "default": "Instance class (advanced)"
                },
            }
        }
    })
    # Each helper behaves like a mapping of logical name -> resource.
    vpc = VPC()
    for res in vpc.values():
        template.add_resource(res)
    elb = LoadBalancer(vpc=vpc)
    for res in elb.values():
        template.add_resource(res)
    db = Database(parameters=parameters, vpc=vpc, loadbalancer=elb)
    for res in db.values():
        template.add_resource(res)
    ec2 = EC2(parameters=parameters, vpc=vpc, loadbalancer=elb)
    for res in ec2.values():
        template.add_resource(res)
    template.add_output(
        Output("LoadBalancerDNSName",
               Value=GetAtt(elb.load_balancer, "DNSName")))
    print(template.to_json())
t.add_metadata({ 'AWS::CloudFormation::Interface': { 'ParameterGroups': [ { 'Label': { 'default': 'Container', }, 'Parameters': [ container_name.title, container_port.title, family.title, scr_hostname.title, ecr.title, image_name.title, image_tag.title, service_path.title, health_check_path.title, autoscaling_max.title, autoscaling_min.title, listener_priority.title, ] }, { 'Label': { 'default': 'Dependent stacks', }, 'Parameters': [ alb_stack.title, encrypt_lambda_stack.title, ecs_stack.title, network_stack.title, ] }, { 'Label': { 'default': 'Optional', }, 'Parameters': [ certificate_arn.title, service_host.title, ] }, ] } })
t.add_metadata({ "AWS::CloudFormation::Interface": { "ParameterGroups": [ { "Label": { "default": "StorReduce Configuration" }, "Parameters": [ "KeyPairName", "StorReducePassword", "ShardsNum", "ReplicaShardsNum", "InstanceType", "MonitorInstanceType", "BucketName", "NumSRRHosts", "StorReduceLicense", # "InvokeSSLCert", # "SSLCertificateId", # "DomainName", # "ValidationDomainName", ] }, { "Label": { "default": "VPC Network Configuration" }, "Parameters": [ "NumberOfAZs", "VpcId", "VPCCIDR", "RemoteAccessCIDR", "PrivateSubnetsToSpan", "PublicSubnetsToSpan", ] }, { "Label": { "default": "AWS Quick Start Configuration" }, "Parameters": ["QSS3BucketName", "QSS3KeyPrefix"] } ], "ParameterLabels": { "PrivateSubnetsToSpan": { "default": "VPC Private Subnets" }, "PublicSubnetsToSpan": { "default": "VPC Public Subnets" }, "VpcId": { "default": "VPC ID" }, "StorReducePassword": { "default": "StorReduce Password" }, "ShardsNum": { "default": "Number of Shards" }, "ReplicaShardsNum": { "default": "Number of Replica Shards" }, "StorReduceLicense": { "default": "StorReduce license" }, # "InvokeSSLCert": { # "default": "Invoke SSL Cert" # }, # "SSLCertificateId": { # "default": "SSL Certificate ID" # }, # "DomainName": { # "default": "Domain Name" # }, # "ValidationDomainName": { # "default": "Validation Domain Name" # }, "NumSRRHosts": { "default": "Number of StorReduce servers" }, "BastionAMIOS": { "default": "Bastion AMI Operating System" }, "BastionInstanceType": { "default": "Bastion Instance Type" }, "KeyPairName": { "default": "Key Pair Name" }, "InstanceType": { "default": "Instance Type" }, "MonitorInstanceType": { "default": "Monitor Instance Type" }, "BucketName": { "default": "Bucket Name" }, "NumberOfAZs": { "default": "Number of Availability Zones" }, "NumBastionHosts": { "default": "Number of Bastion Hosts" }, "QSS3BucketName": { "default": "Quick Start S3 Bucket Name" }, "QSS3KeyPrefix": { "default": "Quick Start S3 Key Prefix" }, "RemoteAccessCIDR": { "default": 
"External Allowed Access CIDR" }, "VPCCIDR": { "default": "VPC CIDR" } } } })
t.add_metadata({ 'AWS::CloudFormation::Interface': { 'ParameterGroups': [{ 'Label': { 'default': 'General project configuration' }, 'Parameters': ['accountparameter', 'regionparameter', 'projectnameparameter'] }, { 'Label': { 'default': 'Encryption' }, 'Parameters': ['projectkmskeyparameter'] }, { 'Label': { 'default': 'Input and output s3 buckets for training, testing, and evaultion data.' }, 'Parameters': ['inputbucketparameter', 'outputbucketparameter'] }, { 'Label': { 'default': 'CI/CD Pipeline information' }, 'Parameters': [ 'pipelinenameparameter', 'reponameparameter', 'mldockerregistrynameparameter' ] }, { 'Label': { 'default': 'Lambda function information' }, 'Parameters': ['lambdafunctionbucketparameter', 'loglevelparameter'] }], 'ParameterLabels': { 'accountparameter': { 'default': 'Account ID' }, 'regionparameter': { 'default': 'Region' }, 'projectnameparameter': { 'default': 'Project name' }, 'projectkmskeyparameter': { 'default': 'KMS key name' }, 'inputbucketparameter': { 'default': 'Model input bucket name' }, 'outputbucketparameter': { 'default': 'Model output bucket name' }, 'pipelinenameparameter': { 'default': 'Name of the CodePipeline pipeline' }, 'reponameparameter': { 'default': 'Name of the CodeCommit repo' }, 'mldockerregistrynameparameter': { 'default': 'Name of the ECR registry' }, 'lambdafunctionbucketparameter': { 'default': 'Name of the S3 bucket that contains the lambda function zip file called sageDispatch.zip.' }, 'loglevelparameter': { 'default': 'The Lambda logging level to use for this function. Default is set to Warning.' } } } })
"SourceZipParameter", Type="String", Default="ebs-snapshots.zip", Description="Name of the zip file inside the S3 bucket", )) template.add_metadata({ "AWS::CloudFormation::Interface": { "ParameterGroups": [ { "Label": { "default": "Basic configuration" }, "Parameters": [ "S3BucketParameter", "SourceZipParameter", ] }, ], "ParameterLabels": { "S3BucketParameter": {"default": "Name of S3 bucket"}, "SourceZipParameter": {"default": "Name of ZIP file"}, } } }) # Role for Lambda lambda_role = template.add_resource(iam.Role( "LambdaRole", AssumeRolePolicyDocument=aws.Policy( Statement=[
description = "Service VPC - used for services" template.add_description(description) # Set Metadata and add to template metadata = { "Build": "development", "DependsOn": [], "Environment": "ApiDev", "Revision": "develop", "StackName": "ApiDev-Dev-VPC", "StackType": "InfrastructureResource", "TemplateBucket": "cfn-apidev", "TemplateName": "VPC", "TemplatePath": "ApiDev/Dev/VPC" } template.add_metadata(metadata) # Create Parameters and add to template parameter_environment = Parameter("Environment", Type="String", Default="ApiDev", Description="Environment of the VPC") template.add_parameter(parameter_environment) parameter_owner = Parameter( "Owner", Type="String", Default="Foo industries", Description="Business unit that owns the resources in the stack") template.add_parameter(parameter_owner)
from troposphere import Template

# Minimal example: attach a free-form Metadata section to a template.
t = Template()
t.set_description("Example to show adding a Metadata section to the template")

# Arbitrary bookkeeping keys; CloudFormation stores but does not interpret them.
revision_info = {
    "Comments": "Initial Draft",
    "LastUpdated": "Jan 1st 2015",
    "UpdatedBy": "First Last",
    "Version": "V1.0",
}
t.add_metadata(revision_info)

print(t.to_json())
# Console display names for the network and database parameters.
iface_network_labels = {
    vpc.title: {'default': 'VPC'},
    allow_cidr.title: {'default': 'Allow'},
}
iface_database_labels = {
    database_engine.title: {'default': 'Engine'},
    database_instance_class.title: {'default': 'Instance Class'},
    database_master_username.title: {'default': 'Master Username'},
    database_master_password.title: {'default': 'Master Password'},
}

# Console grouping: network parameters first, then the database service.
iface_groups = [
    {
        'Label': {'default': 'Network'},
        'Parameters': [
            vpc.title,
            allow_cidr.title,
        ],
    },
    {
        'Label': {'default': 'Database Service'},
        'Parameters': [
            database_master_username.title,
            database_master_password.title,
            database_instance_class.title,
            database_engine.title,
        ],
    },
]

template.add_metadata({
    'AWS::CloudFormation::Interface': {
        'ParameterLabels': {**iface_network_labels, **iface_database_labels},
        'ParameterGroups': iface_groups,
    }
})
def build_template(sierrafile):
    """Build the complete CloudFormation template for a deployment.

    Creates a VPC with two public subnets behind a network load balancer,
    an ECS cluster backed by an auto-scaling group of EC2 hosts, one ECS
    service per entry in ``sierrafile.services``, and — for services with
    ``pipeline.enable`` — a GitHub-triggered CodePipeline/CodeBuild CI/CD
    pipeline.  Returns the troposphere ``Template`` object.
    """
    template = Template()
    template.add_version('2010-09-09')
    # Console parameter-group layout is derived from the sierrafile extras.
    template.add_metadata(build_interface(sierrafile.extra_params))

    parameters = AttrDict(

        # Network Parameters
        vpc_cidr=template.add_parameter(Parameter(
            'VpcCidr',
            Type='String',
            Default='192.172.0.0/16',
        )),
        subnet1_cidr=template.add_parameter(Parameter(
            'Subnet1Cidr',
            Type='String',
            Default='192.172.1.0/24',
        )),
        subnet2_cidr=template.add_parameter(Parameter(
            'Subnet2Cidr',
            Type='String',
            Default='192.172.2.0/24',
        )),

        # ECS Parameters
        cluster_size=template.add_parameter(Parameter(
            'ClusterSize',
            Type='Number',
            Default=2,
        )),
        instance_type=template.add_parameter(Parameter(
            'InstanceType',
            Type='String',
            Default='t2.medium'
        )),
        key_name=template.add_parameter(Parameter(
            'KeyName',
            Type='AWS::EC2::KeyPair::KeyName',
        )),
        image_id=template.add_parameter(Parameter(
            'ImageId',
            Type='AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>',
            Default=(
                '/aws/service/ecs/optimized-ami'
                '/amazon-linux/recommended/image_id'
            ),
            Description=(
                'An SSM parameter that resolves to a valid AMI ID.'
                ' This is the AMI that will be used to create ECS hosts.'
                ' The default is the current recommended ECS-optimized AMI.'
            )
        )),

        # Other Parameters
        github_token=template.add_parameter(Parameter(
            'GitHubToken',
            Type='String',
            NoEcho=True,
        )),
    )

    # Environment Variable Parameters: one NoEcho parameter per extra env
    # var so secret values are supplied at deploy time, not baked in.
    # NOTE(review): env_var_name is unused here — presumably consumed via
    # sierrafile.env_vars in the container definitions below; confirm.
    for env_var_param, env_var_name in sierrafile.extra_params:
        template.add_parameter(Parameter(
            env_var_param,
            Type='String',
            NoEcho=True,
        ))

    # Resource Declarations

    #
    # Network: VPC + internet gateway + default route + two public subnets.
    #

    network_vpc = template.add_resource(VPC(
        'NetworkVpc',
        CidrBlock=Ref(parameters.vpc_cidr),
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    network_ig = template.add_resource(InternetGateway(
        'NetworkInternetGateway',
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    vpc_attach = template.add_resource(VPCGatewayAttachment(
        'NetworkInternetGatewayAttachment',
        InternetGatewayId=Ref(network_ig),
        VpcId=Ref(network_vpc),
    ))

    route_table = template.add_resource(RouteTable(
        'NetworkRouteTable',
        VpcId=Ref(network_vpc),
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    template.add_resource(Route(
        'NetworkDefaultRoute',
        # The default route is only valid once the gateway is attached.
        DependsOn=[vpc_attach.title],
        RouteTableId=Ref(route_table),
        DestinationCidrBlock='0.0.0.0/0',
        GatewayId=Ref(network_ig),
    ))

    subnet1 = template.add_resource(Subnet(
        'NetworkSubnet1',
        VpcId=Ref(network_vpc),
        AvailabilityZone=Select(0, GetAZs()),
        MapPublicIpOnLaunch=True,
        CidrBlock=Ref(parameters.subnet1_cidr),
        Tags=Tags(Name=Sub('${AWS::StackName} (Public)')),
    ))

    subnet2 = template.add_resource(Subnet(
        'NetworkSubnet2',
        VpcId=Ref(network_vpc),
        AvailabilityZone=Select(1, GetAZs()),
        MapPublicIpOnLaunch=True,
        CidrBlock=Ref(parameters.subnet2_cidr),
        Tags=Tags(Name=Sub('${AWS::StackName} (Public)')),
    ))

    template.add_resource(SubnetRouteTableAssociation(
        'NetworkSubnet1RouteTableAssociation',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet1),
    ))

    template.add_resource(SubnetRouteTableAssociation(
        'NetworkSubnet2RouteTableAssociation',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet2),
    ))

    # Network load balancer spanning both public subnets.
    elb = template.add_resource(LoadBalancer(
        ELB_NAME,
        Name=Sub('${AWS::StackName}-elb'),
        Type='network',
        Subnets=[Ref(subnet1), Ref(subnet2)],
    ))

    #
    # Cluster: IAM role/profile for the hosts, security group, ECS cluster,
    # launch configuration and auto-scaling group.
    #

    ecs_host_role = template.add_resource(Role(
        'EcsHostRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal('Service', 'ec2.amazonaws.com'),
                Action=[awacs.sts.AssumeRole]
            )],
        ),
        ManagedPolicyArns=[
            'arn:aws:iam::aws:policy/'
            'service-role/AmazonEC2ContainerServiceforEC2Role'
        ]
    ))

    ecs_host_profile = template.add_resource(InstanceProfile(
        'EcsHostInstanceProfile',
        Roles=[Ref(ecs_host_role)]
    ))

    ecs_host_sg = template.add_resource(SecurityGroup(
        'EcsHostSecurityGroup',
        GroupDescription=Sub('${AWS::StackName}-hosts'),
        VpcId=Ref(network_vpc),
        # NOTE(review): ingress is wide open (all protocols, 0.0.0.0/0);
        # network LBs do not use security groups, but this still exposes
        # the hosts directly — confirm this is intentional.
        SecurityGroupIngress=[SecurityGroupRule(
            CidrIp='0.0.0.0/0',
            IpProtocol='-1'
        )]
    ))

    cluster = template.add_resource(Cluster(
        'EcsCluster',
        ClusterName=Ref('AWS::StackName')
    ))

    # Logical IDs are needed as strings inside the UserData/Metadata Subs
    # below, so they are bound to names before the resources are created.
    autoscaling_name = 'EcsHostAutoScalingGroup'
    launch_conf_name = 'EcsHostLaunchConfiguration'

    launch_conf = template.add_resource(LaunchConfiguration(
        launch_conf_name,
        ImageId=Ref(parameters.image_id),
        InstanceType=Ref(parameters.instance_type),
        IamInstanceProfile=Ref(ecs_host_profile),
        KeyName=Ref(parameters.key_name),
        SecurityGroups=[Ref(ecs_host_sg)],
        # Boot sequence: run cfn-init (applies the Metadata config below),
        # then report success/failure to the ASG's CreationPolicy signal.
        UserData=Base64(Sub(
            '#!/bin/bash\n'
            'yum install -y aws-cfn-bootstrap\n'
            '/opt/aws/bin/cfn-init -v'
            ' --region ${AWS::Region}'
            ' --stack ${AWS::StackName}'
            f' --resource {launch_conf_name}\n'
            '/opt/aws/bin/cfn-signal -e $?'
            ' --region ${AWS::Region}'
            ' --stack ${AWS::StackName}'
            f' --resource {autoscaling_name}\n'
        )),
        Metadata={
            'AWS::CloudFormation::Init': {
                'config': {
                    'commands': {
                        # Register the instance with this stack's cluster.
                        '01_add_instance_to_cluster': {
                            'command': Sub(
                                f'echo ECS_CLUSTER=${{{cluster.title}}}'
                                f' > /etc/ecs/ecs.config'
                            ),
                        }
                    },
                    'files': {
                        '/etc/cfn/cfn-hup.conf': {
                            'mode': 0o400,
                            'owner': 'root',
                            'group': 'root',
                            'content': Sub(
                                '[main]\n'
                                'stack=${AWS::StackId}\n'
                                'region=${AWS::Region}\n'
                            ),
                        },
                        # cfn-hup hook: re-run cfn-init when the template's
                        # Metadata changes on stack update.
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf': {
                            'content': Sub(
                                '[cfn-auto-reloader-hook]\n'
                                'triggers=post.update\n'
                                'path=Resources.ContainerInstances.Metadata'
                                '.AWS::CloudFormation::Init\n'
                                'action=/opt/aws/bin/cfn-init -v'
                                ' --region ${AWS::Region}'
                                ' --stack ${AWS::StackName}'
                                f' --resource {launch_conf_name}\n'
                            ),
                        },
                    },
                    'services': {
                        'sysvinit': {
                            'cfn-hup': {
                                'enabled': True,
                                'ensureRunning': True,
                                'files': [
                                    '/etc/cfn/cfn-hup.conf',
                                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                                ]
                            }
                        }
                    }
                }
            }
        }
    ))

    # Fixed-size group (min == max == desired == ClusterSize parameter).
    autoscaling_group = template.add_resource(AutoScalingGroup(
        autoscaling_name,
        VPCZoneIdentifier=[Ref(subnet1), Ref(subnet2)],
        LaunchConfigurationName=Ref(launch_conf),
        DesiredCapacity=Ref(parameters.cluster_size),
        MinSize=Ref(parameters.cluster_size),
        MaxSize=Ref(parameters.cluster_size),
        Tags=[{
            'Key': 'Name',
            'Value': Sub('${AWS::StackName} - ECS Host'),
            'PropagateAtLaunch': True,
        }],
        # Wait up to 15 minutes for each host's cfn-signal at creation.
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M'),
        ),
        # Roll hosts one at a time on update, keeping one in service.
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                MinInstancesInService=1,
                MaxBatchSize=1,
                PauseTime='PT5M',
                WaitOnResourceSignals=True,
            ),
        ),
    ))

    #
    # Services: shared roles/buckets/logs, then per-service resources.
    #

    task_role = template.add_resource(Role(
        'TaskExecutionRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal('Service', 'ecs-tasks.amazonaws.com'),
                Action=[awacs.sts.AssumeRole],
            )],
        ),
        ManagedPolicyArns=[
            'arn:aws:iam::aws:policy/'
            'service-role/AmazonECSTaskExecutionRolePolicy'
        ],
    ))

    # Pipeline artifact store; retained on stack deletion.
    artifact_bucket = template.add_resource(Bucket(
        'ArtifactBucket',
        DeletionPolicy='Retain',
    ))

    codebuild_role = template.add_resource(Role(
        'CodeBuildServiceRole',
        Path='/',
        AssumeRolePolicyDocument=PolicyDocument(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal(
                        'Service', 'codebuild.amazonaws.com'
                    ),
                    Action=[
                        awacs.sts.AssumeRole,
                    ],
                ),
            ],
        ),
        Policies=[Policy(
            PolicyName='root',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.ssm.GetParameters,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.s3.GetObject,
                            awacs.s3.PutObject,
                            awacs.s3.GetObjectVersion,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.logs.CreateLogGroup,
                            awacs.logs.CreateLogStream,
                            awacs.logs.PutLogEvents,
                        ],
                    ),
                ],
            ),
        )],
    ))

    codepipeline_role = template.add_resource(Role(
        'CodePipelineServiceRole',
        Path='/',
        AssumeRolePolicyDocument=PolicyDocument(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal(
                        'Service', 'codepipeline.amazonaws.com'
                    ),
                    Action=[
                        awacs.sts.AssumeRole,
                    ],
                ),
            ],
        ),
        Policies=[Policy(
            PolicyName='root',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    # Artifact-bucket object access only.
                    Statement(
                        Resource=[
                            Sub(f'${{{artifact_bucket.title}.Arn}}/*')
                        ],
                        Effect=Allow,
                        Action=[
                            awacs.s3.GetBucketVersioning,
                            awacs.s3.GetObject,
                            awacs.s3.GetObjectVersion,
                            awacs.s3.PutObject,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.ecs.DescribeServices,
                            awacs.ecs.DescribeTaskDefinition,
                            awacs.ecs.DescribeTasks,
                            awacs.ecs.ListTasks,
                            awacs.ecs.RegisterTaskDefinition,
                            awacs.ecs.UpdateService,
                            awacs.codebuild.StartBuild,
                            awacs.codebuild.BatchGetBuilds,
                            awacs.iam.PassRole,
                        ],
                    ),
                ],
            ),
        )],
    ))

    log_group = template.add_resource(LogGroup(
        'LogGroup',
        LogGroupName=Sub('/ecs/${AWS::StackName}'),
    ))

    # One shared CodeBuild project, created only when at least one service
    # has its pipeline enabled (it is referenced by each pipeline below).
    if any(conf.pipeline.enable for conf in sierrafile.services.values()):
        project = template.add_resource(Project(
            'CodeBuildProject',
            Name=Sub('${AWS::StackName}-build'),
            ServiceRole=Ref(codebuild_role),
            Artifacts=Artifacts(Type='CODEPIPELINE'),
            Source=Source(Type='CODEPIPELINE'),
            Environment=Environment(
                ComputeType='BUILD_GENERAL1_SMALL',
                Image='aws/codebuild/docker:17.09.0',
                Type='LINUX_CONTAINER',
            ),
        ))

    # Per-service: task definition, target group, listener, ECS service,
    # and (optionally) a pipeline + webhook.
    for name, settings in sierrafile.services.items():
        task_definition = template.add_resource(TaskDefinition(
            f'{name}TaskDefinition',
            RequiresCompatibilities=['EC2'],
            Cpu=str(settings.container.cpu),
            Memory=str(settings.container.memory),
            NetworkMode='bridge',
            ExecutionRoleArn=Ref(task_role.title),
            ContainerDefinitions=[
                ContainerDefinition(
                    Name=f'{name}',
                    Image=settings.container.image,
                    Memory=str(settings.container.memory),
                    Essential=True,
                    PortMappings=[
                        PortMapping(
                            ContainerPort=settings.container.port,
                            Protocol='tcp',
                        ),
                    ],
                    # Only env vars the service explicitly asks for.
                    Environment=[
                        troposphere.ecs.Environment(Name=k, Value=v)
                        for k, v in sierrafile.env_vars.items()
                        if k in settings.get('environment', [])
                    ],
                    LogConfiguration=LogConfiguration(
                        LogDriver='awslogs',
                        Options={
                            'awslogs-region': Ref('AWS::Region'),
                            'awslogs-group': Ref(log_group.title),
                            'awslogs-stream-prefix': Ref('AWS::StackName'),
                        },
                    ),
                ),
            ],
        ))

        target_group = template.add_resource(TargetGroup(
            f'{name}TargetGroup',
            Port=settings.container.port,
            Protocol='TCP',
            VpcId=Ref(network_vpc),
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-{name}')),
        ))

        listener = template.add_resource(Listener(
            f'{name}ElbListener',
            LoadBalancerArn=Ref(elb),
            Port=settings.container.port,
            Protocol='TCP',
            DefaultActions=[
                Action(TargetGroupArn=Ref(target_group), Type='forward')
            ],
        ))

        service = template.add_resource(Service(
            f'{name}Service',
            Cluster=Ref(cluster),
            ServiceName=f'{name}-service',
            # Hosts and the LB listener must exist before the service.
            DependsOn=[autoscaling_group.title, listener.title],
            DesiredCount=settings.container.count,
            TaskDefinition=Ref(task_definition),
            LaunchType='EC2',
            LoadBalancers=[
                troposphere.ecs.LoadBalancer(
                    ContainerName=f'{name}',
                    ContainerPort=settings.container.port,
                    TargetGroupArn=Ref(target_group),
                ),
            ],
        ))

        if settings.pipeline.enable:
            # GitHub -> CodeBuild -> ECS deploy pipeline for this service.
            pipeline = template.add_resource(Pipeline(
                f'{name}Pipeline',
                RoleArn=GetAtt(codepipeline_role, 'Arn'),
                ArtifactStore=ArtifactStore(
                    Type='S3',
                    Location=Ref(artifact_bucket),
                ),
                Stages=[
                    Stages(
                        Name='Source',
                        Actions=[Actions(
                            Name='Source',
                            ActionTypeId=ActionTypeId(
                                Category='Source',
                                Owner='ThirdParty',
                                Version='1',
                                Provider='GitHub',
                            ),
                            OutputArtifacts=[
                                OutputArtifacts(Name=f'{name}Source'),
                            ],
                            RunOrder='1',
                            Configuration={
                                'Owner': settings.pipeline.user,
                                'Repo': settings.pipeline.repo,
                                'Branch': settings.pipeline.branch,
                                'OAuthToken': Ref(parameters.github_token),
                            },
                        )],
                    ),
                    Stages(
                        Name='Build',
                        Actions=[Actions(
                            Name='Build',
                            ActionTypeId=ActionTypeId(
                                Category='Build',
                                Owner='AWS',
                                Version='1',
                                Provider='CodeBuild',
                            ),
                            InputArtifacts=[
                                InputArtifacts(Name=f'{name}Source'),
                            ],
                            OutputArtifacts=[
                                OutputArtifacts(Name=f'{name}Build'),
                            ],
                            RunOrder='1',
                            Configuration={
                                'ProjectName': Ref(project),
                            },
                        )],
                    ),
                    Stages(
                        Name='Deploy',
                        Actions=[Actions(
                            Name='Deploy',
                            ActionTypeId=ActionTypeId(
                                Category='Deploy',
                                Owner='AWS',
                                Version='1',
                                Provider='ECS',
                            ),
                            InputArtifacts=[
                                InputArtifacts(Name=f'{name}Build')
                            ],
                            RunOrder='1',
                            Configuration={
                                'ClusterName': Ref(cluster),
                                'ServiceName': Ref(service),
                                'FileName': 'image.json',
                            },
                        )],
                    ),
                ],
            ))

            # Push-triggered webhook so commits to the tracked branch
            # start the pipeline immediately.
            template.add_resource(Webhook(
                f'{name}CodePipelineWebhook',
                Name=Sub(f'${{AWS::StackName}}-{name}-webhook'),
                Authentication='GITHUB_HMAC',
                AuthenticationConfiguration=AuthenticationConfiguration(
                    SecretToken=Ref(parameters.github_token),
                ),
                Filters=[FilterRule(
                    JsonPath='$.ref',
                    MatchEquals=f'refs/heads/{settings.pipeline.branch}'
                )],
                TargetAction='Source',
                TargetPipeline=Ref(pipeline),
                TargetPipelineVersion=1,
                RegisterWithThirdParty=True,
            ))

    return template
# Console layout: four parameter groups plus friendly display labels.
_iface_groups = [
    {
        "Label": {"default": "Basic configuration"},
        "Parameters": [
            "TargetRegionParameter",
            "S3BucketParameter",
            "SourceZipParameter",
        ],
    },
    {
        "Label": {"default": "Encryption - see https://github.com/pbudzon/aws-maintenance#encryption for details"},
        "Parameters": [
            "KMSKeyParameter",
        ],
    },
    {
        "Label": {"default": "Optional: limit to specific RDS database(s)"},
        "Parameters": [
            "DatabasesToUse",
        ],
    },
    {
        "Label": {"default": "Optional: Aurora support"},
        "Parameters": ["IncludeAuroraClusters", "ClustersToUse"],
    },
]

# (parameter logical id, console label) pairs.
_iface_labels = {
    param: {"default": label}
    for param, label in [
        ("TargetRegionParameter", "Target region"),
        ("DatabasesToUse", "Databases to use for"),
        ("KMSKeyParameter", "KMS Key in target region"),
        ("IncludeAuroraClusters", "Use for Aurora clusters"),
        ("ClustersToUse", "Aurora clusters to use for"),
        ("S3BucketParameter", "Name of S3 bucket"),
        ("SourceZipParameter", "Name of ZIP file"),
    ]
}

template.add_metadata({
    "AWS::CloudFormation::Interface": {
        "ParameterGroups": _iface_groups,
        "ParameterLabels": _iface_labels,
    }
})
# Console grouping for the AZ, network and subnet parameters.
az_group = {
    'Label': {'default': 'Availability Zone Configuration'},
    'Parameters': [
        'NumberOfAZs',
        'AvailabilityZones',
    ],
}
network_group = {
    'Label': {'default': 'Network Configuration'},
    'Parameters': [
        'VpcCidr',
        'PublicSubnetCidrs',
    ],
}
subnet_group = {
    'Label': {'default': 'Subnet Configuration'},
    'Parameters': [
        'AutoAssignPublicIp',
    ],
}

# Friendly display names for each parameter.
display_labels = {
    'AvailabilityZones': {'default': 'Availability Zones'},
    'NumberOfAZs': {'default': 'Number of Availability Zones'},
    'VpcCidr': {'default': 'VPC CIDR'},
    'PublicSubnetCidrs': {'default': 'Public Subnet CIDRs'},
    'AutoAssignPublicIp': {'default': 'Auto Assign Public IP'},
}

t.add_metadata({
    'AWS::CloudFormation::Interface': {
        'ParameterGroups': [az_group, network_group, subnet_group],
        'ParameterLabels': display_labels,
    }
})
'default': 'Dns Configuration' }, 'Parameters': [ 'HostedZoneName', 'DomainName', 'RecordType' 'Target', 'Ttl', ] }, ] t.add_metadata({ 'AWS::CloudFormation::Interface': { 'ParameterGroups': parameter_groups, 'ParameterLabels': dict(cfnutil.generate_parameter_labels(parameter_groups)) } }) # # Parameter # param_hosted_domain = t.add_parameter( Parameter('HostedZoneName', Type='String', Description='Hosted zone name')) param_domain_name = t.add_parameter( Parameter('DomainName', Type='String', Description='Domain name')) param_record_type = t.add_parameter( Parameter('RecordType',
# endregion # region Metadata template.add_metadata({ 'AWS::CloudFormation::Interface': { 'ParameterLabels': { domain_name.title: { 'default': 'Main Domain' }, alternative_domain_names.title: { 'default': 'Alt Domain' }, validation_domain.title: { 'default': 'Validation Domain' }, }, 'ParameterGroups': [{ 'Label': { 'default': 'Domain' }, 'Parameters': [ domain_name.title, alternative_domain_names.title, validation_domain.title, ] }] } }) # endregion if __name__ == '__main__':
def generate(account_list=None, region_list=None, file_location=None, output_keys=False, dry_run=False):
    """CloudFormation template generator for use in creating the resources
    required to capture logs in a centrally managed account per UCSD
    standards.

    account_list  -- account IDs allowed to deliver CloudTrail logs and use
                     the CloudWatch Logs destination (tuple or list).
    region_list   -- regions to cover (defaults to the four US regions).
    file_location -- where to save the rendered template when not dry_run.
    output_keys   -- also create and output static IAM access keys for the
                     Splunk ingest user.
    dry_run       -- print the template JSON instead of writing a file.
    """
    # Click-style callers pass multi-value options as tuples; normalize so
    # the list comprehension over accounts below works uniformly.
    if type(account_list) == tuple:
        account_list = list(account_list)
    parameter_groups = []
    # NOTE(review): region_list is defaulted here but never referenced
    # below — presumably intended for future multi-region support; confirm.
    region_list = region_list if region_list else ['us-west-1', 'us-west-2', 'us-east-1', 'us-east-2']
    t = Template()
    t.add_version("2010-09-09")
    t.add_description("UCSD Log Target AWS CloudFormation Template - this CFn template configures a given account to receive logs from other accounts so as to aggregate and then optionally forward those logs on to the UCSD Splunk installation.")

    # Create Kinesis and IAM Roles
    # Sizing/retention knobs for the Kinesis stream that receives
    # CloudWatch Logs traffic from the source accounts.
    log_stream_shard_count = t.add_parameter(Parameter(
        "LogStreamShardCount",
        Description="Number of shards to create within the AWS Kinesis stream created to handle CloudWatch Logs.",
        Type="Number",
        MinValue=1,
        MaxValue=64,
        Default=1))
    log_stream_retention_period = t.add_parameter(Parameter(
        "LogStreamRetentionPeriod",
        Description="Number of hours to retain logs in the Kinesis stream.",
        Type="Number",
        MinValue=24,
        MaxValue=120,
        Default=24))
    # NOTE(review): `.name` on troposphere objects — presumably equivalent
    # to `.title` in the version pinned by this project; confirm.
    parameter_groups.append({'Label': {'default': 'Log Stream Inputs'},
                             'Parameters': [log_stream_shard_count.name, log_stream_retention_period.name]})

    log_stream = t.add_resource(k.Stream(
        "LogStream",
        RetentionPeriodHours=Ref(log_stream_retention_period),
        ShardCount=Ref(log_stream_shard_count)))

    firehose_bucket = t.add_resource(s3.Bucket('LogS3DeliveryBucket'))

    # Role Firehose assumes to write into the delivery bucket; the
    # sts:ExternalId condition pins the caller to this account.
    firehose_delivery_role = t.add_resource(iam.Role(
        'LogS3DeliveryRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[AssumeRole],
                Principal=Principal('Service', 'firehose.amazonaws.com'),
                Condition=Condition(StringEquals('sts:ExternalId', AccountId)))])))

    # S3 write access for the bucket (and its objects) plus read access to
    # the source Kinesis stream.
    log_s3_delivery_policy = t.add_resource(iam.PolicyType(
        'LogS3DeliveryPolicy',
        Roles=[Ref(firehose_delivery_role)],
        PolicyName='LogS3DeliveryPolicy',
        PolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[as3.AbortMultipartUpload,
                        as3.GetBucketLocation,
                        as3.GetObject,
                        as3.ListBucket,
                        as3.ListBucketMultipartUploads,
                        as3.PutObject],
                Resource=[
                    Join('', ['arn:aws:s3:::', Ref(firehose_bucket)]),
                    Join('', ['arn:aws:s3:::', Ref(firehose_bucket), '*'])]),
                Statement(
                    Effect=Allow,
                    Action=[akinesis.Action('Get*'),
                            akinesis.DescribeStream,
                            akinesis.ListStreams],
                    Resource=[
                        GetAtt(log_stream, 'Arn')])])))

    # Firehose: Kinesis stream -> S3, buffered 5 min / 50 MB, uncompressed.
    s3_firehose = t.add_resource(fh.DeliveryStream(
        'LogToS3DeliveryStream',
        DependsOn=[log_s3_delivery_policy.name],
        DeliveryStreamName='LogToS3DeliveryStream',
        DeliveryStreamType='KinesisStreamAsSource',
        KinesisStreamSourceConfiguration=fh.KinesisStreamSourceConfiguration(
            KinesisStreamARN=GetAtt(log_stream, 'Arn'),
            RoleARN=GetAtt(firehose_delivery_role, 'Arn')
        ),
        S3DestinationConfiguration=fh.S3DestinationConfiguration(
            BucketARN=GetAtt(firehose_bucket, 'Arn'),
            BufferingHints=fh.BufferingHints(
                IntervalInSeconds=300,
                SizeInMBs=50
            ),
            CompressionFormat='UNCOMPRESSED',
            Prefix='firehose/',
            RoleARN=GetAtt(firehose_delivery_role, 'Arn'),
        )))

    t.add_output(Output('SplunkKinesisLogStream',
                        Value=GetAtt(log_stream, 'Arn'),
                        Description='ARN of the kinesis stream for log aggregation.'))

    # Generate Bucket with Lifecycle Policies
    ct_s3_key_prefix = t.add_parameter(Parameter(
        'CloudTrailKeyPrefix',
        Type='String',
        Default='',
        Description='Key name prefix for logs being sent to S3'))
    bucket_name = t.add_parameter(Parameter(
        "BucketName",
        Description="Name to assign to the central logging retention bucket",
        Type="String",
        AllowedPattern="([a-z]|[0-9])+",
        MinLength=2,
        MaxLength=64))
    glacier_migration_days = t.add_parameter(Parameter(
        "LogMoveToGlacierInDays",
        Description="Number of days until logs are expired from S3 and transitioned to Glacier",
        Type="Number",
        Default=365))
    glacier_deletion_days = t.add_parameter(Parameter(
        "LogDeleteFromGlacierInDays",
        Description="Number of days until logs are expired from Glacier and deleted",
        Type="Number",
        Default=365*7))
    parameter_groups.append({'Label': {'default': 'S3 Log Destination Parameters'},
                             'Parameters': [bucket_name.name, ct_s3_key_prefix.name, glacier_migration_days.name, glacier_deletion_days.name]})

    dead_letter_queue = t.add_resource(sqs.Queue('deadLetterQueue'))
    queue = t.add_resource(sqs.Queue(
        's3DeliveryQueue',
        MessageRetentionPeriod=14*24*60*60,  # 14 d * 24 h * 60 m * 60 s
        # 5 m * 60 s per Splunk docs here: http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWS#Configure_SQS
        VisibilityTimeout=5*60,
        RedrivePolicy=sqs.RedrivePolicy(
            deadLetterTargetArn=GetAtt(dead_letter_queue, 'Arn'),
            maxReceiveCount=10
        )))
    t.add_output(Output('SplunkS3Queue',
                        Value=GetAtt(queue, 'Arn'),
                        Description='Queue for Splunk SQS S3 ingest'))
    t.add_output(Output('SplunkS3DeadLetterQueue',
                        Value=GetAtt(dead_letter_queue, 'Arn'),
                        Description="Dead letter queue for Splunk SQS S3 ingest"))

    # Let S3 (any principal, but only buckets whose ARN matches the log
    # bucket name) publish object-created notifications to the queue.
    t.add_resource(sqs.QueuePolicy(
        's3DeliveryQueuePolicy',
        PolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal("AWS", "*"),
                Action=[asqs.SendMessage],
                Resource=[GetAtt(queue, 'Arn')],
                Condition=Condition(ArnLike("aws:SourceArn", Join('', ["arn:aws:s3:*:*:", Ref(bucket_name)]))))]),
        Queues=[Ref(queue)]))

    # Central retention bucket: notifies the queue on every new object and
    # transitions logs to Glacier before final expiry.
    bucket = t.add_resource(s3.Bucket(
        "LogDeliveryBucket",
        DependsOn=[log_stream.name, queue.name],
        BucketName=Ref(bucket_name),
        AccessControl="LogDeliveryWrite",
        NotificationConfiguration=s3.NotificationConfiguration(
            QueueConfigurations=[s3.QueueConfigurations(
                Event="s3:ObjectCreated:*",
                Queue=GetAtt(queue, 'Arn'))]),
        LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
            s3.LifecycleRule(
                Id="S3ToGlacierTransition",
                Status="Enabled",
                ExpirationInDays=Ref(glacier_deletion_days),
                Transition=s3.LifecycleRuleTransition(
                    StorageClass="Glacier",
                    TransitionInDays=Ref(glacier_migration_days)))])))

    # Standard CloudTrail delivery policy: ACL read on the bucket plus
    # PutObject (bucket-owner-full-control) for each allowed account.
    bucket_policy = t.add_resource(s3.BucketPolicy(
        "LogDeliveryBucketPolicy",
        Bucket=Ref(bucket),
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect="Allow",
                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                    Action=[GetBucketAcl],
                    Resource=[GetAtt(bucket, 'Arn')]),
                Statement(
                    Effect="Allow",
                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                    Action=[PutObject],
                    Condition=Condition(StringEquals({"s3:x-amz-acl": "bucket-owner-full-control"})),
                    Resource=[Join('', [GetAtt(bucket, "Arn"), Ref(ct_s3_key_prefix), "/AWSLogs/", acct_id, "/*"])
                              for acct_id in account_list])])))

    # IAM user Splunk uses to read from the queue/bucket; the real user
    # name is scrubbed from this source.
    splunk_sqs_s3_user = t.add_resource(iam.User(
        'splunkS3SQSUser',
        Path='/',
        UserName='******'))
    splunk_user_policy = t.add_resource(_generate_splunk_policy(users=[Ref(splunk_sqs_s3_user)]))
    t.add_output(Output('BucketName',
                        Description="Name of the bucket for CloudTrail log delivery",
                        Value=Ref(bucket)))

    # Log destination setup
    # Role CloudWatch Logs (in this region) assumes to write to Kinesis.
    cwl_to_kinesis_role = t.add_resource(iam.Role(
        'CWLtoKinesisRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[AssumeRole],
                Principal=Principal("Service", Join('', ["logs.", Region, ".amazonaws.com"])))])))

    cwl_to_kinesis_policy_link = t.add_resource(iam.PolicyType(
        'CWLtoKinesisPolicy',
        PolicyName='CWLtoKinesisPolicy',
        Roles=[Ref(cwl_to_kinesis_role)],
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Resource=[GetAtt(log_stream, 'Arn')],
                    Action=[akinesis.PutRecord]),
                Statement(
                    Effect=Allow,
                    Resource=[GetAtt(cwl_to_kinesis_role, 'Arn')],
                    Action=[IAMPassRole])])))

    # Cross-account CloudWatch Logs destination the child accounts point
    # their subscription filters at.
    log_destination = t.add_resource(cwl.Destination(
        'CWLtoKinesisDestination',
        DependsOn=[cwl_to_kinesis_policy_link.name],
        DestinationName='CWLtoKinesisDestination',
        DestinationPolicy=_generate_log_destination_policy_test('CWLtoKinesisDestination', account_list),
        RoleArn=GetAtt(cwl_to_kinesis_role, 'Arn'),
        TargetArn=GetAtt(log_stream, 'Arn')))
    t.add_output(Output('childAccountLogDeliveryDestinationArn',
                        Value=GetAtt(log_destination, 'Arn'),
                        Description='Log Destination to specify when deploying the source cloudformation template in other accounts.'))

    if output_keys:
        # Static credentials for the Splunk user; only emitted on request
        # since the secret key ends up in stack outputs.
        splunk_user_creds = t.add_resource(iam.AccessKey(
            'splunkAccountUserCreds',
            UserName=Ref(splunk_sqs_s3_user)))
        t.add_output(Output('splunkUserAccessKey',
                            Description='AWS Access Key for the user created for splunk to use when accessing logs',
                            Value=Ref(splunk_user_creds)))
        t.add_output(Output('splunkUserSecretKey',
                            Description='AWS Secret Access Key ID for the user created for splunk to use when accessing logs',
                            Value=GetAtt(splunk_user_creds, 'SecretAccessKey')))

    t.add_output(Output('splunkCWLRegion',
                        Description="The AWS region that contains the data. In aws_cloudwatch_logs_tasks.conf, enter the region ID.",
                        Value=Region))
    t.add_output(Output("DeploymentAccount",
                        Value=AccountId,
                        Description="Convenience Output for referencing AccountID of the log aggregation account"))

    # Console grouping built up incrementally above.
    t.add_metadata({"AWS::CloudFormation::Interface": {"ParameterGroups": parameter_groups}})

    if dry_run:
        print(t.to_json())
    else:
        save_path = file_location if file_location else os.path.join(log_aggregation_cf, 'log_targets.json')
        with open (save_path, 'w') as f:
            f.write(t.to_json())
# (group heading, parameters in display order) for the console layout.
_group_defs = [
    ('Configuration for Cloudera', [
        CLOUDERA_MASTER_INSTANCE_TYPE,
        CLOUDERA_WORKER_INSTANCE_TYPE,
        CLOUDERA_WORKER_COUNT,
    ]),
    ('Configuration for Cloud Foundry', [
        CF_PASSWORD,
        CF_SYSTEM_DOMAIN,
        CF_RUNNER_Z1_INSTANCES,
        CF_RUNNER_Z1_INSTANCE_TYPE,
    ]),
    ('Configuration for SMTP', [
        SMTP_HOST,
        SMTP_PORT,
        SMTP_SENDER_USER,
        SMTP_PASSWORD,
        SMTP_SENDER_NAME,
        SMTP_SENDER_EMAIL,
    ]),
    ('Credentials for Quay.io robot account', [
        QUAY_IO_USERNAME,
        QUAY_IO_PASSWORD,
    ]),
]

# (parameter, console label) pairs; includes parameters that are labelled
# but not placed in any group.
_label_defs = [
    (KEY_NAME, 'Key pair name'),
    (TERMINATION_PROTECTION_ENABLED, 'Termination protection'),
    (CLOUDERA_MASTER_INSTANCE_TYPE, 'Instance type for masters'),
    (CLOUDERA_WORKER_INSTANCE_TYPE, 'Instance type for workers'),
    (CLOUDERA_WORKER_COUNT, 'Number of workers'),
    (CF_PASSWORD, 'Password'),
    (CF_SYSTEM_DOMAIN, 'System domain'),
    (CF_RUNNER_Z1_INSTANCES, 'Number of DEAs'),
    (CF_RUNNER_Z1_INSTANCE_TYPE, 'Instance type for DEA'),
    (SMTP_HOST, 'Server host address'),
    (SMTP_PORT, 'Server port'),
    (SMTP_SENDER_USER, 'Server username'),
    (SMTP_PASSWORD, 'Server password'),
    (SMTP_SENDER_NAME, 'From name'),
    (SMTP_SENDER_EMAIL, 'From email address'),
    (QUAY_IO_USERNAME, 'Username'),
    (QUAY_IO_PASSWORD, 'Password'),
    (NGINX_EIP, 'Elastic IP address for the load balancer'),
]

TEMPLATE.add_metadata({
    'AWS::CloudFormation::Interface': {
        'ParameterGroups': [
            {
                'Label': {'default': heading},
                'Parameters': [param.title for param in params],
            }
            for heading, params in _group_defs
        ],
        'ParameterLabels': {
            param.title: {'default': label} for param, label in _label_defs
        },
    }
})
def _iface_group(label, parameters):
    """Build one ParameterGroups entry for the CloudFormation console."""
    return {"Label": {"default": label}, "Parameters": parameters}


# Console layout: parameter grouping plus friendly display labels.
template.add_metadata({
    "AWS::CloudFormation::Interface": {
        "ParameterGroups": [
            _iface_group(
                "Basic configuration",
                ["TargetRegionParameter", "S3BucketParameter", "SourceZipParameter"],
            ),
            _iface_group(
                "Encryption - see https://github.com/pbudzon/aws-maintenance#encryption for details",
                ["KMSKeyParameter"],
            ),
            _iface_group(
                "Optional: limit to specific RDS database(s)",
                ["DatabasesToUse"],
            ),
            _iface_group(
                "Optional: Aurora support",
                ["IncludeAuroraClusters", "ClustersToUse"],
            ),
        ],
        "ParameterLabels": {
            "TargetRegionParameter": {"default": "Target region"},
            "DatabasesToUse": {"default": "Databases to use for"},
            "KMSKeyParameter": {"default": "KMS Key in target region"},
            "IncludeAuroraClusters": {"default": "Use for Aurora clusters"},
            "ClustersToUse": {"default": "Aurora clusters to use for"},
            "S3BucketParameter": {"default": "Name of S3 bucket"},
            "SourceZipParameter": {"default": "Name of ZIP file"},
        },
    }
})
from awacs import aws, sts

# Template skeleton for the image gateway stack.
template = Template()
template.add_description("Image gateway")

# Console layout for the stack's parameters.
_storage_group = {
    'Label': {'default': 'Storage'},
    'Parameters': ['BucketName'],
}
_lambda_group = {
    'Label': {'default': 'Lambda source'},
    'Parameters': ['LambdaSourceBucket', 'LambdaFileName'],
}
template.add_metadata({
    'AWS::CloudFormation::Interface': {
        'ParameterGroups': [_storage_group, _lambda_group],
        'ParameterLabels': {
            'BucketName': {'default': 'S3 Storage Bucket'},
            'LambdaSourceBucket': {'default': 'S3 Bucket with Lambda source'},
            'LambdaFileName': {'default': 'Path and name of the file inside S3 Bucket'},
        },
    }
})

# Name of the bucket that will hold uploads and cached renditions.
param_bucket_name = template.add_parameter(Parameter(
    "BucketName",
    Type="String",
    Description='Name of the STORAGE bucket to create where the images will be uploaded and renditions cached',
))