def o_dynamodb_table_arn(self):
    """Build the stack Output exposing the DynamoDB table ARN.

    The value is exported as ``<stack-name>-TableArn`` so that other
    stacks can import it.
    """
    output_id = 'TableArn'
    export_name = Sub("${AWS::StackName}-%s" % output_id)
    return Output(
        output_id,
        Description='The ARN identifier of the DynamoDB table',
        Value=GetAtt(self.r_table, 'Arn'),
        Export=Export(export_name),
    )
from troposphere import Template, Ref, Output, GetAtt
from troposphere.iam import AccessKey, User

# Template: an IAM user for CircleCI plus an active access key, with the
# key id and secret key exposed as stack outputs.
template = Template()
template.add_version('2010-09-09')
template.add_description("Create a CircleCI user with access to S3 bucket.")

# Resources
ci_user = template.add_resource(User(title='czpycon2015circleci', ))
ci_key = template.add_resource(
    AccessKey("Troposphere", Status="Active", UserName=Ref(ci_user)))

# Outputs
template.add_output(Output(
    "AccessKey",
    Value=Ref(ci_key),
    Description="AWSAccessKeyId",
))
template.add_output(Output(
    "SecretKey",
    Value=GetAtt(ci_key, "SecretAccessKey"),
    Description="AWSSecretKey",
))

if __name__ == '__main__':
    print(template.to_json())
# Scale-up alarm: fires when the ASG's average CPUUtilization over one
# 60-second period exceeds 60%.
t.add_resource(Alarm(
    "CPUTooHigh",
    AlarmDescription="Alarm if CPU too high",
    Namespace="AWS/EC2",
    MetricName="CPUUtilization",
    Dimensions=[
        MetricDimension(
            Name="AutoScalingGroupName",
            Value=Ref("AutoscalingGroup")
        ),
    ],
    Statistic="Average",
    Period="60",
    EvaluationPeriods="1",
    Threshold="60",
    ComparisonOperator="GreaterThanThreshold",
    AlarmActions=[Ref("ScaleUpPolicy"), ],
    # Missing data also triggers the scale-up action.
    InsufficientDataActions=[Ref("ScaleUpPolicy")],
))

# Public endpoint of the application behind the load balancer.
t.add_output(Output(
    "WebUrl",
    Description="Application endpoint",
    Value=Join("", [
        "http://", GetAtt("LoadBalancer", "DNSName"),
        ":", ApplicationPort
    ]),
))

# Fixed: `print t.to_json()` is Python-2-only statement syntax; use the
# print() function so the script also runs under Python 3.
print(t.to_json())
# --- Route 53 CNAME example (fragment: the opening of the description
# call and earlier statements are outside this chunk) ---
# Declares a HostedZone parameter, creates a CNAME record named
# "<stack-name>.<region>.<hosted-zone>." with TTL 900 pointing at
# aws.amazon.com, outputs the record name, and prints the template JSON.
"a stack from this template.") hostedzone = t.add_parameter( Parameter( "HostedZone", Description="The DNS name of an existing Amazon Route 53 hosted zone", Type="String", )) myDNSRecord = t.add_resource( RecordSetType( "myDNSRecord", HostedZoneName=Join("", [Ref(hostedzone), "."]), Comment="CNAME redirect to aws.amazon.com.", Name=Join( "", [ Ref("AWS::StackName"), ".", Ref("AWS::Region"), ".", Ref(hostedzone), "." ], ), Type="CNAME", TTL="900", ResourceRecords=["aws.amazon.com"], )) t.add_output(Output("DomainName", Value=Ref(myDNSRecord))) print(t.to_json())
# Tile-server ("Tiler") stack setup for MMW.
# Copies the input Tags and adds StackType=Tiler, records the region,
# then declares the parameters (stack color, EC2 key pair, availability
# zones, tile-server instance type/AMI/instance profile, auto-scaling
# desired/min/max, public/private subnets, public hosted-zone name, VPC
# id, notification-topic ARN), creates security groups, the load
# balancer, auto-scaling, CloudWatch and DNS resources, and outputs the
# load balancer's DNSName and CanonicalHostedZoneNameID.
# NOTE(review): self.add_parameter takes a second positional argument
# (the parameter name) — presumably a project-local wrapper around
# troposphere's Template.add_parameter; confirm against the base class.
def set_up_stack(self): super(Tiler, self).set_up_stack() tags = self.get_input('Tags').copy() tags.update({'StackType': 'Tiler'}) self.default_tags = tags self.region = self.get_input('Region') self.add_description('Tile server stack for MMW') # Parameters self.color = self.add_parameter(Parameter( 'StackColor', Type='String', Description='Stack color', AllowedValues=['Blue', 'Green'] ), 'StackColor') self.keyname = self.add_parameter(Parameter( 'KeyName', Type='String', Description='Name of an existing EC2 key pair' ), 'KeyName') self.availability_zones = self.add_parameter(Parameter( 'AvailabilityZones', Type='CommaDelimitedList', Description='Comma delimited list of availability zones' ), 'AvailabilityZones') self.tile_server_instance_type = self.add_parameter(Parameter( 'TileServerInstanceType', Type='String', Default='t2.micro', Description='Tile server EC2 instance type', AllowedValues=EC2_INSTANCE_TYPES, ConstraintDescription='must be a valid EC2 instance type.' ), 'TileServerInstanceType') self.tile_server_ami = self.add_parameter(Parameter( 'TileServerAMI', Type='String', Default=self.get_recent_tile_server_ami(), Description='Tile server AMI' ), 'TileServerAMI') self.tile_server_instance_profile = self.add_parameter(Parameter( 'TileServerInstanceProfile', Type='String', Default='TileServerInstanceProfile', Description='Tile server instance profile' ), 'TileServerInstanceProfile') self.tile_server_auto_scaling_desired = self.add_parameter(Parameter( 'TileServerAutoScalingDesired', Type='String', Default='1', Description='Tile server AutoScalingGroup desired' ), 'TileServerAutoScalingDesired') self.tile_server_auto_scaling_min = self.add_parameter(Parameter( 'TileServerAutoScalingMin', Type='String', Default='1', Description='Tile server AutoScalingGroup minimum' ), 'TileServerAutoScalingMin') self.tile_server_auto_scaling_max = self.add_parameter(Parameter( 'TileServerAutoScalingMax', Type='String', Default='1', Description='Tile server AutoScalingGroup 
maximum' ), 'TileServerAutoScalingMax') self.public_subnets = self.add_parameter(Parameter( 'PublicSubnets', Type='CommaDelimitedList', Description='A list of public subnets' ), 'PublicSubnets') self.private_subnets = self.add_parameter(Parameter( 'PrivateSubnets', Type='CommaDelimitedList', Description='A list of private subnets' ), 'PrivateSubnets') self.public_hosted_zone_name = self.add_parameter(Parameter( 'PublicHostedZoneName', Type='String', Description='Route 53 public hosted zone name' ), 'PublicHostedZoneName') self.vpc_id = self.add_parameter(Parameter( 'VpcId', Type='String', Description='VPC ID' ), 'VpcId') self.notification_topic_arn = self.add_parameter(Parameter( 'GlobalNotificationsARN', Type='String', Description='ARN for an SNS topic to broadcast notifications' ), 'GlobalNotificationsARN') tile_server_lb_security_group, \ tile_server_security_group = self.create_security_groups() tile_server_lb = self.create_load_balancer( tile_server_lb_security_group) self.create_auto_scaling_resources(tile_server_security_group, tile_server_lb) self.create_cloud_watch_resources(tile_server_lb) self.create_dns_records(tile_server_lb) self.add_output(Output('TileServerLoadBalancerEndpoint', Value=GetAtt(tile_server_lb, 'DNSName'))) self.add_output(Output('TileServerLoadBalancerHostedZoneNameID', Value=GetAtt(tile_server_lb, 'CanonicalHostedZoneNameID')))
def test_max_outputs(self):
    """Adding one output beyond MAX_OUTPUTS must raise ValueError."""
    tpl = Template()
    # Fill the template right up to the output limit.
    for index in range(MAX_OUTPUTS):
        label = str(index)
        tpl.add_output(Output(label, Value=label))
    # One more output overflows the limit and must be rejected.
    with self.assertRaises(ValueError):
        tpl.add_output(Output("output", Value="output"))
# --- CloudWatch Logs fragment (ends mid-call: the remaining arguments
# to cfnutil.write are outside this chunk) ---
# Creates a LogGroup whose name and retention are each conditional
# (HasName -> param_name, NotExpire -> no RetentionInDays), then outputs
# the group's name and ARN, both exported under the stack name.
# log_group = t.add_resource( logs.LogGroup( 'LogGroup', # DeletionPolicy=Retain, LogGroupName=If('HasName', Ref(param_name), Ref(AWS_NO_VALUE)), RetentionInDays=If('NotExpire', Ref(AWS_NO_VALUE), Ref(param_retention)))) # # Output # t.add_output([ Output( 'LogGroupName', Description='Name of the log group.', Value=Ref(log_group), # Export=Export(Sub('${AWS::StackName}-LogGroupName')) ), Output( 'LogGroupArn', Description='Arn of the log group.', Value=GetAtt(log_group, 'Arn'), # Export=Export(Sub('${AWS::StackName}-LogGroupArn')) ), ]) # # Write # cfnutil.write(t, __file__.replace('Template.py', '.template.yaml'),
# Builds the VPC Scenario-2 base-infrastructure template: a 10.0.0.0/16
# VPC with an Amazon-provided IPv6 block, internet gateway + attachment,
# a public route table with IPv4 (0.0.0.0/0) and IPv6 (::/0) default
# routes, one public dual-stack /24 subnet in AZ "<region>a", its route
# table association, and outputs for the VPC id, subnet id and stack id.
# NOTE(review): the subnet's IPv6 CIDR is derived by splitting the VPC
# block on the literal "00::/56" and re-appending "00::/64" — this only
# works while the allocated block ends in "00::/56"; confirm.
def generate_template(): template = Template() ref_stack_id = Ref('AWS::StackId') ref_region = Ref('AWS::Region') ref_stack_name = Ref('AWS::StackName') template.add_description( 'Base infrastructure Stack implementing VPC Scenario 2 with 2 private subnets' ) # Create VPC vpc = template.add_resource( VPC('VPC', CidrBlock='10.0.0.0/16', EnableDnsHostnames=True, Tags=Tags(Application=ref_stack_id))) # Create Cidr Block for IPv6 vpc_cidr_block = template.add_resource( VPCCidrBlock( 'VPCCidrBlock', AmazonProvidedIpv6CidrBlock=True, VpcId=Ref(vpc), )) internet_gateway = template.add_resource( InternetGateway('InternetGateway', Tags=Tags(Application=ref_stack_id))) template.add_resource( VPCGatewayAttachment('AttachGateway', VpcId=Ref(vpc), InternetGatewayId=Ref(internet_gateway))) # Create Routing Tables public_route_table = template.add_resource( RouteTable('PublicRouteTable', VpcId=Ref(vpc), Tags=Tags(Application=ref_stack_id))) template.add_resource( Route( 'RouteIPv4', DependsOn='AttachGateway', GatewayId=Ref('InternetGateway'), DestinationCidrBlock='0.0.0.0/0', RouteTableId=Ref(public_route_table), )) template.add_resource( Route( 'RouteIPv6', DependsOn='AttachGateway', GatewayId=Ref('InternetGateway'), DestinationIpv6CidrBlock="::/0", RouteTableId=Ref(public_route_table), )) # Create Public Subnet public_subnet1 = template.add_resource( Subnet('PublicSubnet1', DependsOn=vpc_cidr_block, AssignIpv6AddressOnCreation=True, CidrBlock='10.0.0.0/24', Ipv6CidrBlock=Join("", [ Select( 0, Split("00::/56", Select(0, GetAtt( vpc, 'Ipv6CidrBlocks')))), "00::/64" ]), AvailabilityZone=Join("", [ref_region, 'a']), VpcId=Ref(vpc), Tags=Tags(Name='public-10.0.0.0', Application=ref_stack_id))) template.add_resource( SubnetRouteTableAssociation( 'SubnetRouteTableAssociation1', SubnetId=Ref(public_subnet1), RouteTableId=Ref(public_route_table), )) # Outputs template.add_output(Output('VPCId', Value=Ref(vpc), Description='VPC Id')) template.add_output( Output('PublicSubnet1', 
Value=Ref(public_subnet1), Description='Public subnet ID')) template.add_output( Output('StackID', Value=ref_stack_name, Description='Stack ID')) return template
# Builds a "simple public VPC" template: a 10.10.0.0/16 VPC, DHCP
# options (Amazon NTP at 169.254.169.123 + AmazonProvidedDNS), internet
# gateway and attachment, one public subnet carved from the VPC CIDR via
# Cidr(..., 8, 8) in the first listed AZ, a route table with a default
# route through the gateway, and VpcId/SubnetIds outputs.
# NOTE(review): the logical id "SubnetRouteTableAssocation0" misspells
# "Association"; left untouched because renaming a logical id would
# replace the deployed resource. The "SubnetIds" output carries a single
# subnet ref despite the plural name.
def create_template(): template = Template(Description="Simple public VPC") availability_zones = template.add_parameter( Parameter( "AvailabilityZones", Type="String", )) vpc = template.add_resource( VPC( "Vpc", CidrBlock="10.10.0.0/16", EnableDnsHostnames=False, EnableDnsSupport=True, Tags=Tags(Name=StackName), )) dhcp_options = template.add_resource( DHCPOptions( "DhcpOptions", NtpServers=["169.254.169.123"], DomainNameServers=["AmazonProvidedDNS"], Tags=Tags(Name=StackName), )) template.add_resource( VPCDHCPOptionsAssociation( "VpcDhcpOptionsAssociation", VpcId=Ref(vpc), DhcpOptionsId=Ref(dhcp_options), )) internet_gateway = template.add_resource( InternetGateway( "InternetGateway", Tags=Tags(Name=StackName), )) vpc_gateway_attachment = template.add_resource( VPCGatewayAttachment( "VpcGatewayAttachment", VpcId=Ref(vpc), InternetGatewayId=Ref(internet_gateway), )) subnet = template.add_resource( Subnet( "Subnet0", MapPublicIpOnLaunch=True, VpcId=Ref(vpc), CidrBlock=Select(0, Cidr(GetAtt(vpc, "CidrBlock"), 8, 8)), AvailabilityZone=Select(0, Split(",", Ref(availability_zones))), Tags=Tags(Name=StackName), )) route_table = template.add_resource( RouteTable( "RouteTable0", VpcId=Ref(vpc), Tags=Tags(Name=StackName), )) internet_route = template.add_resource( Route( "InternetRoute0", DestinationCidrBlock="0.0.0.0/0", GatewayId=Ref(internet_gateway), RouteTableId=Ref(route_table), DependsOn=[vpc_gateway_attachment], )) template.add_resource( SubnetRouteTableAssociation( "SubnetRouteTableAssocation0", RouteTableId=Ref(route_table), SubnetId=Ref(subnet), )) template.add_output(Output( "VpcId", Value=Ref(vpc), )) template.add_output(Output( "SubnetIds", Value=Ref(subnet), )) return template
# NLB sample: one EC2 web instance behind an internet-facing network
# load balancer. Declares key/instance-type/web-port parameters (plus
# hard-coded subnet and VPC id defaults), a security group opening SSH
# (22) and the web port to 0.0.0.0/0, two EIPs mapped onto the NLB's
# subnets, the instance (user data = the web port), an HTTP target
# group with health checks, a listener on port 80 forwarding to it, and
# a URL output; prints the template JSON.
# NOTE(review): the subnet-.../vpc-... parameter defaults are
# account-specific sample values and will not resolve elsewhere.
def main(): template = Template() template.add_version("2010-09-09") template.set_description( "AWS CloudFormation Sample Template: NLB with 1 EC2 instance") AddAMI(template) # Add the Parameters keyname_param = template.add_parameter( Parameter( "KeyName", Type="String", Default="mark", Description="Name of an existing EC2 KeyPair to " "enable SSH access to the instance", )) template.add_parameter( Parameter( "InstanceType", Type="String", Description="WebServer EC2 instance type", Default="m1.small", AllowedValues=[ "t1.micro", "m1.small", "m1.medium", "m1.large", "m1.xlarge", "m2.xlarge", "m2.2xlarge", "m2.4xlarge", "c1.medium", "c1.xlarge", "cc1.4xlarge", "cc2.8xlarge", "cg1.4xlarge" ], ConstraintDescription="must be a valid EC2 instance type.", )) webport_param = template.add_parameter( Parameter( "WebServerPort", Type="String", Default="8888", Description="TCP/IP port of the web server", )) subnetA = template.add_parameter( Parameter("subnetA", Type="String", Default="subnet-096fd06d")) subnetB = template.add_parameter( Parameter("subnetB", Type="String", Default="subnet-1313ef4b")) VpcId = template.add_parameter( Parameter("VpcId", Type="String", Default="vpc-82c514e6")) # Define the instance security group instance_sg = template.add_resource( ec2.SecurityGroup( "InstanceSecurityGroup", GroupDescription="Enable SSH and HTTP access on the inbound port", SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="0.0.0.0/0", ), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort=Ref(webport_param), ToPort=Ref(webport_param), CidrIp="0.0.0.0/0", ), ])) eipA = template.add_resource(ec2.EIP( 'eipA', Domain='vpc', )) eipB = template.add_resource(ec2.EIP( 'eipB', Domain='vpc', )) # Add the web server instance WebInstance = template.add_resource( ec2.Instance( "WebInstance", SecurityGroups=[Ref(instance_sg)], KeyName=Ref(keyname_param), InstanceType=Ref("InstanceType"), ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), 
UserData=Base64(Ref(webport_param)), )) # Add the network LB NetworkLB = template.add_resource( elb.LoadBalancer( "NetworkLB", Name="NetworkLB", Scheme="internet-facing", SubnetMappings=[ elb.SubnetMapping(AllocationId=GetAtt(eipA, 'AllocationId'), SubnetId=Ref(subnetA)), elb.SubnetMapping(AllocationId=GetAtt(eipB, 'AllocationId'), SubnetId=Ref(subnetB)) ], Type='network')) TargetGroupWeb = template.add_resource( elb.TargetGroup("TargetGroupWeb", HealthCheckIntervalSeconds="30", HealthCheckProtocol="HTTP", HealthCheckTimeoutSeconds="10", HealthyThresholdCount="4", Matcher=elb.Matcher(HttpCode="200"), Name="WebTarget", Port=Ref(webport_param), Protocol="HTTP", Targets=[ elb.TargetDescription(Id=Ref(WebInstance), Port=Ref(webport_param)) ], UnhealthyThresholdCount="3", VpcId=Ref(VpcId))) template.add_resource( elb.Listener("Listener", Port="80", Protocol="HTTP", LoadBalancerArn=Ref(NetworkLB), DefaultActions=[ elb.Action(Type="forward", TargetGroupArn=Ref(TargetGroupWeb)) ])) template.add_output( Output("URL", Description="URL of the sample website", Value=Join("", ["http://", GetAtt(NetworkLB, "DNSName")]))) print(template.to_json())
# CFNgin functional-test IAM stack: builds a "Stacker" inline policy
# with the precise S3 and CloudFormation permissions CFNgin needs
# (bucket list/create/delete, object read/write/delete, change-set
# operations, stack lifecycle scoped to the StackerNamespace prefix, and
# an explicit Deny on this stack's own id), a role assumable from the
# current account, a user carrying the same policy plus permission to
# assume the role, and an access key; outputs the user name, key id,
# secret key, and role ARN.
# NOTE(review): the SecretAccessKey is exposed as a plain stack output —
# acceptable only for throwaway test stacks.
def create_template(self): """Create template.""" bucket_arn = Sub("arn:aws:s3:::${StackerBucket}*") objects_arn = Sub("arn:aws:s3:::${StackerBucket}*/*") cloudformation_scope = Sub( "arn:aws:cloudformation:*:${AWS::AccountId}:" "stack/${StackerNamespace}-*" ) changeset_scope = "*" # This represents the precise IAM permissions that CFNgin itself # needs. cfngin_policy = iam.Policy( PolicyName="Stacker", PolicyDocument=Policy( Statement=[ Statement( Effect="Allow", Resource=["*"], Action=[awacs.s3.ListAllMyBuckets], ), Statement( Effect="Allow", Resource=[bucket_arn], Action=[ awacs.s3.ListBucket, awacs.s3.GetBucketLocation, awacs.s3.CreateBucket, awacs.s3.DeleteBucket, ], ), Statement( Effect="Allow", Resource=[bucket_arn], Action=[ awacs.s3.GetObject, awacs.s3.GetObjectAcl, awacs.s3.PutObject, awacs.s3.PutObjectAcl, ], ), Statement( Effect="Allow", Resource=[objects_arn], Action=[awacs.s3.DeleteObject], ), Statement( Effect="Allow", Resource=[changeset_scope], Action=[ awacs.cloudformation.DescribeChangeSet, awacs.cloudformation.ExecuteChangeSet, awacs.cloudformation.DeleteChangeSet, ], ), Statement( Effect="Deny", Resource=[Ref("AWS::StackId")], Action=[awacs.cloudformation.Action("*")], ), Statement( Effect="Allow", Resource=[cloudformation_scope], Action=[ awacs.cloudformation.GetTemplate, awacs.cloudformation.CreateChangeSet, awacs.cloudformation.DeleteChangeSet, awacs.cloudformation.DeleteStack, awacs.cloudformation.CreateStack, awacs.cloudformation.UpdateStack, awacs.cloudformation.SetStackPolicy, awacs.cloudformation.DescribeStacks, awacs.cloudformation.DescribeStackEvents, ], ), ] ), ) principal = AWSPrincipal(Ref("AWS::AccountId")) role = self.template.add_resource( iam.Role( "FunctionalTestRole", AssumeRolePolicyDocument=Policy( Statement=[ Statement( Effect="Allow", Action=[awacs.sts.AssumeRole], Principal=principal, ) ] ), Policies=[cfngin_policy], ) ) assumerole_policy = iam.Policy( PolicyName="AssumeRole", PolicyDocument=Policy( Statement=[ 
Statement( Effect="Allow", Resource=[GetAtt(role, "Arn")], Action=[awacs.sts.AssumeRole], ) ] ), ) user = self.template.add_resource( iam.User("FunctionalTestUser", Policies=[cfngin_policy, assumerole_policy]) ) key = self.template.add_resource( iam.AccessKey("FunctionalTestKey", Serial=1, UserName=Ref(user)) ) self.template.add_output(Output("User", Value=Ref(user))) self.template.add_output(Output("AccessKeyId", Value=Ref(key))) self.template.add_output( Output( "SecretAccessKey", Value=GetAtt("FunctionalTestKey", "SecretAccessKey") ) ) self.template.add_output( Output("FunctionalTestRole", Value=GetAtt(role, "Arn")) )
def create_template(self):
    """Create template.

    Registers two wait-condition handles and a fixed ``DummyId`` output.
    """
    template = self.template
    template.add_resource(WaitConditionHandle("Dummy"))
    template.add_output(Output("DummyId", Value="dummy-1234"))
    # Second wait-condition handle.
    template.add_resource(WaitConditionHandle("Dummy2"))
DBInstance( "MyDB", DBName=Ref(dbname), AllocatedStorage=Ref(dballocatedstorage), DBInstanceClass=Ref(dbclass), Engine="MySQL", EngineVersion="5.5", MasterUsername=Ref(dbuser), MasterUserPassword=Ref(dbpassword), DBSubnetGroupName=Ref(mydbsubnetgroup), VPCSecurityGroups=[Ref(myvpcsecuritygroup)], )) t.add_output( Output( "JDBCConnectionString", Description="JDBC connection string for database", Value=Join( "", [ "jdbc:mysql://", GetAtt("MyDB", "Endpoint.Address"), GetAtt("MyDB", "Endpoint.Port"), "/", Ref(dbname), ], ), )) print(t.to_json())
# --- Ghost/ECS fragment (starts inside a listener action list defined
# above this chunk) ---
# Registers a custom DBInit resource that invokes the DBInitFunction
# Lambda against the Ghost database endpoint, then exports the task
# role ARN, task-execution role ARN and the Ghost log-group name under
# the stack name.
# NOTE(review): the DB password is passed as a plain custom-resource
# property and will appear in the resource's properties.
TargetGroupArn=Ref(GhostTargetGroup)) ])) dbinit = t.add_resource( CustomDBInit("DBInit", ServiceToken=GetAtt(DBInitFunction, 'Arn'), Password=Ref(dbpassword), DBHost=GetAtt(ghost_db, "Endpoint.Address"), DependsOn=ghost_db)) # Create the required Outputs # Output the Task Role Arn t.add_output( Output("TaskRoleArn", Value=GetAtt(TaskRole, "Arn"), Description="Task Role Arn", Export=Export(Sub("${AWS::StackName}-TaskRoleArn")))) # Output the Task Execution Role Arn t.add_output( Output("TaskExecutionRoleArn", Value=GetAtt(TaskExecutionRole, "Arn"), Description="Task Execution Role Arn", Export=Export(Sub("${AWS::StackName}-TaskExecutionRoleArn")))) # Output the Log Group name t.add_output( Output("GhostLogGroupName", Value=Ref(GhostLogGroup), Description="Name of Ghost Log Group", Export=Export(Sub("${AWS::StackName}-GhostLogGroupName"))))
# --- ECR repository template (fragment: starts inside a
# `from troposphere import (...)` list whose opening is above this
# chunk) ---
# Creates an ECR repository named by the RepoName parameter and exports
# it as "<RepoName>-repo"; prints the template JSON.
# NOTE(review): `Export` and `Join` are used below but are not visible
# in the truncated import list — confirm they are imported above.
Output, Parameter, Ref, Template ) from troposphere.ecr import Repository t = Template() t.add_description("Effective DevOps in AWS: ECR Repository") t.add_parameter(Parameter( "RepoName", Type="String", Description="Name of the ECR repository to create" )) t.add_resource(Repository( "Repository", RepositoryName=Ref("RepoName") )) t.add_output(Output( "Repository", Description="ECR repository", Value=Ref("RepoName"), Export=Export(Join("-", [Ref("RepoName"), "repo"])), )) print(t.to_json())
# --- EC2 instance fragment (starts mid-call: the t.add_resource(
# wrapping this InstanceProfile is above this chunk) ---
# Attaches an instance profile, launches a t2.micro with user data and
# the profile, and outputs the instance's public IP plus an HTTP URL on
# ApplicationPort; prints the template JSON.
InstanceProfile("InstanceProfile", Path="/", Roles=[Ref("Role")])) t.add_resource( ec2.Instance( "instance", ImageId="ami-9e90a5fe", InstanceType="t2.micro", SecurityGroups=[Ref("SecurityGroup")], KeyName=Ref("KeyPair"), UserData=ud, IamInstanceProfile=Ref("InstanceProfile"), )) t.add_output( Output( "InstancePublicIp", Description="Public IP of our instance", Value=GetAtt("instance", "PublicIp"), )) t.add_output( Output( "WebUrl", Description="Application endpoint", Value=Join("", [ "http://", GetAtt("instance", "PublicDnsName"), ":", ApplicationPort, ]), )) print(t.to_json())
def add_bucket_output(template, s3_bucket):
    """Register a ``BucketName`` output for *s3_bucket* on *template*.

    Returns whatever ``template.add_output`` returns.
    """
    bucket_output = Output(
        "BucketName",
        Value=Ref(s3_bucket),
        Description="Name of S3 bucket",
    )
    return template.add_output(bucket_output)
# Module exporting the Main stack's Lambda security-group id as a
# CloudFormation Output/Export ("Main-LambdaSecurityGroupId").
# All names here are module-level and may be imported by other stack
# modules, so none are renamed.
from troposphere import Output, Export, GetAtt from stacks.main.resources import lambda_security_group STACK_NAME = "Main" lambda_security_group_id_output_name = "LambdaSecurityGroupId" lambda_security_group_id_export_name = "{}-{}".format( STACK_NAME, lambda_security_group_id_output_name) lambda_security_group_output = Output( lambda_security_group_id_output_name, Value=GetAtt(lambda_security_group, "GroupId"), Export=Export(lambda_security_group_id_export_name), )
# --- API Gateway fragment (starts inside the Lambda integration URI
# Join whose opening is above this chunk) ---
# Finishes the method integration against FoobarFunction, creates a
# "v1" deployment and an enabled API key bound to that stage, then
# outputs the invoke URL (region eu-west-1 hard-coded) and the key id.
"arn:aws:apigateway:eu-west-1:lambda:path/2015-03-31/functions/", GetAtt("FoobarFunction", "Arn"), "/invocations" ])), MethodResponses=[MethodResponse("CatResponse", StatusCode='200')])) # Create a deployment stage_name = 'v1' deployment = t.add_resource( Deployment("%sDeployment" % stage_name, RestApiId=Ref(rest_api), StageName=stage_name)) key = t.add_resource( ApiKey("ApiKey", StageKeys=[StageKey(RestApiId=Ref(rest_api), StageName=stage_name)], Enabled=True)) # Add the deployment endpoint as an output t.add_output([ Output( "ApiEndpoint", Value=Join("", [ "https://", Ref(rest_api), ".execute-api.eu-west-1.amazonaws.com/", stage_name ]), Description="Endpoint for this stage of the api"), Output("ApiKey", Value=Ref(key), Description="API key"), ]) print(t.to_json())
# Hook-test fixture: builds a sample public-read S3 bucket template with
# a BucketName output and an InstanceType parameter, then defines a
# generate_template stub that deliberately fails (`assert False`) so the
# surrounding hook machinery can verify it is never reached; the body of
# post_hook is outside this chunk.
# add lambda to post_hook folder # call lambda_deploy, lambda_invoke_lambda_delete from folder t = Template() t.add_description( "AWS CloudFormation Sample Template S3_Bucket: template showing " "how to create a publicly accessible S3 bucket.") s3bucket1 = t.add_resource(Bucket( "S3Bucket1", AccessControl=PublicRead, )) t.add_output( Output("BucketName", Value=Ref(s3bucket1), Description="Name of S3 bucket")) param_foo = t.add_parameter( troposphere.Parameter( 'InstanceType', Description='Type of EC2 instance', Type='String', )) def generate_template(invalid_context, invalid_config): # do not go beyond this point! assert False def post_hook():
def main(args):
    """Build the EFS/MountTarget template and write it to args.target_path.

    ``EFSOptions`` is a comma-delimited list whose slots are interpreted
    positionally (see the index comment below); "NONE" in a slot means
    "not provided".
    """
    t = Template()

    # [0 shared_dir, 1 efs_fs_id, 2 performance_mode, 3 efs_kms_key_id,
    #  4 provisioned_throughput, 5 encrypted, 6 throughput_mode,
    #  7 exists_valid_mt]
    efs_options = t.add_parameter(
        Parameter(
            "EFSOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of efs related options, "
            "8 parameters in total",
        ))
    compute_security_group = t.add_parameter(
        Parameter("ComputeSecurityGroup", Type="String",
                  Description="SecurityGroup for Mount Target"))
    subnet_id = t.add_parameter(
        Parameter("SubnetId", Type="String",
                  Description="SubnetId for Mount Target"))

    # Create a new file system only when a shared dir is requested and no
    # existing file-system id was supplied.
    create_efs = t.add_condition(
        "CreateEFS",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")),
            Equals(Select(str(1), Ref(efs_options)), "NONE")),
    )
    # Create a mount target only when no valid one already exists.
    create_mt = t.add_condition(
        "CreateMT",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")),
            Equals(Select(str(7), Ref(efs_options)), "NONE")),
    )
    use_performance_mode = t.add_condition(
        "UsePerformanceMode",
        Not(Equals(Select(str(2), Ref(efs_options)), "NONE")))
    use_efs_encryption = t.add_condition(
        "UseEFSEncryption",
        Equals(Select(str(5), Ref(efs_options)), "true"))
    # A KMS key is only meaningful when encryption is on.
    use_efs_kms_key = t.add_condition(
        "UseEFSKMSKey",
        And(Condition(use_efs_encryption),
            Not(Equals(Select(str(3), Ref(efs_options)), "NONE"))))
    use_throughput_mode = t.add_condition(
        "UseThroughputMode",
        Not(Equals(Select(str(6), Ref(efs_options)), "NONE")))
    use_provisioned = t.add_condition(
        "UseProvisioned",
        Equals(Select(str(6), Ref(efs_options)), "provisioned"))
    # Provisioned throughput only applies in "provisioned" mode.
    use_provisioned_throughput = t.add_condition(
        "UseProvisionedThroughput",
        And(Condition(use_provisioned),
            Not(Equals(Select(str(4), Ref(efs_options)), "NONE"))),
    )

    fs = t.add_resource(
        FileSystem(
            "EFSFS",
            PerformanceMode=If(use_performance_mode,
                               Select(str(2), Ref(efs_options)), NoValue),
            ProvisionedThroughputInMibps=If(
                use_provisioned_throughput,
                Select(str(4), Ref(efs_options)), NoValue),
            ThroughputMode=If(use_throughput_mode,
                              Select(str(6), Ref(efs_options)), NoValue),
            Encrypted=If(use_efs_encryption,
                         Select(str(5), Ref(efs_options)), NoValue),
            KmsKeyId=If(use_efs_kms_key,
                        Select(str(3), Ref(efs_options)), NoValue),
            Condition=create_efs,
        ))
    # Mount target attaches either the new FS or the pre-existing one.
    t.add_resource(
        MountTarget(
            "EFSMT",
            FileSystemId=If(create_efs, Ref(fs),
                            Select(str(1), Ref(efs_options))),
            SecurityGroups=[Ref(compute_security_group)],
            SubnetId=Ref(subnet_id),
            Condition=create_mt,
        ))
    t.add_output(
        Output(
            "FileSystemId",
            Description="ID of the FileSystem",
            Value=If(create_efs, Ref(fs), Select("1", Ref(efs_options))),
        ))

    # Write the rendered template to the requested path.
    # Fixed: use a context manager so the file is closed even if
    # to_json() or the write raises (was open/write/close).
    json_file_path = args.target_path
    with open(json_file_path, "w") as output_file:
        output_file.write(t.to_json())
# --- CloudFront fragment (starts inside the template description
# string whose opening is above this chunk) ---
# Declares an S3 DNS-name parameter, creates a CloudFront distribution
# with that bucket as its single origin (allow-all viewer policy), and
# outputs the distribution id and its http:// domain-name URL.
"S3 origin. " "**WARNING** This template creates a CloudFront distribution. " "You will be billed for the AWS resources used if you create " "a stack from this template.") s3dnsname = t.add_parameter( Parameter( "S3DNSNAme", Description="The DNS name of an existing S3 bucket to use as the " "Cloudfront distribution origin", Type="String", )) myDistribution = t.add_resource( Distribution( "myDistribution", DistributionConfig=DistributionConfig( Origins=[Origin(Id="Origin 1", DomainName=Ref(s3dnsname))], DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="Origin 1", ViewerProtocolPolicy="allow-all"), Enabled=True))) t.add_output([ Output("DistributionId", Value=Ref(myDistribution)), Output("DistributionName", Value=Join( "", ["http://", GetAtt(myDistribution, "DomainName")])), ]) print(t.to_json())
# --- CloudFront-over-S3 fragment (starts inside the Origins list of a
# DistributionConfig whose opening is above this chunk) ---
# Finishes an http2-enabled distribution fronting an S3 bucket origin
# (query strings not forwarded, allow-all viewer policy) and outputs
# the bucket name, distribution id, and http:// distribution URL.
Origin( Id="Origin 1", DomainName=GetAtt(s3bucket, "DomainName"), S3OriginConfig=S3OriginConfig(), ) ], DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="Origin 1", ForwardedValues=ForwardedValues(QueryString=False), ViewerProtocolPolicy="allow-all", ), Enabled=True, HttpVersion="http2", ), )) t.add_output([ Output( "BucketName", Value=Ref(s3bucket), Description="Name of S3 bucket to hold website content", ), Output("DistributionId", Value=Ref(myDistribution)), Output( "DistributionName", Value=Join("", ["http://", GetAtt(myDistribution, "DomainName")]), ), ]) print(t.to_json())
# Builds an auto-scaling-group stack: parameters for min/max/desired
# instance counts, instance type, name tag, termination policies, root
# device size/name/type and an instance profile; wires security-group,
# user-data (4 slots), subnet (public or private per self.private_subnet)
# and ELB parameters; creates a launch configuration whose user data
# bootstraps cfn-init via a downloaded helper script; creates the ASG
# with a rolling-update policy keeping the desired count in service, and
# outputs the ASG name.
# NOTE(review): the Description strings of the Max/Desired-instance and
# termination-policy parameters reuse copy/pasted text ("Minimum # of
# instances" / "Instance Type"); descriptions only, no functional effect.
def build_template(self): t = self._init_template() min_inst = t.add_parameter( Parameter('Input{}ASGMinInstances'.format(self.stack_name), Type='String', Default='2', Description='{} Minimum # of instances'.format( self.stack_name))) max_inst = t.add_parameter( Parameter('Input{}ASGMaxInstances'.format(self.stack_name), Type='String', Default='10', Description='{} Minimum # of instances'.format( self.stack_name))) des_inst = t.add_parameter( Parameter('Input{}ASGDesiredInstances'.format(self.stack_name), Type='String', Default='2', Description='{} Minimum # of instances'.format( self.stack_name))) inst_type = t.add_parameter( Parameter('Input{}ASGInstanceType'.format(self.stack_name), Type='String', Default='t2.micro', Description='{} Instance Type'.format(self.stack_name))) inst_tag_name = t.add_parameter( Parameter('Input{}ASGTagName'.format(self.stack_name), Type='String', Default='{}ASG'.format(self.name), Description='{} Instance Name Tag'.format( self.stack_name))) # termination policies term_policies = t.add_parameter( Parameter('Input{}ASGTerminationPolicies'.format(self.stack_name), Type='String', Default='Default', Description='{} Instance Type'.format(self.stack_name))) # root file size root_device_size = t.add_parameter( Parameter("Input{}ASGRootDeviceSize".format(self.stack_name), Type="String", Default="20", Description="{} Root Device File Size".format( self.stack_name))) # root device name root_device_name = t.add_parameter( Parameter("Input{}ASGRootDeviceName".format(self.stack_name), Type="String", Default="/dev/xvda", Description="{} Root Device Name".format( self.stack_name))) # root device type root_device_type = t.add_parameter( Parameter("Input{}ASGRootDeviceType".format(self.stack_name), Type="String", Default="gp2", Description="{} Root Device Type".format( self.stack_name))) # instance profile instance_profile_param = t.add_parameter( Parameter(self.iam_profile.output_instance_profile(), Type='String')) min_in_service = Ref(des_inst) # 
sec groups sec_groups = [ Ref( t.add_parameter( Parameter(sg.output_security_group(), Type='String'))) for sg in self.security_groups ] # user data params user_data = [] for i in range(0, 4): user_data.append( Ref( t.add_parameter( Parameter('{}UserData{}'.format(self.stack_name, i), Type='String', Default=' ', Description='{} UserData #{}'.format( self.stack_name, i))))) # subnet list if self.private_subnet: sn_list = [i for i in self.vpc_stack.output_private_subnets()] associate_public_ip = False else: sn_list = [i for i in self.vpc_stack.output_public_subnets()] associate_public_ip = True sn_list = [ Ref(t.add_parameter(Parameter(i, Type='String'))) for i in sn_list ] elb_list = [ Ref(t.add_parameter(Parameter(elb.output_elb(), Type='String'))) for elb in self.elb_stacks ] lconfig = t.add_resource( autoscaling.LaunchConfiguration( '{}LaunchConfiguration'.format(self.name), AssociatePublicIpAddress=associate_public_ip, IamInstanceProfile=Ref(instance_profile_param), BlockDeviceMappings=[ ec2.BlockDeviceMapping( DeviceName=Ref(root_device_name), Ebs=ec2.EBSBlockDevice( VolumeSize=Ref(root_device_size), VolumeType=Ref(root_device_type), DeleteOnTermination=True)) ], InstanceType=Ref(inst_type), SecurityGroups=sec_groups, ImageId=self.ami, UserData=Base64( Join( '', [ "#!/bin/bash\n", "exec > >(tee /var/log/user-data.log|logger ", "-t user-data -s 2>/dev/console) 2>&1\n", ] + user_data + [ "\n", "\n", "curl -L https://gist.github.com/ibejohn818", "/aa2bcd6743a59f62e1baa098d6365a61/raw", "/install-cfn-init.sh", " -o /tmp/install-cfn-init.sh && chmod +x /tmp/install-cfn-init.sh", # noqa "\n", "/tmp/install-cfn-init.sh ", " {}AutoScalingGroup".format(self.stack_name), " ", Ref("AWS::StackName"), " ", Ref("AWS::Region"), "\n", ])))) if self.keyname: lconfig.KeyName = self.keyname asg = t.add_resource( autoscaling.AutoScalingGroup( '{}AutoScalingGroup'.format(self.stack_name), LaunchConfigurationName=Ref(lconfig), MinSize=Ref(min_inst), MaxSize=Ref(max_inst), 
DesiredCapacity=Ref(des_inst), VPCZoneIdentifier=sn_list, HealthCheckType='EC2', TerminationPolicies=[Ref(term_policies)], LoadBalancerNames=elb_list, Tags=autoscaling.Tags(Name=Ref(inst_tag_name)), UpdatePolicy=UpdatePolicy( AutoScalingRollingUpdate=AutoScalingRollingUpdate( PauseTime=self.pause_time, MinInstancesInService=min_in_service, MaxBatchSize=str(self.update_policy_instance_count), WaitOnResourceSignals=True)))) t.add_output([Output('{}ASG'.format(self.stack_name), Value=Ref(asg))]) return t
# --- Fargate service fragment (starts mid-call: the assignment of this
# add_resource result, referenced below as `ghost_service`, is above
# this chunk) ---
# Defines the Ghost Fargate service: one task on the imported cluster,
# attached to the imported target group on container port 2368, placed
# in the dependency stack's subnets/security group; outputs the service
# name and prints the template JSON.
Service('GhostService', Cluster=Ref(cluster), DesiredCount=1, TaskDefinition=Ref(ghost_task_definition), LaunchType='FARGATE', LoadBalancers=[ LoadBalancer(ContainerName='ghost', ContainerPort=2368, TargetGroupArn=ImportValue( Sub("${DependencyStackName}-GhostTG"))) ], NetworkConfiguration=NetworkConfiguration( AwsvpcConfiguration=AwsvpcConfiguration( Subnets=[ ImportValue(Sub("${DependencyStackName}-Subnet1")), ImportValue(Sub("${DependencyStackName}-Subnet2")) ], SecurityGroups=[ ImportValue(Sub("${DependencyStackName}-GhostSG")) ], )))) # Create the required Outputs # Output the Fargate Service Name t.add_output( Output("GhostFargateServiceName", Value=GetAtt(ghost_service, "Name"), Description="Ghost Fargate Service Name")) print(t.to_json())
def o_dynamodb_table_name(self):
    """Build the stack Output exposing the DynamoDB table name.

    The value is exported as ``<stack-name>-TableName`` so that other
    stacks can import it.
    """
    output_id = 'TableName'
    export_name = Sub("${AWS::StackName}-%s" % output_id)
    return Output(
        output_id,
        Description='The name of the DynamoDB table',
        Value=self.r_table.ref(),
        Export=Export(export_name),
    )
# --- Ambari cluster fragment (truncated at both ends: the
# t.add_resource( opening this AutoScalingGroup is above this chunk, and
# the final Output's arguments continue below it) ---
# Defines the additional-nodes ASG (0..AdditionalInstanceCount, waits up
# to 30 minutes for one resource signal per node) and outputs the node
# ids plus Ambari UI URL and SSH command built from the Ambari node's
# public DNS name.
AutoScalingGroup( "AdditionalNodes", DesiredCapacity=Ref(AdditionalInstanceCount), MinSize=0, MaxSize=Ref(AdditionalInstanceCount), VPCZoneIdentifier=[Ref(SubnetId)], LaunchConfigurationName=Ref(AdditionalNodeLaunchConfig), DependsOn="AmbariNode", CreationPolicy=CreationPolicy(ResourceSignal=ResourceSignal( Count=Ref(AdditionalInstanceCount), Timeout="PT30M"), ), Tags=[Tag("Name", ref_stack_name, True)], )) t.add_output([ Output("IIAN", Description="Instance ID of additional nodes", Value=Ref('AdditionalNodes')), Output( "AmbariURL", Description="URL of Ambari UI", Value=Join("", ["http://", GetAtt('AmbariNode', 'PublicDnsName'), ":8080"]), ), Output( "AmbariSSH", Description="SSH to the Ambari Node", Value=Join("", ["ssh centos@", GetAtt('AmbariNode', 'PublicDnsName')]), ), Output("AmbariServiceInstanceId",
# --- ECS scaling fragment (truncated at both ends: the alarm whose
# ComparisonOperator opens this chunk, and the PublicSubnet output's
# remaining arguments, are outside it) ---
# For each reservation metric, pairs the alarm with a ChangeInCapacity
# ScalingPolicy on ECSAutoScalingGroup, then exports the cluster name,
# VPC id and public subnet under "${AWS::StackName}-..." names.
ComparisonOperator=value['operator'], AlarmActions=[ Ref("{}{}".format(value['alarmPrefix'], reservation)) ])) t.add_resource( ScalingPolicy( "{}{}".format(value['alarmPrefix'], reservation), ScalingAdjustment=value['adjustment'], AutoScalingGroupName=Ref("ECSAutoScalingGroup"), AdjustmentType="ChangeInCapacity", )) t.add_output( Output( "Cluster", Description="ECS Cluster Name", Value=Ref("ECSCluster"), Export=Export(Sub("${AWS::StackName}-id")), )) t.add_output( Output( "VpcId", Description="VpcId", Value=Ref("VpcId"), Export=Export(Sub("${AWS::StackName}-vpc-id")), )) t.add_output( Output( "PublicSubnet", Description="PublicSubnet",
# --- NetKAN infrastructure fragment (truncated at both ends: the queue
# whose FifoQueue argument opens this chunk, and the rest of the
# NetKANStatus table definition, are outside it) ---
# Declares the Adding/Mirroring FIFO queues (20s long polling), emits
# URL and ARN outputs for each of the four queues, and begins the
# NetKANStatus DynamoDB table keyed on ModIdentifier.
FifoQueue=True)) addqueue = t.add_resource( Queue("Adding", QueueName="Adding.fifo", ReceiveMessageWaitTimeSeconds=20, FifoQueue=True)) mirrorqueue = t.add_resource( Queue("Mirroring", QueueName="Mirroring.fifo", ReceiveMessageWaitTimeSeconds=20, FifoQueue=True)) for queue in [inbound, outbound, addqueue, mirrorqueue]: t.add_output([ Output("{}QueueURL".format(queue.title), Description="{} SQS Queue URL".format(queue.title), Value=Ref(queue)), Output("{}QueueARN".format(queue.title), Description="ARN of {} SQS Queue".format(queue.title), Value=GetAtt(queue, "Arn")), ]) # DyanamoDB: NetKAN Status netkan_db = t.add_resource( Table( "NetKANStatus", AttributeDefinitions=[ AttributeDefinition(AttributeName="ModIdentifier", AttributeType="S"), ], KeySchema=[KeySchema(AttributeName="ModIdentifier", KeyType="HASH")],
def test_noproperty(self):
    """A plain Output must not serialize a 'Properties' key."""
    output = Output("MyOutput", Value="myvalue")
    serialized = output.to_dict()
    # Indexing the absent key must raise rather than return a default.
    with self.assertRaises(KeyError):
        serialized['Properties']
# WordPress stack tail: creates the MySQL 5.7 RDS instance (MultiAZ in
# prod via the IsProd condition, not publicly accessible), outputs the
# site URL from the ELB's DNS name, registers the parameters, and
# prints the template as YAML.
# NOTE(review): the add_resource result is bound to `rds`, shadowing
# what appears to be the troposphere `rds` module used on the same
# statement (`rds.DBInstance`). This works because the right-hand side
# is evaluated first, but any later `rds.<class>` module access would
# break; consider renaming the variable (not done here in case other
# parts of the file reference it).
rds = template.add_resource( rds.DBInstance( "WordPressRDS", DBName=Ref('pDBName'), AllocatedStorage=5, DBInstanceClass=Ref('pRDSInstance'), Engine="MySQL", EngineVersion="5.7", MasterUsername="******", MasterUserPassword="******", VPCSecurityGroups=[Ref('RDSSG')], #DBSubnetGroupName='default2', # Depending on your VPC this might be required MultiAZ=If('IsProd', 'true', 'false'), # Prod gets MultiAZ support PubliclyAccessible='false', )) # This is the URL we should be able to use to access Wordpress. template.add_output([ Output("URL", Description="Website URL", Value=Join('', ["http://", GetAtt('ELB', 'DNSName')])) ]) # Makes the Parameters, Would generally call any other subroutines here. template.add_parameter(create_template_params()) # This just outputs the YAML to STDOUT, can also make JSON. # We could programmatically push this template to AWS or to another tool or service to get it deployed or tested. print(template.to_yaml()) #print(template.to_json())