class CFLab:
    """Builds the CloudFormation template (VPC/Subnets/ELB/ASG) for the lab."""

    def __init__(self, config_dictionary):
        """Compose the CloudFormation template that deploys the solution.

        @param config_dictionary [dict] collection of keyword arguments for
            this class implementation; expected keys: 'globals', 'template'.
        """
        self.globals = config_dictionary.get('globals', {})
        self.template_args = config_dictionary.get('template', {})

        self.template = Template()
        self.template.description = self.globals.get('description', '')

        # Build the network first, then the compute layer that lives in it.
        self.vpc_generator = VPCGenerator(self.template_args)
        self.ec2_generator = EC2Generator(
            self.template_args,
            self.vpc_generator.vpc,
            self.vpc_generator.subnets
        )

        # Register VPC resources before EC2 resources (same order as the
        # generators produced them), then expose the EC2 outputs.
        combined = list(self.vpc_generator.resources)
        combined.extend(self.ec2_generator.resources)
        for item in combined:
            self.template.add_resource(item)
        for stack_output in self.ec2_generator.outputs:
            self.template.add_output(stack_output)
def sceptre_handler(sceptre_user_data):
    """Render a JSON CloudFormation template: a VPC with DNS support enabled,
    an Internet Gateway, and the attachment between them.

    :param sceptre_user_data: dict with a "cidr_block" entry for the VPC.
    :returns: the template serialized as a JSON string.
    """
    template = Template()

    virtual_private_cloud = template.add_resource(VPC(
        "VirtualPrivateCloud",
        CidrBlock=sceptre_user_data["cidr_block"],
        InstanceTenancy="default",
        EnableDnsSupport=True,
        EnableDnsHostnames=True,
    ))

    gateway = template.add_resource(InternetGateway(
        "InternetGateway",
    ))

    # Attach the gateway so instances in public subnets can reach the internet.
    template.add_resource(VPCGatewayAttachment(
        "IGWAttachment",
        VpcId=Ref(virtual_private_cloud),
        InternetGatewayId=Ref(gateway),
    ))

    template.add_output(Output(
        "VpcId",
        Description="New VPC ID",
        Value=Ref(virtual_private_cloud)
    ))

    return template.to_json()
class CloudFormationTemplate(CloudTemplate):
    """Renders a CloudFormation template for the EC2 instance groups
    described by ``self.source`` (populated by the base class)."""

    def __init__(self):
        # BUG FIX: super() must be given the *derived* class. The original
        # ``super(CloudTemplate, self).__init__()`` started the MRO lookup
        # *after* CloudTemplate, so CloudTemplate.__init__ was never run.
        super(CloudFormationTemplate, self).__init__()

    def generate(self):
        """Build the template JSON from ``self.source['instance_groups']``.

        :returns: the rendered template as a JSON string (also stored on
            ``self.template``, matching the original contract).
        """
        self.template = Template()
        for instance in self.source['instance_groups']:
            image_id = instance['image_id']
            instance_type = instance['type']
            key_pair = instance['key_pair']
            name = instance['name']
            # NOTE(review): the logical ID "Ec2Instance" is reused on every
            # iteration, so a config with several instance groups keeps only
            # the last one — confirm whether multiple groups are expected.
            ec2_instance = self.template.add_resource(ec2.Instance(
                "Ec2Instance",
                ImageId=image_id,
                InstanceType=instance_type,
                KeyName=key_pair,
                SecurityGroups=[name],
                UserData=Base64("80")
            ))
            self.template.add_output([
                Output(
                    "InstanceId",
                    Description="InstanceId of the newly created EC2 instance",
                    Value=Ref(ec2_instance),
                ),
                Output(
                    "AZ",
                    Description="Availability Zone of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "AvailabilityZone"),
                ),
                Output(
                    "PublicIP",
                    Description="Public IP address of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "PublicIp"),
                ),
                Output(
                    "PrivateIP",
                    Description="Private IP address of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "PrivateIp"),
                ),
                Output(
                    "PublicDNS",
                    Description="Public DNSName of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "PublicDnsName"),
                ),
                Output(
                    "PrivateDNS",
                    Description="Private DNSName of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "PrivateDnsName"),
                ),
            ])
        self.template = self.template.to_json()
        return self.template
def test_ne(self):
    """Templates built from different contents must compare unequal."""
    first = Template(Description='foo1', Metadata='bar1')
    first.add_resource(Bucket('Baz1'))
    first.add_output(Output('qux1', Value='qux1'))

    second = Template(Description='foo2', Metadata='bar2')
    second.add_resource(Bucket('Baz2'))
    second.add_output(Output('qux2', Value='qux2'))

    self.assertNotEqual(first, second)
def test_s3_bucket(self):
    """YAML serialization of a public-read bucket matches the fixture."""
    template = Template()
    template.add_description("S3 Bucket Example")

    bucket = s3.Bucket(
        "S3Bucket",
        AccessControl=s3.PublicRead,
    )
    template.add_resource(bucket)

    template.add_output(Output(
        "BucketName",
        Value=Ref(bucket),
        Description="Name of S3 bucket to hold website content"
    ))

    self.assertEqual(s3_bucket_yaml, template.to_yaml())
def test_hash(self):
    """Two templates assembled from identical parts must hash equal
    (a set of both collapses to a single element)."""
    shared_resource = Bucket('Baz')
    shared_output = Output('qux', Value='qux')

    first = Template(Description='bar', Metadata='foo')
    second = Template(Description='bar', Metadata='foo')
    for template in (first, second):
        template.add_resource(shared_resource)
        template.add_output(shared_output)

    self.assertEqual(len({first, second}), 1)
def output_template(self):
    """Assemble a template from this object's parameters, mappings,
    resources and outputs, and print it as JSON.

    Fix: the original used a Python-2 ``print`` *statement*
    (``print template.to_json()``), which is a SyntaxError under
    Python 3 — the rest of this file uses the ``print()`` function.
    The trailing bare ``return`` was redundant and is dropped.
    """
    template = Template()
    for parameter in self.parameters:
        template.add_parameter(parameter)
    # Each mapping is a (name, mapping_dict) pair.
    for mapping in self.mappings:
        template.add_mapping(mapping[0], mapping[1])
    for resource in self.resources:
        template.add_resource(resource)
    for output in self.outputs:
        template.add_output(output)
    print(template.to_json())
def _generate_template(tms=1, within_vpc=False):
    """Build the Flink cluster template and return it as a JSON string.

    :param tms: number of TaskManager instances to create (default 1).
    :param within_vpc: when True, also create VPC networking resources and
        place instances in the generated subnets; otherwise the security
        groups / instances are created with ``vpc``/subnets set to None.
    """
    t = Template()
    t.add_description(FLINK_TEMPLATE_DESCRIPTION)
    t.add_version(FLINK_TEMPLATE_VERSION)
    # Template metadata records when this template was generated.
    t.add_metadata({'LastUpdated': datetime.datetime.now().strftime('%c')})

    # mappings
    mappings.add_mappings(t)

    # parameters
    parameters.add_parameters(t)

    vpc = None
    subnet_pri = None
    subnet_pub = None
    if within_vpc:
        # networking resources (VPC plus one private and one public subnet)
        vpc, subnet_pri, subnet_pub = _define_vpc(t)

    # security groups; taskmanagers get no ingress CIDR parameter of their own
    sg_ssh = t.add_resource(securitygroups.ssh(
        parameters.ssh_location, vpc))
    sg_jobmanager = t.add_resource(securitygroups.jobmanager(
        parameters.http_location, vpc))
    sg_taskmanager = t.add_resource(securitygroups.taskmanager(None, vpc))

    # The single JobManager lives in the public subnet (index 0).
    jobmanager = t.add_resource(instances.jobmanager(
        0,
        [Ref(sg_ssh), Ref(sg_jobmanager)],
        within_vpc,
        subnet_pub
    ))

    prefix = "JobManager00"
    t.add_output(outputs.ssh_to(jobmanager, prefix))
    # Flink's web dashboard listens on port 8081 of the JobManager.
    t.add_output(Output(
        "FlinkWebGui",
        Description="Flink web interface",
        Value=Join("", [
            'http://', GetAtt(jobmanager, "PublicDnsName"), ':8081'
        ])
    ))

    # TaskManagers go in the private subnet; SSH access is routed through
    # the JobManager acting as a bastion.
    for index in range(0, tms):
        i = t.add_resource(instances.taskmanager(
            index,
            jobmanager,
            [Ref(sg_ssh), Ref(sg_taskmanager)],
            within_vpc,
            subnet_pri
        ))
        prefix = "TaskManager%2.2d" % index
        t.add_output(outputs.ssh_to(i, prefix, bastion=jobmanager))

    return t.to_json()
def GenerateGlobalLayer():
    """Return a troposphere Template for the global layer: a DynamoDB
    table keyed by task name, with its name exposed as a stack output."""
    template = Template()
    template.add_description("""\
Global Layer
""")

    # Stack-name parameter; registered on the template even though nothing
    # below references it directly.
    template.add_parameter(Parameter(
        "StackName",
        Description="Environment Name (default: StepGlobals)",
        Type="String",
        Default="StepGlobals",
    ))

    # Minimal provisioned throughput (1 read / 1 write capacity unit).
    schedule_table = dynamodb.Table(
        "scheduleTable",
        AttributeDefinitions=[
            dynamodb.AttributeDefinition("taskname", "S"),
        ],
        KeySchema=[
            dynamodb.Key("taskname", "HASH")
        ],
        ProvisionedThroughput=dynamodb.ProvisionedThroughput(
            1, 1
        )
    )
    template.add_resource(schedule_table)

    template.add_output([
        Output(
            "crontabtablename",
            Description="Crontab Table Name",
            Value=Ref(schedule_table),
        )
    ])
    return template
def build_vpc_template(vpc_config):
    """Build the CloudFormation template for a VPC described by ``vpc_config``.

    :param vpc_config: dict with keys "Name", "IP Range" and "Subnets"
        (each subnet dict carries at least "IP Count").
    :returns: the assembled troposphere ``Template``.

    Idiom fix: the original used list comprehensions purely for their side
    effects (``[t.add_resource(x) for x in ...]``), building and discarding
    throwaway lists; these are now plain ``for`` loops. Behavior and the
    order of all add_resource/add_output calls are unchanged.
    """
    vpc_tags = [{"Key": "Application", "Value": Ref("AWS::StackId")}]
    vpc = ec2.VPC(name=vpc_config["Name"],
                  CidrBlock=vpc_config["IP Range"],
                  Tags=vpc_tags)
    t = Template()
    # NOTE: sorts the caller's subnet list in place, largest "IP Count" first.
    vpc_config["Subnets"].sort(key=lambda net: net["IP Count"], reverse=True)
    subnets = build_subnets(vpc_config["Subnets"],
                            vpc_config["IP Range"],
                            vpc_config["Name"])
    # Propagate the Application tag onto every subnet.
    for subnet in subnets:
        subnet.Tags.append(vpc.Tags[0])
    private_route_table = build_private_route_table(vpc_config["Name"])
    public_route_table = build_public_route_table(vpc_config["Name"])
    t.add_resource(vpc)
    t.add_resource(private_route_table)
    t.add_resource(public_route_table)
    t.add_resource(build_public_route())
    for association in build_public_route_table_associations(
            vpc_config["Subnets"]):
        t.add_resource(association)
    for subnet in subnets:
        t.add_resource(subnet)
    for gateway_attachment in build_public_gateway(vpc_config["Name"]):
        t.add_resource(gateway_attachment)
    # Helper returns (resource, output); index 0 is the security group itself.
    management_group = build_management_security_group(vpc_config["Name"])
    t.add_resource(management_group[0])
    default_group = build_default_security_group(vpc_config["Name"])
    t.add_resource(default_group[0])
    t.add_resource(default_group[1])
    t.add_output(management_group[1])
    t.add_output(default_group[2])
    # NOTE(review): Output(name=...) and subnet.name rely on the project's
    # troposphere wrapper accepting a "name" keyword — confirm against the
    # pinned troposphere version.
    t.add_output(Output(name="vpcId", Value=Ref(vpc_config["Name"])))
    for subnet in subnets:
        t.add_output(Output(name=subnet.name, Value=Ref(subnet.name)))
    return t
key = t.add_resource(ApiKey( "ApiKey", StageKeys=[StageKey( RestApiId=Ref(rest_api), StageName=Ref(stage) )] )) # Add the deployment endpoint as an output t.add_output([ Output( "ApiEndpoint", Value=Join("", [ "https://", Ref(rest_api), ".execute-api.eu-west-1.amazonaws.com/", stage_name ]), Description="Endpoint for this stage of the api" ), Output( "ApiKey", Value=Ref(key), Description="API key" ), ]) print(t.to_json())
class RunnerUnregisterLambda:
    """Troposphere builder for a stack deploying the runner-unregister
    Lambda: parameters, IAM execution role, the function (with inlined
    source), and outputs.

    Call order matters: add_parameters() must run before add_resources()
    (which references the parameter attributes), and add_resources() before
    add_outputs() (which references the function attribute).
    """

    def __init__(self, sceptre_user_data):
        # sceptre_user_data is stored but not read by the methods below.
        self.template = Template()
        self.sceptre_user_data = sceptre_user_data
        self.template.add_description(
            "Lambda Function for Unregisteration of Runners")

    def add_parameters(self):
        """Register the stack parameters: Lambda handler name and runtime."""
        self.runner_lambda_handler = self.template.add_parameter(
            Parameter(
                "RunnerLambdaHandler",
                Description="runner_lambda_handler",
                Type="String",
            ))
        self.runner_lambda_runtime = self.template.add_parameter(
            Parameter(
                "RunnerLambdaRuntime",
                Description="runner_lambda_runtime",
                Type="String",
            ))

    def add_resources(self):
        """Create the Lambda execution role and the function itself.

        Reads the function source from lambda_functions/unregister_runner.py
        at template-build time and inlines it via Code.ZipFile.
        """
        self.lambda_execution_role = self.template.add_resource(
            Role(
                "LambdaExecutionRole",
                Path="/",
                # NOTE(review): two *FullAccess managed policies — consider a
                # least-privilege review.
                ManagedPolicyArns=[
                    "arn:aws:iam::aws:policy/AmazonSSMFullAccess",
                    "arn:aws:iam::aws:policy/AutoScalingFullAccess",
                ],
                Policies=[
                    Policy(
                        PolicyName="root",
                        # Inline policy granting full CloudWatch Logs access.
                        PolicyDocument={
                            "Version": "2012-10-17",
                            "Statement": [{
                                "Action": ["logs:*"],
                                "Resource": "arn:aws:logs:*:*:*",
                                "Effect": "Allow",
                            }],
                        },
                    )
                ],
                # Trust policy: only the Lambda service may assume this role.
                AssumeRolePolicyDocument={
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Action": ["sts:AssumeRole"],
                        "Effect": "Allow",
                        "Principal": {
                            "Service": ["lambda.amazonaws.com"]
                        },
                    }],
                },
            ))
        with open("lambda_functions/unregister_runner.py", "r") as f:
            self.runner_unregister_code = f.read()
        self.runner_unregister_function = self.template.add_resource(
            Function(
                "RunnerUnregisterFunction",
                Code=Code(ZipFile=self.runner_unregister_code),
                # Handler/runtime come from the parameters registered in
                # add_parameters().
                Handler=Ref(self.runner_lambda_handler),
                Role=GetAtt("LambdaExecutionRole", "Arn"),
                Runtime=Ref(self.runner_lambda_runtime),
                MemorySize="128",
                Timeout="30",
            ))

    def add_outputs(self):
        """Expose the function's logical reference and its ARN as outputs."""
        self.template.add_output([
            Output(
                "RunnerUnregisterFunction",
                Value=Ref(self.runner_unregister_function),
            ),
            Output(
                "RunnerUnregisterFunctionArn",
                Value=GetAtt(self.runner_unregister_function, "Arn"),
            ),
        ])
"""Generating CloudFormation template.""" from troposphere import (Export, Join, Output, Parameter, Ref, Template) from troposphere.ecr import Repository t = Template() t.add_description("Ganesh DevOps in AWS: ECR Repository") t.add_parameter( Parameter("RepoName", Type="String", Description="Name of the ECR repository to create")) t.add_resource(Repository("Repository", RepositoryName=Ref("RepoName"))) t.add_output( Output( "Repository", Description="ECR repository", Value=Ref("RepoName"), Export=Export(Join("-", [Ref("RepoName"), "repo"])), )) print(t.to_json())
def main():
    """Generates the CloudFormation template.

    Builds a two-AZ network: one VPC, two public and two private subnets,
    an Internet Gateway with a default route for the public subnets, and a
    NAT Gateway per AZ with default routes for the private subnets. The
    finished template is printed as JSON.
    """
    template = Template()
    template.add_version("2010-09-09")
    # NOTE(review): "Availabilty" typo is preserved — it is part of the
    # emitted template description.
    template.add_description(
        'This template deploys a VPC, with a pair of public and private subnets spread ' +
        'across two Availabilty Zones. It deploys an Internet Gateway, with a default ' +
        'route on the public subnets. It deploys a pair of NAT Gateways (one in each AZ), ' +
        'and default routes for them in the private subnets.'
    )

    # Parameters
    # EnvironmentName
    env_param = template.add_parameter(Parameter(
        'EnvironmentName',
        Type='String',
        Description='An environment name that will be prefixed to resource names',
    ))

    # VPC CIDR
    vpc_cidr_param = template.add_parameter(Parameter(
        'VpcCIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for this VPC',
        Default='10.192.0.0/16',
    ))

    # PublicSubnet1CIDR
    pub_subnet_1_param = template.add_parameter(Parameter(
        'PublicSubnet1CIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for the public subnet ' +
                    'in the first Availability Zone',
        Default='10.192.10.0/24',
    ))

    # PublicSubnet2CIDR
    pub_subnet_2_param = template.add_parameter(Parameter(
        'PublicSubnet2CIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for the public subnet ' +
                    'in the second Availability Zone',
        Default='10.192.11.0/24',
    ))

    # PrivateSubnet1CIDR
    prvt_subnet_1_param = template.add_parameter(Parameter(
        'PrivateSubnet1CIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for the private subnet ' +
                    'in the first Availability Zone',
        Default='10.192.20.0/24',
    ))

    # PrivateSubnet2CIDR
    prvt_subnet_2_param = template.add_parameter(Parameter(
        'PrivateSubnet2CIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for the private subnet ' +
                    'in the second Availability Zone',
        Default='10.192.21.0/24',
    ))

    # Resources
    # VPC
    vpc = template.add_resource(
        VPC(
            'VPC',
            CidrBlock=Ref(vpc_cidr_param),
            Tags=Tags(Name=Ref(env_param)),
        )
    )

    # InternetGateway
    internet_gateway = template.add_resource(
        InternetGateway(
            'InternetGateway',
            Tags=Tags(Name=Ref(env_param)),
        )
    )

    # InternetGatewayAttachment
    template.add_resource(
        VPCGatewayAttachment(
            'InternetGatewayAttachment',
            InternetGatewayId=Ref(internet_gateway),
            VpcId=Ref(vpc),
        )
    )

    # PublicSubnet1
    # NOTE(review): MapPublicIpOnLaunch=False on the *public* subnets —
    # instances there won't get public IPs automatically; confirm intended.
    pub_subnet1 = template.add_resource(
        Subnet(
            'PublicSubnet1',
            VpcId=Ref(vpc),
            AvailabilityZone=Select('0', GetAZs("")),
            CidrBlock=Ref(pub_subnet_1_param),
            MapPublicIpOnLaunch=False,
            Tags=Tags(Name=Sub('${EnvironmentName} Public Subnet (AZ1)')),
        )
    )

    # PublicSubnet2
    pub_subnet2 = template.add_resource(
        Subnet(
            'PublicSubnet2',
            VpcId=Ref(vpc),
            AvailabilityZone=Select('1', GetAZs("")),
            CidrBlock=Ref(pub_subnet_2_param),
            MapPublicIpOnLaunch=False,
            Tags=Tags(Name=Sub('${EnvironmentName} Public Subnet (AZ2)')),
        )
    )

    # PrivateSubnet1
    prvt_subnet1 = template.add_resource(
        Subnet(
            'PrivateSubnet1',
            VpcId=Ref(vpc),
            AvailabilityZone=Select('0', GetAZs("")),
            CidrBlock=Ref(prvt_subnet_1_param),
            MapPublicIpOnLaunch=False,
            Tags=Tags(Name=Sub('${EnvironmentName} Private Subnet (AZ1)')),
        )
    )

    # PrivateSubnet2
    prvt_subnet2 = template.add_resource(
        Subnet(
            'PrivateSubnet2',
            VpcId=Ref(vpc),
            AvailabilityZone=Select('1', GetAZs("")),
            CidrBlock=Ref(prvt_subnet_2_param),
            MapPublicIpOnLaunch=False,
            Tags=Tags(Name=Sub('${EnvironmentName} Private Subnet (AZ2)')),
        )
    )

    # NatGateway1EIP — Elastic IPs can only be allocated once the IGW is
    # attached, hence the explicit DependsOn.
    nat_gateway1_eip = template.add_resource(
        EIP(
            'NatGateway1EIP',
            DependsOn='InternetGatewayAttachment',
            Domain='vpc',
        )
    )

    # NatGateway2EIP
    nat_gateway2_eip = template.add_resource(
        EIP(
            'NatGateway2EIP',
            DependsOn='InternetGatewayAttachment',
            Domain='vpc',
        )
    )

    # NatGateway1 (lives in the public subnet of AZ1)
    nat_gateway1 = template.add_resource(
        NatGateway(
            'NatGateway1',
            AllocationId=GetAtt(nat_gateway1_eip, 'AllocationId'),
            SubnetId=Ref(pub_subnet1),
        )
    )

    # NatGateway2 (lives in the public subnet of AZ2)
    nat_gateway2 = template.add_resource(
        NatGateway(
            'NatGateway2',
            AllocationId=GetAtt(nat_gateway2_eip, 'AllocationId'),
            SubnetId=Ref(pub_subnet2),
        )
    )

    # PublicRouteTable
    pub_route_table = template.add_resource(
        RouteTable(
            'PublicRouteTable',
            VpcId=Ref(vpc),
            Tags=Tags(Name=Sub('${EnvironmentName} Public Routes')),
        )
    )

    # DefaultPublicRoute: all non-local public traffic goes to the IGW.
    template.add_resource(
        Route(
            'DefaultPublicRoute',
            RouteTableId=Ref(pub_route_table),
            DestinationCidrBlock='0.0.0.0/0',
            GatewayId=Ref(internet_gateway),
        )
    )

    # PublicSubnet1RouteTableAssociation
    template.add_resource(
        SubnetRouteTableAssociation(
            'PublicSubnet1RouteTableAssociation',
            RouteTableId=Ref(pub_route_table),
            SubnetId=Ref(pub_subnet1),
        )
    )

    # PublicSubnet2RouteTableAssociation
    template.add_resource(
        SubnetRouteTableAssociation(
            'PublicSubnet2RouteTableAssociation',
            RouteTableId=Ref(pub_route_table),
            SubnetId=Ref(pub_subnet2),
        )
    )

    # PrivateRouteTable1
    prvt_route_table1 = template.add_resource(
        RouteTable(
            'PrivateRouteTable1',
            VpcId=Ref(vpc),
            Tags=Tags(Name=Sub('${EnvironmentName} Private Routes (AZ1)')),
        )
    )

    # DefaultPrivateRoute1: private AZ1 traffic egresses via NAT Gateway 1.
    template.add_resource(
        Route(
            'DefaultPrivateRoute1',
            RouteTableId=Ref(prvt_route_table1),
            DestinationCidrBlock='0.0.0.0/0',
            NatGatewayId=Ref(nat_gateway1),
        )
    )

    # PrivateSubnet1RouteTableAssociation
    template.add_resource(
        SubnetRouteTableAssociation(
            'PrivateSubnet1RouteTableAssociation',
            RouteTableId=Ref(prvt_route_table1),
            SubnetId=Ref(prvt_subnet1),
        )
    )

    # PrivateRouteTable2
    prvt_route_table2 = template.add_resource(
        RouteTable(
            'PrivateRouteTable2',
            VpcId=Ref(vpc),
            Tags=Tags(Name=Sub('${EnvironmentName} Private Routes (AZ2)')),
        )
    )

    # DefaultPrivateRoute2: private AZ2 traffic egresses via NAT Gateway 2.
    template.add_resource(
        Route(
            'DefaultPrivateRoute2',
            RouteTableId=Ref(prvt_route_table2),
            DestinationCidrBlock='0.0.0.0/0',
            NatGatewayId=Ref(nat_gateway2),
        )
    )

    # PrivateSubnet2RouteTableAssociation
    template.add_resource(
        SubnetRouteTableAssociation(
            'PrivateSubnet2RouteTableAssociation',
            RouteTableId=Ref(prvt_route_table2),
            SubnetId=Ref(prvt_subnet2),
        )
    )

    # Outputs
    template.add_output(Output(
        'VPC',
        Description='A reference to the created VPC',
        Value=Ref(vpc),
    ))
    template.add_output(Output(
        'PublicSubnets',
        Description='A list of the public subnets',
        Value=Join(',', [Ref(pub_subnet1), Ref(pub_subnet2)]),
    ))
    template.add_output(Output(
        'PrivateSubnets',
        Description='A list of the private subnets',
        Value=Join(',', [Ref(prvt_subnet1), Ref(prvt_subnet2)]),
    ))
    template.add_output(Output(
        'PublicSubnet1',
        Description='A reference to the public subnet in the 1st Availability Zone',
        Value=Ref(pub_subnet1),
    ))
    template.add_output(Output(
        'PublicSubnet2',
        Description='A reference to the public subnet in the 2nd Availability Zone',
        Value=Ref(pub_subnet2),
    ))
    template.add_output(Output(
        'PrivateSubnet1',
        Description='A reference to the private subnet in the 1st Availability Zone',
        Value=Ref(prvt_subnet1),
    ))
    template.add_output(Output(
        'PrivateSubnet2',
        Description='A reference to the private subnet in the 2nd Availability Zone',
        Value=Ref(prvt_subnet2),
    ))

    print(template.to_json())
def generate_template(service_name):
    """Build the exploitation-lab template and return it as a JSON string.

    Creates, prefixed/named with ``service_name``: a VPC with one public
    and one private subnet, IAM roles/instance profiles (the private role
    additionally gets EC2 snapshot permissions), a shared security group,
    one public and one private EC2 instance, and a CloudTrail trail backed
    by an S3 bucket.
    """
    t = Template()
    t.add_version('2010-09-09')

    t.add_description("""\
AWS CloudFormation Template for AWS Exploitation Lab """)

    # AMI lookup tables keyed by region (defined at module level).
    t.add_mapping("PublicRegionMap", ami_public_mapping)
    t.add_mapping("PrivateRegionMap", ami_private_mapping)

    keyname_param = t.add_parameter(
        Parameter(
            'KeyName',
            ConstraintDescription='must be the name of an existing EC2 KeyPair.',
            Description='Name of an existing EC2 KeyPair to enable SSH access to \
the instance',
            Type='AWS::EC2::KeyPair::KeyName',
        ))

    sshlocation_param = t.add_parameter(
        Parameter(
            'SSHLocation',
            Description=' The IP address range that can be used to SSH to the EC2 \
instances',
            Type='String',
            MinLength='9',
            MaxLength='18',
            Default='0.0.0.0/0',
            AllowedPattern="(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
            ConstraintDescription=(
                "must be a valid IP CIDR range of the form x.x.x.x/x."),
        ))

    instanceType_param = t.add_parameter(Parameter(
        'InstanceType',
        Type='String',
        Description='WebServer EC2 instance type',
        Default='t2.micro',
        AllowedValues=[
            't2.micro',
            't2.small',
            't2.medium',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
        ],
        ConstraintDescription='must be a valid EC2 instance type.',
    ))

    ref_stack_id = Ref('AWS::StackId')

    # Role for the public instance: EC2 may assume it; read-only access.
    ec2_role = t.add_resource(Role(
        "%sEC2Role" % service_name,
        AssumeRolePolicyDocument=awacs.aws.Policy(
            Statement=[
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[awacs.aws.Action("sts", "AssumeRole")],
                    Principal=awacs.aws.Principal("Service", ["ec2.amazonaws.com"])
                )
            ]
        )
    ))
    ec2_role.ManagedPolicyArns = [
        "arn:aws:iam::aws:policy/ReadOnlyAccess"
    ]

    # Inline policy granting snapshot creation/modification on any volume.
    ec2_snapshot_policy_document = awacs.aws.Policy(
        Statement=[
            awacs.aws.Statement(
                Sid="PermitEC2Snapshots",
                Effect=awacs.aws.Allow,
                Action=[
                    awacs.aws.Action("ec2", "CreateSnapshot"),
                    awacs.aws.Action("ec2", "ModifySnapshotAttribute"),
                ],
                Resource=["*"]
            )
        ]
    )

    ec2_snapshot_policy = Policy(
        PolicyName="EC2SnapshotPermissions",
        PolicyDocument=ec2_snapshot_policy_document
    )

    # Role for the private instance: same trust policy, plus the snapshot
    # permissions above.
    priv_ec2_role = t.add_resource(Role(
        "%sPrivEC2Role" % service_name,
        AssumeRolePolicyDocument=awacs.aws.Policy(
            Statement=[
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[awacs.aws.Action("sts", "AssumeRole")],
                    Principal=awacs.aws.Principal("Service", ["ec2.amazonaws.com"])
                )
            ]
        ),
        Policies=[ec2_snapshot_policy]
    ))
    priv_ec2_role.ManagedPolicyArns = [
        "arn:aws:iam::aws:policy/ReadOnlyAccess"
    ]

    VPC_ref = t.add_resource(
        VPC(
            'VPC',
            CidrBlock='10.0.0.0/16',
            Tags=Tags(
                Application=ref_stack_id)))

    instanceProfile = t.add_resource(
        InstanceProfile(
            "InstanceProfile",
            InstanceProfileName="%sInstanceRole" % (service_name),
            Roles=[Ref(ec2_role)]))

    privInstanceProfile = t.add_resource(
        InstanceProfile(
            "PrivInstanceProfile",
            InstanceProfileName="%sPrivInstanceRole" % (service_name),
            Roles=[Ref(priv_ec2_role)]))

    # Public subnet auto-assigns public IPs; private subnet does not.
    public_subnet = t.add_resource(
        Subnet(
            '%sSubnetPublic' % service_name,
            MapPublicIpOnLaunch=True,
            CidrBlock='10.0.1.0/24',
            VpcId=Ref(VPC_ref),
            Tags=Tags(
                Application=ref_stack_id,
                Name="%sSubnet_public" % (service_name))
        )
    )

    private_subnet = t.add_resource(
        Subnet(
            '%sSubnetPrivate' % service_name,
            MapPublicIpOnLaunch=False,
            CidrBlock='10.0.2.0/24',
            VpcId=Ref(VPC_ref),
            Tags=Tags(
                Application=ref_stack_id,
                Name="%sSubnet_private" % (service_name))
        )
    )

    internetGateway = t.add_resource(
        InternetGateway(
            'InternetGateway',
            Tags=Tags(
                Application=ref_stack_id,
                Name="%sInternetGateway" % service_name)))

    gatewayAttachment = t.add_resource(
        VPCGatewayAttachment(
            'AttachGateway',
            VpcId=Ref(VPC_ref),
            InternetGatewayId=Ref(internetGateway)))

    routeTable = t.add_resource(
        RouteTable(
            'RouteTable',
            VpcId=Ref(VPC_ref),
            Tags=Tags(
                Application=ref_stack_id,
                Name="%sRouteTable" % service_name)))

    # Default route to the internet; must wait for the IGW attachment.
    route = t.add_resource(
        Route(
            'Route',
            DependsOn='AttachGateway',
            GatewayId=Ref('InternetGateway'),
            DestinationCidrBlock='0.0.0.0/0',
            RouteTableId=Ref(routeTable),
        ))

    # Only associate this Route Table with the public subnet
    subnetRouteTableAssociation = t.add_resource(
        SubnetRouteTableAssociation(
            'SubnetRouteTableAssociation',
            SubnetId=Ref(public_subnet),
            RouteTableId=Ref(routeTable),
        ))

    # Shared security group: SSH restricted to SSHLocation; web ports open
    # to the world; all TCP open to 10.0.0.0/8 (lab-internal traffic).
    instanceSecurityGroup = t.add_resource(
        SecurityGroup(
            'InstanceSecurityGroup',
            GroupDescription='%sSecurityGroup' % service_name,
            SecurityGroupIngress=[
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='22',
                    ToPort='22',
                    CidrIp=Ref(sshlocation_param)),
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='80',
                    ToPort='80',
                    CidrIp='0.0.0.0/0'),
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='1080',
                    ToPort='1080',
                    CidrIp='0.0.0.0/0'),
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='443',
                    ToPort='443',
                    CidrIp='0.0.0.0/0'),
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='0',
                    ToPort='65535',
                    CidrIp="10.0.0.0/8"),
            ],
            VpcId=Ref(VPC_ref),
        )
    )

    public_instance = t.add_resource(
        Instance(
            "Public%sInstance" % service_name,
            ImageId=FindInMap("PublicRegionMap", Ref("AWS::Region"), "AMI"),
            InstanceType=Ref(instanceType_param),
            KeyName=Ref(keyname_param),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=[
                        Ref(instanceSecurityGroup)],
                    AssociatePublicIpAddress='true',
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(public_subnet))],
            UserData=Base64(public_instance_userdata),
            Tags=Tags(
                Application=ref_stack_id,
                Name='%sPublicInstance' % (service_name))
        )
    )

    # NOTE(review): only the private instance gets an instance profile —
    # the public instance's "%sInstanceRole" profile is created but unused.
    private_instance = t.add_resource(
        Instance(
            "Private%sInstance" % service_name,
            ImageId=FindInMap("PrivateRegionMap", Ref("AWS::Region"), "AMI"),
            InstanceType=Ref(instanceType_param),
            KeyName=Ref(keyname_param),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=[
                        Ref(instanceSecurityGroup)],
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(private_subnet))],
            UserData=Base64(private_instance_userdata),
            Tags=Tags(
                Application=ref_stack_id,
                Name='%sPrivateInstance' % (service_name)),
            IamInstanceProfile="%sPrivInstanceRole" % (service_name)
        )
    )

    outputs = []
    outputs.append(
        Output(
            "PublicIP",
            Description="IP Address of Public Instance",
            Value=GetAtt(public_instance, "PublicIp"),
        )
    )
    t.add_output(outputs)

    # Set up S3 Bucket and CloudTrail
    S3Bucket = t.add_resource(
        Bucket(
            "S3Bucket",
            DeletionPolicy="Retain"
        )
    )
    # Bucket policy allowing CloudTrail to write logs and read the bucket ACL.
    S3PolicyDocument=awacs.aws.PolicyDocument(
        Id='EnforceServersideEncryption',
        Version='2012-10-17',
        Statement=[
            awacs.aws.Statement(
                Sid='PermitCTBucketPut',
                Action=[s3.PutObject],
                Effect=awacs.aws.Allow,
                Principal=awacs.aws.Principal("Service", ["cloudtrail.amazonaws.com"]),
                Resource=[Join('', [s3.ARN(''), Ref(S3Bucket), "/*"])],
            ),
            awacs.aws.Statement(
                Sid='PermitCTBucketACLRead',
                Action=[s3.GetBucketAcl],
                Effect=awacs.aws.Allow,
                Principal=awacs.aws.Principal("Service", ["cloudtrail.amazonaws.com"]),
                Resource=[Join('', [s3.ARN(''), Ref(S3Bucket)])],
            )
        ]
    )
    S3BucketPolicy = t.add_resource(
        BucketPolicy(
            "BucketPolicy",
            PolicyDocument=S3PolicyDocument,
            Bucket=Ref(S3Bucket),
            DependsOn=[S3Bucket]
        )
    )
    # The trail must wait for the bucket policy, or log delivery fails.
    myTrail = t.add_resource(
        Trail(
            "CloudTrail",
            IsLogging=True,
            S3BucketName=Ref(S3Bucket),
            DependsOn=["BucketPolicy"],
        )
    )
    myTrail.IsMultiRegionTrail = True
    myTrail.IncludeGlobalServiceEvents = True

    return t.to_json()
Base64Data=Base64(Ref(datadog_application_key)), KmsKeyArn=kms_key_arn)) datadog_lambda = t.add_resource( awslambda.Function( "datadoglambda", DependsOn=["LogGroup"], # log_group.title would also work Code=awslambda.Code(S3Bucket=Select(0, Ref(lambda_package)), S3Key=Select(1, Ref(lambda_package))), Handler="index.handler", FunctionName=Join( "-", ["datadoglambda", Ref("AWS::StackName")]), Role=GetAtt(datadog_lambda_role, "Arn"), Runtime="python2.7", Timeout=300, MemorySize=1536, KmsKeyArn=kms_key_arn, Environment=awslambda.Environment( Variables={ 'api_key': GetAtt(api_key, "CiphertextBase64"), 'application_key': GetAtt(application_key, "CiphertextBase64"), }))) t.add_output( Output("LambdaArn", Description="lambda arn", Value=GetAtt(datadog_lambda, "Arn"), Export=Export(Sub("${AWS::StackName}-LambdaArn")))) print(t.to_json())
t.add_resource( ec2.Instance("server", ImageId="ami-4372ba2c", InstanceType="t2.micro", KeyName=Ref("KeyPair"), NetworkInterfaces=[ ec2.NetworkInterfaceProperty( GroupSet=[Ref("VPNSecurityGroup")], AssociatePublicIpAddress='true', SubnetId=Select("0", Ref("PublicSubnet")), DeviceIndex='0', ) ])) t.add_output( Output("VPNAddress", Description="VPN address", Value=GetAtt("server", "PublicIp"))) t.add_output(Output("VPNUser", Description="VPN username", Value="vpn")) t.add_output( Output("VPNPassword", Description="VPN password", Value=Ref("server"))) t.add_output( Output("VPNL2TP", Description="L2TPpreshared key for authentication", Value=Ref("server"))) t.add_output( Output("VPNAdminPassword", Description="Password to connect administration mode",
"hostedzoneID"), FindInMap("RegionMap", Ref("AWS::Region"), "websiteendpoint"))), RecordSet(Name=Join("", ["www.", Ref(HostedZoneName), "."]), Type="CNAME", TTL="900", ResourceRecords=[ GetAtt(StaticSiteBucketDistribution, "DomainName") ]), ] StaticSiteDNSRecord = t.add_resource( RecordSetGroup("StaticSiteDNSRecord", HostedZoneName=Join("", [Ref(HostedZoneName), "."]), Comment="Records for the root of the hosted zone", RecordSets=record_sets)) t.add_output( Output("CloudfrontDomainName", Description="Cloudfront domain name", Value=GetAtt(StaticSiteBucketDistribution, "DomainName"))) t.add_output( Output("S3WebsiteURL", Description="S3 Website URL", Value=GetAtt(wwwStaticSiteBucket, "WebsiteURL"))) def get(): return t.to_json()
# Stack outputs: node instance IDs plus convenience URL/SSH strings for the
# Ambari node (referenced by logical ID; resources are defined elsewhere).
t.add_output([
    Output(
        "IIAN",
        Description="Instance ID of additional nodes",
        Value=Ref('AdditionalNodes')
    ),
    Output(
        "AmbariURL",
        Description="URL of Ambari UI",
        # Ambari's web UI listens on port 8080 of the Ambari node.
        Value=Join("", [
            "http://", GetAtt('AmbariNode', 'PublicDnsName'),
            ":8080"
        ]),
    ),
    Output(
        "AmbariSSH",
        Description="SSH to the Ambari Node",
        Value=Join("", [
            "ssh centos@", GetAtt('AmbariNode', 'PublicDnsName')
        ]),
    ),
    Output(
        "AmbariServiceInstanceId",
        Description="The Ambari Servers Instance-Id",
        Value=Ref('AmbariNode')
    ),
    Output(
        "Region",
        Description="AWS Region",
        Value=ref_region
    ),
])
# Security group for the API instances: port 8080 only from the ELB's
# security group; SSH from anywhere.
# NOTE(review): 22/tcp open to 0.0.0.0/0 — consider restricting the source.
api_sg = template.add_resource(
    SecurityGroup('ApiSg',
                  VpcId=Ref(vpc_id),
                  GroupDescription='Security Group for the api',
                  SecurityGroupIngress=[
                      SecurityGroupRule(
                          IpProtocol="tcp",
                          FromPort="8080",
                          ToPort="8080",
                          SourceSecurityGroupId=Ref(api_elb_sg),
                      ),
                      SecurityGroupRule(
                          IpProtocol="tcp",
                          FromPort="22",
                          ToPort="22",
                          CidrIp='0.0.0.0/0',
                      )
                  ]))

# Expose both security group IDs for downstream stacks.
template.add_output(
    Output('ApiElbSgId',
           Description='Security group id for the api ELB',
           Value=Ref(api_elb_sg)))

template.add_output(
    Output('ApiSgId',
           Description='Security group id for the api',
           Value=Ref(api_sg)))

# Emit the assembled template as JSON.
print(template.to_json())
from troposphere.s3 import Bucket, PublicRead, AccelerateConfiguration

t = Template()

t.add_description(
    "AWS CloudFormation Sample Template S3_Bucket: Sample template showing :"
    "How to create a publicly accessible S3 bucket. "
    "How to enable S3 Transfer Acceleration. "
    "**WARNING** This template creates an Amazon S3 Bucket. "
    "You will be billed for the AWS resources used if you create "
    "a stack from this template.")

# Bucket name is generated by CloudFormation (no BucketName given).
s3bucket = t.add_resource(
    Bucket(
        "S3Bucket",
        # Make public Read
        AccessControl=PublicRead,
        # Enable s3 Transfer Acceleration
        AccelerateConfiguration=AccelerateConfiguration(
            AccelerationStatus="Enabled",
        ),
    ))

t.add_output(
    Output(
        "BucketName",
        Value=Ref(s3bucket),
        Description="Name of S3 bucket with s3 transfer acceleration enabled",
    ))

# Emit the assembled template as JSON.
print(t.to_json())
NumCacheNodes=Ref(param_cache_node_num), CacheSubnetGroupName=Ref(cache_subnet_group), AutoMinorVersionUpgrade=True, VpcSecurityGroupIds=[ If('CreateSecurityGroupCondition', Ref(cache_sg), Ref(param_sg)) ], )) # # Output # t.add_output([ Output( 'EndpointAddress', Description= 'The DNS address of the configuration endpoint for the Memcached cache cluster.', Value=GetAtt(cache_cluster, 'ConfigurationEndpoint.Address')), Output( 'EndpointPort', Description= 'The port number of the configuration endpoint for the Memcached cache cluster.', Value=GetAtt(cache_cluster, 'ConfigurationEndpoint.Port')), ]) # # Write template # cfnutil.write(t, __file__.replace('Template.py', '.template.yaml'), write_yaml=True)
'Action': 'secretsmanager:GetSecretValue', 'Effect': 'Allow', 'Resource': '*' }, { 'Action': 'cloudfront:CreateInvalidation', 'Effect': 'Allow', 'Resource': '*' }] }) ])) # ================================================== # Outputs. # ================================================== template.add_output( Output('BucketName', Description='The S3 bucket name', Value=bucket_name_variable)) template.add_output( Output('CloudFrontId', Description='The ID of the CloudFront distribution', Value=Ref(distribution_resource))) template.add_output( Output('CloudFrontDomain', Description='The domain name of the CloudFront distribution', Value=GetAtt(distribution_resource, 'DomainName'))) # ================================================== # Print the generated template in JSON. # ==================================================
KeyName=If(conditions['has_kp'], Ref(params['keyname']), Ref("AWS::NoValue")), SecurityGroups=[ Ref(resources['sg']), ], ) ) tpl.add_output([ Output( "InstanceId", Description="InstanceId of the EC2 instance", Value=Ref(resources['ec2']), ), Output( "AZ", Description="AZ of the EC2 instance", Value=GetAtt(resources['ec2'], "AvailabilityZone"), ), Output( "PublicDNS", Description="Public DNSName of the EC2 instance", Value=GetAtt(resources['ec2'], "PublicDnsName"), ), Output( "PublicIP", Description="Public IP address of the EC2 instance", Value=GetAtt(resources['ec2'], "PublicIp"), ) ]) print(tpl.to_yaml())
], )) t.add_output([ Output( "AmbariURL", Description="URL of Ambari UI", Value=Join("", [ "http://", GetAtt('AmbariNode', 'PublicDnsName'), ":8080" ]), ), Output( "AmbariSSH", Description="SSH to the Ambari Node", Value=Join("", [ "ssh ec2-user@", GetAtt('AmbariNode', 'PublicDnsName') ]), ), Output( "AmbariServiceInstanceId", Description="The Ambari Servers Instance-Id", Value=Ref('AmbariNode') ), Output( "Region", Description="AWS Region", Value=ref_region ), ]) if __name__ == '__main__':
def generate_vpc_template(layers, az_count, cidr_block):
    """Build a three-layer (storage / public / app) VPC CloudFormation template.

    :param layers: dict with keys 'stor', 'pub' and 'app', each an iterable of
        subnet CIDR strings, one per availability zone.
    :param az_count: iterable of AZ indices; it is consumed with zip() below,
        so despite the name it must be iterable, not an int — each element is
        used as an index into the module-level ``alpha`` sequence
        (presumably the AZ letters 'a', 'b', ... — TODO confirm).
    :param cidr_block: CIDR for the VPC itself.
    :returns: the populated troposphere ``Template``.
    """
    TPL = Template()
    TPL.set_description('VPC - Version 2019-06-05')
    TPL.set_metadata({'Author': 'https://github.com/johnpreston'})

    # Core networking: VPC, internet gateway, DHCP options and a private
    # Route53 zone named <stack>.local.
    VPC = VPCType('VPC',
                  CidrBlock=cidr_block,
                  EnableDnsHostnames=True,
                  EnableDnsSupport=True,
                  Tags=Tags(Name=Ref('AWS::StackName'),
                            EnvironmentName=Ref('AWS::StackName')))
    IGW = InternetGateway("InternetGateway")
    # NOTE(review): logical id keeps the original misspelling
    # ("Attachement") — renaming it would replace the resource on update.
    IGW_ATTACH = VPCGatewayAttachment("VPCGatewayAttachement",
                                      InternetGatewayId=Ref(IGW),
                                      VpcId=Ref(VPC))
    DHCP_OPTIONS = DHCPOptions('VpcDhcpOptions',
                               DomainName=Sub(f'${{AWS::StackName}}.local'),
                               DomainNameServers=['AmazonProvidedDNS'],
                               Tags=Tags(Name=Sub(f'DHCP-${{{VPC.title}}}')))
    DHCP_ATTACH = VPCDHCPOptionsAssociation('VpcDhcpOptionsAssociate',
                                            DhcpOptionsId=Ref(DHCP_OPTIONS),
                                            VpcId=Ref(VPC))
    DNS_HOSTED_ZONE = HostedZone(
        'VpcHostedZone',
        VPCs=[HostedZoneVPCs(VPCId=Ref(VPC), VPCRegion=Ref('AWS::Region'))],
        Name=Sub(f'${{AWS::StackName}}.local'),
        HostedZoneTags=Tags(Name=Sub(f'ZoneFor-${{{VPC.title}}}')))
    TPL.add_resource(VPC)
    TPL.add_resource(IGW)
    TPL.add_resource(IGW_ATTACH)
    TPL.add_resource(DHCP_OPTIONS)
    TPL.add_resource(DHCP_ATTACH)
    TPL.add_resource(DNS_HOSTED_ZONE)

    # Storage layer: one subnet per AZ, all sharing a single route table
    # (no default route is added, so these subnets have no internet path).
    STORAGE_RTB = TPL.add_resource(
        RouteTable('StorageRtb', VpcId=Ref(VPC), Tags=Tags(Name='StorageRtb')))
    STORAGE_SUBNETS = []
    for count, subnet_cidr in zip(az_count, layers['stor']):
        subnet = Subnet(
            f'StorageSubnet{alpha[count].upper()}',
            CidrBlock=subnet_cidr,
            VpcId=Ref(VPC),
            AvailabilityZone=Sub(f'${{AWS::Region}}{alpha[count]}'),
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-Storage-{alpha[count]}'),
                      Usage="Storage"))
        subnet_assoc = TPL.add_resource(
            SubnetRouteTableAssociation(
                f'StorageSubnetAssoc{alpha[count].upper()}',
                SubnetId=Ref(subnet),
                RouteTableId=Ref(STORAGE_RTB)))
        STORAGE_SUBNETS.append(subnet)
        TPL.add_resource(subnet)

    # Public layer: shared route table with a default route via the IGW;
    # each public subnet also hosts a NAT gateway (with its own EIP) that
    # the matching app subnet will route through.
    PUBLIC_RTB = TPL.add_resource(
        RouteTable('PublicRtb', VpcId=Ref(VPC), Tags=Tags(Name='PublicRtb')))
    PUBLIC_ROUTE = TPL.add_resource(
        Route('PublicDefaultRoute',
              GatewayId=Ref(IGW),
              RouteTableId=Ref(PUBLIC_RTB),
              DestinationCidrBlock='0.0.0.0/0'))
    PUBLIC_SUBNETS = []
    NAT_GATEWAYS = []
    for count, subnet_cidr in zip(az_count, layers['pub']):
        subnet = Subnet(
            f'PublicSubnet{alpha[count].upper()}',
            CidrBlock=subnet_cidr,
            VpcId=Ref(VPC),
            AvailabilityZone=Sub(f'${{AWS::Region}}{alpha[count]}'),
            MapPublicIpOnLaunch=True,
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-Public-{alpha[count]}')))
        eip = TPL.add_resource(
            EIP(f"NatGatewayEip{alpha[count].upper()}", Domain='vpc'))
        nat = NatGateway(f"NatGatewayAz{alpha[count].upper()}",
                         AllocationId=GetAtt(eip, 'AllocationId'),
                         SubnetId=Ref(subnet))
        subnet_assoc = TPL.add_resource(
            SubnetRouteTableAssociation(
                f'PublicSubnetsRtbAssoc{alpha[count].upper()}',
                RouteTableId=Ref(PUBLIC_RTB),
                SubnetId=Ref(subnet)))
        NAT_GATEWAYS.append(nat)
        PUBLIC_SUBNETS.append(subnet)
        TPL.add_resource(nat)
        TPL.add_resource(subnet)

    # App layer: one subnet + one route table per AZ, each defaulting out
    # through the same-AZ NAT gateway created above.
    APP_SUBNETS = []
    APP_RTBS = []
    for count, subnet_cidr, nat in zip(az_count, layers['app'], NAT_GATEWAYS):
        SUFFIX = alpha[count].upper()
        subnet = Subnet(
            f'AppSubnet{SUFFIX}',
            CidrBlock=subnet_cidr,
            VpcId=Ref(VPC),
            AvailabilityZone=Sub(f'${{AWS::Region}}{alpha[count]}'),
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-App-{alpha[count]}')))
        APP_SUBNETS.append(subnet)
        rtb = RouteTable(f'AppRtb{alpha[count].upper()}',
                         VpcId=Ref(VPC),
                         Tags=Tags(Name=f'AppRtb{alpha[count].upper()}'))
        APP_RTBS.append(rtb)
        route = Route(f'AppRoute{alpha[count].upper()}',
                      NatGatewayId=Ref(nat),
                      RouteTableId=Ref(rtb),
                      DestinationCidrBlock='0.0.0.0/0')
        subnet_assoc = SubnetRouteTableAssociation(
            f'SubnetRtbAssoc{alpha[count].upper()}',
            RouteTableId=Ref(rtb),
            SubnetId=Ref(subnet))
        TPL.add_resource(subnet)
        TPL.add_resource(rtb)
        TPL.add_resource(route)
        TPL.add_resource(subnet_assoc)

    # Gateway-type S3 endpoints so every layer reaches S3 without
    # traversing the NAT gateways / IGW.
    APP_S3_ENDPOINT = VPCEndpoint(
        'AppS3Endpoint',
        VpcId=Ref(VPC),
        RouteTableIds=[Ref(rtb) for rtb in APP_RTBS],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.s3'),
        VpcEndpointType='Gateway',
    )
    PUBLIC_S3_ENDPOINT = VPCEndpoint(
        'PublicS3Endpoint',
        VpcId=Ref(VPC),
        RouteTableIds=[Ref(PUBLIC_RTB)],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.s3'),
        VpcEndpointType='Gateway',
    )
    STORAGE_S3_ENDPOINT = VPCEndpoint(
        'StorageS3Endpoint',
        VpcId=Ref(VPC),
        RouteTableIds=[Ref(STORAGE_RTB)],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.s3'),
        VpcEndpointType='Gateway')

    # Spare EIPs, one per AZ index; their allocation ids are exported in
    # the outputs below. NOTE(review): titles use the raw index (Eip0...),
    # unlike the AZ-letter naming used elsewhere — confirm intended.
    RESOURCES = []
    for count in az_count:
        resource = TPL.add_resource(EIP(f'Eip{count}', Domain='vpc'))
        RESOURCES.append(resource)
    TPL.add_resource(APP_S3_ENDPOINT)
    TPL.add_resource(PUBLIC_S3_ENDPOINT)
    TPL.add_resource(STORAGE_S3_ENDPOINT)

    # Interface-endpoint security group: allow HTTPS from each app CIDR.
    # (loop variable deliberately iterates CIDR strings, not Subnet objects)
    SG_RULES = []
    for subnet in layers['app']:
        RULE = SecurityGroupRule(
            IpProtocol="tcp",
            FromPort="443",
            ToPort="443",
            CidrIp=subnet,
        )
        SG_RULES.append(RULE)
    ENDPOINT_SG = TPL.add_resource(
        SecurityGroup(
            'VpcEndpointSecurityGroup',
            VpcId=Ref(VPC),
            GroupDescription='SG for all Interface VPC Endpoints',
            SecurityGroupIngress=SG_RULES,
            Tags=Tags(Name="sg-endpoints"),
        ))

    # Interface endpoints in the app subnets (SNS, SQS, ECR, Secrets
    # Manager, SSM + SSM Messages), all with private DNS enabled.
    APP_SNS_ENDPOINT = VPCEndpoint(
        'AppSNSEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.sns'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SNS_ENDPOINT)
    APP_SQS_ENDPOINT = VPCEndpoint(
        'AppSQSEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.sqs'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SQS_ENDPOINT)
    APP_ECR_API_ENDPOINT = VPCEndpoint(
        'AppECRAPIEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.ecr.api'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_ECR_API_ENDPOINT)
    APP_ECR_DKR_ENDPOINT = VPCEndpoint(
        'AppECRDKREndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.ecr.dkr'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_ECR_DKR_ENDPOINT)
    APP_SECRETS_MANAGER_ENDPOINT = VPCEndpoint(
        'AppSecretsManagerEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.secretsmanager'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SECRETS_MANAGER_ENDPOINT)
    APP_SSM_ENDPOINT = VPCEndpoint(
        'AppSSMEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.ssm'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SSM_ENDPOINT)
    APP_SSM_MESSAGES_ENDPOINT = VPCEndpoint(
        'AppSSMMessagesEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.ssmmessages'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SSM_MESSAGES_ENDPOINT)
    ################################################################################
    #
    # OUTPUTS
    #
    TPL.add_output(object_outputs(VPC, name_is_id=True))
    TPL.add_output(object_outputs(APP_SQS_ENDPOINT, name_is_id=True))
    TPL.add_output(object_outputs(APP_SNS_ENDPOINT, name_is_id=True))
    TPL.add_output(
        comments_outputs([{
            'EIP':
            Join(',',
                 [GetAtt(resource, "AllocationId") for resource in RESOURCES])
        }, {
            'PublicSubnets':
            Join(',', [Ref(subnet) for subnet in PUBLIC_SUBNETS])
        }, {
            'StorageSubnets':
            Join(',', [Ref(subnet) for subnet in STORAGE_SUBNETS])
        }, {
            'ApplicationSubnets':
            Join(',', [Ref(subnet) for subnet in APP_SUBNETS])
        }, {
            'StackName': Ref('AWS::StackName')
        }, {
            'VpcZoneId': Ref(DNS_HOSTED_ZONE)
        }]))
    return TPL
def test_max_outputs(self):
    """Exceeding MAX_OUTPUTS outputs on a Template must raise ValueError."""
    tpl = Template()
    # Fill the template exactly up to the documented output limit.
    for index in range(MAX_OUTPUTS):
        tpl.add_output(Output(str(index), Value=str(index)))
    # The very next output must be rejected.
    with self.assertRaises(ValueError):
        tpl.add_output(Output("output", Value="output"))
#!/usr/bin/env python
"""Minimal template: one private S3 bucket plus a BucketName output."""
from troposphere import Output, Template, Ref
from troposphere.s3 import Bucket, Private

t = Template()

# Private bucket; the logical id "code" becomes the CloudFormation resource id.
s3bucket = t.add_resource(Bucket("code", AccessControl=Private))

t.add_output([
    Output("BucketName",
           Value=Ref(s3bucket),
           Description="ID of Bucket without any DNS")
])

if __name__ == '__main__':
    # Fixed: `print t.to_json()` is Python-2-only syntax; the parenthesized
    # call form behaves identically on Python 2 and is valid Python 3.
    print(t.to_json())
], EvaluationPeriods=Ref(credit_evaluations), Threshold=Ref(credit_threshold), ComparisonOperator="LessThanOrEqualToThreshold", AlarmActions=[], InsufficientDataActions=[], OKActions=[], ) ) t.add_output([ Output( 'UpAlarm', Description='Alarm name for up/high', Value=Ref(high_cpu_alarm) ), Output( 'DownAlarm', Description='Alarm name for down/low', Value=Ref(low_cpu_alarm) ), Output( 'CreditLowAlarm', Description='Alarm name for credits out', Value=Ref(low_credit_alarm) ) ]) if __name__ == '__main__': print t.to_json()
"**WARNING** This template creates a CloudFront distribution. " "You will be billed for the AWS resources used if you create " "a stack from this template.") s3dnsname = t.add_parameter(Parameter( "S3DNSNAme", Description="The DNS name of an existing S3 bucket to use as the " "Cloudfront distribution origin", Type="String", )) myDistribution = t.add_resource(Distribution( "myDistribution", DistributionConfig=DistributionConfig( Origins=[Origin(Id="Origin 1", DomainName=Ref(s3dnsname))], DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="Origin 1", ViewerProtocolPolicy="allow-all"), Enabled=True ) )) t.add_output([ Output("DistributionId", Value=Ref(myDistribution)), Output( "DistributionName", Value=Join("", ["http://", GetAtt(myDistribution, "DomainName")])), ]) print(t.to_json())
# IAM instance profile wrapping the (externally defined) "Role".
t.add_resource(
    InstanceProfile("InstanceProfile", Path="/", Roles=[Ref("Role")]))

# Single EC2 instance bootstrapped with the `ud` user-data built above.
t.add_resource(
    ec2.Instance(
        "instance",
        ImageId="ami-05b3bcf7f311194b3",
        InstanceType="t2.micro",
        SecurityGroups=[Ref("SecurityGroup")],
        KeyName=Ref("KeyPair"),
        UserData=ud,
    ))

# Surface the instance's public IP and the application URL as outputs.
t.add_output(
    Output(
        "InstancePublicIp",
        Description="Public IP of our instance.",
        Value=GetAtt("instance", "PublicIp"),
    ))
t.add_output(
    Output(
        "WebUrl",
        Description="Application endpoint",
        Value=Join("", [
            "http://",
            GetAtt("instance", "PublicDnsName"), ":", ApplicationPort
        ]),
    ))

# Fixed: `print t.to_json()` is Python-2-only syntax; the parenthesized
# call form prints identically on Python 2 and is valid Python 3.
print(t.to_json())
def construct_template():
    """Assemble and return the main 'Toddlr' troposphere Template.

    Wires together parameters, an events->lambda IAM role, the words
    DynamoDB table and the csvimport/show/archive sub-stacks, then adds a
    version output. Returns the populated ``Template``.
    """
    version = util.version_info()
    t = Template('Toddlr main - {}'.format(version))

    # Parameters — each param.*_group() call registers parameters on the
    # template and returns them in a fixed order, unpacked positionally here.
    pui = ParamUi()
    [
        todoist_api_key,
        archive_00_am,
        archive_00_pm,
        archive_10_am,
        archive_10_pm,
        archive_20_am,
        archive_20_pm,
        archive_30_am,
        archive_30_pm,
        archive_40_am,
        archive_40_pm,
        archive_50_am,
        archive_50_pm,
        inbox_0_sun_am,
        inbox_0_sun_pm,
        inbox_1_mon_am,
        inbox_1_mon_pm,
        inbox_2_tue_am,
        inbox_2_tue_pm,
        inbox_3_wed_am,
        inbox_3_wed_pm,
        inbox_4_thu_am,
        inbox_4_thu_pm,
        inbox_5_fri_am,
        inbox_5_fri_pm,
        inbox_6_sat_am,
        inbox_6_sat_pm,
    ] = param.todoist_group(pui, t)
    # NOTE(review): the spreadsheet parameters are unpacked but not used
    # further in this function — presumably consumed via the template's
    # registered parameters elsewhere; confirm.
    [
        google_project_id,
        google_private_key_id,
        google_private_key,
        google_client_email,
        spreadsheet_id,
    ] = param.spreadsheet_group(pui, t)
    [
        s3_bucket,
        s3_key_base,
    ] = param.s3_group(pui, t)
    pui.output(t)

    # Group the inbox parameters per weekday (index 0 = Sunday).
    inbox = [
        {'am': inbox_0_sun_am, 'pm': inbox_0_sun_pm, },
        {'am': inbox_1_mon_am, 'pm': inbox_1_mon_pm, },
        {'am': inbox_2_tue_am, 'pm': inbox_2_tue_pm, },
        {'am': inbox_3_wed_am, 'pm': inbox_3_wed_pm, },
        {'am': inbox_4_thu_am, 'pm': inbox_4_thu_pm, },
        {'am': inbox_5_fri_am, 'pm': inbox_5_fri_pm, },
        {'am': inbox_6_sat_am, 'pm': inbox_6_sat_pm, },
    ]
    # Group the archive parameters per bucket (00, 10, ..., 50).
    archive = [
        {'am': archive_00_am, 'pm': archive_00_pm, },
        {'am': archive_10_am, 'pm': archive_10_pm, },
        {'am': archive_20_am, 'pm': archive_20_pm, },
        {'am': archive_30_am, 'pm': archive_30_pm, },
        {'am': archive_40_am, 'pm': archive_40_pm, },
        {'am': archive_50_am, 'pm': archive_50_pm, },
    ]

    # Predefined
    s3_key_value = predefined.s3_key_value(s3_key_base)

    # Role allowing CloudWatch Events to invoke the lambdas.
    events_invoke_lambda_role = t.add_resource(iam.events_invoke_lambda_role())

    # DynamoDB
    words_table = t.add_resource(dynamodb.words_table())

    # Predefined — shared environment for all lambda functions below.
    lambda_environment_dict = predefined.lambda_environment_dict(
        words_table=words_table,
        todoist_api_key=todoist_api_key,
        inbox=inbox,
        archive=archive)

    # sub
    construct_csvimport(
        t,
        lambda_environment_dict=lambda_environment_dict,
        words_table=words_table,
        s3_bucket=s3_bucket,
        s3_key_value=s3_key_value)

    # sub
    construct_show(
        t,
        lambda_environment_dict=lambda_environment_dict,
        words_table=words_table,
        s3_bucket=s3_bucket,
        s3_key_value=s3_key_value,
        events_invoke_lambda_role=events_invoke_lambda_role)

    # sub
    construct_archive(
        t,
        lambda_environment_dict=lambda_environment_dict,
        words_table=words_table,
        s3_bucket=s3_bucket,
        s3_key_value=s3_key_value,
        events_invoke_lambda_role=events_invoke_lambda_role)

    # Output
    t.add_output([output.version(version)])
    return t
class EzElb(object):
    """Fluent builder for an Application Load Balancer CloudFormation stack.

    Collects subnets, certificates, targets, listeners and alarms through
    setter-style calls, then renders everything to a template via
    ``to_json()`` / ``to_yaml()``.

    NOTE(review): this class uses Python-2-only constructs
    (``dict.iteritems()``, hashing of ``str`` without ``.encode()``) — it
    appears to target Python 2; confirm before running under Python 3.
    """

    def __init__(self, env_name, vpc_id, name=None, internal=False):
        """
        :param env_name: short environment label (max 10 chars, validated).
        :param vpc_id: VPC in which the ELB and security groups are created.
        :param name: optional explicit ELB name (default: CFN-generated).
        :param internal: if True the ELB scheme is "internal".
        """
        self.env_name = env_name
        self.vpc_id = vpc_id
        if len(env_name) > 10:
            raise ValidationException(
                "env_name must be no more than 10 characters long")
        self.template = Template()
        self.subnet_ids = []
        self.cert_ids = []
        self.default_targets = []
        self.http_redirect_targets = []
        self.alt_listeners = []
        # path -> TargetPath; defaultdict so target() can add hosts lazily.
        self.target_paths = collections.defaultdict(TargetPath)
        self._sticky = True
        # Memoized template JSON; set by the first to_json() call.
        self._json = None
        # Listener-rule priorities already handed out (see priority_hash()).
        self._priority_cache = []
        self._global_tags = []
        self._alarm_topic = None
        self.alarm_namespace = "AWS/ApplicationELB"
        self._log_bucket = None
        self._ecs_redirect = False
        self.idle_timeout_seconds = 120
        self._custom_elb_sgs = None
        self._elb_name = name
        self._deletion_protection = False
        if internal:
            self._elb_scheme = "internal"
        else:
            self._elb_scheme = "internet-facing"
        # Default ingress: HTTP/HTTPS from anywhere.
        self._sg_rules = [
            SecurityGroupRule(CidrIp="0.0.0.0/0",
                              IpProtocol="tcp",
                              FromPort=443,
                              ToPort=443),
            SecurityGroupRule(CidrIp="0.0.0.0/0",
                              IpProtocol="tcp",
                              FromPort=80,
                              ToPort=80)
        ]
        # The first call to allow() should clear the default _sg_rules,
        # subsequent calls should not.
        self._reset_sg_rules = True

    def deletion_protection(self, p):
        """Enable/disable ELB deletion protection (boolean)."""
        self._deletion_protection = p

    def allow(self, *rules):
        """Set or extend the ELB ingress rules.

        The first call replaces the default allow-all HTTP/HTTPS rules;
        later calls append.
        """
        if self._reset_sg_rules:
            self._sg_rules = list(rules)
            self._reset_sg_rules = False
        else:
            self._sg_rules += list(rules)

    def allow_cidr(self, *cidrs):
        """Allow HTTP and HTTPS ingress from each of the given CIDRs."""
        self.allow(*list(
            SecurityGroupRule(
                CidrIp=c, IpProtocol="tcp", FromPort=443, ToPort=443)
            for c in cidrs))
        self.allow(*list(
            SecurityGroupRule(
                CidrIp=c, IpProtocol="tcp", FromPort=80, ToPort=80)
            for c in cidrs))

    def custom_security_groups(self, *ids):
        """Use these SG ids verbatim instead of generating ELB/instance SGs."""
        self._custom_elb_sgs = list(ids)

    def priority_hash(self, name):
        """Derive a stable listener-rule priority (< 50000) from *name*.

        Collisions with previously issued priorities are resolved by
        linear probing. Python 2 only: md5 of a str without .encode().
        """
        ret = int(hashlib.md5(name).hexdigest(), 16) % 50000
        while ret in self._priority_cache:
            ret += 1
        self._priority_cache.append(ret)
        return ret

    def subnet_id(self, *ids):
        """Add ELB subnets (at least two required before to_json())."""
        self.subnet_ids += list(ids)

    def certificate_id(self, *ids):
        """Add ACM/IAM certificate ARNs for the HTTPS listener."""
        self.cert_ids += list(ids)

    def alarm_topic(self, arn):
        """SNS topic ARN for unhealthy-host alarms (None disables alarms)."""
        self._alarm_topic = arn

    def dns(self, host, zone, ttl=3600):
        """Add a CNAME record <host>.<zone> pointing at the ELB DNS name."""
        if not zone.endswith('.'):
            zone = zone + '.'
        self.template.add_resource(
            RecordSetGroup(
                # Logical id derived from a hash so multiple records coexist.
                ("RSG" + hashlib.sha1(zone + host).hexdigest())[:10],
                Comment=Ref("AWS::StackName"),
                HostedZoneName=zone,
                RecordSets=[
                    RecordSet(Name="%s.%s" % (host, zone),
                              Type="CNAME",
                              ResourceRecords=[GetAtt("ELB", "DNSName")],
                              TTL=ttl)
                ]))

    def global_tag(self, key, value):
        """Tag applied to every taggable resource built by this class."""
        self._global_tags.append({'Key': key, 'Value': value})

    def tags_with(self, **kwargs):
        """Return the global tags plus the given key/value pairs."""
        ret = []
        ret += self._global_tags
        for k, v in kwargs.iteritems():
            ret.append({'Key': k, 'Value': v})
        return ret

    def default_target(self, host, port, protocol="HTTP"):
        """Register a target for the default (catch-all) target group."""
        self.default_targets.append(TargetHost(host, port, protocol))

    def http_redirect_target(self, host, port):
        """Register a target for the port-80 redirect target group."""
        self.http_redirect_targets.append(TargetHost(host, port))

    def target(self, host, port, path, protocol="HTTP", health_check_codes=None):
        """Register a path-routed target; optionally set health-check codes."""
        target_path = self.target_paths[path]
        target_path.add_host(host, port, protocol)
        if health_check_codes is not None:
            target_path.set_health_check_codes(health_check_codes)

    def log_bucket(self, bucket):
        """Enable ELB access logging into the given S3 bucket."""
        self._log_bucket = bucket

    def alt_listener(self, port, protocol="HTTPS"):
        """Create, record and return an extra listener on *port*."""
        ret = AltListener(port, protocol)
        self.alt_listeners.append(ret)
        return ret

    def elb_attributes(self):
        """Build the LoadBalancerAttributes list from the collected settings."""
        ret = [
            LoadBalancerAttributes(Key="idle_timeout.timeout_seconds",
                                   Value=str(self.idle_timeout_seconds)),
            LoadBalancerAttributes(Key="routing.http2.enabled", Value="true"),
            LoadBalancerAttributes(
                Key="deletion_protection.enabled",
                Value="true" if self._deletion_protection else "false")
        ]
        if self._log_bucket is not None:
            ret += [
                LoadBalancerAttributes(Key="access_logs.s3.enabled",
                                       Value="true"),
                LoadBalancerAttributes(Key="access_logs.s3.bucket",
                                       Value=self._log_bucket),
                LoadBalancerAttributes(Key="access_logs.s3.prefix",
                                       Value=Sub("${AWS::StackName}-ElbLogs"))
            ]
        return ret

    def attach_alarm(self, target_group):
        """Add an UnHealthyHostCount alarm for *target_group* (no-op when no
        alarm topic was configured).

        :type target_group: TargetGroup
        """
        if self._alarm_topic is not None:
            self.template.add_resource(
                Alarm(
                    target_group.title + "UnhealthyHostAlarm",
                    AlarmName=Sub("${AWS::StackName}-UnhealthyHosts-" +
                                  target_group.title),
                    AlarmDescription="Unhealthy hosts in target group: %s/%s" %
                    (self.env_name, target_group.title),
                    MetricName="UnHealthyHostCount",
                    Namespace=self.alarm_namespace,
                    Statistic="Minimum",
                    Period=120,
                    EvaluationPeriods=2,
                    Threshold='0',
                    AlarmActions=[self._alarm_topic],
                    ComparisonOperator="GreaterThanThreshold",
                    Dimensions=[
                        MetricDimension(Name="TargetGroup",
                                        Value=GetAtt(target_group,
                                                     "TargetGroupFullName")),
                        MetricDimension(Name="LoadBalancer",
                                        Value=GetAtt("ELB",
                                                     "LoadBalancerFullName"))
                    ]))

    def ecs_redirect(self, cluster, url):
        """Serve the default target group from a small ECS redirect container.

        Deploys a one-task service on *cluster* running cusspvz/redirect,
        which redirects every request to *url*.
        """
        self._ecs_redirect = True
        self.template.add_resource(
            TaskDefinition(
                "RedirectTaskDef",
                Volumes=[],
                Family=Sub("${AWS::StackName}-redirect"),
                NetworkMode="bridge",
                ContainerDefinitions=[
                    ContainerDefinition(
                        Name="redirect",
                        Cpu=1,
                        Environment=[Environment(Name="REDIRECT", Value=url)],
                        Essential=True,
                        Hostname=Sub("${AWS::StackName}-redirect"),
                        Image="cusspvz/redirect:0.0.2",
                        Memory=512,
                        MemoryReservation=128,
                        PortMappings=[
                            PortMapping(ContainerPort=80, Protocol="tcp")
                        ])
                ]))
        self.template.add_resource(
            Service("RedirectService",
                    TaskDefinition=Ref("RedirectTaskDef"),
                    Cluster=cluster,
                    DesiredCount=1,
                    DeploymentConfiguration=DeploymentConfiguration(
                        MaximumPercent=200, MinimumHealthyPercent=100),
                    LoadBalancers=[
                        EcsLoadBalancer(
                            ContainerName="redirect",
                            ContainerPort=80,
                            TargetGroupArn=Ref("DefaultTargetGroup"))
                    ]))

    @property
    def sticky(self):
        # Whether path target groups get lb_cookie stickiness (default True).
        return self._sticky

    @sticky.setter
    def sticky(self, b):
        self._sticky = b

    def to_yaml(self):
        """Render the template as YAML (via the JSON rendering)."""
        return yaml.safe_dump(json.loads(self.to_json()), encoding='utf-8')

    def to_json(self):
        """Validate the collected configuration, build all resources, and
        return the template JSON. The result is memoized; later mutations
        have no effect once this has been called."""
        if self._json is not None:
            return self._json
        # Validity checks
        if len(self.subnet_ids) < 2:
            raise ValidationException(
                "Use .subnet_id() to specify at least two ELB subnets")
        if len(self.cert_ids) < 1:
            raise ValidationException(
                "Use .certificate_id() to specify at least one certificate")
        if not self._ecs_redirect and len(self.default_targets) < 1:
            raise ValidationException(
                "Use .default_target() to specify at least one default target or .ecs_redirect("
                ") to set up a redirect container")
        for (name, tp) in self.target_paths.iteritems():
            if len(set(map(lambda h: h.type, tp.hosts))) != 1:
                raise ValidationException(
                    "Inconsistent target types for %s. All hosts for a given path must have the "
                    "same type (ip or instance)." % name)
        # Build Security Group
        if self._custom_elb_sgs:
            elb_sgs = self._custom_elb_sgs
        else:
            elb_sg = SecurityGroup(
                "ElbSecurityGroup",
                GroupDescription=Sub("${AWS::StackName}-ElbSg"),
                Tags=self.tags_with(Name=Sub("${AWS::StackName}-ElbSg")),
                VpcId=self.vpc_id,
                SecurityGroupEgress=[
                    SecurityGroupRule(CidrIp="0.0.0.0/0", IpProtocol="-1")
                ],
                SecurityGroupIngress=self._sg_rules)
            self.template.add_resource(elb_sg)
            self.template.add_output(
                Output("ElbSecurityGroupOutput",
                       Description="Security group ID assigned to the ELB",
                       Value=Ref(elb_sg),
                       Export=Export(Sub("${AWS::StackName}-ElbSg"))))
            # Build Attachment Security Group: instances in this SG accept
            # all traffic originating from the ELB SG.
            inst_sg = SecurityGroup(
                "InstanceSecurityGroup",
                GroupDescription=Sub("${AWS::StackName}-InstSg"),
                Tags=self.tags_with(Name=Sub("${AWS::StackName}-InstSg")),
                VpcId=self.vpc_id,
                SecurityGroupEgress=[
                    SecurityGroupRule(CidrIp="0.0.0.0/0", IpProtocol="-1")
                ],
                SecurityGroupIngress=[
                    SecurityGroupRule(IpProtocol="-1",
                                      SourceSecurityGroupId=Ref(elb_sg))
                ])
            self.template.add_resource(inst_sg)
            self.template.add_output(
                Output("InstanceSecurityGroupOutput",
                       Description="Convenience SG to assign to instances",
                       Value=Ref(inst_sg),
                       Export=Export(Sub("${AWS::StackName}-InstSg"))))
            elb_sgs = [Ref("ElbSecurityGroup")]
        # Build ELB
        elb = LoadBalancer("ELB",
                           SecurityGroups=elb_sgs,
                           Scheme=self._elb_scheme,
                           Subnets=self.subnet_ids,
                           Tags=self.tags_with(Name=Ref("AWS::StackName")),
                           LoadBalancerAttributes=self.elb_attributes())
        if self._elb_name:
            elb.Name = self._elb_name
        self.template.add_resource(elb)
        self.template.add_output(
            Output("ElbArnOutput",
                   Description="ARN of the ELB",
                   Value=Ref(elb),
                   Export=Export(Sub("${AWS::StackName}-ElbArn"))))
        self.template.add_output(
            Output("ElbDnsOutput",
                   Description="DNS name of the ELB",
                   Value=GetAtt("ELB", "DNSName"),
                   Export=Export(Sub("${AWS::StackName}-ElbDns"))))
        # Build Default Target Group
        if self._ecs_redirect:
            default_tg_protocol = "HTTP"
        else:
            default_tg_protocol = self.default_targets[0].protocol
        default_tg = TargetGroup(
            "DefaultTargetGroup",
            Port=8080,
            Protocol=default_tg_protocol,
            Tags=self.tags_with(Name=Sub("${AWS::StackName}-Default")),
            VpcId=self.vpc_id,
            Targets=list(
                map(lambda h: TargetDescription(Id=h.host, Port=h.port),
                    self.default_targets)),
            HealthyThresholdCount=2,
            Matcher=Matcher(HttpCode="200-399"))
        self.template.add_resource(default_tg)
        self.attach_alarm(default_tg)
        # Build Listener
        self.template.add_resource(
            Listener("HttpsListener",
                     Certificates=list(
                         map(lambda i: Certificate(CertificateArn=i),
                             self.cert_ids)),
                     DefaultActions=[
                         Action(Type="forward",
                                TargetGroupArn=Ref("DefaultTargetGroup"))
                     ],
                     LoadBalancerArn=Ref("ELB"),
                     Port=443,
                     Protocol="HTTPS"))
        # Build HTTP redirect
        if len(self.http_redirect_targets) > 0:
            # Build Redirect Target Group
            http_tg = TargetGroup(
                "RedirectTargetGroup",
                Port=8080,
                Protocol=self.http_redirect_targets[0].protocol,
                Tags=self.tags_with(Name=Sub("${AWS::StackName}-Redirect")),
                VpcId=self.vpc_id,
                Targets=list(
                    map(lambda h: TargetDescription(Id=h.host, Port=h.port),
                        self.http_redirect_targets)),
                HealthyThresholdCount=2,
                Matcher=Matcher(HttpCode="200-399"))
            self.template.add_resource(http_tg)
            self.attach_alarm(http_tg)
        if self._ecs_redirect or len(self.http_redirect_targets) > 0:
            # Port-80 traffic goes to the redirect container (ECS mode) or
            # to the dedicated redirect target group.
            if self._ecs_redirect:
                redirect_tg = "DefaultTargetGroup"
            else:
                redirect_tg = "RedirectTargetGroup"
            # Build Listener
            self.template.add_resource(
                Listener("HttpListener",
                         DefaultActions=[
                             Action(Type="forward",
                                    TargetGroupArn=Ref(redirect_tg))
                         ],
                         LoadBalancerArn=Ref("ELB"),
                         Port=80,
                         Protocol="HTTP"))
        # Build Target Groups & Rules — one TG per registered path, with
        # rules matching both "/path" and "/path/*".
        for (name, tp) in self.target_paths.iteritems():
            name_an = alpha_numeric_name(name)
            tag_name = taggable_name(name)
            g = TargetGroup(
                "PathTg" + name_an,
                Port=tp.hosts[0].port,
                Protocol=tp.hosts[0].protocol,
                Tags=self.tags_with(Name="%s/%s" % (self.env_name, tag_name),
                                    TargetPath=tag_name),
                Targets=list(map(lambda h: h.to_target_desc(), tp.hosts)),
                VpcId=self.vpc_id,
                HealthCheckPath="/%s" % name,
                HealthyThresholdCount=2,
                Matcher=tp.health_check_matcher)
            # TODO: We should probably explicitly specify this for every TG. Not
            # doing that now because it will cause lots of updates. Maybe
            # in 0.4?
            if len(tp.hosts) > 0 and tp.hosts[0].type != "instance":
                g.TargetType = tp.hosts[0].type
            if self.sticky:
                g.TargetGroupAttributes = [
                    TargetGroupAttribute(Key="stickiness.enabled",
                                         Value="true"),
                    TargetGroupAttribute(Key="stickiness.type",
                                         Value="lb_cookie")
                ]
            self.template.add_resource(g)
            self.attach_alarm(g)
            self.template.add_resource(
                ListenerRule(
                    "PathRl" + name_an,
                    Actions=[Action(Type="forward", TargetGroupArn=Ref(g))],
                    Conditions=[
                        Condition(Field="path-pattern",
                                  Values=["/%s/*" % name])
                    ],
                    ListenerArn=Ref("HttpsListener"),
                    Priority=self.priority_hash(name)))
            self.template.add_resource(
                ListenerRule(
                    "PathRln" + name_an,
                    Actions=[Action(Type="forward", TargetGroupArn=Ref(g))],
                    Conditions=[
                        Condition(Field="path-pattern", Values=["/%s" % name])
                    ],
                    ListenerArn=Ref("HttpsListener"),
                    Priority=self.priority_hash(name)))
        # Build Alternate Listeners
        for al in self.alt_listeners:
            tg_name = "AltTg%d" % al.port
            tg_protocol = al.hosts[0].protocol
            tg = TargetGroup(
                tg_name,
                Port=9999,
                Protocol=tg_protocol,
                Tags=self.tags_with(Name=Sub("${AWS::StackName}-%s" % tg_name)),
                VpcId=self.vpc_id,
                Targets=list(
                    map(lambda h: TargetDescription(Id=h.host, Port=h.port),
                        al.hosts)),
                HealthyThresholdCount=2,
                Matcher=Matcher(HttpCode="200-399"))
            self.template.add_resource(tg)
            self.attach_alarm(tg)
            listener = Listener("AltListener%d" % al.port,
                                DefaultActions=[
                                    Action(Type="forward",
                                           TargetGroupArn=Ref(tg_name))
                                ],
                                LoadBalancerArn=Ref("ELB"),
                                Port=al.port,
                                Protocol=al.protocol)
            if al.protocol == "HTTPS":
                listener.Certificates = list(
                    map(lambda i: Certificate(CertificateArn=i),
                        self.cert_ids))
            self.template.add_resource(listener)
        self._json = self.template.to_json()
        return self._json
# Publish the interesting attributes of the new EC2 instance as stack
# outputs; built data-driven to keep title/description/value in one place.
_instance_output_specs = [
    ("InstanceId",
     "InstanceId of the newly created EC2 instance",
     Ref(ec2_instance)),
    ("AZ",
     "Availability Zone of the newly created EC2 instance",
     GetAtt(ec2_instance, "AvailabilityZone")),
    ("PublicIP",
     "Public IP address of the newly created EC2 instance",
     GetAtt(ec2_instance, "PublicIp")),
    ("PrivateIP",
     "Private IP address of the newly created EC2 instance",
     GetAtt(ec2_instance, "PrivateIp")),
    ("PublicDNS",
     "Public DNSName of the newly created EC2 instance",
     GetAtt(ec2_instance, "PublicDnsName")),
    ("PrivateDNS",
     "Private DNSName of the newly created EC2 instance",
     GetAtt(ec2_instance, "PrivateDnsName")),
]
template.add_output([
    Output(title, Description=description, Value=value)
    for title, description, value in _instance_output_specs
])
# PostgreSQL RDS instance for the application.
database_instance = rds.DBInstance(
    db_logical_id,
    # Presumably the resource that provisions the master password — confirm.
    DependsOn="AppBatchbotRDSInstancePassword",
    AllocatedStorage="30",  # GB
    DBInstanceClass="db.t2.small",
    DBInstanceIdentifier=Ref("AWS::StackName"),
    DBName=Ref("DBName"),
    DBSubnetGroupName=Ref("DBSubnetGroupName"),
    Engine="postgres",
    EngineVersion="12.3",
    # NOTE(review): looks like a scrubbed placeholder — confirm the real
    # password is injected here (e.g. via a secret reference), never a
    # literal committed to source.
    MasterUserPassword="******",
    MasterUsername=Ref("DBUsername"),
    # Allows IAM-auth connections alongside password auth.
    EnableIAMDatabaseAuthentication=True,
    MultiAZ="false",
    StorageEncrypted="true",
    StorageType="standard",
    VPCSecurityGroups=[Ref("DBSecurityGroup")],
    Tags=tag_builder(),
)
t.add_resource(database_instance)

# Expose the instance endpoint so clients can discover the DB host.
t.add_output([
    Output(
        "RDSEndpoint",
        Description="DNS name of RDS instance",
        Value=GetAtt(database_instance, "Endpoint.Address"),
    )
])

print(t.to_json())
asg.Tag('Name', 'AppServer', True), asg.Tag('Color', Ref(color_param), True) ] )) # # Outputs # t.add_output([ Output( 'AppServerLoadBalancerEndpoint', Description='Application server endpoint', Value=GetAtt(app_server_load_balancer, 'DNSName') ), Output( 'AppServerLoadBalancerHostedZoneNameID', Description='ID of canonical hosted zone name for ELB', Value=GetAtt( app_server_load_balancer, 'CanonicalHostedZoneNameID' ) ) ]) if __name__ == '__main__': utils.validate_cloudformation_template(t.to_json()) file_name = __file__.replace('.py', '.json') with open(file_name, 'w') as f: f.write(t.to_json())
'${EnhancedMonitoringConditionRole}'), Ref(AWS_NO_VALUE)), )) # # Output # t.add_output([ # Output('EndpointAddress', # Description='Endpoint address', # Value=GetAtt(rds_instance, 'Endpoint.Address') # ), # Output('EndpointPort', # Description='Endpoint port', # Value=GetAtt(rds_instance, 'Endpoint.Port') # ), # Output('EnvironmentVariables', # Description='Database environment variables', # Value=Join('', [ # 'PGHOST=', GetAtt(rds_instance, 'Endpoint.Address'), ' ', # 'PGPORT=', GetAtt(rds_instance, 'Endpoint.Port'), ' ', # 'PGUSER='******' ', # 'PGPASSWORD='******' ', # ])), ]) # # Write template # cfnutil.write(t, __file__.replace('Template.py', '.template.yaml'),
) ) # utils.add_lambda_scheduler(template_res=template, # lambda_function_name='hello_world', # lambda_function_arn=GetAtt(hello_world_lambda, 'Arn'), # cron='cron(0/5 * * * ? *)' # ) template.add_output([ Output('LambdaExecutionRole', Description='Lambdas Execution role', Value=Ref(lambda_execution_role)), Output('HelloWorldLambda', Description='HelloWorld Lambda Function', Value=Ref(hello_world_lambda)), Output('HelloWorldLambdaArn', Description='HelloWorld Arn of Lambda Function', Value=GetAtt(hello_world_lambda, 'Arn')), ]) template_json = template.to_json(indent=4) # print(template_json) stack_args = { 'StackName': STACK_NAME, 'TemplateBody': template_json, 'Capabilities': [ 'CAPABILITY_IAM',
KeyName=Ref(keyname_param), SecurityGroups=[{'Ref': 'Private'}], UserData=Base64("80") )), template.add_resource(ec2.Instance( "Ec2InstansePrivate2", ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), InstanceType="t1.micro", KeyName=Ref(keyname_param), SecurityGroups=[{'Ref': 'Private'}], UserData=Base64("80") )), ] """ template.add_output([ Output( "WebServers", Description="InstanceIds of created web servers", Value=Ref(web_servers), ), Output( "DBServers", Description="InstanceIds of created DB servers", Value=Ref(db_servers), ), Output( "CalculationServers", Description="InstanceIds of created Calculation servers", Value=Ref(calculation_servers),
"CPUTooHigh", AlarmDescription="Alarm if CPU too high", Namespace="AWS/EC2", MetricName="CPUUtilization", Dimensions=[ MetricDimension(Name="AutoScalingGroupName", Value=Ref("AutoscalingGroup")), ], Statistic="Average", Period="60", EvaluationPeriods="1", Threshold="60", ComparisonOperator="GreaterThanThreshold", AlarmActions=[ Ref("ScaleUpPolicy"), ], InsufficientDataActions=[Ref("ScaleUpPolicy")], )) t.add_output( Output( "WebUrl", Description="Application endpoint", Value=Join("", [ "http://", GetAtt("LoadBalancer", "DNSName"), ":", ApplicationPort ]), )) print t.to_json()
)) gatewayattachment = t.add_resource(VPCGatewayAttachment( "GatewayAttachment", VpcId=Ref("VPC"), InternetGatewayId=Ref("InternetGateway"), )) securitygroupingress1 = SecurityGroupIngress( "SecurityGroupIngress1", CidrIp="10.0.0.0/16", FromPort="80", ToPort="80", IpProtocol="tcp", ) securitygroup = t.add_resource(SecurityGroup( "SecurityGroup", GroupDescription="Security Group", SecurityGroupIngress=[securitygroupingress1], VpcId=Ref("VPC"), )) t.add_output(Output( "ClusterEndpoint", Value=Join(":", [GetAtt(redshiftcluster, "Endpoint.Address"), GetAtt(redshiftcluster, "Endpoint.Port")]), )) print(t.to_json())
addqueue = t.add_resource( Queue("Adding", QueueName="Adding.fifo", ReceiveMessageWaitTimeSeconds=20, FifoQueue=True)) mirrorqueue = t.add_resource( Queue("Mirroring", QueueName="Mirroring.fifo", ReceiveMessageWaitTimeSeconds=20, FifoQueue=True)) for queue in [inbound, outbound, addqueue, mirrorqueue]: t.add_output([ Output("{}QueueURL".format(queue.title), Description="{} SQS Queue URL".format(queue.title), Value=Ref(queue)), Output("{}QueueARN".format(queue.title), Description="ARN of {} SQS Queue".format(queue.title), Value=GetAtt(queue, "Arn")), ]) # DyanamoDB: NetKAN Status netkan_db = t.add_resource( Table( "NetKANStatus", AttributeDefinitions=[ AttributeDefinition(AttributeName="ModIdentifier", AttributeType="S"), ], KeySchema=[KeySchema(AttributeName="ModIdentifier", KeyType="HASH")], TableName="NetKANStatus", ProvisionedThroughput=ProvisionedThroughput(
tpl.add_description(
    "Create a superadmin user with all required privileges for this project. "
)

# Resources: the IAM user plus an active access key bound to it.
admin_user = tpl.add_resource(
    User(title='czpycon2015'))
admin_key = tpl.add_resource(
    AccessKey("Troposphere",
              Status="Active",
              UserName=Ref(admin_user)))

# Outputs: surface both halves of the credential pair.
for title, value, description in (
        ("AccessKey", Ref(admin_key),
         "AWSAccessKeyId of superuser"),
        ("SecretKey", GetAtt(admin_key, "SecretAccessKey"),
         "AWSSecretKey of superuser"),
):
    tpl.add_output(Output(title, Value=value, Description=description))

if __name__ == '__main__':
    print(tpl.to_json())
"StorReduceMonitorElasticAddress", Value=Join( "", ["http://", GetAtt("MonitorInstance", "PublicDnsName"), ":9989"]), Description="Address for Elasticsearch on StorReduce Monitor")) # for i in range(MIN_INSTANCES): # outputs.append(generate_private_DNS_output(i)) # outputs.append(generate_private_IP_output(i)) # for i in range(MIN_INSTANCES, MAX_INSTANCES): # outputs.append(add_conditional(generate_private_DNS_output(i), i)) # outputs.append(add_conditional(generate_private_IP_output(i), i)) t.add_output(outputs) print(t.to_json()) # "Vault2RecoveryAlarm": { # "Type": "AWS::CloudWatch::Alarm", # "Properties": { # "AlarmDescription": "EC2 Autorecovery for Vault2 node. Autorecover if we fail EC2 status checks for 5 minutes.", # "Namespace": "AWS/EC2", # "MetricName": "StatusCheckFailed_System", # "Statistic": "Minimum", # "Period": "60", # "EvaluationPeriods": "5", # "ComparisonOperator": "GreaterThanThreshold", # "Threshold": "0", # "AlarmActions": [
def create():
    """Build and return the Elasticsearch stack template.

    The template contains a dedicated VPC (IGW, public subnet, routing,
    and a security group open to the world on port 9200) plus a single
    t2.micro instance that installs Elasticsearch 2.x via cfn-init using
    an ordered config set ("first" lays down the repo/GPG key, "second"
    installs and starts the service).

    NOTE(review): relies on a module-level ``region`` variable being
    defined before this function is called -- confirm against the rest
    of the file.
    """
    es = Template()
    es.add_description("Stack defining the elasticsearch instance")

    # Get latest AMIs
    def getAMI(region):
        # Find the most recent Amazon Linux HVM/gp2 AMI in the region.
        # boto2 API; creationDate is an ISO-8601 string, so plain string
        # comparison orders chronologically.
        AMIMap = {}
        print("Getting latest AMZN linux AMI in %s" % region)
        ec2conn = boto.ec2.connect_to_region(region)
        images = ec2conn.get_all_images(
            owners=["amazon"],
            filters={"name": "amzn-ami-hvm-*.x86_64-gp2"})
        latestDate = ""
        latestAMI = ""
        for image in images:
            if image.creationDate > latestDate:
                latestDate = image.creationDate
                latestAMI = image.id
        AMIMap[region] = {"id": latestAMI}
        return AMIMap

    # Create AMI Map
    es.add_mapping("AMIMap", getAMI(region))

    # Create es VPC
    esVPC = es.add_resource(
        VPC(
            "esVPC",
            CidrBlock="10.0.0.0/16",
            Tags=Tags(
                Name="esVPC"
            )
        )
    )

    # Create es IGW
    esIGW = es.add_resource(
        InternetGateway(
            "esIGW"
        )
    )

    # Attach IGW to VPC
    esIGWAttachment = es.add_resource(
        VPCGatewayAttachment(
            "esIGWAttachment",
            VpcId=Ref(esVPC),
            InternetGatewayId=Ref(esIGW)
        )
    )

    # Create es Subnet
    esSubnet = es.add_resource(
        Subnet(
            "esSubnet",
            CidrBlock="10.0.0.0/24",
            VpcId=Ref(esVPC)
        )
    )

    # Create es RTB
    esRTB = es.add_resource(
        RouteTable(
            "esRTB",
            VpcId=Ref(esVPC)
        )
    )

    # Create route to IGW; DependsOn avoids racing the gateway attachment.
    esDefaultRoute = es.add_resource(
        Route(
            "esDefaultRoute",
            DependsOn="esIGWAttachment",
            GatewayId=Ref(esIGW),
            DestinationCidrBlock="0.0.0.0/0",
            RouteTableId=Ref(esRTB)
        )
    )

    # Associate RTB with Subnet
    esSubnetRTBAssociation = es.add_resource(
        SubnetRouteTableAssociation(
            "esSubnetRTBAssociation",
            SubnetId=Ref(esSubnet),
            RouteTableId=Ref(esRTB)
        )
    )

    # Create es Security Group
    # NOTE(review): 9200 is open to 0.0.0.0/0 -- confirm this is intended.
    esSecurityGroup = es.add_resource(
        SecurityGroup(
            "esSecurityGroup",
            GroupDescription="Allow inbound access on port 9200",
            SecurityGroupIngress=[
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="9200",
                    ToPort="9200",
                    CidrIp="0.0.0.0/0"
                )
            ],
            VpcId=Ref(esVPC)
        )
    )

    # Create es instance metadata
    esInstanceMetadata = Metadata(
        Init(
            # Use ConfigSets to ensure GPG key and repo file are in place
            # before trying to install elasticsearch
            InitConfigSets(
                ordered=["first", "second"]
            ),
            first=InitConfig(
                files=InitFiles(
                    {
                        # cfn-hup notices when the cloudformation stack is
                        # changed
                        "/etc/cfn/cfn-hup.conf": InitFile(
                            content=Join("", [
                                "[main]\n",
                                "stack=", Ref("AWS::StackName"), "\n",
                                "region=", Ref("AWS::Region"), "\n"
                            ]),
                            mode="000400",
                            owner="root",
                            group="root"
                        ),
                        # cfn-hup will then trigger cfn-init to run.
                        # This lets us update the instance just by updating
                        # the stack
                        "/etc/cfn/hooks.d/cfn-auto-reloader.conf": InitFile(
                            content=Join("", [
                                "[cfn-auto-reloader-hook]\n",
                                "triggers=post.update\n",
                                "path=Resources.esInstance.Metadata\n",
                                "action=/opt/aws/bin/cfn-init -v --stack ",
                                Ref("AWS::StackName"), " ",
                                "--resource esInstance ",
                                "--region ", Ref("AWS::Region"), " ",
                                # BUG FIX: was "--c ordered"; cfn-init's
                                # config-set flag is "-c" (long form
                                # "--configsets"), matching the UserData
                                # invocation below.
                                "-c ordered\n",
                                "runas=root\n"
                            ]),
                            mode="000400",
                            owner="root",
                            group="root"
                        ),
                        # repo file for elastic search
                        "/etc/yum.repos.d/elasticsearch.repo": InitFile(
                            content=Join("", [
                                "[elasticsearch-2.x]\n",
                                "name=Elasticsearch repository"
                                " for 2.x packages\n",
                                "baseurl=http://packages.elastic.co/"
                                "elasticsearch/2.x/centos\n",
                                "gpgcheck=1\n",
                                "gpgkey=http://packages.elastic.co/"
                                "GPG-KEY-elasticsearch\n",
                                "enabled=1\n"
                            ]),
                            mode="000400",
                            owner="root",
                            group="root"
                        )
                    }
                ),
                commands={
                    # Install elasticsearch key so package will install
                    "installGPG": {
                        "command": "rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch"
                    }
                }
            ),
            second=InitConfig(
                packages={
                    "yum": {
                        # Install elasticsearch
                        "elasticsearch": [],
                    }
                },
                commands={
                    # Enable external access to elasticsearch
                    "listenOnAllinterfaces": {
                        "command": "echo \"network.host: 0.0.0.0\" >> /etc/elasticsearch/elasticsearch.yml"
                    }
                },
                services={
                    "sysvinit": InitServices(
                        {
                            "elasticsearch": InitService(
                                enabled=True,
                                ensureRunning=True
                            ),
                            "cfn-hup": InitService(
                                enabled=True,
                                ensureRunning=True,
                                files=[
                                    "/etc/cfn/cfn-hup.conf",
                                    "/etc/cfn/hooks.d/cfn-auto-reloader.conf"
                                ]
                            )
                        }
                    )
                }
            )
        )
    )

    # Create es Instance
    esInstance = es.add_resource(
        Instance(
            "esInstance",
            ImageId=FindInMap("AMIMap", Ref("AWS::Region"), "id"),
            InstanceType="t2.micro",
            Metadata=esInstanceMetadata,
            UserData=Base64(
                Join("", [
                    "#!/bin/bash\n",
                    "/opt/aws/bin/cfn-init -v ",
                    "--stack ", Ref("AWS::StackName"), " ",
                    "--resource esInstance ",
                    "--region ", Ref("AWS::Region"), " ",
                    "-c ordered"
                ])
            ),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=[
                        Ref(esSecurityGroup)
                    ],
                    AssociatePublicIpAddress="true",
                    DeviceIndex="0",
                    DeleteOnTermination="true",
                    SubnetId=Ref(esSubnet),
                )
            ],
            Tags=Tags(
                Name="esInstance"
            )
        )
    )

    # Output address
    es.add_output(
        [Output(
            "esAddress",
            Description="Elastic Search address",
            Value=Join("", [
                "http://",
                GetAtt("esInstance", "PublicIp"),
                ":9200/"
            ])
        )]
    )

    return es
] t.add_resource(sg) # This is the keypair that CloudFormation will ask you about when launching the stack keypair = t.add_parameter(Parameter( "KeyName", Description="Name of the SSH key pair that will be used to access the instance", Type="String", )) instance = ec2.Instance("Jenkins") instance.ImageId = "ami-e689729e" instance.InstanceType = "t2.micro" instance.SecurityGroups = [Ref(sg)] instance.KeyName = Ref(keypair) instance.IamInstanceProfile=Ref("InstanceProfile") t.add_resource(instance) t.add_output(Output( "InstanceAccess", Description="Command to use to SSH to instance", Value=Join("", ["ssh -i ~/.ssh/LampKey.pem ec2-user@", GetAtt(instance, "PublicDnsName")]) )) t.add_output(Output( "WebURL", Description="The URL of the application", Value=Join("",["http://", GetAtt(instance,"PublicDnsName"),":8080"]) )) print(t.to_json())
def main():
    """Build and print an ELB sample stack: one ALB routing to a web
    instance (default) and an api instance (path pattern ``/api/*``).

    NOTE(review): ``elb`` here must be ``troposphere.elasticloadbalancingv2``
    (TargetGroup/Listener/ListenerRule are v2-only types) -- confirm the
    file's import.
    """
    template = Template()
    template.add_version("2010-09-09")
    template.add_description(
        "AWS CloudFormation Sample Template: ELB with 2 EC2 instances")
    # AddAMI is defined elsewhere in this file; presumably it populates the
    # "RegionMap" mapping consumed by FindInMap below -- verify.
    AddAMI(template)

    # Add the Parameters
    keyname_param = template.add_parameter(Parameter(
        "KeyName",
        Type="String",
        Default="mark",
        Description="Name of an existing EC2 KeyPair to "
                    "enable SSH access to the instance",
    ))

    # Registered by logical name only; referenced later via
    # Ref("InstanceType") rather than through a Python variable.
    template.add_parameter(Parameter(
        "InstanceType",
        Type="String",
        Description="WebServer EC2 instance type",
        Default="m1.small",
        AllowedValues=[
            "t1.micro", "m1.small", "m1.medium", "m1.large", "m1.xlarge",
            "m2.xlarge", "m2.2xlarge", "m2.4xlarge", "c1.medium", "c1.xlarge",
            "cc1.4xlarge", "cc2.8xlarge", "cg1.4xlarge"
        ],
        ConstraintDescription="must be a valid EC2 instance type.",
    ))

    webport_param = template.add_parameter(Parameter(
        "WebServerPort",
        Type="String",
        Default="8888",
        Description="TCP/IP port of the web server",
    ))

    apiport_param = template.add_parameter(Parameter(
        "ApiServerPort",
        Type="String",
        Default="8889",
        Description="TCP/IP port of the api server",
    ))

    # Hard-coded defaults below are account-specific sample values.
    subnetA = template.add_parameter(Parameter(
        "subnetA",
        Type="String",
        Default="subnet-096fd06d"
    ))

    subnetB = template.add_parameter(Parameter(
        "subnetB",
        Type="String",
        Default="subnet-1313ef4b"
    ))

    VpcId = template.add_parameter(Parameter(
        "VpcId",
        Type="String",
        Default="vpc-82c514e6"
    ))

    # Define the instance security group: SSH plus both service ports,
    # all open to 0.0.0.0/0.
    instance_sg = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription="Enable SSH and HTTP access on the inbound port",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=Ref(webport_param),
                    ToPort=Ref(webport_param),
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=Ref(apiport_param),
                    ToPort=Ref(apiport_param),
                    CidrIp="0.0.0.0/0",
                ),
            ]
        )
    )

    # Add the web server instance. The port number is delivered to the
    # instance via UserData (base64-encoded parameter value).
    WebInstance = template.add_resource(ec2.Instance(
        "WebInstance",
        SecurityGroups=[Ref(instance_sg)],
        KeyName=Ref(keyname_param),
        InstanceType=Ref("InstanceType"),
        ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
        UserData=Base64(Ref(webport_param)),
    ))

    # Add the api server instance (same pattern, api port).
    ApiInstance = template.add_resource(ec2.Instance(
        "ApiInstance",
        SecurityGroups=[Ref(instance_sg)],
        KeyName=Ref(keyname_param),
        InstanceType=Ref("InstanceType"),
        ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
        UserData=Base64(Ref(apiport_param)),
    ))

    # Add the application ELB (ALB), internet-facing across both subnets.
    ApplicationElasticLB = template.add_resource(elb.LoadBalancer(
        "ApplicationElasticLB",
        Name="ApplicationElasticLB",
        Scheme="internet-facing",
        Subnets=[Ref(subnetA), Ref(subnetB)]
    ))

    # Target group for the web instance (HTTP health check, expects 200).
    TargetGroupWeb = template.add_resource(elb.TargetGroup(
        "TargetGroupWeb",
        HealthCheckIntervalSeconds="30",
        HealthCheckProtocol="HTTP",
        HealthCheckTimeoutSeconds="10",
        HealthyThresholdCount="4",
        Matcher=elb.Matcher(
            HttpCode="200"),
        Name="WebTarget",
        Port=Ref(webport_param),
        Protocol="HTTP",
        Targets=[elb.TargetDescription(
            Id=Ref(WebInstance),
            Port=Ref(webport_param))],
        UnhealthyThresholdCount="3",
        VpcId=Ref(VpcId)
    ))

    # Target group for the api instance.
    TargetGroupApi = template.add_resource(elb.TargetGroup(
        "TargetGroupApi",
        HealthCheckIntervalSeconds="30",
        HealthCheckProtocol="HTTP",
        HealthCheckTimeoutSeconds="10",
        HealthyThresholdCount="4",
        Matcher=elb.Matcher(
            HttpCode="200"),
        Name="ApiTarget",
        Port=Ref(apiport_param),
        Protocol="HTTP",
        Targets=[elb.TargetDescription(
            Id=Ref(ApiInstance),
            Port=Ref(apiport_param))],
        UnhealthyThresholdCount="3",
        VpcId=Ref(VpcId)
    ))

    # Port-80 listener; default action forwards to the web target group.
    Listener = template.add_resource(elb.Listener(
        "Listener",
        Port="80",
        Protocol="HTTP",
        LoadBalancerArn=Ref(ApplicationElasticLB),
        DefaultActions=[elb.Action(
            Type="forward",
            TargetGroupArn=Ref(TargetGroupWeb)
        )]
    ))

    # Requests matching /api/* are forwarded to the api target group.
    template.add_resource(elb.ListenerRule(
        "ListenerRuleApi",
        ListenerArn=Ref(Listener),
        Conditions=[elb.Condition(
            Field="path-pattern",
            Values=["/api/*"])],
        Actions=[elb.Action(
            Type="forward",
            TargetGroupArn=Ref(TargetGroupApi)
        )],
        Priority="1"
    ))

    # Expose the load balancer's public URL.
    template.add_output(Output(
        "URL",
        Description="URL of the sample website",
        Value=Join("", ["http://", GetAtt(ApplicationElasticLB, "DNSName")])
    ))

    print(template.to_json())
def test_output(self):
    """Adding the same Output title twice must raise ValueError."""
    template = Template()
    duplicate = Output("MyOutput", Value="myvalue")
    template.add_output(duplicate)
    # A second registration of the identical output is rejected.
    with self.assertRaises(ValueError):
        template.add_output(duplicate)
def main():
    """
    Create an ElastiCache Redis Cluster and an EC2 web-server instance
    (PHP + phpredis sample app), then print the template as JSON.
    """
    template = Template()

    # Description
    template.add_description(
        'AWS CloudFormation Sample Template ElastiCache_Redis:'
        'Sample template showing how to create an Amazon'
        'ElastiCache Redis Cluster. **WARNING** This template'
        'creates an Amazon EC2 Instance and an Amazon ElastiCache'
        'Cluster. You will be billed for the AWS resources used'
        'if you create a stack from this template.')

    # Mappings
    # Instance type -> virtualization architecture (PV vs HVM), used to
    # pick the right AMI column from AWSRegionArch2AMI below.
    template.add_mapping('AWSInstanceType2Arch', {
        't1.micro': {'Arch': 'PV64'},
        't2.micro': {'Arch': 'HVM64'},
        't2.small': {'Arch': 'HVM64'},
        't2.medium': {'Arch': 'HVM64'},
        'm1.small': {'Arch': 'PV64'},
        'm1.medium': {'Arch': 'PV64'},
        'm1.large': {'Arch': 'PV64'},
        'm1.xlarge': {'Arch': 'PV64'},
        'm2.xlarge': {'Arch': 'PV64'},
        'm2.2xlarge': {'Arch': 'PV64'},
        'm2.4xlarge': {'Arch': 'PV64'},
        'm3.medium': {'Arch': 'HVM64'},
        'm3.large': {'Arch': 'HVM64'},
        'm3.xlarge': {'Arch': 'HVM64'},
        'm3.2xlarge': {'Arch': 'HVM64'},
        'c1.medium': {'Arch': 'PV64'},
        'c1.xlarge': {'Arch': 'PV64'},
        'c3.large': {'Arch': 'HVM64'},
        'c3.xlarge': {'Arch': 'HVM64'},
        'c3.2xlarge': {'Arch': 'HVM64'},
        'c3.4xlarge': {'Arch': 'HVM64'},
        'c3.8xlarge': {'Arch': 'HVM64'},
        'c4.large': {'Arch': 'HVM64'},
        'c4.xlarge': {'Arch': 'HVM64'},
        'c4.2xlarge': {'Arch': 'HVM64'},
        'c4.4xlarge': {'Arch': 'HVM64'},
        'c4.8xlarge': {'Arch': 'HVM64'},
        'g2.2xlarge': {'Arch': 'HVMG2'},
        'r3.large': {'Arch': 'HVM64'},
        'r3.xlarge': {'Arch': 'HVM64'},
        'r3.2xlarge': {'Arch': 'HVM64'},
        'r3.4xlarge': {'Arch': 'HVM64'},
        'r3.8xlarge': {'Arch': 'HVM64'},
        'i2.xlarge': {'Arch': 'HVM64'},
        'i2.2xlarge': {'Arch': 'HVM64'},
        'i2.4xlarge': {'Arch': 'HVM64'},
        'i2.8xlarge': {'Arch': 'HVM64'},
        'd2.xlarge': {'Arch': 'HVM64'},
        'd2.2xlarge': {'Arch': 'HVM64'},
        'd2.4xlarge': {'Arch': 'HVM64'},
        'd2.8xlarge': {'Arch': 'HVM64'},
        'hi1.4xlarge': {'Arch': 'HVM64'},
        'hs1.8xlarge': {'Arch': 'HVM64'},
        'cr1.8xlarge': {'Arch': 'HVM64'},
        'cc2.8xlarge': {'Arch': 'HVM64'}
    })

    # Region -> AMI id per architecture (dated sample AMIs).
    template.add_mapping('AWSRegionArch2AMI', {
        'us-east-1': {'PV64': 'ami-0f4cfd64',
                      'HVM64': 'ami-0d4cfd66',
                      'HVMG2': 'ami-5b05ba30'},
        'us-west-2': {'PV64': 'ami-d3c5d1e3',
                      'HVM64': 'ami-d5c5d1e5',
                      'HVMG2': 'ami-a9d6c099'},
        'us-west-1': {'PV64': 'ami-85ea13c1',
                      'HVM64': 'ami-87ea13c3',
                      'HVMG2': 'ami-37827a73'},
        'eu-west-1': {'PV64': 'ami-d6d18ea1',
                      'HVM64': 'ami-e4d18e93',
                      'HVMG2': 'ami-72a9f105'},
        'eu-central-1': {'PV64': 'ami-a4b0b7b9',
                         'HVM64': 'ami-a6b0b7bb',
                         'HVMG2': 'ami-a6c9cfbb'},
        'ap-northeast-1': {'PV64': 'ami-1a1b9f1a',
                           'HVM64': 'ami-1c1b9f1c',
                           'HVMG2': 'ami-f644c4f6'},
        'ap-southeast-1': {'PV64': 'ami-d24b4280',
                           'HVM64': 'ami-d44b4286',
                           'HVMG2': 'ami-12b5bc40'},
        'ap-southeast-2': {'PV64': 'ami-ef7b39d5',
                           'HVM64': 'ami-db7b39e1',
                           'HVMG2': 'ami-b3337e89'},
        'sa-east-1': {'PV64': 'ami-5b098146',
                      'HVM64': 'ami-55098148',
                      'HVMG2': 'NOT_SUPPORTED'},
        'cn-north-1': {'PV64': 'ami-bec45887',
                       'HVM64': 'ami-bcc45885',
                       'HVMG2': 'NOT_SUPPORTED'}
    })

    # Region -> service principals (China partition uses *.com.cn).
    template.add_mapping('Region2Principal', {
        'us-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'us-west-2': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'us-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'eu-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-southeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-northeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-southeast-2': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'sa-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'cn-north-1': {'EC2Principal': 'ec2.amazonaws.com.cn',
                       'OpsWorksPrincipal': 'opsworks.amazonaws.com.cn'},
        'eu-central-1': {'EC2Principal': 'ec2.amazonaws.com',
                         'OpsWorksPrincipal': 'opsworks.amazonaws.com'}
    })

    # Parameters
    cachenodetype = template.add_parameter(Parameter(
        'ClusterNodeType',
        Description='The compute and memory capacity of the nodes in the Redis'
                    ' Cluster',
        Type='String',
        Default='cache.m1.small',
        AllowedValues=['cache.m1.small', 'cache.m1.large', 'cache.m1.xlarge',
                       'cache.m2.xlarge', 'cache.m2.2xlarge',
                       'cache.m2.4xlarge', 'cache.c1.xlarge'],
        ConstraintDescription='must select a valid Cache Node type.',
    ))

    instancetype = template.add_parameter(Parameter(
        'InstanceType',
        Description='WebServer EC2 instance type',
        Type='String',
        Default='t2.micro',
        AllowedValues=['t1.micro', 't2.micro', 't2.small', 't2.medium',
                       'm1.small',
                       'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge',
                       'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large',
                       'm3.xlarge', 'm3.2xlarge', 'c1.medium', 'c1.xlarge',
                       'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge',
                       'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge',
                       'c4.4xlarge', 'c4.8xlarge', 'g2.2xlarge', 'r3.large',
                       'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge',
                       'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge',
                       'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge',
                       'hi1.4xlarge', 'hs1.8xlarge', 'cr1.8xlarge',
                       'cc2.8xlarge', 'cg1.4xlarge'],
        ConstraintDescription='must be a valid EC2 instance type.',
    ))

    keyname = template.add_parameter(Parameter(
        'KeyName',
        Description='Name of an existing EC2 KeyPair to enable SSH access'
                    ' to the instance',
        Type='AWS::EC2::KeyPair::KeyName',
        ConstraintDescription='must be the name of an existing EC2 KeyPair.',
    ))

    sshlocation = template.add_parameter(Parameter(
        'SSHLocation',
        Description='The IP address range that can be used to SSH to'
                    ' the EC2 instances',
        Type='String',
        MinLength='9',
        MaxLength='18',
        Default='0.0.0.0/0',
        AllowedPattern='(\\d{1,3})\\.(\\d{1,3})\\.'
                       '(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})',
        ConstraintDescription='must be a valid IP CIDR range of the'
                              ' form x.x.x.x/x.'
    ))

    # Resources
    # IAM role assumable by EC2 (principal resolved per region/partition).
    webserverrole = template.add_resource(iam.Role(
        'WebServerRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[AssumeRole],
                    Principal=Principal('Service',
                                        [FindInMap('Region2Principal',
                                                   Ref('AWS::Region'),
                                                   'EC2Principal')]),
                )
            ]
        ),
        Path='/',
    ))

    # Inline policy letting the instance discover its cache cluster config.
    template.add_resource(iam.PolicyType(
        'WebServerRolePolicy',
        PolicyName='WebServerRole',
        PolicyDocument=awacs.aws.Policy(
            Statement=[awacs.aws.Statement(
                Action=[awacs.aws.Action("elasticache",
                                         "DescribeCacheClusters")],
                Resource=["*"],
                Effect=awacs.aws.Allow
            )]
        ),
        Roles=[Ref(webserverrole)],
    ))

    webserverinstanceprofile = template.add_resource(iam.InstanceProfile(
        'WebServerInstanceProfile',
        Path='/',
        Roles=[Ref(webserverrole)],
    ))

    # SSH restricted to SSHLocation; HTTP open to the world.
    webserversg = template.add_resource(ec2.SecurityGroup(
        'WebServerSecurityGroup',
        GroupDescription='Enable HTTP and SSH access',
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='22',
                ToPort='22',
                CidrIp=Ref(sshlocation),
            ),
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='80',
                ToPort='80',
                CidrIp='0.0.0.0/0',
            )
        ]
    ))

    # Web server instance: cfn-init metadata installs Apache/PHP, drops the
    # sample app, builds phpredis, and wires up cfn-hup for stack updates.
    webserverinstance = template.add_resource(ec2.Instance(
        'WebServerInstance',
        Metadata=cloudformation.Metadata(
            cloudformation.Init({
                'config': cloudformation.InitConfig(
                    packages={
                        'yum': {
                            'httpd': [],
                            'php': [],
                            'php-devel': [],
                            'gcc': [],
                            'make': []
                        }
                    },
                    files=cloudformation.InitFiles({
                        # Sample PHP page that reads the cluster endpoint
                        # from /tmp/cacheclusterconfig and round-trips a key.
                        '/var/www/html/index.php': cloudformation.InitFile(
                            content=Join('', [
                                '<?php\n',
                                'echo \"<h1>AWS CloudFormation sample'
                                ' application for Amazon ElastiCache'
                                ' Redis Cluster</h1>\";\n',
                                '\n',
                                '$cluster_config = json_decode('
                                'file_get_contents(\'/tmp/cacheclusterconfig\''
                                '), true);\n',
                                '$endpoint = $cluster_config[\'CacheClusters'
                                '\'][0][\'CacheNodes\'][0][\'Endpoint\'][\'Add'
                                'ress\'];\n',
                                '$port = $cluster_config[\'CacheClusters\'][0]'
                                '[\'CacheNodes\'][0][\'Endpoint\'][\'Port\'];'
                                '\n',
                                '\n',
                                'echo \"<p>Connecting to Redis Cache Cluster '
                                'node \'{$endpoint}\' on port {$port}</p>\";'
                                '\n',
                                '\n',
                                '$redis=new Redis();\n',
                                '$redis->connect($endpoint, $port);\n',
                                '$redis->set(\'testkey\', \'Hello World!\');'
                                '\n',
                                '$return = $redis->get(\'testkey\');\n',
                                '\n',
                                'echo \"<p>Retrieved value: $return</p>\";'
                                '\n',
                                '?>\n'
                            ]),
                            mode='000644',
                            owner='apache',
                            group='apache'
                        ),
                        # Refresh the cached cluster config every 5 minutes.
                        '/etc/cron.d/get_cluster_config':
                            cloudformation.InitFile(
                                content='*/5 * * * * root'
                                        ' /usr/local/bin/get_cluster_config',
                                mode='000644',
                                owner='root',
                                group='root'
                            ),
                        # Script that snapshots the cluster description to
                        # /tmp/cacheclusterconfig (needs the IAM policy above).
                        '/usr/local/bin/get_cluster_config':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '#! /bin/bash\n',
                                    'aws elasticache describe-cache-clusters ',
                                    '         --cache-cluster-id ',
                                    Ref('RedisCluster'),
                                    '         --show-cache-node-info'
                                    ' --region ', Ref('AWS::Region'),
                                    ' > /tmp/cacheclusterconfig\n'
                                ]),
                                mode='000755',
                                owner='root',
                                group='root'
                            ),
                        # Build/install the phpredis extension from source.
                        '/usr/local/bin/install_phpredis':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '#! /bin/bash\n',
                                    'cd /tmp\n',
                                    'wget https://github.com/nicolasff/'
                                    'phpredis/zipball/master -O phpredis.zip'
                                    '\n',
                                    'unzip phpredis.zip\n',
                                    'cd nicolasff-phpredis-*\n',
                                    'phpize\n',
                                    './configure\n',
                                    'make && make install\n',
                                    'touch /etc/php.d/redis.ini\n',
                                    'echo extension=redis.so > /etc/php.d/'
                                    'redis.ini\n'
                                ]),
                                mode='000755',
                                owner='root',
                                group='root'
                            ),
                        # cfn-hup configuration for stack-update detection.
                        '/etc/cfn/cfn-hup.conf': cloudformation.InitFile(
                            content=Join('', [
                                '[main]\n',
                                'stack=', Ref('AWS::StackId'), '\n',
                                'region=', Ref('AWS::Region'), '\n'
                            ]),
                            mode='000400',
                            owner='root',
                            group='root'
                        ),
                        # Hook that re-runs cfn-init when the metadata changes.
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '[cfn-auto-reloader-hook]\n',
                                    'triggers=post.update\n',
                                    'path=Resources.WebServerInstance.Metadata'
                                    '.AWS::CloudFormation::Init\n',
                                    'action=/opt/aws/bin/cfn-init -v ',
                                    '         --stack ', Ref('AWS::StackName'),
                                    '         --resource WebServerInstance ',
                                    '         --region ', Ref('AWS::Region'),
                                    '\n',
                                    'runas=root\n'
                                ]),
                                # Why doesn't the Amazon template have this?
                                # mode='000400',
                                # owner='root',
                                # group='root'
                            ),
                    }),
                    commands={
                        '01-install_phpredis': {
                            'command': '/usr/local/bin/install_phpredis'
                        },
                        '02-get-cluster-config': {
                            'command': '/usr/local/bin/get_cluster_config'
                        }
                    },
                    services={
                        "sysvinit": cloudformation.InitServices({
                            "httpd": cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                            ),
                            "cfn-hup": cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                                files=['/etc/cfn/cfn-hup.conf',
                                       '/etc/cfn/hooks.d/'
                                       'cfn-auto-reloader.conf']
                            ),
                        }),
                    },
                )
            })
        ),
        ImageId=FindInMap('AWSRegionArch2AMI', Ref('AWS::Region'),
                          FindInMap('AWSInstanceType2Arch',
                                    Ref(instancetype), 'Arch')),
        InstanceType=Ref(instancetype),
        SecurityGroups=[Ref(webserversg)],
        KeyName=Ref(keyname),
        IamInstanceProfile=Ref(webserverinstanceprofile),
        UserData=Base64(Join('', [
            '#!/bin/bash -xe\n',
            'yum update -y aws-cfn-bootstrap\n',
            '# Setup the PHP sample application\n',
            '/opt/aws/bin/cfn-init -v ',
            '         --stack ', Ref('AWS::StackName'),
            '         --resource WebServerInstance ',
            '         --region ', Ref('AWS::Region'), '\n',
            '# Signal the status of cfn-init\n',
            '/opt/aws/bin/cfn-signal -e $? ',
            '         --stack ', Ref('AWS::StackName'),
            '         --resource WebServerInstance ',
            '         --region ', Ref('AWS::Region'), '\n'
        ])),
        # Stack creation waits up to 15 minutes for the cfn-signal above.
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M')
        ),
        # NOTE(review): 'Troposhpere' is a typo in the tag value; left as-is
        # here because changing it alters the emitted template.
        Tags=Tags(Application=Ref('AWS::StackId'),
                  Details='Created using Troposhpere')
    ))

    # Classic (non-VPC) ElastiCache security group pair that only admits
    # traffic from the web server's EC2 security group.
    redisclustersg = template.add_resource(elasticache.SecurityGroup(
        'RedisClusterSecurityGroup',
        Description='Lock the cluster down',
    ))

    template.add_resource(elasticache.SecurityGroupIngress(
        'RedisClusterSecurityGroupIngress',
        CacheSecurityGroupName=Ref(redisclustersg),
        EC2SecurityGroupName=Ref(webserversg),
    ))

    template.add_resource(elasticache.CacheCluster(
        'RedisCluster',
        Engine='redis',
        CacheNodeType=Ref(cachenodetype),
        NumCacheNodes='1',
        CacheSecurityGroupNames=[Ref(redisclustersg)],
    ))

    # Outputs
    template.add_output([
        Output(
            'WebsiteURL',
            Description='Application URL',
            Value=Join('', [
                'http://',
                GetAtt(webserverinstance, 'PublicDnsName'),
            ])
        )
    ])

    # Print CloudFormation Template
    print(template.to_json())
def generate_stack_template():
    """Build and return the template for a single PostgreSQL 9.5 instance.

    The stack provisions an IAM role/profile (S3 download + stack update),
    a VPC-internal security group (5432 and 22 from the VPC CIDR only),
    and one EC2 instance whose cfn-init metadata installs PostgreSQL,
    pulls its config files from S3, and sets the postgres admin password.

    Returns:
        troposphere.Template: the composed stack template.
    """
    template = Template()

    generate_description(template)
    generate_version(template)

    # ---Parameters------------------------------------------------------------
    param_vpc_id = Parameter(
        'VpcIdentifer',
        Description='The identity of the VPC (vpc-abcdwxyz) in which this stack shall be created.',
        Type='AWS::EC2::VPC::Id',
    )
    template.add_parameter(param_vpc_id)

    param_vpc_cidr_block = Parameter(
        'VpcCidrBlock',
        Description='The CIDR block of the VPC (w.x.y.z/n) in which this stack shall be created.',
        Type='String',
        Default='10.0.0.0/16'
    )
    template.add_parameter(param_vpc_cidr_block)

    param_database_instance_subnet_id = Parameter(
        'VpcSubnetIdentifer',
        Description='The identity of the private subnet (subnet-abcdwxyz) in which the database server shall be created.',
        Type='AWS::EC2::Subnet::Id',
    )
    template.add_parameter(param_database_instance_subnet_id)

    param_keyname = Parameter(
        'PemKeyName',
        Description='Name of an existing EC2 KeyPair file (.pem) to use to create EC2 instances',
        Type='AWS::EC2::KeyPair::KeyName'
    )
    template.add_parameter(param_keyname)

    param_instance_type = Parameter(
        'EC2InstanceType',
        Description='EC2 instance type, reference this parameter to insure consistency',
        Type='String',
        Default='t2.medium',
        # Prices from (2015-12-03) (Windows, us-west (North CA))
        AllowedValues=[
            # Source : https://aws.amazon.com/ec2/pricing/
            't2.small',   # $0.044/hour
            't2.micro',   # $0.022/hour
            't2.medium',  # $0.088/hour
            't2.large',   # $0.166/hour
            'm3.medium',  # $0.140/hour
            'm3.large',   # $0.28/hour
            'c4.large'    # $0.221/hour
        ],
        ConstraintDescription='Must be a valid EC2 instance type'
    )
    template.add_parameter(param_instance_type)

    param_s3_bucket = Parameter(
        'S3Bucket',
        Description='The bucket in which applicable content can be found.',
        Type='String',
        Default='author-it-deployment-test-us-east-1'
    )
    template.add_parameter(param_s3_bucket)

    param_s3_key = Parameter(
        'S3Key',
        Description='The key within the bucket in which relevant files are located.',
        Type='String',
        Default='source/database/postgresql/single'
    )
    template.add_parameter(param_s3_key)

    param_database_admin_password = Parameter(
        'PostgresAdminPassword',
        Description='The password to be used by user postgres.',
        Type='String',
        NoEcho=True
    )
    template.add_parameter(param_database_admin_password)

    # ---Mappings---------------------------------------------------------------
    mapping_environment_attribute_map = template.add_mapping(
        'EnvironmentAttributeMap',
        {
            'ap-southeast-1': {
                'DatabaseServerAmi': 'ami-1ddc0b7e'
            },
            'ap-southeast-2': {
                'DatabaseServerAmi': 'ami-0c95b86f'
            },
            'us-east-1': {
                'DatabaseServerAmi': 'ami-a4827dc9'
            },
            'us-west-1': {
                'DatabaseServerAmi': 'ami-f5f41398'
            }
        }
    )

    # ---Resources-------------------------------------------------------------
    ref_stack_id = Ref('AWS::StackId')
    ref_region = Ref('AWS::Region')
    ref_stack_name = Ref('AWS::StackName')
    path_database_admin_script = 'usr/ec2-user/postgresql/set_admin_password.sql'
    name_database_server_wait_handle = 'DatabaseServerWaitHandle'

    cmd_postgresql_initdb = dict(
        command='service postgresql-95 initdb'
    )

    cmd_start_postgresql_service = dict(
        command='service postgresql-95 start'
    )

    cmd_set_postgres_user_password = dict(
        command='psql -U postgres -f %s' % path_database_admin_script
    )

    # NOTE(review): the sysvinit service is named postgresql-95 above but
    # chkconfig is run against plain "postgresql" -- confirm which name the
    # pgdg rpm actually registers.
    cmd_start_postgresql_on_startup = dict(
        command='chkconfig postgresql on'
    )

    cmd_signal_success = dict(
        command='cfn-signal --exit-code $?'
    )

    # Create an instance of AWS::IAM::Role for the instance.
    # This allows:
    # - Access to S3 bucket content.
    # - Stack updates
    resource_instance_role = template.add_resource(iam.Role(
        'InstanceRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[
                Statement(
                    Action=[AssumeRole],
                    Effect=Allow,
                    Principal=Principal(
                        'Service',
                        ['ec2.amazonaws.com']
                    )
                )
            ]
        ),
        Path='/'
    ))

    # Create the S3 policy and attach it to the role.
    template.add_resource(iam.PolicyType(
        'InstanceS3DownloadPolicy',
        PolicyName='S3Download',
        PolicyDocument={
            'Statement': [
                {
                    'Effect': 'Allow',
                    'Action': ['s3:GetObject'],
                    'Resource': Join('', [
                        'arn:aws:s3:::',
                        Ref(param_s3_bucket),
                        '/*'
                    ])
                },
                {
                    'Effect': 'Allow',
                    'Action': ['cloudformation:DescribeStacks',
                               'ec2:DescribeInstances'],
                    'Resource': '*'
                }
            ]
        },
        Roles=[Ref(resource_instance_role)]
    ))

    # Create the CloudFormation stack update policy and attach it to the role.
    template.add_resource(iam.PolicyType(
        'InstanceStackUpdatePolicy',
        PolicyName='StackUpdate',
        PolicyDocument={
            'Statement': [
                {
                    "Effect": "Allow",
                    "Action": "Update:*",
                    "Resource": "*"
                }
            ]
        },
        Roles=[Ref(resource_instance_role)]
    ))

    # Create the AWS::IAM::InstanceProfile from the role for reference in the
    # database server instance definition.
    resource_instance_profile = template.add_resource(iam.InstanceProfile(
        'InstanceProfile',
        Path='/',
        Roles=[Ref(resource_instance_role)]
    ))

    # Create a security group for the postgresql instance.
    # This must be internal to the VPC only.
    name_security_group_database = 'VpcDatabaseSecurityGroup'
    resource_database_security_group = ec2.SecurityGroup(
        name_security_group_database,
        GroupDescription=Join(' ', ['Security group for VPC database',
                                    Ref(param_vpc_id)]),
        Tags=Tags(Name=name_security_group_database),
        VpcId=Ref(param_vpc_id)
    )
    template.add_resource(resource_database_security_group)

    template.add_output(
        Output(
            'SecurityGroupForDatabase',
            Description='Security group created for database in VPC.',
            Value=Ref(resource_database_security_group)
        )
    )

    # Add ingress rule from VPC to database security group for database
    # traffic.
    database_port = 5432
    template.add_resource(ec2.SecurityGroupIngress(
        'DatabaseSecurityGroupDatabaseIngress',
        CidrIp=Ref(param_vpc_cidr_block),
        FromPort=str(database_port),
        GroupId=Ref(resource_database_security_group),
        IpProtocol='tcp',
        ToPort=str(database_port)
    ))

    # Add ingress rule from VPC to database security group for ssh traffic.
    ssh_port = 22
    template.add_resource(ec2.SecurityGroupIngress(
        'DatabaseSecurityGroupSshIngress',
        CidrIp=Ref(param_vpc_cidr_block),
        FromPort=str(ssh_port),
        GroupId=Ref(resource_database_security_group),
        IpProtocol='tcp',
        ToPort=str(ssh_port)
    ))

    # Create the metadata for the database instance.
    name_database_server = 'DatabaseServer'
    database_instance_metadata = cloudformation.Metadata(
        cloudformation.Init({
            'config': cloudformation.InitConfig(
                packages={
                    'rpm': {
                        'postgresql': 'https://download.postgresql.org/pub/repos/yum/9.5/redhat/rhel-6-x86_64/pgdg-ami201503-95-9.5-2.noarch.rpm'
                    },
                    'yum': {
                        'postgresql95': [],
                        'postgresql95-libs': [],
                        'postgresql95-server': [],
                        'postgresql95-devel': [],
                        'postgresql95-contrib': [],
                        'postgresql95-docs': []
                    }
                },
                files=cloudformation.InitFiles({
                    # cfn-hup.conf initialization
                    '/etc/cfn/cfn-hup.conf': cloudformation.InitFile(
                        content=Join('', [
                            '[main]\n',
                            'stack=', ref_stack_id, '\n',
                            'region=', ref_region, '\n',
                            'interval=2', '\n',
                            'verbose=true', '\n'
                        ]),
                        mode='000400',
                        owner='root',
                        group='root'
                    ),
                    # cfn-auto-reloader.conf initialization
                    '/etc/cfn/cfn-auto-reloader.conf':
                        cloudformation.InitFile(
                            content=Join('', [
                                '[cfn-auto-reloader-hook]\n',
                                'triggers=post.update\n',
                                'path=Resources.%s.Metadata.AWS::CloudFormation::Init\n' % name_database_server,
                                # BUG FIX: was 'cfn-init.exe' (the Windows
                                # binary); this stack runs on a Linux AMI and
                                # the UserData below already uses
                                # /opt/aws/bin/cfn-init.
                                'action=/opt/aws/bin/cfn-init ',
                                ' --verbose '
                                ' --stack ', ref_stack_name,
                                # resource that defines the Metadata
                                ' --resource %s ' % name_database_server,
                                ' --region ', ref_region, '\n'
                            ]),
                            mode='000400',
                            owner='root',
                            group='root'
                        ),
                    # pg_hba.conf retrieval from S3
                    # BUG FIX (here and for the two files below): 'conf' and
                    # the filename were adjacent string literals, so Python
                    # concatenated them into a single 'confpg_hba.conf'
                    # path element instead of 'conf/pg_hba.conf'.
                    '/var/lib/pgsql9/data/pg_hba.conf': cloudformation.InitFile(
                        source=Join('/', [
                            'https://s3.amazonaws.com',
                            Ref(param_s3_bucket),
                            Ref(param_s3_key),
                            'conf',
                            'pg_hba.conf'
                        ]),
                        mode='000400',
                        owner='root',
                        group='root'
                    ),
                    # postgresql.conf retrieval from S3
                    '/var/lib/pgsql9/data/postgresql.conf': cloudformation.InitFile(
                        source=Join('/', [
                            'https://s3.amazonaws.com',
                            Ref(param_s3_bucket),
                            Ref(param_s3_key),
                            'conf',
                            'postgresql.conf'
                        ]),
                        mode='000400',
                        owner='root',
                        group='root'
                    ),
                    # pg_ident.conf retrieval from S3
                    '/var/lib/pgsql9/data/pg_ident.conf': cloudformation.InitFile(
                        source=Join('/', [
                            'https://s3.amazonaws.com',
                            Ref(param_s3_bucket),
                            Ref(param_s3_key),
                            'conf',
                            'pg_ident.conf'
                        ]),
                        mode='000400',
                        owner='root',
                        group='root'
                    ),
                    # script to set postgresql admin password.
                    # (admin user = '******')
                    # BUG FIX: this inline SQL was passed as source=, but
                    # cfn-init treats 'source' as a URL to fetch; inline
                    # bodies must use 'content'.
                    path_database_admin_script: cloudformation.InitFile(
                        content=Join('', [
                            'ALTER USER postgres WITH PASSWORD ',
                            Ref(param_database_admin_password),
                            ';',
                            '\n'
                        ])
                    )
                }),
                commands={
                    '10-postgresql_initdb': cmd_postgresql_initdb,
                    '20-start_postgresql_service': cmd_start_postgresql_service,
                    '30-set-postgres-user-password': cmd_set_postgres_user_password,
                    '40-start-postgresql-on-startup': cmd_start_postgresql_on_startup,
                    # '99-signal-success': cmd_signal_success
                },
                services=dict(
                    sysvinit=cloudformation.InitServices(
                        {
                            # start cfn-hup service -
                            # required for CloudFormation stack update
                            'cfn-hup': cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                                files=[
                                    '/etc/cfn/cfn-hup.conf',
                                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                                ]
                            ),
                            # start postgresql service
                            'postgresql-9.5': cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True
                            ),
                            # Disable sendmail service - not required.
                            'sendmail': cloudformation.InitService(
                                enabled=False,
                                ensureRunning=False
                            )
                        }
                    )
                )
            )
        }),
        cloudformation.Authentication({
            'S3AccessCredentials': cloudformation.AuthenticationBlock(
                buckets=[Ref(param_s3_bucket)],
                roleName=Ref(resource_instance_role),
                type='S3'
            )
        })
    )

    # Add a wait handle to receive the completion signal.
    # resource_database_server_wait_handle = template.add_resource(
    #     cloudformation.WaitConditionHandle(
    #         name_database_server_wait_handle
    #     )
    # )
    # template.add_resource(
    #     cloudformation.WaitCondition(
    #         'DatabaseServerWaitCondition',
    #         DependsOn=name_database_server,
    #         Handle=Ref(resource_database_server_wait_handle),
    #         Timeout=300,
    #     )
    # )

    resource_database_server = ec2.Instance(
        name_database_server,
        DependsOn=name_security_group_database,
        IamInstanceProfile=Ref(resource_instance_profile),
        Metadata=database_instance_metadata,
        ImageId=FindInMap('EnvironmentAttributeMap',
                          ref_region,
                          'DatabaseServerAmi'),
        InstanceType=Ref(param_instance_type),
        KeyName=Ref(param_keyname),
        SecurityGroupIds=[Ref(resource_database_security_group)],
        SubnetId=Ref(param_database_instance_subnet_id),
        Tags=Tags(Name=name_database_server, VPC=Ref(param_vpc_id)),
        UserData=Base64(
            Join(
                '',
                [
                    '#!/bin/bash -xe\n',
                    'yum update -y aws-cfn-bootstrap\n',
                    '/opt/aws/bin/cfn-init --verbose ',
                    ' --stack ', ref_stack_name,
                    ' --resource DatabaseServer ',
                    ' --region ', ref_region, '\n',
                    '/opt/aws/bin/cfn-signal --exit-code $? ',
                    ' --stack ', ref_stack_name,
                    ' --resource ', name_database_server, '\n'
                ]
            )
        )
    )
    template.add_resource(resource_database_server)

    template.add_output(
        Output('DatabaseServer',
               Description='PostgreSQL single instance database server',
               Value=Ref(resource_database_server)
               )
    )

    return template
)) gatewayattachment = t.add_resource(VPCGatewayAttachment( "GatewayAttachment", VpcId=Ref("VPC"), InternetGatewayId=Ref("InternetGateway"), )) securitygroup = t.add_resource(SecurityGroup( "SecurityGroup", GroupDescription="Security Group", SecurityGroupIngress=[ SecurityGroupRule( "SecurityGroupIngress1", CidrIp="10.0.0.0/16", FromPort="80", ToPort="80", IpProtocol="tcp", ) ], VpcId=Ref("VPC"), )) t.add_output(Output( "ClusterEndpoint", Value=Join(":", [GetAtt(redshiftcluster, "Endpoint.Address"), GetAtt(redshiftcluster, "Endpoint.Port")]), )) print(t.to_json())
"TemplatePath": "BuildOutput::ecs-cluster-cf.template", "RoleArn": GetAtt("CloudFormationClusterRole", "Arn"), "ParameterOverrides": """{"VpcId" : { "Fn::GetParam" : [ "BuildOutput", "ProdVpcId.json", "ProdVpcId" ] } , "PublicSubnet" : { "Fn::GetParam" : [ "BuildOutput", "ProdPublicSubnet.json", "ProdPublicSubnet" ] }, "KeyPair" : { "Fn::GetParam" : [ "BuildOutput", "KeyPair.json", "KeyPair" ] } }""" }, InputArtifacts=[ InputArtifacts(Name="App", ), InputArtifacts(Name="BuildOutput") ], ) ]) ], )) ########### # Outputs # ########### t.add_output( Output("CodebuildName", Description="Codebuild Name", Value=Join( "-", [Select(0, Split("-", Ref("AWS::StackName"))), "codebuild"]))) print(t.to_json())
# IAM group for client users: a blanket Allow-everything statement followed
# by explicit Deny statements (explicit Deny always wins) for billing views,
# reserved-capacity purchases, CloudTrail, most IAM mutations, and the
# CloudTrail S3 bucket.
ClientGroup = t.add_resource(Group(
    "ClientGroup",
    Policies=[{
        "PolicyName": "ClientGroupPolicy",
        "PolicyDocument": {
            "Statement": [{
                "Action": "*",
                "Resource": "*",
                "Effect": "Allow",
                "Sid": "AllowAll"
            }, {
                "Action": ["aws-portal:ViewBilling", "aws-portal:ViewUsage"],
                "Resource": "*",
                "Effect": "Deny",
                "Sid": "DenyBilling"
            }, {
                "Action": ["ec2:PurchaseReservedInstancesOffering"],
                "Resource": "*",
                "Effect": "Deny",
                "Sid": "DenyPurchaseReservedInstancesOffering"
            }, {
                "Action": ["rds:PurchaseReservedDBInstancesOffering"],
                "Resource": "*",
                "Effect": "Deny",
                "Sid": "DenyPurchaseReservedDBInstancesOffering"
            }, {
                "Action": ["redshift:PurchaseReservedNodeOffering"],
                "Resource": "*",
                "Effect": "Deny",
                "Sid": "DenyPurchaseReservedNodeOffering"
            }, {
                "Action": "cloudtrail:*",
                "Resource": "*",
                "Effect": "Deny",
                "Sid": "DenyCloudtrail"
            }, {
                # Deny the IAM operations that would let a group member alter
                # identities, credentials, or account-level IAM configuration.
                "Action": ["iam:AddRoleToInstanceProfile",
                           "iam:AddUserToGroup",
                           "iam:CreateAccessKey",
                           "iam:CreateAccountAlias",
                           "iam:CreateGroup",
                           "iam:CreateInstanceProfile",
                           "iam:CreateLoginProfile",
                           "iam:CreateSAMLProvider",
                           "iam:CreateUser",
                           "iam:DeleteAccessKey",
                           "iam:DeleteAccountAlias",
                           "iam:DeleteAccountPasswordPolicy",
                           "iam:DeleteGroup",
                           "iam:DeleteGroupPolicy",
                           "iam:DeleteInstanceProfile",
                           "iam:DeleteLoginProfile",
                           "iam:DeleteRole",
                           "iam:DeleteRolePolicy",
                           "iam:DeleteSAMLProvider",
                           "iam:DeleteServerCertificate",
                           "iam:DeleteSigningCertificate",
                           "iam:DeleteUser",
                           "iam:DeleteUserPolicy",
                           "iam:DeleteVirtualMFADevice",
                           "iam:GetAccountPasswordPolicy",
                           "iam:GetAccountSummary",
                           "iam:GetGroup",
                           "iam:GetGroupPolicy",
                           "iam:GetInstanceProfile",
                           "iam:GetLoginProfile",
                           "iam:GetSAMLProvider",
                           "iam:GetServerCertificate",
                           "iam:GetUser",
                           "iam:GetUserPolicy",
                           "iam:ListAccessKeys",
                           "iam:ListAccountAliases",
                           "iam:ListGroupPolicies",
                           "iam:ListGroups",
                           "iam:ListGroupsForUser",
                           "iam:ListInstanceProfiles",
                           "iam:ListInstanceProfilesForRole",
                           "iam:ListSAMLProviders",
                           "iam:ListServerCertificates",
                           "iam:ListSigningCertificates",
                           "iam:ListUserPolicies",
                           "iam:PutGroupPolicy",
                           "iam:PutRolePolicy",
                           "iam:PutUserPolicy",
                           "iam:RemoveRoleFromInstanceProfile",
                           "iam:RemoveUserFromGroup",
                           "iam:UpdateAccessKey",
                           "iam:UpdateAccountPasswordPolicy",
                           "iam:UpdateAssumeRolePolicy",
                           "iam:UpdateGroup",
                           "iam:UpdateLoginProfile",
                           "iam:UpdateSAMLProvider",
                           "iam:UpdateServerCertificate",
                           "iam:UpdateSigningCertificate",
                           "iam:UpdateUser",
                           "iam:UploadServerCertificate",
                           "iam:UploadSigningCertificate"],
                "Resource": ["*"],
                "Effect": "Deny",
                "Sid": "DenyIAM"
            }, {
                "Action": "s3:*",
                "Resource": "arn:aws:s3:::shelter-mutual-cloudtrail-us-east-1",
                "Effect": "Deny",
                "Sid": "DenyS3CloudtrailBucket"
            }, {
                "Action": "s3:*",
                "Resource": Join("", ["arn:aws:s3:::shelter-mutual-cloudtrail-us-east-1",
                                      "/*"]),
                "Effect": "Deny",
                "Sid": "DenyS3CloudtrailObjects"
            }]
        }
    }],
))

# EC2-assumable role for the "craft" instances: read-only ELB/S3 listing,
# full access to the bootstrap and pkg prefixes of the tools bucket,
# CloudWatch, and SNS publish.
craftIamRole = t.add_resource(Role(
    "craftIamRole",
    Path="/",
    Policies=[{
        "PolicyName": "craftPolicy",
        "PolicyDocument": {
            "Statement": [{
                "Action": ["elasticloadbalancing:DescribeLoadBalancers"],
                "Resource": "*",
                "Effect": "Allow",
                "Sid": "AllowDescribeLoadBalancers"
            }, {
                "Action": ["s3:ListAllMyBuckets"],
                "Resource": "*",
                "Effect": "Allow",
                "Sid": "AllowS3ListAllMyBuckets"
            }, {
                "Action": "s3:*",
                "Resource": "arn:aws:s3:::shelter-mutual-aws-tools-us-east-1/bootstrap",
                "Effect": "Allow",
                "Sid": "AllowS3BootstrapBucket"
            }, {
                "Action": "s3:*",
                "Resource": Join("", ["arn:aws:s3:::shelter-mutual-aws-tools-us-east-1/bootstrap",
                                      "/*"]),
                "Effect": "Allow",
                "Sid": "AllowS3BootstrapObjects"
            }, {
                "Action": "s3:*",
                "Resource": "arn:aws:s3:::shelter-mutual-aws-tools-us-east-1/pkg",
                "Effect": "Allow",
                "Sid": "AllowS3Pkg"
            }, {
                "Action": "s3:*",
                "Resource": Join("", ["arn:aws:s3:::shelter-mutual-aws-tools-us-east-1/pkg",
                                      "/*"]),
                "Effect": "Allow",
                "Sid": "AllowS3PkgObjects"
            }, {
                "Action": "cloudwatch:*",
                "Resource": "*",
                "Effect": "Allow",
                "Sid": "AllowCloudWatch"
            }, {
                "Action": ["sns:Publish"],
                "Resource": "*",
                "Effect": "Allow",
                "Sid": "AllowSNSPublish"
            }, {
                # NOTE(review): 'arn:aws:s3:::' has an empty bucket name --
                # this looks like an unfilled placeholder; confirm the
                # intended code bucket (same for AllowS3CodeObjects below).
                "Action": ["s3:*"],
                "Resource": ["arn:aws:s3:::"],
                "Effect": "Allow",
                "Sid": "AllowS3Code"
            }, {
                "Action": ["s3:*"],
                "Resource": ["arn:aws:s3:::/*"],
                "Effect": "Allow",
                "Sid": "AllowS3CodeObjects"
            }]
        }
    }],
    AssumeRolePolicyDocument={
        "Statement": [{
            "Action": ["sts:AssumeRole"],
            "Effect": "Allow",
            "Principal": {
                "Service": ["ec2.amazonaws.com"]
            }
        }]
    },
))

# Outputs exposing the instance-profile resources.
# NOTE(review): each Ref(...) argument is evaluated before the name is
# rebound to the Output object, so these rely on NatInstanceProfile /
# craftInstanceProfile / BastionInstanceProfile being resources defined
# earlier in the file (outside this chunk) -- confirm.
NatInstanceProfile = t.add_output(Output(
    "NatInstanceProfile",
    Description="NAT IAM Instance Profile",
    Value=Ref(NatInstanceProfile),
))
craftInstanceProfile = t.add_output(Output(
    "craftInstanceProfile",
    Description="craft IAM Instance Profile",
    Value=Ref(craftInstanceProfile),
))
BastionInstanceProfile = t.add_output(Output(
    "BastionInstanceProfile",
    Description="Bastion IAM Instance Profile",
    Value=Ref(BastionInstanceProfile),
))
"**WARNING** This template creates an Amazon EC2 instance. " "You will be billed for the AWS resources used if you create " "a stack from this template.") s3dnsname = t.add_parameter(Parameter( "S3DNSNAme", Description="The DNS name of an existing S3 bucket to use as the " "Cloudfront distribution origin", Type="String", )) myDistribution = t.add_resource(Distribution( "myDistribution", DistributionConfig=DistributionConfig( Origins=[Origin(Id="Origin 1", DomainName=Ref(s3dnsname))], DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="Origin 1", ViewerProtocolPolicy="allow-all"), Enabled=True ) )) t.add_output([ Output("DistributionId", Value=Ref(myDistribution)), Output( "DistributionName", Value=Join("", ["http://", GetAtt(myDistribution, "DomainName")])), ]) print(t.to_json())
KeyName=Ref(keyname), SecurityGroups=[Ref(sg)], Tags = [ {'Key': 'Name', 'Value': 'Asgard'} ] )) template.add_output([ Output( "PublicIP", Description="Public IP address of the Asgard instance", Value=GetAtt(ec2_instance, "PublicIp"), ), Output( "PrivateIP", Description="Private IP address of the Asgard instance", Value=GetAtt(ec2_instance, "PrivateIp"), ), Output( "PublicDNS", Description="Public DNSName of the Asgard instance", Value=GetAtt(ec2_instance, "PublicDnsName"), ) ]) print template.to_json() #import requests #myip_response = requests.get(url='http://icanhazip.com') #myip = myip_response.text
ConstraintDescription="should be between 5 and 10000" )) myDynamoDB = t.add_resource(Table( "myDynamoDBTable", AttributeDefinitions=[ AttributeDefinition( AttributeName=Ref(hashkeyname), AttributeType=Ref(hashkeytype) ), ], KeySchema=[ KeySchema( AttributeName=Ref(hashkeyname), KeyType="HASH" ) ], ProvisionedThroughput=ProvisionedThroughput( ReadCapacityUnits=Ref(readunits), WriteCapacityUnits=Ref(writeunits) ) )) t.add_output(Output( "TableName", Value=Ref(myDynamoDB), Description="Table name of the newly create DynamoDB table", )) print(t.to_json())
) ) ] )) sampleEnv = t.add_resource(Environment( "sampleEnvironment", ApplicationName=Ref(sampleApp), Description="AWS Elastic Beanstalk Environment running " "Python Sample Application", SolutionStackName="64bit Amazon Linux running Python", OptionSettings=[ OptionSettings( Namespace="aws:autoscaling:launchconfiguration", OptionName="EC2KeyName", Value=Ref(keyname), ), ], VersionLabel="Initial Version", )) t.add_output([ Output( "URL", Description="URL of the AWS Elastic Beanstalk Environment", Value=Join("", ["http://", GetAtt(sampleEnv, "EndpointURL")]), ) ]) print(t.to_json())
), RecordSet( Name=Join('.', ['www', Ref(hostedzone)]), Type='CNAME', TTL='900', ResourceRecords=[ GetAtt(www_bucket, 'DomainName') ] ), ] )) t.add_output(Output( "BucketName", Value=Ref(root_bucket), Description="Name of S3 bucket to hold website content" )) #print(t.to_json()) #print(t.outputs['BucketName'].Value.data) domain = sys.argv[1] stack_name = domain.replace('.', '') client = boto3.client('cloudformation', region_name='eu-west-1') try: response = client.describe_stacks( StackName=stack_name, )