def create_redis_port_output(template, redis_resource):
    """Add a 'RedisPort' output exposing the Redis endpoint port.

    :param template: troposphere Template the output is attached to
    :param redis_resource: ElastiCache resource whose endpoint port is exported
    :return: the Output object registered on the template
    """
    port_attr = GetAtt(redis_resource, 'RedisEndpoint.Port')
    redis_port_output = Output('RedisPort',
                               Description='The port of the Redis instance',
                               Value=port_attr)
    return template.add_output(redis_port_output)
def set_up_stack(self):
    """Build the MMW application-server stack.

    Registers every CloudFormation parameter the stack accepts, then
    creates the security groups, load balancers, auto-scaling and
    CloudWatch resources, and finally exports the load-balancer
    endpoints and hosted-zone IDs as stack outputs.
    """
    super(Application, self).set_up_stack()
    # Tag every resource with the stack type on top of the caller-supplied tags.
    tags = self.get_input('Tags').copy()
    tags.update({'StackType': 'Application'})
    self.default_tags = tags
    self.region = self.get_input('Region')
    self.add_description('Application server stack for MMW')
    # Parameters
    # Blue/Green marker used to distinguish the two stack copies.
    self.color = self.add_parameter(Parameter(
        'StackColor', Type='String',
        Description='Stack color',
        AllowedValues=['Blue', 'Green']
    ), 'StackColor')
    self.keyname = self.add_parameter(Parameter(
        'KeyName', Type='String',
        Description='Name of an existing EC2 key pair'
    ), 'KeyName')
    self.availability_zones = self.add_parameter(Parameter(
        'AvailabilityZones', Type='CommaDelimitedList',
        Description='Comma delimited list of availability zones'
    ), 'AvailabilityZones')
    # NoEcho keeps the password out of console/API parameter listings.
    self.rds_password = self.add_parameter(Parameter(
        'RDSPassword', Type='String', NoEcho=True,
        Description='Database password',
    ), 'RDSPassword')
    self.app_server_instance_type = self.add_parameter(Parameter(
        'AppServerInstanceType', Type='String', Default='t2.micro',
        Description='Application server EC2 instance type',
        AllowedValues=EC2_INSTANCE_TYPES,
        ConstraintDescription='must be a valid EC2 instance type.'
    ), 'AppServerInstanceType')
    self.app_server_ami = self.add_parameter(Parameter(
        'AppServerAMI', Type='String',
        Default=self.get_recent_app_server_ami(),
        Description='Application server AMI'
    ), 'AppServerAMI')
    self.app_server_instance_profile = self.add_parameter(Parameter(
        'AppServerInstanceProfile', Type='String',
        Default='AppServerInstanceProfile',
        Description='Application server instance profile'
    ), 'AppServerInstanceProfile')
    # ASG sizing values are Strings because that is how the CFN
    # parameters are declared.
    self.app_server_auto_scaling_desired = self.add_parameter(Parameter(
        'AppServerAutoScalingDesired', Type='String', Default='1',
        Description='Application server AutoScalingGroup desired'
    ), 'AppServerAutoScalingDesired')
    self.app_server_auto_scaling_min = self.add_parameter(Parameter(
        'AppServerAutoScalingMin', Type='String', Default='1',
        Description='Application server AutoScalingGroup minimum'
    ), 'AppServerAutoScalingMin')
    self.app_server_auto_scaling_max = self.add_parameter(Parameter(
        'AppServerAutoScalingMax', Type='String', Default='1',
        Description='Application server AutoScalingGroup maximum'
    ), 'AppServerAutoScalingMax')
    self.ssl_certificate_arn = self.add_parameter(Parameter(
        'SSLCertificateARN', Type='String',
        Description='ARN for a SSL certificate stored in IAM'
    ), 'SSLCertificateARN')
    # Separate certificate for the legacy (backward-compat) load balancer.
    self.backward_compat_ssl_certificate_arn = self.add_parameter(
        Parameter(
            'BackwardCompatSSLCertificateARN', Type='String',
            Description='ARN for a SSL certificate stored in IAM'
        ), 'BackwardCompatSSLCertificateARN')
    self.public_subnets = self.add_parameter(Parameter(
        'PublicSubnets', Type='CommaDelimitedList',
        Description='A list of public subnets'
    ), 'PublicSubnets')
    self.private_subnets = self.add_parameter(Parameter(
        'PrivateSubnets', Type='CommaDelimitedList',
        Description='A list of private subnets'
    ), 'PrivateSubnets')
    self.public_hosted_zone_name = self.add_parameter(Parameter(
        'PublicHostedZoneName', Type='String',
        Description='Route 53 public hosted zone name'
    ), 'PublicHostedZoneName')
    self.vpc_id = self.add_parameter(Parameter(
        'VpcId', Type='String', Description='VPC ID'
    ), 'VpcId')
    self.notification_topic_arn = self.add_parameter(Parameter(
        'GlobalNotificationsARN', Type='String',
        Description='ARN for an SNS topic to broadcast notifications'
    ), 'GlobalNotificationsARN')
    # Tile-server CloudFront endpoints, one per stack color.
    self.blue_tile_distribution_endpoint = self.add_parameter(Parameter(
        'BlueTileServerDistributionEndpoint', Type='String',
        Description='Endpoint for blue tile CloudFront distribution'
    ), 'BlueTileServerDistributionEndpoint')
    self.green_tile_distribution_endpoint = self.add_parameter(Parameter(
        'GreenTileServerDistributionEndpoint', Type='String',
        Description='Endpoint for green tile CloudFront distribution'
    ), 'GreenTileServerDistributionEndpoint')
    self.itsi_base_url = self.add_parameter(Parameter(
        'ITSIBaseURL', Type='String',
        Description='Base URL for ITSI portal'
    ), 'ITSIBaseURL')
    self.itsi_secret_key = self.add_parameter(Parameter(
        'ITSISecretKey', Type='String', NoEcho=True,
        Description='Secret key for ITSI portal integration'
    ), 'ITSISecretKey')
    # Resources
    app_server_lb_security_group, \
        app_server_security_group = self.create_security_groups()
    app_server_lb, \
        backward_compat_app_server_lb = self.create_load_balancers(
            app_server_lb_security_group)
    self.create_auto_scaling_resources(app_server_security_group,
                                       app_server_lb,
                                       backward_compat_app_server_lb)
    self.create_cloud_watch_resources(app_server_lb)
    # Outputs: DNS endpoints + hosted-zone IDs for both load balancers.
    self.add_output(Output('AppServerLoadBalancerEndpoint',
                           Value=GetAtt(app_server_lb, 'DNSName')))
    self.add_output(Output('AppServerLoadBalancerHostedZoneNameID',
                           Value=GetAtt(app_server_lb,
                                        'CanonicalHostedZoneNameID')))
    self.add_output(Output('BackwardCompatAppServerLoadBalancerEndpoint',
                           Value=GetAtt(backward_compat_app_server_lb,
                                        'DNSName')))
    self.add_output(
        Output('BackwardCompatAppServerLoadBalancerHostedZoneNameID',
               Value=GetAtt(backward_compat_app_server_lb,
                            'CanonicalHostedZoneNameID')))
def add_child_template(self,
                       template,
                       template_bucket=None,
                       s3_template_prefix=None,
                       template_upload_acl=None,
                       depends_on=None):
    """
    Method adds a child template to this object's template and binds the
    child template parameters to properties, resources and other stack
    outputs
    @param template [Troposphere.Template] Troposphere Template object to
            add as a child to this object's template
    @param template_bucket [str] name of the bucket to upload keys to -
            will default to value in template_args if not present
    @param s3_template_prefix [str] s3 key name prefix to prepend to s3
            key path - will default to value in template_args if not
            present
    @param template_upload_acl [str] name of the s3 canned acl to apply
            to templates uploaded to S3 - will default to value in
            template_args if not present
    @param depends_on [list] logical names the child stack must wait on
            (defaults to no dependencies)
    """
    # BUGFIX: the default used to be a shared mutable list ([]); use the
    # None sentinel so call sites can never alias each other's list.
    if depends_on is None:
        depends_on = []
    name = template.name
    self.add_common_params_to_child_template(template)
    template.load_ami_cache()
    template.build_hook()
    stack_url = self.upload_template(
        template,
        template_bucket=template_bucket,
        s3_template_prefix=s3_template_prefix,
        template_upload_acl=template_upload_acl)
    if name not in self.stack_outputs:
        self.stack_outputs[name] = []
    # Bind each child parameter to the best-matching source in this stack.
    stack_params = {}
    for parameter in template.parameters.keys():
        # Manual parameter bindings single-namespace
        if parameter in self.manual_parameter_bindings:
            stack_params[parameter] = self.manual_parameter_bindings[
                parameter]
        # Naming scheme for identifying the AZ of a subnet (not sure if
        # this is even used anywhere)
        elif parameter.startswith('availabilityZone'):
            stack_params[parameter] = GetAtt(
                'privateSubnet' + parameter.replace('availabilityZone', ''),
                'AvailabilityZone')
        # Match any child stack parameters that have the same name as
        # this stacks **parameters**
        elif parameter in self.template.parameters.keys():
            stack_params[parameter] = Ref(
                self.template.parameters.get(parameter))
        # Match any child stack parameters that have the same name as
        # this stacks **resources**
        elif parameter in self.template.resources.keys():
            stack_params[parameter] = Ref(
                self.template.resources.get(parameter))
        # Match any child stack parameters that have the same name as
        # this stacks **outputs**
        # TODO: Does this even work? Child runs after parent completes?
        elif parameter in self.stack_outputs:
            stack_params[parameter] = GetAtt(self.stack_outputs[parameter],
                                             'Outputs.' + parameter)
        # Finally if nothing else matches copy the child templates
        # parameter to this template's parameter list so the value will
        # pass through this stack down to the child.
        else:
            stack_params[parameter] = Ref(
                self.template.add_parameter(template.parameters[parameter]))
    stack_name = name + 'Stack'
    stack_obj = cf.Stack(
        stack_name,
        TemplateURL=stack_url,
        Parameters=stack_params,
        TimeoutInMinutes=self.template_args.get('timeout_in_minutes', '60'),
        DependsOn=depends_on)
    return self.template.add_resource(stack_obj)
def add_nodes(template, num_nodes, prefix, chassis_spec):
    '''
    Takes a given Template object, a count of nodes to create, and a name
    to prefix all EC2 instances with. EC2 instances will be created with
    the naming structure of Prefix + Node + NodeNumber.

    Node 1 receives the cluster-forming user data; every other node
    receives the join-cluster user data.
    '''
    # The user-data split below assumes at least one node exists; fail
    # loudly instead of raising a bare IndexError.
    if num_nodes < 1:
        raise ValueError('num_nodes must be at least 1, got {}'.format(
            num_nodes))

    # Every node shares the same single-NIC layout inside the target subnet.
    network_interfaces = [
        ec2.NetworkInterfaceProperty(
            AssociatePublicIpAddress=False,
            GroupSet=[Ref(SECURITY_GROUP_NAME)],
            DeviceIndex=0,
            DeleteOnTermination=True,
            SubnetId=Ref('SubnetId'),
        )
    ]
    block_device_mappings = chassis_spec.get_block_device_mappings()

    instances = [
        ec2.Instance(
            '{}Node{}'.format(prefix, i),
            ImageId=FindInMap('RegionMap', Ref('AWS::Region'), 'AMI'),
            InstanceType=Ref('InstanceType'),
            KeyName=Ref('KeyName'),
            NetworkInterfaces=network_interfaces,
            BlockDeviceMappings=block_device_mappings,
        )
        for i in range(1, num_nodes + 1)
    ]

    # Node 1 forms the cluster; the remaining nodes join it.
    instances[0].UserData = Base64(
        Join('', generate_node1_user_data(instances, chassis_spec)))
    for instance in instances[1:]:
        instance.UserData = Base64(
            Join('', generate_other_nodes_user_data(chassis_spec)))

    for instance in instances:
        template.add_resource(instance)

    # Create a list containing the Private IPs of all nodes.
    output_ips = [GetAtt(instance.title, 'PrivateIp')
                  for instance in instances]
    template.add_output(
        Output(
            'ClusterPrivateIPs',
            Description='List of the private IPs of the nodes in your QF2 Cluster',
            Value=Join(', ', output_ips),
        ))
    template.add_output(
        Output('TemporaryPassword',
               Description='Temporary admin password for your QF2 cluster '
               '(exclude quotes, matches node1 instance ID).',
               Value=Join('', ['"', Ref(instances[0].title), '"'])))
    template.add_output(
        Output(
            'LinkToManagement',
            Description='Click to launch the QF2 Admin Console',
            Value=Join('',
                       ['https://',
                        GetAtt(instances[0].title, 'PrivateIp')]),
        ))
    template.add_output(
        Output('QumuloKnowledgeBase',
               Description='Qumulo Knowledge Base for QF2 in public clouds',
               Value=KNOWLEDGE_BASE_LINK))
from troposphere import (
    GetAtt,
    s3,
)

from aws_lambda import lambda_processing

# S3 bucket whose object-created events invoke the processing Lambda.
# DependsOn orders creation after the 'LambdaProcessing' resource --
# NOTE(review): confirm that logical ID matches the Lambda's resource
# (and that its invoke permission is configured there).
bucket = s3.Bucket(
    'UploadedResources',
    BucketName='uploaded-file-processing-ee5a4c2a-b5a7-4b80-ac22-5763e7a93552',
    # Keep every object version so uploads are never silently overwritten.
    VersioningConfiguration=s3.VersioningConfiguration(Status='Enabled', ),
    DependsOn=[
        'LambdaProcessing',
    ],
    # Fire the Lambda for every created object, regardless of prefix.
    NotificationConfiguration=s3.NotificationConfiguration(
        LambdaConfigurations=[
            s3.LambdaConfigurations(Event='s3:ObjectCreated:*',
                                    Function=GetAtt(lambda_processing, 'Arn'))
        ]))
def main():
    """Generates the CloudFormation template.

    Builds a VPC with two public and two private subnets across two AZs,
    an Internet Gateway with public default routes, and a NAT Gateway per
    AZ with private default routes, then prints the template as JSON.
    """
    template = Template()
    template.add_version("2010-09-09")
    template.add_description(
        'This template deploys a VPC, with a pair of public and private subnets spread ' +
        'across two Availabilty Zones. It deploys an Internet Gateway, with a default ' +
        'route on the public subnets. It deploys a pair of NAT Gateways (one in each AZ), ' +
        'and default routes for them in the private subnets.'
    )

    # Parameters
    # EnvironmentName
    env_param = template.add_parameter(Parameter(
        'EnvironmentName',
        Type='String',
        Description='An environment name that will be prefixed to resource names',
    ))
    # VPC CIDR
    vpc_cidr_param = template.add_parameter(Parameter(
        'VpcCIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for this VPC',
        Default='10.192.0.0/16',
    ))
    # PublicSubnet1CIDR
    pub_subnet_1_param = template.add_parameter(Parameter(
        'PublicSubnet1CIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for the public subnet ' +
                    'in the first Availability Zone',
        Default='10.192.10.0/24',
    ))
    # PublicSubnet2CIDR
    pub_subnet_2_param = template.add_parameter(Parameter(
        'PublicSubnet2CIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for the public subnet ' +
                    'in the second Availability Zone',
        Default='10.192.11.0/24',
    ))
    # PrivateSubnet1CIDR
    prvt_subnet_1_param = template.add_parameter(Parameter(
        'PrivateSubnet1CIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for the private subnet ' +
                    'in the first Availability Zone',
        Default='10.192.20.0/24',
    ))
    # PrivateSubnet2CIDR
    prvt_subnet_2_param = template.add_parameter(Parameter(
        'PrivateSubnet2CIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for the private subnet ' +
                    'in the second Availability Zone',
        Default='10.192.21.0/24',
    ))

    # Resources
    # VPC
    vpc = template.add_resource(
        VPC(
            'VPC',
            CidrBlock=Ref(vpc_cidr_param),
            Tags=Tags(Name=Ref(env_param)),
        )
    )
    # InternetGateway
    internet_gateway = template.add_resource(
        InternetGateway(
            'InternetGateway',
            Tags=Tags(Name=Ref(env_param)),
        )
    )
    # InternetGatewayAttachment
    template.add_resource(
        VPCGatewayAttachment(
            'InternetGatewayAttachment',
            InternetGatewayId=Ref(internet_gateway),
            VpcId=Ref(vpc),
        )
    )
    # Subnets: one public + one private per AZ; GetAZs("") resolves the
    # AZ list of the deployment region.
    # PublicSubnet1
    pub_subnet1 = template.add_resource(
        Subnet(
            'PublicSubnet1',
            VpcId=Ref(vpc),
            AvailabilityZone=Select('0', GetAZs("")),
            CidrBlock=Ref(pub_subnet_1_param),
            MapPublicIpOnLaunch=False,
            Tags=Tags(Name=Sub('${EnvironmentName} Public Subnet (AZ1)')),
        )
    )
    # PublicSubnet2
    pub_subnet2 = template.add_resource(
        Subnet(
            'PublicSubnet2',
            VpcId=Ref(vpc),
            AvailabilityZone=Select('1', GetAZs("")),
            CidrBlock=Ref(pub_subnet_2_param),
            MapPublicIpOnLaunch=False,
            Tags=Tags(Name=Sub('${EnvironmentName} Public Subnet (AZ2)')),
        )
    )
    # PrivateSubnet1
    prvt_subnet1 = template.add_resource(
        Subnet(
            'PrivateSubnet1',
            VpcId=Ref(vpc),
            AvailabilityZone=Select('0', GetAZs("")),
            CidrBlock=Ref(prvt_subnet_1_param),
            MapPublicIpOnLaunch=False,
            Tags=Tags(Name=Sub('${EnvironmentName} Private Subnet (AZ1)')),
        )
    )
    # PrivateSubnet2
    prvt_subnet2 = template.add_resource(
        Subnet(
            'PrivateSubnet2',
            VpcId=Ref(vpc),
            AvailabilityZone=Select('1', GetAZs("")),
            CidrBlock=Ref(prvt_subnet_2_param),
            MapPublicIpOnLaunch=False,
            Tags=Tags(Name=Sub('${EnvironmentName} Private Subnet (AZ2)')),
        )
    )
    # Elastic IPs for the NAT gateways; wait for the IGW attachment so
    # the VPC can actually allocate public addresses.
    # NatGateway1EIP
    nat_gateway1_eip = template.add_resource(
        EIP(
            'NatGateway1EIP',
            DependsOn='InternetGatewayAttachment',
            Domain='vpc',
        )
    )
    # NatGateway2EIP
    nat_gateway2_eip = template.add_resource(
        EIP(
            'NatGateway2EIP',
            DependsOn='InternetGatewayAttachment',
            Domain='vpc',
        )
    )
    # NatGateway1
    nat_gateway1 = template.add_resource(
        NatGateway(
            'NatGateway1',
            AllocationId=GetAtt(nat_gateway1_eip, 'AllocationId'),
            SubnetId=Ref(pub_subnet1),
        )
    )
    # NatGateway2
    nat_gateway2 = template.add_resource(
        NatGateway(
            'NatGateway2',
            AllocationId=GetAtt(nat_gateway2_eip, 'AllocationId'),
            SubnetId=Ref(pub_subnet2),
        )
    )
    # Routing: one shared public table; one private table per AZ so each
    # private subnet egresses through its own NAT gateway.
    # PublicRouteTable
    pub_route_table = template.add_resource(
        RouteTable(
            'PublicRouteTable',
            VpcId=Ref(vpc),
            Tags=Tags(Name=Sub('${EnvironmentName} Public Routes')),
        )
    )
    # DefaultPublicRoute
    template.add_resource(
        Route(
            'DefaultPublicRoute',
            RouteTableId=Ref(pub_route_table),
            DestinationCidrBlock='0.0.0.0/0',
            GatewayId=Ref(internet_gateway),
        )
    )
    # PublicSubnet1RouteTableAssociation
    template.add_resource(
        SubnetRouteTableAssociation(
            'PublicSubnet1RouteTableAssociation',
            RouteTableId=Ref(pub_route_table),
            SubnetId=Ref(pub_subnet1),
        )
    )
    # PublicSubnet2RouteTableAssociation
    template.add_resource(
        SubnetRouteTableAssociation(
            'PublicSubnet2RouteTableAssociation',
            RouteTableId=Ref(pub_route_table),
            SubnetId=Ref(pub_subnet2),
        )
    )
    # PrivateRouteTable1
    prvt_route_table1 = template.add_resource(
        RouteTable(
            'PrivateRouteTable1',
            VpcId=Ref(vpc),
            Tags=Tags(Name=Sub('${EnvironmentName} Private Routes (AZ1)')),
        )
    )
    # DefaultPrivateRoute1
    template.add_resource(
        Route(
            'DefaultPrivateRoute1',
            RouteTableId=Ref(prvt_route_table1),
            DestinationCidrBlock='0.0.0.0/0',
            NatGatewayId=Ref(nat_gateway1),
        )
    )
    # PrivateSubnet1RouteTableAssociation
    template.add_resource(
        SubnetRouteTableAssociation(
            'PrivateSubnet1RouteTableAssociation',
            RouteTableId=Ref(prvt_route_table1),
            SubnetId=Ref(prvt_subnet1),
        )
    )
    # PrivateRouteTable2
    prvt_route_table2 = template.add_resource(
        RouteTable(
            'PrivateRouteTable2',
            VpcId=Ref(vpc),
            Tags=Tags(Name=Sub('${EnvironmentName} Private Routes (AZ2)')),
        )
    )
    # DefaultPrivateRoute2
    template.add_resource(
        Route(
            'DefaultPrivateRoute2',
            RouteTableId=Ref(prvt_route_table2),
            DestinationCidrBlock='0.0.0.0/0',
            NatGatewayId=Ref(nat_gateway2),
        )
    )
    # PrivateSubnet2RouteTableAssociation
    template.add_resource(
        SubnetRouteTableAssociation(
            'PrivateSubnet2RouteTableAssociation',
            RouteTableId=Ref(prvt_route_table2),
            SubnetId=Ref(prvt_subnet2),
        )
    )

    # Outputs
    template.add_output(Output(
        'VPC',
        Description='A reference to the created VPC',
        Value=Ref(vpc),
    ))
    template.add_output(Output(
        'PublicSubnets',
        Description='A list of the public subnets',
        Value=Join(',', [Ref(pub_subnet1), Ref(pub_subnet2)]),
    ))
    template.add_output(Output(
        'PrivateSubnets',
        Description='A list of the private subnets',
        Value=Join(',', [Ref(prvt_subnet1), Ref(prvt_subnet2)]),
    ))
    template.add_output(Output(
        'PublicSubnet1',
        Description='A reference to the public subnet in the 1st Availability Zone',
        Value=Ref(pub_subnet1),
    ))
    template.add_output(Output(
        'PublicSubnet2',
        Description='A reference to the public subnet in the 2nd Availability Zone',
        Value=Ref(pub_subnet2),
    ))
    template.add_output(Output(
        'PrivateSubnet1',
        Description='A reference to the private subnet in the 1st Availability Zone',
        Value=Ref(prvt_subnet1),
    ))
    template.add_output(Output(
        'PrivateSubnet2',
        Description='A reference to the private subnet in the 2nd Availability Zone',
        Value=Ref(prvt_subnet2),
    ))

    print(template.to_json())
) ] ), NoValue ), **common_bucket_conf, ) ) # Output S3 asset bucket name template.add_output( Output( "AssetsBucketDomainName", Description="Assets bucket domain name", Value=GetAtt(assets_bucket, "DomainName"), ) ) # Create an S3 bucket that holds user uploads or other non-public files private_assets_bucket = template.add_resource( Bucket( "PrivateAssetsBucket", AccessControl=Private, PublicAccessBlockConfiguration=PublicAccessBlockConfiguration( BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True, ),
# PostgreSQL RDS instance.  MasterUserPassword is the literal placeholder
# "******"; the DependsOn custom resource presumably sets the real
# password out-of-band -- NOTE(review): confirm it actually does.
database_instance = rds.DBInstance(
    db_logical_id,
    DependsOn="AppBatchbotRDSInstancePassword",
    AllocatedStorage="30",  # GB
    DBInstanceClass="db.t2.small",
    DBInstanceIdentifier=Ref("AWS::StackName"),
    DBName=Ref("DBName"),
    DBSubnetGroupName=Ref("DBSubnetGroupName"),
    Engine="postgres",
    EngineVersion="12.3",
    MasterUserPassword="******",
    MasterUsername=Ref("DBUsername"),
    # IAM auth lets clients connect with short-lived tokens instead of
    # the master password.
    EnableIAMDatabaseAuthentication=True,
    MultiAZ="false",
    StorageEncrypted="true",
    StorageType="standard",
    VPCSecurityGroups=[Ref("DBSecurityGroup")],
    Tags=tag_builder(),
)
t.add_resource(database_instance)

# Expose the endpoint DNS name for application configuration.
t.add_output([
    Output(
        "RDSEndpoint",
        Description="DNS name of RDS instance",
        Value=GetAtt(database_instance, "Endpoint.Address"),
    )
])
print(t.to_json())
def set_up_stack(self):
    """Build the MMW worker stack.

    Registers all stack parameters, then creates the security groups,
    load balancer, auto-scaling, CloudWatch and DNS resources, and
    exports the load-balancer endpoint and hosted-zone ID as outputs.
    """
    super(Worker, self).set_up_stack()
    # Tag every resource with the stack type on top of the caller tags.
    tags = self.get_input('Tags').copy()
    tags.update({'StackType': 'Worker'})
    self.default_tags = tags
    self.region = self.get_input('Region')
    self.add_description('Worker stack for MMW')
    # Parameters
    self.color = self.add_parameter(
        Parameter('StackColor',
                  Type='String',
                  Description='Stack color',
                  AllowedValues=['Blue', 'Green']), 'StackColor')
    self.keyname = self.add_parameter(
        Parameter('KeyName',
                  Type='String',
                  Description='Name of an existing EC2 key pair'), 'KeyName')
    self.ip_access = self.add_parameter(
        Parameter('IPAccess',
                  Type='String',
                  Default=self.get_input('IPAccess'),
                  Description='CIDR for allowing SSH access'), 'IPAccess')
    self.availability_zones = self.add_parameter(
        Parameter(
            'AvailabilityZones',
            Type='CommaDelimitedList',
            Description='Comma delimited list of availability zones'),
        'AvailabilityZones')
    # NoEcho keeps the password out of console/API parameter listings.
    self.rds_password = self.add_parameter(
        Parameter(
            'RDSPassword',
            Type='String',
            NoEcho=True,
            Description='Database password',
        ), 'RDSPassword')
    self.worker_instance_type = self.add_parameter(
        Parameter(
            'WorkerInstanceType',
            Type='String',
            Default='t2.micro',
            Description='Worker EC2 instance type',
            AllowedValues=EC2_INSTANCE_TYPES,
            ConstraintDescription='must be a valid EC2 instance type.'),
        'WorkerInstanceType')
    self.worker_ami = self.add_parameter(
        Parameter('WorkerAMI',
                  Type='String',
                  Default=self.get_recent_worker_ami(),
                  Description='Worker AMI'), 'WorkerAMI')
    self.worker_instance_profile = self.add_parameter(
        Parameter('WorkerInstanceProfile',
                  Type='String',
                  Default='WorkerInstanceProfile',
                  Description='Worker instance profile'),
        'WorkerInstanceProfile')
    # ASG sizing (Strings because the CFN parameters are typed String).
    self.worker_auto_scaling_desired = self.add_parameter(
        Parameter('WorkerAutoScalingDesired',
                  Type='String',
                  Default='2',
                  Description='Worker AutoScalingGroup desired'),
        'WorkerAutoScalingDesired')
    self.worker_auto_scaling_min = self.add_parameter(
        Parameter('WorkerAutoScalingMin',
                  Type='String',
                  Default='0',
                  Description='Worker AutoScalingGroup minimum'),
        'WorkerAutoScalingMin')
    self.worker_auto_scaling_max = self.add_parameter(
        Parameter('WorkerAutoScalingMax',
                  Type='String',
                  Default='2',
                  Description='Worker AutoScalingGroup maximum'),
        'WorkerAutoScalingMax')
    # Scheduled scaling: cron-style recurrences (defaults look like UTC
    # weekday start / nightly stop -- TODO confirm timezone assumption).
    self.worker_auto_scaling_schedule_start_recurrence = self.add_parameter(  # NOQA
        Parameter('WorkerAutoScalingScheduleStartRecurrence',
                  Type='String',
                  Default='0 12 * * 1-5',
                  Description='Worker ASG schedule start recurrence'),
        'WorkerAutoScalingScheduleStartRecurrence')
    self.worker_auto_scaling_schedule_start_capacity = self.add_parameter(  # NOQA
        Parameter('WorkerAutoScalingScheduleStartCapacity',
                  Type='String',
                  Default='2',
                  Description='Worker ASG schedule start capacity'),
        'WorkerAutoScalingScheduleStartCapacity')
    self.worker_auto_scaling_schedule_end_recurrence = self.add_parameter(  # NOQA
        Parameter('WorkerAutoScalingScheduleEndRecurrence',
                  Type='String',
                  Default='0 0 * * *',
                  Description='Worker ASG schedule end recurrence'),
        'WorkerAutoScalingScheduleEndRecurrence')
    self.worker_auto_scaling_schedule_end_capacity = self.add_parameter(  # NOQA
        Parameter('WorkerAutoScalingScheduleEndCapacity',
                  Type='String',
                  Default='0',
                  Description='Worker ASG schedule end capacity'),
        'WorkerAutoScalingScheduleEndCapacity')
    self.public_subnets = self.add_parameter(
        Parameter('PublicSubnets',
                  Type='CommaDelimitedList',
                  Description='A list of public subnets'), 'PublicSubnets')
    self.private_subnets = self.add_parameter(
        Parameter('PrivateSubnets',
                  Type='CommaDelimitedList',
                  Description='A list of private subnets'), 'PrivateSubnets')
    self.public_hosted_zone_name = self.add_parameter(
        Parameter('PublicHostedZoneName',
                  Type='String',
                  Description='Route 53 public hosted zone name'),
        'PublicHostedZoneName')
    self.vpc_id = self.add_parameter(
        Parameter('VpcId', Type='String', Description='VPC ID'), 'VpcId')
    self.notification_topic_arn = self.add_parameter(
        Parameter(
            'GlobalNotificationsARN',
            Type='String',
            Description='ARN for an SNS topic to broadcast notifications'),
        'GlobalNotificationsARN')
    # Resources
    worker_lb_security_group, \
        worker_security_group = self.create_security_groups()
    worker_lb = self.create_load_balancer(worker_lb_security_group)
    worker_auto_scaling_group = self.create_auto_scaling_resources(
        worker_security_group, worker_lb)
    self.create_cloud_watch_resources(worker_auto_scaling_group)
    self.create_dns_records(worker_lb)
    # Outputs
    self.add_output(
        Output('WorkerLoadBalancerEndpoint',
               Value=GetAtt(worker_lb, 'DNSName')))
    self.add_output(
        Output('WorkerLoadBalancerHostedZoneNameID',
               Value=GetAtt(worker_lb, 'CanonicalHostedZoneNameID')))
# Encrypt the Datadog application key through the KMS custom-resource
# Lambda so only ciphertext is stored in the stack.
application_key = t.add_resource(
    Encrypt("ApplicationKey",
            ServiceToken=lambda_arn,
            Base64Data=Base64(Ref(datadog_application_key)),
            KmsKeyArn=kms_key_arn))

# Datadog Lambda: code comes from the bucket/key pair in the
# 'lambda_package' parameter (element 0 = bucket, element 1 = key).
datadog_lambda = t.add_resource(
    awslambda.Function(
        "datadoglambda",
        DependsOn=["LogGroup"],  # log_group.title would also work
        Code=awslambda.Code(S3Bucket=Select(0, Ref(lambda_package)),
                            S3Key=Select(1, Ref(lambda_package))),
        Handler="index.handler",
        FunctionName=Join(
            "-", ["datadoglambda", Ref("AWS::StackName")]),
        Role=GetAtt(datadog_lambda_role, "Arn"),
        # NOTE(review): python2.7 is a deprecated Lambda runtime.
        Runtime="python2.7",
        Timeout=300,
        MemorySize=1536,
        KmsKeyArn=kms_key_arn,
        # Ciphertext env vars; the function decrypts them at runtime.
        Environment=awslambda.Environment(
            Variables={
                'api_key': GetAtt(api_key, "CiphertextBase64"),
                'application_key': GetAtt(application_key,
                                          "CiphertextBase64"),
            })))

# Export the function ARN for cross-stack consumers.
t.add_output(
    Output("LambdaArn",
           Description="lambda arn",
           Value=GetAtt(datadog_lambda, "Arn"),
           Export=Export(Sub("${AWS::StackName}-LambdaArn"))))
def create_template(self):
    """Create template (main function called by Stacker).

    Builds a private, versioned S3 bucket plus a bucket policy that
    rejects unencrypted uploads, and exports the bucket name and ARN.
    """
    template = self.template
    variables = self.get_variables()
    template.add_version('2010-09-09')
    template.add_description('Sample app')

    # Conditions
    # The bucket name is optional: '' and the literal string 'undefined'
    # both mean "let CloudFormation generate a name".
    template.add_condition(
        'BucketNameOmitted',
        Or(Equals(variables['BucketName'].ref, ''),
           Equals(variables['BucketName'].ref, 'undefined')))

    # Resources
    # Private, versioned bucket; noncurrent versions expire after 90 days.
    bucket = template.add_resource(
        s3.Bucket(
            'Bucket',
            AccessControl=s3.Private,
            BucketName=If('BucketNameOmitted', Ref('AWS::NoValue'),
                          variables['BucketName'].ref),
            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                 Status='Enabled')
            ]),
            Tags=Tags(application=variables['ApplicationName'].ref,
                      customer=variables['CustomerName'].ref,
                      environment=variables['EnvironmentName'].ref),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled')))
    template.add_output(
        Output('BucketName',
               Description='Name of bucket',
               Value=Ref(bucket)))
    template.add_output(
        Output('BucketArn',
               Description='Arn of bucket',
               Value=GetAtt(bucket, 'Arn')))
    # https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html
    # Deny PutObject when the SSE header is wrong (not AES256) or missing
    # entirely (Null condition).
    template.add_resource(
        s3.BucketPolicy(
            'RequireBucketEncryption',
            Bucket=Ref(bucket),
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Sid='DenyIncorrectEncryptionHeader',
                        Action=[awacs.s3.PutObject],
                        Condition=Condition(
                            StringNotEquals(
                                's3:x-amz-server-side-encryption',
                                'AES256')),
                        Effect=Deny,
                        Principal=Principal('*'),
                        Resource=[Join('', [GetAtt(bucket, 'Arn'), '/*'])]),
                    Statement(
                        Sid='DenyUnEncryptedObjectUploads',
                        Action=[awacs.s3.PutObject],
                        Condition=Condition(
                            Null('s3:x-amz-server-side-encryption',
                                 'true')),
                        Effect=Deny,
                        Principal=Principal('*'),
                        Resource=[Join('', [GetAtt(bucket, 'Arn'), '/*'])])
                ])))
def add_private_zone(self, settings, dns_settings):
    """
    Add private zone to root template

    :param settings: execution settings providing the boto3 session used
        for namespace lookups
    :param dns_settings: DNS configuration; the private-namespace section
        may define Name, Lookup or Use
    :return:
    """
    # An explicit Name overrides the current default zone name.
    if keyisset("Name", dns_settings[self.private_namespace_key]):
        self.private_zone_name = dns_settings[
            self.private_namespace_key]["Name"]
    if keyisset("Lookup", dns_settings[self.private_namespace_key]):
        # Resolve an existing private namespace via the AWS APIs.
        namespace_info = lookup_namespace(
            dns_settings[self.private_namespace_key]["Lookup"],
            settings.session,
            private=True,
        )
        # Guard against a provided Name contradicting the lookup result.
        if (keyisset("Name", dns_settings[self.private_namespace_key])
                and not self.private_zone_name == namespace_info["ZoneTld"]):
            raise ValueError(
                "Zone name provided does not match the value looked up. Got",
                self.private_zone_name,
                "Resolved via ID",
                namespace_info["ZoneTld"],
            )
        self.private_zone_name = namespace_info["ZoneTld"]
        self.root_params.update({
            dns_params.PRIVATE_DNS_ZONE_NAME.title:
            namespace_info["ZoneTld"],
            dns_params.PRIVATE_DNS_ZONE_ID.title:
            namespace_info["NamespaceId"],
        })
        self.nested_params.update({
            dns_params.PRIVATE_DNS_ZONE_NAME.title:
            dns_params.DEFAULT_PRIVATE_DNS_ZONE,
            # BUGFIX: the key was the Parameter object
            # (dns_params.PRIVATE_DNS_ZONE_ID) instead of its .title
            # string, unlike every other *_params.update() in this method.
            dns_params.PRIVATE_DNS_ZONE_ID.title:
            GetAtt(self.private_map, "Id"),
        })
    elif keyisset("Use", dns_settings[self.private_namespace_key]):
        # Reuse the caller-supplied zone ID verbatim.
        self.root_params.update({
            dns_params.PRIVATE_DNS_ZONE_ID.title:
            dns_settings[self.private_namespace_key]["Use"],
            dns_params.PRIVATE_DNS_ZONE_NAME.title:
            self.private_zone_name,
        })
        self.nested_params.update({
            dns_params.PRIVATE_DNS_ZONE_ID.title:
            If(
                dns_conditions.CREATE_PRIVATE_NAMESPACE_CON_T,
                GetAtt(self.private_map, "Id"),
                Ref(dns_params.PRIVATE_DNS_ZONE_ID),
            ),
            dns_params.PRIVATE_DNS_ZONE_NAME.title:
            dns_params.DEFAULT_PRIVATE_DNS_ZONE,
        })
    else:
        # Neither Lookup nor Use: fall back to the (possibly overridden)
        # zone name and let the stack create the namespace if needed.
        LOG.info(self.private_zone_name)
        self.root_params.update({
            dns_params.PRIVATE_DNS_ZONE_NAME.title:
            self.private_zone_name
        })
        self.nested_params.update({
            dns_params.PRIVATE_DNS_ZONE_ID.title:
            If(
                dns_conditions.CREATE_PRIVATE_NAMESPACE_CON_T,
                GetAtt(self.private_map, "Id"),
                Ref(dns_params.PRIVATE_DNS_ZONE_ID),
            ),
            dns_params.PRIVATE_DNS_ZONE_NAME.title:
            dns_params.DEFAULT_PRIVATE_DNS_ZONE,
        })
def create_elasticsearch_host_output(template, elasticsearch_resource):
    """Add an 'ElasticsearchHost' output exposing the domain endpoint.

    :param template: troposphere Template the output is attached to
    :param elasticsearch_resource: Elasticsearch domain whose endpoint is
        exported
    :return: the Output object registered on the template
    """
    endpoint_attr = GetAtt(elasticsearch_resource, 'DomainEndpoint')
    host_output = Output(
        'ElasticsearchHost',
        Description='The host of the Elasticsearch instance',
        Value=endpoint_attr)
    return template.add_output(host_output)
def create_load_balancer_domain_output(template, load_balancer_resource):
    """Add a 'LoadBalancerDomain' output exposing the LB's DNS name.

    :param template: troposphere Template the output is attached to
    :param load_balancer_resource: load balancer whose DNS name is exported
    :return: the Output object registered on the template
    """
    dns_attr = GetAtt(load_balancer_resource, 'DNSName')
    domain_output = Output(
        'LoadBalancerDomain',
        Description='The domain name of the load balancer',
        Value=dns_attr)
    return template.add_output(domain_output)
)) t.add_resource(ec2.Instance( "instance", ImageId="ami-51aa712b", InstanceType="t2.micro", SecurityGroups=[Ref("SecurityGroup")], KeyName=Ref("KeyPair"), UserData=ud, IamInstanceProfile=Ref("InstanceProfile"), )) t.add_output(Output( "InstancePublicIp", Description="Public IP of our instance.", Value=GetAtt("instance", "PublicIp"), )) t.add_output(Output( "WebUrl", Description="Application endpoint", Value=Join("", [ "http://", GetAtt("instance", "PublicDnsName"), ":", ApplicationPort ]), )) print t.to_json()
# IAM role AWS Config assumes to record resource configuration changes.
config_recorder_role = template.add_resource(
    iam.Role(
        'ConfigRecorderRole',
        AssumeRolePolicyDocument=PolicyDocument(Statement=[
            Statement(Effect=Allow,
                      Principal=config_service_principal,
                      Action=[awacs.sts.AssumeRole])
        ]),
        Path='/',
        # AWS-managed service-role policy for the Config service.
        ManagedPolicyArns=[
            Sub('arn:${AWS::Partition}:iam::aws:policy/service-role/AWS_ConfigRole'
                )
        ]))

# Recorder tracks every supported resource type, including global ones
# (e.g. IAM).  DependsOn the bucket policy so delivery cannot fail first.
config_recorder = template.add_resource(
    config.ConfigurationRecorder('ConfigRecorder',
                                 DependsOn='ConfigBucketPolicy',
                                 RoleARN=GetAtt(config_recorder_role, 'Arn'),
                                 RecordingGroup=config.RecordingGroup(
                                     AllSupported=True,
                                     IncludeGlobalResourceTypes=True)))

# Hourly snapshots delivered to the Config bucket; notifications go to
# the SNS topic.
config_delivery_channel = template.add_resource(
    config.DeliveryChannel(
        'ConfigDeliveryChannel',
        DependsOn='ConfigBucketPolicy',
        ConfigSnapshotDeliveryProperties=config.
        ConfigSnapshotDeliveryProperties(DeliveryFrequency='One_Hour'),
        S3BucketName=Ref(config_bucket),
        SnsTopicARN=Ref(config_topic)))
Default="1", ))) web_log_group = logs.LogGroup( "WebLogs", template=template, RetentionInDays=365, DeletionPolicy="Retain", ) template.add_output(Output( "WebLogsGroup", Description="Web application log group", Value=GetAtt(web_log_group, "Arn") )) log_configuration = LogConfiguration( LogDriver="awslogs", Options={ 'awslogs-group': Ref(web_log_group), 'awslogs-region': Ref(AWS_REGION), } ) # ECS task web_task_definition = TaskDefinition( "WebTask", template=template,
def configure(self):
    """Build the CloudFormation stack for the BIND DNS service.

    For every public subnet an EIP/ENI pair and a single-instance
    AutoScalingGroup (capacity 0 by default) are created, plus a
    Route53 A record per server.  Returns nothing; resources are
    accumulated on the template via self.add_resource/add_parameter.
    """
    # NOTE(review): defaults/service appear to feed the standard
    # parameter/policy helpers called just below — confirm before reordering.
    self.defaults = {'instance_type': 't3.micro'}
    self.service = 'bind'
    self.set_description('Sets up BIND DNS servers')
    self.get_default_security_groups()
    self.get_standard_parameters()
    self.get_standard_policies()
    # Default AMI: latest Amazon Linux 2 HVM image for this region.
    self.ami = self.add_parameter(
        Parameter('AMI',
                  Type='String',
                  Description='AMI ID for instances',
                  Default=get_latest_ami_id(
                      self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2',
                      'amazon')))
    config = constants.ENVIRONMENTS[self.env][self.service]

    # All subnets in public get a DNS server
    subnets = self.get_subnets('public')

    # Add our security group: DNS over TCP and UDP from anywhere,
    # unrestricted egress.
    security_group = self.add_resource(
        ec2.SecurityGroup(
            '{}SecurityGroup'.format(self.name),
            VpcId=self.vpc_id,
            GroupDescription='Security Group for {}'.format(self.name),
            SecurityGroupIngress=[
                {
                    "IpProtocol": "tcp",
                    "FromPort": "53",
                    "ToPort": "53",
                    "CidrIp": "0.0.0.0/0"
                },  # DNS TCP
                {
                    "IpProtocol": "udp",
                    "FromPort": "53",
                    "ToPort": "53",
                    "CidrIp": "0.0.0.0/0"
                },  # DNS UDP
            ],
            SecurityGroupEgress=[{
                "IpProtocol": "-1",
                "FromPort": "-1",
                "ToPort": "-1",
                "CidrIp": "0.0.0.0/0"
            }]))

    route53_zone = constants.ENVIRONMENTS[self.env]['route53_zone']

    # Concatenate a BIND zone stanza for every configured forwarder;
    # the result is substituted into the cloud-init userdata below.
    zonefile = ''
    for zone in config['forwarders']:
        zonefile += "\n" + self.make_bind_zone(zone)

    for subnet in subnets:
        subnet_name = subnet['AvailabilityZone']
        role = '{}-{}-{}'.format(self.env, self.service,
                                 subnet_name)  # myenv-bind-us-west-2a
        # Add the elastic IP and the ENI for it, then attach it.
        eip = self.add_resource(
            ec2.EIP('{}InstanceEIP'.format(self.cfn_name(role)),
                    Domain='vpc'))
        eni = self.add_resource(
            ec2.NetworkInterface(
                '{}InstanceENI'.format(self.cfn_name(role)),
                SubnetId=subnet['SubnetId'],
                Description='ENI for {}'.format(role),
                GroupSet=[Ref(security_group)] + self.security_groups,
                SourceDestCheck=True,
                Tags=self.get_tags(service_override=self.service,
                                   role_override=role)))
        self.get_eni_policies()
        self.add_resource(
            ec2.EIPAssociation(
                '{}AssociateVPNInstanceENI'.format(self.cfn_name(role)),
                AllocationId=GetAtt(eip, "AllocationId"),
                NetworkInterfaceId=Ref(eni)))
        # Add a route53 DNS name
        self.add_resource(
            route53.RecordSetGroup(
                '{}Route53'.format(self.cfn_name(role)),
                HostedZoneName=route53_zone,
                RecordSets=[
                    route53.RecordSet(
                        Name="{}.{}".format(role, route53_zone),
                        ResourceRecords=[Ref(eip)],
                        Type='A',
                        TTL=600)
                ]))
        # Substitute the userdata template and feed it to CFN
        userdata_template = self.get_cloudinit_template(
            replacements=(('__PROMPT_COLOR__', self.prompt_color()),
                          ('__SERVICE__', self.service),
                          ('__BIND_ZONEFILE__', zonefile)))
        userdata = Sub(
            userdata_template.replace(
                '${', '${!')  # Replace bash brackets with CFN escaped style
            .replace(
                '{#', '${'
            ),  # Replace rain-style CFN escapes with proper CFN brackets
            {'CFN_ENI_ID': Ref(eni)})
        launch_configuration = self.add_resource(
            autoscaling.LaunchConfiguration(
                '{}LaunchConfiguration'.format(self.cfn_name(role)),
                AssociatePublicIpAddress=True,
                KeyName=Ref(self.keypair_name),
                ImageId=Ref(self.ami),
                InstanceType=Ref(self.instance_type),
                InstanceMonitoring=False,
                IamInstanceProfile=Ref(self.instance_profile),
                UserData=Base64(userdata)))
        # ASG pinned to this subnet's AZ.  Min/Desired of 0 means the
        # server is only brought up when capacity is raised explicitly.
        self.add_resource(
            autoscaling.AutoScalingGroup(
                '{}ASGroup'.format(self.cfn_name(role)),
                AvailabilityZones=[subnet['AvailabilityZone']],
                HealthCheckType='EC2',
                LaunchConfigurationName=Ref(launch_configuration),
                MinSize=0,
                MaxSize=1,
                DesiredCapacity=0,
                VPCZoneIdentifier=[subnet['SubnetId']],
                Tags=self.get_autoscaling_tags(
                    service_override=self.service,
                    role_override=role) + [autoscaling.Tag('Name', role, True)]))
IpProtocol='-1') ], SecurityGroupEgress=[ ec2.SecurityGroupRule(CidrIp='0.0.0.0/0', IpProtocol='-1') ])) EC2SecurityGroup6 = template.add_resource( ec2.SecurityGroup('EC2SecurityGroup6', GroupDescription='Created by RDS management console', GroupName='sg_mysql', VpcId='vpc-0e12e822c1e5549cf', SecurityGroupIngress=[ ec2.SecurityGroupRule( CidrIp='162.211.184.9/32', FromPort=GetAtt(RDSDBInstance, 'Endpoint.Port'), IpProtocol='tcp', ToPort=GetAtt(RDSDBInstance, 'Endpoint.Port')) ], SecurityGroupEgress=[ ec2.SecurityGroupRule(CidrIp='0.0.0.0/0', IpProtocol='-1') ])) EC2SecurityGroup7 = template.add_resource( ec2.SecurityGroup('EC2SecurityGroup7', GroupDescription='2020-02-23T21:41:00.306Z', GroupName='ws-cur-477', VpcId='vpc-0e12e822c1e5549cf', SecurityGroupIngress=[ ec2.SecurityGroupRule(CidrIp='0.0.0.0/0',
def GenerateStepJenkinsLayer():
    """Build and return a troposphere Template for the Step Hackathon
    Jenkins host: security group, EIP, ENI, EC2 instance and a Route53
    A record pointing at the instance's public IP.
    """
    t = Template()
    t.add_description("""\
Jenkins for Step Hackathon Layer
""")
    # Stack parameters -- all have defaults except the IAM role name.
    stackname_param = t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: hackathon)",
            Type="String",
            Default="hackathon",
        ))
    vpcid_param = t.add_parameter(
        Parameter(
            "VpcId",
            Type="String",
            Description="VpcId of your existing Virtual Private Cloud (VPC)",
            Default="vpc-fab00e9f"))
    subnets = t.add_parameter(
        Parameter(
            "Subnets",
            Type="CommaDelimitedList",
            Description=(
                "The list SubnetIds, for public subnets in the "
                "region and in your Virtual Private Cloud (VPC) - minimum one"
            ),
            Default="subnet-b68f3bef,subnet-9a6208ff,subnet-bfdd4fc8"))
    keypair_param = t.add_parameter(
        Parameter("KeyPair",
                  Description="Name of an existing EC2 KeyPair to enable SSH "
                  "access to the instance",
                  Type="String",
                  Default="glueteam"))
    jenkins_ami_id_param = t.add_parameter(
        Parameter("JenkinsAmiId",
                  Description="Jenkins server AMI ID (default: ami-f3641a84)",
                  Type="String",
                  Default="ami-f3641a84"))
    operations_subdomain_hosted_zone_param = t.add_parameter(
        Parameter("DashsoftHostedZoneParam",
                  Description="HostedZone (default: hackathon.operations.dk)",
                  Type="String",
                  Default="hackathon.operations.dk"))
    iam_role_param = t.add_parameter(
        Parameter(
            "IamRole",
            Description="IAM Role name",
            Type="String",
        ))
    # --------- Jenkins instance
    # SSH, HTTP and HTTPS are open to the world.
    jenkins_sg = t.add_resource(
        ec2.SecurityGroup(
            'JenkinsSG',
            GroupDescription='Security group for Jenkins host',
            VpcId=Ref(vpcid_param),
            Tags=Tags(Name=Join("", [Ref(stackname_param), "SG"])),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="443",
                    ToPort="443",
                    CidrIp="0.0.0.0/0",
                ),
            ]))
    jenkins_eip = t.add_resource(ec2.EIP(
        'JenkinsEIP',
        Domain='vpc',
    ))
    # ENI placed in the first public subnet; the EIP is associated with
    # it further down via JenkinsEIPAssoc.
    jenkins_eth0 = t.add_resource(
        ec2.NetworkInterface(
            "JenkinsEth0",
            Description=Join("", [Ref(stackname_param), " Eth0"]),
            GroupSet=[
                Ref(jenkins_sg),
            ],
            SourceDestCheck=True,
            SubnetId=Select(0, Ref(subnets)),
            Tags=Tags(
                Name=Join("", [Ref(stackname_param), " Interface 0"]),
                Interface="eth0",
            )))
    jenkins_host = t.add_resource(
        ec2.Instance(
            'JenkinsHost',
            ImageId=Ref(jenkins_ami_id_param),
            InstanceType='m3.medium',
            KeyName=Ref(keypair_param),
            IamInstanceProfile=Ref(iam_role_param),
            NetworkInterfaces=[
                ec2.NetworkInterfaceProperty(
                    NetworkInterfaceId=Ref(jenkins_eth0),
                    DeviceIndex="0",
                ),
            ],
            Tags=Tags(Name=Ref(stackname_param), Id=Ref(stackname_param)),
            UserData=Base64(Join('', [
                '#!/bin/bash\n',
            ])),
        ))
    jenkins_eip_assoc = t.add_resource(
        ec2.EIPAssociation(
            "JenkinsEIPAssoc",
            NetworkInterfaceId=Ref(jenkins_eth0),
            AllocationId=GetAtt("JenkinsEIP", "AllocationId"),
            PrivateIpAddress=GetAtt("JenkinsEth0",
                                    "PrimaryPrivateIpAddress"),
        ))
    # A record resolving jenkins.<zone> to the instance public IP;
    # DependsOn ensures the EIP is attached before the record is made.
    jenkins_host_cname = t.add_resource(
        route53.RecordSetType(
            "JenkinsHostCname",
            HostedZoneName=Join(
                "", [Ref(operations_subdomain_hosted_zone_param), "."]),
            Comment=Join("", ["Jenkins host CNAME for ",
                              Ref(stackname_param)]),
            Name=Join(
                "",
                ["jenkins.", Ref(operations_subdomain_hosted_zone_param),
                 "."]),
            Type="A",
            TTL="60",
            ResourceRecords=[GetAtt("JenkinsHost", "PublicIp")],
            DependsOn="JenkinsEIPAssoc"))
    return t
"Version": "2012-10-17", "Statement": [ {"Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": {"Service": ["lambda.amazonaws.com"]} } ]}, )) hello_world_lambda = template.add_resource( awslambda.Function( 'HelloWorldFunction', FunctionName='hello_world', Description='Hello world lambdas with Python 3.6', Handler='lambda_function.lambda_handler', Role=GetAtt('ExecutionRole', 'Arn'), Code=awslambda.Code( S3Bucket='nicor-dev', S3Key='deployments/lambdas/travis_build/hello_world.zip', ), Runtime='python3.6', Timeout='30', MemorySize=128 ) ) hello_world_with_enc_vars_lambda = template.add_resource( awslambda.Function( 'HelloWorldEncVarsFunction', FunctionName='hello_world_enc_vars', Description='Hello world lambdas with encrypted vars Python 3.6',
def create_subnet(t, output):
    """Add network plumbing for three AZs (A/B/C) to template *t*.

    Creates: an Internet Gateway + VPC attachment, one public and one
    private /24 subnet per AZ, one EIP + NAT gateway per AZ, a shared
    public route table and one private route table per AZ (default route
    via that AZ's NAT gateway), subnet/route-table associations, and the
    corresponding stack outputs with exports.

    *output* must carry 'service', 'env', 'vpc', 'vpcenvcidr' and
    'service_id'; the created subnet Refs are recorded under
    output["SubnetIds"].  Returns (t, output).

    Logical resource IDs (PubSubnetA, NATeipB, PrivRouteTableC, ...) are
    identical to the previous hand-unrolled version, so existing stacks
    are unaffected; troposphere emits JSON with sorted keys, so resource
    creation order here is not semantic.
    """
    ref_region = Ref('AWS::Region')
    azs = ['A', 'B', 'C']

    igw = t.add_resource(InternetGateway(
        "InternetGateway",
        Tags=Tags(
            Name=Join("", [output["service"], "-", output["env"]])
        )
    ))
    t.add_resource(VPCGatewayAttachment(
        "AttachGateway",
        VpcId=output["vpc"],
        InternetGatewayId=Ref("InternetGateway"),
    ))

    def _subnet(title, third_octet, az_index, tag_suffix):
        # One /24 carved out of the VPC CIDR, pinned to a single AZ.
        return t.add_resource(Subnet(
            title,
            CidrBlock=Join('', [output["vpcenvcidr"], '.', third_octet,
                                '.0/24']),
            VpcId=output["vpc"],
            AvailabilityZone=Select(az_index, GetAZs(ref_region)),
            Tags=Tags(
                Name=Join("", [output["service"], "-", output["env"],
                               tag_suffix])
            )
        ))

    # Public subnets use third octets 0-2, private ones 3-5.
    pub_subnets = {}
    priv_subnets = {}
    for i, az in enumerate(azs):
        pub_subnets[az] = _subnet('PubSubnet%s' % az, str(i), str(i),
                                  "-Pub%s" % az)
    for i, az in enumerate(azs):
        priv_subnets[az] = _subnet('PrivSubnet%s' % az, str(i + 3), str(i),
                                   "-Priv%s" % az)

    output["SubnetIds"] = {}
    for az in azs:
        output["SubnetIds"]["pubsubnet%s" % az.lower()] = Ref(pub_subnets[az])
    for az in azs:
        output["SubnetIds"]["privsubnet%s" % az.lower()] = \
            Ref(priv_subnets[az])

    # One NAT gateway (with its own EIP) per AZ, living in the public
    # subnet of that AZ.
    nat_gws = {}
    for az in azs:
        nat_eip = t.add_resource(EIP(
            'NATeip%s' % az,
            Domain="vpc"
        ))
        nat_gws[az] = t.add_resource(NatGateway(
            'NatGw%s' % az,
            AllocationId=GetAtt(nat_eip, 'AllocationId'),
            SubnetId=Ref(pub_subnets[az]),
        ))

    # Shared public route table: default route out through the IGW.
    pubroutetable = t.add_resource(RouteTable(
        "PubRouteTable",
        VpcId=output["vpc"],
        Tags=Tags(
            Name=Join("", [output["service"], "-", output["env"], "-",
                           "PubRT"])
        )
    ))
    t.add_resource(Route(
        "PubRoute",
        DependsOn="AttachGateway",
        RouteTableId=Ref(pubroutetable),
        DestinationCidrBlock="0.0.0.0/0",
        GatewayId=Ref(igw)
    ))

    # Per-AZ private route tables: default route via that AZ's NAT GW.
    priv_route_tables = {}
    for az in azs:
        priv_route_tables[az] = t.add_resource(RouteTable(
            "PrivRouteTable%s" % az,
            VpcId=output["vpc"],
            Tags=Tags(
                Name=Join("", [output["service"], "-", output["env"], "-",
                               "PrivRT-%s" % az])
            )
        ))
        t.add_resource(Route(
            "PrivRoute%s" % az,
            RouteTableId=Ref(priv_route_tables[az]),
            DestinationCidrBlock="0.0.0.0/0",
            NatGatewayId=Ref(nat_gws[az])
        ))

    # Associations: all public subnets share one table, each private
    # subnet gets its AZ's table.
    for az in azs:
        t.add_resource(SubnetRouteTableAssociation(
            'SubnetRouteTableAssociationPub%s' % az,
            SubnetId=Ref(pub_subnets[az]),
            RouteTableId=Ref(pubroutetable),
        ))
        t.add_resource(SubnetRouteTableAssociation(
            'SubnetRouteTableAssociationPriv%s' % az,
            SubnetId=Ref(priv_subnets[az]),
            RouteTableId=Ref(priv_route_tables[az]),
        ))

    # Stack outputs, each exported under a ${AWS::StackName}-prefixed name.
    outputs = [
        Output(
            "IGW",
            Value=Ref(igw),
            Export=Export(Sub("${AWS::StackName}-IGW")),
            Description="Internet Gateway"
        ),
        Output(
            "VPCEnvCidr",
            Value=output["vpcenvcidr"],
            Export=Export(Sub("${AWS::StackName}-CIDR")),
            Description="First 2 CIDR Block of the VPC"
        ),
        Output(
            "ServiceID",
            Value=output["service_id"],
            Export=Export(Sub("${AWS::StackName}-ServiceID")),
            Description="3rd Oct of CIDR block"
        ),
    ]
    for az in azs:
        outputs.append(Output(
            "NatGW%s" % az,
            Value=Ref(nat_gws[az]),
            Export=Export(Sub("${AWS::StackName}-NATGateway%s" % az)),
            Description="NAT Gateway AZ %s" % az
        ))
    for az in azs:
        outputs.append(Output(
            "PUBSUBNET%s" % az,
            Value=Ref(pub_subnets[az]),
            Export=Export(Sub("${AWS::StackName}-PubSubnet%s" % az)),
            Description="Public %s Subnet" % az
        ))
    for az in azs:
        outputs.append(Output(
            "PRIVSUBNET%s" % az,
            Value=Ref(priv_subnets[az]),
            Export=Export(Sub("${AWS::StackName}-PrivSubnet%s" % az)),
            Description="Priv %s Subnet" % az
        ))
    t.add_output(outputs)
    return t, output
] )) hugged_profile = t.add_resource(InstanceProfile( "HuggedProfile", Roles=[Ref(hugged_role)], )) hugged_dns = t.add_resource(RecordSetType( "HuggedDnsRecord", HostedZoneId=ZONE_ID, Name=FQDN, Type="A", TTL="180", ResourceRecords=[GetAtt("HuggedInstance", "PublicIp")], )) hugged_ecs_role = t.add_resource(Role( "HuggedEcsRole", AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": "ecs-tasks.amazonaws.com" }, "Action": "sts:AssumeRole" } ]
# Create a Lambda function that will be mapped code = [ "var response = require('cfn-response');", "exports.handler = function(event, context) {", " context.succeed('foobar!');", " return 'foobar!';", "};", ] # Create the Lambda function foobar_function = t.add_resource( Function( "FoobarFunction", Code=Code(ZipFile=Join("", code)), Handler="index.handler", Role=GetAtt("LambdaExecutionRole", "Arn"), Runtime="nodejs", )) # Create the Event Target foobar_target = Target("FoobarTarget", Arn=GetAtt('FoobarFunction', 'Arn'), Id="FooBarFunction1") # Create the Event Rule rule = t.add_resource( Rule("FoobarRule", EventPattern={ "source": ["aws.ec2"], "detail-type": ["EC2 Instance State-change Notification"], "detail": {
KeyName=If(conditions['has_kp'], Ref(params['keyname']), Ref("AWS::NoValue")), SecurityGroups=[ Ref(resources['sg']), ], ) ) tpl.add_output([ Output( "InstanceId", Description="InstanceId of the EC2 instance", Value=Ref(resources['ec2']), ), Output( "AZ", Description="AZ of the EC2 instance", Value=GetAtt(resources['ec2'], "AvailabilityZone"), ), Output( "PublicDNS", Description="Public DNSName of the EC2 instance", Value=GetAtt(resources['ec2'], "PublicDnsName"), ), Output( "PublicIP", Description="Public IP address of the EC2 instance", Value=GetAtt(resources['ec2'], "PublicIp"), ) ]) print(tpl.to_yaml())
def generate_template(service_name):
    """Return (as a JSON string) a CloudFormation template for the AWS
    exploitation lab.

    The stack contains a 10.0.0.0/16 VPC with one public and one private
    subnet, a public and a private EC2 instance (region-mapped AMIs),
    read-only IAM roles (the private one may also snapshot EBS volumes),
    and a multi-region CloudTrail delivering to a retained S3 bucket.

    *service_name* is interpolated into resource titles and Name tags.
    """
    t = Template()
    t.add_version('2010-09-09')
    t.add_description("""\
AWS CloudFormation Template for AWS Exploitation Lab
""")
    # Region -> AMI lookup tables supplied by the surrounding module.
    t.add_mapping("PublicRegionMap", ami_public_mapping)
    t.add_mapping("PrivateRegionMap", ami_private_mapping)
    keyname_param = t.add_parameter(
        Parameter(
            'KeyName',
            ConstraintDescription='must be the name of an existing EC2 KeyPair.',
            Description='Name of an existing EC2 KeyPair to enable SSH access to \
the instance',
            Type='AWS::EC2::KeyPair::KeyName',
        ))
    sshlocation_param = t.add_parameter(
        Parameter(
            'SSHLocation',
            Description=' The IP address range that can be used to SSH to the EC2 \
instances',
            Type='String',
            MinLength='9',
            MaxLength='18',
            Default='0.0.0.0/0',
            # Raw string so the \d escapes reach CloudFormation verbatim
            # (non-raw form only worked by accident and raises
            # DeprecationWarnings on modern Pythons).
            AllowedPattern=r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
            ConstraintDescription=(
                "must be a valid IP CIDR range of the form x.x.x.x/x."),
        ))
    instanceType_param = t.add_parameter(Parameter(
        'InstanceType',
        Type='String',
        Description='WebServer EC2 instance type',
        Default='t2.micro',
        AllowedValues=[
            't2.micro',
            't2.small',
            't2.medium',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
        ],
        ConstraintDescription='must be a valid EC2 instance type.',
    ))
    ref_stack_id = Ref('AWS::StackId')
    # Role assumable by EC2, limited to the AWS-managed ReadOnlyAccess.
    ec2_role = t.add_resource(Role(
        "%sEC2Role" % service_name,
        AssumeRolePolicyDocument=awacs.aws.Policy(
            Statement=[
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[awacs.aws.Action("sts", "AssumeRole")],
                    Principal=awacs.aws.Principal("Service",
                                                  ["ec2.amazonaws.com"])
                )
            ]
        )
    ))
    ec2_role.ManagedPolicyArns = [
        "arn:aws:iam::aws:policy/ReadOnlyAccess"
    ]
    # Inline policy letting the private instance snapshot EBS volumes.
    ec2_snapshot_policy_document = awacs.aws.Policy(
        Statement=[
            awacs.aws.Statement(
                Sid="PermitEC2Snapshots",
                Effect=awacs.aws.Allow,
                Action=[
                    awacs.aws.Action("ec2", "CreateSnapshot"),
                    awacs.aws.Action("ec2", "ModifySnapshotAttribute"),
                ],
                Resource=["*"]
            )
        ]
    )
    ec2_snapshot_policy = Policy(
        PolicyName="EC2SnapshotPermissions",
        PolicyDocument=ec2_snapshot_policy_document
    )
    priv_ec2_role = t.add_resource(Role(
        "%sPrivEC2Role" % service_name,
        AssumeRolePolicyDocument=awacs.aws.Policy(
            Statement=[
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[awacs.aws.Action("sts", "AssumeRole")],
                    Principal=awacs.aws.Principal("Service",
                                                  ["ec2.amazonaws.com"])
                )
            ]
        ),
        Policies=[ec2_snapshot_policy]
    ))
    priv_ec2_role.ManagedPolicyArns = [
        "arn:aws:iam::aws:policy/ReadOnlyAccess"
    ]
    VPC_ref = t.add_resource(
        VPC(
            'VPC',
            CidrBlock='10.0.0.0/16',
            Tags=Tags(Application=ref_stack_id)))
    instanceProfile = t.add_resource(
        InstanceProfile(
            "InstanceProfile",
            InstanceProfileName="%sInstanceRole" % (service_name),
            Roles=[Ref(ec2_role)]))
    privInstanceProfile = t.add_resource(
        InstanceProfile(
            "PrivInstanceProfile",
            InstanceProfileName="%sPrivInstanceRole" % (service_name),
            Roles=[Ref(priv_ec2_role)]))
    public_subnet = t.add_resource(
        Subnet(
            '%sSubnetPublic' % service_name,
            MapPublicIpOnLaunch=True,
            CidrBlock='10.0.1.0/24',
            VpcId=Ref(VPC_ref),
            Tags=Tags(
                Application=ref_stack_id,
                Name="%sSubnet_public" % (service_name))
        )
    )
    private_subnet = t.add_resource(
        Subnet(
            '%sSubnetPrivate' % service_name,
            MapPublicIpOnLaunch=False,
            CidrBlock='10.0.2.0/24',
            VpcId=Ref(VPC_ref),
            Tags=Tags(
                Application=ref_stack_id,
                Name="%sSubnet_private" % (service_name))
        )
    )
    internetGateway = t.add_resource(
        InternetGateway(
            'InternetGateway',
            Tags=Tags(
                Application=ref_stack_id,
                Name="%sInternetGateway" % service_name)))
    gatewayAttachment = t.add_resource(
        VPCGatewayAttachment(
            'AttachGateway',
            VpcId=Ref(VPC_ref),
            InternetGatewayId=Ref(internetGateway)))
    routeTable = t.add_resource(
        RouteTable(
            'RouteTable',
            VpcId=Ref(VPC_ref),
            Tags=Tags(
                Application=ref_stack_id,
                Name="%sRouteTable" % service_name)))
    route = t.add_resource(
        Route(
            'Route',
            DependsOn='AttachGateway',
            GatewayId=Ref('InternetGateway'),
            DestinationCidrBlock='0.0.0.0/0',
            RouteTableId=Ref(routeTable),
        ))
    # Only associate this Route Table with the public subnet
    subnetRouteTableAssociation = t.add_resource(
        SubnetRouteTableAssociation(
            'SubnetRouteTableAssociation',
            SubnetId=Ref(public_subnet),
            RouteTableId=Ref(routeTable),
        ))
    instanceSecurityGroup = t.add_resource(
        SecurityGroup(
            'InstanceSecurityGroup',
            GroupDescription='%sSecurityGroup' % service_name,
            SecurityGroupIngress=[
                # SSH is limited to the operator-supplied CIDR; the web
                # ports are world-open on purpose (lab environment).
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='22',
                    ToPort='22',
                    CidrIp=Ref(sshlocation_param)),
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='80',
                    ToPort='80',
                    CidrIp='0.0.0.0/0'),
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='1080',
                    ToPort='1080',
                    CidrIp='0.0.0.0/0'),
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='443',
                    ToPort='443',
                    CidrIp='0.0.0.0/0'),
                # All TCP within the 10/8 lab network.
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='0',
                    ToPort='65535',
                    CidrIp="10.0.0.0/8"),
            ],
            VpcId=Ref(VPC_ref),
        )
    )
    public_instance = t.add_resource(
        Instance(
            "Public%sInstance" % service_name,
            ImageId=FindInMap("PublicRegionMap", Ref("AWS::Region"), "AMI"),
            InstanceType=Ref(instanceType_param),
            KeyName=Ref(keyname_param),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=[Ref(instanceSecurityGroup)],
                    AssociatePublicIpAddress='true',
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(public_subnet))],
            UserData=Base64(public_instance_userdata),
            Tags=Tags(
                Application=ref_stack_id,
                Name='%sPublicInstance' % (service_name))
        )
    )
    private_instance = t.add_resource(
        Instance(
            "Private%sInstance" % service_name,
            ImageId=FindInMap("PrivateRegionMap", Ref("AWS::Region"), "AMI"),
            InstanceType=Ref(instanceType_param),
            KeyName=Ref(keyname_param),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=[Ref(instanceSecurityGroup)],
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(private_subnet))],
            UserData=Base64(private_instance_userdata),
            Tags=Tags(
                Application=ref_stack_id,
                Name='%sPrivateInstance' % (service_name)),
            # Matches the InstanceProfileName given to PrivInstanceProfile.
            IamInstanceProfile="%sPrivInstanceRole" % (service_name)
        )
    )
    outputs = []
    outputs.append(
        Output(
            "PublicIP",
            Description="IP Address of Public Instance",
            Value=GetAtt(public_instance, "PublicIp"),
        )
    )
    t.add_output(outputs)
    # Set up S3 Bucket and CloudTrail
    S3Bucket = t.add_resource(
        Bucket(
            "S3Bucket",
            DeletionPolicy="Retain"
        )
    )
    # Bucket policy letting CloudTrail write objects and read the ACL.
    S3PolicyDocument = awacs.aws.PolicyDocument(
        Id='EnforceServersideEncryption',
        Version='2012-10-17',
        Statement=[
            awacs.aws.Statement(
                Sid='PermitCTBucketPut',
                Action=[s3.PutObject],
                Effect=awacs.aws.Allow,
                Principal=awacs.aws.Principal(
                    "Service", ["cloudtrail.amazonaws.com"]),
                Resource=[Join('', [s3.ARN(''), Ref(S3Bucket), "/*"])],
            ),
            awacs.aws.Statement(
                Sid='PermitCTBucketACLRead',
                Action=[s3.GetBucketAcl],
                Effect=awacs.aws.Allow,
                Principal=awacs.aws.Principal(
                    "Service", ["cloudtrail.amazonaws.com"]),
                Resource=[Join('', [s3.ARN(''), Ref(S3Bucket)])],
            )
        ]
    )
    S3BucketPolicy = t.add_resource(
        BucketPolicy(
            "BucketPolicy",
            PolicyDocument=S3PolicyDocument,
            Bucket=Ref(S3Bucket),
            # Title string rather than the resource object, consistent with
            # the other DependsOn usages in this function.
            DependsOn=["S3Bucket"]
        )
    )
    myTrail = t.add_resource(
        Trail(
            "CloudTrail",
            IsLogging=True,
            S3BucketName=Ref(S3Bucket),
            DependsOn=["BucketPolicy"],
        )
    )
    myTrail.IsMultiRegionTrail = True
    myTrail.IncludeGlobalServiceEvents = True
    return t.to_json()
ViewerProtocolPolicy='redirect-to-https') ], CustomErrorResponses=[ cloudfront.CustomErrorResponse(ErrorCode=404, ResponseCode=200, ResponsePagePath='/index.html') ], DefaultCacheBehavior=cloudfront.DefaultCacheBehavior( ForwardedValues=cloudfront.ForwardedValues(QueryString=False), TargetOriginId=Join('-', ['S3', Ref(bucket_resource)]), ViewerProtocolPolicy='redirect-to-https'), DefaultRootObject='index.html', Enabled=True, IPV6Enabled=True, Origins=[ cloudfront.Origin(DomainName=GetAtt(bucket_resource, 'DomainName'), Id=Join('-', ['S3', Ref(bucket_resource)]), S3OriginConfig=cloudfront.S3Origin()) ], ViewerCertificate=cloudfront.ViewerCertificate( AcmCertificateArn=Ref(certificate_arn_parameter), SslSupportMethod='sni-only')))) ci_user_resource = template.add_resource( iam.User('CiUser', UserName=ci_user_name_variable, Policies=[ iam.Policy(PolicyName='CiUserPolicy', PolicyDocument={ 'Version':
def to_json(self):
    """Render the ELB, target groups, listeners and rules to
    CloudFormation JSON.

    The rendered string is memoized in ``self._json``; subsequent calls
    return the cached value without touching the template again.

    Raises:
        ValidationException: if fewer than two subnets or one
            certificate were configured, if no default target (and no
            ECS redirect) is set, or if the hosts of a target path mix
            'ip' and 'instance' types.
    """
    if self._json is not None:
        return self._json

    # Validity checks
    if len(self.subnet_ids) < 2:
        raise ValidationException(
            "Use .subnet_id() to specify at least two ELB subnets")
    if len(self.cert_ids) < 1:
        raise ValidationException(
            "Use .certificate_id() to specify at least one certificate")
    if not self._ecs_redirect and len(self.default_targets) < 1:
        raise ValidationException(
            "Use .default_target() to specify at least one default target or .ecs_redirect("
            ") to set up a redirect container")
    # .items() instead of the Python-2-only .iteritems() so this class
    # works on both Python 2 and 3.
    for (name, tp) in self.target_paths.items():
        if len(set(map(lambda h: h.type, tp.hosts))) != 1:
            raise ValidationException(
                "Inconsistent target types for %s. All hosts for a given path must have the "
                "same type (ip or instance)." % name)

    # Build Security Group
    if self._custom_elb_sgs:
        elb_sgs = self._custom_elb_sgs
    else:
        elb_sg = SecurityGroup(
            "ElbSecurityGroup",
            GroupDescription=Sub("${AWS::StackName}-ElbSg"),
            Tags=self.tags_with(Name=Sub("${AWS::StackName}-ElbSg")),
            VpcId=self.vpc_id,
            SecurityGroupEgress=[
                SecurityGroupRule(CidrIp="0.0.0.0/0", IpProtocol="-1")
            ],
            SecurityGroupIngress=self._sg_rules)
        self.template.add_resource(elb_sg)
        self.template.add_output(
            Output("ElbSecurityGroupOutput",
                   Description="Security group ID assigned to the ELB",
                   Value=Ref(elb_sg),
                   Export=Export(Sub("${AWS::StackName}-ElbSg"))))

        # Build Attachment Security Group: anything carrying this SG
        # accepts all traffic from the ELB's SG.
        inst_sg = SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription=Sub("${AWS::StackName}-InstSg"),
            Tags=self.tags_with(Name=Sub("${AWS::StackName}-InstSg")),
            VpcId=self.vpc_id,
            SecurityGroupEgress=[
                SecurityGroupRule(CidrIp="0.0.0.0/0", IpProtocol="-1")
            ],
            SecurityGroupIngress=[
                SecurityGroupRule(IpProtocol="-1",
                                  SourceSecurityGroupId=Ref(elb_sg))
            ])
        self.template.add_resource(inst_sg)
        self.template.add_output(
            Output("InstanceSecurityGroupOutput",
                   Description="Convenience SG to assign to instances",
                   Value=Ref(inst_sg),
                   Export=Export(Sub("${AWS::StackName}-InstSg"))))
        elb_sgs = [Ref("ElbSecurityGroup")]

    # Build ELB
    elb = LoadBalancer("ELB",
                       SecurityGroups=elb_sgs,
                       Scheme=self._elb_scheme,
                       Subnets=self.subnet_ids,
                       Tags=self.tags_with(Name=Ref("AWS::StackName")),
                       LoadBalancerAttributes=self.elb_attributes())
    if self._elb_name:
        elb.Name = self._elb_name
    self.template.add_resource(elb)
    self.template.add_output(
        Output("ElbArnOutput",
               Description="ARN of the ELB",
               Value=Ref(elb),
               Export=Export(Sub("${AWS::StackName}-ElbArn"))))
    self.template.add_output(
        Output("ElbDnsOutput",
               Description="DNS name of the ELB",
               Value=GetAtt("ELB", "DNSName"),
               Export=Export(Sub("${AWS::StackName}-ElbDns"))))

    # Build Default Target Group
    if self._ecs_redirect:
        default_tg_protocol = "HTTP"
    else:
        default_tg_protocol = self.default_targets[0].protocol
    default_tg = TargetGroup(
        "DefaultTargetGroup",
        Port=8080,
        Protocol=default_tg_protocol,
        Tags=self.tags_with(Name=Sub("${AWS::StackName}-Default")),
        VpcId=self.vpc_id,
        Targets=list(
            map(lambda h: TargetDescription(Id=h.host, Port=h.port),
                self.default_targets)),
        HealthyThresholdCount=2,
        Matcher=Matcher(HttpCode="200-399"))
    self.template.add_resource(default_tg)
    self.attach_alarm(default_tg)

    # Build Listener (HTTPS on 443, forwarding to the default TG)
    self.template.add_resource(
        Listener("HttpsListener",
                 Certificates=list(
                     map(lambda i: Certificate(CertificateArn=i),
                         self.cert_ids)),
                 DefaultActions=[
                     Action(Type="forward",
                            TargetGroupArn=Ref("DefaultTargetGroup"))
                 ],
                 LoadBalancerArn=Ref("ELB"),
                 Port=443,
                 Protocol="HTTPS"))

    # Build HTTP redirect
    if len(self.http_redirect_targets) > 0:
        # Build Redirect Target Group
        http_tg = TargetGroup(
            "RedirectTargetGroup",
            Port=8080,
            Protocol=self.http_redirect_targets[0].protocol,
            Tags=self.tags_with(Name=Sub("${AWS::StackName}-Redirect")),
            VpcId=self.vpc_id,
            Targets=list(
                map(lambda h: TargetDescription(Id=h.host, Port=h.port),
                    self.http_redirect_targets)),
            HealthyThresholdCount=2,
            Matcher=Matcher(HttpCode="200-399"))
        self.template.add_resource(http_tg)
        self.attach_alarm(http_tg)

    if self._ecs_redirect or len(self.http_redirect_targets) > 0:
        if self._ecs_redirect:
            redirect_tg = "DefaultTargetGroup"
        else:
            redirect_tg = "RedirectTargetGroup"
        # Build Listener (plain HTTP on 80 -> redirect target group)
        self.template.add_resource(
            Listener("HttpListener",
                     DefaultActions=[
                         Action(Type="forward",
                                TargetGroupArn=Ref(redirect_tg))
                     ],
                     LoadBalancerArn=Ref("ELB"),
                     Port=80,
                     Protocol="HTTP"))

    # Build Target Groups & Rules
    # (.items() for Python 2/3 compatibility, as above.)
    for (name, tp) in self.target_paths.items():
        name_an = alpha_numeric_name(name)
        tag_name = taggable_name(name)
        g = TargetGroup(
            "PathTg" + name_an,
            Port=tp.hosts[0].port,
            Protocol=tp.hosts[0].protocol,
            Tags=self.tags_with(Name="%s/%s" % (self.env_name, tag_name),
                                TargetPath=tag_name),
            Targets=list(map(lambda h: h.to_target_desc(), tp.hosts)),
            VpcId=self.vpc_id,
            HealthCheckPath="/%s" % name,
            HealthyThresholdCount=2,
            Matcher=tp.health_check_matcher)
        # TODO: We should probably explicitly specify this for every TG. Not
        #       doing that now because it will cause lots of updates. Maybe
        #       in 0.4?
        if len(tp.hosts) > 0 and tp.hosts[0].type != "instance":
            g.TargetType = tp.hosts[0].type
        if self.sticky:
            g.TargetGroupAttributes = [
                TargetGroupAttribute(Key="stickiness.enabled",
                                     Value="true"),
                TargetGroupAttribute(Key="stickiness.type",
                                     Value="lb_cookie")
            ]
        self.template.add_resource(g)
        self.attach_alarm(g)
        # Two rules per path: "/name/*" and the bare "/name".
        self.template.add_resource(
            ListenerRule(
                "PathRl" + name_an,
                Actions=[Action(Type="forward", TargetGroupArn=Ref(g))],
                Conditions=[
                    Condition(Field="path-pattern", Values=["/%s/*" % name])
                ],
                ListenerArn=Ref("HttpsListener"),
                Priority=self.priority_hash(name)))
        self.template.add_resource(
            ListenerRule(
                "PathRln" + name_an,
                Actions=[Action(Type="forward", TargetGroupArn=Ref(g))],
                Conditions=[
                    Condition(Field="path-pattern", Values=["/%s" % name])
                ],
                ListenerArn=Ref("HttpsListener"),
                Priority=self.priority_hash(name)))

    # Build Alternate Listeners
    for al in self.alt_listeners:
        tg_name = "AltTg%d" % al.port
        tg_protocol = al.hosts[0].protocol
        tg = TargetGroup(
            tg_name,
            Port=9999,
            Protocol=tg_protocol,
            Tags=self.tags_with(Name=Sub("${AWS::StackName}-%s" % tg_name)),
            VpcId=self.vpc_id,
            Targets=list(
                map(lambda h: TargetDescription(Id=h.host, Port=h.port),
                    al.hosts)),
            HealthyThresholdCount=2,
            Matcher=Matcher(HttpCode="200-399"))
        self.template.add_resource(tg)
        self.attach_alarm(tg)
        listener = Listener("AltListener%d" % al.port,
                            DefaultActions=[
                                Action(Type="forward",
                                       TargetGroupArn=Ref(tg_name))
                            ],
                            LoadBalancerArn=Ref("ELB"),
                            Port=al.port,
                            Protocol=al.protocol)
        if al.protocol == "HTTPS":
            listener.Certificates = list(
                map(lambda i: Certificate(CertificateArn=i),
                    self.cert_ids))
        self.template.add_resource(listener)

    self._json = self.template.to_json()
    return self._json
])) t.add_resource( ec2.Instance( "instance", ImageId="ami-a4c7edb2", InstanceType="t2.micro", SecurityGroups=[Ref("SecurityGroup")], KeyName=Ref("KeyPair"), UserData=ud, )) t.add_output( Output( "InstancePublicIp", Description="Public IP of our instance.", Value=GetAtt("instance", "PublicIp"), )) t.add_output( Output( "WebUrl", Description="Application endpoint", Value=Join("", [ "http://", GetAtt("instance", "PublicDnsName"), ":", ApplicationPort ]), )) print t.to_json()
def create_redis_host_output(template, redis_resource):
    """Register a 'RedisHost' output on *template*.

    The output exposes the Redis endpoint address via GetAtt and the
    return value is whatever template.add_output() returns.
    """
    host_output = Output(
        'RedisHost',
        Description='The host of the Redis instance',
        Value=GetAtt(redis_resource, 'RedisEndpoint.Address'),
    )
    return template.add_output(host_output)