def generate(env='pilot'):
    """Build the spider-pipeline CloudFormation template as a JSON string.

    For the given environment prefix this wires together:
      * an SQS task queue plus a dead-letter queue (14-day retention),
      * an IAM role and a Node.js Lambda consuming the task queue,
      * an SNS topic (fed by S3 object-created events) forwarding into SQS,
      * source/results S3 buckets and the SQS -> Lambda event source mapping.

    :param env: environment name prefixed onto every physical resource name.
    :returns: str -- ``template.to_json()``.
    :raises OSError: if ``./spider/index.js`` cannot be read.
    """
    template = Template()
    template.set_version("2010-09-09")

    param_spider_lambda_memory_size = template.add_parameter(
        Parameter(
            'SpiderLambdaMemorySize',
            Type=NUMBER,
            Description='Amount of memory to allocate to the Lambda Function',
            Default='128',
            AllowedValues=MEMORY_VALUES
        )
    )
    param_spider_lambda_timeout = template.add_parameter(
        Parameter(
            'SpiderLambdaTimeout',
            Type=NUMBER,
            Description='Timeout in seconds for the Lambda function',
            Default='60'
        )
    )

    # The DLQ is declared first: the main queue's RedrivePolicy needs its ARN.
    spider_tasks_queue_dlq_name = f'{env}-spider-tasks-dlq'
    spider_tasks_queue_dlq = template.add_resource(
        Queue(
            "SpiderTasksDLQ",
            QueueName=spider_tasks_queue_dlq_name,
            MessageRetentionPeriod=(60 * 60 * 24 * 14),  # 14 days
        )
    )

    spider_tasks_queue_name = f"{env}-spider-tasks"
    spider_tasks_queue = template.add_resource(
        Queue(
            "SpiderTasksQueue",
            QueueName=spider_tasks_queue_name,
            MessageRetentionPeriod=(60 * 60 * 24 * 14),  # 14 days
            VisibilityTimeout=300,
            RedrivePolicy=RedrivePolicy(
                deadLetterTargetArn=GetAtt(spider_tasks_queue_dlq, "Arn"),
                maxReceiveCount=2,  # two failed receives, then DLQ
            ),
            DependsOn=[spider_tasks_queue_dlq],
        )
    )

    # Execution role: wildcard logs/s3/sqs access on all resources.
    # NOTE(review): consider scoping Resource below "*" before production use.
    spider_lambda_role = template.add_resource(
        Role(
            "SpiderLambdaRole",
            Path="/",
            Policies=[
                Policy(
                    PolicyName="root",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Id="root",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[Action("logs", "*")]
                            ),
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[Action("s3", "*")]
                            ),
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[Action("sqs", "*")]
                            ),
                        ]
                    ),
                )
            ],
            AssumeRolePolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Action": ["sts:AssumeRole"],
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["lambda.amazonaws.com"]
                    }
                }]
            },
        )
    )

    # Read the handler source (kept for the disabled inline ZipFile deploy
    # below).  `with` guarantees the handle is closed -- the original left the
    # file object dangling.
    spider_file_path = './spider/index.js'
    with open(spider_file_path, 'r') as spider_file:
        spider_code = spider_file.readlines()

    spider_lambda = template.add_resource(
        Function(
            "SpiderLambda",
            Code=Code(
                S3Bucket='spider-lambda',
                S3Key=f'{env}.zip',
                # ZipFile=Join("", spider_code)
            ),
            Handler="index.handler",
            Role=GetAtt(spider_lambda_role, "Arn"),
            Runtime="nodejs12.x",
            # Hard-coded layer ARN pins the region to us-east-1.
            Layers=['arn:aws:lambda:us-east-1:342904801388:layer:spider-node-browser:1'],
            MemorySize=Ref(param_spider_lambda_memory_size),
            Timeout=Ref(param_spider_lambda_timeout),
            DependsOn=[spider_tasks_queue],
        )
    )

    # S3 -> SNS -> SQS fan-in: the topic delivers into the task queue.
    source_sns_name = f'{env}-source-sns-topic'
    source_sns_topic = template.add_resource(
        Topic(
            "SNSSource",
            TopicName=source_sns_name,
            Subscription=[
                Subscription(
                    Endpoint=GetAtt(spider_tasks_queue, "Arn"),
                    Protocol='sqs',
                )
            ],
            DependsOn=[spider_tasks_queue]
        )
    )
    source_sns_topic_policy = template.add_resource(
        TopicPolicy(
            "SourceForwardingTopicPolicy",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Id="AllowS3PutMessageInSNS",
                Statement=[
                    Statement(
                        Sid="AllowS3PutMessages",
                        Principal=Principal("Service", "s3.amazonaws.com"),
                        Effect=Allow,
                        Action=[
                            Action("sns", "Publish"),
                        ],
                        Resource=["*"],
                    )
                ]
            ),
            Topics=[Ref(source_sns_topic)],
        )
    )
    sns_sqs_policy = template.add_resource(
        QueuePolicy(
            "AllowSNSPutMessagesInSQS",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Id="AllowSNSPutMessagesInSQS",
                Statement=[
                    Statement(
                        Sid="AllowSNSPutMessagesInSQS2",
                        Principal=Principal("*"),
                        Effect=Allow,
                        Action=[
                            Action("sqs", "SendMessage"),
                        ],
                        Resource=["*"],
                    )
                ]
            ),
            Queues=[Ref(spider_tasks_queue)],
            DependsOn=[spider_tasks_queue],
        )
    )

    # Buckets
    source_bucket_name = f'{env}-source-bucket'
    source_bucket = template.add_resource(
        Bucket(
            "SourceBucket",
            BucketName=source_bucket_name,
            NotificationConfiguration=NotificationConfiguration(
                TopicConfigurations=[
                    TopicConfigurations(
                        Topic=Ref(source_sns_topic),
                        Event="s3:ObjectCreated:*",
                    )
                ],
            ),
            # The topic policy must exist first or S3 rejects the notification
            # configuration during stack creation.
            DependsOn=[source_sns_topic_policy],
        )
    )
    results_bucket_name = f'{env}-results-bucket'
    results_bucket = template.add_resource(
        Bucket(
            "ResultsBucket",
            BucketName=results_bucket_name,
        )
    )

    # Lambda trigger
    template.add_resource(
        EventSourceMapping(
            "TriggerLambdaSpiderFromSQS",
            EventSourceArn=GetAtt(spider_tasks_queue, "Arn"),
            FunctionName=Ref(spider_lambda),
            BatchSize=1,  # Default process tasks one by one
        )
    )
    return template.to_json()
def generate_vpc_template(layers, az_count, cidr_block):
    """Build a three-layer (storage / public / app) VPC template.

    :param layers: mapping of layer name -> list of subnet CIDRs; keys
        'stor', 'pub' and 'app' are read here.  Assumed to hold one CIDR
        per entry of ``az_count`` -- TODO confirm with callers.
    :param az_count: iterable of AZ indices into the module-level ``alpha``
        sequence (0 -> 'a', 1 -> 'b', ...) -- presumably; verify ``alpha``.
    :param cidr_block: CIDR block for the VPC itself.
    :returns: the populated troposphere ``Template`` object.
    """
    TPL = Template()
    TPL.set_description('VPC - Version 2019-06-05')
    TPL.set_metadata({'Author': 'https://github.com/johnpreston'})

    # Core networking: VPC, internet gateway, DHCP options and a private
    # Route53 zone named <stack>.local.
    VPC = VPCType('VPC',
                  CidrBlock=cidr_block,
                  EnableDnsHostnames=True,
                  EnableDnsSupport=True,
                  Tags=Tags(Name=Ref('AWS::StackName'),
                            EnvironmentName=Ref('AWS::StackName')))
    IGW = InternetGateway("InternetGateway")
    IGW_ATTACH = VPCGatewayAttachment("VPCGatewayAttachement",
                                      InternetGatewayId=Ref(IGW),
                                      VpcId=Ref(VPC))
    DHCP_OPTIONS = DHCPOptions('VpcDhcpOptions',
                               DomainName=Sub(f'${{AWS::StackName}}.local'),
                               DomainNameServers=['AmazonProvidedDNS'],
                               Tags=Tags(Name=Sub(f'DHCP-${{{VPC.title}}}')))
    DHCP_ATTACH = VPCDHCPOptionsAssociation('VpcDhcpOptionsAssociate',
                                            DhcpOptionsId=Ref(DHCP_OPTIONS),
                                            VpcId=Ref(VPC))
    DNS_HOSTED_ZONE = HostedZone(
        'VpcHostedZone',
        VPCs=[HostedZoneVPCs(VPCId=Ref(VPC), VPCRegion=Ref('AWS::Region'))],
        Name=Sub(f'${{AWS::StackName}}.local'),
        HostedZoneTags=Tags(Name=Sub(f'ZoneFor-${{{VPC.title}}}')))
    TPL.add_resource(VPC)
    TPL.add_resource(IGW)
    TPL.add_resource(IGW_ATTACH)
    TPL.add_resource(DHCP_OPTIONS)
    TPL.add_resource(DHCP_ATTACH)
    TPL.add_resource(DNS_HOSTED_ZONE)

    # Storage layer: one subnet per AZ, all sharing a single route table
    # (no default route -> no internet access from this layer).
    STORAGE_RTB = TPL.add_resource(
        RouteTable('StorageRtb', VpcId=Ref(VPC), Tags=Tags(Name='StorageRtb')))
    STORAGE_SUBNETS = []
    for count, subnet_cidr in zip(az_count, layers['stor']):
        subnet = Subnet(
            f'StorageSubnet{alpha[count].upper()}',
            CidrBlock=subnet_cidr,
            VpcId=Ref(VPC),
            AvailabilityZone=Sub(f'${{AWS::Region}}{alpha[count]}'),
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-Storage-{alpha[count]}'),
                      Usage="Storage"))
        # Return value unused; add_resource registers the association.
        subnet_assoc = TPL.add_resource(
            SubnetRouteTableAssociation(
                f'StorageSubnetAssoc{alpha[count].upper()}',
                SubnetId=Ref(subnet),
                RouteTableId=Ref(STORAGE_RTB)))
        STORAGE_SUBNETS.append(subnet)
        TPL.add_resource(subnet)

    # Public layer: shared route table with a default route to the IGW;
    # each AZ also gets an EIP + NAT gateway for the app layer to use.
    PUBLIC_RTB = TPL.add_resource(
        RouteTable('PublicRtb', VpcId=Ref(VPC), Tags=Tags(Name='PublicRtb')))
    PUBLIC_ROUTE = TPL.add_resource(
        Route('PublicDefaultRoute',
              GatewayId=Ref(IGW),
              RouteTableId=Ref(PUBLIC_RTB),
              DestinationCidrBlock='0.0.0.0/0'))
    PUBLIC_SUBNETS = []
    NAT_GATEWAYS = []
    for count, subnet_cidr in zip(az_count, layers['pub']):
        subnet = Subnet(
            f'PublicSubnet{alpha[count].upper()}',
            CidrBlock=subnet_cidr,
            VpcId=Ref(VPC),
            AvailabilityZone=Sub(f'${{AWS::Region}}{alpha[count]}'),
            MapPublicIpOnLaunch=True,
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-Public-{alpha[count]}')))
        eip = TPL.add_resource(
            EIP(f"NatGatewayEip{alpha[count].upper()}", Domain='vpc'))
        nat = NatGateway(f"NatGatewayAz{alpha[count].upper()}",
                         AllocationId=GetAtt(eip, 'AllocationId'),
                         SubnetId=Ref(subnet))
        subnet_assoc = TPL.add_resource(
            SubnetRouteTableAssociation(
                f'PublicSubnetsRtbAssoc{alpha[count].upper()}',
                RouteTableId=Ref(PUBLIC_RTB),
                SubnetId=Ref(subnet)))
        NAT_GATEWAYS.append(nat)
        PUBLIC_SUBNETS.append(subnet)
        TPL.add_resource(nat)
        TPL.add_resource(subnet)

    # App layer: per-AZ route table, each defaulting to that AZ's NAT
    # gateway (zipped positionally with NAT_GATEWAYS built above).
    APP_SUBNETS = []
    APP_RTBS = []
    for count, subnet_cidr, nat in zip(az_count, layers['app'], NAT_GATEWAYS):
        SUFFIX = alpha[count].upper()
        subnet = Subnet(
            f'AppSubnet{SUFFIX}',
            CidrBlock=subnet_cidr,
            VpcId=Ref(VPC),
            AvailabilityZone=Sub(f'${{AWS::Region}}{alpha[count]}'),
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-App-{alpha[count]}')))
        APP_SUBNETS.append(subnet)
        rtb = RouteTable(f'AppRtb{alpha[count].upper()}',
                         VpcId=Ref(VPC),
                         Tags=Tags(Name=f'AppRtb{alpha[count].upper()}'))
        APP_RTBS.append(rtb)
        route = Route(f'AppRoute{alpha[count].upper()}',
                      NatGatewayId=Ref(nat),
                      RouteTableId=Ref(rtb),
                      DestinationCidrBlock='0.0.0.0/0')
        subnet_assoc = SubnetRouteTableAssociation(
            f'SubnetRtbAssoc{alpha[count].upper()}',
            RouteTableId=Ref(rtb),
            SubnetId=Ref(subnet))
        TPL.add_resource(subnet)
        TPL.add_resource(rtb)
        TPL.add_resource(route)
        TPL.add_resource(subnet_assoc)

    # Gateway-type S3 endpoints for every layer's route table(s).
    APP_S3_ENDPOINT = VPCEndpoint(
        'AppS3Endpoint',
        VpcId=Ref(VPC),
        RouteTableIds=[Ref(rtb) for rtb in APP_RTBS],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.s3'),
        VpcEndpointType='Gateway',
    )
    PUBLIC_S3_ENDPOINT = VPCEndpoint(
        'PublicS3Endpoint',
        VpcId=Ref(VPC),
        RouteTableIds=[Ref(PUBLIC_RTB)],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.s3'),
        VpcEndpointType='Gateway',
    )
    STORAGE_S3_ENDPOINT = VPCEndpoint(
        'StorageS3Endpoint',
        VpcId=Ref(VPC),
        RouteTableIds=[Ref(STORAGE_RTB)],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.s3'),
        VpcEndpointType='Gateway')

    # Spare EIPs, one per AZ index; their AllocationIds are exported below.
    RESOURCES = []
    for count in az_count:
        resource = TPL.add_resource(EIP(f'Eip{count}', Domain='vpc'))
        RESOURCES.append(resource)
    TPL.add_resource(APP_S3_ENDPOINT)
    TPL.add_resource(PUBLIC_S3_ENDPOINT)
    TPL.add_resource(STORAGE_S3_ENDPOINT)

    # Interface endpoints: HTTPS-only SG admitting each app-layer CIDR.
    SG_RULES = []
    for subnet in layers['app']:
        RULE = SecurityGroupRule(
            IpProtocol="tcp",
            FromPort="443",
            ToPort="443",
            CidrIp=subnet,
        )
        SG_RULES.append(RULE)
    ENDPOINT_SG = TPL.add_resource(
        SecurityGroup(
            'VpcEndpointSecurityGroup',
            VpcId=Ref(VPC),
            GroupDescription='SG for all Interface VPC Endpoints',
            SecurityGroupIngress=SG_RULES,
            Tags=Tags(Name="sg-endpoints"),
        ))

    # One Interface endpoint per AWS service the app layer needs privately:
    # SNS, SQS, ECR (api+dkr), Secrets Manager, SSM and SSM Messages.
    APP_SNS_ENDPOINT = VPCEndpoint(
        'AppSNSEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.sns'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SNS_ENDPOINT)
    APP_SQS_ENDPOINT = VPCEndpoint(
        'AppSQSEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.sqs'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SQS_ENDPOINT)
    APP_ECR_API_ENDPOINT = VPCEndpoint(
        'AppECRAPIEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.ecr.api'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_ECR_API_ENDPOINT)
    APP_ECR_DKR_ENDPOINT = VPCEndpoint(
        'AppECRDKREndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.ecr.dkr'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_ECR_DKR_ENDPOINT)
    APP_SECRETS_MANAGER_ENDPOINT = VPCEndpoint(
        'AppSecretsManagerEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.secretsmanager'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SECRETS_MANAGER_ENDPOINT)
    APP_SSM_ENDPOINT = VPCEndpoint(
        'AppSSMEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.ssm'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SSM_ENDPOINT)
    APP_SSM_MESSAGES_ENDPOINT = VPCEndpoint(
        'AppSSMMessagesEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.ssmmessages'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SSM_MESSAGES_ENDPOINT)

    ################################################################################
    #
    # OUTPUTS
    #
    # object_outputs / comments_outputs are project helpers defined elsewhere.
    TPL.add_output(object_outputs(VPC, name_is_id=True))
    TPL.add_output(object_outputs(APP_SQS_ENDPOINT, name_is_id=True))
    TPL.add_output(object_outputs(APP_SNS_ENDPOINT, name_is_id=True))
    TPL.add_output(
        comments_outputs([{
            'EIP': Join(',', [GetAtt(resource, "AllocationId") for resource in RESOURCES])
        }, {
            'PublicSubnets': Join(',', [Ref(subnet) for subnet in PUBLIC_SUBNETS])
        }, {
            'StorageSubnets': Join(',', [Ref(subnet) for subnet in STORAGE_SUBNETS])
        }, {
            'ApplicationSubnets': Join(',', [Ref(subnet) for subnet in APP_SUBNETS])
        }, {
            'StackName': Ref('AWS::StackName')
        }, {
            'VpcZoneId': Ref(DNS_HOSTED_ZONE)
        }]))
    return TPL
EvaluationPeriods="120", MetricName="TestMetric", Namespace="AWS/ElasticMapReduce", Period="300", Statistic="AVERAGE", Threshold="50", Unit="PERCENT", Dimensions=[ emr.MetricDimension('my.custom.master.property', 'my.custom.master.value') ]))) ] return rules template = Template() template.set_description( "Sample CloudFormation template for creating an EMR cluster") keyname = template.add_parameter( Parameter("KeyName", Description="Name of an existing EC2 KeyPair to enable SSH " "to the instances", Type=KEY_PAIR_NAME)) subnet = template.add_parameter( Parameter("Subnet", Description="Subnet ID for creating the EMR cluster", Type=SUBNET_ID)) spot = template.add_parameter(
def setup_method(self):
    # pytest per-test hook: give every test a fresh, empty Template.
    self.template = Template()
#!/usr/bin/env python from troposphere import Base64, FindInMap, GetAtt from troposphere import Parameter, Output, Ref, Template, Condition, Equals, And, Or, Not, If import troposphere.ec2 as ec2 tpl = Template() tpl.add_version("2010-09-09") tpl.add_description("Creates EC2 security group and instance") params= {} conditions = {} mappings = {} resources = {} params['keyname'] = tpl.add_parameter( Parameter( "KeyName", Description="Name of an existing EC2 KeyPair to enable access to the instance", Type="String", Default="", ConstraintDescription="must be a string" ) ) params['os'] = tpl.add_parameter( Parameter( "OperatingSystem", Description="Chosen operating system", Type="String", Default="AmazonLinux2",
#!/usr/bin/env python """ Script to create a new Lambda Layer CFN template via Troposphere """ from datetime import datetime import argparse from troposphere import (Parameter, Template, GetAtt, Sub, Ref) from troposphere.awslambda import (LayerVersion, Content) VERSION = datetime.utcnow().isoformat() TEMPLATE = Template() TEMPLATE.set_description( f'Template to create a Lambda layer. Version {VERSION}') PARSER = argparse.ArgumentParser() PARSER.add_argument('--runtimes', action='append', required=True) PARSER.add_argument('--s3-bucket', required=True) PARSER.add_argument('--s3-key', required=True) PARSER.add_argument('--json', action='store_true') ARGS = PARSER.parse_args() LAYER_NAME = TEMPLATE.add_parameter( Parameter('LayerName', Type="String", AllowedPattern='[a-z]+')) LAYER = TEMPLATE.add_resource( LayerVersion('LayerVersion', CompatibleRuntimes=ARGS.runtimes, Description=Sub(f'Layer ${{{LAYER_NAME.title}}}'), LayerName=Ref(LAYER_NAME),
def generate_template(service_name):
    """Build the exploitation-lab stack: VPC, public/private EC2 instances,
    IAM roles/profiles, and a CloudTrail trail logging to S3.

    Relies on module-level globals defined elsewhere in this file:
    ``ami_public_mapping``, ``ami_private_mapping``,
    ``public_instance_userdata`` and ``private_instance_userdata``.

    :param service_name: string embedded into most resource titles/names.
    :returns: str -- the template serialized with ``to_json()``.
    """
    t = Template()
    t.add_version('2010-09-09')
    t.add_description("""\
AWS CloudFormation Template for AWS Exploitation Lab
""")
    t.add_mapping("PublicRegionMap", ami_public_mapping)
    t.add_mapping("PrivateRegionMap", ami_private_mapping)

    # --- Input parameters -------------------------------------------------
    keyname_param = t.add_parameter(
        Parameter(
            'KeyName',
            ConstraintDescription='must be the name of an existing EC2 KeyPair.',
            Description='Name of an existing EC2 KeyPair to enable SSH access to \
the instance',
            Type='AWS::EC2::KeyPair::KeyName',
        ))
    sshlocation_param = t.add_parameter(
        Parameter(
            'SSHLocation',
            Description=' The IP address range that can be used to SSH to the EC2 \
instances',
            Type='String',
            MinLength='9',
            MaxLength='18',
            Default='0.0.0.0/0',
            AllowedPattern="(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
            ConstraintDescription=(
                "must be a valid IP CIDR range of the form x.x.x.x/x."),
        ))
    instanceType_param = t.add_parameter(Parameter(
        'InstanceType',
        Type='String',
        Description='WebServer EC2 instance type',
        Default='t2.micro',
        AllowedValues=[
            't2.micro',
            't2.small',
            't2.medium',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
        ],
        ConstraintDescription='must be a valid EC2 instance type.',
    ))
    ref_stack_id = Ref('AWS::StackId')

    # --- IAM: read-only role for the public box, plus a private-box role
    # that can additionally snapshot EBS volumes. ---------------------------
    ec2_role = t.add_resource(Role(
        "%sEC2Role" % service_name,
        AssumeRolePolicyDocument=awacs.aws.Policy(
            Statement=[
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[awacs.aws.Action("sts", "AssumeRole")],
                    Principal=awacs.aws.Principal("Service", ["ec2.amazonaws.com"])
                )
            ]
        )
    ))
    # Attribute set after construction; equivalent to passing it as a kwarg.
    ec2_role.ManagedPolicyArns = [
        "arn:aws:iam::aws:policy/ReadOnlyAccess"
    ]
    ec2_snapshot_policy_document = awacs.aws.Policy(
        Statement=[
            awacs.aws.Statement(
                Sid="PermitEC2Snapshots",
                Effect=awacs.aws.Allow,
                Action=[
                    awacs.aws.Action("ec2", "CreateSnapshot"),
                    awacs.aws.Action("ec2", "ModifySnapshotAttribute"),
                ],
                Resource=["*"]
            )
        ]
    )
    ec2_snapshot_policy = Policy(
        PolicyName="EC2SnapshotPermissions",
        PolicyDocument=ec2_snapshot_policy_document
    )
    priv_ec2_role = t.add_resource(Role(
        "%sPrivEC2Role" % service_name,
        AssumeRolePolicyDocument=awacs.aws.Policy(
            Statement=[
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[awacs.aws.Action("sts", "AssumeRole")],
                    Principal=awacs.aws.Principal("Service", ["ec2.amazonaws.com"])
                )
            ]
        ),
        Policies=[ec2_snapshot_policy]
    ))
    priv_ec2_role.ManagedPolicyArns = [
        "arn:aws:iam::aws:policy/ReadOnlyAccess"
    ]

    # --- Networking: one VPC, a public and a private /24. -----------------
    VPC_ref = t.add_resource(
        VPC(
            'VPC',
            CidrBlock='10.0.0.0/16',
            Tags=Tags(
                Application=ref_stack_id)))
    instanceProfile = t.add_resource(
        InstanceProfile(
            "InstanceProfile",
            InstanceProfileName="%sInstanceRole" % (service_name),
            Roles=[Ref(ec2_role)]))
    privInstanceProfile = t.add_resource(
        InstanceProfile(
            "PrivInstanceProfile",
            InstanceProfileName="%sPrivInstanceRole" % (service_name),
            Roles=[Ref(priv_ec2_role)]))
    public_subnet = t.add_resource(
        Subnet(
            '%sSubnetPublic' % service_name,
            MapPublicIpOnLaunch=True,
            CidrBlock='10.0.1.0/24',
            VpcId=Ref(VPC_ref),
            Tags=Tags(
                Application=ref_stack_id,
                Name="%sSubnet_public" % (service_name))
        )
    )
    private_subnet = t.add_resource(
        Subnet(
            '%sSubnetPrivate' % service_name,
            MapPublicIpOnLaunch=False,
            CidrBlock='10.0.2.0/24',
            VpcId=Ref(VPC_ref),
            Tags=Tags(
                Application=ref_stack_id,
                Name="%sSubnet_private" % (service_name))
        )
    )
    internetGateway = t.add_resource(
        InternetGateway(
            'InternetGateway',
            Tags=Tags(
                Application=ref_stack_id,
                Name="%sInternetGateway" % service_name)))
    gatewayAttachment = t.add_resource(
        VPCGatewayAttachment(
            'AttachGateway',
            VpcId=Ref(VPC_ref),
            InternetGatewayId=Ref(internetGateway)))
    routeTable = t.add_resource(
        RouteTable(
            'RouteTable',
            VpcId=Ref(VPC_ref),
            Tags=Tags(
                Application=ref_stack_id,
                Name="%sRouteTable" % service_name)))
    route = t.add_resource(
        Route(
            'Route',
            DependsOn='AttachGateway',
            GatewayId=Ref('InternetGateway'),
            DestinationCidrBlock='0.0.0.0/0',
            RouteTableId=Ref(routeTable),
        ))
    # Only associate this Route Table with the public subnet
    subnetRouteTableAssociation = t.add_resource(
        SubnetRouteTableAssociation(
            'SubnetRouteTableAssociation',
            SubnetId=Ref(public_subnet),
            RouteTableId=Ref(routeTable),
        ))

    # --- Security group: SSH restricted to SSHLocation; web ports open to
    # the world; all TCP open to 10.0.0.0/8 (lab-internal traffic). --------
    instanceSecurityGroup = t.add_resource(
        SecurityGroup(
            'InstanceSecurityGroup',
            GroupDescription='%sSecurityGroup' % service_name,
            SecurityGroupIngress=[
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='22',
                    ToPort='22',
                    CidrIp=Ref(sshlocation_param)),
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='80',
                    ToPort='80',
                    CidrIp='0.0.0.0/0'),
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='1080',
                    ToPort='1080',
                    CidrIp='0.0.0.0/0'),
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='443',
                    ToPort='443',
                    CidrIp='0.0.0.0/0'),
                SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='0',
                    ToPort='65535',
                    CidrIp="10.0.0.0/8"),
            ],
            VpcId=Ref(VPC_ref),
        )
    )

    # --- Instances: one public (with public IP) and one private. ----------
    public_instance = t.add_resource(
        Instance(
            "Public%sInstance" % service_name,
            ImageId=FindInMap("PublicRegionMap", Ref("AWS::Region"), "AMI"),
            InstanceType=Ref(instanceType_param),
            KeyName=Ref(keyname_param),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=[
                        Ref(instanceSecurityGroup)],
                    AssociatePublicIpAddress='true',
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(public_subnet))],
            UserData=Base64(public_instance_userdata),
            Tags=Tags(
                Application=ref_stack_id,
                Name='%sPublicInstance' % (service_name))
        )
    )
    private_instance = t.add_resource(
        Instance(
            "Private%sInstance" % service_name,
            ImageId=FindInMap("PrivateRegionMap", Ref("AWS::Region"), "AMI"),
            InstanceType=Ref(instanceType_param),
            KeyName=Ref(keyname_param),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=[
                        Ref(instanceSecurityGroup)],
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(private_subnet))],
            UserData=Base64(private_instance_userdata),
            Tags=Tags(
                Application=ref_stack_id,
                Name='%sPrivateInstance' % (service_name)),
            # Literal name string must match PrivInstanceProfile's
            # InstanceProfileName above.
            IamInstanceProfile="%sPrivInstanceRole" % (service_name)
        )
    )
    outputs = []
    outputs.append(
        Output(
            "PublicIP",
            Description="IP Address of Public Instance",
            Value=GetAtt(public_instance, "PublicIp"),
        )
    )
    t.add_output(outputs)

    # Set up S3 Bucket and CloudTrail
    S3Bucket = t.add_resource(
        Bucket(
            "S3Bucket",
            DeletionPolicy="Retain"
        )
    )
    # Bucket policy letting CloudTrail write logs and read the bucket ACL.
    S3PolicyDocument = awacs.aws.PolicyDocument(
        Id='EnforceServersideEncryption',
        Version='2012-10-17',
        Statement=[
            awacs.aws.Statement(
                Sid='PermitCTBucketPut',
                Action=[s3.PutObject],
                Effect=awacs.aws.Allow,
                Principal=awacs.aws.Principal("Service", ["cloudtrail.amazonaws.com"]),
                Resource=[Join('', [s3.ARN(''), Ref(S3Bucket), "/*"])],
            ),
            awacs.aws.Statement(
                Sid='PermitCTBucketACLRead',
                Action=[s3.GetBucketAcl],
                Effect=awacs.aws.Allow,
                Principal=awacs.aws.Principal("Service", ["cloudtrail.amazonaws.com"]),
                Resource=[Join('', [s3.ARN(''), Ref(S3Bucket)])],
            )
        ]
    )
    S3BucketPolicy = t.add_resource(
        BucketPolicy(
            "BucketPolicy",
            PolicyDocument=S3PolicyDocument,
            Bucket=Ref(S3Bucket),
            DependsOn=[S3Bucket]
        )
    )
    myTrail = t.add_resource(
        Trail(
            "CloudTrail",
            IsLogging=True,
            S3BucketName=Ref(S3Bucket),
            # Policy must be in place before the trail starts delivering.
            DependsOn=["BucketPolicy"],
        )
    )
    myTrail.IsMultiRegionTrail = True
    myTrail.IncludeGlobalServiceEvents = True
    return t.to_json()
def test_parameter(self):
    """Registering the same Parameter title twice must raise ValueError."""
    template = Template()
    duplicate = Parameter("MyParameter", Type="String")
    template.add_parameter(duplicate)
    with self.assertRaises(ValueError):
        template.add_parameter(duplicate)
def test_resource(self):
    """A resource title may only be added to a template once."""
    template = Template()
    duplicate = FakeAWSObject('fake', callcorrect=True)
    template.add_resource(duplicate)
    with self.assertRaises(ValueError):
        template.add_resource(duplicate)
def test_invalid_parameter_property_in_template(self):
    """AllowedPattern on a Number parameter must fail at serialization."""
    template = Template()
    template.add_parameter(
        Parameter("BasicNumber", Type="Number", AllowedPattern=".*"))
    with self.assertRaises(ValueError):
        template.to_json()
def test_output(self):
    """Registering the same Output twice must raise ValueError."""
    template = Template()
    duplicate = Output("MyOutput", Value="myvalue")
    template.add_output(duplicate)
    with self.assertRaises(ValueError):
        template.add_output(duplicate)
def test_badrequired(self):
    """Serializing an Instance missing required properties must fail."""
    with self.assertRaises(ValueError):
        template = Template()
        template.add_resource(Instance('ec2instance'))
        template.to_json()
def test_mutualexclusion(self):
    """Mutually exclusive properties are rejected at serialization time."""
    template = Template()
    conflicting = FakeAWSObject('fake', callcorrect=True, singlelist=[10])
    template.add_resource(conflicting)
    with self.assertRaises(ValueError):
        template.to_json()
def generate_queues_template(QueueNamePrefix, Environment):
    """Build a template for a KMS-encrypted SQS queue with a DLQ.

    Resources: a rotating KMS key (plus alias ``alias/<queue>``), a
    dead-letter queue, and a primary queue encrypted with the key and
    redriving to the DLQ after 10 failed receives.  Exports the primary
    queue ARN and the KMS key ARN.

    Relies on module-level names defined elsewhere (``USER``, ``All``,
    ``AllResources``, ``KmsAction``, ...).

    :param QueueNamePrefix: prefix for both queue names.
    :param Environment: environment suffix appended to the names.
    :returns: the populated troposphere ``Template`` object.
    """
    QueueName = f'{QueueNamePrefix}-{Environment}'
    DLQQueueName = f'{QueueNamePrefix}DLQ-{Environment}'
    t = Template(Description='A template for a messaging queue')
    # Direct attribute assignment instead of t.set_version().
    t.version = '2010-09-09'
    KMSKey = t.add_resource(
        Key('KMSKey',
            Description=f'KMS Key for encrypting {QueueName}',
            Enabled=True,
            EnableKeyRotation=True,
            KeyPolicy=Policy(
                Version='2012-10-17',
                Statement=[
                    # Root account keeps full control of the key.
                    Statement(Sid='Enable IAM User Permissions',
                              Effect=Allow,
                              Principal=AWSPrincipal(
                                  Sub('arn:aws:iam::${AWS::AccountId}:root')),
                              Action=[KmsAction(All)],
                              Resource=AllResources),
                    # Named admins get management (not usage) permissions.
                    Statement(Sid='Allow access for Key Administrators',
                              Effect=Allow,
                              Principal=AWSPrincipal([
                                  Sub(f'{USER}/frank'),
                                  Sub(f'{USER}/moonunit')
                              ]),
                              Action=[
                                  KmsAction('Create*'),
                                  KmsAction('Describe*'),
                                  KmsAction('Enable*'),
                                  KmsAction('List*'),
                                  KmsAction('Put*'),
                                  KmsAction('Update*'),
                                  KmsAction('Revoke*'),
                                  KmsAction('Disable*'),
                                  KmsAction('Get*'),
                                  KmsAction('Delete*'),
                                  KmsAction('ScheduleKeyDeletion'),
                                  KmsAction('CancelKeyDeletion')
                              ],
                              Resource=AllResources)
                ])))
    t.add_resource(
        Alias('KMSKeyAlias',
              AliasName=f'alias/{QueueName}',
              TargetKeyId=Ref(KMSKey)))
    dlq = t.add_resource(
        Queue(
            'DeadLetterQueue',
            QueueName=DLQQueueName,
            MaximumMessageSize=262144,  # 256KiB
            MessageRetentionPeriod=1209600,  # 14 days
            VisibilityTimeout=30))
    t.add_resource(
        Queue(
            'PrimaryQueue',
            QueueName=QueueName,
            MaximumMessageSize=262144,  # 256KiB
            MessageRetentionPeriod=1209600,  # 14 days
            VisibilityTimeout=30,
            RedrivePolicy=RedrivePolicy(deadLetterTargetArn=GetAtt(
                dlq.title, 'Arn'), maxReceiveCount=10),
            KmsMasterKeyId=Ref(KMSKey),
            KmsDataKeyReusePeriodSeconds=300))
    t.add_output([
        Output('QueueArn',
               Description=f'ARN of {QueueName} Queue',
               Value=GetAtt('PrimaryQueue', 'Arn'),
               Export=Export(Name(Sub('${AWS::StackName}:PrimaryQueueArn')))),
        Output('KmsKeyArn',
               Description=f'KMS Key ARN for {QueueName} Queue',
               Value=GetAtt('KMSKey', 'Arn'),
               Export=Export(Name(Sub('${AWS::StackName}:KmsKeyArn'))))
    ])
    return t
def reset_template(self):
    # Drop all accumulated state: new empty Template, cleared render/version
    # caches so the next access recomputes them.
    self.template = Template()
    self._rendered = None
    self._version = None
def test_ref(self):
    """add_resource returns the object, whose .name equals its title."""
    template = Template()
    added = template.add_resource(Instance('fake'))
    self.assertEqual(added.name, 'fake')
from re import M from troposphere import (Template, Ref, Parameter, Sub, GetAtt, config, s3, sns, iam) from awacs.aws import (PolicyDocument, Statement, Principal, Allow) import awacs.s3 import awacs.sns import awacs.sts config_service_principal = Principal('Service', 'config.amazonaws.com') template = Template( Description='Create Config Rules ready to aggregate to SecurityHub') notification_email = template.add_parameter( Parameter('NotificationEmail', Default='*****@*****.**', Description= 'Email address for AWS Config notifications (for new topics).', Type='String')) aggregator_account_id = template.add_parameter( Parameter( 'AggregatorAccountId', Description='Account ID of SecurityHub aggregation account', Type='String', )) config_bucket = template.add_resource( s3.Bucket('ConfigBucket', DeletionPolicy='Retain')) config_bucket_policy = template.add_resource(
def test_required_title_error(self):
    """to_json on an incomplete Instance raises ValueError matching 'title:'."""
    # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
    # assertRaisesRegex is the supported spelling (same semantics).
    with self.assertRaisesRegex(ValueError, "title:"):
        t = Template()
        t.add_resource(Instance('ec2instance'))
        t.to_json()
def GenerateStepJenkinsLayer():
    """Build the Jenkins hackathon layer: SG, ENI, EIP, EC2 host and DNS.

    The instance gets a fixed network interface (eth0) in the first public
    subnet, an Elastic IP associated to that interface, and a Route53 A
    record ``jenkins.<zone>`` pointing at the host's public IP.

    :returns: the populated troposphere ``Template`` object.
    """
    t = Template()
    t.add_description("""\
Jenkins for Step Hackathon Layer
""")

    # --- Parameters (defaults target the hackathon environment). ----------
    stackname_param = t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: hackathon)",
            Type="String",
            Default="hackathon",
        ))
    vpcid_param = t.add_parameter(
        Parameter(
            "VpcId",
            Type="String",
            Description="VpcId of your existing Virtual Private Cloud (VPC)",
            Default="vpc-fab00e9f"))
    subnets = t.add_parameter(
        Parameter(
            "Subnets",
            Type="CommaDelimitedList",
            Description=(
                "The list SubnetIds, for public subnets in the "
                "region and in your Virtual Private Cloud (VPC) - minimum one"
            ),
            Default="subnet-b68f3bef,subnet-9a6208ff,subnet-bfdd4fc8"))
    keypair_param = t.add_parameter(
        Parameter("KeyPair",
                  Description="Name of an existing EC2 KeyPair to enable SSH "
                  "access to the instance",
                  Type="String",
                  Default="glueteam"))
    jenkins_ami_id_param = t.add_parameter(
        Parameter("JenkinsAmiId",
                  Description="Jenkins server AMI ID (default: ami-f3641a84)",
                  Type="String",
                  Default="ami-f3641a84"))
    operations_subdomain_hosted_zone_param = t.add_parameter(
        Parameter("DashsoftHostedZoneParam",
                  Description="HostedZone (default: hackathon.operations.dk)",
                  Type="String",
                  Default="hackathon.operations.dk"))
    iam_role_param = t.add_parameter(
        Parameter(
            "IamRole",
            Description="IAM Role name",
            Type="String",
        ))

    # --------- Jenkins instance
    # SSH/HTTP/HTTPS open to the world.
    jenkins_sg = t.add_resource(
        ec2.SecurityGroup(
            'JenkinsSG',
            GroupDescription='Security group for Jenkins host',
            VpcId=Ref(vpcid_param),
            Tags=Tags(Name=Join("", [Ref(stackname_param), "SG"])),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="443",
                    ToPort="443",
                    CidrIp="0.0.0.0/0",
                ),
            ]))
    jenkins_eip = t.add_resource(ec2.EIP(
        'JenkinsEIP',
        Domain='vpc',
    ))
    # Standalone ENI in the first public subnet; attached to the host below.
    jenkins_eth0 = t.add_resource(
        ec2.NetworkInterface(
            "JenkinsEth0",
            Description=Join("", [Ref(stackname_param), " Eth0"]),
            GroupSet=[
                Ref(jenkins_sg),
            ],
            SourceDestCheck=True,
            SubnetId=Select(0, Ref(subnets)),
            Tags=Tags(
                Name=Join("", [Ref(stackname_param), " Interface 0"]),
                Interface="eth0",
            )))
    jenkins_host = t.add_resource(
        ec2.Instance(
            'JenkinsHost',
            ImageId=Ref(jenkins_ami_id_param),
            InstanceType='m3.medium',
            KeyName=Ref(keypair_param),
            IamInstanceProfile=Ref(iam_role_param),
            NetworkInterfaces=[
                ec2.NetworkInterfaceProperty(
                    NetworkInterfaceId=Ref(jenkins_eth0),
                    DeviceIndex="0",
                ),
            ],
            Tags=Tags(Name=Ref(stackname_param), Id=Ref(stackname_param)),
            # Minimal user data: empty bash script placeholder.
            UserData=Base64(Join('', [
                '#!/bin/bash\n',
            ])),
        ))
    jenkins_eip_assoc = t.add_resource(
        ec2.EIPAssociation(
            "JenkinsEIPAssoc",
            NetworkInterfaceId=Ref(jenkins_eth0),
            AllocationId=GetAtt("JenkinsEIP", "AllocationId"),
            PrivateIpAddress=GetAtt("JenkinsEth0", "PrimaryPrivateIpAddress"),
        ))
    # A-record (not CNAME, despite the title) resolving to the public IP.
    jenkins_host_cname = t.add_resource(
        route53.RecordSetType(
            "JenkinsHostCname",
            HostedZoneName=Join(
                "", [Ref(operations_subdomain_hosted_zone_param), "."]),
            Comment=Join("", ["Jenkins host CNAME for ", Ref(stackname_param)]),
            Name=Join(
                "", ["jenkins.", Ref(operations_subdomain_hosted_zone_param), "."]),
            Type="A",
            TTL="60",
            ResourceRecords=[GetAtt("JenkinsHost", "PublicIp")],
            DependsOn="JenkinsEIPAssoc"))
    return t
def __init__(self, vpc_config, description="VPC built by VPCBuilder"):
    """Initialise the builder with an empty 2010-09-09 template.

    :param vpc_config: VPC configuration consumed by later build steps.
    :param description: description string stamped onto the template.
    """
    self.__vpc_config = vpc_config
    template = Template()
    template.set_version("2010-09-09")
    template.set_description(description)
    self.__template = template
def main():
    """Generates the CloudFormation template.

    Builds a VPC with two public and two private subnets across two AZs, an
    Internet Gateway with a default public route, one NAT Gateway per AZ with
    default private routes, plus outputs for every subnet, and prints the
    template as JSON to stdout.
    """
    template = Template()
    template.add_version("2010-09-09")
    # NOTE(review): "Availabilty" typo below is in the deployed description
    # string; left untouched here because it is runtime output.
    template.add_description(
        'This template deploys a VPC, with a pair of public and private subnets spread ' +
        'across two Availabilty Zones. It deploys an Internet Gateway, with a default ' +
        'route on the public subnets. It deploys a pair of NAT Gateways (one in each AZ), ' +
        'and default routes for them in the private subnets.'
    )

    # Parameters
    # EnvironmentName
    env_param = template.add_parameter(Parameter(
        'EnvironmentName',
        Type='String',
        Description='An environment name that will be prefixed to resource names',
    ))
    # VPC CIDR
    vpc_cidr_param = template.add_parameter(Parameter(
        'VpcCIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for this VPC',
        Default='10.192.0.0/16',
    ))
    # PublicSubnet1CIDR
    pub_subnet_1_param = template.add_parameter(Parameter(
        'PublicSubnet1CIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for the public subnet ' +
        'in the first Availability Zone',
        Default='10.192.10.0/24',
    ))
    # PublicSubnet2CIDR
    pub_subnet_2_param = template.add_parameter(Parameter(
        'PublicSubnet2CIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for the public subnet ' +
        'in the second Availability Zone',
        Default='10.192.11.0/24',
    ))
    # PrivateSubnet1CIDR
    prvt_subnet_1_param = template.add_parameter(Parameter(
        'PrivateSubnet1CIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for the private subnet ' +
        'in the first Availability Zone',
        Default='10.192.20.0/24',
    ))
    # PrivateSubnet2CIDR
    prvt_subnet_2_param = template.add_parameter(Parameter(
        'PrivateSubnet2CIDR',
        Type='String',
        Description='Please enter the IP range (CIDR notation) for the private subnet ' +
        'in the second Availability Zone',
        Default='10.192.21.0/24',
    ))

    # Resources
    # VPC
    vpc = template.add_resource(
        VPC(
            'VPC',
            CidrBlock=Ref(vpc_cidr_param),
            Tags=Tags(Name=Ref(env_param)),
        )
    )
    # InternetGateway
    internet_gateway = template.add_resource(
        InternetGateway(
            'InternetGateway',
            Tags=Tags(Name=Ref(env_param)),
        )
    )
    # InternetGatewayAttachment
    template.add_resource(
        VPCGatewayAttachment(
            'InternetGatewayAttachment',
            InternetGatewayId=Ref(internet_gateway),
            VpcId=Ref(vpc),
        )
    )
    # PublicSubnet1 — first AZ of the region (GetAZs with empty string uses
    # the stack's own region).
    pub_subnet1 = template.add_resource(
        Subnet(
            'PublicSubnet1',
            VpcId=Ref(vpc),
            AvailabilityZone=Select('0', GetAZs("")),
            CidrBlock=Ref(pub_subnet_1_param),
            MapPublicIpOnLaunch=False,
            Tags=Tags(Name=Sub('${EnvironmentName} Public Subnet (AZ1)')),
        )
    )
    # PublicSubnet2
    pub_subnet2 = template.add_resource(
        Subnet(
            'PublicSubnet2',
            VpcId=Ref(vpc),
            AvailabilityZone=Select('1', GetAZs("")),
            CidrBlock=Ref(pub_subnet_2_param),
            MapPublicIpOnLaunch=False,
            Tags=Tags(Name=Sub('${EnvironmentName} Public Subnet (AZ2)')),
        )
    )
    # PrivateSubnet1
    prvt_subnet1 = template.add_resource(
        Subnet(
            'PrivateSubnet1',
            VpcId=Ref(vpc),
            AvailabilityZone=Select('0', GetAZs("")),
            CidrBlock=Ref(prvt_subnet_1_param),
            MapPublicIpOnLaunch=False,
            Tags=Tags(Name=Sub('${EnvironmentName} Private Subnet (AZ1)')),
        )
    )
    # PrivateSubnet2
    prvt_subnet2 = template.add_resource(
        Subnet(
            'PrivateSubnet2',
            VpcId=Ref(vpc),
            AvailabilityZone=Select('1', GetAZs("")),
            CidrBlock=Ref(prvt_subnet_2_param),
            MapPublicIpOnLaunch=False,
            Tags=Tags(Name=Sub('${EnvironmentName} Private Subnet (AZ2)')),
        )
    )
    # NatGateway1EIP — EIPs must wait for the IGW attachment per AWS docs.
    nat_gateway1_eip = template.add_resource(
        EIP(
            'NatGateway1EIP',
            DependsOn='InternetGatewayAttachment',
            Domain='vpc',
        )
    )
    # NatGateway2EIP
    nat_gateway2_eip = template.add_resource(
        EIP(
            'NatGateway2EIP',
            DependsOn='InternetGatewayAttachment',
            Domain='vpc',
        )
    )
    # NatGateway1 — lives in the public subnet of AZ1.
    nat_gateway1 = template.add_resource(
        NatGateway(
            'NatGateway1',
            AllocationId=GetAtt(nat_gateway1_eip, 'AllocationId'),
            SubnetId=Ref(pub_subnet1),
        )
    )
    # NatGateway2 — lives in the public subnet of AZ2.
    nat_gateway2 = template.add_resource(
        NatGateway(
            'NatGateway2',
            AllocationId=GetAtt(nat_gateway2_eip, 'AllocationId'),
            SubnetId=Ref(pub_subnet2),
        )
    )
    # PublicRouteTable
    pub_route_table = template.add_resource(
        RouteTable(
            'PublicRouteTable',
            VpcId=Ref(vpc),
            Tags=Tags(Name=Sub('${EnvironmentName} Public Routes')),
        )
    )
    # DefaultPublicRoute — all non-local public traffic via the IGW.
    template.add_resource(
        Route(
            'DefaultPublicRoute',
            RouteTableId=Ref(pub_route_table),
            DestinationCidrBlock='0.0.0.0/0',
            GatewayId=Ref(internet_gateway),
        )
    )
    # PublicSubnet1RouteTableAssociation
    template.add_resource(
        SubnetRouteTableAssociation(
            'PublicSubnet1RouteTableAssociation',
            RouteTableId=Ref(pub_route_table),
            SubnetId=Ref(pub_subnet1),
        )
    )
    # PublicSubnet2RouteTableAssociation
    template.add_resource(
        SubnetRouteTableAssociation(
            'PublicSubnet2RouteTableAssociation',
            RouteTableId=Ref(pub_route_table),
            SubnetId=Ref(pub_subnet2),
        )
    )
    # PrivateRouteTable1
    prvt_route_table1 = template.add_resource(
        RouteTable(
            'PrivateRouteTable1',
            VpcId=Ref(vpc),
            Tags=Tags(Name=Sub('${EnvironmentName} Private Routes (AZ1)')),
        )
    )
    # DefaultPrivateRoute1 — private AZ1 egress via NAT gateway 1.
    template.add_resource(
        Route(
            'DefaultPrivateRoute1',
            RouteTableId=Ref(prvt_route_table1),
            DestinationCidrBlock='0.0.0.0/0',
            NatGatewayId=Ref(nat_gateway1),
        )
    )
    # PrivateSubnet1RouteTableAssociation
    template.add_resource(
        SubnetRouteTableAssociation(
            'PrivateSubnet1RouteTableAssociation',
            RouteTableId=Ref(prvt_route_table1),
            SubnetId=Ref(prvt_subnet1),
        )
    )
    # PrivateRouteTable2
    prvt_route_table2 = template.add_resource(
        RouteTable(
            'PrivateRouteTable2',
            VpcId=Ref(vpc),
            Tags=Tags(Name=Sub('${EnvironmentName} Private Routes (AZ2)')),
        )
    )
    # DefaultPrivateRoute2 — private AZ2 egress via NAT gateway 2.
    template.add_resource(
        Route(
            'DefaultPrivateRoute2',
            RouteTableId=Ref(prvt_route_table2),
            DestinationCidrBlock='0.0.0.0/0',
            NatGatewayId=Ref(nat_gateway2),
        )
    )
    # PrivateSubnet2RouteTableAssociation (comment corrected; a copy-paste of
    # the AZ1 comment previously labeled this block "PrivateSubnet1...").
    template.add_resource(
        SubnetRouteTableAssociation(
            'PrivateSubnet2RouteTableAssociation',
            RouteTableId=Ref(prvt_route_table2),
            SubnetId=Ref(prvt_subnet2),
        )
    )

    # Outputs
    template.add_output(Output(
        'VPC',
        Description='A reference to the created VPC',
        Value=Ref(vpc),
    ))
    template.add_output(Output(
        'PublicSubnets',
        Description='A list of the public subnets',
        Value=Join(',', [Ref(pub_subnet1), Ref(pub_subnet2)]),
    ))
    template.add_output(Output(
        'PrivateSubnets',
        Description='A list of the private subnets',
        Value=Join(',', [Ref(prvt_subnet1), Ref(prvt_subnet2)]),
    ))
    template.add_output(Output(
        'PublicSubnet1',
        Description='A reference to the public subnet in the 1st Availability Zone',
        Value=Ref(pub_subnet1),
    ))
    template.add_output(Output(
        'PublicSubnet2',
        Description='A reference to the public subnet in the 2nd Availability Zone',
        Value=Ref(pub_subnet2),
    ))
    template.add_output(Output(
        'PrivateSubnet1',
        Description='A reference to the private subnet in the 1st Availability Zone',
        Value=Ref(prvt_subnet1),
    ))
    template.add_output(Output(
        'PrivateSubnet2',
        Description='A reference to the private subnet in the 2nd Availability Zone',
        Value=Ref(prvt_subnet2),
    ))

    print(template.to_json())
def test_simple_table(self): serverless_table = SimpleTable("SomeTable") t = Template() t.add_resource(serverless_table) t.to_json()
def create_template(self):
    """Render an IAM CloudFormation template for the configured template type.

    Supports two types (self.template_type):
      - 'project_role': IAM Group plus a ManagedPolicy attached to it.
      - 'project_role_jump_account': standalone ManagedPolicy plus a Group
        (policy not attached; Resources are stripped from self._config first).

    The rendered JSON is written to a persistent temp file (suffix '.rdr')
    whose path is returned; a copy is also written to <cwd>/template.json
    unless it already exists or the environment config provides a template.

    Exits the process (sys.exit(1)) on an unknown template type.

    NOTE(review): the two branches are near-duplicates and could be factored
    into a shared helper; left untouched here to preserve exact behavior.
    """
    if self.template_type == 'project_role':
        template = Template()
        namespace_param = template.add_parameter(
            Parameter(
                "IAMNamespace",
                Description="Namespace for IAM users, policies, etc.",
                Type="String",
                Default="/"))
        uppercase_env_prefix_param = template.add_parameter(
            Parameter(
                "UppercaseAwsEnvironmentPrefix",
                Description=
                "Uppercase abbreviation for AWS account (i.e. DEV,QA,PROD)",
                Type="String"))
        lowercase_env_prefix_param = template.add_parameter(
            Parameter(
                "LowercaseAwsEnvironmentPrefix",
                Description=
                "Lowercase abbreviation for AWS account (i.e. dev,qa,prod)",
                Type="String"))
        aws_account_number_param = template.add_parameter(
            Parameter("AccountNumber",
                      Description="AWS Account Number",
                      Type="String"))
        uppercase_project_name_param = template.add_parameter(
            Parameter("UppercaseProjectName",
                      Description="Uppercase Project Name",
                      Type="String"))
        lowercase_project_name_param = template.add_parameter(
            Parameter("LowercaseProjectName",
                      Description="Lowercase Project Name",
                      Type="String"))
        # Statements come from the subclass/instance hook.
        pd = PolicyDocument(Version="2012-10-17",
                            Id="Account-Permissions",
                            Statement=self.create_policy_document())
        # Group named <ENVPREFIX>-<PROJECT> under the IAM namespace.
        iam_group = template.add_resource(
            Group(
                'IamGroup',
                #Join('-', [Ref(uppercase_env_prefix_param),Ref(uppercase_project_name_param)])
                Path=Ref(namespace_param),
                GroupName=Join('-', [
                    Ref(uppercase_env_prefix_param),
                    Ref(uppercase_project_name_param)
                ])
                #'ManagedPolicyArns': ([basestring], False),
                #Policies'= ([Policy], False)
            ))
        # Managed policy attached to the group by (constructed) group name.
        iam_managed_policy = template.add_resource(
            ManagedPolicy("ManagedPolicy",
                          Description=Join('-', [
                              Ref(uppercase_env_prefix_param),
                              Ref(uppercase_project_name_param), 'project'
                          ]),
                          Groups=[
                              Join('-', [
                                  Ref(uppercase_env_prefix_param),
                                  Ref(uppercase_project_name_param)
                              ])
                          ],
                          ManagedPolicyName=Join('-', [
                              Ref(uppercase_env_prefix_param),
                              Ref(uppercase_project_name_param)
                          ]),
                          Path=Ref(namespace_param),
                          PolicyDocument=pd))
        if self.debug:
            print(template.to_json())
        # delete=False keeps the file around so the caller can consume
        # tmp.name after this method returns.
        with tempfile.NamedTemporaryFile(mode='w', suffix='.rdr',
                                         delete=False) as tmp:
            tmp.write(template.to_json())
        self._config.pop('meta-parameters', None)
        if (not os.path.exists(self.cwd + '/template.json')
                and not self._config['environment']['template']):
            with open(self.cwd + '/template.json', 'w') as file:
                file.write(template.to_json())
                # NOTE(review): redundant — the with block closes the file.
                file.close()
        else:
            if self.debug:
                print('Not creating template.json')
        return tmp.name
    elif self.template_type == 'project_role_jump_account':
        # Jump-account templates carry no Resources section in parameters.
        self._config['parameters'].pop('Resources', None)
        template = Template()
        namespace_param = template.add_parameter(
            Parameter(
                "IAMNamespace",
                Description="Namespace for IAM users, policies, etc.",
                Type="String",
                Default="/"))
        uppercase_env_prefix_param = template.add_parameter(
            Parameter(
                "UppercaseAwsEnvironmentPrefix",
                Description=
                "Uppercase abbreviation for AWS account (i.e. DEV,QA,PROD)",
                Type="String"))
        lowercase_env_prefix_param = template.add_parameter(
            Parameter(
                "LowercaseAwsEnvironmentPrefix",
                Description=
                "Lowercase abbreviation for AWS account (i.e. dev,qa,prod)",
                Type="String"))
        aws_account_number_param = template.add_parameter(
            Parameter("AccountNumber",
                      Description="AWS Account Number",
                      Type="String"))
        uppercase_project_name_param = template.add_parameter(
            Parameter("UppercaseProjectName",
                      Description="Uppercase Project Name",
                      Type="String"))
        lowercase_project_name_param = template.add_parameter(
            Parameter("LowercaseProjectName",
                      Description="Lowercase Project Name",
                      Type="String"))
        pd = PolicyDocument(Version="2012-10-17",
                            Statement=self.create_policy_document())
        # Unlike the 'project_role' branch, the policy here has no Groups
        # attachment and the Group has no Path/policy.
        iam_policy = template.add_resource(
            ManagedPolicy('ManagedPolicy',
                          Description=Join('-', [
                              Ref(uppercase_env_prefix_param),
                              Ref(uppercase_project_name_param), 'project'
                          ]),
                          PolicyDocument=pd,
                          ManagedPolicyName=Join('-', [
                              Ref(uppercase_env_prefix_param),
                              Ref(uppercase_project_name_param)
                          ]),
                          Path=Ref(namespace_param)))
        iam_group = template.add_resource(
            Group("Group",
                  GroupName=Join('-', [
                      Ref(uppercase_env_prefix_param),
                      Ref(uppercase_project_name_param)
                  ])))
        if self.debug:
            print(template.to_json())
        with tempfile.NamedTemporaryFile(mode='w', suffix='.rdr',
                                         delete=False) as tmp:
            tmp.write(template.to_json())
        self._config.pop('meta-parameters', None)
        if (not os.path.exists(self.cwd + '/template.json')
                and not self._config['environment']['template']):
            with open(self.cwd + '/template.json', 'w') as file:
                file.write(template.to_json())
                # NOTE(review): redundant — the with block closes the file.
                file.close()
        else:
            if self.debug:
                print('Not creating template.json')
        if self.debug:
            print('template file is: ' + str(tmp.name))
        return tmp.name
    else:
        print('incorrect template type')
        sys.exit(1)
def roleTemplate(app,env,nameBucket): template = Template() Ec2Role = template.add_resource(Role( "Ec2Role", RoleName="ec2-role", AssumeRolePolicyDocument={"Statement": [{ "Effect": "Allow", "Principal": { "Service": [ "ec2.amazonaws.com" ] }, "Action": [ "sts:AssumeRole" ] }]}, Policies=[Policy( PolicyName="ec2-policy", PolicyDocument={ "Statement": [ { "Action": [ "ec2:*", ], "Effect": "Allow", "Resource": ["*"] } ] } ), Policy( PolicyName="s3List", PolicyDocument={ "Statement": [ { "Action": ["s3:ListBucket"], "Resource":[Join("",["arn:aws:s3:::",ImportValue("Bucket"+nameBucket+app+env)])], "Effect": "Allow" } ] } ), Policy( PolicyName="s3Permissions", PolicyDocument={ "Statement": [ { "Effect": "Allow", "Action": [ "s3:PutObject", "s3:GetObject", "s3:DeleteObject" ], "Resource": [Join("",["arn:aws:s3:::",ImportValue("Bucket"+nameBucket+app+env),"/*"])] } ] } ) ] ) ) profile = template.add_resource(InstanceProfile( "InstanceProfile", Roles=[Ref(Ec2Role)] )) output = template.add_output(Output( "iamRole"+app+env, Description="IAM Role", Value=Ref(profile), Export=Export("Role-"+app+"-"+env) )) return (template.to_json())
# ================================================== # This stack creates the API infrastructure. # ================================================== from troposphere import Template, Parameter, Ref, GetAtt, Join, Output import troposphere.s3 as s3 import troposphere.iam as iam import troposphere.cloudfront as cloudfront import uuid # ================================================== # Template details. # ================================================== template = Template( 'Create the infrastructure needed to run the Connected Together Admin') template.add_version('2010-09-09') # ================================================== # Parameters. # ================================================== uuid_parameter = template.add_parameter( Parameter('Uuid', Type='String', Default=str(uuid.uuid4()), Description='The unique ID for this stack.', MinLength='36', MaxLength='36')) environment_parameter = template.add_parameter( Parameter( 'Environment', Type='String',
def __init__(self, sceptre_user_data): self.template = Template() self.sceptre_user_data = sceptre_user_data self.template.add_description( "Lambda Function for Unregisteration of Runners")
def main(**launch_parameters):
    """Build the CloudFormation YAML for a single SOCA DCV desktop node.

    Expects launch_parameters keys including: session_name, base_os,
    disk_size, image_id, security_group_id, hibernate, instance_type,
    soca_private_subnets, ComputeNodeInstanceProfileArn, KeyName, user_data,
    cluster_id, user, session_uuid, DefaultMetricCollection and
    SolutionMetricLambda.

    Returns:
        dict: {'success': True, 'output': <yaml str>} on success, or
        {'success': False, 'output': <error description>} on any exception
        (the broad except is deliberate — errors are reported to the caller
        as data rather than raised).
    """
    try:
        t = Template()
        t.set_version("2010-09-09")
        t.set_description("(SOCA) - Base template to deploy DCV nodes")
        allow_anonymous_data_collection = launch_parameters["DefaultMetricCollection"]

        # Launch Actual Capacity
        instance = ec2.Instance(str(launch_parameters["session_name"]))
        # Root device name differs between Amazon Linux 2 and other AMIs;
        # disk_size is False when the caller wants the 30 GB default.
        instance.BlockDeviceMappings = [{'DeviceName': "/dev/xvda" if launch_parameters["base_os"] == "amazonlinux2" else "/dev/sda1",
                                         'Ebs': {
                                             'DeleteOnTermination': True,
                                             'VolumeSize': 30 if launch_parameters["disk_size"] is False else int(launch_parameters["disk_size"]),
                                             'VolumeType': 'gp2',
                                             'Encrypted': True}
                                         }]
        instance.ImageId = launch_parameters["image_id"]
        instance.SecurityGroupIds = [launch_parameters["security_group_id"]]
        if launch_parameters["hibernate"] is True:
            instance.HibernationOptions = ec2.HibernationOptions(Configured=True)
        instance.InstanceType = launch_parameters["instance_type"]
        # Spread sessions across private subnets when more than one exists.
        instance.SubnetId = random.choice(launch_parameters["soca_private_subnets"]) if len(launch_parameters["soca_private_subnets"]) > 1 else launch_parameters["soca_private_subnets"][0]
        # CloudFormation wants the profile name, not the full ARN.
        instance.IamInstanceProfile = launch_parameters["ComputeNodeInstanceProfileArn"].split("instance-profile/")[-1]
        instance.KeyName = launch_parameters["KeyName"]
        instance.UserData = Base64(Sub((launch_parameters["user_data"])))
        # Tag keys prefixed "_soca_" are rewritten to "soca:" after rendering
        # (see the to_yaml().replace below) because ":" is not a valid Python
        # keyword-argument character.
        instance.Tags = base_Tags(
            Name=str(launch_parameters["cluster_id"] + "-" + launch_parameters["session_name"] + "-" + launch_parameters["user"]),
            _soca_JobName=str(launch_parameters["session_name"]),
            _soca_JobOwner=str(launch_parameters["user"]),
            _soca_NodeType="dcv",
            _soca_JobProject="desktop",
            _soca_DCVSupportHibernate=str(launch_parameters["hibernate"]).lower(),
            _soca_ClusterId=str(launch_parameters["cluster_id"]),
            _soca_DCVSessionUUID=str(launch_parameters["session_uuid"]),
            _soca_DCVSystem=str(launch_parameters["base_os"]))
        t.add_resource(instance)

        # Begin Custom Resource
        # Change Mapping to No if you want to disable this
        if allow_anonymous_data_collection is True:
            # NOTE(review): CustomResourceSendAnonymousMetrics is defined
            # elsewhere in this project — presumably a troposphere
            # CustomResource subclass; confirm its property names match.
            metrics = CustomResourceSendAnonymousMetrics("SendAnonymousData")
            metrics.ServiceToken = launch_parameters["SolutionMetricLambda"]
            metrics.DesiredCapacity = "1"
            metrics.InstanceType = str(launch_parameters["instance_type"])
            metrics.Efa = "false"
            metrics.ScratchSize = "0"
            # NOTE(review): reports str(disk_size) even when disk_size is
            # False and the instance actually got the 30 GB default.
            metrics.RootSize = str(launch_parameters["disk_size"])
            metrics.SpotPrice = "false"
            metrics.BaseOS = str(launch_parameters["base_os"])
            metrics.StackUUID = str(launch_parameters["session_uuid"])
            metrics.KeepForever = "false"
            metrics.FsxLustre = str({"fsx_lustre": "false", "existing_fsx": "false", "s3_backend": "false", "import_path": "false", "export_path": "false", "deployment_type": "false", "per_unit_throughput": "false", "capacity": 1200})
            metrics.TerminateWhenIdle = "false"
            metrics.Dcv = "true"
            t.add_resource(metrics)
        # End Custom Resource

        # Tags must use "soca:<Key>" syntax
        template_output = t.to_yaml().replace("_soca_", "soca:")
        return {'success': True, 'output': template_output}

    except Exception as e:
        # Report the failure (with file name and line number) to the caller
        # instead of raising, so the web tier can surface it.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        return {'success': False, 'output': 'cloudformation_builder.py: ' + (str(e) + ': error :' + str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))}
def __init__(self, sceptre_user_data): self.template = Template() self.sceptre_user_data = sceptre_user_data self.add_vpc()
from troposphere import ( Base64, ec2, GetAtt, Join, Output, Parameter, Ref, Template, ) ApplicationPort = "3000" PublicCidrIp = str(ip_network(get_ip())) t = Template() t.add_description("Effective DevOps in AWS: HelloWorld web application") t.add_parameter( Parameter( "KeyPair", Description="Name of an existing EC2 KeyPair to SSH", Type="AWS::EC2::KeyPair::KeyName", ConstraintDescription="must be the name of an existing EC2 KeyPair.", )) t.add_resource( ec2.SecurityGroup( "SecurityGroup", GroupDescription="Allow SSH and TCP/{} access".format(ApplicationPort),
def create_cloudformation_stack(args): print("Hello AWS!") # Connect to EC2. Get a cloudformation client session = boto3.Session(profile_name='f_project') client = session.client('cloudformation', region_name='sa-east-1') stack_name = args.stack_name ssh_key = args.ssh_key_name sec_group_name = 'TesisSecurityGroup' # Create stack template. cloudformation_template = Template() # Add parameters -> SSH key ssh_key_parameter = cloudformation_template.add_parameter( ssh_parameter(ssh_key)) cloudformation_template.add_output( cf_output("SSHKey", "SSH Key to log into instances", 'KeyName')) # Add roles and policies (bucket) policy_name = 'RolePolicies' role_name = 'InstanceRole' profile_name = 'InstanceProfile' ref_stack_id = Ref('AWS::StackId') ref_region = Ref('AWS::Region') ref_stack_name = Ref('AWS::StackName') cloudformation_template, subnet = set_cloudformation_settings( cloudformation_template, ref_stack_id) # Add security group ssh_sec_group = add_ssh_security_group(sec_group_name) cloudformation_template.add_resource(ssh_sec_group) # Read the environment information from the config file cfg_parser = ConfigObj(CFG_FILE) instance_size = cfg_parser['aws_config']['INSTANCE_SIZE'] bucket_name = cfg_parser['aws_config']['BUCKET'] # Add bucket access policies cloudformation_template.add_resource( allow_bucket_access_role(role_name)) # 1 cloudformation_template.add_resource( bucket_access_policy(policy_name, role_name, bucket_name)) # 2 cloudformation_template.add_resource( instance_profile_bucket(profile_name, role_name)) # 3 for instance_id in cfg_parser["Instances"]: instance = cfg_parser["Instances"][instance_id] name = instance['name'] print("Instance name:", name) ami_id = instance['ami_id'] ip = instance['ip'] bootstrap_file = instance['local_bootstrap_file'] bootstrap_path = os.path.join(os.getcwd(), BOOTSTRAP_FOLDER, name, bootstrap_file) aws_instance = EnvInstance(name) aws_instance.create_instance_template( ami_id, instance_size, ip, ssh_key_parameter, 
bootstrap_path, sec_group_name, # TODO: unused subnet) aws_instance.set_bucket_access(role_name, profile_name, bucket_name) aws_instance.add_to_security_group(ssh_sec_group) # aws_instance.add_to_security_group(ref_stack_id) cloudformation_template.add_resource(aws_instance) cloudformation_template.add_output( cf_output("%sInstance" % name, "%s: IP %s" % (name, ip), name)) print("Instance added to template!") try: # Create stack client.create_stack(StackName=stack_name, TemplateBody=cloudformation_template.to_json(), Capabilities=['CAPABILITY_IAM']) # Wait until stack is created while client.describe_stacks( StackName=stack_name )["Stacks"][0]["StackStatus"] != StackState.created: # Add timeout -> and delete stack print("Creating Environment...") time.sleep(15) else: print("CloudFormation Stack created") except ClientError: formatted_lines = traceback.format_exc().splitlines() print(traceback.format_exc()) print(formatted_lines[0]) print(formatted_lines[-1]) print("CloudFormation Stack could not be created!")