def emit_configuration():
    create_bucket = template.add_parameter(
        Parameter(
            "CreateDeployerBucket",
            Type="String",
            Description="Whether or not to create the deployer bucket",
            Default='no',
            AllowedValues=['yes', 'no']
        )
    )

    condition_name = "DeployerBucketCondition"
    conditions = {
        condition_name: Equals(Ref(create_bucket), "yes")
    }

    for c in conditions:
        template.add_condition(c, conditions[c])

    bucket_name = Join('.', ['deployer', CLOUDNAME, Ref("AWS::Region"), CLOUDENV, 'leafme'])
    bucket = template.add_resource(
        Bucket(
            "DeployerBucket",
            BucketName=bucket_name,
            DeletionPolicy="Retain",
            Condition=condition_name
        )
    )
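# ---------------------------------------------------------------------------
# The emit_configuration() functions in this section all lean on module-level
# scaffolding defined elsewhere: a shared troposphere Template, the
# CLOUDNAME / CLOUDENV / CIDR_PREFIX / DEFAULT_ROUTE / USE_PRIVATE_SUBNETS
# constants, and a `cfn` helper module (load_template, sanitize_id,
# usable_instances, get_vpc_subnets, get_asg_azs, vpcs, keyname, Amis,
# SubnetTypes, alert_topic). The values below are a minimal illustrative
# sketch of that scaffolding, not the actual configuration.
from troposphere import (Template, Parameter, Ref, Equals, Join, GetAtt,
                         Base64, FindInMap, Output, If, Tags)

template = Template()          # shared template each emit_configuration() adds to
CLOUDNAME = 'leaf'             # assumed cloud identifier
CLOUDENV = 'dev'               # assumed environment name
CIDR_PREFIX = '10.0'           # assumed VPC CIDR prefix (i.e. 10.0.0.0/16)
DEFAULT_ROUTE = '0.0.0.0/0'    # open CIDR used by several ingress rules
USE_PRIVATE_SUBNETS = True     # controls AssociatePublicIpAddress on launch configs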
def emit_configuration():
    # Build the 6 sqs queues for farragut
    queues = [
        QueueConfig('farragut-aggregate-{0}'.format(CLOUDENV), 1800, 345600, 262144),
        QueueConfig('farragut-hourly-{0}'.format(CLOUDENV), 180, 345600, 262144),
        QueueConfig('farragut-leaf-site-{0}'.format(CLOUDENV), 30, 345600, 262144),
        QueueConfig('farragut-leaf-{0}'.format(CLOUDENV), 30, 345600, 262144),
        QueueConfig('farragut-{0}'.format(CLOUDENV), 1800, 345600, 262144),
        QueueConfig('farragut-import-{0}'.format(CLOUDENV), 30, 345600, 262144)
    ]

    for q in queues:
        template.add_resource(
            Queue(
                cfn.sanitize_id(q.name),
                VisibilityTimeout=q.visibility,
                MessageRetentionPeriod=q.retention,
                MaximumMessageSize=q.max_size,
                QueueName=q.name
            )
        )
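# QueueConfig and cfn.sanitize_id() are used above but not defined in this
# section. A minimal sketch of what they are assumed to be: QueueConfig is a
# named tuple whose fields map 1:1 onto the SQS attributes being set, and
# sanitize_id() strips characters that are not legal in a CloudFormation
# logical resource id.
import re
from collections import namedtuple

QueueConfig = namedtuple('QueueConfig', ['name', 'visibility', 'retention', 'max_size'])

def sanitize_id(*parts):
    # Logical ids must be alphanumeric; join the parts and drop everything else.
    return re.sub(r'[^a-zA-Z0-9]', '', ''.join(parts))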
def emit_configuration():
    # BEGIN SSH-ACCESSIBLE SECURITY GROUP
    # TODO: this needs to not be DEFAULT_ROUTE.
    ssh_ingress_rules = [
        ec2.SecurityGroupRule(IpProtocol='tcp', CidrIp=DEFAULT_ROUTE, FromPort=p, ToPort=p)
        for p in [22]
    ]

    ssh_security_group = template.add_resource(
        ec2.SecurityGroup(
            "SSHAccessible",
            GroupDescription='allows SSH into the machine.',
            VpcId=Ref(cfn.vpcs[0]),
            SecurityGroupIngress=ssh_ingress_rules,
            DependsOn=cfn.vpcs[0].title
        )
    )
def emit_configuration(): vpc = cfn.vpcs[0] region = Ref("AWS::Region") zookeeper_instance_class = template.add_parameter( Parameter( 'ZookeeperInstanceType', Type='String', Default='m3.medium', Description='Zookeeper instance type', AllowedValues=cfn.usable_instances(), ConstraintDescription='Instance size must be a valid instance type' ) ) create_zookeeper_bucket = template.add_parameter( Parameter( 'CreateZookeeperBucket', Type='String', Description='Whether or not to create the Zookeeper bucket. This option is provided in case the bucket already exists.', Default='no', AllowedValues=['yes', 'no'], ConstraintDescription='Answer must be yes or no' ) ) conditions = { "ZookeeperBucketCondition": Equals( Ref(create_zookeeper_bucket), "yes" ) } for c in conditions: template.add_condition(c, conditions[c]) ingress_rules = [ SecurityGroupRule( IpProtocol='tcp', CidrIp='{0}.0.0/16'.format(CIDR_PREFIX), FromPort=p, ToPort=p ) for p in [2181, 8080] ] ingress_rules.append( SecurityGroupRule( IpProtocol='tcp', CidrIp=DEFAULT_ROUTE, FromPort=22, ToPort=2222 ) ) zookeeper_sg = template.add_resource( SecurityGroup( "Zookeeper", GroupDescription="Security Group for ZooKeeper instances", VpcId=Ref(vpc), SecurityGroupIngress=ingress_rules, DependsOn=vpc.title ) ) # Now add in another ingress rule that allows zookeepers to talk to each other # in the same SG for port in [2888, 3888]: template.add_resource( SecurityGroupIngress( "ZookeeperSelfIngress{0}".format(port), IpProtocol='tcp', FromPort=port, ToPort=port, GroupId=Ref(zookeeper_sg), SourceSecurityGroupId=Ref(zookeeper_sg), DependsOn=zookeeper_sg.title ) ) # Create the zookeeper s3 bucket zookeeper_bucket_name = Join('.', ['zookeeper', CLOUDNAME, region, CLOUDENV, 'leafme']) zookeeper_bucket = template.add_resource( Bucket( "ZookeeperBucket", BucketName=zookeeper_bucket_name, DeletionPolicy='Retain', Condition="ZookeeperBucketCondition" ) ) zookeeper_role_name = '.'.join(['zookeeper', CLOUDNAME, CLOUDENV]) zookeeper_iam_role = template.add_resource( Role( "ZookeeperIamRole", AssumeRolePolicyDocument=ASSUME_ROLE_POLICY, Path="/", Policies=[ Policy( PolicyName="ZookeeperDefaultPolicy", PolicyDocument=json.loads(cfn.load_template("default_policy.json.j2", {"env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1" } )) ), Policy( PolicyName="ZookeeperPolicy", PolicyDocument=json.loads(cfn.load_template("zookeeper_policy.json.j2", {"env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1"} )) ) ], DependsOn=vpc.title ) ) zookeeper_instance_profile = template.add_resource( InstanceProfile( "zookeeperInstanceProfile", Path="/", Roles=[Ref(zookeeper_iam_role)], DependsOn=zookeeper_iam_role.title ) ) zookeeper_user_data = cfn.load_template("default-init.bash.j2", {"env": CLOUDENV, "cloud": CLOUDNAME, "deploy": "zookeeper"} ) # Launch Configuration for zookeepers zookeeper_launchcfg = template.add_resource( LaunchConfiguration( "ZookeeperLaunchConfiguration", ImageId=FindInMap('RegionMap', region, int(cfn.Amis.INSTANCE)), InstanceType=Ref(zookeeper_instance_class), IamInstanceProfile=Ref(zookeeper_instance_profile), AssociatePublicIpAddress=not USE_PRIVATE_SUBNETS, KeyName=Ref(cfn.keyname), SecurityGroups=[Ref(zookeeper_sg)], DependsOn=[zookeeper_instance_profile.title, zookeeper_sg.title], UserData=Base64(zookeeper_user_data) ) ) # Create the zookeeper autoscaling group zookeeper_asg_name = '.'.join(['zookeeper', CLOUDNAME, CLOUDENV]) zookeeper_asg = template.add_resource( AutoScalingGroup( "ZookeeperASG", AvailabilityZones=cfn.get_asg_azs(), 
DesiredCapacity="3", LaunchConfigurationName=Ref(zookeeper_launchcfg), MinSize="3", MaxSize="3", NotificationConfiguration=autoscaling.NotificationConfiguration( TopicARN=Ref(cfn.alert_topic), NotificationTypes=[ EC2_INSTANCE_TERMINATE, EC2_INSTANCE_LAUNCH, EC2_INSTANCE_LAUNCH_ERROR, EC2_INSTANCE_TERMINATE_ERROR ] ), VPCZoneIdentifier=[Ref(sn) for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.MASTER)] ) )
def emit_configuration():
    # Parameters here
    jenkins_instance_class = template.add_parameter(
        Parameter(
            'JenkinsInstanceType',
            Type='String',
            Default='t2.micro',
            Description='Chef jenkins instance type',
            AllowedValues=cfn.usable_instances(),
            ConstraintDescription='Instance size must be a valid instance type'
        )
    )

    # jenkins IAM role
    jenkins_role_name = '.'.join(['jenkins', CLOUDNAME, CLOUDENV])
    jenkins_iam_role = template.add_resource(
        iam.Role(
            'JenkinsIamRole',
            AssumeRolePolicyDocument=ASSUME_ROLE_POLICY,
            Path="/",
            Policies=[
                iam.Policy(
                    PolicyName='JenkinsPolicy',
                    PolicyDocument=json.loads(
                        cfn.load_template("jenkins_policy.json.j2",
                                          {"env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1"})
                    )
                ),
                iam.Policy(
                    PolicyName='JenkinsDefaultPolicy',
                    PolicyDocument=json.loads(
                        cfn.load_template("default_policy.json.j2",
                                          {"env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1"})
                    )
                )
            ],
            DependsOn=cfn.vpcs[0].title
        )
    )

    jenkins_instance_profile = template.add_resource(
        iam.InstanceProfile(
            "JenkinsInstanceProfile",
            Path="/",
            Roles=[Ref(jenkins_iam_role)],
            DependsOn=jenkins_iam_role.title
        )
    )

    jenkins_user_data = cfn.load_template("default-init.bash.j2",
                                          {"env": CLOUDENV, "cloud": CLOUDNAME, "deploy": "jenkins"})

    ingress_rules = [
        ec2.SecurityGroupRule(
            IpProtocol=p[0],
            CidrIp=DEFAULT_ROUTE,
            FromPort=p[1],
            ToPort=p[1]
        ) for p in [('tcp', 22), ('tcp', 80), ('tcp', 443)]
    ]

    security_group = template.add_resource(
        ec2.SecurityGroup(
            "JenkinsSecurityGroup",
            GroupDescription='Security Group for jenkins instances',
            VpcId=Ref(cfn.vpcs[0]),
            SecurityGroupIngress=ingress_rules,
            DependsOn=cfn.vpcs[0].title,
            Tags=Tags(Name='.'.join(['jenkins-sg', CLOUDNAME, CLOUDENV]))
        )
    )

    launch_cfg = template.add_resource(
        autoscaling.LaunchConfiguration(
            "JenkinsLaunchConfiguration",
            ImageId=FindInMap('RegionMap', Ref("AWS::Region"), int(cfn.Amis.EBS)),
            InstanceType=Ref(jenkins_instance_class),
            IamInstanceProfile=Ref(jenkins_instance_profile),
            AssociatePublicIpAddress=not USE_PRIVATE_SUBNETS,
            BlockDeviceMappings=[
                ec2.BlockDeviceMapping(
                    DeviceName='/dev/sda1',
                    Ebs=ec2.EBSBlockDevice(DeleteOnTermination=True)
                )
            ],
            KeyName=Ref(cfn.keyname),
            SecurityGroups=[Ref(security_group)],
            DependsOn=[jenkins_instance_profile.title, security_group.title],
            UserData=Base64(jenkins_user_data)
        )
    )

    asg_name = '.'.join(['jenkins', CLOUDNAME, CLOUDENV])
    asg = template.add_resource(
        autoscaling.AutoScalingGroup(
            "JenkinsASG",
            AvailabilityZones=cfn.get_asg_azs(),
            DesiredCapacity="1",
            LaunchConfigurationName=Ref(launch_cfg),
            MinSize="1",
            MaxSize="1",
            NotificationConfiguration=autoscaling.NotificationConfiguration(
                TopicARN=Ref(cfn.alert_topic),
                NotificationTypes=[
                    EC2_INSTANCE_TERMINATE,
                    EC2_INSTANCE_TERMINATE_ERROR,
                    EC2_INSTANCE_LAUNCH,
                    EC2_INSTANCE_LAUNCH_ERROR
                ]
            ),
            VPCZoneIdentifier=[Ref(sn) for sn in cfn.get_vpc_subnets(cfn.vpcs[0], cfn.SubnetTypes.PLATFORM)]
        )
    )
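# cfn.load_template() is called throughout this section to render Jinja2 IAM
# policy documents and cloud-init user-data scripts. A minimal sketch of the
# assumed helper (the template directory and environment setup are
# assumptions):
import jinja2

def load_template(name, context):
    env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
    return env.get_template(name).render(**context)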
def emit_configuration():
    # Build an SQS queue for the babysitter
    """create_queue = template.add_parameter(
        Parameter(
            'CreateDeregistrationTopic',
            Type='String',
            Description='Whether or not to create the Chef Deregistration queue. This option is provided in case the queue already exists.',
            Default='no',
            AllowedValues=['yes', 'no'],
            ConstraintDescription='Answer must be yes or no'
        )
    )

    conditions = {
        "CreateDeregCondition": Equals(Ref(create_queue), "yes")
    }

    for c in conditions:
        template.add_condition(c, conditions[c])"""

    queue_name = '_'.join(['chef-deregistration', CLOUDNAME, CLOUDENV])
    queue = template.add_resource(
        Queue(
            cfn.sanitize_id(queue_name),
            VisibilityTimeout=60,
            MessageRetentionPeriod=1209600,
            MaximumMessageSize=16384,
            QueueName=queue_name,
        )
    )

    alert_topic = template.add_resource(
        Topic(
            cfn.sanitize_id("BabysitterAlarmTopic{0}".format(CLOUDENV)),
            DisplayName='Babysitter Alarm',
            TopicName=queue_name,
            Subscription=[
                Subscription(Endpoint=GetAtt(queue, "Arn"), Protocol='sqs'),
            ],
            DependsOn=queue.title,
        )
    )

    queue_depth_alarm = template.add_resource(
        Alarm(
            "BabysitterQueueDepthAlarm",
            AlarmDescription='Alarm if the queue depth grows beyond 200 messages',
            Namespace='AWS/SQS',
            MetricName='ApproximateNumberOfMessagesVisible',
            Dimensions=[
                MetricDimension(Name='QueueName', Value=GetAtt(queue, "QueueName"))
            ],
            Statistic='Sum',
            Period='300',
            EvaluationPeriods='1',
            Threshold='200',
            ComparisonOperator='GreaterThanThreshold',
            # AlarmActions=[Ref(alert_topic), ],
            # InsufficientDataActions=[Ref(alert_topic), ],
            DependsOn=alert_topic.title,
        ),
    )

    queue_policy = {
        "Version": "2012-10-17",
        "Id": "BabysitterSNSPublicationPolicy",
        "Statement": [{
            "Sid": "AllowSNSPublishing",
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": ["sqs:SendMessage"],
            "Resource": GetAtt(queue, "Arn"),
            "Condition": {
                "ArnEquals": {"aws:SourceArn": Ref(alert_topic)}
            }
        }]
    }

    # Publish all events from SNS to the Queue
    template.add_resource(
        QueuePolicy(
            "BabysitterPublishSNStoSQSPolicy",
            Queues=[Ref(queue)],
            PolicyDocument=queue_policy,
            DependsOn=[queue.title, alert_topic.title],
        )
    )

    cfn.alert_topic = alert_topic
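# The babysitter function above stores its SNS topic on cfn.alert_topic, and
# every autoscaling group defined later references Ref(cfn.alert_topic), so
# the per-module emit_configuration() calls are order-dependent. A sketch of
# the assumed driver wiring; the module names are illustrative, not the actual
# package layout.
def build_stack(modules):
    # e.g. build_stack([babysitter, zookeeper, mesos, jenkins])
    # babysitter must come first so cfn.alert_topic exists before it is used.
    for module in modules:
        module.emit_configuration()
    return template.to_json()   # troposphere renders the CloudFormation JSON body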
def emit_configuration(): vpc = cfn.vpcs[0] region = Ref("AWS::Region") dbname = template.add_parameter( Parameter( "RDSDatabaseInstanceName", Default="reporting{0}".format(CLOUDENV), Description="Postgres Instance Name", Type="String", MinLength="1", MaxLength="63", AllowedPattern="[a-zA-Z][a-zA-Z0-9]*", ConstraintDescription= "Must begin with a letter and contain only alphanumeric characters" )) dbuser = template.add_parameter( Parameter( "RDSDatabaseUser", Default="sa", Description="The database admin account username", Type="String", MinLength="1", MaxLength="63", AllowedPattern="[a-zA-Z][a-zA-Z0-9]*", ConstraintDescription="Must being with a letter and be alphanumeric" )) dbpassword = template.add_parameter( Parameter( "RDSDatabasePassword", NoEcho=True, Description="The database admin account password", Type="String", MinLength="1", MaxLength="41", AllowedPattern="[a-zA-Z0-9]*", ConstraintDescription="Must contain only alphanumeric characters.", Default="LeafLeaf123")) dbclass = template.add_parameter( Parameter("RDSInstanceClass", Default="db.t2.medium", Description="Database instance size", Type="String", AllowedValues=[ "db.t2.small", "db.t2.medium", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge" ])) allocated_storage = template.add_parameter( Parameter("RDSAllocatedStorage", Default="100", Description="The size of the Postgres Database (GB)", Type="Number", MinValue="5", MaxValue="512", ConstraintDescription="Must be between 5 and 512 GB")) db_subnet_group = template.add_resource( DBSubnetGroup( "RDSSubnetGroup", DBSubnetGroupDescription="Subnets available for RDS in {0}".format( CLOUDNAME), SubnetIds=[ Ref(sn) for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE) ], DependsOn=[ sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE) ])) ingress_rules = [ SecurityGroupRule(IpProtocol=p[0], CidrIp=DEFAULT_ROUTE, FromPort=p[1], ToPort=p[1]) for p in [('tcp', 5432)] ] security_group = template.add_resource( SecurityGroup("RDSDatabaseSecurityGroup", GroupDescription="Security group for Postgres Instances", VpcId=Ref(vpc), SecurityGroupIngress=ingress_rules, DependsOn=vpc.title)) database = template.add_resource( DBInstance( "RDSPostgresInstance", DBInstanceIdentifier=Ref(dbname), AllocatedStorage=Ref(allocated_storage), DBInstanceClass=Ref(dbclass), Engine="postgres", EngineVersion="9.3.6", MasterUsername=Ref(dbuser), MasterUserPassword=Ref(dbpassword), DBSubnetGroupName=Ref(db_subnet_group), VPCSecurityGroups=[Ref(security_group)], DependsOn=[ sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE) ])) template.add_output( Output("ConnectionString", Description="JDBC connection string for Postgres", Value=Join("", [ GetAtt("RDSPostgresInstance", "Endpoint.Address"), GetAtt("RDSPostgresInstance", "Endpoint.Port") ])))
def emit_configuration(): vpc = cfn.vpcs[0] region = Ref("AWS::Region") chefserver_instance_class = template.add_parameter( Parameter( 'ChefServerInstanceType', Type='String', Default='t2.medium', Description='Chef Server instance type', AllowedValues=cfn.usable_instances(), ConstraintDescription='Instance size must be a valid instance type' )) # Create IAM role for the chefserver instance # load the policies default_policy = json.loads( cfn.load_template("default_policy.json.j2", { "env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1" })) chefserver_role_name = '.'.join(['chefserver', CLOUDNAME, CLOUDENV]) chefserver_iam_role = template.add_resource( Role("ChefServerIamRole", AssumeRolePolicyDocument=ASSUME_ROLE_POLICY, Path="/", Policies=[ Policy(PolicyName="ChefServerPolicy", PolicyDocument=json.loads( cfn.load_template( "chefserver_policy.json.j2", { "env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1" }))), Policy(PolicyName="ChefserverDefaultPolicy", PolicyDocument=default_policy) ], DependsOn=vpc.title)) chefserver_instance_profile = template.add_resource( InstanceProfile("chefserverInstanceProfile", Path="/", Roles=[Ref(chefserver_iam_role)], DependsOn=chefserver_iam_role.title)) chefserver_user_data = cfn.load_template("chefserver-init.bash.j2", { "env": CLOUDENV, "cloud": CLOUDNAME, "deploy": "chefserver" }) chefserver_ingress_rules = [ SecurityGroupRule(IpProtocol=p[0], CidrIp='{0}.0.0/16'.format(CIDR_PREFIX), FromPort=p[1], ToPort=p[1]) for p in [('tcp', 80), ('tcp', 443)] ] chefserver_sg = template.add_resource( SecurityGroup("ChefServer", GroupDescription="Security Group for the Chef server", VpcId=Ref(vpc), SecurityGroupIngress=chefserver_ingress_rules, DependsOn=vpc.title)) chefserver_name = cfn.sanitize_id("ChefServer", CLOUDNAME, CLOUDENV) chefserver_instance = template.add_resource( Instance(chefserver_name, DependsOn=vpc.title, InstanceType=Ref(chefserver_instance_class), KeyName=Ref(cfn.keyname), SourceDestCheck=False, ImageId=FindInMap('RegionMap', region, int(cfn.Amis.EBS)), NetworkInterfaces=[ NetworkInterfaceProperty( Description='Network interface for {0}'.format( chefserver_name), GroupSet=[Ref(chefserver_sg)], SubnetId=Ref( cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.PLATFORM)[0]), AssociatePublicIpAddress=True, DeviceIndex=0, DeleteOnTermination=True) ], BlockDeviceMappings=[ BlockDeviceMapping(DeviceName="/dev/sda1", Ebs=EBSBlockDevice( VolumeSize=50, DeleteOnTermination=False)) ]))
def emit_configuration(): vpc = cfn.vpcs[0] region = Ref("AWS::Region") mesos_instance_class = template.add_parameter( Parameter( 'MesosInstanceType', Type='String', Default='m3.large', Description='Mesos instance type (for workers and masters)', AllowedValues=cfn.usable_instances(), ConstraintDescription='Instance size must be a valid instance type' )) ingress_rules = [ SecurityGroupRule(IpProtocol=p[0], CidrIp=DEFAULT_ROUTE, FromPort=p[1], ToPort=p[1]) for p in [('tcp', 22), ('tcp', 5050), ('tcp', 8080)] ] mesos_security_group = template.add_resource( SecurityGroup("Mesos", GroupDescription="Security Group for Mesos instances", VpcId=Ref(vpc), SecurityGroupIngress=ingress_rules, DependsOn=vpc.title)) # Allow any mesos instances to talk to each other template.add_resource( SecurityGroupIngress("MesosSelfIngress", IpProtocol='-1', FromPort=0, ToPort=65535, GroupId=Ref(mesos_security_group), SourceSecurityGroupId=Ref(mesos_security_group), DependsOn=mesos_security_group.title)) default_policy = json.loads( cfn.load_template("default_policy.json.j2", { "env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1" })) mesos_policy = json.loads( cfn.load_template("mesos_policy.json.j2", { "env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1" })) # IAM role here iam_role = template.add_resource( Role("MesosIamRole", AssumeRolePolicyDocument=ASSUME_ROLE_POLICY, Path="/", Policies=[ Policy(PolicyName='MesosDefaultPolicy', PolicyDocument=default_policy), Policy(PolicyName='MesosIamPolicy', PolicyDocument=mesos_policy) ], DependsOn=vpc.title)) # Instance profile here instance_profile = template.add_resource( InstanceProfile("mesosInstanceProfile", Path="/", Roles=[Ref(iam_role)], DependsOn=iam_role.title)) # UserData here master_user_data = cfn.load_template("default-init.bash.j2", { "env": CLOUDENV, "cloud": CLOUDNAME, "deploy": "mesos_master" }) # LaunchConfiguration for master mesos master_launch_configuration = template.add_resource( LaunchConfiguration( "MesosMasterLaunchConfiguration", ImageId=FindInMap('RegionMap', region, int(cfn.Amis.INSTANCE)), InstanceType=Ref(mesos_instance_class), IamInstanceProfile=Ref(instance_profile), KeyName=Ref(cfn.keyname), SecurityGroups=[Ref(mesos_security_group)], DependsOn=[instance_profile.title, mesos_security_group.title], AssociatePublicIpAddress=False, UserData=Base64(master_user_data))) # Autoscaling Group for master Mesos master_asg_name = '.'.join(['mesos-master', CLOUDNAME, CLOUDENV]) master_asg = template.add_resource( AutoScalingGroup( "MesosMasterASG", AvailabilityZones=cfn.get_asg_azs(), DesiredCapacity="3", LaunchConfigurationName=Ref(master_launch_configuration), MinSize="3", MaxSize="3", NotificationConfiguration=NotificationConfiguration( TopicARN=Ref(cfn.alert_topic), NotificationTypes=[ EC2_INSTANCE_TERMINATE, EC2_INSTANCE_LAUNCH, EC2_INSTANCE_LAUNCH_ERROR, EC2_INSTANCE_TERMINATE_ERROR ]), VPCZoneIdentifier=[ Ref(sn) for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.MASTER) ], DependsOn=[ sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.MASTER) ])) # Worker Mesos worker_user_data = cfn.load_template("default-init.bash.j2", { "env": CLOUDENV, "cloud": CLOUDNAME, "deploy": "mesos_slave" }) worker_launch_configuration = template.add_resource( LaunchConfiguration( "MesosWorkerLaunchConfiguration", ImageId=FindInMap('RegionMap', region, int(cfn.Amis.INSTANCE)), InstanceType=Ref(mesos_instance_class), IamInstanceProfile=Ref(instance_profile), KeyName=Ref(cfn.keyname), SecurityGroups=[Ref(mesos_security_group)], 
DependsOn=[instance_profile.title, mesos_security_group.title], AssociatePublicIpAddress=False, UserData=Base64(worker_user_data))) worker_asg_name = '.'.join(['mesos-worker', CLOUDNAME, CLOUDENV]), worker_asg = template.add_resource( AutoScalingGroup( "MesosWorkerASG", AvailabilityZones=cfn.get_asg_azs(), DesiredCapacity="3", LaunchConfigurationName=Ref(worker_launch_configuration), MinSize="3", MaxSize="12", NotificationConfiguration=NotificationConfiguration( TopicARN=Ref(cfn.alert_topic), NotificationTypes=[ EC2_INSTANCE_TERMINATE, EC2_INSTANCE_LAUNCH, EC2_INSTANCE_LAUNCH_ERROR, EC2_INSTANCE_TERMINATE_ERROR ]), VPCZoneIdentifier=[ Ref(sn) for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.WORKER) ], DependsOn=[ sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.WORKER) ]))
def emit_configuration():
    vpc = cfn.vpcs[0]

    instance_class = template.add_parameter(
        Parameter(
            'RegistryInstanceType',
            Type='String',
            Default='m3.medium',
            Description='Registry instance type',
            AllowedValues=cfn.usable_instances(),
        )
    )

    create_bucket = template.add_parameter(
        Parameter(
            'CreateDockerRegistryBucket',
            Type='String',
            Description='Whether or not to create the Docker Registry bucket.',
            Default='no',
            AllowedValues=['yes', 'no']
        )
    )

    condition_name = "DockerRegistryBucketCondition"
    conditions = {
        condition_name: Equals(Ref(create_bucket), "yes")
    }

    for c in conditions:
        template.add_condition(c, conditions[c])

    # Create the registry bucket
    bucket_name = Join('.', ['docker-registry', CLOUDNAME, Ref("AWS::Region"), CLOUDENV, 'leafme'])
    bucket = template.add_resource(
        Bucket(
            "DockerRegistryBucket",
            BucketName=bucket_name,
            DeletionPolicy='Retain',
            Condition=condition_name
        )
    )

    ingress_rules = [
        SecurityGroupRule(
            IpProtocol=p[0],
            CidrIp=DEFAULT_ROUTE,
            FromPort=p[1],
            ToPort=p[1]
        ) for p in [('tcp', 80), ('tcp', 22)]
    ]

    sg = template.add_resource(
        SecurityGroup(
            "DockerRegistry",
            GroupDescription="Security Group for Docker Registries",
            VpcId=Ref(vpc),
            SecurityGroupIngress=ingress_rules,
            DependsOn=vpc.title
        )
    )

    policy_vars = {"env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1"}

    # IAM role for docker registry
    policy = json.loads(cfn.load_template("registry_policy.json.j2", policy_vars))
    default_policy = json.loads(cfn.load_template("default_policy.json.j2", policy_vars))

    iam_role = template.add_resource(
        Role(
            "DockerRegistryIamRole",
            AssumeRolePolicyDocument=ASSUME_ROLE_POLICY,
            Path="/",
            Policies=[
                Policy(PolicyName="RegistryDefaultPolicy", PolicyDocument=default_policy),
                Policy(PolicyName="RegistryPolicy", PolicyDocument=policy)
            ],
            DependsOn=vpc.title
        )
    )

    instance_profile = template.add_resource(
        InstanceProfile(
            "DockerRegistryInstanceProfile",
            Path="/",
            Roles=[Ref(iam_role)],
            DependsOn=iam_role.title
        )
    )

    user_data = cfn.load_template("default-init.bash.j2",
                                  {"env": CLOUDENV, "cloud": CLOUDNAME, "deploy": "docker_registry"})

    launch_config = template.add_resource(
        LaunchConfiguration(
            "RegistryLaunchConfiguration",
            ImageId=FindInMap('RegionMap', Ref("AWS::Region"), int(cfn.Amis.INSTANCE)),
            InstanceType=Ref(instance_class),
            IamInstanceProfile=Ref(instance_profile),
            KeyName=Ref(cfn.keyname),
            SecurityGroups=[Ref(sg)],
            DependsOn=[instance_profile.title, sg.title],
            AssociatePublicIpAddress=False,
            UserData=Base64(user_data)
        )
    )

    asg = template.add_resource(
        AutoScalingGroup(
            "RegistryAutoscalingGroup",
            AvailabilityZones=cfn.get_asg_azs(),
            DesiredCapacity="1",
            LaunchConfigurationName=Ref(launch_config),
            MinSize="1",
            MaxSize="1",
            NotificationConfiguration=NotificationConfiguration(
                TopicARN=Ref(cfn.alert_topic),
                NotificationTypes=[
                    EC2_INSTANCE_TERMINATE,
                    EC2_INSTANCE_LAUNCH,
                    EC2_INSTANCE_LAUNCH_ERROR,
                    EC2_INSTANCE_TERMINATE_ERROR
                ]
            ),
            VPCZoneIdentifier=[Ref(sn) for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.PLATFORM)],
            DependsOn=[sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.PLATFORM)]
        )
    )
def emit_configuration(): vpc = cfn.vpcs[0] region = Ref("AWS::Region") vpn_instance_class = template.add_parameter( Parameter( 'VPNInstanceType', Type='String', Default='m3.medium', Description='VPN instance type', AllowedValues=cfn.usable_instances(), ConstraintDescription='Instance size must be a valid instance type' )) vpn_ingress_rules = [ SecurityGroupRule(IpProtocol=p[0], CidrIp=DEFAULT_ROUTE, FromPort=p[1], ToPort=p[1]) for p in [('tcp', 22), ('udp', 1194)] ] vpn_sg = template.add_resource( SecurityGroup("VPN", GroupDescription="Security Group for VPN ingress.", VpcId=Ref(vpc), SecurityGroupIngress=vpn_ingress_rules, DependsOn=vpc.title)) # IAM role for vpn vpn_policy = json.loads( cfn.load_template("vpn_policy.json.j2", { "env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-west-2" })) default_policy = json.loads( cfn.load_template("default_policy.json.j2", { "env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-west-2" })) vpn_role_name = '.'.join(['vpn', CLOUDNAME, CLOUDENV]) vpn_iam_role = template.add_resource( Role("VPNIamRole", AssumeRolePolicyDocument=ASSUME_ROLE_POLICY, Path="/", Policies=[ Policy(PolicyName="VPNDefaultPolicy", PolicyDocument=default_policy), Policy(PolicyName="VPNPolicy", PolicyDocument=vpn_policy) ], DependsOn=vpc.title)) vpn_instance_profile = template.add_resource( InstanceProfile("vpnInstanceProfile", Path="/", Roles=[Ref(vpn_iam_role)], DependsOn=vpn_iam_role.title)) vpn_user_data = cfn.load_template("default-init.bash.j2", { "env": CLOUDENV, "cloud": CLOUDNAME, "deploy": "vpn" }) # Launch Configuration for vpns vpn_launchcfg = template.add_resource( LaunchConfiguration( "VPNLaunchConfiguration", ImageId=FindInMap('RegionMap', region, int(cfn.Amis.INSTANCE)), InstanceType=Ref(vpn_instance_class), IamInstanceProfile=Ref(vpn_instance_profile), KeyName=Ref(cfn.keyname), SecurityGroups=[Ref(vpn_sg)], DependsOn=[vpn_instance_profile.title, vpn_sg.title], AssociatePublicIpAddress=True, UserData=Base64(vpn_user_data))) # Create the babysitter autoscaling group vpn_asg_name = '.'.join(['vpn', CLOUDNAME, CLOUDENV]) vpn_asg = template.add_resource( AutoScalingGroup( "VPNASG", AvailabilityZones=cfn.get_asg_azs(), DesiredCapacity="1", LaunchConfigurationName=Ref(vpn_launchcfg), MinSize="1", MaxSize="1", NotificationConfiguration=NotificationConfiguration( TopicARN=Ref(cfn.alert_topic), NotificationTypes=[ EC2_INSTANCE_TERMINATE, EC2_INSTANCE_LAUNCH, EC2_INSTANCE_LAUNCH_ERROR, EC2_INSTANCE_TERMINATE_ERROR ]), VPCZoneIdentifier=[ Ref(sn) for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.VPN) ], DependsOn=[ sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.VPN) ]))
def emit_configuration(): vpc = cfn.vpcs[0] region = Ref("AWS::Region") chefserver_instance_class = template.add_parameter( Parameter( 'ChefServerInstanceType', Type='String', Default='t2.medium', Description='Chef Server instance type', AllowedValues=cfn.usable_instances(), ConstraintDescription='Instance size must be a valid instance type' ) ) # Create IAM role for the chefserver instance # load the policies default_policy = json.loads(cfn.load_template("default_policy.json.j2", {"env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1"} )) chefserver_role_name = '.'.join(['chefserver', CLOUDNAME, CLOUDENV]) chefserver_iam_role = template.add_resource( Role( "ChefServerIamRole", AssumeRolePolicyDocument=ASSUME_ROLE_POLICY, Path="/", Policies=[ Policy( PolicyName="ChefServerPolicy", PolicyDocument=json.loads( cfn.load_template("chefserver_policy.json.j2", {"env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1"} ) ) ), Policy( PolicyName="ChefserverDefaultPolicy", PolicyDocument=default_policy ) ], DependsOn=vpc.title ) ) chefserver_instance_profile = template.add_resource( InstanceProfile( "chefserverInstanceProfile", Path="/", Roles=[Ref(chefserver_iam_role)], DependsOn=chefserver_iam_role.title ) ) chefserver_user_data = cfn.load_template("chefserver-init.bash.j2", {"env": CLOUDENV, "cloud": CLOUDNAME, "deploy": "chefserver"} ) chefserver_ingress_rules = [ SecurityGroupRule( IpProtocol=p[0], CidrIp='{0}.0.0/16'.format(CIDR_PREFIX), FromPort=p[1], ToPort=p[1] ) for p in [('tcp', 80), ('tcp', 443)] ] chefserver_sg = template.add_resource( SecurityGroup( "ChefServer", GroupDescription="Security Group for the Chef server", VpcId=Ref(vpc), SecurityGroupIngress=chefserver_ingress_rules, DependsOn=vpc.title ) ) chefserver_name = cfn.sanitize_id("ChefServer", CLOUDNAME, CLOUDENV) chefserver_instance = template.add_resource(Instance( chefserver_name, DependsOn=vpc.title, InstanceType=Ref(chefserver_instance_class), KeyName=Ref(cfn.keyname), SourceDestCheck=False, ImageId=FindInMap('RegionMap', region, int(cfn.Amis.EBS)), NetworkInterfaces=[ NetworkInterfaceProperty( Description='Network interface for {0}'.format(chefserver_name), GroupSet=[Ref(chefserver_sg)], SubnetId=Ref(cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.PLATFORM)[0]), AssociatePublicIpAddress=True, DeviceIndex=0, DeleteOnTermination=True ) ], BlockDeviceMappings=[ BlockDeviceMapping( DeviceName="/dev/sda1", Ebs=EBSBlockDevice( VolumeSize=50, DeleteOnTermination=False ) ) ] ))
def emit_configuration(): vpc = cfn.vpcs[0] region = Ref("AWS::Region") vpn_instance_class = template.add_parameter( Parameter( 'VPNInstanceType', Type='String', Default='m3.medium', Description='VPN instance type', AllowedValues=cfn.usable_instances(), ConstraintDescription='Instance size must be a valid instance type' ) ) vpn_ingress_rules = [ SecurityGroupRule( IpProtocol=p[0], CidrIp=DEFAULT_ROUTE, FromPort=p[1], ToPort=p[1] ) for p in [('tcp', 22), ('udp', 1194)] ] vpn_sg = template.add_resource( SecurityGroup( "VPN", GroupDescription="Security Group for VPN ingress.", VpcId=Ref(vpc), SecurityGroupIngress=vpn_ingress_rules, DependsOn=vpc.title ) ) # IAM role for vpn vpn_policy = json.loads(cfn.load_template("vpn_policy.json.j2", {"env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-west-2"} )) default_policy = json.loads(cfn.load_template("default_policy.json.j2", {"env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-west-2"} )) vpn_role_name = '.'.join(['vpn', CLOUDNAME, CLOUDENV]) vpn_iam_role = template.add_resource( Role( "VPNIamRole", AssumeRolePolicyDocument=ASSUME_ROLE_POLICY, Path="/", Policies=[ Policy( PolicyName="VPNDefaultPolicy", PolicyDocument=default_policy ), Policy( PolicyName="VPNPolicy", PolicyDocument=vpn_policy ) ], DependsOn=vpc.title ) ) vpn_instance_profile = template.add_resource( InstanceProfile( "vpnInstanceProfile", Path="/", Roles=[Ref(vpn_iam_role)], DependsOn=vpn_iam_role.title ) ) vpn_user_data = cfn.load_template("default-init.bash.j2", {"env": CLOUDENV, "cloud": CLOUDNAME, "deploy": "vpn"} ) # Launch Configuration for vpns vpn_launchcfg = template.add_resource( LaunchConfiguration( "VPNLaunchConfiguration", ImageId=FindInMap('RegionMap', region, int(cfn.Amis.INSTANCE)), InstanceType=Ref(vpn_instance_class), IamInstanceProfile=Ref(vpn_instance_profile), KeyName=Ref(cfn.keyname), SecurityGroups=[Ref(vpn_sg)], DependsOn=[vpn_instance_profile.title, vpn_sg.title], AssociatePublicIpAddress=True, UserData=Base64(vpn_user_data) ) ) # Create the babysitter autoscaling group vpn_asg_name = '.'.join(['vpn', CLOUDNAME, CLOUDENV]) vpn_asg = template.add_resource( AutoScalingGroup( "VPNASG", AvailabilityZones=cfn.get_asg_azs(), DesiredCapacity="1", LaunchConfigurationName=Ref(vpn_launchcfg), MinSize="1", MaxSize="1", NotificationConfiguration=NotificationConfiguration( TopicARN=Ref(cfn.alert_topic), NotificationTypes=[ EC2_INSTANCE_TERMINATE, EC2_INSTANCE_LAUNCH, EC2_INSTANCE_LAUNCH_ERROR, EC2_INSTANCE_TERMINATE_ERROR ] ), VPCZoneIdentifier=[Ref(sn) for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.VPN)], DependsOn=[sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.VPN)] ) )
def emit_configuration(): vpc = cfn.vpcs[0] region = Ref("AWS::Region") mesos_instance_class = template.add_parameter( Parameter( 'MesosInstanceType', Type='String', Default='m3.large', Description='Mesos instance type (for workers and masters)', AllowedValues=cfn.usable_instances(), ConstraintDescription='Instance size must be a valid instance type' ) ) ingress_rules = [ SecurityGroupRule( IpProtocol=p[0], CidrIp=DEFAULT_ROUTE, FromPort=p[1], ToPort=p[1] ) for p in [('tcp', 22), ('tcp', 5050), ('tcp', 8080)] ] mesos_security_group = template.add_resource( SecurityGroup( "Mesos", GroupDescription="Security Group for Mesos instances", VpcId=Ref(vpc), SecurityGroupIngress=ingress_rules, DependsOn=vpc.title ) ) # Allow any mesos instances to talk to each other template.add_resource( SecurityGroupIngress( "MesosSelfIngress", IpProtocol='-1', FromPort=0, ToPort=65535, GroupId=Ref(mesos_security_group), SourceSecurityGroupId=Ref(mesos_security_group), DependsOn=mesos_security_group.title ) ) default_policy = json.loads(cfn.load_template("default_policy.json.j2", {"env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1"} )) mesos_policy = json.loads(cfn.load_template("mesos_policy.json.j2", {"env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1"} )) # IAM role here iam_role = template.add_resource( Role( "MesosIamRole", AssumeRolePolicyDocument=ASSUME_ROLE_POLICY, Path="/", Policies=[ Policy( PolicyName='MesosDefaultPolicy', PolicyDocument=default_policy ), Policy( PolicyName='MesosIamPolicy', PolicyDocument=mesos_policy ) ], DependsOn=vpc.title ) ) # Instance profile here instance_profile = template.add_resource( InstanceProfile( "mesosInstanceProfile", Path="/", Roles=[Ref(iam_role)], DependsOn=iam_role.title ) ) # UserData here master_user_data = cfn.load_template("default-init.bash.j2", {"env": CLOUDENV, "cloud": CLOUDNAME, "deploy": "mesos_master"} ) # LaunchConfiguration for master mesos master_launch_configuration = template.add_resource( LaunchConfiguration( "MesosMasterLaunchConfiguration", ImageId=FindInMap('RegionMap', region, int(cfn.Amis.INSTANCE)), InstanceType=Ref(mesos_instance_class), IamInstanceProfile=Ref(instance_profile), KeyName=Ref(cfn.keyname), SecurityGroups=[Ref(mesos_security_group)], DependsOn=[instance_profile.title, mesos_security_group.title], AssociatePublicIpAddress=False, UserData=Base64(master_user_data) ) ) # Autoscaling Group for master Mesos master_asg_name = '.'.join(['mesos-master', CLOUDNAME, CLOUDENV]) master_asg = template.add_resource( AutoScalingGroup( "MesosMasterASG", AvailabilityZones=cfn.get_asg_azs(), DesiredCapacity="3", LaunchConfigurationName=Ref(master_launch_configuration), MinSize="3", MaxSize="3", NotificationConfiguration=NotificationConfiguration( TopicARN=Ref(cfn.alert_topic), NotificationTypes=[ EC2_INSTANCE_TERMINATE, EC2_INSTANCE_LAUNCH, EC2_INSTANCE_LAUNCH_ERROR, EC2_INSTANCE_TERMINATE_ERROR ] ), VPCZoneIdentifier=[Ref(sn) for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.MASTER)], DependsOn=[sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.MASTER)] ) ) # Worker Mesos worker_user_data = cfn.load_template("default-init.bash.j2", {"env": CLOUDENV, "cloud": CLOUDNAME, "deploy": "mesos_slave"} ) worker_launch_configuration = template.add_resource( LaunchConfiguration( "MesosWorkerLaunchConfiguration", ImageId=FindInMap('RegionMap', region, int(cfn.Amis.INSTANCE)), InstanceType=Ref(mesos_instance_class), IamInstanceProfile=Ref(instance_profile), KeyName=Ref(cfn.keyname), SecurityGroups=[Ref(mesos_security_group)], 
DependsOn=[instance_profile.title, mesos_security_group.title], AssociatePublicIpAddress=False, UserData=Base64(worker_user_data) ) ) worker_asg_name = '.'.join(['mesos-worker', CLOUDNAME, CLOUDENV]), worker_asg = template.add_resource( AutoScalingGroup( "MesosWorkerASG", AvailabilityZones=cfn.get_asg_azs(), DesiredCapacity="3", LaunchConfigurationName=Ref(worker_launch_configuration), MinSize="3", MaxSize="12", NotificationConfiguration=NotificationConfiguration( TopicARN=Ref(cfn.alert_topic), NotificationTypes=[ EC2_INSTANCE_TERMINATE, EC2_INSTANCE_LAUNCH, EC2_INSTANCE_LAUNCH_ERROR, EC2_INSTANCE_TERMINATE_ERROR ] ), VPCZoneIdentifier=[Ref(sn) for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.WORKER)], DependsOn=[sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.WORKER)] ) )
def emit_configuration(): vpc = cfn.vpcs[0] region = Ref("AWS::Region") dbname = template.add_parameter( Parameter( "RDSDatabaseInstanceName", Default="reporting{0}".format(CLOUDENV), Description="Postgres Instance Name", Type="String", MinLength="1", MaxLength="63", AllowedPattern="[a-zA-Z][a-zA-Z0-9]*", ConstraintDescription="Must begin with a letter and contain only alphanumeric characters" ) ) dbuser = template.add_parameter( Parameter( "RDSDatabaseUser", Default="sa", Description="The database admin account username", Type="String", MinLength="1", MaxLength="63", AllowedPattern="[a-zA-Z][a-zA-Z0-9]*", ConstraintDescription="Must being with a letter and be alphanumeric" ) ) dbpassword = template.add_parameter( Parameter( "RDSDatabasePassword", NoEcho=True, Description="The database admin account password", Type="String", MinLength="1", MaxLength="41", AllowedPattern="[a-zA-Z0-9]*", ConstraintDescription="Must contain only alphanumeric characters.", Default="LeafLeaf123" ) ) dbclass = template.add_parameter( Parameter( "RDSInstanceClass", Default="db.t2.medium", Description="Database instance size", Type="String", AllowedValues=[ "db.t2.small", "db.t2.medium", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge" ] ) ) allocated_storage = template.add_parameter( Parameter( "RDSAllocatedStorage", Default="100", Description="The size of the Postgres Database (GB)", Type="Number", MinValue="5", MaxValue="512", ConstraintDescription="Must be between 5 and 512 GB" ) ) db_subnet_group = template.add_resource( DBSubnetGroup( "RDSSubnetGroup", DBSubnetGroupDescription="Subnets available for RDS in {0}".format(CLOUDNAME), SubnetIds=[Ref(sn) for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE)], DependsOn=[sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE)] ) ) ingress_rules = [ SecurityGroupRule( IpProtocol=p[0], CidrIp=DEFAULT_ROUTE, FromPort=p[1], ToPort=p[1] ) for p in [('tcp', 5432)]] security_group = template.add_resource( SecurityGroup( "RDSDatabaseSecurityGroup", GroupDescription="Security group for Postgres Instances", VpcId=Ref(vpc), SecurityGroupIngress=ingress_rules, DependsOn=vpc.title ) ) database = template.add_resource( DBInstance( "RDSPostgresInstance", DBInstanceIdentifier=Ref(dbname), AllocatedStorage=Ref(allocated_storage), DBInstanceClass=Ref(dbclass), Engine="postgres", EngineVersion="9.3.6", MasterUsername=Ref(dbuser), MasterUserPassword=Ref(dbpassword), DBSubnetGroupName=Ref(db_subnet_group), VPCSecurityGroups=[Ref(security_group)], DependsOn=[sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE)] ) ) template.add_output( Output( "ConnectionString", Description="JDBC connection string for Postgres", Value=Join("", [ GetAtt("RDSPostgresInstance", "Endpoint.Address"), GetAtt("RDSPostgresInstance", "Endpoint.Port") ]) ) )
def emit_configuration():
    vpc = cfn.vpcs[0]

    dbname = template.add_parameter(
        Parameter(
            'RedshiftDatabaseName',
            Description='The name of database to create within redshift',
            Type="String",
            Default="farragut",
            AllowedPattern="[a-z0-9]*",
            ConstraintDescription="Must be alphanumeric"
        )
    )

    clustertype = template.add_parameter(
        Parameter(
            'RedshiftClusterType',
            Description="The type of cluster to build",
            Type="String",
            Default="single-node",
            AllowedValues=["single-node", "multi-node"]
        )
    )

    numberofnodes = template.add_parameter(
        Parameter(
            "RedshiftNumberOfNodes",
            Description="The number of compute nodes in the redshift cluster. "
                        "When cluster type is specified as: 1) single-node, the NumberOfNodes "
                        "parameter should be specified as 1, 2) multi-node, the NumberOfNodes "
                        "parameter should be greater than 1",
            Type="Number",
            Default="1",
        )
    )

    nodetype = template.add_parameter(
        Parameter(
            "RedshiftNodeType",
            Description="The node type to be provisioned for the redshift cluster",
            Type="String",
            Default="dw2.large",
        )
    )

    masterusername = template.add_parameter(
        Parameter(
            "RedshiftMasterUsername",
            Description="The user name associated with the master user account for "
                        "the redshift cluster that is being created",
            Type="String",
            Default="sa",
            AllowedPattern="([a-z])([a-z]|[0-9])*"
        )
    )

    masteruserpassword = template.add_parameter(
        Parameter(
            "RedshiftMasterUserPassword",
            Description="The password associated with the master user account for the "
                        "redshift cluster that is being created.",
            Type="String",
            NoEcho=True,
            Default="LeafLeaf123"
        )
    )

    ingress_rules = [
        SecurityGroupRule(
            IpProtocol=p[0],
            CidrIp=DEFAULT_ROUTE,
            FromPort=p[1],
            ToPort=p[1]
        ) for p in [('tcp', 5439)]
    ]

    rs_security_group = template.add_resource(
        SecurityGroup(
            "RedshiftSecurityGroup",
            GroupDescription="SecurityGroup for the {0} Redshift cluster".format(CLOUDENV),
            VpcId=Ref(vpc),
            SecurityGroupIngress=ingress_rules,
            DependsOn=vpc.title
        )
    )

    cluster_subnet_group = template.add_resource(
        ClusterSubnetGroup(
            "RedshiftClusterSubnetGroup",
            Description="Redshift {0} cluster subnet group".format(CLOUDENV),
            SubnetIds=[Ref(sn) for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE)],
            DependsOn=[sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE)]
        )
    )

    conditions = {
        "IsMultiNodeCluster": Equals(Ref("RedshiftClusterType"), "multi-node"),
    }

    for k in conditions:
        template.add_condition(k, conditions[k])

    redshiftcluster = template.add_resource(
        Cluster(
            "RedshiftCluster",
            ClusterType=Ref("RedshiftClusterType"),
            NumberOfNodes=If("IsMultiNodeCluster",
                             Ref("RedshiftNumberOfNodes"),
                             Ref("AWS::NoValue")),
            NodeType=Ref("RedshiftNodeType"),
            DBName=Ref("RedshiftDatabaseName"),
            MasterUsername=Ref("RedshiftMasterUsername"),
            MasterUserPassword=Ref("RedshiftMasterUserPassword"),
            ClusterParameterGroupName=Ref("RedshiftClusterParameterGroup"),
            DeletionPolicy="Snapshot",
            ClusterSubnetGroupName=Ref(cluster_subnet_group),
            VpcSecurityGroupIds=[Ref("RedshiftSecurityGroup")],
            DependsOn=[cluster_subnet_group.title, rs_security_group.title]
        )
    )

    log_activity_parameter = AmazonRedshiftParameter(
        "AmazonRedshiftParameterEnableUserLogging",
        ParameterName="enable_user_activity_logging",
        ParameterValue="true",
    )

    redshiftclusterparametergroup = template.add_resource(
        ClusterParameterGroup(
            "RedshiftClusterParameterGroup",
            Description="Cluster parameter group",
            ParameterGroupFamily="redshift-1.0",
            Parameters=[log_activity_parameter],
        )
    )

    template.add_output(
        Output(
            "RedshiftClusterEndpoint",
            Value=Join(":", [
                GetAtt(redshiftcluster, "Endpoint.Address"),
                GetAtt(redshiftcluster, "Endpoint.Port")
            ]),
        )
    )