def __init__(self, key):
    LoadBalancers = []
    for n in cfg.LoadBalancerClassic:
        LoadBalancers.append(Ref(f'LoadBalancerClassic{n}'))

    TargetGroups = []
    for n in cfg.LoadBalancerApplication:
        TargetGroups.append(Ref(f'TargetGroup{n}'))

    # Resources
    AS_ScheduledActionsEC2('ScheduledAction')
    # AS_ScalingPoliciesEC2()
    LaunchConfiguration = AS_LaunchConfiguration()
    Tags = LaunchConfiguration.Tags

    R_ASG = ASAutoScalingGroup('AutoScalingGroup')
    R_ASG.LoadBalancerNames = LoadBalancers
    R_ASG.TargetGroupARNs = TargetGroups
    R_ASG.Tags.extend(Tags)
    R_ASG.Tags.extend([
        If('SpotAuto',
           asg.Tag('spot-enabled', 'true', True),
           Ref('AWS::NoValue')),
        If('SpotAutoMinOnDemandNumber',
           asg.Tag('autospotting_min_on_demand_number',
                   get_endvalue('SpotAutoMinOnDemandNumber'), True),
           Ref('AWS::NoValue')),
        If('SpotAutoAllowedInstances',
           asg.Tag('autospotting_allowed_instance_types',
                   get_endvalue('SpotAutoAllowedInstances'), True),
           Ref('AWS::NoValue')),
    ])

    R_ASGSpot = ASAutoScalingGroup('AutoScalingGroupSpot', spot=True)
    R_ASGSpot.LoadBalancerNames = LoadBalancers
    R_ASGSpot.TargetGroupARNs = TargetGroups
    R_ASGSpot.Tags.extend(Tags)

    # Notifications are currently not associated with any scaling action -
    # CloudWatch Events are used instead; that approach also works with
    # autospotting.
    try:
        cfg.NotificationConfiguration
    except Exception:
        pass
    else:
        NotificationConfiguration = ASNotificationConfiguration()
        R_ASG.NotificationConfigurations = [NotificationConfiguration]
        R_ASGSpot.NotificationConfigurations = [NotificationConfiguration]

    add_obj([
        R_ASG,
    ])

    if cfg.SpotASG:
        add_obj(R_ASGSpot)

    self.LaunchConfiguration = LaunchConfiguration
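# --- Example: conditional ASG tags ------------------------------------------
# A minimal, self-contained sketch of the If(..., Tag, AWS::NoValue) pattern
# used above: a tag is rendered only when its condition holds, otherwise it is
# dropped entirely. The 'SpotAuto' condition, parameter, and 'MyAsg' resource
# are hypothetical stand-ins; only the troposphere calls are assumed.
from troposphere import Template, Parameter, Ref, If, Equals
from troposphere import autoscaling as asg

t = Template()
t.add_parameter(Parameter('SpotEnabled', Type='String', Default='no'))
t.add_condition('SpotAuto', Equals(Ref('SpotEnabled'), 'yes'))
t.add_resource(asg.AutoScalingGroup(
    'MyAsg',
    MinSize=1,
    MaxSize=2,
    AvailabilityZones=['eu-west-1a'],  # placeholder AZ
    LaunchConfigurationName=Ref('LaunchConfiguration'),
    Tags=[
        asg.Tag('Name', 'my-role', True),
        # Emitted only when SpotAuto holds; otherwise replaced by AWS::NoValue.
        If('SpotAuto', asg.Tag('spot-enabled', 'true', True),
           Ref('AWS::NoValue')),
    ]))
print(t.to_json())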
def __init__(self, title, spot=None, **kwargs): super().__init__(title, **kwargs) if spot: CapacityDesiredASGMainIsSpot = get_endvalue('CapacityDesired') CapacityDesiredASGMainIsNotSpot = 0 CapacityMinASGMainIsSpot = get_endvalue('CapacityMin') CapacityMinASGMainIsNotSpot = 0 self.Condition = 'SpotASG' self.LaunchConfigurationName = Ref('LaunchConfigurationSpot') self.UpdatePolicy = ASUpdatePolicy(spot=True) else: CapacityDesiredASGMainIsSpot = 0 CapacityDesiredASGMainIsNotSpot = get_endvalue('CapacityDesired') CapacityMinASGMainIsSpot = 0 CapacityMinASGMainIsNotSpot = get_endvalue('CapacityMin') self.LaunchConfigurationName = Ref('LaunchConfiguration') self.UpdatePolicy = ASUpdatePolicy() self.AvailabilityZones = GetAZs() if cfg.SpotASG: self.DesiredCapacity = If('ASGMainIsSpot', CapacityDesiredASGMainIsSpot, CapacityDesiredASGMainIsNotSpot) self.MinSize = If('ASGMainIsSpot', CapacityMinASGMainIsSpot, CapacityMinASGMainIsNotSpot) else: self.DesiredCapacity = get_endvalue('CapacityDesired') self.MinSize = get_endvalue('CapacityMin') self.CreationPolicy = pol.CreationPolicy( ResourceSignal=pol.ResourceSignal( Count=self.DesiredCapacity, Timeout=get_endvalue('AutoscalingCreationTimeout'))) self.HealthCheckGracePeriod = get_endvalue('HealthCheckGracePeriod') self.HealthCheckType = get_endvalue('HealthCheckType') self.MaxSize = get_endvalue('CapacityMax') self.MetricsCollection = [asg.MetricsCollection(Granularity='1Minute')] self.Tags = [ asg.Tag(('Name'), Ref('EnvRole'), True), asg.Tag(('EnvStackName'), Ref('AWS::StackName'), True), ] self.TerminationPolicies = ['OldestInstance'] if cfg.VPCZoneIdentifier == 'SubnetsPublic': self.VPCZoneIdentifier = Split(',', get_expvalue('SubnetsPublic')) else: self.VPCZoneIdentifier = Split(',', get_expvalue('SubnetsPrivate'))
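# --- Example: shifting capacity between paired ASGs --------------------------
# Hedged sketch of the 'ASGMainIsSpot' toggle above: a single condition drives
# DesiredCapacity/MinSize so that exactly one of the paired groups holds the
# fleet at a time. Parameter, condition, and capacity values here are
# illustrative stand-ins.
from troposphere import Template, Parameter, Ref, If, Equals, GetAZs
from troposphere import autoscaling as asg

t = Template()
t.add_parameter(Parameter('MainIsSpot', Type='String', Default='no'))
t.add_condition('ASGMainIsSpot', Equals(Ref('MainIsSpot'), 'yes'))
t.add_resource(asg.AutoScalingGroup(
    'AutoScalingGroupOnDemand',
    AvailabilityZones=GetAZs(''),
    LaunchConfigurationName=Ref('LaunchConfiguration'),
    # Scales to zero when the spot group is the "main" one.
    DesiredCapacity=If('ASGMainIsSpot', 0, 2),
    MinSize=If('ASGMainIsSpot', 0, 1),
    MaxSize=4))
print(t.to_json())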
def create_auto_scaling_resources(self, worker_security_group, worker_lb): worker_launch_config_name = 'lcWorker' worker_launch_config = self.add_resource( asg.LaunchConfiguration( worker_launch_config_name, EbsOptimized=True, ImageId=Ref(self.worker_ami), IamInstanceProfile=Ref(self.worker_instance_profile), InstanceType=Ref(self.worker_instance_type), KeyName=Ref(self.keyname), SecurityGroups=[Ref(worker_security_group)], UserData=Base64(Join('', self.get_cloud_config())))) worker_auto_scaling_group_name = 'asgWorker' worker_asg = self.add_resource( asg.AutoScalingGroup( worker_auto_scaling_group_name, AvailabilityZones=Ref(self.availability_zones), Cooldown=300, DesiredCapacity=Ref(self.worker_auto_scaling_desired), HealthCheckGracePeriod=600, HealthCheckType='ELB', LaunchConfigurationName=Ref(worker_launch_config), LoadBalancerNames=[Ref(worker_lb)], MaxSize=Ref(self.worker_auto_scaling_max), MinSize=Ref(self.worker_auto_scaling_min), NotificationConfigurations=[ asg.NotificationConfigurations( TopicARN=Ref(self.notification_topic_arn), NotificationTypes=[ asg.EC2_INSTANCE_LAUNCH, asg.EC2_INSTANCE_LAUNCH_ERROR, asg.EC2_INSTANCE_TERMINATE, asg.EC2_INSTANCE_TERMINATE_ERROR ]) ], VPCZoneIdentifier=Ref(self.private_subnets), Tags=[asg.Tag('Name', 'Worker', True)])) self.add_resource( asg.ScheduledAction( 'schedWorkerAutoScalingStart', AutoScalingGroupName=Ref(worker_asg), DesiredCapacity=Ref( self.worker_auto_scaling_schedule_start_capacity), Recurrence=Ref( self.worker_auto_scaling_schedule_start_recurrence))) self.add_resource( asg.ScheduledAction( 'schedWorkerAutoScalingEnd', AutoScalingGroupName=Ref(worker_asg), DesiredCapacity=Ref( self.worker_auto_scaling_schedule_end_capacity), Recurrence=Ref( self.worker_auto_scaling_schedule_end_recurrence))) return worker_asg
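# --- Example: cron-driven capacity schedules ---------------------------------
# Hedged sketch of the ScheduledAction pair above: scale the worker group in
# overnight and back out on weekday mornings. The group name and recurrence
# strings are illustrative; inside the same template you would pass
# Ref(worker_asg) as in the function above.
from troposphere import Template
from troposphere import autoscaling as asg

t = Template()
t.add_resource(asg.ScheduledAction(
    'schedScaleInNightly',
    AutoScalingGroupName='asgWorker',
    DesiredCapacity=0,
    Recurrence='0 20 * * *'))      # 20:00 UTC, daily
t.add_resource(asg.ScheduledAction(
    'schedScaleOutMorning',
    AutoScalingGroupName='asgWorker',
    DesiredCapacity=2,
    Recurrence='0 6 * * 1-5'))     # 06:00 UTC, weekdays only
print(t.to_json())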
def add_autoscaling_ondemand(self):
    self.AutoscalingGroupOnDemand = self.template.add_resource(
        autoscaling.AutoScalingGroup(
            "AutoscalingGroupOnDemand",
            DesiredCapacity=self.sceptre_user_data["desired_capacity_ondemand"],
            LaunchConfigurationName=Ref(self.launchconfig_ondemand),
            MinSize=self.sceptre_user_data["minimum_capacity_ondemand"],
            MaxSize=self.sceptre_user_data["maximum_capacity_ondemand"],
            VPCZoneIdentifier=self.subnets,
            LoadBalancerNames=[Ref(self.LoadBalancer)],
            AvailabilityZones=GetAZs(""),
            HealthCheckType="ELB",
            HealthCheckGracePeriod=10,
            Tags=[
                autoscaling.Tag("Name", "web-server-ondemand", True),
                autoscaling.Tag("service", self.service_tag, True),
                autoscaling.Tag("lifecycle", "ondemand", True),
            ]))
def get_autoscaling_tags(self, service_override=None, role_override=None):
    """
    Get the default autoscaling tags for this environment.

    :param service_override: optional service name to tag in place of the
        template name
    :param role_override: optional role name to tag in place of this
        stack's name
    :return: list of troposphere.autoscaling.Tag objects, all propagated
        at launch
    """
    return [
        autoscaling.Tag('{}:environment'.format(constants.TAG),
                        self.env, True),
        autoscaling.Tag('{}:sysenv'.format(constants.TAG),
                        self.sysenv, True),
        autoscaling.Tag(
            '{}:service'.format(constants.TAG),
            service_override if service_override else self.template_name,
            True),
        autoscaling.Tag('{}:role'.format(constants.TAG),
                        role_override if role_override else self.name, True),
        autoscaling.Tag('{}:team'.format(constants.TAG),
                        self.TEAM['email'], True),
    ]
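# --- Example: consuming get_autoscaling_tags ---------------------------------
# Hypothetical call site. The third argument to autoscaling.Tag is
# PropagateAtLaunch, so every tag returned by the helper is copied onto the
# instances the group launches, not just onto the group itself.
tags = self.get_autoscaling_tags(service_override='Mesos',
                                 role_override='MesosMaster-us-east-1a')
tags.append(autoscaling.Tag('Name', 'mesos-master', True))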
def create_auto_scaling_resources(self, tile_server_security_group, tile_server_lb): tile_server_launch_config_name = 'lcTileServer' tile_server_launch_config = self.add_resource( asg.LaunchConfiguration( tile_server_launch_config_name, ImageId=Ref(self.tile_server_ami), IamInstanceProfile=Ref(self.tile_server_instance_profile), InstanceType=Ref(self.tile_server_instance_type), KeyName=Ref(self.keyname), SecurityGroups=[Ref(tile_server_security_group)], UserData=Base64( Join('', self.get_cloud_config())) )) tile_server_auto_scaling_group_name = 'asgTileServer' self.add_resource( asg.AutoScalingGroup( tile_server_auto_scaling_group_name, AvailabilityZones=Ref(self.availability_zones), Cooldown=300, DesiredCapacity=Ref(self.tile_server_auto_scaling_desired), HealthCheckGracePeriod=600, HealthCheckType='ELB', LaunchConfigurationName=Ref(tile_server_launch_config), LoadBalancerNames=[Ref(tile_server_lb)], MaxSize=Ref(self.tile_server_auto_scaling_max), MinSize=Ref(self.tile_server_auto_scaling_min), NotificationConfigurations=[ asg.NotificationConfigurations( TopicARN=Ref(self.notification_topic_arn), NotificationTypes=[ asg.EC2_INSTANCE_LAUNCH, asg.EC2_INSTANCE_LAUNCH_ERROR, asg.EC2_INSTANCE_TERMINATE, asg.EC2_INSTANCE_TERMINATE_ERROR ] ) ], VPCZoneIdentifier=Ref(self.private_subnets), Tags=[asg.Tag('Name', 'TileServer', True)] ) )
def create_asg(self,
               layer_name,
               instance_profile,
               instance_type=None,
               ami_name='ubuntu1404LtsAmiId',
               ec2_key=None,
               user_data=None,
               default_instance_type=None,
               security_groups=None,
               min_size=1,
               max_size=1,
               root_volume_size=None,
               root_volume_type=None,
               include_ephemerals=True,
               number_ephemeral_vols=2,
               ebs_data_volumes=None,  # e.g. [{'size': '100', 'type': 'gp2', 'delete_on_termination': True, 'iops': 4000, 'volume_type': 'io1'}]
               custom_tags=None,
               load_balancer=None,
               instance_monitoring=False,
               subnet_type='private',
               launch_config_metadata=None,
               creation_policy=None,
               update_policy=None,
               depends_on=None):
    '''
    Wrapper method used to create an EC2 Launch Configuration and Auto Scaling group
    @param layer_name [string] friendly name of the set of instances being created - will be set as the name for instances deployed
    @param instance_profile [Troposphere.iam.InstanceProfile] IAM Instance Profile object to be applied to instances launched within this Auto Scaling group
    @param instance_type [Troposphere.Parameter | string] reference to the AWS EC2 Instance Type to deploy
    @param ami_name [string] name of the AMI to deploy, as defined within the RegionMap lookup for the deployed region
    @param ec2_key [Troposphere.Parameter | Troposphere.Ref(Troposphere.Parameter)] input parameter used to gather the name of the EC2 key to use to secure access to instances launched within this Auto Scaling group
    @param user_data [string[]] array of strings (lines of bash script) to be set as the user data as a bootstrap script for instances launched within this Auto Scaling group
    @param default_instance_type [string - AWS Instance Type] AWS instance type to set as the default for the input parameter defining the instance type for this layer_name
    @param security_groups [Troposphere.ec2.SecurityGroup[]] array of security groups to be applied to instances within this Auto Scaling group
    @param min_size [int] value to set as the minimum number of instances for the Auto Scaling group
    @param max_size [int] value to set as the maximum number of instances for the Auto Scaling group
    @param root_volume_size [int] size (in GiB) to assign to the root volume of the launched instance
    @param root_volume_type [string] EBS volume type (e.g. 'gp2', 'io1') to assign to the root volume
    @param include_ephemerals [Boolean] indicates that ephemeral volumes should be included in the block device mapping of the Launch Configuration
    @param number_ephemeral_vols [int] number of ephemeral volumes to attach within the block device mapping Launch Configuration
    @param ebs_data_volumes [list] dictionaries of size and type data properties used to create EBS volume attachments
    @param custom_tags [Troposphere.autoscaling.Tag[]] collection of Auto Scaling tags to be assigned to the Auto Scaling group
    @param load_balancer [Troposphere.elasticloadbalancing.LoadBalancer] object reference to an ELB to be assigned to this Auto Scaling group
    @param instance_monitoring [Boolean] indicates that detailed monitoring should be turned on for all instances launched within this Auto Scaling group
    @param subnet_type [string {'public', 'private'}] string indicating which type of subnet (public or private) instances should be launched into
    '''
    if subnet_type not in ['public', 'private']:
        raise RuntimeError(
            'Unable to determine which type of subnet instances should be launched into. 
' + str(subnet_type) + ' is not one of ["public", "private"].') if ec2_key != None and type(ec2_key) != Ref: ec2_key = Ref(ec2_key) elif ec2_key == None: ec2_key = Ref(self.template.parameters['ec2Key']) if default_instance_type == None: default_instance_type = 'm1.small' if type(instance_type) != str: instance_type = Ref(instance_type) sg_list = [] for sg in security_groups: if isinstance(sg, Ref): sg_list.append(sg) else: sg_list.append(Ref(sg)) launch_config_obj = autoscaling.LaunchConfiguration( layer_name + 'LaunchConfiguration', IamInstanceProfile=Ref(instance_profile), ImageId=FindInMap('RegionMap', Ref('AWS::Region'), ami_name), InstanceType=instance_type, SecurityGroups=sg_list, KeyName=ec2_key, Metadata=(launch_config_metadata or None), InstanceMonitoring=instance_monitoring) if user_data != None: launch_config_obj.UserData = user_data block_devices = [] if root_volume_type != None and root_volume_size != None: ebs_device = ec2.EBSBlockDevice(VolumeSize=root_volume_size) if root_volume_type != None: ebs_device.VolumeType = root_volume_type block_devices.append( ec2.BlockDeviceMapping(DeviceName='/dev/sda1', Ebs=ebs_device)) device_names = ['/dev/sd%s' % c for c in 'bcdefghijklmnopqrstuvwxyz'] if ebs_data_volumes != None and len(ebs_data_volumes) > 0: for ebs_volume in ebs_data_volumes: device_name = device_names.pop() ebs_block_device = ec2.EBSBlockDevice( DeleteOnTermination=ebs_volume.get('delete_on_termination', True), VolumeSize=ebs_volume.get('size', '100'), VolumeType=ebs_volume.get('type', 'gp2')) if 'iops' in ebs_volume: ebs_block_device.Iops = int(ebs_volume.get('iops')) if 'snapshot_id' in ebs_volume: ebs_block_device.SnapshotId = ebs_volume.get('snapshot_id') block_devices.append( ec2.BlockDeviceMapping(DeviceName=device_name, Ebs=ebs_block_device)) if include_ephemerals and number_ephemeral_vols > 0: device_names.reverse() for x in range(0, number_ephemeral_vols): device_name = device_names.pop() block_devices.append( ec2.BlockDeviceMapping(DeviceName=device_name, VirtualName='ephemeral' + str(x))) if len(block_devices) > 0: launch_config_obj.BlockDeviceMappings = block_devices launch_config = self.template.add_resource(launch_config_obj) if depends_on: auto_scaling_obj = autoscaling.AutoScalingGroup( layer_name + 'AutoScalingGroup', AvailabilityZones=self.azs, LaunchConfigurationName=Ref(launch_config), MaxSize=max_size, MinSize=min_size, DesiredCapacity=min(min_size, max_size), VPCZoneIdentifier=self.subnets[subnet_type.lower()], TerminationPolicies=[ 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default' ], DependsOn=depends_on) else: auto_scaling_obj = autoscaling.AutoScalingGroup( layer_name + 'AutoScalingGroup', AvailabilityZones=self.azs, LaunchConfigurationName=Ref(launch_config), MaxSize=max_size, MinSize=min_size, DesiredCapacity=min(min_size, max_size), VPCZoneIdentifier=self.subnets[subnet_type.lower()], TerminationPolicies=[ 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default' ]) lb_tmp = [] if load_balancer is not None: try: if type(load_balancer) is dict: for lb in load_balancer: lb_tmp.append(Ref(load_balancer[lb])) elif type(load_balancer) is not Ref: for lb in load_balancer: lb_tmp.append(Ref(lb)) else: lb_tmp.append(load_balancer) except TypeError: lb_tmp.append(Ref(load_balancer)) else: lb_tmp = None if lb_tmp is not None and len(lb_tmp) > 0: auto_scaling_obj.LoadBalancerNames = lb_tmp if creation_policy is not None: auto_scaling_obj.resource['CreationPolicy'] = creation_policy if update_policy is not None: 
auto_scaling_obj.resource['UpdatePolicy'] = update_policy if custom_tags != None and len(custom_tags) > 0: if type(custom_tags) != list: custom_tags = [custom_tags] auto_scaling_obj.Tags = custom_tags else: auto_scaling_obj.Tags = [] auto_scaling_obj.Tags.append(autoscaling.Tag('Name', layer_name, True)) return self.template.add_resource(auto_scaling_obj)
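# --- Example: calling the create_asg wrapper above ----------------------------
# A hypothetical call site; every argument value here (profile, parameter,
# security group, ELB, sizes) is an illustrative stand-in rather than a name
# from the original codebase.
web_asg = self.create_asg(
    'Web',
    instance_profile=web_instance_profile,
    instance_type=web_instance_type_param,
    security_groups=[web_security_group],
    min_size=2,
    max_size=6,
    root_volume_size=50,
    root_volume_type='gp2',
    ebs_data_volumes=[{'size': '100', 'type': 'gp2',
                       'delete_on_termination': True}],
    load_balancer=web_elb,
    subnet_type='private',
    custom_tags=[autoscaling.Tag('service', 'web', True)])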
def setup_vpn(config, template): stack = config['stack'] region = config['region'] vpc_name = config['vpc'] public_subnets = [] private_subnets = [] customer_gateways = [] nat_ec2_instances = [] if region == None: print_err('%(stack)s: missing region\n' % locals()) sys.exit(1) vpcs_file = read_yaml_file('configuration/vpcs.yaml') vpcs = vpcs_file['vpcs'] connections = vpcs_file['connections'] eips = read_yaml_file('configuration/eips.yaml') # NOTE: we look for the base VPC in 'vpcs' and in eips # EIP's are allocated per VPC, since it's easier to manage if vpc_name not in vpcs: print_err('%(vpc_name)s: not found in vpcs\n' % locals()) sys.exit(1) if vpc_name not in eips: print_err( '%(stack)s: not found in eips; execute "scripts/manage-eips"\n' % locals()) sys.exit(1) vpc_id = get_vpc_id(vpc_name, region) incoming_connections = map( lambda x: x.keys()[0] if isinstance(x, dict) else x, list( itertools.chain.from_iterable( x['from'] for x in connections.values() if 'to' in x and vpc_name in x['to']))) outgoing_connections = map( lambda x: x.keys()[0] if isinstance(x, dict) else x, list( itertools.chain.from_iterable( x['to'] for x in connections.values() if 'from' in x and vpc_name in x['from']))) # if we expect incoming VPN connections then setup a VPN gateway if incoming_connections: vpn_gateway = template.add_resource( ec2.VPNGateway( 'VpnGateway', Type='ipsec.1', Tags=Tags( Name=stack, VPC=vpc_name, ), )) vpn_gateway_attachment = template.add_resource( ec2.VPCGatewayAttachment( 'VpcGatewayAttachment', VpcId=vpc_id, VpnGatewayId=Ref(vpn_gateway), )) vpn_gateway_route_propegation = template.add_resource( ec2.VPNGatewayRoutePropagation( 'VpnGatewayRoutePropagation', RouteTableIds=get_route_table_ids(vpc_id, region), VpnGatewayId=Ref(vpn_gateway), DependsOn=Name(vpn_gateway_attachment), )) for index, connection_from in enumerate(incoming_connections, 1): if connection_from not in vpcs: print_err( '%(stack)s: vpn from "%(connection_from)s" not found in vpcs\n' % locals()) sys.exit(1) if connection_from not in eips: print_err( '%(stack)s: vpn from "%(connection_from)s" not found in eips\n' % locals()) sys.exit(1) alphanumeric_id = ''.join( [y.title() for y in connection_from.split('-')]) customer_gateway = template.add_resource( ec2.CustomerGateway( alphanumeric_id + 'CGW', BgpAsn=vpcs[connection_from]['bgp_asn'], IpAddress=eips[connection_from]['public_ip'], Type='ipsec.1', Tags=Tags( Name='%(connection_from)s to %(stack)s' % locals(), VPC=vpc_name, ), )) vpn_connection = template.add_resource( ec2.VPNConnection( alphanumeric_id + 'VPNConnection', # We want this to always be 'False', for BGP StaticRoutesOnly=config['static_routing'], Type='ipsec.1', VpnGatewayId=Ref(vpn_gateway), CustomerGatewayId=Ref(customer_gateway), Tags=Tags( Name='%s CGW: IP %s' % (connection_from, eips[connection_from]['public_ip']), # The Tag 'RemoteVPC' is queried by # configuration process on the remote VPC's NAT # instance to identify the Virtual Connection they # should connect to. 
# It refers to the VPC stack name, not the WAN stack name RemoteVPC=connection_from, RemoteIp=eips[connection_from]['public_ip'], VPC=vpc_name, ), )) # Add static routes to the subnets behind each incoming VPN connection # NOTE: Can't be used when StaticRoutesOnly is False (which is required # when using BGP) if config['static_routing']: vpn_connection_static_route = template.add_resource( ec2.VPNConnectionRoute( '%(connection_from)s Static Route' % locals(), VpnConnectionId=Ref(vpn_connection), DestinationCidrBlock=vpcs[connection_from]['cidr'], )) customer_gateways.append(customer_gateway) else: vpn_gateway = None if outgoing_connections: if not region in config['nat']['ami_id']: print_err('AMI ID not configured for region "%(region)s"\n' % locals()) sys.exit(1) nat_sg = template.add_resource( ec2.SecurityGroup( 'NatSg', VpcId=vpc_id, GroupDescription='%(stack)s router Security Group' % locals(), SecurityGroupEgress=[ ec2.SecurityGroupRule( CidrIp='0.0.0.0/0', IpProtocol='-1', FromPort='-1', ToPort='-1', ) ], SecurityGroupIngress= # Allow all traffic from internal networks map( lambda cidr: ec2.SecurityGroupRule(CidrIp=cidr, IpProtocol='-1', FromPort='-1', ToPort='-1'), ['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16']) + # Allow all traffic from all other locations on our WAN map( lambda eip: ec2.SecurityGroupRule( CidrIp=eips[eip]['public_ip'] + '/32', IpProtocol='-1', FromPort='-1', ToPort='-1'), eips.keys()) + # Optional extra traffic sources map( lambda cidr: ec2.SecurityGroupRule(CidrIp=cidr, IpProtocol='-1', FromPort='-1', ToPort='-1'), config['nat']['extra_ingress_sources'] or {}), Tags=Tags(Name='%(stack)s router' % locals(), ), )) if 'openvpn_server' in config and config['openvpn_server']: nat_sg.SecurityGroupIngress.append( ec2.SecurityGroupRule( CidrIp='0.0.0.0/0', IpProtocol='udp', FromPort='1194', ToPort='1194', )) if 'external_tld' in config: template.add_resource( route53.RecordSetType( 'OpenVpnDnsRecord', Comment='%(stack)s OpenVPN server' % locals(), HostedZoneName=config['external_tld'] + '.', Name='%s.%s.' 
% (vpc_name, config['external_tld']), ResourceRecords=[eips[vpc_name]['public_ip']], TTL='900', Type='A')) assume_role_policy_statement = awacs.aws.Policy(Statement=[ awacs.aws.Statement( Effect=awacs.aws.Allow, Principal=awacs.aws.Principal(principal='Service', resources=['ec2.amazonaws.com']), Action=[awacs.sts.AssumeRole], ) ]) root_role = template.add_resource( iam.Role( 'RootRole', AssumeRolePolicyDocument=assume_role_policy_statement, Path='/', )) root_role_policy = template.add_resource( iam.PolicyType( 'RootRolePolicy', PolicyName='AllowAllPolicy', PolicyDocument={ 'Version': '2012-10-17', 'Statement': [{ 'Action': '*', 'Effect': 'Allow', 'Resource': '*', }] }, Roles=[Ref(root_role)], )) root_instance_profile = template.add_resource( iam.InstanceProfile( 'RootInstanceProfile', Path='/', Roles=[Ref(root_role)], )) for index, egress_config in enumerate(config['nat']['sg_egress_rules'], 1): template.add_resource( ec2.SecurityGroupEgress( 'NatSgEgressRule%d' % index, ToPort=egress_config['port'], FromPort=egress_config['port'], IpProtocol=egress_config['protocol'], CidrIp=egress_config['cidr'], GroupId=Ref(nat_sg), )) launch_configuration = template.add_resource( autoscaling.LaunchConfiguration( 'Ec2NatLaunchConfiguration', AssociatePublicIpAddress=True, SecurityGroups=[Ref(nat_sg)], IamInstanceProfile=Ref(root_instance_profile), ImageId=config['nat']['ami_id'][region], KeyName=config['nat']['key_name'], InstanceType=config['nat']['instance_type'], UserData=build_user_data(stack), )) AutoScalingGroup = template.add_resource( autoscaling.AutoScalingGroup( 'AutoScalingGroup', VPCZoneIdentifier=get_public_subnet_ids(vpc_id, region), TerminationPolicies=['ClosestToNextInstanceHour'], MinSize=1, MaxSize=2, ##### # TODO: Have to find a way for VyOS to send the signal without # having access to cfn-signal script (old python version) # That's also the reason we allow one instance - since ha-nat # can't send the signal #### # CreationPolicy=policies.CreationPolicy( # ResourceSignal=policies.ResourceSignal( # Count=2, # Timeout='PT10M', # ), # ), LaunchConfigurationName=Ref(launch_configuration), HealthCheckType='EC2', UpdatePolicy=policies.UpdatePolicy( AutoScalingRollingUpdate=policies.AutoScalingRollingUpdate( MaxBatchSize=1, MinInstancesInService=1, PauseTime='PT2M', # TODO: switch to 'True' when we teach VyOS to send signal WaitOnResourceSignals=False, )), Tags=[ autoscaling.Tag('Name', stack + ' router', True), autoscaling.Tag('VPC', vpc_name, True), # Just have to be unique for this provisioning run, could # be any unique string autoscaling.Tag( 'Version', datetime.datetime.utcnow().strftime( '%Y-%m-%d %H:%M:%S.%f'), True), ], ))
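# --- Example: Python 3 equivalent of the NAT SG ingress list ------------------
# setup_vpn() above builds SecurityGroupIngress with Python 2 map() plus list
# concatenation; a hedged sketch of the same rules using comprehensions,
# reusing the eips/config shapes assumed by the function above:
internal_cidrs = ['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16']
ingress = [ec2.SecurityGroupRule(CidrIp=cidr, IpProtocol='-1',
                                 FromPort='-1', ToPort='-1')
           for cidr in internal_cidrs]
# Allow all traffic from every other location on the WAN
ingress += [ec2.SecurityGroupRule(CidrIp=eips[name]['public_ip'] + '/32',
                                  IpProtocol='-1', FromPort='-1', ToPort='-1')
            for name in eips]
# Optional extra traffic sources
ingress += [ec2.SecurityGroupRule(CidrIp=cidr, IpProtocol='-1',
                                  FromPort='-1', ToPort='-1')
            for cidr in (config['nat']['extra_ingress_sources'] or [])]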
def AS_LaunchTemplate(): cfg.use_cfn_init = True InitConfigSets = ASInitConfigSets() CfmInitArgs = {} IBoxEnvApp = [] Tags_List = [] UserDataApp = [] for n in cfg.Apps: name = f"Apps{n}" # Ex. Apps1 envname = f"EnvApp{n}Version" # Ex EnvApp1Version reponame = f"{name}RepoName" # Ex Apps1RepoName UserDataApp.extend(["#${%s}\n" % envname]) p_EnvAppVersion = Parameter( envname, Description=f"Application {n} version", AllowedPattern="^[a-zA-Z0-9-_.]*$", ) p_AppsRepoName = Parameter( reponame, Description=f"App {n} Repo Name - empty for default based on env/role", AllowedPattern="^[a-zA-Z0-9-_.]*$", ) # parameters add_obj( [ p_EnvAppVersion, p_AppsRepoName, ] ) # conditions add_obj( { name: And( Not(Equals(Ref(envname), "")), Not(get_condition("", "equals", "None", reponame)), ) } ) InitConfigApps = ASInitConfigApps(name) CfmInitArgs[name] = InitConfigApps InitConfigAppsBuilAmi = ASInitConfigAppsBuildAmi(name) # AUTOSPOT - Let cfn-init always prepare instances on boot # CfmInitArgs[name + 'BuildAmi'] = InitConfigAppsBuilAmi CfmInitArgs[name] = InitConfigAppsBuilAmi IBoxEnvApp.extend( [ f"export EnvApp{n}Version=", Ref(envname), "\n", f"export EnvRepo{n}Name=", get_endvalue(reponame), "\n", ] ) InitConfigSetsApp = If(name, name, Ref("AWS::NoValue")) InitConfigSetsAppBuilAmi = If(name, f"{name}BuildAmi", Ref("AWS::NoValue")) IndexSERVICES = InitConfigSets.data["default"].index("SERVICES") InitConfigSets.data["default"].insert(IndexSERVICES, InitConfigSetsApp) # AUTOSPOT - Let cfn-init always prepare instances on boot # InitConfigSets.data['buildamifull'].append( # InitConfigSetsAppBuilAmi) InitConfigSets.data["buildamifull"].append(InitConfigSetsApp) Tags_List.append(asg.Tag(envname, Ref(envname), True)) # outputs Output_app = Output(envname, Value=Ref(envname)) Output_repo = Output(reponame, Value=get_endvalue(reponame)) add_obj([Output_app, Output_repo]) InitConfigSetup = ASInitConfigSetup() InitConfigSetup.ibox_env_app = IBoxEnvApp InitConfigSetup.setup() InitConfigCodeDeploy = ASInitConfigCodeDeploy() CfmInitArgs["SETUP"] = InitConfigSetup CfmInitArgs["CWAGENT"] = ASInitConfigCloudWatchAgent("") if cfg.CodeDeploy: CfmInitArgs["CODEDEPLOY"] = InitConfigCodeDeploy if not getattr(cfg, "IBOX_LAUNCH_TEMPLATE_NO_WAIT_ELB_HEALTH", False): for lb in cfg.LoadBalancer: # LoadBalancerClassic if cfg.LoadBalancerType == "Classic": InitConfigELB = ASInitConfigELBClassic(scheme=lb) CfmInitArgs["ELBWAITER"] = InitConfigELB # LoadBalancerApplication if cfg.LoadBalancerType == "Application": InitConfigELB = ASInitConfigELBApplication(scheme=lb) CfmInitArgs["ELBWAITER"] = InitConfigELB # LoadBalancerNetwork if cfg.LoadBalancerType == "Network": for k in cfg.Listeners: InitConfigELB = ASInitConfigELBApplication( scheme=f"TargetGroupListeners{k}{lb}" ) CfmInitArgs["ELBWAITER"] = InitConfigELB if getattr(cfg, "IBOX_LAUNCH_TEMPLATE_NO_SG_EXTRA", False): SecurityGroups = [] else: SecurityGroups = cfg.SecurityGroupsImport # Resources R_LaunchTemplate = ec2.LaunchTemplate( "LaunchTemplate", LaunchTemplateName=Sub("${AWS::StackName}-${EnvRole}"), LaunchTemplateData=ASLaunchTemplateData( "LaunchTemplateData", UserDataApp=UserDataApp ), ) R_LaunchTemplate.LaunchTemplateData.NetworkInterfaces[0].Groups.extend( SecurityGroups ) # Import role specific cfn definition try: # Do not use role but direct cfg yaml configuration (ecs + cluster) cfn_envrole = f"cfn_{cfg.IBOX_ROLE_EX}" except Exception: cfn_envrole = f"cfn_{cfg.envrole}" cfn_envrole = cfn_envrole.replace("-", "_") if cfn_envrole in globals(): # Ex cfn_client_portal 
CfnRole = globals()[cfn_envrole]() CfmInitArgs.update(CfnRole) if cfg.use_cfn_init: R_LaunchTemplate.Metadata = cfm.Metadata( { "CloudFormationInitVersion": If( "CloudFormationInit", Ref("EnvStackVersion"), Ref("AWS::NoValue"), ) }, cfm.Init(InitConfigSets, **CfmInitArgs), cfm.Authentication( { "CfnS3Auth": cfm.AuthenticationBlock( type="S3", buckets=[ Sub(cfg.BucketNameAppRepository), Sub(cfg.BucketNameAppData), ], roleName=Ref("RoleInstance"), ) } ), ) add_obj(R_LaunchTemplate) Tags = asg.Tags() Tags.tags = Tags_List return Tags
def configure(self): """ This template creates a mesos-master per subnet in the VPC """ config = constants.ENVIRONMENTS[self.env]['mesos']['master'] self.defaults = { 'instance_type': config.get('instance_type', 't3.large') } self.add_description('Sets up Mesos Masters in all Zones') self.get_eni_policies() self.get_default_security_groups() self.get_standard_parameters() self.get_standard_policies() _global_config = constants.ENVIRONMENTS[self.env] self.ami = self.add_parameter( Parameter('AMI', Type='String', Description='AMI ID for instances', Default=get_latest_ami_id( self.region, 'ivy-mesos', _global_config.get('ami_owner', 'self')))) _mesos_master_security_group = self.add_resource( ec2.SecurityGroup( 'MesosMasterSecurityGroup', VpcId=self.vpc_id, GroupDescription='Security Group for MesosMaster Instances', SecurityGroupIngress=[ { 'IpProtocol': 'tcp', 'FromPort': 2181, 'ToPort': 2181, 'CidrIp': self.vpc_cidr }, # zk { 'IpProtocol': 'tcp', 'FromPort': 4400, 'ToPort': 4400, 'CidrIp': self.vpc_cidr }, # chronos { 'IpProtocol': 'tcp', 'FromPort': 5050, 'ToPort': 5051, 'CidrIp': self.vpc_cidr }, # mesos { 'IpProtocol': 'tcp', 'FromPort': 8080, 'ToPort': 8080, 'CidrIp': self.vpc_cidr }, # marathon { 'IpProtocol': 'tcp', 'FromPort': 8500, 'ToPort': 8500, 'CidrIp': self.vpc_cidr }, # consul ui { 'IpProtocol': 'tcp', 'FromPort': 8300, 'ToPort': 8301, 'CidrIp': self.vpc_cidr }, # consul rpc/lan serf { 'IpProtocol': 'tcp', 'FromPort': 8302, 'ToPort': 8302, 'CidrIp': constants.SUPERNET }, # consul wan serf { 'IpProtocol': 'udp', 'FromPort': 8300, 'ToPort': 8301, 'CidrIp': self.vpc_cidr }, # consul rpc/lan serf (udp) { 'IpProtocol': 'udp', 'FromPort': 8302, 'ToPort': 8302, 'CidrIp': constants.SUPERNET }, # consul wan serf (udp) ], SecurityGroupEgress=[{ 'IpProtocol': '-1', 'FromPort': 0, 'ToPort': 65535, 'CidrIp': '0.0.0.0/0' }])) self.add_resource( ec2.SecurityGroupIngress( 'MesosMasterIngressSecurityGroup', GroupId=Ref(_mesos_master_security_group), IpProtocol='-1', FromPort=-1, ToPort=-1, SourceSecurityGroupId=Ref(_mesos_master_security_group) # this allows members all traffic (for replication) )) self.add_security_group(Ref(_mesos_master_security_group)) masters = [(index, ip) for index, ip in enumerate(config['masters'], 1)] subnets = self.get_subnets('private') for master in masters: zone_index, master_ip = master subnet = [ s for s in subnets if netaddr.IPAddress(master_ip) in netaddr.IPNetwork(s['CidrBlock']) ][0] _mesos_master_eni = ec2.NetworkInterface( 'MesosMasterInstanceENI{}'.format( subnet['AvailabilityZone'][-1]), Description='ENI for Mesos Master ENV: {0} PrivateSubnet {1}'. format(self.env, subnet['SubnetId']), GroupSet=self.security_groups, PrivateIpAddress=master_ip, SourceDestCheck=True, SubnetId=subnet['SubnetId'], Tags=self.get_tags(service_override="Mesos", role_override='MesosMaster-{}'.format( subnet['AvailabilityZone']))) self.add_resource(_mesos_master_eni) _user_data_template = self.get_cloudinit_template(replacements=( ('__PROMPT_COLOR__', self.prompt_color()), ('__ENI_IP__', master_ip), ('__ZK_SERVER_ID__', zone_index), ('__HOSTS_ENTRIES__', '\n'.join([ '{0} mesos-master-{1}.node.{2}.{3} mesos-master-{1}'. 
format(ip, index, self.env, constants.TAG) for index, ip in masters ])), ('__ZK_CONNECT__', ','.join(['{}:2181'.format(z[1]) for z in masters])), ('__ZK_PEERS__', '\n'.join([ 'server.{0}={1}:2888:3888'.format(index, ip) for index, ip in masters ])))) _user_data = Sub( _user_data_template.replace( '${', '${!') # Replace bash brackets with CFN escaped style .replace( '{#', '${' ), # Replace rain-style CFN escapes with proper CFN brackets { 'CFN_ENI_ID': Ref(_mesos_master_eni), }) _mesos_master_launch_configuration = self.add_resource( autoscaling.LaunchConfiguration( 'MesosMasterLaunchConfiguration{}'.format( subnet['AvailabilityZone'][-1]), AssociatePublicIpAddress=False, BlockDeviceMappings=get_block_device_mapping( self.parameters['InstanceType'].resource['Default']), SecurityGroups=self.security_groups, KeyName=Ref(self.keypair_name), ImageId=Ref(self.ami), InstanceType=Ref(self.instance_type), InstanceMonitoring=False, IamInstanceProfile=Ref(self.instance_profile), UserData=Base64(_user_data))) self.add_resource( autoscaling.AutoScalingGroup( 'MesosMasterASGroup{}'.format( subnet['AvailabilityZone'][-1]), AvailabilityZones=[subnet['AvailabilityZone']], HealthCheckType='EC2', LaunchConfigurationName=Ref( _mesos_master_launch_configuration), MinSize=0, MaxSize=1, # DesiredCapacity=1, VPCZoneIdentifier=[subnet['SubnetId']], Tags=self.get_autoscaling_tags( service_override="MesosMaster", role_override='MesosMaster-{}'.format( subnet['AvailabilityZone'])) + [ autoscaling.Tag( 'Name', '{}Mesos-Master-{}'.format( self.env, subnet['AvailabilityZone']), True), # tag to allow consul to discover the hosts # autoscaling.Tag('{}:consul_master'.format(constants.TAG), self.env, True) ]))
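# --- Example: the user-data escaping used above --------------------------------
# Inside Fn::Sub a literal shell ${VAR} must be escaped as ${!VAR}, while the
# rain-style '{#NAME}' markers become real substitutions. A tiny sketch of the
# same double replace; the ENI logical ID is a hypothetical stand-in:
from troposphere import Ref, Sub

raw = 'echo "eni={#CFN_ENI_ID} home=${HOME}"'
user_data = Sub(
    raw.replace('${', '${!')   # escape bash ${HOME} -> ${!HOME} for Fn::Sub
       .replace('{#', '${'),   # turn {#CFN_ENI_ID} into a real ${CFN_ENI_ID}
    {'CFN_ENI_ID': Ref('MesosMasterInstanceENIa')})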
HealthCheckType='ELB', LaunchConfigurationName=Ref(tiler_launch_config), LoadBalancerNames=[Ref(tiler_load_balancer)], MaxSize=10, MinSize=1, NotificationConfigurations=[asg.NotificationConfigurations( TopicARN=Ref(notification_arn_param), NotificationTypes=[ asg.EC2_INSTANCE_LAUNCH, asg.EC2_INSTANCE_LAUNCH_ERROR, asg.EC2_INSTANCE_TERMINATE, asg.EC2_INSTANCE_TERMINATE_ERROR ] )], VPCZoneIdentifier=Ref(private_subnets_param), Tags=[asg.Tag('Name', 'Tiler', True)] )) # # CloudWatch Resources # t.add_resource(cw.Alarm( 'alarmTilerBackend4XX', AlarmDescription='Tiler API server backend 4XXs', AlarmActions=[Ref(notification_arn_param)], Statistic='Sum', Period=300, Threshold='20', EvaluationPeriods=1, ComparisonOperator='GreaterThanThreshold', MetricName='HTTPCode_Backend_4XX',
' protocol: TCP\n', ' hostIP: $private_ipv4\n', ' - name: kube2consul\n', ' image: jmccarty3/kube2consul:latest\n', ' command:\n', ' - /kube2consul\n', ' - -consul-agent=http://127.0.0.1:8500\n', ' - -kube_master_url=http://127.0.0.1:8080\n', ])), )) AUTO_SCALING_GROUP = TEMPLATE.add_resource( autoscaling.AutoScalingGroup( 'AutoScalingGroup', DesiredCapacity='1', Tags=[autoscaling.Tag('Name', 'Kubernetes Master', True)], LaunchConfigurationName=Ref(LAUNCH_CONFIGURATION), LoadBalancerNames=[ Ref(API_SERVER_LOAD_BALANCER), Ref(CONSUL_HTTP_API_LOAD_BALANCER) ], MinSize='1', MaxSize='3', VPCZoneIdentifier=[Ref(SUBNET)], UpdatePolicy=policies.UpdatePolicy( AutoScalingRollingUpdate=policies.AutoScalingRollingUpdate( MinInstancesInService='1', MaxBatchSize='1', ), ), ))
def __init__(self, name='HaCluster', ami_name='amazonLinuxAmiId', user_data='', env_vars={}, min_size=1, max_size=1, desired_capacity=DEFAULT_TO_MIN_SIZE, instance_type='t2.micro', subnet_layer=None, elb_scheme=SCHEME_INTERNET_FACING, elb_listeners=[{ 'elb_protocol': 'HTTP', 'elb_port': 80 }], elb_health_check_port=None, elb_health_check_protocol='TCP', elb_health_check_path='', elb_idle_timeout=None, update_policy_PauseTime='PT1M', update_policy_MinInstancesInService=0, update_policy_MaxBatchSize=1, cname='', custom_tags={}, elb_custom_tags={}, scaling_policies=None, creation_policy_timeout=None, allow_default_ingress=True): # This will be the name used in resource names and descriptions self.name = name # This is the name used to identify the AMI from the ami_cache.json file self.ami_name = ami_name # This is the contents of the userdata script as a string self.user_data = user_data # This is a dictionary of environment variables to inject into the instances self.env_vars = env_vars # These define the lower and upper boundaries of the autoscaling group self.min_size = min_size self.max_size = max_size self.desired_capacity = desired_capacity # The type of instance for the autoscaling group self.instance_type = instance_type # This is the subnet layer that the ASG is in (public, private, ...) self.subnet_layer = subnet_layer # This is the type of ELB: internet-facing gets a publicly accessible DNS, while internal is only accessible to the VPC self.elb_scheme = elb_scheme # This should be a list of dictionaries defining each listener for the ELB # Each dictionary can contain elb_port [required], elb_protocol, instance_port, instance_protocol, ssl_cert_name self.elb_listeners = elb_listeners # This is the health check port for the cluster self.elb_health_check_port = elb_health_check_port # The ELB health check protocol for the cluster (HTTP, HTTPS, TCP, SSL) self.elb_health_check_protocol = elb_health_check_protocol # The ELB health check path for the cluster (Only for HTTP and HTTPS) self.elb_health_check_path = elb_health_check_path # Add a creation policy with a custom timeout if one was specified if creation_policy_timeout: self.creation_policy = CreationPolicy( ResourceSignal=ResourceSignal( Timeout='PT' + str(creation_policy_timeout) + 'M')) else: self.creation_policy = None # Add update policy self.update_policy = UpdatePolicy( AutoScalingRollingUpdate=AutoScalingRollingUpdate( PauseTime=update_policy_PauseTime, MinInstancesInService=update_policy_MinInstancesInService, MaxBatchSize=update_policy_MaxBatchSize, # WaitOnResourceSignals=True )) # The Idle Timeout for the ELB (how long your connection can stay idle before being terminated) self.elb_idle_timeout = elb_idle_timeout # This is an optional fully qualified DNS name to create a CNAME in a private hosted zone self.cname = cname # Translate the custom_tags dict to a list of autoscaling Tags self.custom_tags = [] for key, value in custom_tags.iteritems(): self.custom_tags.append(autoscaling.Tag(key, value, True)) ## Save ELB tags for add_cluster_elb self.elb_custom_tags = elb_custom_tags # A list of dictionaries describing scaling policies to be passed to add_asg self.scaling_policies = scaling_policies # Indicates whether ingress rules should be added to the ELB for type-appropriate CIDR ranges # Internet facing ELBs would allow ingress from PUBLIC_ACCESS_CIDR and private ELBs will allow ingress from the VPC CIDR self.allow_default_ingress = allow_default_ingress super(HaCluster, self).__init__(template_name=self.name)
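# --- Example: instantiating HaCluster ------------------------------------------
# A hypothetical construction showing how the keyword arguments above fit
# together; every value here is illustrative only.
cluster = HaCluster(
    name='ApiCluster',
    ami_name='amazonLinuxAmiId',
    min_size=2,
    max_size=6,
    instance_type='t3.medium',
    subnet_layer='private',
    elb_scheme=SCHEME_INTERNET_FACING,
    elb_listeners=[{'elb_protocol': 'HTTPS', 'elb_port': 443,
                    'instance_port': 8080, 'instance_protocol': 'HTTP'}],
    elb_health_check_port=8080,
    elb_health_check_protocol='HTTP',
    elb_health_check_path='/healthz',
    custom_tags={'team': 'platform'})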
def __init__(self, parameters, vpc, loadbalancer): """ :type parameters Parameters :type vpc VPC :type loadbalancer LoadBalancer """ super(EC2, self).__init__() # Ec2 instance self.instance_role = iam.Role( "InstanceRole", AssumeRolePolicyDocument=aws.Policy(Statement=[ aws.Statement(Effect=aws.Allow, Action=[sts.AssumeRole], Principal=aws.Principal("Service", ["ec2.amazonaws.com"])) ]), Path="/", ) self.instance_role_policy = iam.PolicyType( "InstanceRolePolicy", PolicyName=Join("-", [Ref("AWS::StackName"), "instance-policy"]), PolicyDocument=aws.Policy(Statement=[ aws.Statement(Effect=aws.Allow, Action=[ aws.Action("logs", "CreateLogGroup"), aws.Action("logs", "CreateLogStream"), aws.Action("logs", "PutLogEvents"), aws.Action("logs", "DescribeLogStreams"), ], Resource=["arn:aws:logs:*:*:*"]) ]), Roles=[Ref(self.instance_role)]) self.instance_profile = iam.InstanceProfile( "InstanceProfile", Path="/", Roles=[Ref(self.instance_role)]) self.launch_configuration = autoscaling.LaunchConfiguration( "LaunchConfiguration", ImageId=FindInMap("AMIMap", Ref(AWS_REGION), "AMI"), InstanceType=Ref(parameters.ec2_instance_type), KeyName=Ref(parameters.key_pair), InstanceMonitoring=True, SecurityGroups=[ GetAtt(loadbalancer.instance_security_group, "GroupId"), ], IamInstanceProfile=Ref(self.instance_profile), ) self.auto_scaling_group = autoscaling.AutoScalingGroup( "AutoScalingGroup", LaunchConfigurationName=Ref(self.launch_configuration), MinSize=1, DesiredCapacity=1, MaxSize=10, HealthCheckType='ELB', HealthCheckGracePeriod=300, VPCZoneIdentifier=[ Ref(vpc.public_subnet_1), Ref(vpc.public_subnet_2) ], LoadBalancerNames=[Ref(loadbalancer.load_balancer)], Tags=[autoscaling.Tag("Name", Ref("AWS::StackName"), True)], UpdatePolicy=policies.UpdatePolicy( AutoScalingRollingUpdate=policies.AutoScalingRollingUpdate( PauseTime="PT30S", MinInstancesInService=1, MaxBatchSize=10, WaitOnResourceSignals=False)), TerminationPolicies=[ 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default' ], MetricsCollection=[ autoscaling.MetricsCollection(Granularity="1Minute") ]) self.scale_up_policy = autoscaling.ScalingPolicy( "ScaleUPPolicy", AdjustmentType='ChangeInCapacity', AutoScalingGroupName=Ref(self.auto_scaling_group), PolicyType='StepScaling', MetricAggregationType='Average', StepAdjustments=[ autoscaling.StepAdjustments(MetricIntervalLowerBound=0, ScalingAdjustment=1) ], ) self.scale_down_policy = autoscaling.ScalingPolicy( "ScaleDOWNPolicy", AdjustmentType='ChangeInCapacity', AutoScalingGroupName=Ref(self.auto_scaling_group), PolicyType='StepScaling', MetricAggregationType='Average', StepAdjustments=[ autoscaling.StepAdjustments(MetricIntervalUpperBound=0, ScalingAdjustment=-1) ], ) self.ec2_high_cpu_usage_alarm = cloudwatch.Alarm( "EC2HighCPUUsageAlarm", ActionsEnabled=True, AlarmActions=[Ref(self.scale_up_policy)], ComparisonOperator='GreaterThanThreshold', Dimensions=[ cloudwatch.MetricDimension(Name='AutoScalingGroupName', Value=Ref(self.auto_scaling_group)) ], EvaluationPeriods=3, MetricName='CPUUtilization', Namespace='AWS/EC2', Period=300, Statistic='Average', Threshold='70', ) self.ec2_low_cpu_usage_alarm = cloudwatch.Alarm( "EC2LowCPUUsageAlarm", ActionsEnabled=True, AlarmActions=[Ref(self.scale_down_policy)], ComparisonOperator='LessThanThreshold', Dimensions=[ cloudwatch.MetricDimension(Name='AutoScalingGroupName', Value=Ref(self.auto_scaling_group)) ], EvaluationPeriods=3, MetricName='CPUUtilization', Namespace='AWS/EC2', Period=300, Statistic='Average', Threshold='20', )
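# --- Example: target tracking as an alternative to the step policies -----------
# The paired ScalingPolicy/CloudWatch Alarm resources above can often be
# collapsed into a single target-tracking policy, where AWS manages the alarms.
# A hedged sketch (the 50% CPU target and the group logical ID are arbitrary):
from troposphere import Ref
from troposphere import autoscaling

cpu_policy = autoscaling.ScalingPolicy(
    'CpuTargetTrackingPolicy',
    AutoScalingGroupName=Ref('AutoScalingGroup'),
    PolicyType='TargetTrackingScaling',
    TargetTrackingConfiguration=autoscaling.TargetTrackingConfiguration(
        PredefinedMetricSpecification=autoscaling.PredefinedMetricSpecification(
            PredefinedMetricType='ASGAverageCPUUtilization'),
        TargetValue=50.0))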
def __init__(self): InitConfigSets = ASInitConfigSets() CfmInitArgs = {} IBoxEnvApp = [] Tags = [] UserDataApp = [] for n in cfg.Apps: name = f'Apps{n}' # Ex. Apps1 envname = f'EnvApp{n}Version' # Ex EnvApp1Version reponame = f'{name}RepoName' # Ex Apps1RepoName UserDataApp.extend(['#${%s}\n' % envname]) p_EnvAppVersion = Parameter(envname) p_EnvAppVersion.Description = f'Application {n} version' p_EnvAppVersion.AllowedPattern = '^[a-zA-Z0-9-_.]*$' p_AppsRepoName = Parameter(reponame) p_AppsRepoName.Description = ( f'App {n} Repo Name - empty for default based on env/role') p_AppsRepoName.AllowedPattern = '^[a-zA-Z0-9-_.]*$' # parameters add_obj([ p_EnvAppVersion, p_AppsRepoName, ]) # conditions add_obj({ name: And(Not(Equals(Ref(envname), '')), Not(get_condition('', 'equals', 'None', reponame))) }) InitConfigApps = ASInitConfigApps(name) CfmInitArgs[name] = InitConfigApps InitConfigAppsBuilAmi = ASInitConfigAppsBuildAmi(name) # AUTOSPOT - Let cfn-init always prepare instances on boot # CfmInitArgs[name + 'BuildAmi'] = InitConfigAppsBuilAmi CfmInitArgs[name] = InitConfigAppsBuilAmi IBoxEnvApp.extend([ f'export EnvApp{n}Version=', Ref(envname), "\n", f'export EnvRepo{n}Name=', get_endvalue(reponame), "\n", ]) InitConfigSetsApp = If(name, name, Ref('AWS::NoValue')) InitConfigSetsAppBuilAmi = If(name, f'{name}BuildAmi', Ref('AWS::NoValue')) IndexSERVICES = InitConfigSets.data['default'].index('SERVICES') InitConfigSets.data['default'].insert(IndexSERVICES, InitConfigSetsApp) # AUTOSPOT - Let cfn-init always prepare instances on boot # InitConfigSets.data['buildamifull'].append( # InitConfigSetsAppBuilAmi) InitConfigSets.data['buildamifull'].append(InitConfigSetsApp) Tags.append(asg.Tag(envname, Ref(envname), True)) # resources # FOR MULTIAPP CODEDEPLOY if len(cfg.Apps) > 1: r_DeploymentGroup = CDDeploymentGroup(f'DeploymentGroup{name}') r_DeploymentGroup.setup(index=n) add_obj(r_DeploymentGroup) # outputs Output_app = Output(envname) Output_app.Value = Ref(envname) add_obj(Output_app) Output_repo = Output(reponame) Output_repo.Value = get_endvalue(reponame) add_obj(Output_repo) InitConfigSetup = ASInitConfigSetup() InitConfigSetup.ibox_env_app = IBoxEnvApp InitConfigSetup.setup() InitConfigCodeDeploy = ASInitConfigCodeDeploy() CfmInitArgs['SETUP'] = InitConfigSetup CfmInitArgs['CWAGENT'] = ASInitConfigCloudWatchAgent('') if cfg.Apps: CfmInitArgs['CODEDEPLOY'] = InitConfigCodeDeploy CD_DeploymentGroup() # LoadBalancerClassic External if cfg.LoadBalancerClassicExternal: InitConfigELBExternal = ASInitConfigELBClassicExternal() CfmInitArgs['ELBWAITER'] = InitConfigELBExternal # LoadBalancerClassic Internal if cfg.LoadBalancerClassicInternal: InitConfigELBInternal = ASInitConfigELBClassicInternal() CfmInitArgs['ELBWAITER'] = InitConfigELBInternal # LoadBalancerApplication External if cfg.LoadBalancerApplicationExternal: InitConfigELBExternal = ASInitConfigELBApplicationExternal() CfmInitArgs['ELBWAITER'] = InitConfigELBExternal # LoadBalancerApplication Internal InitConfigELBInternal = ASInitConfigELBApplicationInternal() CfmInitArgs['ELBWAITER'] = InitConfigELBInternal SecurityGroups = SG_SecurityGroupsEC2().SecurityGroups # Resources R_LaunchConfiguration = ASLaunchConfiguration('LaunchConfiguration', UserDataApp=UserDataApp) R_LaunchConfiguration.SecurityGroups.extend(SecurityGroups) R_InstanceProfile = IAMInstanceProfile('InstanceProfile') # Import role specific cfn definition cfn_envrole = f'cfn_{cfg.classenvrole}' if cfn_envrole in globals(): # Ex cfn_client_portal CfnRole = 
globals()[cfn_envrole]() CfmInitArgs.update(CfnRole) R_LaunchConfiguration.Metadata = cfm.Metadata( { 'CloudFormationInitVersion': If( 'CloudFormationInit', Ref('EnvStackVersion'), Ref('AWS::NoValue'), ) }, cfm.Init(InitConfigSets, **CfmInitArgs), cfm.Authentication({ 'CfnS3Auth': cfm.AuthenticationBlock(type='S3', buckets=[ Sub(cfg.BucketAppRepository), Sub(cfg.BucketAppData) ], roleName=Ref('RoleInstance')) })) R_LaunchConfigurationSpot = ASLaunchConfiguration( 'LaunchConfigurationSpot', UserDataApp=UserDataApp, spot=True) R_LaunchConfigurationSpot.SecurityGroups = ( R_LaunchConfiguration.SecurityGroups) R_LaunchConfigurationSpot.SpotPrice = get_endvalue('SpotPrice') add_obj([ R_LaunchConfiguration, R_InstanceProfile, ]) if cfg.SpotASG: add_obj(R_LaunchConfigurationSpot) self.LaunchConfiguration = R_LaunchConfiguration self.Tags = Tags
def add_resources(self): """Add resources to template.""" template = self.template variables = self.get_variables() vpnrole = template.add_resource( iam.Role( 'VPNRole', AssumeRolePolicyDocument=iam_policies.assumerolepolicy('ec2'), ManagedPolicyArns=variables['VPNManagedPolicies'].ref, Path='/', Policies=[ iam.Policy( PolicyName=Join('-', [ 'customer-vpn-server-role', variables['EnvironmentName'].ref, variables['CustomerName'].ref ]), PolicyDocument=Policy( Version='2012-10-17', Statement=[ # ModifyInstanceAttribute is for src/dst check Statement(Action=[ awacs.ec2.DescribeRouteTables, awacs.ec2.DescribeAddresses, awacs.ec2.AssociateAddress, awacs.ec2.CreateRoute, awacs.ec2.ReplaceRoute, awacs.ec2.ModifyInstanceAttribute ], Effect=Allow, Resource=['*']), Statement( Action=[ awacs.aws.Action('s3', 'Get*'), awacs.aws.Action('s3', 'List*'), awacs.aws.Action('s3', 'Put*') ], Effect=Allow, Resource=[ Join( '', [ 'arn:aws:s3:::', variables['ChefDataBucketName'] .ref, # noqa pylint: disable=line-too-long '/', variables['EnvironmentName']. ref, '/', variables['BucketKey'].ref, '/*' ]) ]), Statement( Action=[awacs.s3.ListBucket], Effect=Allow, Resource=[ Join('', [ 'arn:aws:s3:::', variables['ChefDataBucketName'].ref ]) # noqa pylint: disable=line-too-long ], Condition=Condition( StringLike( 's3:prefix', [ Join( '', [ variables[ 'EnvironmentName']. ref, # noqa pylint: disable=line-too-long '/', variables['BucketKey']. ref, # noqa pylint: disable=line-too-long '/*' ]) ]))) ])) ])) vpninstanceprofile = template.add_resource( iam.InstanceProfile('VPNInstanceProfile', Path='/', Roles=[Ref(vpnrole)])) amiid = template.add_resource( cfn_custom_classes.AMIId( 'AMIId', Condition='MissingVPNAMI', Platform=variables['VPNOS'].ref, Region=Ref('AWS::Region'), ServiceToken=variables['AMILookupArn'].ref)) # Lookup subnets from core VPC stack subnetlookuplambdarole = template.add_resource( iam.Role( 'SubnetLookupLambdaRole', Condition='PrivateSubnetCountOmitted', AssumeRolePolicyDocument=iam_policies.assumerolepolicy( 'lambda'), ManagedPolicyArns=[ IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole' ], Policies=[ iam.Policy( PolicyName=Join('-', [ 'subnetlookup-lambda-role', variables['EnvironmentName'].ref, variables['CustomerName'].ref ]), PolicyDocument=Policy( Version='2012-10-17', Statement=[ Statement( Action=[ awacs.aws.Action( 'cloudformation', 'DescribeStack*'), awacs.aws.Action( 'cloudformation', 'Get*') ], Effect=Allow, Resource=[ Join('', [ 'arn:aws:cloudformation:', Ref('AWS::Region'), ':', Ref('AWS::AccountId'), ':stack/', variables['CoreVPCStack'].ref, '/*' ]) ]) ])) ])) cfncustomresourcesubnetlookup = template.add_resource( awslambda.Function( 'CFNCustomResourceSubnetLookup', Condition='PrivateSubnetCountOmitted', Description='Find subnets created by core stack', Code=awslambda.Code( ZipFile=variables['SubnetLookupLambdaFunction']), Handler='index.handler', Role=GetAtt(subnetlookuplambdarole, 'Arn'), Runtime='python2.7', Timeout=10)) subnetlookup = template.add_resource( cfn_custom_classes.SubnetLookup( 'SubnetLookup', Condition='PrivateSubnetCountOmitted', CoreVPCStack=variables['CoreVPCStack'].ref, Region=Ref('AWS::Region'), ServiceToken=GetAtt(cfncustomresourcesubnetlookup, 'Arn'))) common_userdata_prefix = [ "#cloud-config\n", "package_update: true\n", "package_upgrade: false\n", "write_files:\n", " - path: /usr/local/bin/update_vpn_routes.sh\n", " permissions: '0755'\n", " content: |\n", " #!/bin/bash\n", " \n", " export AWS_DEFAULT_REGION=\"", Ref('AWS::Region'), "\"\n", " my_instance_id=$(curl 
-s http://169.254.169.254/latest/meta-data/instance-id)\n", # noqa pylint: disable=line-too-long " \n", " publicroutetableid=", If( 'PrivateSubnetCountOmitted', GetAtt(subnetlookup.title, 'PublicRouteTableId'), If( 'PublicRouteTableSpecified', variables['PublicRouteTable'].ref, ImportValue( Sub("${%s}-PublicRouteTable" % variables['CoreVPCStack'].name)))), # noqa pylint: disable=line-too-long "\n", " private_route_tables=(", If( 'PrivateSubnetCountOmitted', GetAtt(subnetlookup.title, 'PrivateRouteTables'), If( '3PrivateSubnetsCreated', If( 'PublicRouteTableSpecified', Join(' ', [ variables['PrivateRouteTable1'].ref, variables['PrivateRouteTable2'].ref, variables['PrivateRouteTable3'].ref ]), Join( ' ', [ ImportValue( Sub("${%s}-PrivateRouteTable1" % variables['CoreVPCStack'].name)), # noqa pylint: disable=line-too-long ImportValue( Sub("${%s}-PrivateRouteTable2" % variables['CoreVPCStack'].name)), # noqa pylint: disable=line-too-long ImportValue( Sub("${%s}-PrivateRouteTable3" % variables['CoreVPCStack'].name)) ])), # noqa pylint: disable=line-too-long If( '2PrivateSubnetsCreated', If( 'PublicRouteTableSpecified', Join(' ', [ variables['PrivateRouteTable1'].ref, variables['PrivateRouteTable2'].ref ]), Join( ' ', [ ImportValue( Sub("${%s}-PrivateRouteTable1" % variables['CoreVPCStack'].name)), # noqa pylint: disable=line-too-long ImportValue( Sub("${%s}-PrivateRouteTable2" % variables['CoreVPCStack'].name)) ])), # noqa pylint: disable=line-too-long, If( 'PublicRouteTableSpecified', variables['PrivateRouteTable1'].ref, ImportValue( Sub("${%s}-PrivateRouteTable1" % variables['CoreVPCStack'].name)))))), # noqa pylint: disable=line-too-long ")\n", "\n", " openvpnroutepubdest=", variables['VPNSubnet'].ref, "\n", " \n", " # Disabling sourceDestCheck\n", " aws ec2 modify-instance-attribute --instance-id ${my_instance_id} --source-dest-check \"{\\\"Value\\\": false}\"\n", # noqa pylint: disable=line-too-long " \n", " if aws ec2 describe-route-tables | grep ${openvpnroutepubdest}; then\n", # noqa pylint: disable=line-too-long " # Update 'OpenVPNRoutePub' to point to this instance\n", # noqa pylint: disable=line-too-long " aws ec2 replace-route --route-table-id ${publicroutetableid} --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n", # noqa pylint: disable=line-too-long " # Update private routes\n", " for i in \"${private_route_tables[@]}\"\n", " do\n", " aws ec2 replace-route --route-table-id $i --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n", # noqa pylint: disable=line-too-long " done\n", " else\n", " # Create 'OpenVPNRoutePub'\n", " aws ec2 create-route --route-table-id ${publicroutetableid} --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n", # noqa pylint: disable=line-too-long " # Create private routes\n", " for i in \"${private_route_tables[@]}\"\n", " do\n", " aws ec2 create-route --route-table-id $i --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n", # noqa pylint: disable=line-too-long " done\n", " fi\n", " \n", "\n", " - path: /etc/chef/sync_cookbooks.sh\n", " permissions: '0755'\n", " owner: 'root'\n", " group: 'root'\n", " content: |\n", " #!/bin/bash\n", " set -e -o pipefail\n", " \n", " aws --region ", Ref('AWS::Region'), " s3 sync s3://", variables['ChefBucketName'].ref, "/", variables['EnvironmentName'].ref, "/", variables['BucketKey'].ref, "/ /etc/chef/\n", " if compgen -G \"/etc/chef/cookbooks-*.tar.gz\" > /dev/null; then\n", # noqa pylint: 
disable=line-too-long " echo \"Cookbook archive found.\"\n", " if [ -d \"/etc/chef/cookbooks\" ]; then\n", " echo \"Removing previously extracted cookbooks.\"\n", # noqa pylint: disable=line-too-long " rm -r /etc/chef/cookbooks\n", " fi\n", " echo \"Extracting highest numbered cookbook archive.\"\n", # noqa pylint: disable=line-too-long " cbarchives=(/etc/chef/cookbooks-*.tar.gz)\n", " tar -zxf \"${cbarchives[@]: -1}\" -C /etc/chef\n", " chown -R root:root /etc/chef\n", " fi\n", " \n", "\n", " - path: /etc/chef/perform_chef_run.sh\n", " permissions: '0755'\n", " owner: 'root'\n", " group: 'root'\n", " content: |\n", " #!/bin/bash\n", " set -e -o pipefail\n", " \n", " chef-client -z -r '", If('ChefRunListSpecified', variables['ChefRunList'].ref, Join('', ['recipe[', variables['CustomerName'].ref, '_vpn]'])), "' -c /etc/chef/client.rb -E ", variables['EnvironmentName'].ref, " --force-formatter --no-color -F min\n", "\n", " - path: /etc/chef/client.rb\n", " permissions: '0644'\n", " owner: 'root'\n", " group: 'root'\n", " content: |\n", " log_level :info\n", " log_location '/var/log/chef/client.log'\n", " ssl_verify_mode :verify_none\n", " cookbook_path '/etc/chef/cookbooks'\n", " node_path '/etc/chef/nodes'\n", " role_path '/etc/chef/roles'\n", " data_bag_path '/etc/chef/data_bags'\n", " environment_path '/etc/chef/environments'\n", " local_mode 'true'\n", "\n", " - path: /etc/chef/environments/", variables['EnvironmentName'].ref, ".json\n", " permissions: '0644'\n", " owner: 'root'\n", " group: 'root'\n", " content: |\n", " {\n", " \"name\": \"", variables['EnvironmentName'].ref, "\",\n", " \"default_attributes\": {\n", " \"sturdy\": {\n", " \"openvpn\": {\n", " \"core_vpc_cidr\": \"", variables['VpcCidr'].ref, "\",\n", " \"vpn_elastic_ip\": \"", variables['VpnEipPublicIp'].ref, "\",\n", " \"vpn_subnet_cidr\": \"", variables['VPNSubnet'].ref, "\",\n", " \"chef_data_bucket_name\": \"", variables['ChefDataBucketName'].ref, "\",\n", " \"chef_data_bucket_folder\": \"", variables['EnvironmentName'].ref, "/", variables['BucketKey'].ref, "\",\n", " \"chef_data_bucket_region\": \"", Ref('AWS::Region'), "\"\n", " }\n", " }\n", " },\n", " \"json_class\": \"Chef::Environment\",\n", " \"description\": \"", variables['EnvironmentName'].ref, " environment\",\n", " \"chef_type\": \"environment\"\n", " }\n", "\n", "runcmd:\n", " - set -euf\n", " - echo 'Attaching EIP'\n", " - pip install aws-ec2-assign-elastic-ip\n", # Allowing this command to fail (with ||true) as sturdy_openvpn # 2.3.0+ can handle this association instead. 
This will be removed # entirely in the next major release of this module (at which time # use of the updated sturdy_openvpn cookbook will be required) " - aws-ec2-assign-elastic-ip --region ", Ref('AWS::Region'), " --valid-ips ", variables['VpnEipPublicIp'].ref, " || true\n", " - echo 'Updating Routes'\n", " - /usr/local/bin/update_vpn_routes.sh\n", " - echo 'Installing Chef'\n", " - curl --max-time 10 --retry-delay 5 --retry 5 -L https://www.chef.io/chef/install.sh | bash -s -- -v ", # noqa pylint: disable=line-too-long variables['ChefClientVersion'].ref, "\n", " - echo 'Configuring Chef'\n", " - mkdir -p /var/log/chef /etc/chef/data_bags /etc/chef/nodes /etc/chef/roles\n", # noqa pylint: disable=line-too-long " - chmod 0755 /etc/chef\n", " - /etc/chef/sync_cookbooks.sh\n", " - /etc/chef/perform_chef_run.sh\n" ] vpnserverlaunchconfig = template.add_resource( autoscaling.LaunchConfiguration( 'VpnServerLaunchConfig', AssociatePublicIpAddress=True, BlockDeviceMappings=[ # CentOS AMIs don't include this by default ec2.BlockDeviceMapping( DeviceName='/dev/sda1', Ebs=ec2.EBSBlockDevice(DeleteOnTermination=True)) ], IamInstanceProfile=Ref(vpninstanceprofile), ImageId=If('MissingVPNAMI', GetAtt(amiid, 'ImageId'), variables['VPNAMI'].ref), InstanceType=variables['ManagementInstanceType'].ref, InstanceMonitoring=False, # extra granularity not worth cost KeyName=If('SSHKeySpecified', variables['KeyName'].ref, Ref('AWS::NoValue')), PlacementTenancy=variables['VpcInstanceTenancy'].ref, SecurityGroups=variables['VPNSecurityGroups'].ref, UserData=If( 'RHELUserData', Base64( Join( '', common_userdata_prefix + [ "yum_repos:\n", " epel:\n", " name: Extra Packages for $releasever - $basearch\n", # noqa pylint: disable=line-too-long " baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch\n", # noqa pylint: disable=line-too-long " enabled: true\n", " failovermethod: priority\n", " gpgcheck: true\n", " gpgkey: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7\n", # noqa pylint: disable=line-too-long "packages:\n", " - awscli\n", " - python-pip\n", " - python2-boto\n", " - python2-boto3\n" ])), Base64( Join( '', common_userdata_prefix + [ "packages:\n", " - awscli\n", " - python-pip\n", " - python-boto\n", " - python-boto3\n" ]))))) template.add_resource( autoscaling.AutoScalingGroup( 'VPNServerASG', MinSize=1, MaxSize=1, LaunchConfigurationName=Ref(vpnserverlaunchconfig), Tags=[ autoscaling.Tag( 'Name', Join('-', [ variables['CustomerName'].ref, 'vpn', variables['EnvironmentName'].ref ]), True), autoscaling.Tag('environment', variables['EnvironmentName'].ref, True), autoscaling.Tag('customer', variables['CustomerName'].ref, True) ], VPCZoneIdentifier=If( 'PublicSubnetsOmitted', GetAtt(subnetlookup.title, 'PublicSubnetList'), variables['PublicSubnets'].ref)))
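# --- Example: the self-healing singleton pattern used above ---------------------
# MinSize=1/MaxSize=1 makes the ASG a watchdog: if the VPN instance dies, a
# replacement boots and cloud-init reattaches the EIP and routes. A
# stripped-down sketch of the same shape, with placeholder subnet IDs:
from troposphere import Template, Ref
from troposphere import autoscaling

t = Template()
t.add_resource(autoscaling.AutoScalingGroup(
    'SingletonASG',
    MinSize=1,
    MaxSize=1,
    LaunchConfigurationName=Ref('VpnServerLaunchConfig'),
    VPCZoneIdentifier=['subnet-aaaa1111', 'subnet-bbbb2222'],  # placeholders
    Tags=[autoscaling.Tag('Name', 'vpn-singleton', True)]))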
def add_resources(self): """Add ASG to template.""" template = self.template variables = self.get_variables() role_policy_statements = [ Statement(Action=[awacs.aws.Action('elasticloadbalancing', '*')], Effect=Allow, Resource=['*']), Statement(Action=[ awacs.ssm.GetParameter, awacs.ec2.DescribeInstances, ], Effect=Allow, Resource=["*"]), ] targetgrouparnsomitted = 'TargetGroupARNsOmitted' template.add_condition( targetgrouparnsomitted, Equals(Join('', variables['TargetGroupARNs'].ref), '')) # Resources server_role = template.add_resource( iam.Role( 'ServerServerRole', AssumeRolePolicyDocument=Policy( Version='2012-10-17', Statement=[ Statement(Effect=Allow, Action=[awacs.sts.AssumeRole], Principal=Principal('Service', ['ec2.amazonaws.com'])) ]), ManagedPolicyArns=variables['AppPolicies'].ref, Path='/', Policies=[ iam.Policy(PolicyName=Join('-', [ variables['Company'].ref, variables['Application'].ref, 'app-role', variables['Environment'].ref ]), PolicyDocument=Policy( Version='2012-10-17', Statement=role_policy_statements)), ])) server_profile = template.add_resource( iam.InstanceProfile('ServerInstanceProfile', Path='/', Roles=[Ref(server_role)])) server_launch_config = template.add_resource( autoscaling.LaunchConfiguration( 'LaunchConfig', IamInstanceProfile=Ref(server_profile), ImageId=variables['AppAMI'].ref, InstanceType=variables['AppInstanceType'].ref, InstanceMonitoring=True, KeyName=variables['KeyName'].ref, SecurityGroups=variables['AppSecurityGroups'].ref, UserData=variables['UserData'])) asg_tags = [ autoscaling.Tag( 'Name', Join('-', [ variables['Company'].ref, variables['Application'].ref, variables['Role'].ref, variables['Environment'].ref ]), True), autoscaling.Tag('Application', variables['Application'].ref, True), autoscaling.Tag('AutoAlarmCreation', 'True', True), autoscaling.Tag('Company', variables['Company'].ref, True), autoscaling.Tag('Environment', variables['Environment'].ref, True), # autoscaling.Tag('TechOwner', # variables['TechOwner'].ref, True), autoscaling.Tag('TechOwnerEmail', variables['TechOwnerEmail'].ref, True), autoscaling.Tag('Backup', variables['Backup'].ref, True), autoscaling.Tag('BackupHourly', variables['BackupHourly'].ref, True), # autoscaling.Tag('DataClassification', # variables['DataClassification'].ref, True), autoscaling.Tag('StatelessHa', variables['StatelessHaEnabled'].ref, True), autoscaling.Tag('MSBuildConfiguration', variables['MSBuildConfiguration'].ref, True), ] optional_tags = ['Role', 'Service'] for tag in optional_tags: if variables[tag].value != '': asg_tags.append(autoscaling.Tag(tag, variables[tag].ref, True)) auto_deploy = variables['ASGAutoDeploy'].value if auto_deploy == 'true': update_policy = UpdatePolicy( AutoScalingRollingUpdate=AutoScalingRollingUpdate( PauseTime=variables['AsgPauseTime'].ref, MinInstancesInService=variables['MinInstancesInService']. 
ref, MaxBatchSize='1', )) else: update_policy = UpdatePolicy(AutoScalingRollingUpdate=NoValue) server_asg = template.add_resource( autoscaling.AutoScalingGroup( 'AutoScaleGroup', AutoScalingGroupName=Join('-', [ variables['Company'].ref, variables['Application'].ref, variables['Role'].ref, variables['Environment'].ref ]), UpdatePolicy=update_policy, MinSize=variables['ASGMinValue'].ref, MaxSize=variables['ASGMaxValue'].ref, HealthCheckGracePeriod=variables['HealthCheckGracePeriod'].ref, HealthCheckType=variables['HealthCheckType'].ref, MetricsCollection=[ autoscaling.MetricsCollection(Granularity='1Minute') ], LaunchConfigurationName=Ref(server_launch_config), Tags=asg_tags, TargetGroupARNs=If(targetgrouparnsomitted, Ref('AWS::NoValue'), variables['TargetGroupARNs'].ref), VPCZoneIdentifier=variables['AppSubnets'].ref)) template.add_output( Output( 'ASG', Description='Name of autoscaling group', Value=Ref(server_asg), ))
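# --- Illustrative sketch (not part of the original template) ---
# The TargetGroupARNs handling above uses a common trick for detecting an
# empty CommaDelimitedList parameter: Join the list into one string and
# compare it to ''. When the condition holds, Ref('AWS::NoValue') drops the
# property from the rendered resource entirely. Standalone version (the
# Default='' on the parameter is an assumption):
from troposphere import Equals, Join, Parameter, Ref, Template

t = Template()
arns = t.add_parameter(Parameter(
    'TargetGroupARNs', Type='CommaDelimitedList',
    Description='Optional target group ARNs', Default=''))
t.add_condition('TargetGroupARNsOmitted', Equals(Join('', Ref(arns)), ''))
# Then, on any resource:
#   TargetGroupARNs=If('TargetGroupARNsOmitted', Ref('AWS::NoValue'), Ref(arns))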
# AutoScaling Group autoScalingGroupStrongjobs = t.add_resource( autoscaling.AutoScalingGroup( "StrongjobsGroup", UpdatePolicy=policies.UpdatePolicy( AutoScalingRollingUpdate=policies.AutoScalingRollingUpdate( MinInstancesInService="0", MaxBatchSize='1', )), LaunchConfigurationName=Ref(launchConfiguration), VPCZoneIdentifier=[Ref(subnetId)], MinSize="1", DesiredCapacity="1", MaxSize="1", Tags=[ autoscaling.Tag("environment", Ref(AWS_STACK_NAME), "true"), autoscaling.Tag("Name", Join("-", [Ref(AWS_STACK_NAME), "asg-strongjobs"]), "true"), autoscaling.Tag("application", "strongjobs", "true"), ], )) # CodeDeploy strongjobsApplication = t.add_resource( codedeploy.Application( "StrongjobsApplication", ApplicationName=Join("-", ["strongjobs", Ref(AWS_STACK_NAME)]))) strongjobsCodeDeploy = t.add_resource( codedeploy.DeploymentGroup(
def create_template(self): """Create template (main function called by Stacker).""" template = self.template variables = self.get_variables() template.add_version('2010-09-09') template.add_description('Kubernetes workers via EKS - V1.0.0 ' '- compatible with amazon-eks-node-v23+') # Metadata template.add_metadata({ 'AWS::CloudFormation::Interface': { 'ParameterGroups': [ {'Label': {'default': 'EKS Cluster'}, 'Parameters': [variables[i].name for i in ['ClusterName', 'ClusterControlPlaneSecurityGroup']]}, {'Label': {'default': 'Worker Node Configuration'}, 'Parameters': [variables[i].name for i in ['NodeGroupName', 'NodeAutoScalingGroupMinSize', 'NodeAutoScalingGroupMaxSize', 'UseDesiredInstanceCount', 'NodeInstanceType', 'NodeInstanceProfile', 'NodeImageId', 'NodeVolumeSize', 'KeyName', 'UseSpotInstances', 'SpotBidPrice', 'BootstrapArguments']]}, {'Label': {'default': 'Worker Network Configuration'}, 'Parameters': [variables[i].name for i in ['VpcId', 'Subnets']]} ] } }) # Conditions template.add_condition( 'SetSpotPrice', Equals(variables['UseSpotInstances'].ref, 'yes') ) template.add_condition( 'DesiredInstanceCountSpecified', Equals(variables['UseDesiredInstanceCount'].ref, 'true') ) template.add_condition( 'KeyNameSpecified', Not(Equals(variables['KeyName'].ref, '')) ) # Resources nodesecuritygroup = template.add_resource( ec2.SecurityGroup( 'NodeSecurityGroup', GroupDescription='Security group for all nodes in the cluster', Tags=[ {'Key': Sub('kubernetes.io/cluster/${ClusterName}'), 'Value': 'owned'}, ], VpcId=variables['VpcId'].ref ) ) template.add_output( Output( 'NodeSecurityGroup', Description='Security group for all nodes in the cluster', Value=nodesecuritygroup.ref() ) ) template.add_resource( ec2.SecurityGroupIngress( 'NodeSecurityGroupIngress', Description='Allow node to communicate with each other', GroupId=nodesecuritygroup.ref(), SourceSecurityGroupId=nodesecuritygroup.ref(), IpProtocol='-1', FromPort=0, ToPort=65535 ) ) template.add_resource( ec2.SecurityGroupIngress( 'NodeSecurityGroupFromControlPlaneIngress', Description='Allow worker Kubelets and pods to receive ' 'communication from the cluster control plane', GroupId=nodesecuritygroup.ref(), SourceSecurityGroupId=variables['ClusterControlPlaneSecurityGroup'].ref, # noqa IpProtocol='tcp', FromPort=1025, ToPort=65535 ) ) template.add_resource( ec2.SecurityGroupEgress( 'ControlPlaneEgressToNodeSecurityGroup', Description='Allow the cluster control plane to communicate ' 'with worker Kubelet and pods', GroupId=variables['ClusterControlPlaneSecurityGroup'].ref, DestinationSecurityGroupId=nodesecuritygroup.ref(), IpProtocol='tcp', FromPort=1025, ToPort=65535 ) ) template.add_resource( ec2.SecurityGroupIngress( 'NodeSecurityGroupFromControlPlaneOn443Ingress', Description='Allow pods running extension API servers on port ' '443 to receive communication from cluster ' 'control plane', GroupId=nodesecuritygroup.ref(), SourceSecurityGroupId=variables['ClusterControlPlaneSecurityGroup'].ref, # noqa IpProtocol='tcp', FromPort=443, ToPort=443 ) ) template.add_resource( ec2.SecurityGroupEgress( 'ControlPlaneEgressToNodeSecurityGroupOn443', Description='Allow the cluster control plane to communicate ' 'with pods running extension API servers on port ' '443', GroupId=variables['ClusterControlPlaneSecurityGroup'].ref, DestinationSecurityGroupId=nodesecuritygroup.ref(), IpProtocol='tcp', FromPort=443, ToPort=443 ) ) template.add_resource( ec2.SecurityGroupIngress( 'ClusterControlPlaneSecurityGroupIngress', Description='Allow pods to 
communicate with the cluster API ' 'Server', GroupId=variables['ClusterControlPlaneSecurityGroup'].ref, SourceSecurityGroupId=nodesecuritygroup.ref(), IpProtocol='tcp', FromPort=443, ToPort=443 ) ) nodelaunchconfig = template.add_resource( autoscaling.LaunchConfiguration( 'NodeLaunchConfig', AssociatePublicIpAddress=True, IamInstanceProfile=variables['NodeInstanceProfile'].ref, ImageId=variables['NodeImageId'].ref, InstanceType=variables['NodeInstanceType'].ref, KeyName=If( 'KeyNameSpecified', variables['KeyName'].ref, NoValue ), SecurityGroups=[nodesecuritygroup.ref()], SpotPrice=If('SetSpotPrice', variables['SpotBidPrice'].ref, NoValue), BlockDeviceMappings=[autoscaling.BlockDeviceMapping( DeviceName='/dev/xvda', Ebs=autoscaling.EBSBlockDevice( VolumeSize=variables['NodeVolumeSize'].ref, VolumeType='gp2', DeleteOnTermination=True ) )], UserData=Base64( Sub('\n'.join([ '#!/bin/bash', 'set -o xtrace', '/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}', # noqa '/opt/aws/bin/cfn-signal --exit-code $? \\', '--stack ${AWS::StackName} \\', '--resource NodeGroup \\', '--region ${AWS::Region}' ])) ) ) ) template.add_resource( autoscaling.AutoScalingGroup( 'NodeGroup', DesiredCapacity=If( 'DesiredInstanceCountSpecified', variables['NodeAutoScalingGroupMaxSize'].ref, NoValue ), LaunchConfigurationName=nodelaunchconfig.ref(), MinSize=variables['NodeAutoScalingGroupMinSize'].ref, MaxSize=variables['NodeAutoScalingGroupMaxSize'].ref, VPCZoneIdentifier=variables['Subnets'].ref, Tags=[ autoscaling.Tag( 'Name', Sub('${ClusterName}-${NodeGroupName}-Node'), True), autoscaling.Tag( Sub('kubernetes.io/cluster/${ClusterName}'), 'owned', True) ], UpdatePolicy=UpdatePolicy( AutoScalingRollingUpdate=AutoScalingRollingUpdate( MinInstancesInService='1', MaxBatchSize='1' ) ) ) )
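# --- Illustrative sketch (not part of the original template) ---
# The bootstrap script above ends by running cfn-signal against the NodeGroup
# resource, but the group itself only declares a rolling UpdatePolicy. If the
# stack were meant to block on those signals at creation time, the ASG would
# also carry a CreationPolicy; a hedged sketch of that pairing (sizes, AZ,
# launch configuration name and timeout are placeholder assumptions):
from troposphere import autoscaling
from troposphere.policies import CreationPolicy, ResourceSignal

node_group = autoscaling.AutoScalingGroup(
    'NodeGroup',
    MinSize=1,
    MaxSize=3,
    AvailabilityZones=['us-east-1a'],            # placeholder
    LaunchConfigurationName='NodeLaunchConfig',  # placeholder logical ID
    CreationPolicy=CreationPolicy(
        ResourceSignal=ResourceSignal(Count=1, Timeout='PT15M')))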
def configure(self): """ Returns a vpn template """ self.defaults = {'instance_type': 't2.small'} self.service = 'vpn' self.add_description('Sets up VPNs') self.get_eni_policies() self.get_default_security_groups() self.get_standard_parameters() self.get_standard_policies() self.ami = self.add_parameter( Parameter('AMI', Type='String', Description='AMI ID for instances', Default=get_latest_ami_id( self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2', 'amazon'))) # Custom config per VPN for vpn in constants.ENVIRONMENTS[self.env]['vpn']: if not vpn['active']: continue _vpn_name = vpn['name'] _vpn_subnet = self.get_subnets('public', _preferred_only=True)[0] _role = 'vpn-{}'.format(_vpn_name) _vpn_security_group = self.add_resource( ec2.SecurityGroup( self.cfn_name('VPNSecurityGroup', _vpn_name), VpcId=self.vpc_id, GroupDescription='Security Group for VPN {}'.format( _vpn_name), SecurityGroupIngress=[{ "IpProtocol": "50", "FromPort": "-1", "ToPort": "-1", "CidrIp": vpn['remote_ip'] + '/32' }, { "IpProtocol": "51", "FromPort": "-1", "ToPort": "-1", "CidrIp": vpn['remote_ip'] + '/32' }, { "IpProtocol": "udp", "FromPort": "500", "ToPort": "500", "CidrIp": vpn['remote_ip'] + '/32' }, { "IpProtocol": "udp", "FromPort": "4500", "ToPort": "4500", "CidrIp": vpn['remote_ip'] + '/32' }, { "IpProtocol": "icmp", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0" }, { "IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": constants.SUPERNET }], SecurityGroupEgress=[{ "IpProtocol": "50", "FromPort": "-1", "ToPort": "-1", "CidrIp": vpn['remote_ip'] + '/32' }, { "IpProtocol": "51", "FromPort": "-1", "ToPort": "-1", "CidrIp": vpn['remote_ip'] + '/32' }, { "IpProtocol": "udp", "FromPort": "500", "ToPort": "500", "CidrIp": vpn['remote_ip'] + '/32' }, { "IpProtocol": "udp", "FromPort": "4500", "ToPort": "4500", "CidrIp": vpn['remote_ip'] + '/32' }, { "IpProtocol": "tcp", "FromPort": "80", "ToPort": "80", "CidrIp": "0.0.0.0/0" }, { "IpProtocol": "tcp", "FromPort": "443", "ToPort": "443", "CidrIp": "0.0.0.0/0" }, { "IpProtocol": "udp", "FromPort": "123", "ToPort": "123", "CidrIp": "0.0.0.0/0" }, { "IpProtocol": "icmp", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0" }, { "IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": constants.SUPERNET }])) _vpn_eip = self.add_resource( ec2.EIP(self.cfn_name('VPNInstanceEIP', _vpn_name), Domain='vpc')) _vpn_eni = self.add_resource( ec2.NetworkInterface( self.cfn_name('VPNInstanceENI', _vpn_name), SubnetId=_vpn_subnet['SubnetId'], Description='ENI for VPN - {}'.format(_vpn_name), GroupSet=[Ref(_vpn_security_group)] + self.security_groups, SourceDestCheck=False, Tags=self.get_tags(role_override=_role))) self.add_resource( ec2.EIPAssociation(self.cfn_name('AssociateVPNInstanceENI', _vpn_name), AllocationId=GetAtt(_vpn_eip, "AllocationId"), NetworkInterfaceId=Ref(_vpn_eni))) # Set up Routes from all VPC subnets to the ENI _vpc_route_tables = self.ec2_conn.describe_route_tables( Filters=[{ 'Name': 'vpc-id', 'Values': [self.vpc_id] }])['RouteTables'] _local_subnets = iter( map( lambda x: constants.ENVIRONMENTS[x]['vpc']['cidrblock'], filter(lambda z: z in vpn.get('local_envs', []), constants.ENVIRONMENTS.keys()))) _local_subnets = list( itertools.chain(_local_subnets, [ self.vpc_metadata['cidrblock'], ])) # append remote vpc subnets _remote_subnets = iter( map( lambda x: constants.ENVIRONMENTS[x]['vpc']['cidrblock'], filter(lambda z: z in vpn.get('remote_envs', []), constants.ENVIRONMENTS.keys()))) _remote_subnets = list( 
itertools.chain(_remote_subnets, vpn.get('remote_subnets', []))) for remote_subnet in _remote_subnets: for route_table in _vpc_route_tables: self.add_resource( ec2.Route(self.cfn_name(_vpn_name, "VPNRoute", remote_subnet, route_table['RouteTableId']), RouteTableId=route_table['RouteTableId'], DestinationCidrBlock=remote_subnet, NetworkInterfaceId=Ref(_vpn_eni))) _user_data_template = self.get_cloudinit_template(replacements=( ('__PROMPT_COLOR__', self.prompt_color()), ('__LOCAL_SUBNETS__', ','.join(sorted(_local_subnets))), ('__REMOTE_IP__', vpn['remote_ip']), ('__REMOTE_SUBNETS__', ','.join(sorted(_remote_subnets))), ('__SECRET__', vpn['secret']), ('__IKE__', vpn.get('ike', 'aes256-sha1-modp1536')), ('__IKE_LIFETIME__', vpn.get('ikelifetime', '28800s')), ('__ESP__', vpn.get('esp', 'aes256-sha1')), ('__KEYLIFE__', vpn.get('keylife', '1800s')), ('__IPTABLES_RULES__', '\n'.join(vpn.get('iptables_rules', ''))), ('__SERVICE__', self.service), ('__VPN_NAME__', _vpn_name), ('__TAG__', _vpn_name.lower()), ('__VPC_ID__', self.vpc_id))) _user_data = Sub( _user_data_template.replace( '${', '${!') # Replace bash brackets with CFN escaped style .replace( '{#', '${' ), # Replace rain-style CFN escapes with proper CFN brackets, { 'CFN_EIP_ADDR': Ref(_vpn_eip), 'CFN_ENI_ID': Ref(_vpn_eni), }) _vpn_launch_configuration = self.add_resource( autoscaling.LaunchConfiguration( self.cfn_name('VPNLaunchConfiguration', _vpn_name), AssociatePublicIpAddress=True, KeyName=Ref(self.keypair_name), ImageId=Ref(self.ami), InstanceType=Ref(self.instance_type), InstanceMonitoring=False, IamInstanceProfile=Ref(self.instance_profile), UserData=Base64(_user_data))) self.add_resource( autoscaling.AutoScalingGroup( self.cfn_name('VPNASGroup', _vpn_name), AvailabilityZones=[_vpn_subnet['AvailabilityZone']], HealthCheckType='EC2', LaunchConfigurationName=Ref(_vpn_launch_configuration), MinSize=1, MaxSize=1, DesiredCapacity=1, VPCZoneIdentifier=[_vpn_subnet['SubnetId']], Tags=self.get_autoscaling_tags(role_override=_role) + [autoscaling.Tag('Name', _role, True)]))
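# --- Illustrative sketch (not part of the original template) ---
# The chained .replace() calls above convert a shell-oriented template into
# something safe to hand to CloudFormation's Sub: literal bash '${...}'
# becomes the CFN escape '${!...}', and rain-style '{#...}' placeholders
# become real '${...}' substitutions. A tiny demonstration with a made-up
# template string:
_tpl = 'echo "eni is {#CFN_ENI_ID}, shell var is ${HOSTNAME}"'
_converted = _tpl.replace('${', '${!').replace('{#', '${')
assert _converted == 'echo "eni is ${CFN_ENI_ID}, shell var is ${!HOSTNAME}"'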
def set_up_stack(self): """Sets up the stack""" if not self.INPUTS or not self.STACK_NAME_PREFIX or not self.HEALTH_ENDPOINT: raise MKInputError( 'Must define INPUTS, STACK_NAME_PREFIX, and HEALTH_ENDPOINT') super(AppServerStack, self).set_up_stack() tags = self.get_input('Tags').copy() self.add_description('{} App Server Stack for Cac'.format( self.STACK_NAME_PREFIX)) assert isinstance(tags, dict), 'tags must be a dictionary' self.availability_zones = get_availability_zones() tags.update({'StackType': 'AppServer'}) self.default_tags = tags self.app_server_instance_type_parameter = self.add_parameter( Parameter( 'AppServerInstanceType', Type='String', Default='t2.medium', Description='App server EC2 instance type', AllowedValues=EC2_INSTANCE_TYPES, ConstraintDescription='must be a valid EC2 instance type.'), source='AppServerInstanceType') self.param_app_server_iam_profile = self.add_parameter( Parameter('AppServerIAMProfile', Type='String', Description='IAM Profile for instances'), source='AppServerIAMProfile') self.app_server_ami = self.add_parameter(Parameter( 'AppServerAMI', Type='String', Description='{} Server EC2 AMI'.format(self.STACK_NAME_PREFIX)), source='AppServerAMI') self.keyname_parameter = self.add_parameter(Parameter( 'KeyName', Type='String', Default='cac', Description='Name of an existing EC2 key pair'), source='KeyName') self.param_color = self.add_parameter(Parameter( 'StackColor', Type='String', Description='Stack color', AllowedValues=['Blue', 'Green', 'Orange']), source='StackColor') self.param_stacktype = self.add_parameter(Parameter( 'StackType', Type='String', Description='Stack type', AllowedValues=['Development', 'Staging', 'Production']), source='StackType') self.param_public_hosted_zone_name = self.add_parameter( Parameter('PublicHostedZoneName', Type='String', Description='Public hosted zone name'), source='PublicHostedZoneName') self.param_vpc = self.add_parameter(Parameter( 'VpcId', Type='String', Description='Name of an existing VPC'), source='VpcId') self.param_notification_arn = self.add_parameter( Parameter( 'GlobalNotificationsARN', Type='String', Description='Physical resource ID on an AWS::SNS::Topic for ' 'notifications'), source='GlobalNotificationsARN') self.param_ssl_certificate_arn = self.add_parameter( Parameter('SSLCertificateARN', Type='String', Description= 'Physical resource ID on an AWS::IAM::ServerCertificate ' 'for the application server load balancer'), source='SSLCertificateARN') self.param_public_subnets = self.add_parameter( Parameter('PublicSubnets', Type='CommaDelimitedList', Description='A list of public subnets'), source='AppServerPublicSubnets') self.param_private_subnets = self.add_parameter( Parameter('PrivateSubnets', Type='CommaDelimitedList', Description='A list of private subnets'), source='AppServerPrivateSubnets') self.param_bastion_security_group = self.add_parameter( Parameter('BastionSecurityGroup', Type='String', Description='The ID of the bastion security group'), source='BastionSecurityGroup') self.param_database_security_group = self.add_parameter( Parameter('DatabaseSecurityGroup', Type='String', Description='The ID of the database security group'), source='DatabaseSecurityGroup') self.param_nat_security_group = self.add_parameter( Parameter('NATSecurityGroup', Type='String', Description='The ID of the NAT security group'), source='NATSecurityGroup') self.param_min_size = self.add_parameter(Parameter( 'ASGMinSize', Type='Number', Default='1', Description='Min size of ASG'), source='ASGMinSize') self.param_max_size =
self.add_parameter(Parameter( 'ASGMaxSize', Type='Number', Default='1', Description='Max size of ASG'), source='ASGMaxSize') self.param_desired_capacity = self.add_parameter( Parameter('ASGDesiredCapacity', Type='Number', Default='1', Description='Desired capacity of ASG'), source='ASGDesiredCapacity') # # Security Group # app_server_load_balancer_security_group = self.add_resource( ec2.SecurityGroup( 'sgAppServerLoadBalancer', GroupDescription= 'Enables access to app servers via a load balancer', VpcId=Ref(self.param_vpc), SecurityGroupIngress=[ ec2.SecurityGroupRule(IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p, ToPort=p) for p in [80, 443] ], Tags=Tags(Name='sgAppServerLoadBalancer', Color=Ref(self.param_color)))) app_server_security_group = self.add_resource( ec2.SecurityGroup( 'sgAppServer', GroupDescription='Enables access to App Servers', VpcId=Ref(self.param_vpc), SecurityGroupIngress=[ ec2.SecurityGroupRule(IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p) for p in [22, 80, 443] ] + [ ec2.SecurityGroupRule(IpProtocol='tcp', SourceSecurityGroupId=Ref(sg), FromPort=80, ToPort=80) for sg in [app_server_load_balancer_security_group] ] + [ ec2.SecurityGroupRule(IpProtocol='tcp', SourceSecurityGroupId=Ref(sg), FromPort=443, ToPort=443) for sg in [app_server_load_balancer_security_group] ], SecurityGroupEgress=[ ec2.SecurityGroupRule(IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p, ToPort=p) for p in [80, 443, PAPERTRAIL_PORT] ], Tags=Tags(Name='sgAppServer', Color=Ref(self.param_color)))) # ELB to App Server self.add_resource( ec2.SecurityGroupEgress( 'sgEgressELBtoAppHTTP', GroupId=Ref(app_server_load_balancer_security_group), DestinationSecurityGroupId=Ref(app_server_security_group), IpProtocol='tcp', FromPort=80, ToPort=80)) self.add_resource( ec2.SecurityGroupEgress( 'sgEgressELBtoAppHTTPS', GroupId=Ref(app_server_load_balancer_security_group), DestinationSecurityGroupId=Ref(app_server_security_group), IpProtocol='tcp', FromPort=443, ToPort=443)) # Bastion to App Server, app server to db, app server to inet rules = [(self.param_bastion_security_group, app_server_security_group, [80, 443, 22]), (app_server_security_group, self.param_database_security_group, [POSTGRES]), (app_server_security_group, self.param_nat_security_group, [80, 443, 22, 587, PAPERTRAIL_PORT])] for num, (srcsg, destsg, ports) in enumerate(rules): for port in ports: self.add_resource( ec2.SecurityGroupEgress( 'sgEgress{}p{}'.format(num, port), GroupId=Ref(srcsg), DestinationSecurityGroupId=Ref(destsg), IpProtocol='tcp', FromPort=port, ToPort=port)) self.add_resource( ec2.SecurityGroupIngress('sgIngress{}p{}'.format( num, port), GroupId=Ref(destsg), SourceSecurityGroupId=Ref(srcsg), IpProtocol='tcp', FromPort=port, ToPort=port)) # # ELB # app_server_load_balancer = self.add_resource( elb.LoadBalancer( 'elbAppServer', ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy( Enabled=True, Timeout=300), CrossZone=True, SecurityGroups=[Ref(app_server_load_balancer_security_group)], Listeners=[ elb.Listener(LoadBalancerPort='80', Protocol='HTTP', InstancePort='80', InstanceProtocol='HTTP'), elb.Listener(LoadBalancerPort='443', Protocol='HTTPS', InstancePort='443', InstanceProtocol='HTTP', SSLCertificateId=Ref( self.param_ssl_certificate_arn)) ], HealthCheck=elb.HealthCheck( Target=self.HEALTH_ENDPOINT, HealthyThreshold='3', UnhealthyThreshold='2', Interval='30', Timeout='5', ), Subnets=Ref(self.param_public_subnets), Tags=Tags(Name='elbAppServer', Color=Ref(self.param_color)))) self.add_resource( 
cw.Alarm('alarmAppServerBackend4xx', AlarmActions=[Ref(self.param_notification_arn)], Statistic='Sum', Period=300, Threshold='5', EvaluationPeriods=1, ComparisonOperator='GreaterThanThreshold', MetricName='HTTPCode_Backend_4XX', Namespace='AWS/ELB', Dimensions=[ cw.MetricDimension( 'metricLoadBalancerName', Name='LoadBalancerName', Value=Ref(app_server_load_balancer)) ])) self.add_resource( cw.Alarm('alarmAppServerBackend5xx', AlarmActions=[Ref(self.param_notification_arn)], Statistic='Sum', Period=60, Threshold='0', EvaluationPeriods=1, ComparisonOperator='GreaterThanThreshold', MetricName='HTTPCode_Backend_5XX', Namespace='AWS/ELB', Dimensions=[ cw.MetricDimension( 'metricLoadBalancerName', Name='LoadBalancerName', Value=Ref(app_server_load_balancer)) ])) # # ASG # app_server_launch_config = self.add_resource( asg.LaunchConfiguration( 'lcAppServer', ImageId=Ref(self.app_server_ami), IamInstanceProfile=Ref(self.param_app_server_iam_profile), InstanceType=Ref(self.app_server_instance_type_parameter), KeyName=Ref(self.keyname_parameter), SecurityGroups=[Ref(app_server_security_group)])) autoscaling_group = self.add_resource( asg.AutoScalingGroup( 'asgAppServer', AvailabilityZones=self.get_input( 'AppServerAvailabilityZones').split(','), Cooldown=300, DesiredCapacity=Ref(self.param_desired_capacity), HealthCheckGracePeriod=600, HealthCheckType='ELB', LaunchConfigurationName=Ref(app_server_launch_config), LoadBalancerNames=[Ref(app_server_load_balancer)], MaxSize=Ref(self.param_max_size), MinSize=Ref(self.param_min_size), NotificationConfigurations=[asg.NotificationConfigurations( TopicARN=Ref(self.param_notification_arn), NotificationTypes=[ asg.EC2_INSTANCE_LAUNCH, asg.EC2_INSTANCE_LAUNCH_ERROR, asg.EC2_INSTANCE_TERMINATE, asg.EC2_INSTANCE_TERMINATE_ERROR ])], VPCZoneIdentifier=Ref(self.param_private_subnets), Tags=[ asg.Tag('Name', '{}Server'.format(self.STACK_NAME_PREFIX), True), asg.Tag('Color', Ref(self.param_color), True) ])) # autoscaling policies autoscaling_policy_add = self.add_resource( asg.ScalingPolicy('scalingPolicyAddAppServer', AdjustmentType='ChangeInCapacity', AutoScalingGroupName=Ref(autoscaling_group), Cooldown=600, ScalingAdjustment='1')) autoscaling_policy_remove = self.add_resource( asg.ScalingPolicy('scalingPolicyRemoveAppServer', AdjustmentType='ChangeInCapacity', AutoScalingGroupName=Ref(autoscaling_group), Cooldown=600, ScalingAdjustment='-1')) if self.STACK_NAME_PREFIX == 'Otp': # trigger scale down if CPU avg usage < 10% for 3 consecutive 5 min periods self.add_resource( cw.Alarm('alarmAppServerLowCPU', AlarmActions=[Ref(autoscaling_policy_remove)], Statistic='Average', Period=300, Threshold='10', EvaluationPeriods=3, ComparisonOperator='LessThanThreshold', MetricName='CPUUtilization', Namespace='AWS/EC2', Dimensions=[ cw.MetricDimension('metricAutoScalingGroupName', Name='AutoScalingGroupName', Value=Ref(autoscaling_group)) ])) # trigger scale up if CPU avg usage >= 30% for a 5 min period self.add_resource( cw.Alarm('alarmAppServerHighCPU', AlarmActions=[ Ref(self.param_notification_arn), Ref(autoscaling_policy_add) ], Statistic='Average', Period=300, Threshold='30', EvaluationPeriods=1, ComparisonOperator='GreaterThanOrEqualToThreshold', MetricName='CPUUtilization', Namespace='AWS/EC2', Dimensions=[ cw.MetricDimension('metricAutoScalingGroupName', Name='AutoScalingGroupName', Value=Ref(autoscaling_group)) ])) else: # scale web servers based on network usage self.add_resource( cw.Alarm('alarmAppServerLowNetworkUsage', AlarmActions=[Ref(autoscaling_policy_remove)],
Statistic='Average', Period=300, Threshold='500000', EvaluationPeriods=3, ComparisonOperator='LessThanThreshold', MetricName='NetworkOut', Namespace='AWS/EC2', Dimensions=[ cw.MetricDimension('metricAutoScalingGroupName', Name='AutoScalingGroupName', Value=Ref(autoscaling_group)) ])) self.add_resource( cw.Alarm('alarmAppServerHighNetworkUsage', AlarmActions=[ Ref(self.param_notification_arn), Ref(autoscaling_policy_add) ], Statistic='Average', Period=300, Threshold='10000000', EvaluationPeriods=1, ComparisonOperator='GreaterThanOrEqualToThreshold', MetricName='NetworkOut', Namespace='AWS/EC2', Dimensions=[ cw.MetricDimension('metricAutoScalingGroupName', Name='AutoScalingGroupName', Value=Ref(autoscaling_group)) ])) # # DNS name # self.add_resource( route53.RecordSetType( 'dnsName', Name=Join('.', [ Ref(self.param_color), Ref(self.param_stacktype), self.STACK_NAME_PREFIX, Ref(self.param_public_hosted_zone_name) ]), Type='A', AliasTarget=route53.AliasTarget( GetAtt(app_server_load_balancer, 'CanonicalHostedZoneNameID'), GetAtt(app_server_load_balancer, 'DNSName')), HostedZoneName=Ref(self.param_public_hosted_zone_name))) self.add_output([ Output('{}ServerLoadBalancerEndpoint'.format( self.STACK_NAME_PREFIX), Description='Application server endpoint', Value=GetAtt(app_server_load_balancer, 'DNSName')), Output('{}ServerLoadBalancerHostedZoneNameID'.format( self.STACK_NAME_PREFIX), Description='ID of canonical hosted zone name for ELB', Value=GetAtt(app_server_load_balancer, 'CanonicalHostedZoneNameID')) ])
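# --- Illustrative sketch (not part of the original stack) ---
# The scaling logic above follows the standard pair-up: a ScalingPolicy says
# what to do (ChangeInCapacity +/-1) and a CloudWatch alarm decides when, by
# listing the policy in its AlarmActions. Distilled to the essentials
# (logical IDs and thresholds here are placeholders):
from troposphere import Ref
from troposphere import autoscaling as asg
from troposphere import cloudwatch as cw

scale_up = asg.ScalingPolicy(
    'scalingPolicyAdd', AdjustmentType='ChangeInCapacity',
    AutoScalingGroupName=Ref('asgAppServer'), Cooldown=600,
    ScalingAdjustment='1')

high_cpu = cw.Alarm(
    'alarmHighCPU', AlarmActions=[Ref(scale_up)],
    Namespace='AWS/EC2', MetricName='CPUUtilization', Statistic='Average',
    Period=300, EvaluationPeriods=1, Threshold='30',
    ComparisonOperator='GreaterThanOrEqualToThreshold',
    Dimensions=[cw.MetricDimension(Name='AutoScalingGroupName',
                                   Value=Ref('asgAppServer'))])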
def create_template(self) -> None: """Create template (main function called by Stacker).""" template = self.template template.add_version("2010-09-09") template.add_description( "Kubernetes workers via EKS - V1.0.0 " "- compatible with amazon-eks-node-v23+" ) # Metadata template.add_metadata( { "AWS::CloudFormation::Interface": { "ParameterGroups": [ { "Label": {"default": "EKS Cluster"}, "Parameters": [ self.variables[i].name for i in [ "ClusterName", "ClusterControlPlaneSecurityGroup", ] ], }, { "Label": {"default": "Worker Node Configuration"}, "Parameters": [ self.variables[i].name for i in [ "NodeGroupName", "NodeAutoScalingGroupMinSize", "NodeAutoScalingGroupMaxSize", "UseDesiredInstanceCount", "NodeInstanceType", "NodeInstanceProfile", "NodeImageId", "NodeVolumeSize", "KeyName", "UseSpotInstances", "SpotBidPrice", "BootstrapArguments", ] ], }, { "Label": {"default": "Worker Network Configuration"}, "Parameters": [ self.variables[i].name for i in ["VpcId", "Subnets"] ], }, ] } } ) # Conditions template.add_condition( "SetSpotPrice", Equals(self.variables["UseSpotInstances"].ref, "yes") ) template.add_condition( "DesiredInstanceCountSpecified", Equals(self.variables["UseDesiredInstanceCount"].ref, "true"), ) template.add_condition( "KeyNameSpecified", Not(Equals(self.variables["KeyName"].ref, "")) ) # Resources nodesecuritygroup = template.add_resource( ec2.SecurityGroup( "NodeSecurityGroup", GroupDescription="Security group for all nodes in the cluster", Tags=[ { "Key": Sub("kubernetes.io/cluster/${ClusterName}"), "Value": "owned", }, ], VpcId=self.variables["VpcId"].ref, ) ) template.add_output( Output( "NodeSecurityGroup", Description="Security group for all nodes in the cluster", Value=nodesecuritygroup.ref(), ) ) template.add_resource( ec2.SecurityGroupIngress( "NodeSecurityGroupIngress", Description="Allow node to communicate with each other", GroupId=nodesecuritygroup.ref(), SourceSecurityGroupId=nodesecuritygroup.ref(), IpProtocol="-1", FromPort=0, ToPort=65535, ) ) template.add_resource( ec2.SecurityGroupIngress( "NodeSecurityGroupFromControlPlaneIngress", Description="Allow worker Kubelets and pods to receive " "communication from the cluster control plane", GroupId=nodesecuritygroup.ref(), SourceSecurityGroupId=self.variables[ "ClusterControlPlaneSecurityGroup" ].ref, # noqa IpProtocol="tcp", FromPort=1025, ToPort=65535, ) ) template.add_resource( ec2.SecurityGroupEgress( "ControlPlaneEgressToNodeSecurityGroup", Description="Allow the cluster control plane to communicate " "with worker Kubelet and pods", GroupId=self.variables["ClusterControlPlaneSecurityGroup"].ref, DestinationSecurityGroupId=nodesecuritygroup.ref(), IpProtocol="tcp", FromPort=1025, ToPort=65535, ) ) template.add_resource( ec2.SecurityGroupIngress( "NodeSecurityGroupFromControlPlaneOn443Ingress", Description="Allow pods running extension API servers on port " "443 to receive communication from cluster " "control plane", GroupId=nodesecuritygroup.ref(), SourceSecurityGroupId=self.variables[ "ClusterControlPlaneSecurityGroup" ].ref, # noqa IpProtocol="tcp", FromPort=443, ToPort=443, ) ) template.add_resource( ec2.SecurityGroupEgress( "ControlPlaneEgressToNodeSecurityGroupOn443", Description="Allow the cluster control plane to communicate " "with pods running extension API servers on port " "443", GroupId=self.variables["ClusterControlPlaneSecurityGroup"].ref, DestinationSecurityGroupId=nodesecuritygroup.ref(), IpProtocol="tcp", FromPort=443, ToPort=443, ) ) template.add_resource( ec2.SecurityGroupIngress( 
"ClusterControlPlaneSecurityGroupIngress", Description="Allow pods to communicate with the cluster API " "Server", GroupId=self.variables["ClusterControlPlaneSecurityGroup"].ref, SourceSecurityGroupId=nodesecuritygroup.ref(), IpProtocol="tcp", FromPort=443, ToPort=443, ) ) nodelaunchconfig = template.add_resource( autoscaling.LaunchConfiguration( "NodeLaunchConfig", AssociatePublicIpAddress=True, IamInstanceProfile=self.variables["NodeInstanceProfile"].ref, ImageId=self.variables["NodeImageId"].ref, InstanceType=self.variables["NodeInstanceType"].ref, KeyName=If("KeyNameSpecified", self.variables["KeyName"].ref, NoValue), SecurityGroups=[nodesecuritygroup.ref()], SpotPrice=If( "SetSpotPrice", self.variables["SpotBidPrice"].ref, NoValue ), BlockDeviceMappings=[ autoscaling.BlockDeviceMapping( DeviceName="/dev/xvda", Ebs=autoscaling.EBSBlockDevice( VolumeSize=self.variables["NodeVolumeSize"].ref, VolumeType="gp2", DeleteOnTermination=True, ), ) ], UserData=Base64( Sub( "\n".join( [ "#!/bin/bash", "set -o xtrace", "/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}", "/opt/aws/bin/cfn-signal --exit-code $? \\", "--stack ${AWS::StackName} \\", "--resource NodeGroup \\", "--region ${AWS::Region}", ] ) ) ), ) ) template.add_resource( autoscaling.AutoScalingGroup( "NodeGroup", DesiredCapacity=If( "DesiredInstanceCountSpecified", self.variables["NodeAutoScalingGroupMaxSize"].ref, NoValue, ), LaunchConfigurationName=nodelaunchconfig.ref(), MinSize=self.variables["NodeAutoScalingGroupMinSize"].ref, MaxSize=self.variables["NodeAutoScalingGroupMaxSize"].ref, VPCZoneIdentifier=self.variables["Subnets"].ref, Tags=[ autoscaling.Tag( "Name", Sub("${ClusterName}-${NodeGroupName}-Node"), True ), autoscaling.Tag( Sub("kubernetes.io/cluster/${ClusterName}"), "owned", True ), ], UpdatePolicy=UpdatePolicy( AutoScalingRollingUpdate=AutoScalingRollingUpdate( MinInstancesInService="1", MaxBatchSize="1" ) ), ) )
def configure(self): """ Returns a Pritunl template """ self.defaults = {'instance_type': 't3.large'} self.service = 'pritunl' self.set_description('Sets up Pritunl servers') self.get_default_security_groups() self.get_standard_parameters() self.get_standard_policies() _vpn_config = constants.ENVIRONMENTS[self.env]['pritunl'] _global_config = constants.ENVIRONMENTS[self.env] _bootstrap_mode = _vpn_config.get('bootstrap_mode', False) _bootstrap_ami = get_latest_ami_id( self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2', 'amazon') _ivy_ami = get_latest_ami_id(self.region, 'ivy-base', _global_config.get('ami_owner', 'self')) self.ami = self.add_parameter( Parameter('AMI', Type='String', Description='AMI ID for instances', Default=_bootstrap_ami if _bootstrap_mode else _ivy_ami)) _public_dns = _vpn_config['public_dns'] _vpn_name = '{}Pritunl'.format(self.env) # We want the preferred subnet only. _vpn_subnet = self.get_subnets('public', _preferred_only=True)[0] # Add our security group _vpn_security_group = self.add_resource( ec2.SecurityGroup( '{}SecurityGroup'.format(_vpn_name), VpcId=self.vpc_id, GroupDescription='Security Group for Pritunl {}'.format( _vpn_name), SecurityGroupIngress=[ { "IpProtocol": "icmp", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0" }, # Ping { "IpProtocol": "tcp", "FromPort": "80", "ToPort": "80", "CidrIp": "0.0.0.0/0" }, # HTTP { "IpProtocol": "tcp", "FromPort": "443", "ToPort": "443", "CidrIp": "0.0.0.0/0" }, # HTTPS { "IpProtocol": "tcp", "FromPort": "22", "ToPort": "22", "CidrIp": "0.0.0.0/0" }, # SSH { "IpProtocol": "udp", "FromPort": "10000", "ToPort": "20000", "CidrIp": "0.0.0.0/0" }, # HTTPS/OVPN { "IpProtocol": "tcp", "FromPort": "27017", "ToPort": "27017", "CidrIp": constants.SUPERNET }, # mongodb master { "IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": constants.SUPERNET } # Replies from local VPC ], SecurityGroupEgress=[{ "IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0" }])) # Add EBS volume if local mongo used _data_volume = None if _vpn_config.get('local_mongo', False): self.add_iam_policy( iam.Policy( PolicyName='AttachVolume', PolicyDocument={ 'Statement': [{ 'Effect': 'Allow', 'Resource': '*', 'Action': [ 'ec2:AttachVolume', 'ec2:DeleteSnapshot', 'ec2:DescribeTags', 'ec2:DescribeVolumeAttribute', 'ec2:DescribeVolumeStatus', 'ec2:DescribeVolumes', 'ec2:DetachVolume' ] }] })) _data_volume = ec2.Volume( '{}DataVolume'.format(_vpn_name), Size=_vpn_config.get('data_volume_size', 20), VolumeType='gp2', AvailabilityZone=_vpn_subnet['AvailabilityZone'], DeletionPolicy='Retain', Tags=self.get_tags(service_override=self.service, role_override=_vpn_name) + [ec2.Tag('Name', _vpn_name + "-datavol")]) self.add_resource(_data_volume) # Add the elastic IP and the ENI for it, then attach it. 
_vpn_eip = self.add_resource( ec2.EIP('{}InstanceEIP'.format(_vpn_name), Domain='vpc')) _vpn_eni = self.add_resource( ec2.NetworkInterface( '{}InstanceENI'.format(_vpn_name), SubnetId=_vpn_subnet['SubnetId'], Description='ENI for {}'.format(_vpn_name), GroupSet=[Ref(_vpn_security_group)] + self.security_groups, SourceDestCheck=False, Tags=self.get_tags(service_override=self.service, role_override=_vpn_name))) self.get_eni_policies() self.add_resource( ec2.EIPAssociation('{}AssociateVPNInstanceENI'.format(_vpn_name), AllocationId=GetAtt(_vpn_eip, "AllocationId"), NetworkInterfaceId=Ref(_vpn_eni))) # Add a route53 DNS name if self.get_partition() != 'aws-us-gov': self.add_resource( route53.RecordSetGroup('{}Route53'.format(_vpn_name), HostedZoneName=constants.ENVIRONMENTS[ self.env]['route53_zone'], RecordSets=[ route53.RecordSet( Name=_public_dns, ResourceRecords=[Ref(_vpn_eip)], Type='A', TTL=600) ])) # Get all route tables in the VPC _vpc_route_tables = self.ec2_conn.describe_route_tables( Filters=[{ 'Name': 'vpc-id', 'Values': [self.vpc_id] }])['RouteTables'] # Set up the routing table for the VPC # Allow for changing client subnets in constants.py for client_subnet in _vpn_config['client_subnets']: for route_table in _vpc_route_tables: self.add_resource( ec2.Route('{}Route{}{}'.format( _vpn_name, client_subnet.translate({ ord("."): "", ord("/"): "" }), route_table['RouteTableId'].replace('-', '')), RouteTableId=route_table['RouteTableId'], DestinationCidrBlock=client_subnet, NetworkInterfaceId=Ref(_vpn_eni))) _mongodb = _vpn_config.get('mongodb') _server_id = _vpn_config['server_id'] _userdata_template = self.get_cloudinit_template( _tpl_name="pritunl_bootstrap" if _bootstrap_mode else None, replacements=(('__PROMPT_COLOR__', self.prompt_color()), ('__SERVER_ID__', _server_id), ('__SERVICE__', self.service), ('__MONGODB__', _mongodb if _mongodb else ''))) _userdata = Sub( _userdata_template.replace( '${', '${!') # Replace bash brackets with CFN escaped style .replace( '{#', '${' ), # Replace rain-style CFN escapes with proper CFN brackets { 'CFN_ENI_ID': Ref(_vpn_eni), 'CFN_EBS_ID': Ref(_data_volume) if _data_volume else '' }) _vpn_launch_configuration = self.add_resource( autoscaling.LaunchConfiguration( '{}LaunchConfiguration'.format(_vpn_name), AssociatePublicIpAddress=True, KeyName=Ref(self.keypair_name), ImageId=Ref(self.ami), InstanceType=Ref(self.instance_type), InstanceMonitoring=False, IamInstanceProfile=Ref(self.instance_profile), UserData=Base64(_userdata))) self.add_resource( autoscaling.AutoScalingGroup( '{}ASGroup'.format(_vpn_name), AvailabilityZones=[_vpn_subnet['AvailabilityZone']], HealthCheckType='EC2', LaunchConfigurationName=Ref(_vpn_launch_configuration), MinSize=0, MaxSize=1, VPCZoneIdentifier=[_vpn_subnet['SubnetId']], Tags=self.get_autoscaling_tags(service_override=self.service, role_override=_vpn_name) + [autoscaling.Tag('Name', _vpn_name, True)]))
def configure(self): """ Returns a BIND template """ self.defaults = {'instance_type': 't3.micro'} self.service = 'bind' self.set_description('Sets up BIND DNS servers') self.get_default_security_groups() self.get_standard_parameters() self.get_standard_policies() self.ami = self.add_parameter( Parameter('AMI', Type='String', Description='AMI ID for instances', Default=get_latest_ami_id( self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2', 'amazon'))) config = constants.ENVIRONMENTS[self.env][self.service] # All subnets in public get a DNS server subnets = self.get_subnets('public') # Add our security group security_group = self.add_resource( ec2.SecurityGroup( '{}SecurityGroup'.format(self.name), VpcId=self.vpc_id, GroupDescription='Security Group for {}'.format(self.name), SecurityGroupIngress=[ { "IpProtocol": "tcp", "FromPort": "53", "ToPort": "53", "CidrIp": "0.0.0.0/0" }, # DNS TCP { "IpProtocol": "udp", "FromPort": "53", "ToPort": "53", "CidrIp": "0.0.0.0/0" }, # DNS UDP ], SecurityGroupEgress=[{ "IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0" }])) route53_zone = constants.ENVIRONMENTS[self.env]['route53_zone'] zonefile = '' for zone in config['forwarders']: zonefile += "\n" + self.make_bind_zone(zone) for subnet in subnets: subnet_name = subnet['AvailabilityZone'] role = '{}-{}-{}'.format(self.env, self.service, subnet_name) # myenv-bind-us-west-2a # Add the elastic IP and the ENI for it, then attach it. eip = self.add_resource( ec2.EIP('{}InstanceEIP'.format(self.cfn_name(role)), Domain='vpc')) eni = self.add_resource( ec2.NetworkInterface( '{}InstanceENI'.format(self.cfn_name(role)), SubnetId=subnet['SubnetId'], Description='ENI for {}'.format(role), GroupSet=[Ref(security_group)] + self.security_groups, SourceDestCheck=True, Tags=self.get_tags(service_override=self.service, role_override=role))) self.get_eni_policies() self.add_resource( ec2.EIPAssociation('{}AssociateVPNInstanceENI'.format( self.cfn_name(role)), AllocationId=GetAtt(eip, "AllocationId"), NetworkInterfaceId=Ref(eni))) # Add a route53 DNS name self.add_resource( route53.RecordSetGroup('{}Route53'.format(self.cfn_name(role)), HostedZoneName=route53_zone, RecordSets=[ route53.RecordSet( Name="{}.{}".format( role, route53_zone), ResourceRecords=[Ref(eip)], Type='A', TTL=600) ])) # Substitute the userdata template and feed it to CFN userdata_template = self.get_cloudinit_template( replacements=(('__PROMPT_COLOR__', self.prompt_color()), ('__SERVICE__', self.service), ('__BIND_ZONEFILE__', zonefile))) userdata = Sub( userdata_template.replace( '${', '${!') # Replace bash brackets with CFN escaped style .replace( '{#', '${' ), # Replace rain-style CFN escapes with proper CFN brackets {'CFN_ENI_ID': Ref(eni)}) launch_configuration = self.add_resource( autoscaling.LaunchConfiguration( '{}LaunchConfiguration'.format(self.cfn_name(role)), AssociatePublicIpAddress=True, KeyName=Ref(self.keypair_name), ImageId=Ref(self.ami), InstanceType=Ref(self.instance_type), InstanceMonitoring=False, IamInstanceProfile=Ref(self.instance_profile), UserData=Base64(userdata))) self.add_resource( autoscaling.AutoScalingGroup( '{}ASGroup'.format(self.cfn_name(role)), AvailabilityZones=[subnet['AvailabilityZone']], HealthCheckType='EC2', LaunchConfigurationName=Ref(launch_configuration), MinSize=0, MaxSize=1, DesiredCapacity=0, VPCZoneIdentifier=[subnet['SubnetId']], Tags=self.get_autoscaling_tags( service_override=self.service, role_override=role) + [autoscaling.Tag('Name', role, True)]))
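# --- Illustrative sketch (not part of the original templates) ---
# The VPN, Pritunl and BIND stacks above all share one pattern: a fixed ENI
# (optionally fronted by an EIP) holds the service's stable address, while an
# ASG of at most one instance replaces the host behind it; the ENI ID is fed
# to the boot scripts through UserData so they can attach it. Reduced to its
# skeleton (subnet and security group IDs are placeholders):
from troposphere import GetAtt, Ref, Template
from troposphere import ec2

t = Template()
eni = t.add_resource(ec2.NetworkInterface(
    'ServiceENI',
    SubnetId='subnet-00000000',    # placeholder
    Description='Stable address for the service',
    GroupSet=['sg-00000000']))     # placeholder
eip = t.add_resource(ec2.EIP('ServiceEIP', Domain='vpc'))
t.add_resource(ec2.EIPAssociation(
    'ServiceEIPAssociation',
    AllocationId=GetAtt(eip, 'AllocationId'),
    NetworkInterfaceId=Ref(eni)))
# Ref(eni) is then substituted into UserData (e.g. Sub's CFN_ENI_ID above).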
def configure(self): """ Returns a Nexus template """ self.defaults = {'instance_type': 't3.xlarge'} self.service = 'nexus' self.set_description('Sets up Nexus repository manager servers') self.get_default_security_groups() self.get_standard_parameters() self.get_standard_policies() self.ami = self.add_parameter( Parameter('AMI', Type='String', Description='AMI ID for instances', Default=get_latest_ami_id( self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2', 'amazon'))) config = constants.ENVIRONMENTS[self.env][self.service] # We want the preferred subnet only. subnet = self.get_subnets('private', _preferred_only=True)[0] # Add our security group security_group = self.add_resource( ec2.SecurityGroup( '{}SecurityGroup'.format(self.name), VpcId=self.vpc_id, GroupDescription='Security Group for {}'.format(self.name), SecurityGroupIngress=[ { "IpProtocol": "tcp", "FromPort": "80", "ToPort": "80", "CidrIp": constants.SUPERNET }, # HTTP { "IpProtocol": "tcp", "FromPort": "443", "ToPort": "443", "CidrIp": constants.SUPERNET }, # HTTPS # {"IpProtocol": "tcp", "FromPort": "8081", "ToPort": "8081", "CidrIp": constants.SUPERNET}, # NexusRM Direct (disabled!) ], SecurityGroupEgress=[{ "IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0" }])) # Add our EBS data volume data_volume = ec2.Volume( '{}DataVolume'.format(self.name), Size=config.get('data_volume_size', 20), VolumeType='gp2', AvailabilityZone=subnet['AvailabilityZone'], DeletionPolicy='Retain', Tags=self.get_tags(service_override=self.service, role_override=self.name) + [ec2.Tag('Name', self.name + "-datavol")]) self.add_resource(data_volume) self.add_iam_policy( iam.Policy(PolicyName='AttachVolume', PolicyDocument={ 'Statement': [{ 'Effect': 'Allow', 'Resource': '*', 'Action': [ 'ec2:AttachVolume', 'ec2:DeleteSnapshot', 'ec2:DescribeTags', 'ec2:DescribeVolumeAttribute', 'ec2:DescribeVolumeStatus', 'ec2:DescribeVolumes', 'ec2:DetachVolume' ] }] })) # Add a ENI for static IP address eni = self.add_resource( ec2.NetworkInterface( '{}InstanceENI'.format(self.name), SubnetId=subnet['SubnetId'], Description='ENI for {}'.format(self.name), GroupSet=[Ref(security_group)] + self.security_groups, SourceDestCheck=True, Tags=self.get_tags(service_override=self.service, role_override=self.name))) self.get_eni_policies() # Add a route53 A record for the main Nexus host route53_zone = constants.ENVIRONMENTS[self.env]['route53_zone'] private_dns = config.get('private_dns', 'nexus.{}'.format(route53_zone)) self.add_resource( route53.RecordSetGroup( '{}Route53'.format(self.name), HostedZoneName=route53_zone, RecordSets=[ route53.RecordSet(Name=private_dns, ResourceRecords=[ GetAtt(eni, 'PrimaryPrivateIpAddress') ], Type='A', TTL=600) ])) # Add CNAME records for each repository, pointing to the main for repository in config['repositories']: self.add_resource( route53.RecordSetGroup( '{}{}Route53'.format(self.name, self.cfn_name(repository)), HostedZoneName=route53_zone, RecordSets=[ route53.RecordSet(Name='{}.{}'.format( repository, route53_zone), ResourceRecords=[private_dns], Type='CNAME', TTL=600) ])) # Add S3 IAM role for nexus blobstore access self.add_iam_policy( iam.Policy( PolicyName='S3Access', PolicyDocument={ 'Statement': [{ "Effect": "Allow", "Action": [ "s3:ListBucket", "s3:GetBucketLocation", "s3:ListBucketMultipartUploads", "s3:ListBucketVersions", "s3:GetBucketAcl", "s3:GetLifecycleConfiguration", "s3:PutLifecycleConfiguration" ], "Resource": [ 'arn:{}:s3:::{}'.format(self.get_partition(), config['s3_bucket']) ] }, { 
"Effect": "Allow", "Action": [ "s3:GetObject", "s3:PutObject", "s3:DeleteObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", "s3:GetObjectTagging", "s3:PutObjectTagging", "s3:GetObjectTagging", "s3:DeleteObjectTagging" ], "Resource": [ 'arn:{}:s3:::{}/*'.format(self.get_partition(), config['s3_bucket']) ] }] })) # Substitute the userdata template and feed it to CFN userdata_template = self.get_cloudinit_template(replacements=( ('__PROMPT_COLOR__', self.prompt_color()), ('__SERVICE__', self.service), ('__DEFAULT_DOMAIN__', route53_zone[:-1]), # route53_zone has a trailing '.', strip it ('__TOP_DOMAIN__', constants.ROOT_ROUTE53_ZONE), # ('__REPOSITORIES__', " ".join(['"{}"'.format(x) for x in config['repositories']])) # '"abc" "def" "ghi"' )) userdata = Sub( userdata_template.replace( '${', '${!') # Replace bash brackets with CFN escaped style .replace( '{#', '${' ), # Replace rain-style CFN escapes with proper CFN brackets { 'CFN_ENI_ID': Ref(eni), 'CFN_EBS_ID': Ref(data_volume) }) launch_configuration = self.add_resource( autoscaling.LaunchConfiguration( '{}LaunchConfiguration'.format(self.name), AssociatePublicIpAddress=False, KeyName=Ref(self.keypair_name), ImageId=Ref(self.ami), InstanceType=Ref(self.instance_type), InstanceMonitoring=False, IamInstanceProfile=Ref(self.instance_profile), UserData=Base64(userdata))) self.add_resource( autoscaling.AutoScalingGroup( '{}ASGroup'.format(self.name), AvailabilityZones=[subnet['AvailabilityZone']], HealthCheckType='EC2', LaunchConfigurationName=Ref(launch_configuration), MinSize=0, MaxSize=1, DesiredCapacity=0, VPCZoneIdentifier=[subnet['SubnetId']], Tags=self.get_autoscaling_tags(service_override=self.service, role_override=self.name) + [autoscaling.Tag('Name', self.name, True)]))
HealthCheckType='EC2', LaunchConfigurationName=Ref(decider_launch_config), MaxSize=1, MinSize=1, NotificationConfigurations=[ asg.NotificationConfigurations( TopicARN=Ref(notification_arn_param), NotificationTypes=[ asg.EC2_INSTANCE_LAUNCH, asg.EC2_INSTANCE_LAUNCH_ERROR, asg.EC2_INSTANCE_TERMINATE, asg.EC2_INSTANCE_TERMINATE_ERROR ]) ], VPCZoneIdentifier=Ref(private_subnets_param), Tags=[asg.Tag('Name', 'Decider', True)])) activity_worker_launch_config = t.add_resource( asg.LaunchConfiguration( 'lcActivityWorker', ImageId=Ref(coreos_ami_param), # IamInstanceProfile=Ref(activity_worker_instance_profile_param), InstanceType=Ref(activity_worker_instance_type_param), KeyName=Ref(keyname_param), SecurityGroups=[Ref(activity_worker_security_group)], UserData=Base64(read_file('cloud-config/oam-activity-worker.yml')))) activity_worker_auto_scaling_group = t.add_resource( asg.AutoScalingGroup( 'asgActivityWorker', AvailabilityZones=Ref(availability_zones_param),
def create_auto_scaling_resources(self, app_server_security_group, app_server_lb, backward_compat_app_server_lb): self.add_condition('BlueCondition', Equals('Blue', Ref(self.color))) self.add_condition('GreenCondition', Equals('Green', Ref(self.color))) blue_app_server_launch_config = self.add_resource( asg.LaunchConfiguration( 'lcAppServerBlue', Condition='BlueCondition', ImageId=Ref(self.app_server_ami), IamInstanceProfile=Ref(self.app_server_instance_profile), InstanceType=Ref(self.app_server_instance_type), KeyName=Ref(self.keyname), SecurityGroups=[Ref(app_server_security_group)], UserData=Base64( Join( '', self.get_cloud_config( self.blue_tile_distribution_endpoint))))) self.add_resource( asg.AutoScalingGroup( 'asgAppServerBlue', AvailabilityZones=Ref(self.availability_zones), Condition='BlueCondition', Cooldown=300, DesiredCapacity=Ref(self.app_server_auto_scaling_desired), HealthCheckGracePeriod=600, HealthCheckType='ELB', LaunchConfigurationName=Ref(blue_app_server_launch_config), LoadBalancerNames=[ Ref(app_server_lb), Ref(backward_compat_app_server_lb) ], MaxSize=Ref(self.app_server_auto_scaling_max), MinSize=Ref(self.app_server_auto_scaling_min), NotificationConfigurations=[ asg.NotificationConfigurations( TopicARN=Ref(self.notification_topic_arn), NotificationTypes=[ asg.EC2_INSTANCE_LAUNCH, asg.EC2_INSTANCE_LAUNCH_ERROR, asg.EC2_INSTANCE_TERMINATE, asg.EC2_INSTANCE_TERMINATE_ERROR ]) ], VPCZoneIdentifier=Ref(self.private_subnets), Tags=[asg.Tag('Name', 'AppServer', True)])) green_app_server_launch_config = self.add_resource( asg.LaunchConfiguration( 'lcAppServerGreen', Condition='GreenCondition', ImageId=Ref(self.app_server_ami), IamInstanceProfile=Ref(self.app_server_instance_profile), InstanceType=Ref(self.app_server_instance_type), KeyName=Ref(self.keyname), SecurityGroups=[Ref(app_server_security_group)], UserData=Base64( Join( '', self.get_cloud_config( self.green_tile_distribution_endpoint))))) self.add_resource( asg.AutoScalingGroup( 'asgAppServerGreen', AvailabilityZones=Ref(self.availability_zones), Condition='GreenCondition', Cooldown=300, DesiredCapacity=Ref(self.app_server_auto_scaling_desired), HealthCheckGracePeriod=600, HealthCheckType='ELB', LaunchConfigurationName=Ref(green_app_server_launch_config), LoadBalancerNames=[ Ref(app_server_lb), Ref(backward_compat_app_server_lb) ], MaxSize=Ref(self.app_server_auto_scaling_max), MinSize=Ref(self.app_server_auto_scaling_min), NotificationConfigurations=[ asg.NotificationConfigurations( TopicARN=Ref(self.notification_topic_arn), NotificationTypes=[ asg.EC2_INSTANCE_LAUNCH, asg.EC2_INSTANCE_LAUNCH_ERROR, asg.EC2_INSTANCE_TERMINATE, asg.EC2_INSTANCE_TERMINATE_ERROR ]) ], VPCZoneIdentifier=Ref(self.private_subnets), Tags=[asg.Tag('Name', 'AppServer', True)]))
def generate_asg(self, placement, count, block_mapping, load_balancers=None, target_group_arns=None, preferred_subnets_only=False): if placement not in ["public", "private"]: raise ValueError("Mesos ASG must be either public or private") mesos_masters = constants.ENVIRONMENTS[self.env]['mesos']['master']['masters'] user_data = self.get_cloudinit_template( replacements=( ('__PROMPT_COLOR__', self.prompt_color()), ('__PLACEMENT__', placement), ('__ZK_CONNECT__', ','.join(['{}:2181'.format(z) for z in mesos_masters])) ) ) # Datadog webhook for scaling events # sns_topic = self.add_resource( # sns.Topic( # "MesosASG", # Subscription=[ # sns.Subscription( # Endpoint='https://app.datadoghq.com/intake/webhook/sns?api_key=', # Protocol='https' # ) # ] # ) # ) role_name = "Mesos{}Agent".format(placement.capitalize()) launch_configuration = self.add_resource( autoscaling.LaunchConfiguration( '{}LaunchConfiguration'.format(role_name), AssociatePublicIpAddress=False, BlockDeviceMappings=block_mapping, EbsOptimized=self.defaults.get('instance_type') in EBS_OPTIMIZED_INSTANCES, KeyName=Ref(self.keypair_name), ImageId=Ref(self.ami), IamInstanceProfile=Ref(self.instance_profile), InstanceType=Ref(self.instance_type), InstanceMonitoring=False, SecurityGroups=self.security_groups, UserData=Base64(user_data) ) ) self.add_resource( autoscaling.AutoScalingGroup( '{}ASGroup'.format(role_name), AvailabilityZones=[subnet['AvailabilityZone'] for subnet in self.get_subnets(placement, _preferred_only=preferred_subnets_only)], HealthCheckType='ELB', HealthCheckGracePeriod=600, LaunchConfigurationName=Ref(launch_configuration), LoadBalancerNames=load_balancers if target_group_arns is None else [], TargetGroupARNs=target_group_arns if load_balancers is None else [], MinSize=count, MaxSize=100, VPCZoneIdentifier=[subnet['SubnetId'] for subnet in self.get_subnets(placement, _preferred_only=preferred_subnets_only)], Tags=self.get_autoscaling_tags(service_override="MesosAgent", role_override=role_name) + [ autoscaling.Tag('Name', self.env + role_name, True) ], # NotificationConfigurations=[ # autoscaling.NotificationConfigurations( # TopicARN=Ref(_sns_topic), # NotificationTypes=[ # autoscaling.EC2_INSTANCE_LAUNCH, # autoscaling.EC2_INSTANCE_LAUNCH_ERROR, # autoscaling.EC2_INSTANCE_TERMINATE, # autoscaling.EC2_INSTANCE_TERMINATE_ERROR # ] # ) # ] ) )
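# --- Illustrative usage (not part of the original code) ---
# generate_asg() treats load_balancers and target_group_arns as mutually
# exclusive: whichever argument is left as None causes the other property to
# be emitted on the ASG. A hypothetical call wiring private agents to an ALB
# target group (the ARN value and instance type are placeholders):
#
#   self.generate_asg(
#       'private',
#       count=3,
#       block_mapping=get_block_device_mapping('r5.xlarge'),
#       target_group_arns=['arn:aws:elasticloadbalancing:us-east-1:000000000000:targetgroup/placeholder/0000000000000000'],
#       preferred_subnets_only=True)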
AutoScalingGroup( autoscaling_group_name, template=template, VPCZoneIdentifier=[Ref(public_subnet)], MinSize=1, MaxSize=1, DesiredCapacity=1, LaunchConfigurationName=Ref(container_instance_configuration), DependsOn=["LoadBalancer"], # Since one instance within the group is a reserved slot # for rolling ECS service upgrade, it's not possible to rely # on a "dockerized" `ELB` health-check, else this reserved # instance will be flagged as `unhealthy` and won't stop respawning HealthCheckType="EC2", HealthCheckGracePeriod=300, Tags=[autoscaling.Tag("Name", "ecs-auto-scaling-group-instances", True)], ) app_service_role = iam.Role( "AppServiceRole", template=template, AssumeRolePolicyDocument=dict(Statement=[ dict( Effect="Allow", Principal=dict(Service=["ecs.amazonaws.com"]), Action=["sts:AssumeRole"], ) ]), Path="/", Policies=[ iam.Policy(
def configure(self): """ Returns a cassandra template with seed nodes """ self.add_description('Sets up Cassandra in all Zones') self.get_eni_policies() self.get_default_security_groups() self.get_standard_parameters() self.get_standard_policies() _global_config = constants.ENVIRONMENTS[self.env] self.ami = self.add_parameter( Parameter('AMI', Type='String', Description='AMI ID for instances', Default=get_latest_ami_id( self.region, 'ivy-cassandra', _global_config.get('ami_owner', 'self')))) _cassandra_security_group = self.add_resource( ec2.SecurityGroup( '{}SecurityGroup'.format(self.name), VpcId=self.vpc_id, GroupDescription='Security Group for {} Instances'.format( self.name), SecurityGroupIngress=[ { 'IpProtocol': 'tcp', 'FromPort': 7000, 'ToPort': 7001, 'CidrIp': self.vpc_cidr }, # inter-node { 'IpProtocol': 'tcp', 'FromPort': 7199, 'ToPort': 7199, 'CidrIp': self.vpc_cidr }, # jmx { 'IpProtocol': 'tcp', 'FromPort': 9042, 'ToPort': 9042, 'CidrIp': self.vpc_cidr }, # client port { 'IpProtocol': 'tcp', 'FromPort': 9160, 'ToPort': 9160, 'CidrIp': self.vpc_cidr }, # client (thrift) ])) self.add_resource( ec2.SecurityGroupIngress( '{}IngressSecurityGroup'.format(self.name), GroupId=Ref(_cassandra_security_group), IpProtocol='-1', FromPort=-1, ToPort=-1, SourceSecurityGroupId=Ref(_cassandra_security_group ) # this allows members all traffic )) self.add_security_group(Ref(_cassandra_security_group)) # Add support for creating EBS snapshots and tagging them self.add_iam_policy( iam.Policy(PolicyName='CassandraBackups', PolicyDocument={ 'Statement': [{ 'Effect': 'Allow', 'Resource': '*', 'Action': [ 'ec2:AttachVolume', 'ec2:CreateSnapshot', 'ec2:CreateTags', 'ec2:DeleteSnapshot', 'ec2:DescribeInstances', 'ec2:DescribeSnapshots', 'ec2:DescribeTags', 'ec2:DescribeVolumeAttribute', 'ec2:DescribeVolumeStatus', 'ec2:DescribeVolumes', 'ec2:DetachVolume' ] }] })) for cluster in constants.ENVIRONMENTS[ self.env]['cassandra']['clusters']: for _instance in cluster['instances']: subnet = [ s for s in self.get_subnets('private') if netaddr.IPAddress(_instance['ip']) in netaddr.IPNetwork( s['CidrBlock']) ][0] service = 'cassandra-{}'.format(cluster['name']) role = '-'.join([ self.name, cluster['name'], subnet['AvailabilityZone'], _instance['ip'] ]) tags = self.get_tags(service_override=service, role_override=role) # Create ENI for this server, and hold onto a Ref for it so we can feed it into the userdata uniq_id = hashlib.md5(role.encode('utf-8')).hexdigest()[:10] eni = ec2.NetworkInterface( self.name + cluster['name'] + "ENI" + uniq_id, Description= 'Cassandra: Cluster: {} ENV: {} PrivateSubnet {}'.format( cluster['name'], self.env, subnet['SubnetId']), GroupSet=self.security_groups, PrivateIpAddress=_instance['ip'], SourceDestCheck=True, SubnetId=subnet['SubnetId'], Tags=tags, ) self.add_resource(eni) # Add the rootfs _block_device_mapping = get_block_device_mapping( self.parameters['InstanceType'].resource['Default']) _block_device_mapping += [ ec2.BlockDeviceMapping(DeviceName="/dev/xvda", Ebs=ec2.EBSBlockDevice( DeleteOnTermination=True, VolumeSize=cluster.get( 'rootfs_size', 20), VolumeType="gp2", )) ] # Seed the cluster from one node in the remote DC, plus three nodes in this DC # We want to avoid making too many nodes into seeds if cluster.get('remote_seed'): remote_env_name = cluster['remote_seed']['datacenter'] remote_cluster_name = cluster['remote_seed']['cluster'] remote_clusters = constants.ENVIRONMENTS[remote_env_name][ 'cassandra']['clusters'] # filter to just the remote cluster in the
remote DC and return that one only remote_cluster = list( filter(lambda x: x['name'] == remote_cluster_name, remote_clusters))[0] remote_seeds = [ i['ip'] for i in remote_cluster['instances'] ][:1] local_seeds = [i['ip'] for i in cluster['instances']][:3] seeds = ','.join(remote_seeds + local_seeds) else: # Use the first three cassandra nodes as seeds seeds = ','.join([i['ip'] for i in cluster['instances']][:3]) if cluster.get('data_volume_size'): # Create the EBS volume data_volume = ec2.Volume( '{}{}DataVolume{}'.format( self.name, cluster['name'], uniq_id ), # something like 'envnameCassandraappDataVolumec47145e176' Size=cluster.get('data_volume_size', 20), VolumeType='gp2', AvailabilityZone=subnet['AvailabilityZone'], DeletionPolicy='Retain', Tags=tags + [ec2.Tag('Name', role + "-datavol")]) self.add_resource(data_volume) else: data_volume = None # Create the user data in two phases # Phase 1: substitute from constants in Rain user_data_template = self.get_cloudinit_template( cluster['cassandra_template'], replacements=(('__PROMPT_COLOR__', self.prompt_color()), ('__CASSANDRA_CLUSTER__', cluster['name']), ('__CASSANDRA_CLUSTER_OVERRIDE__', cluster.get('cluster_name_override', "")), ('__CASSANDRA_SEEDS__', seeds), ('__SERVICE__', service))) # Phase 2: Allow AWS Cloudformation to further substitute Ref()'s in the userdata userdata = Base64( Sub( user_data_template.replace( '${', '${!' ) # Replace bash brackets with CFN escaped style .replace( '{#', '${' ), # Replace rain-style CFN escapes with proper CFN brackets { 'CFN_ENI_ID': Ref(eni), 'CFN_DATA_EBS_VOLUME_ID': Ref(data_volume) if data_volume else "" })) # Create the Launch Configuration / ASG _instance_type = cluster.get('instance_type', Ref(self.instance_type)) launch_configuration = self.add_resource( autoscaling.LaunchConfiguration( '{}{}LaunchConfiguration{}'.format( self.name, cluster['name'], uniq_id), AssociatePublicIpAddress=False, BlockDeviceMappings=_block_device_mapping, EbsOptimized=True if _instance_type in EBS_OPTIMIZED_INSTANCES else False, ImageId=Ref(self.ami), InstanceType=_instance_type, InstanceMonitoring=False, IamInstanceProfile=Ref(self.instance_profile), KeyName=Ref(self.keypair_name), SecurityGroups=self.security_groups, UserData=userdata)) self.add_resource( autoscaling.AutoScalingGroup( '{}{}ASGroup{}'.format(self.name, cluster['name'], uniq_id), AvailabilityZones=[subnet['AvailabilityZone']], HealthCheckType='EC2', LaunchConfigurationName=Ref(launch_configuration), MinSize=1, MaxSize=1, VPCZoneIdentifier=[subnet['SubnetId']], Tags=self.get_autoscaling_tags( service_override=service, role_override=role) + [autoscaling.Tag('Name', role, True)]))
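# --- Illustrative sketch (not part of the original template) ---
# The seed computation above deliberately keeps the seed set small: at most
# one node from the remote datacenter plus the first three local nodes, or
# just the first three local nodes when no remote DC is configured. In plain
# Python, with made-up IPs:
remote_instances = [{'ip': '10.1.0.11'}, {'ip': '10.1.0.12'}]
local_instances = [{'ip': '10.0.0.11'}, {'ip': '10.0.0.12'},
                   {'ip': '10.0.0.13'}, {'ip': '10.0.0.14'}]
remote_seeds = [i['ip'] for i in remote_instances][:1]
local_seeds = [i['ip'] for i in local_instances][:3]
assert ','.join(remote_seeds + local_seeds) == \
    '10.1.0.11,10.0.0.11,10.0.0.12,10.0.0.13'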