def test_create_autoscaling_group():
    """Create an ASG behind an ELB and verify every attribute round-trips
    through get_all_groups()."""
    elb_conn = boto.ec2.elb.connect_to_region('us-east-1')
    elb_conn.create_load_balancer('test_lb', zones=[], listeners=[(80, 8080, 'http')])
    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    config = LaunchConfiguration(
        name='tester',
        image_id='ami-abcd1234',
        instance_type='t2.medium',
    )
    conn.create_launch_configuration(config)
    group = AutoScalingGroup(
        name='tester_group',
        availability_zones=['us-east-1c', 'us-east-1b'],
        default_cooldown=60,
        desired_capacity=2,
        health_check_period=100,
        health_check_type="EC2",
        max_size=2,
        min_size=2,
        launch_config=config,
        load_balancers=["test_lb"],
        placement_group="test_placement",
        vpc_zone_identifier='subnet-1234abcd',
        termination_policies=["OldestInstance", "NewestInstance"],
        tags=[
            Tag(resource_id='tester_group', key='test_key', value='test_value',
                propagate_at_launch=True)
        ],
    )
    conn.create_auto_scaling_group(group)
    # Re-fetch and assert every attribute survived the round-trip.
    group = conn.get_all_groups()[0]
    group.name.should.equal('tester_group')
    set(group.availability_zones).should.equal(
        set(['us-east-1c', 'us-east-1b']))
    group.desired_capacity.should.equal(2)
    group.max_size.should.equal(2)
    group.min_size.should.equal(2)
    group.instances.should.have.length_of(2)
    group.vpc_zone_identifier.should.equal('subnet-1234abcd')
    group.launch_config_name.should.equal('tester')
    group.default_cooldown.should.equal(60)
    group.health_check_period.should.equal(100)
    group.health_check_type.should.equal("EC2")
    list(group.load_balancers).should.equal(["test_lb"])
    group.placement_group.should.equal("test_placement")
    list(group.termination_policies).should.equal(
        ["OldestInstance", "NewestInstance"])
    len(list(group.tags)).should.equal(1)
    tag = list(group.tags)[0]
    tag.resource_id.should.equal('tester_group')
    tag.key.should.equal('test_key')
    tag.value.should.equal('test_value')
    tag.propagate_at_launch.should.equal(True)
def test_autoscaling_tags_update():
    """Verify create_or_update_tags() overwrites an existing tag's value and
    appends a new tag, leaving the group with exactly two tags."""
    mocked_networking = setup_networking_deprecated()
    conn = boto.connect_autoscale()
    config = LaunchConfiguration(
        name="tester", image_id="ami-abcd1234", instance_type="t2.medium"
    )
    conn.create_launch_configuration(config)
    group = AutoScalingGroup(
        name="tester_group",
        availability_zones=["us-east-1a"],
        desired_capacity=2,
        max_size=2,
        min_size=2,
        launch_config=config,
        tags=[
            Tag(
                resource_id="tester_group",
                key="test_key",
                value="test_value",
                propagate_at_launch=True,
            )
        ],
        vpc_zone_identifier=mocked_networking["subnet1"],
    )
    conn.create_auto_scaling_group(group)
    # "test_key" is updated in place; "test_key2" is brand new.
    conn.create_or_update_tags(
        tags=[
            Tag(
                resource_id="tester_group",
                key="test_key",
                value="new_test_value",
                propagate_at_launch=True,
            ),
            Tag(
                resource_id="tester_group",
                key="test_key2",
                value="test_value2",
                propagate_at_launch=True,
            ),
        ]
    )
    group = conn.get_all_groups()[0]
    group.tags.should.have.length_of(2)
def update_or_create(self):
    """
    Creates autoscaling group and sets a `propagate_at_launch` tag for
    future instances the autoscale group boots.

    If the group does not exist it is created from self.configuration and
    tagged (Name, forseti:application, forseti:date); otherwise the
    configured attributes are pushed onto the live group and persisted.
    """
    self.group = self._get_autoscaling_group()
    if self.group is None:
        # First deployment: create the group, then tag it so the tags
        # propagate to every instance the group launches later.
        autoscaling_group = AutoScalingGroup(group_name=self.name,
                                             **self.configuration)
        self.resource = self.autoscale.create_auto_scaling_group(
            autoscaling_group)
        self.group = self._get_autoscaling_group()
        name_tag = Tag(key='Name', value=self.application,
                       propagate_at_launch=True, resource_id=self.name)
        application_tag = Tag(key='forseti:application', value=self.application,
                              propagate_at_launch=True, resource_id=self.name)
        date_tag = Tag(key='forseti:date', value=self.today,
                       propagate_at_launch=True, resource_id=self.name)
        self.autoscale.create_or_update_tags(
            [name_tag, application_tag, date_tag])
    else:
        # Group already exists: copy configured attributes onto the live
        # group object and persist them with update().
        self.group.launch_config_name = self.configuration['launch_config']
        self.group.availability_zones = self.configuration[
            'availability_zones']
        # desired_capacity is optional in the configuration.
        if 'desired_capacity' in self.configuration:
            self.group.desired_capacity = self.configuration[
                'desired_capacity']
        self.group.max_size = self.configuration['max_size']
        self.group.min_size = self.configuration['min_size']
        self.group.load_balancers = self.configuration['load_balancers']
        self.group.default_cooldown = self.configuration.get(
            'default_cooldown', None)
        self.group.termination_policies = self.configuration[
            'termination_policies']
        self.group.update()
        # Re-fetch so self.group reflects the server-side state.
        self.group = self._get_autoscaling_group()
def mark_asg_az_disabled(as_group, zone):
    """Return an updated 'AZ_status' Tag with *zone* marked as unusable.

    Reads the group's current AZ_status tag dict, flips the zone's 'use'
    flag to False, and returns a Tag carrying the modified dict.
    NOTE(review): AWS tag values are strings; passing the dict directly
    relies on serialization handled by get_tag_dict_value's counterpart --
    confirm round-tripping.
    """
    try:
        health_values = get_tag_dict_value(as_group, 'AZ_status')
        health_values[zone]['use'] = False
        print_verbose(os.path.basename(__file__), 'info', health_values)
        tag = Tag(key='AZ_status', value=health_values, resource_id=as_group.name)
        return tag
    except Exception as e:
        # Any failure leaves AZ state unknown; report and abort the process.
        handle_exception(e)
        sys.exit(1)
def test_autoscaling_tags_update():
    """Verify create_or_update_tags() updates one existing tag and adds a
    second, so the group ends with exactly two tags."""
    mocked_networking = setup_networking_deprecated()
    conn = boto.connect_autoscale()
    config = LaunchConfiguration(
        name='tester',
        image_id='ami-abcd1234',
        instance_type='t2.medium',
    )
    conn.create_launch_configuration(config)
    group = AutoScalingGroup(
        name='tester_group',
        availability_zones=['us-east-1c', 'us-east-1b'],
        desired_capacity=2,
        max_size=2,
        min_size=2,
        launch_config=config,
        tags=[Tag(
            resource_id='tester_group',
            key='test_key',
            value='test_value',
            propagate_at_launch=True
        )],
        vpc_zone_identifier=mocked_networking['subnet1'],
    )
    conn.create_auto_scaling_group(group)
    # First Tag replaces test_key's value; second Tag is brand new.
    conn.create_or_update_tags(tags=[Tag(
        resource_id='tester_group',
        key='test_key',
        value='new_test_value',
        propagate_at_launch=True
    ), Tag(
        resource_id='tester_group',
        key='test_key2',
        value='test_value2',
        propagate_at_launch=True
    )])
    group = conn.get_all_groups()[0]
    group.tags.should.have.length_of(2)
def create_tag(as_group, key, value):
    """Create or update one tag on *as_group*, retrying on API throttling.

    Returns True immediately in dry-run mode, otherwise boto's response
    from create_or_update_tags.
    """
    try:
        tag = Tag(key=key, value=value, resource_id=as_group.name)
        print_verbose(os.path.basename(__file__), 'info',
                      "Creating tag for %s." % key)
        if dry_run:
            return True
        return as_group.connection.create_or_update_tags([tag])
    # this often indicates tag limit has been exceeded
    except BotoServerError as e:
        # Back off (throttle_response) and retry recursively.
        throttle_response(e)
        return create_tag(as_group, key, value)
def update_az_health_list_tag(as_group, health_dict):
    """Return an 'AZ_status' Tag with each zone's health history shifted.

    For every zone in *health_dict*, drops the oldest health sample and
    inserts the new value at the front of that zone's 'health' list.
    """
    try:
        health_values = get_tag_dict_value(as_group, 'AZ_status')
        for k, v in health_dict.items():
            # Fixed-length history: pop the oldest, prepend the newest.
            health_values[k]['health'].pop()
            health_values[k]['health'].insert(0, v)
        print_verbose(os.path.basename(__file__), 'info', health_values)
        tag = Tag(key='AZ_status', value=health_values, resource_id=as_group.name)
        return tag
    except Exception as e:
        # Health bookkeeping failure means state is unknown; abort.
        handle_exception(e)
        sys.exit(1)
def set_auto_scaling_group_state(autoscale, group):
    """
    Set The Last Auto Scaling Group State

    Records the group's current sizing (min/desired/max) in a
    'scaling_state' tag so it can be restored later.

    :type autoscale: boto.ec2.autoscale.AutoScaleConnection
    :param autoscale: AutoScale Connection Object

    :type group: boto.ec2.autoscale.group
    :param group: Auto Scaling Group Object

    :returns: None
    """
    sizing = {
        'min': group.min_size,
        'desired': group.desired_capacity,
        'max': group.max_size,
    }
    state_tag = Tag(key='scaling_state', value=sizing, resource_id=group.name)
    autoscale.create_or_update_tags([state_tag])
def create_group_tag(self, key, value, resource_id, propagate_at_launch=None):
    """Create (or update) a single autoscaling tag and verify it is visible.

    :param key: tag key
    :param value: tag value
    :param resource_id: name of the autoscaling group to tag
    :param propagate_at_launch: whether the tag copies to launched instances
    :raises Exception: if exactly one matching tag is not found afterwards
    """
    tag = Tag(key=key, value=value, propagate_at_launch=propagate_at_launch,
              resource_id=resource_id)
    self.connection.create_or_update_tags([tag])
    # Query once and reuse the result (the original re-queried three times).
    matching = self.connection.get_all_tags(filters=key)
    if len(matching) != 1:
        self.log.debug("Number of tags: " + str(len(matching)))
        raise Exception('Tag not created')
    self.log.debug("created or updated tag: " + str(matching[0]))
def create_autoscaling_group(self):
    """Create the configured autoscaling group unless it already exists.

    self.tags is a list of {'name': ..., 'value': ...} dicts; each is
    converted to a boto Tag that propagates to launched instances.
    """
    existing_asg = self.conn.get_all_groups(names=[self.autoscaling_group])
    if not existing_asg:
        self.log.info("Creating new autoscaling group: {g}".format(
            g=self.autoscaling_group))
        # Convert our tags list into something that AWS can understand:
        aws_tags = list()
        for tag in self.tags:
            self.log.info("Adding tag [" + str(tag['name']) +
                          "] with value [" + str(tag['value']) + "]")
            aws_tags.append(
                Tag(resource_id=self.autoscaling_group,
                    key=tag['name'],
                    value=tag['value'],
                    propagate_at_launch=True))
        ag = AutoScalingGroup(
            name=self.autoscaling_group,
            tags=aws_tags,
            availability_zones=self.autoscale_availability_zones,
            desired_capacity=self.desired_capacity,
            health_check_period=self.health_check_grace_period,
            launch_config=self.launch_configuration,
            min_size=self.min_size,
            max_size=self.max_size,
            default_cooldown=self.default_cooldown,
            vpc_zone_identifier=self.autoscale_subnets,
            termination_policies=self.termination_policies,
            connection=self.conn)
        self.conn.create_auto_scaling_group(ag)
    else:
        # Idempotent: nothing to do if the group is already there.
        self.log.info('Autoscaling group {g} already exists.'.format(
            g=self.autoscaling_group))
if not vpc_zone_identifier and not availability_zones: region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) asg_tags = [] for tag in set_tags: if tag.has_key('key') and tag.has_key( 'value'): # this block is to support depricated form asg_tags.append( Tag(key=tag.get('key'), value=tag.get('value'), propagate_at_launch=bool( tag.get('propagate_at_launch', True)), resource_id=group_name)) else: for k, v in tag.iteritems(): if k != 'propagate_at_launch': asg_tags.append( Tag(key=k, value=v, propagate_at_launch=bool( tag.get('propagate_at_launch', True)), resource_id=group_name)) if not as_groups: if not vpc_zone_identifier and not availability_zones: availability_zones = module.params['availability_zones'] = [
if not vpc_zone_identifier and not availability_zones: region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) elif vpc_zone_identifier: vpc_zone_identifier = ','.join(vpc_zone_identifier) asg_tags = [] for tag in set_tags: for k,v in tag.iteritems(): if k !='propagate_at_launch': asg_tags.append(Tag(key=k, value=v, propagate_at_launch=bool(tag.get('propagate_at_launch', True)), resource_id=group_name)) if not as_groups: if not vpc_zone_identifier and not availability_zones: availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()] enforce_required_arguments(module) launch_configs = connection.get_all_launch_configurations(names=[launch_config_name]) ag = AutoScalingGroup( group_name=group_name, load_balancers=load_balancers, availability_zones=availability_zones, launch_config=launch_configs[0], min_size=min_size, max_size=max_size, desired_capacity=desired_capacity,
def create_AutoScaling():
    """Create the full autoscaling stack for 'my_group': launch config, ASG,
    scale up/down policies, and the CloudWatch CPU alarms that trigger them."""
    print "Creating AutoScaling..."
    # establish connection
    as_conn = AutoScaleConnection(AWSAccessKeyId, AWSSecretKey)
    # create launch configuration
    global lc
    lc = LaunchConfiguration(name='lc',
                             image_id=DATA_CEN_AMI,
                             key_name=ACCESS_KEY,
                             instance_monitoring=True,
                             security_groups=[SECURITY_GRP],
                             instance_type=MACHINE_TYPE)
    as_conn.create_launch_configuration(lc)
    # create tag for autoscaling group
    as_tag = Tag(key="Project", value="2.2",
                 propagate_at_launch=True, resource_id='my_group')
    # create autoscaling group
    global ag
    ag = AutoScalingGroup(group_name='my_group',
                          load_balancers=['myELB'],
                          availability_zones=['us-east-1a'],
                          launch_config=lc,
                          min_size=MIN_SIZE,
                          max_size=MAX_SIZE,
                          connection=as_conn,
                          tags=[as_tag])
    # associate the autoscaling group with launch configuration
    as_conn.create_auto_scaling_group(ag)
    # build the scale policy
    scale_up_policy = ScalingPolicy(name='scale_up',
                                    adjustment_type='ChangeInCapacity',
                                    as_name='my_group',
                                    scaling_adjustment=1,
                                    cooldown=60)
    scale_down_policy = ScalingPolicy(name='scale_down',
                                      adjustment_type='ChangeInCapacity',
                                      as_name='my_group',
                                      scaling_adjustment=-1,
                                      cooldown=60)
    # register the scale policy
    as_conn.create_scaling_policy(scale_up_policy)
    as_conn.create_scaling_policy(scale_down_policy)
    # refresh the scale policy for extra information (populates policy_arn)
    scale_up_policy = as_conn.get_all_policies(as_group='my_group',
                                               policy_names=['scale_up'])[0]
    scale_down_policy = as_conn.get_all_policies(as_group='my_group',
                                                 policy_names=['scale_down'])[0]
    # create cloudwatch alarm
    cloudwatch = CloudWatchConnection(aws_access_key_id=AWSAccessKeyId,
                                      aws_secret_access_key=AWSSecretKey,
                                      is_secure=True)
    # region='us-east-1a')
    # associate cloudwatch alarms with the ASG via its dimension
    alarm_dimensions = {"AutoScalingGroupName": 'my_group'}
    # create scale up alarm: average CPU > 50% over two 60s periods
    scale_up_alarm = MetricAlarm(name='scale_up_on_cpu',
                                 namespace='AWS/EC2',
                                 metric='CPUUtilization',
                                 statistic='Average',
                                 comparison='>',
                                 threshold='50',
                                 period='60',
                                 evaluation_periods=2,
                                 alarm_actions=[scale_up_policy.policy_arn],
                                 dimensions=alarm_dimensions)
    cloudwatch.create_alarm(scale_up_alarm)
    # create scale down alarm: average CPU < 20% over one 60s period
    scale_down_alarm = MetricAlarm(
        name='scale_down_on_cpu',
        namespace='AWS/EC2',
        metric='CPUUtilization',
        statistic='Average',
        comparison='<',
        threshold='20',
        period='60',
        evaluation_periods=1,
        alarm_actions=[scale_down_policy.policy_arn],
        dimensions=alarm_dimensions)
    cloudwatch.create_alarm(scale_down_alarm)
    print "AutoScaling created successfully"
def main():
    """Roll a new launch configuration onto an autoscaling group.

    Creates the ASG if missing; otherwise swaps in a freshly named launch
    config, recycles the instances, deletes the old launch config, and
    finally (re)applies the Name tag.
    """
    # pseudocode (repeats in code comments below)
    # check for autoscale group
    # if autoscale group not present, create it
    # else read launchconfig name from asg
    # define new launchconfig
    # assign launchconfig
    # delete old launchconfig - we can only have so many

    # read config
    print "reading configuration ..."
    config = ConfigParser.SafeConfigParser(allow_no_value=True)
    # This assumes that the file is either in the local directory being ran, or in the home/aws/ folder
    # We have to use the second option because XLD does not run the script in the same location that it's located
    config.read(['ec2-deploy.conf', os.path.expanduser('/var/lib/jenkins/workspace/AWS-Demo/cm/ec2-deploy.conf')])
    # check for autoscale group
    # FIXME: Should connect to region there
    # FIXME: proxy information?
    print "connecting to ec2..."
    #asconn = boto.ec2.autoscale.AutoScaleConnection(aws_access_key_id=config.get('auth', 'AWS_ACCESS_KEY_ID'),
    #aws_secret_access_key=config.get('auth', 'AWS_SECRET_ACCESS_KEY'), security_token=config.get('auth', 'AWS_SECURITY_TOKEN'))
    #boto.set_stream_logger('boto')
    asconn = boto.ec2.autoscale.AutoScaleConnection()
    print "validating autoscaling group ..."
    asg = get_autoscale_group(config.get('autoscalegroup', 'name'), asconn)
    oldlc = None
    # read userdata
    userdata = ""
    with open(config.get('launchconfig', 'userdata_filename'), 'r') as udf:
        userdata = udf.read()
    # define new launchconfig; name is suffixed with a compact timestamp so
    # each deploy produces a uniquely named LC.
    timenow = str(datetime.now()).split(".")[0]
    timenow = timenow.replace(" ", "").replace("-", "").replace(":", "")
    lcname = config.get('autoscalegroup', 'name') + "-lc-" + timenow
    print "Creating new launch config '{}'".format(lcname)
    newlc = LaunchConfiguration(
        name = lcname,
        image_id = config.get('launchconfig', 'ami'),
        key_name = config.get('launchconfig', 'keypair'),
        instance_type = config.get('launchconfig', 'instancetype'),
        # security_groups = sgnames_to_list( config.get('launchconfig', 'sgnames') , config.get('ec2', 'region')),
        security_groups = str(config.get('launchconfig', 'security_groups')).split(','),
        # classic_link_vpc_security_groups = str(config.get('launchconfig', 'security_groups')).split(','),
        user_data = userdata,
        associate_public_ip_address = True,
        delete_on_termination = True,
        instance_monitoring = False,
        instance_profile_name = config.get('launchconfig', 'instance_profile_name')
    )
    print "new lc created"
    asconn.create_launch_configuration(newlc)
    print "lc associated, now checking if asg exists"
    # if autoscale group not present, create it
    if asg is None:
        print "Autoscaling Group '{}' not found, creating...".format(config.get('autoscalegroup', 'name'))
        azlist = str(config.get('autoscalegroup', 'azs')).split(',')
        elblist = str(config.get('autoscalegroup', 'elbs')).split(',')
        vpclist = str(config.get('launchconfig', 'subnet')).split(',')
        asg = AutoScalingGroup(
            connection = asconn,
            name = config.get('autoscalegroup', 'name'),
            load_balancers = elblist,
            availability_zones = azlist,
            desired_capacity = config.getint('autoscalegroup', 'desired_capacity'),
            launch_config = newlc,
            max_size = config.getint('autoscalegroup', 'max_size'),
            min_size = config.getint('autoscalegroup', 'min_size'),
            vpc_zone_identifier = vpclist
        )
        asconn.create_auto_scaling_group(asg)
    else:
        # else read launchconfig name from asg
        # Note that the oldlc is just the name of the lc we're about to delete
        oldlc = asg.launch_config_name
        print "Replacing launch configuration '{}' with new lc '{}'.".format(oldlc, lcname)
        asg.endElement("LaunchConfigurationName", lcname, asconn)
        asg.update()
        # this part now terminates each instance individually so the group
        # relaunches them with the new launch configuration
        autoscale = boto.connect_autoscale()
        ec2 = boto.connect_ec2()
        group = autoscale.get_all_groups([config.get('autoscalegroup', 'name')])[0]
        instance_ids = [i.instance_id for i in group.instances]
        # reservations = ec2.get_all_instances(instance_ids)
        # instances = [i for r in reservations for i in r.instances]
        for i in instance_ids:
            asconn.terminate_instance(i, decrement_capacity=False)
    # delete old launchconfig - we can only have so many
    if oldlc is not None:
        print "Deleting old launch configuration ... "
        asconn.delete_launch_configuration(oldlc)
    print "done."
    print "Now injecting the Name Tag"
    # can't figure out a better way to inject the boto Tag tag class - will need to fix later to make it look better
    taglist = Tag(key='Name',
                  value=config.get('tags', 'name'),
                  propagate_at_launch=True,
                  resource_id=config.get('autoscalegroup', 'name'))
    asconn.create_or_update_tags([taglist])
    # end main
def test_create_autoscaling_group():
    """Create an ASG (ELB plus two mocked subnets) and verify every
    attribute round-trips through get_all_groups()."""
    mocked_networking = setup_networking_deprecated()
    elb_conn = boto.ec2.elb.connect_to_region("us-east-1")
    elb_conn.create_load_balancer("test_lb", zones=[], listeners=[(80, 8080, "http")])
    conn = boto.ec2.autoscale.connect_to_region("us-east-1")
    config = LaunchConfiguration(name="tester", image_id="ami-abcd1234",
                                 instance_type="t2.medium")
    conn.create_launch_configuration(config)
    group = AutoScalingGroup(
        name="tester_group",
        availability_zones=["us-east-1a", "us-east-1b"],
        default_cooldown=60,
        desired_capacity=2,
        health_check_period=100,
        health_check_type="EC2",
        max_size=2,
        min_size=2,
        launch_config=config,
        load_balancers=["test_lb"],
        placement_group="test_placement",
        vpc_zone_identifier="{subnet1},{subnet2}".format(
            subnet1=mocked_networking["subnet1"],
            subnet2=mocked_networking["subnet2"]),
        termination_policies=["OldestInstance", "NewestInstance"],
        tags=[
            Tag(
                resource_id="tester_group",
                key="test_key",
                value="test_value",
                propagate_at_launch=True,
            )
        ],
    )
    conn.create_auto_scaling_group(group)
    # Re-fetch and assert every attribute survived the round-trip.
    group = conn.get_all_groups()[0]
    group.name.should.equal("tester_group")
    set(group.availability_zones).should.equal(
        set(["us-east-1a", "us-east-1b"]))
    group.desired_capacity.should.equal(2)
    group.max_size.should.equal(2)
    group.min_size.should.equal(2)
    group.instances.should.have.length_of(2)
    group.vpc_zone_identifier.should.equal("{subnet1},{subnet2}".format(
        subnet1=mocked_networking["subnet1"],
        subnet2=mocked_networking["subnet2"]))
    group.launch_config_name.should.equal("tester")
    group.default_cooldown.should.equal(60)
    group.health_check_period.should.equal(100)
    group.health_check_type.should.equal("EC2")
    list(group.load_balancers).should.equal(["test_lb"])
    group.placement_group.should.equal("test_placement")
    list(group.termination_policies).should.equal(
        ["OldestInstance", "NewestInstance"])
    len(list(group.tags)).should.equal(1)
    tag = list(group.tags)[0]
    tag.resource_id.should.equal("tester_group")
    tag.key.should.equal("test_key")
    tag.value.should.equal("test_value")
    tag.propagate_at_launch.should.equal(True)
def create_asg_with_tags(asg_name, tags, ami_id="ami-abcd1234", elbs=None):
    """
    Create an ASG with the given name, tags and AMI.  This is meant to be
    used in tests that are decorated with the @mock_autoscaling moto decorator.

    Arguments:
        asg_name(str): The name of the new auto-scaling group.
        tags(dict): A dict mapping tag names to tag values.
        ami_id(str): The ID of the AMI that should be deployed.
        elbs(list): Optional load balancer names to attach; defaults to none.

    Returns:
        boto.ec2.autoscale.group.AutoScalingGroup
    """
    tag_list = [
        Tag(key=k, value=v, resource_id=asg_name, propagate_at_launch=True)
        for k, v in six.iteritems(tags)
    ]
    if elbs is None:
        elbs = []
    # Create asgs
    vpcconn = VPCConnection()
    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    config = LaunchConfiguration(
        name='{}_lc'.format(asg_name),
        image_id=ami_id,
        instance_type='t2.medium',
    )
    conn.create_launch_configuration(config)
    # Minimal VPC with one subnet per AZ the group spans.
    vpc = vpcconn.create_vpc('10.0.0.0/24')
    subnetc = vpcconn.create_subnet(vpc.id, '10.0.0.0/28', 'us-east-1c')
    subnetb = vpcconn.create_subnet(vpc.id, '10.0.0.16/28', 'us-east-1b')
    group = AutoScalingGroup(
        name=asg_name,
        availability_zones=['us-east-1c', 'us-east-1b'],
        default_cooldown=60,
        desired_capacity=2,
        load_balancers=elbs,
        health_check_period=100,
        health_check_type="EC2",
        max_size=2,
        min_size=2,
        launch_config=config,
        placement_group="test_placement",
        vpc_zone_identifier="{subnetbid},{subnetcid}".format(
            subnetbid=subnetb.id, subnetcid=subnetc.id),
        termination_policies=["OldestInstance", "NewestInstance"],
        tags=tag_list,
    )
    conn.create_auto_scaling_group(group)
    # Each ASG tag that has 'propagate_at_launch' set to True is *supposed* to be set on the instances.
    # However, it seems that moto (as of 0.4.30) does not properly set the tags on the instances created by the ASG.
    # So set the tags on the ASG instances manually instead.
    ec2_conn = boto.connect_ec2()
    for asg in conn.get_all_groups():
        if asg.name == asg_name:
            asg_instance_ids = [
                instance.instance_id for instance in asg.instances
            ]
            for instance_id in asg_instance_ids:
                ec2_conn.create_tags(instance_id, tags)
    return group
# Build the Phantom policy tags for the autoscaling group `name`.
policy_name_key = 'PHANTOM_DEFINITION'
policy_name = 'error_overflow_n_preserving'
ordered_clouds_key = 'clouds'
ordered_clouds = ""
delim = ""
# Assemble a comma-separated "cloud:maxsize" list from the CLI arguments.
for cloud_size in sys.argv[4:]:
    (cloudname, maxsize) = cloud_size.split(':')
    ordered_clouds = ordered_clouds + delim + cloud_size
    delim = ","
n_preserve_key = 'minimum_vms'
# make the tags
policy_tag = Tag(connection=con, key=policy_name_key,
                 value=policy_name, resource_id=name)
clouds_tag = Tag(connection=con, key=ordered_clouds_key,
                 value=ordered_clouds, resource_id=name)
npreserve_tag = Tag(connection=con, key=n_preserve_key,
                    value=n_preserve, resource_id=name)
tags = [policy_tag, clouds_tag, npreserve_tag]
# Look up the launch configuration to attach the group to.
lc_a = x = con.get_all_launch_configurations(names=[
    lc_name,
])
# Attach one ephemeral volume to each launched instance.
blockDeviceMap = []
blockDeviceMap.append({'DeviceName': '/dev/sdh', 'VirtualName': 'ephemeral0'})

#create user-data string
userData = '#!/bin/bash \n cur=$(hostname | sed \'s/-/./g\' | cut -c4-18) \n echo \"alfresco.jgroups.bind_address=$cur\" >> /opt/alfresco/tomcat/shared/classes/alfresco-global.properties \n echo \"alfresco.ehcache.rmi.hostname=$cur\" >> /opt/alfresco/tomcat/shared/classes/alfresco-global.properties \n cur1=$(hostname)\n echo \"$cur $cur1\" >> /etc/hosts\n'

#create launch configuration and AS group
launchConfig = LaunchConfiguration(name=asLCstr, image_id=AMIID, key_name=KEY,
                                   security_groups=[SECGRP],
                                   instance_type=TYPE,
                                   instance_monitoring=True,
                                   instance_profile_name=ROLE,
                                   block_device_mappings=blockDeviceMap,
                                   user_data=userData)
conn_as.create_launch_configuration(launchConfig)
time.sleep(20)
# FIX: the keyword was misspelled 'availabilty_zones'; boto's
# AutoScalingGroup(**kwargs) silently swallowed it, leaving the group
# without any availability zones configured.
autoscaleGroup = AutoScalingGroup(group_name=asGrpStr,
                                  load_balancers=[ELB_NAME],
                                  availability_zones=[AZLIST],
                                  launch_config=launchConfig,
                                  vpc_zone_identifier=VPC_ZONE,
                                  min_size=2, max_size=6,
                                  health_check_period='360',
                                  health_check_type='ELB')
conn_as.create_auto_scaling_group(autoscaleGroup)

#setup tagging for the instances
# create a Tag for the austoscale group
as_tag = Tag(key='Name', value='Alfresco Server',
             propagate_at_launch=True, resource_id=asGrpStr)
# Add the tag to the autoscale group
conn_as.create_or_update_tags([as_tag])

#create scale up and scale down policies for the autoscale group
scaleUpPolicy = ScalingPolicy(name='alfrescoScaleUp-' + randomStr,
                              adjustment_type='ChangeInCapacity',
                              as_name=autoscaleGroup.name,
                              scaling_adjustment=2,
                              cooldown=1200)
scaleDownPolicy = ScalingPolicy(name='alfrescoScaleDown-' + randomStr,
                                adjustment_type='ChangeInCapacity',
                                as_name=autoscaleGroup.name,
                                scaling_adjustment=-1,
                                cooldown=600)
conn_as.create_scaling_policy(scaleUpPolicy)
conn_as.create_scaling_policy(scaleDownPolicy)

#redeclare policies to populate the ARN fields
policyResults = conn_as.get_all_policies(as_group=autoscaleGroup.name,
                                         policy_names=[scaleUpPolicy.name])
scaleUpPolicy = policyResults[0]
policyResults = conn_as.get_all_policies(as_group=autoscaleGroup.name,
                                         policy_names=[scaleDownPolicy.name])
import boto
# FIX: AutoScalingGroup is used below but was never imported (NameError).
from boto.ec2.autoscale import AutoScalingGroup, Tag

# make sure your access keys are stored in ~/.boto
conn = boto.connect_autoscale()

# Assumes you already have an elastic load balancer and a launch configuration
# setup, and that group_name / load_balancer / availability_zones / config /
# min_size / max_size are defined by the surrounding script.
ag = AutoScalingGroup(group_name=group_name,
                      load_balancers=[load_balancer],
                      availability_zones=availability_zones,
                      launch_config=config,
                      min_size=min_size,
                      max_size=max_size)

# create auto scaling group
conn.create_auto_scaling_group(ag)

# fetch the autoscale group after it is created
auto_scaling_group = conn.get_all_groups(names=[group_name])[0]

# create a Tag for the autoscale group
as_tag = Tag(key='Name',
             value='as-instance',
             propagate_at_launch=True,
             resource_id=group_name)

# Add the tag to the autoscale group
conn.create_or_update_tags([as_tag])
minimum_vms_key = 'minimum_vms' maximum_vms_key = 'maximum_vms' scale_up_threshold_key = 'scale_up_threshold' scale_up_threshold = 2.0 scale_up_n_vms_key = 'scale_up_n_vms' scale_up_n_vms = 1 scale_down_threshold_key = 'scale_down_threshold' scale_down_threshold = 0.5 scale_down_n_vms_key = 'scale_down_n_vms' scale_down_n_vms = 1 iaas_allocation_key = 'iaas_allocation' iaas_allocation = 'm1.small' # make the tags policy_tag = Tag(connection=con, key=policy_name_key, value=policy_name, resource_id=name) sensor_type_tag = Tag(connection=con, key=sensor_type_key, value=sensor_type, resource_id=name) metric_tag = Tag(connection=con, key=metric_key, value=metric, resource_id=name) sample_function_tag = Tag(connection=con, key=sample_function_key, value=sample_function, resource_id=name) minimum_vms_tag = Tag(connection=con, key=minimum_vms_key,
def setup(CONF):
    """Provision launch config, ASG, tags and scaling triggers from CONF.

    CONF keys used: NAME, LC, ASG, ASG_TAGS, TRIGGERS, REGION.  '%'-style
    placeholders in names are expanded against {'name': CONF['NAME']}.
    Created objects are stashed in the module-level `out` dict.
    """
    global out
    lookup_tbl = {
        'name': CONF['NAME'],
    }
    conn = AutoScaleConnection()
    out['conn'] = conn
    # Launch Configurations
    LC = CONF['LC']
    LC['name'] = LC['name'] % lookup_tbl
    lc = LaunchConfiguration(**LC)
    conn.create_launch_configuration(lc)
    out['lc'] = lc
    # Auto Scaling Group
    ASG = CONF['ASG']
    ASG['group_name'] = ASG['group_name'] % lookup_tbl
    ASG['launch_config'] = lc
    groups = conn.get_all_groups(names=[ASG['group_name']])
    if (len(groups) > 0):
        # update: copy only the keys the live group object actually has
        asg = groups[0]
        for k in ASG:
            # asg not iterable, try-except to make sure asg[k] exists
            try:
                asg.__getattribute__(k)
            except:
                continue
            asg.__setattr__(k, ASG[k])
        asg.launch_config_name = LC['name']
        asg.update()
        out['asg'] = asg
    else:
        #create
        asg = AutoScalingGroup(**ASG)
        conn.create_auto_scaling_group(asg)
    # ASG Tags
    ASG_TAGS = CONF['ASG_TAGS']
    for i in ASG_TAGS:
        if 'propagate_at_launch' not in i:
            i['propagate_at_launch'] = True
        i['key'] = i['key'] % lookup_tbl
        i['value'] = i['value'] % lookup_tbl
    # NOTE: dict(x.items() + [...]) concatenates lists -- Python 2 only.
    tags = [
        Tag(**dict(x.items() + [('resource_id', ASG['group_name'])]))
        for x in ASG_TAGS
    ]
    conn.create_or_update_tags(tags)
    # Triggers (Scaling Policy / Cloudwatch Alarm)
    conn_cw = connect_to_region(CONF['REGION'])
    TRIGGERS = CONF['TRIGGERS']
    for T in TRIGGERS:
        T['policy']['name'] = T['policy']['name'] % lookup_tbl
        T['policy']['as_name'] = ASG['group_name']
        T['alarm']['dimensions'] = {'AutoScalingGroupName': ASG['group_name']}
        T['alarm']['alarm_actions'] = None
        if 'name' in T['alarm']:
            T['alarm']['name'] = T['alarm']['name'] % lookup_tbl
        else:
            # Default the alarm name to the policy name.
            T['alarm']['name'] = T['policy']['name']
        # Policies are safely overwritten, so not checked for existence
        conn.create_scaling_policy(ScalingPolicy(**T['policy']))
        # Re-fetch the policy to get its ARN for the alarm action.
        policy = conn.get_all_policies(as_group=ASG['group_name'],
                                       policy_names=[T['policy']['name']])[0]
        T['alarm']['alarm_actions'] = [policy.policy_arn]
        # NOTE(review): `hits` is never used -- existing-alarm check appears
        # unfinished; create_alarm overwrites in any case.
        hits = conn_cw.describe_alarms(alarm_names=[T['alarm']['name']])
        conn_cw.create_alarm(MetricAlarm(**T['alarm']))
def as_ensure(name, zones, instance, balancers=[], events=[], min_size=2,
              max_size=20, desired_size=None, force=False):
    """Create a new autoscale group.

    :type name: str
    :param name: the name of the autoscale group to be created

    :type zones: list of str
    :param zones: a list of the availability zones where autoscale group
        will be working on.

    :param instance: an instance config, created by :func:`as_config`.

    :type balancers: list of balancers
    :param balancers: a list of balancers where new instances will be
        autoattached.

    :type events: list of events
    :param events: a list of events created with as_event, which define in
        what conditions the autoscale group will be grow up.
    """
    connection = as_connect()
    # With force=True a timestamp suffix makes the group name unique.
    if force:
        ag_name = "%s-%s" % (name, _as_get_timestamp())
    else:
        ag_name = name
    _obj = as_exists(ag_name)
    if _obj:
        if not force:
            # Idempotent path: reuse the existing group.
            ag = _obj[0]
            mico.output.info("use existent autoscaling group: %s" % ag_name)
            return ag
    # Normalize balancers to a list of names.
    _l = []
    for elb in balancers:
        if isinstance(elb, str):
            _l.append(elb)
        else:
            _l.append(elb.name)
    ag = AutoScalingGroup(name=ag_name,
                          availability_zones=zones,
                          launch_config=instance,
                          load_balancers=_l,
                          min_size=min_size,
                          max_size=max_size,
                          desired_capacity=desired_size)
    connection.create_auto_scaling_group(ag)
    mico.output.info("created new autoscaling group: %s" % ag_name)
    as_tag = Tag(key='Name',
                 value="%s" % name,
                 propagate_at_launch=True,
                 resource_id=ag_name)
    # Add the tag to the autoscale group
    connection.create_or_update_tags([as_tag])
    cw_connection = cw_connect()
    for condition, actions in events:
        if not isinstance(actions, list):
            actions = [actions]
        condition.dimensions = {"AutoScalingGroupName": ag_name}
        # XXX: boto does not handle very well the alarm_actions list when the
        # same connection is used for two different cloudwatch alarms, so the
        # actions appears to be duplicated in both alarms. We need to force the
        # internal list to be empty.
        condition.alarm_actions = []
        for action in actions:
            policy = ScalingPolicy(action["name"], as_name=ag_name, **action)
            mico.output.info("create policy %s" % policy.name)
            connection.create_scaling_policy(policy)
            # Re-fetch the policy so policy_arn is populated.
            action = connection.get_all_policies(as_group=ag_name,
                                                 policy_names=[action["name"]])[0]
            condition.name = "%s-%s" % (condition.name, _as_get_timestamp())
            condition.add_alarm_action(action.policy_arn)
            mico.output.debug("add new alarm for condition %s: %s" %
                              (condition.name, action.name))
        cw_connection.create_alarm(condition)
        mico.output.info("create alarm %s" % condition.name)
    return ag
def ag_tag(self, ag, k, v):
    """Build a propagating boto Tag (key=k, value=v) for the group *ag*."""
    tag = Tag(
        key=k,
        value=v,
        propagate_at_launch=True,
        resource_id=ag.name,
    )
    return tag
def create_autoscaling_group(connection, module):
    """Create a new autoscaling group from the Ansible module parameters,
    or update an existing group of the same name in place.

    :param connection: a boto autoscale connection
    :param module: the Ansible module (parameters, fail_json)
    :returns: a ``(changed, asg_properties)`` tuple; calls
        ``module.fail_json`` on AWS errors.
    """
    group_name = module.params.get('name')
    load_balancers = module.params['load_balancers']
    availability_zones = module.params['availability_zones']
    launch_config_name = module.params.get('launch_config_name')
    min_size = module.params['min_size']
    max_size = module.params['max_size']
    desired_capacity = module.params.get('desired_capacity')
    vpc_zone_identifier = module.params.get('vpc_zone_identifier')
    set_tags = module.params.get('tags')
    health_check_period = module.params.get('health_check_period')
    health_check_type = module.params.get('health_check_type')
    default_cooldown = module.params.get('default_cooldown')
    wait_for_instances = module.params.get('wait_for_instances')
    as_groups = connection.get_all_groups(names=[group_name])
    wait_timeout = module.params.get('wait_timeout')
    termination_policies = module.params.get('termination_policies')

    if not vpc_zone_identifier and not availability_zones:
        # No placement given at all: we will later default to every zone in
        # the region, which requires an EC2 connection.
        region, ec2_url, aws_connect_params = get_aws_connection_info(module)
        try:
            ec2_connection = connect_to_aws(boto.ec2, region,
                                            **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    elif vpc_zone_identifier:
        # AWS expects a single comma-separated string of subnet ids.
        vpc_zone_identifier = ','.join(vpc_zone_identifier)

    # Convert the module's list-of-dicts tag spec into boto Tag objects;
    # 'propagate_at_launch' is an option per tag dict, not a tag itself.
    asg_tags = []
    for tag in set_tags:
        for k, v in tag.iteritems():
            if k != 'propagate_at_launch':
                asg_tags.append(
                    Tag(key=k,
                        value=v,
                        propagate_at_launch=bool(
                            tag.get('propagate_at_launch', True)),
                        resource_id=group_name))

    if not as_groups:
        # --- create branch: no group of this name exists yet ---
        if not vpc_zone_identifier and not availability_zones:
            availability_zones = module.params['availability_zones'] = [
                zone.name for zone in ec2_connection.get_all_zones()
            ]
        enforce_required_arguments(module)
        launch_configs = connection.get_all_launch_configurations(
            names=[launch_config_name])
        ag = AutoScalingGroup(group_name=group_name,
                              load_balancers=load_balancers,
                              availability_zones=availability_zones,
                              launch_config=launch_configs[0],
                              min_size=min_size,
                              max_size=max_size,
                              desired_capacity=desired_capacity,
                              vpc_zone_identifier=vpc_zone_identifier,
                              connection=connection,
                              tags=asg_tags,
                              health_check_period=health_check_period,
                              health_check_type=health_check_type,
                              default_cooldown=default_cooldown,
                              termination_policies=termination_policies)
        try:
            connection.create_auto_scaling_group(ag)
            if wait_for_instances:
                wait_for_new_inst(module, connection, group_name,
                                  wait_timeout, desired_capacity,
                                  'viable_instances')
                wait_for_elb(connection, module, group_name)
            as_group = connection.get_all_groups(names=[group_name])[0]
            asg_properties = get_properties(as_group)
            changed = True
            return (changed, asg_properties)
        except BotoServerError as e:
            module.fail_json(msg=str(e))
    else:
        # --- update branch: reconcile the existing group's attributes ---
        as_group = as_groups[0]
        changed = False
        for attr in ASG_ATTRIBUTES:
            if module.params.get(attr, None) is not None:
                module_attr = module.params.get(attr)
                if attr == 'vpc_zone_identifier':
                    module_attr = ','.join(module_attr)
                group_attr = getattr(as_group, attr)
                # we do this because AWS and the module may return the same
                # list sorted differently
                # FIX: the bare `except:` clauses swallowed every exception
                # (incl. KeyboardInterrupt); only non-sortable values need
                # tolerating here.
                try:
                    module_attr.sort()
                except (AttributeError, TypeError):
                    pass
                try:
                    group_attr.sort()
                except (AttributeError, TypeError):
                    pass
                if group_attr != module_attr:
                    changed = True
                    setattr(as_group, attr, module_attr)

        if len(set_tags) > 0:
            have_tags = {}
            want_tags = {}
            for tag in asg_tags:
                want_tags[tag.key] = [tag.value, tag.propagate_at_launch]
            dead_tags = []
            for tag in as_group.tags:
                have_tags[tag.key] = [tag.value, tag.propagate_at_launch]
                if tag.key not in want_tags:
                    changed = True
                    dead_tags.append(tag)
            if dead_tags:
                connection.delete_tags(dead_tags)
            if have_tags != want_tags:
                changed = True
                connection.create_or_update_tags(asg_tags)

        # handle loadbalancers separately because None != []
        load_balancers = module.params.get('load_balancers') or []
        if load_balancers and as_group.load_balancers != load_balancers:
            changed = True
            as_group.load_balancers = module.params.get('load_balancers')

        if changed:
            try:
                as_group.update()
            except BotoServerError as e:
                module.fail_json(msg=str(e))

        if wait_for_instances:
            wait_for_new_inst(module, connection, group_name, wait_timeout,
                              desired_capacity, 'viable_instances')
            wait_for_elb(connection, module, group_name)
        try:
            as_group = connection.get_all_groups(names=[group_name])[0]
            asg_properties = get_properties(as_group)
        except BotoServerError as e:
            module.fail_json(msg=str(e))
        return (changed, asg_properties)
def run(self): all_lcs = self.as_conn.get_all_launch_configurations() lc_by_group = defaultdict(list) lc_max_num_by_group = defaultdict(int) for lc in all_lcs: name, num = lc.name.split('-') num = int(num) lc_by_group[name].append(lc) if num > lc_max_num_by_group[name]: lc_max_num_by_group[name] = num all_ags = self.as_conn.get_all_groups() ag_by_name = {} for ag in all_ags: ag_by_name[ag.name] = ag for group_name, config in self.as_config["groups"].iteritems(): print "Configuring %s" % group_name use_lc = None lc_to_delete = [] for lc in lc_by_group[group_name]: if use_lc is None and \ lc.image_id == config['ami'] and \ lc.key_name == config['ssh_key'] and \ lc.instance_type == config['instance_type'] and \ lc.security_groups == [config['security_group']] and \ lc.user_data == self.user_data: print " Found LaunchConfig %s that matches profile" % \ lc.name use_lc = lc else: lc_to_delete.append(lc) print " Found %d LaunchConfigurations to delete" % len( lc_to_delete) if not use_lc: print " Making LaunchConfiguration for %s" % group_name lc_num = lc_max_num_by_group[group_name] + 1 use_lc = LaunchConfiguration( name="%s-%d" % (group_name, lc_num), image_id=config['ami'], key_name=config['ssh_key'], instance_type=config['instance_type'], security_groups=[config['security_group']], user_data=self.user_data) self.as_conn.create_launch_configuration(use_lc) if group_name in ag_by_name: print " Found existing AutoScalingGroup, updating" ag = ag_by_name[group_name] ag_exists = True else: print " Making new AutoScalingGroup" ag = AutoScalingGroup() ag_exists = False # config ASG as we want it ag.name = group_name ag.launch_config_name = use_lc.name ag.availability_zones = config['zones'] ag.desired_capacity = config['capacity'] ag.min_size = config['min_size'] ag.max_size = config['max_size'] # create or update as appropriate if ag_exists: ag.update() else: self.as_conn.create_auto_scaling_group(ag) # make it send e-mail whenever it does something if 'notification_topic' in 
self.as_config: # NOTE [adam Sept/18/12]: this is a hack designed to work # around that boto support for this isn't in a release yet. # when the next release is out, we should uncomment the # code below. params = { 'AutoScalingGroupName': ag.name, 'TopicARN': self.as_config['notification_topic'] } self.as_conn.build_list_params(params, self.AS_NOTIFICATIONS, 'NotificationTypes') self.as_conn.get_status('PutNotificationConfiguration', params) #as_conn.put_notification_configuration( # ag.name, # self.as_config['notification_topic'], # self.AS_NOTIFICATIONS) tags = [] for tag_name, tag_value in config.get('tags', {}).iteritems(): print " Adding tag %s = %s" % (tag_name, tag_value) tags.append( Tag(key=tag_name, value=tag_value, propagate_at_launch=True, resource_id=ag.name)) self.as_conn.create_or_update_tags(tags) for lc in lc_to_delete: print " Deleting old LaunchConfiguration %s" % lc.name lc.delete() for alarm_name, alarm_cfg in config.get('alarms', {}).iteritems(): alarm_policy_arn = self.make_policy(group_name, alarm_cfg['policy']) alarm_name = '%s|%s|%s' % (group_name, alarm_cfg['policy'], alarm_cfg['metric']) alarm = MetricAlarm( name=alarm_name, namespace=alarm_cfg['namespace'], metric=alarm_cfg['metric'], statistic='Average', dimensions={'AutoScalingGroupName': group_name}, comparison=alarm_cfg['comparison'], threshold=alarm_cfg['threshold'], period=alarm_cfg['period'], evaluation_periods=alarm_cfg.get('evaluation_periods', 1), alarm_actions=[alarm_policy_arn]) self.cw_conn.put_metric_alarm(alarm)