def check_tag_attributes_set(self, name, value, attr):
    """Feed one SAX endElement event to a fresh Tag and check the result.

    The string 'true' must be coerced to the boolean True on *attr*;
    any other value must be stored verbatim.
    """
    parsed_tag = Tag()
    parsed_tag.endElement(name, value, None)
    expected = True if value == 'true' else value
    self.assertEqual(getattr(parsed_tag, attr), expected)
def test_create_or_update_tags(self):
    """Two tags must serialize into indexed Tags.member.N.* request params."""
    self.set_http_response(status_code=200)
    alpha_tag = Tag(connection=self.service_connection,
                    key='alpha',
                    value='tango',
                    resource_id='sg-00000000',
                    resource_type='auto-scaling-group',
                    propagate_at_launch=True)
    bravo_tag = Tag(connection=self.service_connection,
                    key='bravo',
                    value='sierra',
                    resource_id='sg-00000000',
                    resource_type='auto-scaling-group',
                    propagate_at_launch=False)
    response = self.service_connection.create_or_update_tags(
        [alpha_tag, bravo_tag])
    expected_params = {
        'Action': 'CreateOrUpdateTags',
        'Tags.member.1.ResourceType': 'auto-scaling-group',
        'Tags.member.1.ResourceId': 'sg-00000000',
        'Tags.member.1.Key': 'alpha',
        'Tags.member.1.Value': 'tango',
        'Tags.member.1.PropagateAtLaunch': 'true',
        'Tags.member.2.ResourceType': 'auto-scaling-group',
        'Tags.member.2.ResourceId': 'sg-00000000',
        'Tags.member.2.Key': 'bravo',
        'Tags.member.2.Value': 'sierra',
        'Tags.member.2.PropagateAtLaunch': 'false',
    }
    self.assert_request_parameters(expected_params,
                                   ignore_params_values=['Version'])
def create_autoscale_group(gname, lconfig_name, placement_group, size, zones=None):
    """Create an autoscaling group *gname* with min/desired capacity of 0.

    Returns None (after logging an error) if a group with that name
    already exists; otherwise returns the connection's create result.
    When *zones* is None, all availability zones visible to the EC2
    connection are used.
    """
    if CompEC2._get_autoscale_group(gname) is not None:
        Cluster.log_error("Autoscale group %s already exists!", gname)
        return None

    # Name tag propagates onto every instance the group launches.
    name_tag = Tag(key='Name', value=gname, propagate_at_launch=True,
                   resource_id=gname)

    if zones is None:
        zones = [zone.name for zone in Cluster._ec2().get_all_zones()]
    Cluster.log_info("zones: %r", zones)

    group = AutoScalingGroup(group_name=gname,
                             availability_zones=zones,
                             launch_config=lconfig_name,
                             placement_group=placement_group,
                             tags=[name_tag],
                             desired_capacity=0,
                             min_size=0,
                             max_size=size)
    return Cluster._autoscale().create_auto_scaling_group(group)
def test_autoscale_modify_tags_1(self):
    """Changing one tag's propagate flag yields one delete + one update."""
    modified = list(self.orig_tags)
    modified[1] = Tag(key='tag2', value='value2', propagate_at_launch=True)
    del_tags, update_tags = ScalingGroupView.optimize_tag_update(
        self.orig_tags, modified)
    self.assertTrue(len(del_tags) == 1)
    self.assertTrue(len(update_tags) == 1)
def test_autoscale_add_tags2(self):
    """Appending a brand-new tag yields zero deletes and one update."""
    extra_tag = Tag(key='tag4', value='value4', propagate_at_launch=True)
    modified = list(self.orig_tags) + [extra_tag]
    del_tags, update_tags = ScalingGroupView.optimize_tag_update(
        self.orig_tags, modified)
    self.assertTrue(len(del_tags) == 0)
    self.assertTrue(len(update_tags) == 1)
def tag_instances(groups):
    """Interactively pick a group and set its instances' Name tag.

    Prompts the user (Python 2 ``raw_input``) for a name, then pushes a
    propagating 'Name' tag onto the chosen autoscaling group via the
    module-level asConnection.
    """
    chosen = select_group(groups)
    desired_name = raw_input(
        'What would you like to name the instances in this group? ')
    name_tag = Tag(key='Name',
                   value=desired_name,
                   propagate_at_launch=True,
                   resource_id=groups[chosen].name)
    asConnection.create_or_update_tags([name_tag])
def main(REGION, ASG_NAME, SERVICE_NAME):
    """Phase out an ASG's instances by doubling capacity and waiting.

    Tags the group with SERVICE_NAME, doubles desired capacity so fresh
    instances launch alongside the originals, then polls until the new
    instances report ready. Mutates module-level globals
    (AUTOSCALING_GROUP_NAME, asConnection, ec2Connection,
    original_instances) as side effects.
    """
    print('Current region: {0}'.format(REGION))
    global AUTOSCALING_GROUP_NAME
    AUTOSCALING_GROUP_NAME = ASG_NAME
    # Module-level connections so helpers like get_group() can reuse them.
    global asConnection
    asConnection = boto.ec2.autoscale.connect_to_region(REGION)
    global ec2Connection
    ec2Connection = boto.ec2.connect_to_region(REGION)
    print('Adding the tag "{0}" to the "{1}" group.'.format(
        SERVICE_NAME, ASG_NAME))
    # NOTE: key is 'Name' but the value is the service name — the tag will
    # propagate to newly launched instances.
    tag = Tag(key='Name', value=SERVICE_NAME, propagate_at_launch=True,
              resource_id=ASG_NAME)
    asConnection.create_or_update_tags([tag])
    group = get_group()
    if group.desired_capacity == 0:
        # Nothing running: exit() raises SystemExit with this message.
        exit('There are no instances to phase out.')
    else:
        DESIRED_CAPACITY = group.desired_capacity
        NEW_DESIRED_CAPACITY = group.desired_capacity * 2
        # Remember the pre-existing instance ids so new ones can be told
        # apart later. Assumes original_instances is a module-level list
        # defined elsewhere — TODO confirm.
        global original_instances
        if not original_instances:
            for instance in get_group_instances():
                original_instances.append(instance.id)
        # TODO: ?? check if group.max_size is smaller than what we intend to launch
        # Launch same number of instances that are now running
        group.set_capacity(NEW_DESIRED_CAPACITY)
        print('Waiting for group capacity to be updated from {0} to {1}..'.format(
            DESIRED_CAPACITY, NEW_DESIRED_CAPACITY))
        # Poll until AWS reflects the new desired capacity.
        while get_group().desired_capacity != NEW_DESIRED_CAPACITY:
            sys.stdout.write('.')
            sys.stdout.flush()
            time.sleep(5)
        #monitor the new instances
        print('\nWaiting for all new instances to be ready...')
        # while/else: the else block runs once the loop condition becomes
        # false (there is no break in the body, so it always runs).
        while not get_new_instances_status():
            sys.stdout.write('.')
            sys.stdout.flush()
            time.sleep(10)
        else:
            # When new instances are live, terminate same number of instances that we launched
            # This requires Termination Policies to be: OldestLaunchConfiguration, OldestInstance
            # TODO?? Sometimes the instances that get terminated are the original ones.
            # To avoid this we could specify which instances to terminate and manually scale in the group.
            # Or we can just let scaling policies take capacity back down for us.
            #group.set_capacity(DESIRED_CAPACITY)
            print('\nNew instances are live.')
def parse_tags_param(self, scaling_group_name=None):
    """Decode the JSON 'tags' request parameter into boto Tag objects.

    Returns an empty list when the parameter is absent or empty. Each
    entry's 'propagate_at_launch' defaults to False when missing.
    """
    raw_json = self.request.params.get('tags')
    entries = json.loads(raw_json) if raw_json else []
    return [
        Tag(
            resource_id=scaling_group_name,
            key=entry.get('name'),
            value=entry.get('value'),
            propagate_at_launch=entry.get('propagate_at_launch', False),
        )
        for entry in entries
    ]
def create_group():
    """Interactively create an AutoScaling group from console prompts.

    Walks the user through choosing a launch configuration and entering
    group sizing parameters, then submits the group via the module-level
    asConnection. Returns None; aborts early if no launch configuration
    exists.
    """
    # Get the list of LC's
    print('\nLaunch Configuration List')
    launchconfigs = read_launch_configs(True)
    if len(launchconfigs) < 1:
        print('You have not yet created a Launch Configuration.')
        return
    print('\n')
    print('Enter the Launch Configuration number to use:')
    # NOTE(review): the menu is presented 1-based but the choice is used
    # directly as a 0-based list index — confirm get_choice() returns a
    # 0-based value, otherwise this is off by one.
    lc_number = get_choice(range(1, len(launchconfigs) + 1))
    launchConfigName = launchconfigs[lc_number].name
    # Loop until the user supplies a non-empty group name.
    autoscalingGroupName = None
    while not autoscalingGroupName:
        print('Enter a name for this new AutoScaling Group:')
        autoscalingGroupName = raw_input('#: ')
    print('Enter the Minimum Size')
    GROUP_MIN_SIZE = get_int()
    print('Enter the Maximum Size')
    GROUP_MAX_SIZE = get_int()
    print('Enter the default cooldown in seconds (default is 300 (5 minutes))')
    DEFAULT_COOLDOWN = get_int()
    print(
        'Enter the desired capacity (number of instances to start right now)')
    DESIRED_CAPACITY = get_int()
    print('Enter a Name tag for intances of this group:')
    NAME_TAG = raw_input('#: ')
    # NAME_TAG is rebound from the raw string to either a one-element
    # Tag list or None, matching AutoScalingGroup's tags parameter.
    if NAME_TAG != '':
        NAME_TAG = [
            Tag(key='Name',
                value=NAME_TAG,
                propagate_at_launch=True,
                resource_id=autoscalingGroupName)
        ]
    else:
        NAME_TAG = None
    asgroup = AutoScalingGroup(
        group_name=autoscalingGroupName,
        availability_zones=REGIONS[CURRENT_REGION]['zones'],
        launch_config=launchConfigName,
        min_size=GROUP_MIN_SIZE,
        max_size=GROUP_MAX_SIZE,
        default_cooldown=DEFAULT_COOLDOWN,
        desired_capacity=DESIRED_CAPACITY,
        tags=NAME_TAG,
        #TODO: load_balancers = [elb, list]
        # health_check_type?? [ELB, EC2]
        #connection=asConnection
    )
    asConnection.create_auto_scaling_group(asgroup)  # returns request id
def create_tag_for_asg_deletion(asg_name, seconds_until_delete_delta=None):
    """Build the tag that marks an ASG for deletion.

    When *seconds_until_delete_delta* is given, the tag value is the
    ISO-8601 UTC timestamp that far in the future; otherwise the value
    is None. The tag never propagates to launched instances.
    """
    if seconds_until_delete_delta is None:
        tag_value = None
    else:
        delete_at = datetime.utcnow() + timedelta(
            seconds=seconds_until_delete_delta)
        tag_value = delete_at.isoformat()
    return Tag(
        key=ASG_DELETE_TAG_KEY,
        value=tag_value,
        propagate_at_launch=False,
        resource_id=asg_name,
    )
def create_autoscalability_group(self, lb_name, lc):
    """Create the 'cloudscale-as' group (1-10 instances) behind *lb_name*.

    An AlreadyExists error from AWS is tolerated silently; any other
    BotoServerError propagates.
    """
    self.logger.log("Creating autoscalability group ...")
    try:
        name_tag = Tag(key='Name',
                       value=self.instance_identifier,
                       propagate_at_launch=True,
                       resource_id='cloudscale-as')
        scaling_group = AutoScalingGroup(
            group_name='cloudscale-as',
            load_balancers=[lb_name],
            availability_zones=[self.availability_zone],
            launch_config=lc,
            min_size=1,
            max_size=10,
            connection=self.conn,
            tags=[name_tag])
        self.conn.create_auto_scaling_group(scaling_group)
    except boto.exception.BotoServerError as e:
        # A pre-existing group is fine; anything else is fatal.
        if e.error_code != 'AlreadyExists':
            raise
    # self.conn.get_all_groups(names=['cloudscale-as'])[0]
def test_basic(self):
    """End-to-end smoke test against live AWS autoscale APIs.

    First lightly exercises every read-only listing call, then creates a
    throwaway launch configuration, group, and tag, and finally tears
    them all down, polling until the tag disappears. Requires real AWS
    credentials and network access; the create/delete phase costs time
    (5-second sleeps) and launches a t1.micro instance.
    """
    # NB: as it says on the tin these are really basic tests that only
    # (lightly) exercise read-only behaviour - and that's only if you
    # have any autoscale groups to introspect. It's useful, however, to
    # catch simple errors
    print('--- running %s tests ---' % self.__class__.__name__)
    c = AutoScaleConnection()
    self.assertTrue(repr(c).startswith('AutoScaleConnection'))
    groups = c.get_all_groups()
    for group in groups:
        self.assertIsInstance(group, AutoScalingGroup)
        # get activities
        activities = group.get_activities()
        for activity in activities:
            self.assertIsInstance(activity, Activity)
    # get launch configs
    configs = c.get_all_launch_configurations()
    for config in configs:
        self.assertIsInstance(config, LaunchConfiguration)
    # get policies
    policies = c.get_all_policies()
    for policy in policies:
        self.assertIsInstance(policy, ScalingPolicy)
    # get scheduled actions
    actions = c.get_all_scheduled_actions()
    for action in actions:
        self.assertIsInstance(action, ScheduledUpdateGroupAction)
    # get instances
    instances = c.get_all_autoscaling_instances()
    for instance in instances:
        self.assertIsInstance(instance, Instance)
    # get all scaling process types
    ptypes = c.get_all_scaling_process_types()
    for ptype in ptypes:
        # NOTE(review): assertTrue with two args is NOT an isinstance
        # check — the second argument is just the failure message, so
        # this only asserts ptype is truthy. Probably meant
        # assertIsInstance; left as-is here.
        self.assertTrue(ptype, ProcessType)
    # get adjustment types
    adjustments = c.get_all_adjustment_types()
    for adjustment in adjustments:
        self.assertIsInstance(adjustment, AdjustmentType)
    # get metrics collection types
    types = c.get_all_metric_collection_types()
    self.assertIsInstance(types, MetricCollectionTypes)
    # create the simplest possible AutoScale group
    # first create the launch configuration
    # Unix timestamp keeps resource names unique across runs.
    time_string = '%d' % int(time.time())
    lc_name = 'lc-%s' % time_string
    lc = LaunchConfiguration(name=lc_name, image_id='ami-2272864b',
                             instance_type='t1.micro')
    c.create_launch_configuration(lc)
    found = False
    lcs = c.get_all_launch_configurations()
    for lc in lcs:
        if lc.name == lc_name:
            found = True
            break
    assert found
    # now create autoscaling group
    group_name = 'group-%s' % time_string
    group = AutoScalingGroup(name=group_name, launch_config=lc,
                             availability_zones=['us-east-1a'],
                             min_size=1, max_size=1)
    c.create_auto_scaling_group(group)
    found = False
    groups = c.get_all_groups()
    for group in groups:
        if group.name == group_name:
            found = True
            break
    assert found
    # now create a tag
    tag = Tag(key='foo', value='bar', resource_id=group_name,
              propagate_at_launch=True)
    c.create_or_update_tags([tag])
    found = False
    tags = c.get_all_tags()
    for tag in tags:
        if tag.resource_id == group_name and tag.key == 'foo':
            found = True
            break
    assert found
    c.delete_tags([tag])
    # shutdown instances and wait for them to disappear
    group.shutdown_instances()
    instances = True
    # Poll until our group reports no instances left.
    while instances:
        time.sleep(5)
        groups = c.get_all_groups()
        for group in groups:
            if group.name == group_name:
                if not group.instances:
                    instances = False
    group.delete()
    lc.delete()
    # Poll until the tag we deleted stops appearing in listings.
    found = True
    while found:
        found = False
        time.sleep(5)
        tags = c.get_all_tags()
        for tag in tags:
            if tag.resource_id == group_name and tag.key == 'foo':
                found = True
    assert not found
    print('--- tests completed ---')
def setUp(self):
    """Build the baseline tag list the tag-diffing tests compare against."""
    tag_specs = [
        ('tag1', 'value1', True),
        ('tag2', 'value2', False),
        ('tag3', 'value3', True),
    ]
    self.orig_tags = [
        Tag(key=key, value=value, propagate_at_launch=propagate)
        for key, value, propagate in tag_specs
    ]
elb = conn2.create_load_balancer('elb', [ZONE], [(80, 80, 'http')]) # allow all traffic conn2.apply_security_groups_to_lb('elb', [sg.id]) conn2.configure_health_check('elb', hc) print elb.dns_name print 'Creating ASG' # initialize launch config conn3 = AutoScaleConnection(os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_KEY']) config = LaunchConfiguration(name='config', image_id=DC_IMAGE, security_groups=sgs, instance_type=TYPE, instance_monitoring=True) conn3.create_launch_configuration(config) # initialize auto scaling group ag = AutoScalingGroup(connection=conn3, name='gp', load_balancers=['elb'], availability_zones=[ZONE], health_check_type='ELB', health_check_period=60, launch_config=config, min_size=2, max_size=5, desired_capacity=2, tags=[Tag(key=TAGK, value=TAGV, propagate_at_launch=True, resource_id='gp', resource_type='auto-scaling-group')]) conn3.create_auto_scaling_group(ag) # define the scaling policies scale_up_policy = ScalingPolicy(name='scale_up', adjustment_type='ChangeInCapacity', as_name='gp', scaling_adjustment=1, cooldown=60) scale_down_policy = ScalingPolicy(name='scale_down', adjustment_type='ChangeInCapacity', as_name='gp', scaling_adjustment=-1, cooldown=60) # create policies conn3.create_scaling_policy(scale_up_policy) conn3.create_scaling_policy(scale_down_policy) # get ARN for policies up_policy = conn3.get_all_policies(as_group='gp', policy_names=['scale_up'])[0] down_policy = conn3.get_all_policies(as_group='gp', policy_names=['scale_down'])[0] # set up cloudwatch cloudwatch = boto.ec2.cloudwatch.connect_to_region('us-east-1', aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],