Example #1
import os
import time
import datetime

import boto.ec2.autoscale
from boto.ec2.autoscale import LaunchConfiguration, AutoScalingGroup


def create_autoscaling_group():
    global img
    # a single region-scoped connection, authenticated from the environment
    conn = boto.ec2.autoscale.connect_to_region(
        'us-east-1',
        aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
        aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'])
    print conn.get_all_groups()
    timestamp = time.time()
    value = datetime.datetime.fromtimestamp(timestamp)
    humanreadabledate = value.strftime('%Y-%m-%d_%H.%M.%S')
    config_name = 'live_launch_config'+humanreadabledate
    # user_data script: the newline after the shebang is required
    init_script = "#!/bin/sh\n/home/ec2-user/sds/deployment_scripts/initialize_server.py"
    lc = LaunchConfiguration(name=config_name, image_id=img,
                             key_name='SDSEastKey',
                             security_groups=['sg-a7afb1c2'],
                             user_data=init_script)
    conn.create_launch_configuration(lc)
    ag = AutoScalingGroup(group_name=config_name,
                          load_balancers=['SDSLiveLoadBalancer'],
                          availability_zones=['us-east-1a'],
                          launch_config=lc, min_size=2, max_size=2,
                          connection=conn)
    conn.create_auto_scaling_group(ag)
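A minimal invocation sketch for the example above. The function reads the module-level global img and AWS credentials from the environment; every value below is a placeholder, not a working id:

os.environ.setdefault('AWS_ACCESS_KEY_ID', '...')
os.environ.setdefault('AWS_SECRET_ACCESS_KEY', '...')
img = 'ami-12345678'  # hypothetical AMI id, read through the img global
create_autoscaling_group()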
Example #2
import fabric.api
import fabric.colors


def launch_auto_scaling(stage='development'):
    # get_provider_dict() comes from the surrounding project
    config = get_provider_dict()
    from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, LaunchConfiguration, Trigger
    conn = AutoScaleConnection(fabric.api.env.conf['AWS_ACCESS_KEY_ID'],
                               fabric.api.env.conf['AWS_SECRET_ACCESS_KEY'],
                               host='%s.autoscaling.amazonaws.com' % config['location'][:-1])

    # .items() is needed to unpack name/settings pairs; iterating the dict
    # directly would yield only the keys
    for name, values in config.get(stage, {}).get('autoscale', {}).items():
        if any(group.name == name for group in conn.get_all_groups()):
            # fabric.colors has no orange(); yellow is the closest built-in
            fabric.api.warn(fabric.colors.yellow('Autoscale group %s already exists' % name))
            continue
        lc = LaunchConfiguration(name='%s-launch-config' % name,
                                 image_id=values['image'], key_name=config['key'])
        conn.create_launch_configuration(lc)
        ag = AutoScalingGroup(group_name=name,
                              load_balancers=values.get('load-balancers'),
                              availability_zones=[config['location']],
                              launch_config=lc,
                              min_size=values['min-size'],
                              max_size=values['max-size'])
        conn.create_auto_scaling_group(ag)
        if 'min-cpu' in values and 'max-cpu' in values:
            tr = Trigger(name='%s-trigger' % name,
                         autoscale_group=ag,
                         measure_name='CPUUtilization',
                         statistic='Average',
                         unit='Percent',
                         dimensions=[('AutoScalingGroupName', ag.name)],
                         period=60,
                         lower_threshold=values['min-cpu'],
                         lower_breach_scale_increment='-1',
                         upper_threshold=values['max-cpu'],
                         upper_breach_scale_increment='2',
                         breach_duration=60)
            conn.create_trigger(tr)
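The snippet never shows what get_provider_dict() returns; judging purely from the keys the loop reads, the relevant part of the config plausibly looks like this (a reconstruction, not the project's documented schema):

config = {
    'location': 'us-east-1a',   # availability zone; [:-1] yields the region
    'key': 'my-keypair',
    'development': {
        'autoscale': {
            'web': {
                'image': 'ami-12345678',
                'load-balancers': ['web-lb'],
                'min-size': 1,
                'max-size': 4,
                'min-cpu': 20,  # optional; together with max-cpu creates a Trigger
                'max-cpu': 80,
            },
        },
    },
}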
Example #4
import time

from boto.ec2.connection import EC2Connection
from boto.ec2.elb import ELBConnection, HealthCheck
from boto.ec2.autoscale import (AutoScaleConnection, LaunchConfiguration,
                                AutoScalingGroup, ScalingPolicy)
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
from boto.exception import EC2ResponseError


class MSBManager:
    def __init__(self, aws_access_key, aws_secret_key):
        self.ec2_conn = EC2Connection(aws_access_key, aws_secret_key)
        self.elb_conn = ELBConnection(aws_access_key, aws_secret_key)
        self.auto_scale_conn = AutoScaleConnection(aws_access_key, aws_secret_key)
        self.cloud_watch_conn = CloudWatchConnection(aws_access_key, aws_secret_key)
        self.default_cooldown = 60

    def get_security_group(self, name):
        sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
        return sgs[0] if sgs else None

    def create_security_group(self, name, description):
        sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
        sg = sgs[0] if sgs else None
        if not sgs:
            sg = self.ec2_conn.create_security_group(name, description)

        try:
            sg.authorize(ip_protocol="-1", from_port=None, to_port=None, cidr_ip="0.0.0.0/0", dry_run=False)
        except EC2ResponseError:
            pass
        return sg

    def remove_security_group(self, name):
        self.ec2_conn.delete_security_group(name=name)

    def create_instance(self, image, instance_type, key_name, zone, security_groups, tags):
        instance = None
        reservations = self.ec2_conn.get_all_instances()
        for reservation in reservations:
            for i in reservation.instances:
                if "Name" in i.tags and i.tags["Name"] == tags["Name"] and i.state == "running":
                    instance = i
                    break

        if not instance:
            reservation = self.ec2_conn.run_instances(
                image,
                instance_type=instance_type,
                key_name=key_name,
                placement=zone,
                security_groups=security_groups,
                monitoring_enabled=True,
            )
            instance = reservation.instances[0]
            while not instance.update() == "running":
                time.sleep(5)
            time.sleep(10)
            self.ec2_conn.create_tags([instance.id], tags)

        return instance

    def request_spot_instance(self, bid, image, instance_type, key_name, zone, security_groups, tags):
        req = self.ec2_conn.request_spot_instances(
            price=bid,
            instance_type=instance_type,
            image_id=image,
            placement=zone,
            key_name=key_name,
            security_groups=security_groups,
        )
        instance_id = None

        while not instance_id:
            job_sir_id = req[0].id
            requests = self.ec2_conn.get_all_spot_instance_requests()
            for sir in requests:
                if sir.id == job_sir_id:
                    instance_id = sir.instance_id
                    break
            if not instance_id:
                print "Job {} not ready".format(job_sir_id)
                time.sleep(60)

        self.ec2_conn.create_tags([instance_id], tags)

    def remove_instance(self, instance_id):
        self.remove_instances([instance_id])

    def remove_instances(self, instance_ids):
        self.ec2_conn.terminate_instances(instance_ids)

    def remove_instance_by_tag_name(self, name):
        reservations = self.ec2_conn.get_all_instances()
        data_center_instance_ids = []
        for reservation in reservations:
            for instance in reservation.instances:
                if "Name" in instance.tags and instance.tags["Name"] == name and instance.state == "running":
                    data_center_instance_ids.append(instance.id)
        if data_center_instance_ids:
            self.remove_instances(data_center_instance_ids)

    def create_elb(self, name, zone, project_tag_value, security_group_id, instance_ids=None):
        lbs = [l for l in self.elb_conn.get_all_load_balancers() if l.name == name]
        lb = lbs[0] if lbs else None
        if not lb:
            hc = HealthCheck(
                timeout=50, interval=60, healthy_threshold=2, unhealthy_threshold=8, target="HTTP:80/heartbeat"
            )
            ports = [(80, 80, "http")]
            zones = [zone]
            lb = self.elb_conn.create_load_balancer(name, zones, ports)

            self.elb_conn.apply_security_groups_to_lb(name, [security_group_id])
            lb.configure_health_check(hc)
            if instance_ids:
                lb.register_instances(instance_ids)

            params = {
                "LoadBalancerNames.member.1": lb.name,
                "Tags.member.1.Key": "15619project",
                "Tags.member.1.Value": project_tag_value,
            }
            lb.connection.get_status("AddTags", params, verb="POST")
        return lb

    def remove_elb(self, name):
        self.elb_conn.delete_load_balancer(name)

    def create_launch_configuration(self, name, image, key_name, security_groups, instance_type):
        lcs = [l for l in self.auto_scale_conn.get_all_launch_configurations() if l.name == name]
        lc = lcs[0] if lcs else None
        if not lc:
            lc = LaunchConfiguration(
                name=name,
                image_id=image,
                key_name=key_name,
                security_groups=[security_groups],
                instance_type=instance_type,
            )
            self.auto_scale_conn.create_launch_configuration(lc)
        return lc

    def remove_launch_configuration(self, name):
        self.auto_scale_conn.delete_launch_configuration(name)

    def create_autoscaling_group(self, name, lb_name, zone, tags, launch_config_name, instance_ids=None):
        # look up the launch configuration by name; it must have been created
        # beforehand via create_launch_configuration()
        lcs = self.auto_scale_conn.get_all_launch_configurations(names=[launch_config_name])
        lc = lcs[0] if lcs else None
        as_groups = [a for a in self.auto_scale_conn.get_all_groups() if a.name == name]
        as_group = as_groups[0] if as_groups else None
        if not as_group:
            as_group = AutoScalingGroup(
                group_name=name,
                load_balancers=[lb_name],
                availability_zones=[zone],
                launch_config=lc,
                min_size=4,
                max_size=4,
                health_check_type="ELB",
                health_check_period=120,
                connection=self.auto_scale_conn,
                default_cooldown=self.default_cooldown,
                desired_capacity=4,
                tags=tags,
            )

            self.auto_scale_conn.create_auto_scaling_group(as_group)
            if instance_ids:
                self.auto_scale_conn.attach_instances(name, instance_ids)

            scale_up_policy = ScalingPolicy(
                name="scale_up",
                adjustment_type="ChangeInCapacity",
                as_name=name,
                scaling_adjustment=1,
                cooldown=self.default_cooldown,
            )
            scale_down_policy = ScalingPolicy(
                name="scale_down",
                adjustment_type="ChangeInCapacity",
                as_name=name,
                scaling_adjustment=-1,
                cooldown=self.default_cooldown,
            )

            self.auto_scale_conn.create_scaling_policy(scale_up_policy)
            self.auto_scale_conn.create_scaling_policy(scale_down_policy)

            scale_up_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=["scale_up"])[0]
            scale_down_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=["scale_down"])[0]

            alarm_dimensions = {"AutoScalingGroupName": name}
            scale_up_alarm = MetricAlarm(
                name="scale_up_on_cpu",
                namespace="AWS/EC2",
                metric="CPUUtilization",
                statistic="Average",
                comparison=">",
                threshold=85,
                period=60,
                evaluation_periods=1,
                alarm_actions=[scale_up_policy.policy_arn],
                dimensions=alarm_dimensions,
            )
            self.cloud_watch_conn.create_alarm(scale_up_alarm)
            scale_down_alarm = MetricAlarm(
                name="scale_down_on_cpu",
                namespace="AWS/EC2",
                metric="CPUUtilization",
                statistic="Average",
                comparison="<",
                threshold=60,
                period=60,
                evaluation_periods=1,
                alarm_actions=[scale_down_policy.policy_arn],
                dimensions=alarm_dimensions,
            )
            self.cloud_watch_conn.create_alarm(scale_down_alarm)

        return as_group

    def update_autoscaling_group_max_size(self, as_group, max_size):
        as_group.max_size = max_size
        as_group.update()

    def update_autoscaling_group_min_size(self, as_group, min_size):
        as_group.min_size = min_size
        as_group.update()

    def remove_autoscaling_group(self, name):
        self.auto_scale_conn.delete_auto_scaling_group(name)
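A hedged end-to-end sketch of driving the manager above; names and sizes are illustrative, and the tags argument must be a list of boto Tag objects:

manager = MSBManager('AKIA...', '...')
sg = manager.create_security_group('msb-sg', 'demo security group')
manager.create_elb('msb-elb', 'us-east-1a', 'demo', sg.id)
manager.create_launch_configuration('msb-lc', 'ami-12345678', 'my-key', 'msb-sg', 'm1.small')
manager.create_autoscaling_group('msb-asg', 'msb-elb', 'us-east-1a', [], 'msb-lc')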
Example #5

import json

from boto.ec2.autoscale import AutoScaleConnection
from boto.ec2.regioninfo import RegionInfo

# ScaleInterface and BotoJsonScaleEncoder are defined elsewhere in the
# surrounding project
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/AutoScaling'
        port = 8773
        if clc_host.endswith('amazonaws.com'):
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            port = 443
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        self.conn = AutoScaleConnection(access_id, secret_key, region=reg,
                                  port=port, path=path,
                                  is_secure=True, security_token=token, debug=0)
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # note: honor_cooldown is not supported by this backend
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names, max_records, next_token):
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records, next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj
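A hedged instantiation sketch; host names are placeholders. A Eucalyptus cloud controller is addressed on port 8773 under /services/AutoScaling, while a host ending in amazonaws.com is rewritten to the AWS autoscaling endpoint:

iface = BotoScaleInterface('clc.example.com', 'access-id', 'secret-key', None)
iface.saveclcdata = True  # also dump each response to mockdata/*.json
print iface.get_all_groups()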
Example #6

from boto.ec2.autoscale import (AutoScaleConnection, LaunchConfiguration,
                                AutoScalingGroup, ScalingPolicy, Tag)
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
from boto.regioninfo import RegionInfo


def create_autoscaling(ami_id, sns_arn):
    """
    Creates the autoscaling group for proxy instances
    Inspired by boto autoscaling tutorial.
    """
    con = AutoScaleConnection(aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                              aws_access_key_id=AWS_ACCESS_KEY,
                              region=RegionInfo(name=REGION,
                                                endpoint='autoscaling.%s.amazonaws.com' % REGION))

    print "Creating autoscaling configuration.."
    config = LaunchConfiguration(name=AUTOSCALING_GROUP_NAME,
                                 image_id=ami_id,
                                 key_name=KEY_NAME,
                                 security_groups=[EC2_SECURITY_GROUP_NAME],
                                 instance_type=INSTANCE_TYPE)

    con.create_launch_configuration(config)


    print "Create autoscaling group..."
    ag = AutoScalingGroup(name=AUTOSCALING_GROUP_NAME,
                          launch_config=config,
                          availability_zones=["{0}a".format(REGION)],
                          load_balancers=[ELB_NAME],
                          min_size=AUTOSCALING_MIN_INSTANCES,
                          max_size=AUTOSCALING_MAX_INSTANCES,
                          group_name=AUTOSCALING_GROUP_NAME)
    con.create_auto_scaling_group(ag)

    # fetch the autoscale group after it is created (unused but may be necessary)
    _ = con.get_all_groups(names=[AUTOSCALING_GROUP_NAME])[0]

    # Create tag name for autoscaling-created machines
    as_tag = Tag(key='Name', value=AUTOSCALING_GROUP_NAME,
                 propagate_at_launch=True, resource_id=AUTOSCALING_GROUP_NAME)
    con.create_or_update_tags([as_tag])


    print "Creating autoscaling policy..."
    scaleup_policy = ScalingPolicy(name='scale_up',
                                   adjustment_type='ChangeInCapacity',
                                   as_name=AUTOSCALING_GROUP_NAME,
                                   scaling_adjustment=1,
                                   cooldown=AUTOSCALING_COOLDOWN_PERIOD)

    scaledown_policy = ScalingPolicy(name='scale_down',
                                     adjustment_type='ChangeInCapacity',
                                     as_name=AUTOSCALING_GROUP_NAME,
                                     scaling_adjustment=-1,
                                     cooldown=AUTOSCALING_COOLDOWN_PERIOD)

    con.create_scaling_policy(scaleup_policy)
    con.create_scaling_policy(scaledown_policy)

    # Get freshened policy objects
    scaleup_policy = con.get_all_policies(as_group=AUTOSCALING_GROUP_NAME, policy_names=['scale_up'])[0]
    scaledown_policy = con.get_all_policies(as_group=AUTOSCALING_GROUP_NAME, policy_names=['scale_down'])[0]

    print "Creating cloudwatch alarms"
    cloudwatch_con = CloudWatchConnection(aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                                          aws_access_key_id=AWS_ACCESS_KEY,
                                          region=RegionInfo(name=REGION,
                                                            endpoint='monitoring.%s.amazonaws.com' % REGION))


    alarm_dimensions = {"AutoScalingGroupName": AUTOSCALING_GROUP_NAME}
    scaleup_alarm = MetricAlarm(name='scale_up_on_cpu',
                                namespace='AWS/EC2',
                                metric='CPUUtilization',
                                statistic='Average',
                                comparison='>',
                                threshold=AUTOSCALING_CPU_MAX_THRESHOLD,
                                period='60',
                                evaluation_periods=1,
                                alarm_actions=[scaleup_policy.policy_arn, sns_arn],
                                dimensions=alarm_dimensions)

    # Don't send SNS on scaledown policy
    scaledown_alarm = MetricAlarm(name='scale_down_on_cpu',
                                  namespace='AWS/EC2',
                                  metric='CPUUtilization',
                                  statistic='Average',
                                  comparison='<',
                                  threshold=AUTOSCALING_CPU_MIN_THRESHOLD,
                                  period='60',
                                  evaluation_periods=1,
                                  alarm_actions=[scaledown_policy.policy_arn],
                                  dimensions=alarm_dimensions)
    cloudwatch_con.create_alarm(scaleup_alarm)
    cloudwatch_con.create_alarm(scaledown_alarm)
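For reference, the module-level names this function relies on might be defined as follows; the names are taken from the code, but every value is an illustrative assumption:

AWS_ACCESS_KEY = 'AKIA...'
AWS_SECRET_ACCESS_KEY = '...'
REGION = 'us-east-1'
AUTOSCALING_GROUP_NAME = 'proxy-asg'
KEY_NAME = 'proxy-key'
EC2_SECURITY_GROUP_NAME = 'proxy-sg'
INSTANCE_TYPE = 'm1.small'
ELB_NAME = 'proxy-elb'
AUTOSCALING_MIN_INSTANCES = 2
AUTOSCALING_MAX_INSTANCES = 8
AUTOSCALING_COOLDOWN_PERIOD = 300   # seconds
AUTOSCALING_CPU_MAX_THRESHOLD = 70  # percent; scale up above this
AUTOSCALING_CPU_MIN_THRESHOLD = 20  # percent; scale down below this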
Example #7
    def test_basic(self):
        # NB: as it says on the tin these are really basic tests that only
        # (lightly) exercise read-only behaviour - and that's only if you
        # have any autoscale groups to introspect. It's useful, however, to
        # catch simple errors

        print('--- running %s tests ---' % self.__class__.__name__)
        c = AutoScaleConnection()

        self.assertTrue(repr(c).startswith('AutoScaleConnection'))

        groups = c.get_all_groups()
        for group in groups:
            self.assertIsInstance(group, AutoScalingGroup)

            # get activities
            activities = group.get_activities()

            for activity in activities:
                self.assertIsInstance(activity, Activity)

        # get launch configs
        configs = c.get_all_launch_configurations()
        for config in configs:
            self.assertIsInstance(config, LaunchConfiguration)

        # get policies
        policies = c.get_all_policies()
        for policy in policies:
            self.assertIsInstance(policy, ScalingPolicy)

        # get scheduled actions
        actions = c.get_all_scheduled_actions()
        for action in actions:
            self.assertIsInstance(action, ScheduledUpdateGroupAction)

        # get instances
        instances = c.get_all_autoscaling_instances()
        for instance in instances:
            self.assertIsInstance(instance, Instance)

        # get all scaling process types
        ptypes = c.get_all_scaling_process_types()
        for ptype in ptypes:
            self.assertIsInstance(ptype, ProcessType)

        # get adjustment types
        adjustments = c.get_all_adjustment_types()
        for adjustment in adjustments:
            self.assertIsInstance(adjustment, AdjustmentType)

        # get metrics collection types
        types = c.get_all_metric_collection_types()
        self.assertIsInstance(types, MetricCollectionTypes)

        # create the simplest possible AutoScale group
        # first create the launch configuration
        time_string = '%d' % int(time.time())
        lc_name = 'lc-%s' % time_string
        lc = LaunchConfiguration(name=lc_name, image_id='ami-2272864b',
                                 instance_type='t1.micro')
        c.create_launch_configuration(lc)
        found = False
        lcs = c.get_all_launch_configurations()
        for lc in lcs:
            if lc.name == lc_name:
                found = True
                break
        assert found

        # now create autoscaling group
        group_name = 'group-%s' % time_string
        group = AutoScalingGroup(name=group_name, launch_config=lc,
                                 availability_zones=['us-east-1a'],
                                 min_size=1, max_size=1)
        c.create_auto_scaling_group(group)
        found = False
        groups = c.get_all_groups()
        for group in groups:
            if group.name == group_name:
                found = True
                break
        assert found

        # now create a tag
        tag = Tag(key='foo', value='bar', resource_id=group_name,
                  propagate_at_launch=True)
        c.create_or_update_tags([tag])

        found = False
        tags = c.get_all_tags()
        for tag in tags:
            if tag.resource_id == group_name and tag.key == 'foo':
                found = True
                break
        assert found

        c.delete_tags([tag])

        # shutdown instances and wait for them to disappear
        group.shutdown_instances()
        instances = True
        while instances:
            time.sleep(5)
            groups = c.get_all_groups()
            for group in groups:
                if group.name == group_name:
                    if not group.instances:
                        instances = False

        group.delete()
        lc.delete()

        found = True
        while found:
            found = False
            time.sleep(5)
            tags = c.get_all_tags()
            for tag in tags:
                if tag.resource_id == group_name and tag.key == 'foo':
                    found = True

        assert not found

        print('--- tests completed ---')
Example #8
lc = LaunchConfiguration(name=lc_name,
                         image_id=as_ami['id'],
                         key_name=as_ami['access_key'],
                         security_groups=as_ami['security_groups'],
                         instance_type=as_ami['instance_type'],
                         instance_monitoring=as_ami['instance_monitoring'])
conn_as.create_launch_configuration(lc)

#For a complete list of options see http://boto.cloudhackers.com/ref/ec2.html#boto.ec2.autoscale.group.AutoScalingGroup
ag = AutoScalingGroup(group_name=autoscaling_group['name'],
                      load_balancers=[elastic_load_balancer['name']],
                      availability_zones=zoneStrings,
                      launch_config=lc,
                      min_size=autoscaling_group['min_size'],
                      max_size=autoscaling_group['max_size'])
conn_as.create_auto_scaling_group(ag)

#=================Create Scaling Policies=============================================
#Policy for scaling the number of servers up and down
#For a complete list of options see http://boto.cloudhackers.com/ref/ec2.html#boto.ec2.autoscale.policy.ScalingPolicy
scalingUpPolicy = ScalingPolicy(name='webserverScaleUpPolicy',
                                adjustment_type='ChangeInCapacity',
                                as_name=ag.name,
                                scaling_adjustment=2,
                                cooldown=180)

scalingDownPolicy = ScalingPolicy(name='webserverScaleDownPolicy',
                                  adjustment_type='ChangeInCapacity',
                                  as_name=ag.name,
                                  scaling_adjustment=-1,
                                  cooldown=180)
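The fragment stops before the two policies are registered; following the pattern used by the other examples here, the continuation would plausibly be:

conn_as.create_scaling_policy(scalingUpPolicy)
conn_as.create_scaling_policy(scalingDownPolicy)

# re-fetch the policies to obtain the ARNs needed for CloudWatch alarm actions
scalingUpPolicy = conn_as.get_all_policies(as_group=autoscaling_group['name'],
                                           policy_names=['webserverScaleUpPolicy'])[0]
scalingDownPolicy = conn_as.get_all_policies(as_group=autoscaling_group['name'],
                                             policy_names=['webserverScaleDownPolicy'])[0]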
Example #9
import os
import ConfigParser

from boto.ec2.connection import EC2Connection
from boto.ec2.elb import ELBConnection, HealthCheck
from boto.ec2.autoscale import (AutoScaleConnection, LaunchConfiguration,
                                AutoScalingGroup, ScalingPolicy)
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm


class AutoScale:

    def __init__(self, args):
        """
        Initializing basic variables needed for auto scaling
        """
        self.configs                = ConfigParser.RawConfigParser()
        self.args                   = args
        self.test_props             = {}
        self.props                  = {}
        self.boto_props             = {}
        self.ec2_connection         = EC2Connection(self.args.access_key, self.args.secret_key)
        self.autoscale_connection   = AutoScaleConnection(self.args.access_key, self.args.secret_key)
        self.elb_connection         = ELBConnection(self.args.access_key, self.args.secret_key)
        self.cw_connection          = CloudWatchConnection(self.args.access_key, self.args.secret_key)
        self.firstInstance          = None
        self.launchConfiguration    = None
        self.healthCheck            = None

    def loadConfigs(self):
        """
        FIXME: currently doesn't do anything useful.
        Loads the configuration from the boto config file if present;
        otherwise falls back to the parameters passed by the user.
        """
        if os.path.isfile("/etc/boto.cfg"):
            self.configs.read("/etc/boto.cfg")
            conf = self.configs.sections()
            self.populateConfigs(conf)
        if os.path.isfile("~/.boto"):
            self.configs.read("~/.boto")
            conf = self.configs.sections()
            self.populateConfigs(conf)
            
        print ">>> Loaded configs"
            
    def populateConfigs(self, sections):
        for section in sections:
            self.boto_props[section] = self.configs.items(section)
            for item in self.boto_props[section]:
                key, value = item
                if key not in self.props:
                    self.props[key] = value

    def createLaunchConfiguration(self, lc_name, ami_id, key_name):
        """
        Creates launch configuration for the auto scaling cluster
        """
        self.launchConfiguration = LaunchConfiguration(name     = lc_name, 
                                                       image_id = ami_id, 
                                                       key_name = key_name)
        self.autoscale_connection.create_launch_configuration(self.launchConfiguration)
        print ">>> Created launch configuration: " + lc_name

    def createAutoScaleGroup(self, asg_name):
        """
        Create an Auto Scaling group for the auto scaling cluster
        """
        autoScalingGroup = AutoScalingGroup(group_name         = asg_name, 
                                            load_balancers     = [self.args.lb_name], 
                                            launch_config      = self.launchConfiguration, 
                                            min_size           = self.args.min_size, 
                                            max_size           = self.args.max_size, 
                                            availability_zones = ['us-east-1a'])
        self.autoscale_connection.create_auto_scaling_group(autoScalingGroup)
        print ">>> Created auto scaling group: " + asg_name

    def createTrigger(self, trigger_name, measure, asg_name):
        """
        Trigger to spawn new instances as per specific metrics
        """
        alarm_actions = []
        dimensions = {"AutoScalingGroupName" : asg_name}
        policies = self.autoscale_connection.get_all_policies(as_group=self.args.asg_name, policy_names=[self.args.asp_name])
        for policy in policies:
            alarm_actions.append(policy.policy_arn)
        alarm = MetricAlarm(name                = trigger_name, 
                            namespace           = "AWS/EC2", 
                            metric              = measure, 
                            statistic           = "Average", 
                            comparison          = ">=", 
                            threshold           = 50, 
                            period              = 60, 
                            unit                = "Percent",
                            evaluation_periods  = 2,  
                            alarm_actions       = alarm_actions, 
                            dimensions          = dimensions)
        
        self.cw_connection.create_alarm(alarm)
        print ">>> Created trigger: "+self.args.trigger

    def createAutoScalePolicy(self, asp_name):
        """
        Creates an Auto Scaling policy to add/remove an instance from the auto scaling cluster
        """
        self.autoScalingUpPolicy = ScalingPolicy(name                 = asp_name+'-up',
                                                 adjustment_type      = "ChangeInCapacity", 
                                                 as_name              = self.args.asg_name, 
                                                 scaling_adjustment   = 1, 
                                                 cooldown             = 180)
        self.autoScalingDownPolicy = ScalingPolicy(name                 = asp_name+'-down',
                                                   adjustment_type      = "ChangeInCapacity", 
                                                   as_name              = self.args.asg_name, 
                                                   scaling_adjustment   = -1, 
                                                   cooldown             = 180)

        self.autoscale_connection.create_scaling_policy(self.autoScalingUpPolicy)
        self.autoscale_connection.create_scaling_policy(self.autoScalingDownPolicy)
        
        print ">>> Created auto scaling policy: " + asp_name
    
    def configureHealthCheck(self, target):
        """
        Configures health check for the cluster
        """
        self.healthCheck = HealthCheck(target   = target, 
                                       timeout  = 5)
        print ">>> Configured health check for: " + target
        
    def createLoadBalancer(self, lb_name, region, lb_port, instance_port, protocol):
        """
        Creates a load balancer for cluster
        """
        listener = (int(lb_port), int(instance_port), protocol)
        tuple_list =[]
        tuple_list.append(listener)
        lbs = self.elb_connection.get_all_load_balancers()
        for lb in lbs:
            if lb.name != lb_name:
                self.elb_connection.create_load_balancer(lb_name, [region], tuple_list)
                self.elb_connection.configure_health_check(name         = lb_name, 
                                                           health_check = self.healthCheck)
                print ">>> Created load balancer: " + lb_name
            else:
                print "Load balancer with name '"+lb_name+"' already exists"
        
    def startInstance(self, image_id, key_name, region, instance_type):
        """
        Starts the first instance which will be serving requests irrespective of auto scaling 
        instances.
        """
        reservation = self.ec2_connection.run_instances(image_id=image_id, min_count=1, max_count=1, placement=region, key_name=key_name, instance_type=instance_type)
#        for instance in reservation.instances:
#            instance.add_tag('node', '0')
#            break

        self.firstInstance = reservation.instances[0].id
        print ">>> Started instance: ", self.firstInstance
    
    def registerInstanceToELB(self, lb_name):
        """
        Register the first instance started to the Elastic Load Balancer.
        """
        self.elb_connection.register_instances(load_balancer_name = lb_name, 
                                               instances          = [self.firstInstance])
        print ">>> Registered instance '",self.firstInstance,"' to load balancer '"+lb_name+"'"
    
    def setUp(self):
        """
        Sets up auto scaling for the application
        """
        # STEP 1: Load the configurations
        self.loadConfigs()
        # STEP 2: Configure the health check for the instances
        self.configureHealthCheck(self.args.lb_target)
        # STEP 3: Create a load balancer
        self.createLoadBalancer(self.args.lb_name, self.args.region, self.args.lb_port, self.args.instance_port, self.args.protocol)
        # STEP 4: Start the first instance
        self.startInstance(self.args.ami_id, self.args.key_name, self.args.region, self.args.instance_type)
        # STEP 5: Register the instance to the load balancer created in STEP 3
        self.registerInstanceToELB(self.args.lb_name)
        # STEP 6: Create a launch configuration for instances launched by auto scaling
        self.createLaunchConfiguration(self.args.lc_name, self.args.ami_id, self.args.key_name)
        # STEP 7: Create an auto scaling group to manage those instances
        self.createAutoScaleGroup(self.args.asg_name)
        # STEP 8: Create an auto scaling policy to add/remove a node
        self.createAutoScalePolicy(self.args.asp_name)
        # STEP 9: Create a trigger so that auto scaling can add or remove
        # an instance from the auto scaling group
        self.createTrigger(self.args.trigger, self.args.measure, self.args.asg_name)
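The self.args namespace is populated elsewhere (e.g. by argparse); judging from the attributes used above, a plausible invocation looks like this, with every value illustrative:

import argparse

args = argparse.Namespace(
    access_key='AKIA...', secret_key='...',
    region='us-east-1a',
    lb_name='my-lb', lb_target='HTTP:80/index.html',
    lb_port=80, instance_port=80, protocol='http',
    ami_id='ami-12345678', key_name='my-key', instance_type='m1.small',
    lc_name='my-lc', asg_name='my-asg', asp_name='my-asp',
    trigger='my-trigger', measure='CPUUtilization',
    min_size=1, max_size=4)
AutoScale(args).setUp()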
Example #10

from boto.ec2.autoscale import (AutoScaleConnection, LaunchConfiguration,
                                AutoScalingGroup, ScalingPolicy, Tag)
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm


def create_AutoScaling():
    print "Creating AutoScaling..."
    # establish connection
    as_conn = AutoScaleConnection(AWSAccessKeyId, AWSSecretKey)
    # create launch configuration
    global lc
    lc = LaunchConfiguration(name='lc',
                             image_id=DATA_CEN_AMI,
                             key_name=ACCESS_KEY,
                             instance_monitoring=True,
                             security_groups=[SECURITY_GRP],
                             instance_type=MACHINE_TYPE)
    as_conn.create_launch_configuration(lc)

    # create tag for autoscaling group
    as_tag = Tag(key="Project",
                 value="2.2",
                 propagate_at_launch=True,
                 resource_id='my_group')

    # create autoscaling group
    global ag
    ag = AutoScalingGroup(group_name='my_group',
                          load_balancers=['myELB'],
                          availability_zones=['us-east-1a'],
                          launch_config=lc,
                          min_size=MIN_SIZE,
                          max_size=MAX_SIZE,
                          connection=as_conn,
                          tags=[as_tag])
    # associate the autoscaling group with launch configuration
    as_conn.create_auto_scaling_group(ag)

    # build the scale policy
    scale_up_policy = ScalingPolicy(name='scale_up',
                                    adjustment_type='ChangeInCapacity',
                                    as_name='my_group',
                                    scaling_adjustment=1,
                                    cooldown=60)
    scale_down_policy = ScalingPolicy(name='scale_down',
                                      adjustment_type='ChangeInCapacity',
                                      as_name='my_group',
                                      scaling_adjustment=-1,
                                      cooldown=60)

    # register the scale policy
    as_conn.create_scaling_policy(scale_up_policy)
    as_conn.create_scaling_policy(scale_down_policy)

    # re-fetch the policies to pick up the generated ARNs
    scale_up_policy = as_conn.get_all_policies(as_group='my_group',
                                               policy_names=['scale_up'])[0]
    scale_down_policy = as_conn.get_all_policies(as_group='my_group',
                                                 policy_names=['scale_down'])[0]

    # create cloudwatch alarm
    cloudwatch = CloudWatchConnection(aws_access_key_id=AWSAccessKeyId,
                                      aws_secret_access_key=AWSSecretKey,
                                      is_secure=True)
    # region='us-east-1a')

    # associate the CloudWatch alarms with the group
    alarm_dimensions = {"AutoScalingGroupName": 'my_group'}

    # create scale up alarm
    scale_up_alarm = MetricAlarm(name='scale_up_on_cpu',
                                 namespace='AWS/EC2',
                                 metric='CPUUtilization',
                                 statistic='Average',
                                 comparison='>',
                                 threshold='50',
                                 period='60',
                                 evaluation_periods=2,
                                 alarm_actions=[scale_up_policy.policy_arn],
                                 dimensions=alarm_dimensions)
    cloudwatch.create_alarm(scale_up_alarm)

    # create scale down alarm
    scale_down_alarm = MetricAlarm(
        name='scale_down_on_cpu',
        namespace='AWS/EC2',
        metric='CPUUtilization',
        statistic='Average',
        comparison='<',
        threshold='20',
        period='60',
        evaluation_periods=1,
        alarm_actions=[scale_down_policy.policy_arn],
        dimensions=alarm_dimensions)
    cloudwatch.create_alarm(scale_down_alarm)

    print "AutoScaling created successfully"
Example #11
                         key_name=KEY_NAME,
                         security_groups=SECURITY_GROUP2,
                         instance_type=DC_INSTANCE_TYPE,
                         instance_monitoring=DETAIL_MON)
con_as.create_launch_configuration(lc)

asg = AutoScalingGroup(name='Project2.2_AutoSacling_Group',
                       load_balancers=[elb['name']],
                       availability_zones=ZONE,
                       health_check_period='120',
                       health_check_type='ELB',
                       launch_config=lc,
                       min_size=1,
                       max_size=5,
                       tags=[Tag(key='Project', value='2.2',
                                 propagate_at_launch=True,
                                 resource_id='Project2.2_AutoSacling_Group')])
con_as.create_auto_scaling_group(asg)

# -------------------------Create Scaling Policies------------------------------
scaleOut = ScalingPolicy(name='ScaleOut',
                         adjustment_type='ChangeInCapacity',
                         as_name=asg.name,
                         scaling_adjustment=1,
                         cooldown=100)

scaleIn = ScalingPolicy(name='ScaleIn',
                        adjustment_type='ChangeInCapacity',
                        as_name=asg.name,
                        scaling_adjustment=-1,
                        cooldown=100)

con_as.create_scaling_policy(scaleOut)
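The fragment ends before scaleIn is registered; the continuation presumably mirrors the other examples:

con_as.create_scaling_policy(scaleIn)

# re-fetch both policies to obtain their ARNs for CloudWatch alarms
scaleOut = con_as.get_all_policies(as_group=asg.name, policy_names=['ScaleOut'])[0]
scaleIn = con_as.get_all_policies(as_group=asg.name, policy_names=['ScaleIn'])[0]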
Example #12
import sys

from boto.ec2.elb import ELBConnection, HealthCheck
from boto.ec2.autoscale import (AutoScaleConnection, LaunchConfiguration,
                                AutoScalingGroup, Trigger)


class aws:
    def __init__(self,PREFIX='tfound-',ENV='dev',AMI='',TYPE='',SIZE='',MIN=2,MAX=5,
                 DOMAIN='tfound',SSHKEY='myprivatekey',AWSKEY='',AWSSECRET='',AVAIL_ZONES=["us-east-1a","us-east-1b","us-east-1c","us-east-1d"]):
        '''
        Shows examples
        Create load balancer group 'tfound-dev-web-lb' for web servers, in dev group for tfound:
            python control-lb-and-groups.py --createlb --env dev --aws SC --type web
        Add an instance to the load balancer group:
            python control-lb-and-groups.py --addtolb=true --env dev --aws SC --type web --instance=i-999999
        Create launch config using ami ami-fa6b8393 (default), medium sized instance, and Autoscale Group 'tfound-dev-web-group' with a min of 2 instances, max 5, with health check on port 80:
            python control-lb-and-groups.py  --createlc --ami ami-fa6b8393 --size c1.medium --env dev --aws SC --type web --createag --min 2 --max 5
        Triggers/Health checks are hard coded to spawn new instances when total cpu reaches 60 percent or health check fails.
        '''
        self.PREFIX=PREFIX+DOMAIN+'-'+ENV+'-'+TYPE
        self.ENV=ENV
        self.AMI=AMI
        self.TYPE=TYPE
        self.DOMAIN=DOMAIN
        self.SIZE=SIZE
        self.MIN=MIN
        self.MAX=MAX
        self.SSHKEY=SSHKEY
        self.AWSKEY=AWSKEY
        self.AWSSECRET=AWSSECRET
        self.AVAIL_ZONES=AVAIL_ZONES
        self.LBNAME=self.PREFIX+'-lb'
        self.AGNAME=self.PREFIX+'-group'
        self.TRNAME=self.PREFIX+'-trigger'
        self.LCNAME=self.PREFIX+'-launch_config'
        self.asconn=AutoScaleConnection(self.AWSKEY, self.AWSSECRET)
        self.elbconn = ELBConnection(aws_access_key_id=AWSKEY,aws_secret_access_key=AWSSECRET)
        self.lc = self._buildLaunchConfig()
        self.ag = self._buildAutoscaleGroup()
        
    
    def _buildLaunchConfig(self):
        return LaunchConfiguration(name=self.LCNAME, 
                                   image_id=self.AMI,
                                   key_name=self.SSHKEY,
                                   security_groups=[self.ENV+'.'+self.TYPE],
                                   user_data='LAUNCHTAGS="'+self.ENV+' '
                                   +self.TYPE+' '+self.DOMAIN+'";',
                                   instance_type=self.SIZE)

    def _buildAutoscaleGroup(self):
        return AutoScalingGroup(group_name=self.AGNAME,
                              load_balancers=[self.LBNAME],
                              availability_zones=self.AVAIL_ZONES,
                              launch_config=self.lc, 
                              min_size=self.MIN,
                              max_size=self.MAX)

    def getGroups(self):
        '''get existing lb groups'''
        # conn = AutoScaleConnection(AWSKEY, AWSSECRET)
        #conn = AutoScaleConnection()
        return self.asconn.get_all_groups()

    def getActivities(self,AUTOSCALE_GROUP=None):
        return self.asconn.get_all_activities(AUTOSCALE_GROUP)
                        
    def createLaunchConfig(self):
        '''create Launch Configuration to define initial startup params
        '''
        #conn = AutoScaleConnection(AWSKEY, AWSSECRET)
        #lc = self.buildLaunchConfig()
        return self.asconn.create_launch_configuration(self.lc)
    

        
    def createAutoscaleGroup(self):
        '''We now have created a launch configuration called tfound...launch-config.
        We are now ready to associate it with our new autoscale group.
        returns autoscale object
        '''
        #conn = AutoScaleConnection(AWSKEY, AWSSECRET) 
        #lc = self.buildLaunchConfig()
        
        return self.asconn.create_auto_scaling_group(self.ag)
        #conn.get_all_activities(ag)
        
    def createTrigger(self,AUTOSCALE_GROUP=None):
        ''' 
        you create a trigger on a group, pass in a group object
        this creates a trigger that scales up to MAX instances if average cpu utilitzation goes over 60, 
        scales down to MIN instances if under 40 avg cpu
        '''
        #conn = AutoScaleConnection(AWSKEY, AWSSECRET)
        tr = Trigger(name=self.TRNAME, 
                     autoscale_group=AUTOSCALE_GROUP,
                     measure_name='CPUUtilization', statistic='Average',
                     unit='Percent',
                     dimensions=[('AutoScalingGroupName', AUTOSCALE_GROUP.name),
                                 ('Namespace','AWS/EC2')],
                                 period=120, lower_threshold=10,
                                 lower_breach_scale_increment='-1',
                                 upper_threshold=30,
                                 upper_breach_scale_increment='1',
                                 breach_duration=360)
        return self.asconn.create_trigger(tr)
    
    def createHealthCheck(self):
        return HealthCheck('instance_health',
                           interval=20,
                           target='TCP:8080',
                           timeout=5)
    
    def createLoadbalancer(self):
        #elbconn = ELBConnection(aws_access_key_id=AWSKEY,aws_secret_access_key=AWSSECRET)
        #conn = ELBConnection(aws_access_key_id=AWSKEY,aws_secret_access_key=AWSSECRET,
        #  host='us-east-1a.elasticloadbalancing.amazonaws.com')
        #  hc = HealthCheck('instance_health', interval=20, target='HTTP:80/index.php', timeout='5')
        #  lb = conn.create_load_balancer('tfound-'+options.ENV+'-'+options.TYPE+'-lb', [options.ZONE],
        #                                   [(80, 80, 'http'),(8000,8000, 'tcp')])
        ##  lb.delete()
        hc = self.createHealthCheck()
        lb = self.elbconn.create_load_balancer(self.LBNAME,
                                               self.AVAIL_ZONES,
                                               [(8080, 8080, 'tcp')])

        lb.configure_health_check(hc)
        return lb.dns_name
    
    def addToLoadbalancer(self,INSTANCE=None):
        #from boto.ec2.elb import ELBConnection
        #from boto.ec2.elb import HealthCheck
        #conn = ELBConnection(aws_access_key_id=AWSKEY,aws_secret_access_key=AWSSECRET)
        #  conn = ELBConnection(aws_access_key_id=AWSKEY,aws_secret_access_key=AWSSECRET,
        #  host='us-east-1a.elasticloadbalancing.amazonaws.com')
        if INSTANCE is None:
            sys.stderr.write("Please provide an instance id to add; got: %s\n" % options.INSTANCE)
            raise SystemExit(1)
        lb = self.elbconn.register_instances(self.LBNAME, 
                                             INSTANCE)
        #lbgroup = 'tfound-'+options.ENV+'-'+options.TYPE+'-lb'
        print "Added instance %s to %s\n" % (INSTANCE, self.LBNAME)

    def getLoadbalancers(self):
        return self.elbconn.get_all_load_balancers()

    def startInstances(self,TYPE='',NUM='',SIZE=''):
        return
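A hedged usage sketch for the helper class above; all constructor values are illustrative (MIN/MAX are the sizing parameters exposed as --min/--max in the docstring):

helper = aws(ENV='dev', TYPE='web', AMI='ami-fa6b8393', SIZE='c1.medium',
             AWSKEY='AKIA...', AWSSECRET='...', MIN=2, MAX=5)
helper.createLaunchConfig()
helper.createAutoscaleGroup()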
Example #13
            'Value': '2.2'
        },
    ]
)
#Create Launch Configuration

print 'Creating LC'
lc = LaunchConfiguration(name='Project22', image_id=DC_IMAGE, key_name=KEY_NAME,
                         instance_type=DC_TYPE, instance_monitoring=True,
                         security_groups=['All_Traffic'])
asg_conn.create_launch_configuration(lc)
print 'LC created'

#Create Auto Scaling Group

print 'Creating ASG'
asg = AutoScalingGroup(group_name='Project22group', load_balancers=['Project22hongf'],
                       health_check_type='ELB', health_check_period='119',
                       desired_capacity=5, availability_zones=['us-east-1c'],
                       launch_config=lc, min_size=5, max_size=5,
                       tags=[boto.ec2.autoscale.tag.Tag(key='Project', value='2.2',
                                                        resource_id='Project22group',
                                                        propagate_at_launch=True)])
asg_conn.create_auto_scaling_group(asg)
print 'ASG created'

#Create Scaling Policy

print 'Creating Scaling Policy'
scale_out_policy = ScalingPolicy(name='scale_out', adjustment_type='ChangeInCapacity',
                                 as_name='Project22group', scaling_adjustment=1, cooldown=60)
asg_conn.create_scaling_policy(scale_out_policy)
scale_in_policy = ScalingPolicy(name='scale_in', adjustment_type='ChangeInCapacity',
                                as_name='Project22group', scaling_adjustment=-1, cooldown=60)
asg_conn.create_scaling_policy(scale_in_policy)

#Check policies and fetch them for CloudWatch
ScaleOut = asg_conn.get_all_policies(as_group='Project22group', policy_names=['scale_out'])[0]
ScaleIn = asg_conn.get_all_policies(as_group='Project22group', policy_names=['scale_in'])[0]
print 'Scaling Policy created'
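From here the ScaleOut/ScaleIn policy ARNs would feed CloudWatch alarms, as in the earlier examples; a sketch, assuming a CloudWatchConnection named cw_conn and boto's MetricAlarm (thresholds are illustrative):

scale_out_alarm = MetricAlarm(name='scale_out_on_cpu', namespace='AWS/EC2',
                              metric='CPUUtilization', statistic='Average',
                              comparison='>', threshold=80, period=60,
                              evaluation_periods=1,
                              alarm_actions=[ScaleOut.policy_arn],
                              dimensions={'AutoScalingGroupName': 'Project22group'})
cw_conn.create_alarm(scale_out_alarm)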
Example #14
import datetime
import logging

import boto
from boto.ec2.regioninfo import RegionInfo
from boto.ec2.autoscale import (AutoScaleConnection, LaunchConfiguration,
                                AutoScalingGroup, Tag)

LOG = logging.getLogger(__name__)
# assumed definition; the original fragment does not show this constant
VALID_RUN_STATES = ['pending', 'running']


class Cloud(object):
    def __init__(self, cloud_config):
        self.config = cloud_config
        self.all_instances = []
        self.failed_launch = False
        self.failed_count = 0
        self.failed_last_valid_count = 0
        self._conn = None
        self._as_conn = None
        self._lc = None
        self._asg = None
        self._last_asg_launch_attempt = None
        self.maxed = False
        self._last_launch_attempt = datetime.datetime.utcnow()
        self._initialize()

    def _create_connection(self):
        LOG.debug("Creating connection for %s" % self.config.name)
        self._conn = boto.connect_ec2(self.config.access_id,
                                      self.config.secret_key,
                                      validate_certs=False)
        self._conn.host = self.config.cloud_uri
        self._conn.port = self.config.cloud_port

    def _create_autoscale_connection(self):
        LOG.debug("Creating autoscale connection for %s" % self.config.name)
        region = RegionInfo(name=self.config.cloud_type,
                            endpoint=self.config.as_uri)
        self._as_conn = AutoScaleConnection(
            aws_access_key_id=self.config.access_id,
            aws_secret_access_key=self.config.secret_key,
            is_secure=True,
            port=self.config.as_port,
            region=region,
            validate_certs=False)

    def _create_or_set_launch_configuration(self):
        name = self.config.lc_name
        if not self._lc:
            LOG.debug("Attempting to load launch configuration: %s" % (name))
            lc = self._as_conn.get_all_launch_configurations(names=[name])
            if len(lc) == 1:
                LOG.debug("Launch configuration %s found." % (name))
                self._lc = lc[0]
        if not self._lc:
            #TODO(pdmars): key and security groups are hardcoded for now, gross
            if self.config.user_data_file is not None:
                user_data_file = self.config.user_data_file
                with open(user_data_file) as f:
                    user_data = f.read()
            else:
                user_data = None
            LOG.debug("Creating launch configuration %s" % name)
            LOG.debug("\tname: %s" % name)
            LOG.debug("\timage_id: %s" % self.config.image_id)
            LOG.debug("\tinstance_type: %s" % self.config.instance_type)
            LOG.debug("\tuser_data: %s" % user_data)
            self._lc = LaunchConfiguration(
                name=name,
                image_id=self.config.image_id,
                key_name="phantomkey",
                security_groups=['default'],
                instance_type=self.config.instance_type,
                user_data=user_data)
            self._as_conn.create_launch_configuration(self._lc)

    def _create_or_set_autoscale_group(self):
        name = self.config.asg_name
        if not self._asg:
            LOG.debug("Attempting to load autoscale group: %s" % name)
            asg = self._as_conn.get_all_groups(names=[name])
            LOG.debug("Autoscale group: %s" % asg)
            if len(asg) == 1:
                LOG.debug("Autoscale group %s found." % name)
                self._asg = asg[0]
        if not self._asg:
            # TODO(pdmars): more hard coded grossness, for now
            try:
                cloud_guess = self.config.lc_name.split("@")[1].strip()
            except Exception as e:
                LOG.warn("Unable to guess cloud for auto scale tags")
                LOG.warn("Setting cloud to hotel")
                cloud_guess = "hotel"
            policy_name_key = "PHANTOM_DEFINITION"
            policy_name = "error_overflow_n_preserving"
            ordered_clouds_key = "clouds"
            n_preserve_key = "minimum_vms"
            ordered_clouds = cloud_guess + ":-1"
            n_preserve = 0
            policy_tag = Tag(connection=self._as_conn, key=policy_name_key,
                             value=policy_name, resource_id=name)
            clouds_tag = Tag(connection=self._as_conn, key=ordered_clouds_key,
                             value=ordered_clouds, resource_id=name)
            npreserve_tag = Tag(connection=self._as_conn, key=n_preserve_key,
                                value=n_preserve, resource_id=name)
            tags = [policy_tag, clouds_tag, npreserve_tag]
            zones = [self.config.az]
            LOG.debug("Creating autoscale group %s" % name)
            LOG.debug("\tname: %s" % name)
            LOG.debug("\tavailability_zones: %s" % zones)
            LOG.debug("\tlaunch_config: %s" % self._lc)
            self._asg = AutoScalingGroup(group_name=name,
                                         availability_zones=zones,
                                         min_size=0,
                                         max_size=0,
                                         launch_config=self._lc,
                                         tags=tags)
            self._as_conn.create_auto_scaling_group(self._asg)

    def _initialize(self):
        LOG.debug("Initializing %s" % self.config.name)
        self._create_connection()
        self._create_autoscale_connection()
        self._create_or_set_launch_configuration()
        self._create_or_set_autoscale_group()
        LOG.debug("Initialization complete for %s" % self.config.name)

    def get_valid_instances(self):
        return self.all_instances

    def _refresh_instances(self):
        LOG.debug("%s: getting instance information" % self.config.name)
        self.all_instances = []
        instances = []
        as_instances = self._asg.instances
        as_instance_ids = [i.instance_id for i in as_instances]
        reservations = self._conn.get_all_instances()
        for reservation in reservations:
            for instance in reservation.instances:
                if instance.id in as_instance_ids:
                    if instance.state in VALID_RUN_STATES:
                        instances.append(instance)
        for instance in instances:
            self.all_instances.append(instance)
        num_instances = len(self.all_instances)
        LOG.debug("%s: updated %d instances" % (self.config.name,
                                                num_instances))
        if num_instances >= self.config.max_instances:
            LOG.warn("%s reached the max (%s) instances: %s" % (
                self.config.name, self.config.max_instances,
                num_instances))
            self.maxed = True
        else:
            self.maxed = False

    def _refresh_asg(self):
        LOG.debug("%s: refreshing autoscale group" % self.config.name)
        asg_name = self.config.asg_name
        asgs = self._as_conn.get_all_groups(names=[asg_name])
        if len(asgs) == 1:
            self._asg = asgs[0]
            LOG.debug("\trefreshed autoscale group: %s" % asg_name)
        else:
            LOG.warn("\tunable to refresh autoscale group: %s" % asg_name)

    def refresh(self, cluster):
        self._refresh_asg()
        self._refresh_instances()

    def get_total_num_valid_cores(self):
        LOG.debug("%s: getting number of valid cores" % self.config.name)
        num_valid_instances = len(self.get_valid_instances())
        total_valid_cores = num_valid_instances * self.config.instance_cores
        num_desired_instances = self._asg.desired_capacity
        num_desired_cores = num_desired_instances * self.config.instance_cores
        if num_desired_cores != total_valid_cores:
            LOG.debug("\tmismatching core counts")
            LOG.debug("\tnum_desired_cores: %d" % num_desired_cores)
            LOG.debug("\ttotal_valid_cores: %d" % total_valid_cores)
        return total_valid_cores

    def get_instance_by_id(self, id):
        LOG.debug("Searching for instance %s" % id)
        for instance in self.all_instances:
            if instance.id == id:
                LOG.debug("Found instance %s" % id)
                return instance
        return None

    def get_instance_ids_for_public_dns_names(self, public_dns_names):
        instance_ids = []
        for instance in self.all_instances:
            if instance.public_dns_name in public_dns_names:
                instance_ids.append(instance.id)
        return instance_ids

    def get_public_dns_names_close_to_charge(self):
        instances_close_to_charge = []
        sleep_secs = self.config.get_loop_sleep_secs()
        cur_utc_time = datetime.datetime.utcnow()
        valid_instances = self.get_valid_instances()
        time_fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
        for instance in valid_instances:
            launch_time = datetime.datetime.strptime(instance.launch_time,
                                                     time_fmt)
            time_diff = cur_utc_time - launch_time
            # Ignores microseconds
            time_diff_secs = time_diff.seconds + time_diff.days * 24 * 3600
            cur_charge_secs = time_diff_secs % self.config.charge_time_secs
            secs_to_charge = self.config.charge_time_secs - cur_charge_secs
            LOG.debug("%s:%s: charge: %d; current: %d; to charge: %d" % (
                instance.id, instance.public_dns_name,
                self.config.charge_time_secs,
                cur_charge_secs, secs_to_charge))
            if secs_to_charge < (3 * sleep_secs):
                instances_close_to_charge.append(instance.public_dns_name)
        return instances_close_to_charge
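        # Worked example with hypothetical numbers: charge_time_secs = 3600,
        # loop sleep_secs = 60, and an instance launched 3500 seconds ago:
        #     cur_charge_secs = 3500 % 3600 = 3500
        #     secs_to_charge  = 3600 - 3500 = 100
        # 100 < 3 * 60 = 180, so the instance is close to its next charge
        # boundary and its public DNS name is returned as a candidate.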

    def delete_instances(self, instance_ids=None):
        if not instance_ids:
            return
        LOG.debug("Deleting instances: %s" % instance_ids)
        # TODO(pdmars): this has the potential to kill instances running jobs
        # maybe I should err on the side of having extra instances if the
        # capacity is higher than the cloud can currently support
        num_instances = len(self.all_instances)
        if ((self._asg.desired_capacity > num_instances) and
                (num_instances > 0)):
            LOG.warn("Desired capacity is greater than num_instances running")
            LOG.warn("Adjusting desired capacity to match")
            self.set_capacity(num_instances)
        for instance_id in instance_ids:
            self._as_conn.terminate_instance(instance_id)
            # TODO(pdmars): due to a bug in phantom, maybe this will help
            # 2013/04/05: this might not be relevant anymore
            time.sleep(.1)

    def launch_autoscale_instances(self, num_instances=1):
        new_capacity = self._asg.desired_capacity + int(num_instances)
        if new_capacity > self.config.max_instances:
            new_capacity = self.config.max_instances
            LOG.warn("%s can launch %s total instances" % (self.config.name,
                                                           new_capacity))
        self._last_launch_attempt = datetime.datetime.utcnow()
        LOG.debug("Setting cloud capacity for %s to %s" % (self.config.name,
                                                           new_capacity))
        self.set_capacity(new_capacity)

    def set_capacity(self, new_capacity):
        self._asg.set_capacity(new_capacity)
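
For reference, set_capacity on a boto AutoScalingGroup issues the same
SetDesiredCapacity call that AutoScaleConnection exposes directly; a minimal
standalone sketch (the group name and credentials are placeholders):

from boto.ec2.autoscale import AutoScaleConnection

conn = AutoScaleConnection()  # credentials come from the environment/boto config
conn.set_desired_capacity('example-asg', 4)  # honor_cooldown defaults to False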
Exemple #15
0
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        port = 8773
        if clc_host.endswith('amazonaws.com'):
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = None
            port = 443
        self.conn = AutoScaleConnection(access_id,
                                        secret_key,
                                        region=reg,
                                        port=port,
                                        path=path,
                                        is_secure=True,
                                        security_token=token,
                                        debug=0)
        self.conn.APIVersion = '2011-01-01'
        if not clc_host.endswith('amazonaws.com'):
            self.conn.auth_region_name = 'Eucalyptus'
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self,
                                      instance_ids=None,
                                      max_records=None,
                                      next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids,
                                                      max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self,
                             group_name,
                             desired_capacity,
                             honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # note: honor_cooldown is not supported here.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self,
                            instance_id,
                            health_status,
                            should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self,
                                      config_names=None,
                                      max_records=None,
                                      next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names,
                                                      max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj

    # policy related
    def delete_policy(self, policy_name, autoscale_group=None):
        return self.conn.delete_policy(policy_name, autoscale_group)

    def get_all_policies(self,
                         as_group=None,
                         policy_names=None,
                         max_records=None,
                         next_token=None):
        return self.conn.get_all_policies(as_group, policy_names, max_records,
                                          next_token)

    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
        return self.conn.execute_policy(policy_name, as_group, honor_cooldown)

    def create_scaling_policy(self, scaling_policy):
        return self.conn.create_scaling_policy(scaling_policy)

    def get_all_adjustment_types(self):
        return self.conn.get_all_adjustment_types()

    # tag related
    def delete_tags(self, tags):
        return self.conn.delete_tags(tags)

    def get_all_tags(self, filters=None, max_records=None, next_token=None):
        return self.conn.get_all_tags(filters, max_records, next_token)

    def create_or_update_tags(self, tags):
        return self.conn.create_or_update_tags(tags)
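
A minimal usage sketch (the host and credentials are placeholders): a
Eucalyptus cloud controller keeps port 8773 and the /services/AutoScaling
path, while a host ending in amazonaws.com is rewritten to the public
AutoScaling endpoint on port 443.

scale = BotoScaleInterface('clc.example.internal', 'my-access-id',
                           'my-secret-key', token=None)
print scale.get_all_groups(max_records=10)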
Exemple #16
0
class AutoScale:
    def __init__(self, args):
        """
        Initializing basic variables needed for auto scaling
        """
        self.configs = ConfigParser.RawConfigParser()
        self.args = args
        self.test_props = {}
        self.props = {}
        self.boto_props = {}
        self.ec2_connection = EC2Connection(self.args.access_key,
                                            self.args.secret_key)
        self.autoscale_connection = AutoScaleConnection(
            self.args.access_key, self.args.secret_key)
        self.elb_connection = ELBConnection(self.args.access_key,
                                            self.args.secret_key)
        self.cw_connection = CloudWatchConnection(self.args.access_key,
                                                  self.args.secret_key)
        self.firstInstance = None
        self.launchConfiguration = None
        self.healthCheck = None

    def loadConfigs(self):
        """
        FIX ME: currently doesn't do anything useful.
        Loads configuration from the boto config files if present;
        otherwise the parameters passed by the user are used.
        """
        if os.path.isfile("/etc/boto.cfg"):
            self.configs.read("/etc/boto.cfg")
            self.populateConfigs(self.configs.sections())
        user_cfg = os.path.expanduser("~/.boto")
        if os.path.isfile(user_cfg):
            self.configs.read(user_cfg)
            self.populateConfigs(self.configs.sections())

        print ">>> Loaded configs"

    def populateConfigs(self, sections):
        for section in sections:
            self.boto_props[section] = self.configs.items(section)
            for key, value in self.boto_props[section]:
                if key not in self.props:
                    self.props[key] = value

    def createLaunchConfiguration(self, lc_name, ami_id, key_name):
        """
        Creates launch configuration for the auto scaling cluster
        """
        self.launchConfiguration = LaunchConfiguration(name=lc_name,
                                                       image_id=ami_id,
                                                       key_name=key_name)
        self.autoscale_connection.create_launch_configuration(
            self.launchConfiguration)
        print ">>> Created launch configuration: " + lc_name

    def createAutoScaleGroup(self, asg_name):
        """
        Create an auto scaling group for the auto scaling cluster
        """
        autoScalingGroup = AutoScalingGroup(
            group_name=asg_name,
            load_balancers=[self.args.lb_name],
            launch_config=self.launchConfiguration,
            min_size=self.args.min_size,
            max_size=self.args.max_size,
            availability_zones=['us-east-1a'])
        self.autoscale_connection.create_auto_scaling_group(autoScalingGroup)
        print ">>> Created auto scaling group: " + asg_name

    def createTrigger(self, trigger_name, measure, asg_name):
        """
        Creates a CloudWatch alarm that triggers the scaling policy when the
        given metric breaches its threshold
        """
        alarm_actions = []
        dimensions = {"AutoScalingGroupName": asg_name}
        policies = self.autoscale_connection.get_all_policies(
            as_group=self.args.asg_name, policy_names=[self.args.asp_name])
        for policy in policies:
            alarm_actions.append(policy.policy_arn)
        alarm = MetricAlarm(name=trigger_name,
                            namespace="AWS/EC2",
                            metric=measure,
                            statistic="Average",
                            comparison=">=",
                            threshold=50,
                            period=60,
                            unit="Percent",
                            evaluation_periods=2,
                            alarm_actions=alarm_actions,
                            dimensions=dimensions)

        self.cw_connection.create_alarm(alarm)
        print ">>> Created trigger: " + self.args.trigger

    def createAutoScalePolicy(self, asp_name):
        """
        Creates auto scaling policies to add/remove an instance from the auto scaling cluster
        """
        self.autoScalingUpPolicy = ScalingPolicy(
            name=asp_name + '-up',
            adjustment_type="ChangeInCapacity",
            as_name=self.args.asg_name,
            scaling_adjustment=1,
            cooldown=180)
        self.autoScalingDownPolicy = ScalingPolicy(
            name=asp_name + '-down',
            adjustment_type="ChangeInCapacity",
            as_name=self.args.asg_name,
            scaling_adjustment=-1,
            cooldown=180)

        self.autoscale_connection.create_scaling_policy(
            self.autoScalingUpPolicy)
        self.autoscale_connection.create_scaling_policy(
            self.autoScalingDownPolicy)

        print ">>> Created auto scaling policy: " + asp_name

    def configureHealthCheck(self, target):
        """
        Configures health check for the cluster
        """
        self.healthCheck = HealthCheck(target=target, timeout=5)
        print ">>> Configured health check for: " + target

    def createLoadBalancer(self, lb_name, region, lb_port, instance_port,
                           protocol):
        """
        Creates a load balancer for the cluster, unless one with this name
        already exists
        """
        listeners = [(int(lb_port), int(instance_port), protocol)]
        lbs = self.elb_connection.get_all_load_balancers()
        if any(lb.name == lb_name for lb in lbs):
            print "Load balancer with name '" + lb_name + "' already exists"
        else:
            self.elb_connection.create_load_balancer(
                lb_name, [region], listeners)
            self.elb_connection.configure_health_check(
                name=lb_name, health_check=self.healthCheck)
            print ">>> Created load balancer: " + lb_name

    def startInstance(self, image_id, key_name, region, instance_type):
        """
        Starts the first instance, which will serve requests irrespective of
        the auto scaling instances.
        """
        reservation = self.ec2_connection.run_instances(
            image_id=image_id,
            min_count=1,
            max_count=1,
            placement=region,
            key_name=key_name,
            instance_type=instance_type)
        #        for instance in reservation.instances:
        #            instance.add_tag('node', '0')
        #            break

        self.firstInstance = reservation.instances[0].id
        print ">>> Started instance: ", self.firstInstance

    def registerInstanceToELB(self, lb_name):
        """
        Register the first instance started to the Elastic Load Balancer.
        """
        self.elb_connection.register_instances(load_balancer_name=lb_name,
                                               instances=[self.firstInstance])
        print ">>> Registered instance '", self.firstInstance, "' to load balancer '" + lb_name + "'"

    def setUp(self):
        """
        Sets up auto scaling for the application
        """
        # STEP 1: Load the configurations
        self.loadConfigs()
        # STEP 2: Configure the health check for the instances
        self.configureHealthCheck(self.args.lb_target)
        # STEP 3: Create a load balancer
        self.createLoadBalancer(self.args.lb_name, self.args.region,
                                self.args.lb_port, self.args.instance_port,
                                self.args.protocol)
        # STEP 4: Start the first instance
        self.startInstance(self.args.ami_id, self.args.key_name,
                           self.args.region, self.args.instance_type)
        # STEP 5: Register the instance to the load balancer created in STEP 3
        self.registerInstanceToELB(self.args.lb_name)
        # STEP 6: Create launch configuration to launch instances by auto scale
        self.createLaunchConfiguration(self.args.lc_name, self.args.ami_id,
                                       self.args.key_name)
        # STEP 7: Create an auto scaling group to manage the instances started by auto scaling
        self.createAutoScaleGroup(self.args.asg_name)
        # STEP 8: Create auto scaling policies to add/remove a node
        self.createAutoScalePolicy(self.args.asp_name)
        # STEP 9: Create a trigger, so that auto scaling can start or remove
        # an instance from the auto scaling group
        self.createTrigger(self.args.trigger, self.args.measure,
                           self.args.asg_name)
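
The class assumes an args namespace carrying every knob referenced above; a
hedged argparse sketch (the flag names are illustrative, only the dest values
are dictated by the code):

import argparse

parser = argparse.ArgumentParser(description='Bootstrap an auto scaling cluster')
for flag in ('access_key', 'secret_key', 'region', 'ami_id', 'key_name',
             'instance_type', 'lb_name', 'lb_target', 'lb_port',
             'instance_port', 'protocol', 'lc_name', 'asg_name', 'asp_name',
             'trigger', 'measure'):
    parser.add_argument('--' + flag.replace('_', '-'), dest=flag, required=True)
parser.add_argument('--min-size', dest='min_size', type=int, default=1)
parser.add_argument('--max-size', dest='max_size', type=int, default=4)

AutoScale(parser.parse_args()).setUp()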
Exemple #17
0
def setup(CONF):
  global out

  lookup_tbl = {
    'name': CONF['NAME'],
  }

  conn = AutoScaleConnection()

  out['conn'] = conn

  # Launch Configurations
  LC = CONF['LC']
  LC['name'] = LC['name'] % lookup_tbl

  lc = LaunchConfiguration(**LC)
  conn.create_launch_configuration(lc)
  out['lc'] = lc

  # Auto Scaling Group
  ASG = CONF['ASG']
  ASG['group_name'] = ASG['group_name'] % lookup_tbl
  ASG['launch_config'] = lc

  groups = conn.get_all_groups(names=[ASG['group_name']])
  if groups:
    # update
    asg = groups[0]
    for k in ASG:
      # AutoScalingGroup isn't a dict; only overwrite attributes it already has
      if hasattr(asg, k):
        asg.__setattr__(k, ASG[k])
    asg.launch_config_name = LC['name']
    asg.update()
    out['asg'] = asg
  else:
    # create
    asg = AutoScalingGroup(**ASG)
    conn.create_auto_scaling_group(asg)
    out['asg'] = asg

  # ASG Tags
  ASG_TAGS = CONF['ASG_TAGS']
  for i in ASG_TAGS:
    if 'propagate_at_launch' not in i:
      i['propagate_at_launch'] = True
    i['key'] = i['key'] % lookup_tbl
    i['value'] = i['value'] % lookup_tbl

  tags = [
      Tag(**dict(x.items() + [('resource_id', ASG['group_name'])])) for x in ASG_TAGS
  ]
  conn.create_or_update_tags(tags)

  # Triggers (Scaling Policy / Cloudwatch Alarm)
  conn_cw = connect_to_region(CONF['REGION'])

  TRIGGERS = CONF['TRIGGERS']
  for T in TRIGGERS:
    T['policy']['name'] = T['policy']['name'] % lookup_tbl
    T['policy']['as_name'] = ASG['group_name']
    T['alarm']['dimensions'] = {'AutoScalingGroupName': ASG['group_name']}
    T['alarm']['alarm_actions'] = None

    if 'name' in T['alarm']:
      T['alarm']['name'] = T['alarm']['name'] % lookup_tbl
    else:
      T['alarm']['name'] = T['policy']['name']

    # Policies are safely overwritten, so not checked for existence
    conn.create_scaling_policy(ScalingPolicy(**T['policy']))
    policy = conn.get_all_policies(as_group=ASG['group_name'], policy_names=[T['policy']['name']])[0]

    T['alarm']['alarm_actions'] = [policy.policy_arn]
    # Alarms, like policies, are safely overwritten, so no existence check
    conn_cw.create_alarm(MetricAlarm(**T['alarm']))
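
setup() is driven entirely by the CONF mapping; a hedged sketch of its
expected shape (every value below is an illustrative placeholder inferred
from the lookups above, not from the original):

CONF = {
  'NAME': 'web',
  'REGION': 'us-east-1',
  'LC': {
    'name': '%(name)s-lc',  # expanded through lookup_tbl
    'image_id': 'ami-12345678',
    'key_name': 'deploy-key',
    'instance_type': 'm1.small',
  },
  'ASG': {
    'group_name': '%(name)s-asg',
    'availability_zones': ['us-east-1a'],
    'min_size': 1,
    'max_size': 4,
  },
  'ASG_TAGS': [
    {'key': 'Name', 'value': '%(name)s'},  # propagate_at_launch defaults to True
  ],
  'TRIGGERS': [
    {'policy': {'name': '%(name)s-up', 'adjustment_type': 'ChangeInCapacity',
                'scaling_adjustment': 1, 'cooldown': 180},
     'alarm': {'metric': 'CPUUtilization', 'namespace': 'AWS/EC2',
               'statistic': 'Average', 'comparison': '>=', 'threshold': 70,
               'period': 60, 'evaluation_periods': 2}},
  ],
}
Exemple #18
0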
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        self.access_id = access_id
        self.secret_key = secret_key
        self.token = token
        self.set_endpoint(clc_host)

    def set_endpoint(self, endpoint):
        #boto.set_stream_logger('scale')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
        port = 8773
        if endpoint.endswith('amazonaws.com'):
            endpoint = endpoint.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = RegionInfo(endpoint=endpoint)
            port = 443
        self.conn = AutoScaleConnection(self.access_id, self.secret_key, region=reg,
                                  port=port, path=path,
                                  is_secure=True, security_token=self.token, debug=0)
        self.conn.APIVersion = '2011-01-01'
        if not endpoint.endswith('amazonaws.com'):
            self.conn.auth_region_name = 'Eucalyptus'
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # note: honor_cooldown is not supported here.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj

    # policy related
    def delete_policy(self, policy_name, autoscale_group=None):
        return self.conn.delete_policy(policy_name, autoscale_group)

    def get_all_policies(self, as_group=None, policy_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_policies(as_group, policy_names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Policies.json")
        return obj

    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
        return self.conn.execute_policy(policy_name, as_group, honor_cooldown)

    def create_scaling_policy(self, scaling_policy):
        return self.conn.create_scaling_policy(scaling_policy)

    def get_all_adjustment_types(self):
        return self.conn.get_all_adjustment_types()

    # tag related
    def delete_tags(self, tags):
        return self.conn.delete_tags(tags)

    def get_all_tags(self, filters=None, max_records=None, next_token=None):
        obj = self.conn.get_all_tags(filters, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Tags.json")
        return obj

    def create_or_update_tags(self, tags):
        return self.conn.create_or_update_tags(tags)
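Exemple #19
0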
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        port = 8773
        if clc_host.endswith('amazonaws.com'):
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = None
            port = 443
        # plain string comparison mis-orders versions like '2.10' < '2.6';
        # compare the parsed (major, minor) tuple instead
        if tuple(int(x) for x in boto.__version__.split('.')[:2]) < (2, 6):
            self.conn = AutoScaleConnection(access_id,
                                            secret_key,
                                            region=reg,
                                            port=port,
                                            path=path,
                                            is_secure=True,
                                            security_token=token,
                                            debug=0)
        else:
            self.conn = AutoScaleConnection(access_id,
                                            secret_key,
                                            region=reg,
                                            port=port,
                                            path=path,
                                            validate_certs=False,
                                            is_secure=True,
                                            security_token=token,
                                            debug=0)
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self,
                                      instance_ids=None,
                                      max_records=None,
                                      next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids,
                                                      max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self,
                             group_name,
                             desired_capacity,
                             honor_cooldown=False):
        return self.conn.set_desired_capacity(group_name, desired_capacity,
                                              honor_cooldown)

    def set_instance_health(self,
                            instance_id,
                            health_status,
                            should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names=None,
                                      max_records=None, next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names,
                                                      max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj