def test_basic(self):
    # NB: as it says on the tin these are really basic tests that only
    # (lightly) exercise read-only behaviour - and that's only if you
    # have any autoscale groups to introspect. It's useful, however, to
    # catch simple errors
    print('--- running %s tests ---' % self.__class__.__name__)
    c = AutoScaleConnection()
    self.assertTrue(repr(c).startswith('AutoScaleConnection'))
    groups = c.get_all_groups()
    for group in groups:
        # The original used assertTrue(type(x), Cls), which can never
        # fail: the class was interpreted as the assertion *message*.
        # assertIsInstance performs the intended type check.
        self.assertIsInstance(group, AutoScalingGroup)
        # get activities
        activities = group.get_activities()
        for activity in activities:
            self.assertIsInstance(activity, Activity)
    # get launch configs
    configs = c.get_all_launch_configurations()
    for config in configs:
        self.assertIsInstance(config, LaunchConfiguration)
    # get policies
    policies = c.get_all_policies()
    for policy in policies:
        self.assertIsInstance(policy, ScalingPolicy)
    # get scheduled actions
    actions = c.get_all_scheduled_actions()
    for action in actions:
        self.assertIsInstance(action, ScheduledUpdateGroupAction)
    # get instances
    instances = c.get_all_autoscaling_instances()
    for instance in instances:
        self.assertIsInstance(instance, Instance)
    # get all scaling process types
    ptypes = c.get_all_scaling_process_types()
    for ptype in ptypes:
        self.assertIsInstance(ptype, ProcessType)
    # get adjustment types
    adjustments = c.get_all_adjustment_types()
    for adjustment in adjustments:
        self.assertIsInstance(adjustment, AdjustmentType)
    # get metrics collection types
    types = c.get_all_metric_collection_types()
    self.assertIsInstance(types, MetricCollectionTypes)
    print('--- tests completed ---')
class BotoScaleInterface(ScaleInterface):
    """Boto-backed ScaleInterface: proxies autoscaling calls to an
    AutoScaleConnection and can optionally dump each response as JSON
    mock data (when ``saveclcdata`` is set).
    """

    # Class-level defaults, as declared by the original interface.
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        self.access_id = access_id
        self.secret_key = secret_key
        self.token = token
        self.set_endpoint(clc_host)

    def set_endpoint(self, endpoint):
        """(Re)connect: Eucalyptus on port 8773 by default, or the AWS
        autoscaling service on 443 when *endpoint* is an amazonaws.com host."""
        #boto.set_stream_logger('scale')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
        port = 8773
        if endpoint.endswith('amazonaws.com'):
            # Rewrite the EC2 hostname into the autoscaling service host.
            endpoint = endpoint.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = RegionInfo(endpoint=endpoint)
            port = 443
        self.conn = AutoScaleConnection(self.access_id, self.secret_key,
                                        region=reg, port=port, path=path,
                                        is_secure=True,
                                        security_token=self.token, debug=0)
        self.conn.APIVersion = '2011-01-01'
        if not endpoint.endswith('amazonaws.com'):
            # Eucalyptus clouds need an explicit auth region name.
            self.conn.auth_region_name = 'Eucalyptus'
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        # 'with' guarantees the handle is closed even when json.dump
        # raises (the original leaked the file object on error).
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None,
                                      max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids,
                                                      max_records,
                                                      next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity,
                             honor_cooldown=False):
        # NOTE: honor_cooldown is accepted for interface compatibility
        # but group.set_capacity() does not support it.
        group = self.conn.get_all_groups([group_name])[0]
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status,
                            should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        # Re-bind the group to this interface's connection before updating.
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names=None,
                                      max_records=None, next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names,
                                                      max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj

    # policy related
    def delete_policy(self, policy_name, autoscale_group=None):
        return self.conn.delete_policy(policy_name, autoscale_group)

    def get_all_policies(self, as_group=None, policy_names=None,
                         max_records=None, next_token=None):
        obj = self.conn.get_all_policies(as_group, policy_names,
                                         max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Policies.json")
        return obj

    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
        return self.conn.execute_policy(policy_name, as_group, honor_cooldown)

    def create_scaling_policy(self, scaling_policy):
        return self.conn.create_scaling_policy(scaling_policy)

    def get_all_adjustment_types(self):
        return self.conn.get_all_adjustment_types()

    # tag related
    def delete_tags(self, tags):
        return self.conn.delete_tags(tags)

    def get_all_tags(self, filters=None, max_records=None, next_token=None):
        obj = self.conn.get_all_tags(filters, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Tags.json")
        return obj

    def create_or_update_tags(self, tags):
        return self.conn.create_or_update_tags(tags)
def create_AutoScaling():
    """Create the full autoscaling stack for group 'my_group': launch
    configuration, autoscaling group, up/down scaling policies, and the
    CloudWatch CPU alarms that drive them.

    Side effects: assigns the module globals ``lc`` (LaunchConfiguration)
    and ``ag`` (AutoScalingGroup). Relies on module-level credentials and
    constants (AWSAccessKeyId, DATA_CEN_AMI, MIN_SIZE, ...).
    """
    print "Creating AutoScaling..."
    # establish connection
    as_conn = AutoScaleConnection(AWSAccessKeyId, AWSSecretKey)
    # create launch configuration
    global lc
    lc = LaunchConfiguration(name='lc', image_id=DATA_CEN_AMI,
                             key_name=ACCESS_KEY,
                             instance_monitoring=True,
                             security_groups=[SECURITY_GRP],
                             instance_type=MACHINE_TYPE)
    as_conn.create_launch_configuration(lc)
    # create tag for autoscaling group
    as_tag = Tag(key="Project", value="2.2",
                 propagate_at_launch=True, resource_id='my_group')
    # create autoscaling group
    global ag
    ag = AutoScalingGroup(group_name='my_group',
                          load_balancers=['myELB'],
                          availability_zones=['us-east-1a'],
                          launch_config=lc, min_size=MIN_SIZE,
                          max_size=MAX_SIZE,
                          connection=as_conn,
                          tags=[as_tag])
    # associate the autoscaling group with launch configuration
    as_conn.create_auto_scaling_group(ag)
    # build the scale policy (+1 / -1 instance, 60 s cooldown)
    scale_up_policy = ScalingPolicy(name='scale_up',
                                    adjustment_type='ChangeInCapacity',
                                    as_name='my_group',
                                    scaling_adjustment=1, cooldown=60)
    scale_down_policy = ScalingPolicy(name='scale_down',
                                      adjustment_type='ChangeInCapacity',
                                      as_name='my_group',
                                      scaling_adjustment=-1, cooldown=60)
    # register the scale policy
    as_conn.create_scaling_policy(scale_up_policy)
    as_conn.create_scaling_policy(scale_down_policy)
    # re-fetch the policies: the server-side copies carry the policy ARNs
    # needed for the alarm actions below
    scale_up_policy = as_conn.get_all_policies(as_group='my_group',
                                               policy_names=['scale_up'])[0]
    scale_down_policy = as_conn.get_all_policies(as_group='my_group',
                                                 policy_names=['scale_down'])[0]
    # create cloudwatch alarm
    cloudwatch = CloudWatchConnection(aws_access_key_id=AWSAccessKeyId,
                                      aws_secret_access_key=AWSSecretKey,
                                      is_secure=True)
                                      # region='us-east-1a')
    # associate cloudwatch with alarm: alarms are scoped to the group
    alarm_dimensions = {"AutoScalingGroupName": 'my_group'}
    # create scale up alarm: avg CPU > 50% over two 60 s periods
    scale_up_alarm = MetricAlarm(name='scale_up_on_cpu',
                                 namespace='AWS/EC2',
                                 metric='CPUUtilization',
                                 statistic='Average',
                                 comparison='>', threshold='50',
                                 period='60', evaluation_periods=2,
                                 alarm_actions=[scale_up_policy.policy_arn],
                                 dimensions=alarm_dimensions)
    cloudwatch.create_alarm(scale_up_alarm)
    # create scale down alarm: avg CPU < 20% over one 60 s period
    scale_down_alarm = MetricAlarm(
        name='scale_down_on_cpu', namespace='AWS/EC2',
        metric='CPUUtilization', statistic='Average',
        comparison='<', threshold='20', period='60',
        evaluation_periods=1,
        alarm_actions=[scale_down_policy.policy_arn],
        dimensions=alarm_dimensions)
    cloudwatch.create_alarm(scale_down_alarm)
    print "AutoScaling created successfully"
class AutoScale: def __init__(self, args): """ Initializing basic variables needed for auto scaling """ self.configs = ConfigParser.RawConfigParser() self.args = args self.test_props = {} self.props = {} self.ec2_connection = EC2Connection(self.args.access_key, self.args.secret_key) self.autoscale_connection = AutoScaleConnection( self.args.access_key, self.args.secret_key) self.elb_connection = ELBConnection(self.args.access_key, self.args.secret_key) self.cw_connection = CloudWatchConnection(self.args.access_key, self.args.secret_key) self.firstInstance = None self.launchConfiguration = None self.healthCheck = None def loadConfigs(self): """ FIX ME: Currently doesnt do anything This method will load the configurations from boto config file if present else will accept parameters passed by user. """ if os.path.isfile("/etc/boto.cfg"): self.configs.read("/etc/boto.cfg") conf = self.configs.sections() self.populateConfigs(conf) if os.path.isfile("~/.boto"): self.configs.read("~/.boto") conf = self.configs.sections() self.populateConfigs(conf) print ">>> Loaded configs" def populateConfigs(self, sections): for section in sections: self.boto_props[section] = self.configs.items(section) for item in self.boto_props[section]: key, value = item if not self.props.has_key(key): self.props[key] = value def createLaunchConfiguration(self, lc_name, ami_id, key_name): """ Creates launch configuration for the auto scaling cluster """ self.launchConfiguration = LaunchConfiguration(name=lc_name, image_id=ami_id, key_name=key_name) self.autoscale_connection.create_launch_configuration( self.launchConfiguration) print ">>> Created launch configuration: " + lc_name def createAutoScaleGroup(self, asg_name): """ Create a Auto scaling group for the auto scaling cluster """ autoScalingGroup = AutoScalingGroup( group_name=asg_name, load_balancers=[self.args.lb_name], launch_config=self.launchConfiguration, min_size=self.args.min_size, max_size=self.args.max_size, 
availability_zones=['us-east-1a']) self.autoscale_connection.create_auto_scaling_group(autoScalingGroup) print ">>> Created auto scaling group: " + asg_name def createTrigger(self, trigger_name, measure, asg_name): """ Trigger to spawn new instances as per specific metrics """ alarm_actions = [] dimensions = {"AutoScalingGroupName": asg_name} policies = self.autoscale_connection.get_all_policies( as_group=self.args.asg_name, policy_names=[self.args.asp_name]) for policy in policies: alarm_actions.append(policy.policy_arn) alarm = MetricAlarm(name=trigger_name, namespace="AWS/EC2", metric=measure, statistic="Average", comparison=">=", threshold=50, period=60, unit="Percent", evaluation_periods=2, alarm_actions=alarm_actions, dimensions=dimensions) self.cw_connection.create_alarm(alarm) print ">>> Created trigger: " + self.args.trigger def createAutoScalePolicy(self, asp_name): """ Creates a Auto scaling policy to Add/Remove a instance from auto scaling cluster """ self.autoScalingUpPolicy = ScalingPolicy( name=asp_name + '-up', adjustment_type="ChangeInCapacity", as_name=self.args.asg_name, scaling_adjustment=1, cooldown=180) self.autoScalingDownPolicy = ScalingPolicy( name=asp_name + '-down', adjustment_type="ChangeInCapacity", as_name=self.args.asg_name, scaling_adjustment=-1, cooldown=180) self.autoscale_connection.create_scaling_policy( self.autoScalingUpPolicy) self.autoscale_connection.create_scaling_policy( self.autoScalingDownPolicy) print ">>> Created auto scaling policy: " + asp_name def configureHealthCheck(self, target): """ Configures health check for the cluster """ self.healthCheck = HealthCheck(target=target, timeout=5) print ">>> Configured health check for: " + target def createLoadBalancer(self, lb_name, region, lb_port, instance_port, protocol): """ Creates a load balancer for cluster """ listener = (int(lb_port), int(instance_port), protocol) tuple_list = [] tuple_list.append(listener) lbs = self.elb_connection.get_all_load_balancers() for lb in 
lbs: if lb.name != lb_name: self.elb_connection.create_load_balancer( lb_name, [region], tuple_list) self.elb_connection.configure_health_check( name=lb_name, health_check=self.healthCheck) print ">>> Created load balancer: " + lb_name else: print "Load balancer with name '" + lb_name + "' already exists" def startInstance(self, image_id, key_name, region, instance_type): """ Starts the first instance which will be serving requests irrespective of auto scaling instances. """ reservation = self.ec2_connection.run_instances( image_id=image_id, min_count=1, max_count=1, placement=region, key_name=key_name, instance_type=instance_type) # for instance in reservation.instances: # instance.add_tag('node', '0') # break self.firstInstance = reservation.instances[0].id.split('\'')[0] print ">>> Started instance: ", self.firstInstance def registerInstanceToELB(self, lb_name): """ Register the first instance started to the Elastic Load Balancer. """ self.elb_connection.register_instances(load_balancer_name=lb_name, instances=[self.firstInstance]) print ">>> Registered instance '", self.firstInstance, "' to load balancer '" + lb_name + "'" def setUp(self): """ Set's up the auto scaling for the application """ # STEP 1: Load the configurations self.loadConfigs() # STEP 2: Configure the health check for the instances self.configureHealthCheck(self.args.lb_target) # STEP 3: Create a load balancer self.createLoadBalancer(self.args.lb_name, self.args.region, self.args.lb_port, self.args.instance_port, self.args.protocol) # STEP 4: Start the first instance self.startInstance(self.args.ami_id, self.args.key_name, self.args.region, self.args.instance_type) # STEP 5: Register the instance to the load balancer created in STEP 4 self.registerInstanceToELB(self.args.lb_name) # STEP 6: Create launch configuration to launch instances by auto scale self.createLaunchConfiguration(self.args.lc_name, self.args.ami_id, self.args.key_name) # STEP 7: Create a auto scale group which will manage the 
instances started by auto scaling self.createAutoScaleGroup(self.args.asg_name) # STEP 8: Create a auto scaling policy to say add/remove a node self.createAutoScalePolicy(self.args.asp_name) # STEP 9: Create a trigger, so that auto scaling can trigger it to start # or remove a instance from auto scaling group self.createTrigger(self.args.trigger, self.args.measure, self.args.asg_name)
class BotoScaleInterface(ScaleInterface):
    """Boto-backed ScaleInterface proxying autoscaling calls through an
    AutoScaleConnection (Eucalyptus by default, AWS when the host is an
    amazonaws.com endpoint)."""

    # Class-level defaults, as declared by the original interface.
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        port = 8773
        if clc_host.endswith('amazonaws.com'):
            # Rewrite the EC2 hostname into the autoscaling service host;
            # let boto pick the default AWS region info.
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = None
            port = 443
        self.conn = AutoScaleConnection(access_id, secret_key, region=reg,
                                        port=port, path=path,
                                        is_secure=True, security_token=token,
                                        debug=0)
        self.conn.APIVersion = '2011-01-01'
        if not clc_host.endswith('amazonaws.com'):
            # Eucalyptus clouds need an explicit auth region name.
            self.conn.auth_region_name = 'Eucalyptus'
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        # 'with' closes the handle even when json.dump raises (the
        # original leaked the file object on error).
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None,
                                      max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids,
                                                      max_records,
                                                      next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity,
                             honor_cooldown=False):
        # NOTE: honor_cooldown is accepted for interface compatibility
        # but group.set_capacity() does not support it.
        group = self.conn.get_all_groups([group_name])[0]
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status,
                            should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        # Re-bind the group to this interface's connection before updating.
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names=None,
                                      max_records=None, next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names,
                                                      max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj

    # policy related
    def delete_policy(self, policy_name, autoscale_group=None):
        return self.conn.delete_policy(policy_name, autoscale_group)

    def get_all_policies(self, as_group=None, policy_names=None,
                         max_records=None, next_token=None):
        return self.conn.get_all_policies(as_group, policy_names,
                                          max_records, next_token)

    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
        return self.conn.execute_policy(policy_name, as_group, honor_cooldown)

    def create_scaling_policy(self, scaling_policy):
        return self.conn.create_scaling_policy(scaling_policy)

    def get_all_adjustment_types(self):
        return self.conn.get_all_adjustment_types()

    # tag related
    def delete_tags(self, tags):
        return self.conn.delete_tags(tags)

    def get_all_tags(self, filters=None, max_records=None, next_token=None):
        return self.conn.get_all_tags(filters, max_records, next_token)

    def create_or_update_tags(self, tags):
        return self.conn.create_or_update_tags(tags)
class MSBManager: def __init__(self, aws_access_key, aws_secret_key): self.ec2_conn = EC2Connection(aws_access_key, aws_secret_key) self.elb_conn = ELBConnection(aws_access_key, aws_secret_key) self.auto_scale_conn = AutoScaleConnection(aws_access_key, aws_secret_key) self.cloud_watch_conn = CloudWatchConnection(aws_access_key, aws_secret_key) self.default_cooldown = 60 def get_security_group(self, name): sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name] return sgs[0] if sgs else None def create_security_group(self, name, description): sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name] sg = sgs[0] if sgs else None if not sgs: sg = self.ec2_conn.create_security_group(name, description) try: sg.authorize(ip_protocol="-1", from_port=None, to_port=None, cidr_ip="0.0.0.0/0", dry_run=False) except EC2ResponseError: pass return sg def remove_security_group(self, name): self.ec2_conn.delete_security_group(name=name) def create_instance(self, image, instance_type, key_name, zone, security_groups, tags): instance = None reservations = self.ec2_conn.get_all_instances() for reservation in reservations: for i in reservation.instances: if "Name" in i.tags and i.tags["Name"] == tags["Name"] and i.state == "running": instance = i break if not instance: reservation = self.ec2_conn.run_instances( image, instance_type=instance_type, key_name=key_name, placement=zone, security_groups=security_groups, monitoring_enabled=True, ) instance = reservation.instances[0] while not instance.update() == "running": time.sleep(5) time.sleep(10) self.ec2_conn.create_tags([instance.id], tags) return instance def request_spot_instance(self, bid, image, instance_type, key_name, zone, security_groups, tags): req = self.ec2_conn.request_spot_instances( price=bid, instance_type=instance_type, image_id=image, placement=zone, key_name=key_name, security_groups=security_groups, ) instance_id = None while not instance_id: job_sir_id = req[0].id requests = 
self.ec2_conn.get_all_spot_instance_requests() for sir in requests: if sir.id == job_sir_id: instance_id = sir.instance_id break print "Job {} not ready".format(job_sir_id) time.sleep(60) self.ec2_conn.create_tags([instance_id], tags) def remove_instance(self, instance_id): self.remove_instances([instance_id]) def remove_instances(self, instance_ids): self.ec2_conn.terminate_instances(instance_ids) def remove_instance_by_tag_name(self, name): reservations = self.ec2_conn.get_all_instances() data_centers_intance_ids = [] for reservation in reservations: for instance in reservation.instances: if "Name" in instance.tags and instance.tags["Name"] == name and instance.state == "running": data_centers_intance_ids.append(instance.id) if data_centers_intance_ids: self.remove_instances(data_centers_intance_ids) def create_elb(self, name, zone, project_tag_value, security_group_id, instance_ids=None): lbs = [l for l in self.elb_conn.get_all_load_balancers() if l.name == name] lb = lbs[0] if lbs else None if not lb: hc = HealthCheck( timeout=50, interval=60, healthy_threshold=2, unhealthy_threshold=8, target="HTTP:80/heartbeat" ) ports = [(80, 80, "http")] zones = [zone] lb = self.elb_conn.create_load_balancer(name, zones, ports) self.elb_conn.apply_security_groups_to_lb(name, [security_group_id]) lb.configure_health_check(hc) if instance_ids: lb.register_instances(instance_ids) params = { "LoadBalancerNames.member.1": lb.name, "Tags.member.1.Key": "15619project", "Tags.member.1.Value": project_tag_value, } lb.connection.get_status("AddTags", params, verb="POST") return lb def remove_elb(self, name): self.elb_conn.delete_load_balancer(name) def create_launch_configuration(self, name, image, key_name, security_groups, instance_type): lcs = [l for l in self.auto_scale_conn.get_all_launch_configurations() if l.name == name] lc = lcs[0] if lcs else None if not lc: lc = LaunchConfiguration( name=name, image_id=image, key_name=key_name, security_groups=[security_groups], 
instance_type=instance_type, ) self.auto_scale_conn.create_launch_configuration(lc) return lc def remove_launch_configuration(self, name): self.auto_scale_conn.delete_launch_configuration(name) def create_autoscaling_group(self, name, lb_name, zone, tags, instance_ids=None): lc = self.create_launch_configuration() as_groups = [a for a in self.auto_scale_conn.get_all_groups() if a.name == name] as_group = as_groups[0] if as_groups else None if not as_group: as_group = AutoScalingGroup( group_name=name, load_balancers=[lb_name], availability_zones=[zone], launch_config=lc, min_size=4, max_size=4, health_check_type="ELB", health_check_period=120, connection=self.auto_scale_conn, default_cooldown=self.default_cooldown, desired_capacity=4, tags=tags, ) self.auto_scale_conn.create_auto_scaling_group(as_group) if instance_ids: self.auto_scale_conn.attach_instances(name, instance_ids) scale_up_policy = ScalingPolicy( name="scale_up", adjustment_type="ChangeInCapacity", as_name=name, scaling_adjustment=1, cooldown=self.default_cooldown, ) scale_down_policy = ScalingPolicy( name="scale_down", adjustment_type="ChangeInCapacity", as_name=name, scaling_adjustment=-1, cooldown=self.default_cooldown, ) self.auto_scale_conn.create_scaling_policy(scale_up_policy) self.auto_scale_conn.create_scaling_policy(scale_down_policy) scale_up_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=["scale_up"])[0] scale_down_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=["scale_down"])[0] alarm_dimensions = {"AutoScalingGroupName": name} scale_up_alarm = MetricAlarm( name="scale_up_on_cpu", namespace="AWS/EC2", metric="CPUUtilization", statistic="Average", comparison=">", threshold=85, period=60, evaluation_periods=1, alarm_actions=[scale_up_policy.policy_arn], dimensions=alarm_dimensions, ) self.cloud_watch_conn.create_alarm(scale_up_alarm) scale_down_alarm = MetricAlarm( name="scale_down_on_cpu", namespace="AWS/EC2", metric="CPUUtilization", 
statistic="Average", comparison="<", threshold=60, period=60, evaluation_periods=1, alarm_actions=[scale_down_policy.policy_arn], dimensions=alarm_dimensions, ) self.cloud_watch_conn.create_alarm(scale_down_alarm) return as_group def update_autoscaling_group_max_size(self, as_group, max_size): setattr(as_group, "max_size", max_size) as_group.update() def update_autoscaling_group_min_size(self, as_group, min_size): setattr(as_group, "min_size", min_size) as_group.update() def remove_autoscaling_group(self, name): self.auto_scale_conn.delete_auto_scaling_group(name)
def create_autoscaling(ami_id, sns_arn): """ Creates the autoscaling group for proxy instances Inspired by boto autoscaling tutorial. """ con = AutoScaleConnection(aws_secret_access_key=AWS_SECRET_ACCESS_KEY, aws_access_key_id=AWS_ACCESS_KEY, region=RegionInfo(name=REGION, endpoint='autoscaling.%s.amazonaws.com' % REGION)) print "Creating autoscaling configuration.." config = LaunchConfiguration(name=AUTOSCALING_GROUP_NAME, image_id=ami_id, key_name=KEY_NAME, security_groups=[EC2_SECURITY_GROUP_NAME], instance_type=INSTANCE_TYPE) con.create_launch_configuration(config) print "Create autoscaling group..." ag = AutoScalingGroup(name=AUTOSCALING_GROUP_NAME, launch_config=config, availability_zones=["{0}a".format(REGION)], load_balancers=[ELB_NAME], min_size=AUTOSCALING_MIN_INSTANCES, max_size=AUTOSCALING_MAX_INSTANCES, group_name=AUTOSCALING_GROUP_NAME) con.create_auto_scaling_group(ag) # fetch the autoscale group after it is created (unused but may be necessary) _ = con.get_all_groups(names=[AUTOSCALING_GROUP_NAME])[0] # Create tag name for autoscaling-created machines as_tag = Tag(key='Name', value=AUTOSCALING_GROUP_NAME, propagate_at_launch=True, resource_id=AUTOSCALING_GROUP_NAME) con.create_or_update_tags([as_tag]) print "Creating autoscaling policy..." 
scaleup_policy = ScalingPolicy(name='scale_up', adjustment_type='ChangeInCapacity', as_name=AUTOSCALING_GROUP_NAME, scaling_adjustment=1, cooldown=AUTOSCALING_COOLDOWN_PERIOD) scaledown_policy = ScalingPolicy(name='scale_down', adjustment_type='ChangeInCapacity', as_name=AUTOSCALING_GROUP_NAME, scaling_adjustment=-1, cooldown=AUTOSCALING_COOLDOWN_PERIOD) con.create_scaling_policy(scaleup_policy) con.create_scaling_policy(scaledown_policy) # Get freshened policy objects scaleup_policy = con.get_all_policies(as_group=AUTOSCALING_GROUP_NAME, policy_names=['scale_up'])[0] scaledown_policy = con.get_all_policies(as_group=AUTOSCALING_GROUP_NAME, policy_names=['scale_down'])[0] print "Creating cloudwatch alarms" cloudwatch_con = CloudWatchConnection(aws_secret_access_key=AWS_SECRET_ACCESS_KEY, aws_access_key_id=AWS_ACCESS_KEY, region=RegionInfo(name=REGION, endpoint='monitoring.%s.amazonaws.com' % REGION)) alarm_dimensions = {"AutoScalingGroupName": AUTOSCALING_GROUP_NAME} scaleup_alarm = MetricAlarm(name='scale_up_on_cpu', namespace='AWS/EC2', metric='CPUUtilization', statistic='Average', comparison='>', threshold=AUTOSCALING_CPU_MAX_THRESHOLD, period='60', evaluation_periods=1, alarm_actions=[scaleup_policy.policy_arn, sns_arn], dimensions=alarm_dimensions) # Don't send SNS on scaledown policy scaledown_alarm = MetricAlarm(name='scale_down_on_cpu', namespace='AWS/EC2', metric='CPUUtilization', statistic='Average', comparison='<', threshold=AUTOSCALING_CPU_MIN_THRESHOLD, period='60', evaluation_periods=1, alarm_actions=[scaledown_policy.policy_arn], dimensions=alarm_dimensions) cloudwatch_con.create_alarm(scaleup_alarm) cloudwatch_con.create_alarm(scaledown_alarm)
def test_basic(self):
    # NB: as it says on the tin these are really basic tests that only
    # (lightly) exercise read-only behaviour - and that's only if you
    # have any autoscale groups to introspect. It's useful, however, to
    # catch simple errors
    print('--- running %s tests ---' % self.__class__.__name__)
    c = AutoScaleConnection()
    self.assertTrue(repr(c).startswith('AutoScaleConnection'))
    groups = c.get_all_groups()
    for group in groups:
        self.assertIsInstance(group, AutoScalingGroup)
        # get activities
        activities = group.get_activities()
        for activity in activities:
            self.assertIsInstance(activity, Activity)
    # get launch configs
    configs = c.get_all_launch_configurations()
    for config in configs:
        self.assertIsInstance(config, LaunchConfiguration)
    # get policies
    policies = c.get_all_policies()
    for policy in policies:
        self.assertIsInstance(policy, ScalingPolicy)
    # get scheduled actions
    actions = c.get_all_scheduled_actions()
    for action in actions:
        self.assertIsInstance(action, ScheduledUpdateGroupAction)
    # get instances
    instances = c.get_all_autoscaling_instances()
    for instance in instances:
        self.assertIsInstance(instance, Instance)
    # get all scaling process types
    ptypes = c.get_all_scaling_process_types()
    for ptype in ptypes:
        # was assertTrue(ptype, ProcessType), which only checked
        # truthiness; check the type like the sibling assertions do
        self.assertIsInstance(ptype, ProcessType)
    # get adjustment types
    adjustments = c.get_all_adjustment_types()
    for adjustment in adjustments:
        self.assertIsInstance(adjustment, AdjustmentType)
    # get metrics collection types
    types = c.get_all_metric_collection_types()
    self.assertIsInstance(types, MetricCollectionTypes)
    # create the simplest possible AutoScale group
    # first create the launch configuration
    time_string = '%d' % int(time.time())
    lc_name = 'lc-%s' % time_string
    lc = LaunchConfiguration(name=lc_name, image_id='ami-2272864b',
                             instance_type='t1.micro')
    c.create_launch_configuration(lc)
    found = False
    lcs = c.get_all_launch_configurations()
    for lc in lcs:
        if lc.name == lc_name:
            found = True
            break
    assert found
    # now create autoscaling group
    group_name = 'group-%s' % time_string
    group = AutoScalingGroup(name=group_name, launch_config=lc,
                             availability_zones=['us-east-1a'],
                             min_size=1, max_size=1)
    c.create_auto_scaling_group(group)
    found = False
    groups = c.get_all_groups()
    for group in groups:
        if group.name == group_name:
            found = True
            break
    assert found
    # now create a tag
    tag = Tag(key='foo', value='bar', resource_id=group_name,
              propagate_at_launch=True)
    c.create_or_update_tags([tag])
    found = False
    tags = c.get_all_tags()
    for tag in tags:
        if tag.resource_id == group_name and tag.key == 'foo':
            found = True
            break
    assert found
    c.delete_tags([tag])
    # shutdown instances and wait for them to disappear
    group.shutdown_instances()
    instances = True
    while instances:
        time.sleep(5)
        groups = c.get_all_groups()
        for group in groups:
            if group.name == group_name:
                if not group.instances:
                    instances = False
    group.delete()
    lc.delete()
    # wait until the tag is gone as well
    found = True
    while found:
        found = False
        time.sleep(5)
        tags = c.get_all_tags()
        for tag in tags:
            if tag.resource_id == group_name and tag.key == 'foo':
                found = True
    assert not found
    print('--- tests completed ---')
def setup(CONF): global out lookup_tbl = { 'name': CONF['NAME'], } conn = AutoScaleConnection() out['conn'] = conn # Launch Configurations LC = CONF['LC'] LC['name'] = LC['name'] % lookup_tbl lc = LaunchConfiguration(**LC) conn.create_launch_configuration(lc) out['lc'] = lc # Auto Scaling Group ASG = CONF['ASG'] ASG['group_name'] = ASG['group_name'] % lookup_tbl ASG['launch_config'] = lc groups = conn.get_all_groups(names=[ASG['group_name']]) if (len(groups) > 0): # update asg = groups[0] for k in ASG : # asg not iterable, try-except to make sure asg[k] exists try: asg.__getattribute__(k) except: continue asg.__setattr__(k, ASG[k]) asg.launch_config_name = LC['name'] asg.update() out['asg'] = asg else: #create asg = AutoScalingGroup(**ASG) conn.create_auto_scaling_group(asg) # ASG Tags ASG_TAGS = CONF['ASG_TAGS'] for i in ASG_TAGS: if 'propagate_at_launch' not in i: i['propagate_at_launch'] = True i['key'] = i['key'] % lookup_tbl i['value'] = i['value'] % lookup_tbl tags = [ Tag(**dict(x.items() + [('resource_id', ASG['group_name'])])) for x in ASG_TAGS ] conn.create_or_update_tags(tags) # Triggers (Scaling Policy / Cloudwatch Alarm) conn_cw = connect_to_region(CONF['REGION']) TRIGGERS = CONF['TRIGGERS'] for T in TRIGGERS: T['policy']['name'] = T['policy']['name'] % lookup_tbl T['policy']['as_name'] = ASG['group_name'] T['alarm']['dimensions'] = {'AutoScalingGroupName': ASG['group_name']} T['alarm']['alarm_actions'] = None if 'name' in T['alarm']: T['alarm']['name'] = T['alarm']['name'] % lookup_tbl else: T['alarm']['name'] = T['policy']['name'] # Policies are safely overwritten, so not checked for existence conn.create_scaling_policy(ScalingPolicy(**T['policy'])) policy = conn.get_all_policies(as_group=ASG['group_name'], policy_names=[T['policy']['name']])[0] T['alarm']['alarm_actions'] = [policy.policy_arn] hits = conn_cw.describe_alarms(alarm_names=[T['alarm']['name']]) conn_cw.create_alarm(MetricAlarm(**T['alarm']))
scaleOut = ScalingPolicy(name='ScaleOut', adjustment_type='ChangeInCapacity', as_name=asg.name, scaling_adjustment=1, cooldown=100) scaleIn = ScalingPolicy(name='ScaleIn', adjustment_type='ChangeInCapacity', as_name=asg.name, scaling_adjustment=-1, cooldown=100) con_as.create_scaling_policy(scaleOut) con_as.create_scaling_policy(scaleIn) scaleOut_policy = con_as.get_all_policies( as_group=asg.name, policy_names=['ScaleOut'])[0] scaleIn_policy = con_as.get_all_policies( as_group=asg.name, policy_names=['ScaleIn'])[0] # -------------------------Create CloudWatch Alarm------------------------------ con_cw = CloudWatchConnection() alarm_dimensions = {"AutoScalingGroupName": asg.name} scaleOut_alarm = MetricAlarm(name='scaleOut_on_cpu', namespace='AWS/EC2', metric='CPUUtilization', statistic='Average', comparison='>', threshold='70', period='120',
ag = AutoScalingGroup(connection=conn3, name='gp', load_balancers=['elb'], availability_zones=[ZONE], health_check_type='ELB', health_check_period=60, launch_config=config, min_size=2, max_size=5, desired_capacity=2, tags=[Tag(key=TAGK, value=TAGV, propagate_at_launch=True, resource_id='gp', resource_type='auto-scaling-group')]) conn3.create_auto_scaling_group(ag) # define the scaling policies scale_up_policy = ScalingPolicy(name='scale_up', adjustment_type='ChangeInCapacity', as_name='gp', scaling_adjustment=1, cooldown=60) scale_down_policy = ScalingPolicy(name='scale_down', adjustment_type='ChangeInCapacity', as_name='gp', scaling_adjustment=-1, cooldown=60) # create policies conn3.create_scaling_policy(scale_up_policy) conn3.create_scaling_policy(scale_down_policy) # get ARN for policies up_policy = conn3.get_all_policies(as_group='gp', policy_names=['scale_up'])[0] down_policy = conn3.get_all_policies(as_group='gp', policy_names=['scale_down'])[0] # set up cloudwatch cloudwatch = boto.ec2.cloudwatch.connect_to_region('us-east-1', aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'], aws_secret_access_key=os.environ['AWS_SECRET_KEY']) scale_up_alarm = MetricAlarm(name='scale_up_on_cpu', namespace='AWS/EC2', metric='CPUUtilization', statistic='Average', comparison='>', threshold='80', period='60', evaluation_periods=2, alarm_actions=[up_policy.policy_arn], dimensions={"AutoScalingGroupName": 'gp'}) scale_down_alarm = MetricAlarm(name='scale_down_on_cpu', namespace='AWS/EC2', metric='CPUUtilization', statistic='Average', comparison='<', threshold='30', period='60', evaluation_periods=2, alarm_actions=[down_policy.policy_arn], dimensions={"AutoScalingGroupName": 'gp'}) # create alarms cloudwatch.create_alarm(scale_up_alarm)
print 'Creating ASG' asg = AutoScalingGroup(group_name='Project22group', load_balancers=['Project22hongf'], health_check_type = 'ELB', health_check_period = '119', desired_capacity = 5, availability_zones=['us-east-1c'], launch_config = lc, min_size = 5, max_size = 5, tags = [boto.ec2.autoscale.tag.Tag(key='Project',value='2.2', resource_id = 'Project22group', propagate_at_launch=True)]) asg_conn.create_auto_scaling_group(asg) print 'ASG created' #Create Scaling Policy print 'Creating Scaling Policy' scale_out_policy = ScalingPolicy(name = 'scale_out', adjustment_type = 'ChangeInCapacity', as_name = 'Project22group', scaling_adjustment = 1, cooldown = 60) asg_conn.create_scaling_policy(scale_out_policy) scale_in_policy = ScalingPolicy(name = 'scale_in', adjustment_type = 'ChangeInCapacity', as_name = 'Project22group', scaling_adjustment = -1, cooldown = 60) asg_conn.create_scaling_policy(scale_in_policy) #Check Policys and get them for CloudWatch ScaleOut = asg_conn.get_all_policies(as_group = 'Project22group', policy_names = ['scale_out'])[0] ScaleIn = asg_conn.get_all_policies(as_group = 'Project22group', policy_names = ['scale_in'])[0] print 'Scaling Policy created' #Create CloudWatch alarm_dimensions = {"AutoScalingGroupName": 'Project22group'} scale_out_alarm = MetricAlarm(name = 'scale_out', namespace = 'AWS/EC2', metric = 'CPUUtilization', statistic='Average', comparison='>', threshold='80', period = '60', evaluation_periods=5, alarm_actions=[ScaleOut.policy_arn], dimensions=alarm_dimensions) cw_conn.create_alarm(scale_out_alarm) scale_in_alarm = MetricAlarm(name = 'scale_in', namespace = 'AWS/EC2', metric = 'CPUUtilization', statistic='Average', comparison='<', threshold='20', period = '60', evaluation_periods=5, alarm_actions=[ScaleIn.policy_arn], dimensions=alarm_dimensions) cw_conn.create_alarm(scale_in_alarm) print'CloudWatch Alarm created'
class AutoScale:
    """Orchestrates an auto-scaled, load-balanced EC2 cluster.

    ``setUp()`` runs the whole flow: load configs, configure an ELB health
    check, create the load balancer, boot a first (always-on) instance and
    register it, then create the launch configuration, autoscaling group,
    scaling policies and the CloudWatch trigger.
    """

    def __init__(self, args):
        """Initializing basic variables needed for auto scaling."""
        self.configs = ConfigParser.RawConfigParser()
        self.args = args
        self.test_props = {}
        self.props = {}
        # BUG FIX: populateConfigs() writes into self.boto_props, which was
        # never initialized and raised AttributeError on first use.
        self.boto_props = {}
        self.ec2_connection = EC2Connection(self.args.access_key,
                                            self.args.secret_key)
        self.autoscale_connection = AutoScaleConnection(self.args.access_key,
                                                        self.args.secret_key)
        self.elb_connection = ELBConnection(self.args.access_key,
                                            self.args.secret_key)
        self.cw_connection = CloudWatchConnection(self.args.access_key,
                                                  self.args.secret_key)
        self.firstInstance = None
        self.launchConfiguration = None
        self.healthCheck = None

    def loadConfigs(self):
        """
        FIX ME: Currently doesnt do anything
        This method will load the configurations from boto config file if
        present else will accept parameters passed by user.
        """
        if os.path.isfile("/etc/boto.cfg"):
            self.configs.read("/etc/boto.cfg")
            conf = self.configs.sections()
            self.populateConfigs(conf)
        # BUG FIX: os.path.isfile does not expand "~", so the user config
        # was never found; expand it explicitly.
        user_cfg = os.path.expanduser("~/.boto")
        if os.path.isfile(user_cfg):
            self.configs.read(user_cfg)
            conf = self.configs.sections()
            self.populateConfigs(conf)
        print(">>> Loaded configs")

    def populateConfigs(self, sections):
        """Merge options from the given config sections into self.props.

        The first-seen value for a key wins; raw (section -> items) pairs
        are kept in self.boto_props.
        """
        for section in sections:
            self.boto_props[section] = self.configs.items(section)
            for key, value in self.boto_props[section]:
                # BUG FIX: dict.has_key() is Python-2-only; use ``in``.
                if key not in self.props:
                    self.props[key] = value

    def createLaunchConfiguration(self, lc_name, ami_id, key_name):
        """Creates launch configuration for the auto scaling cluster."""
        self.launchConfiguration = LaunchConfiguration(name=lc_name,
                                                       image_id=ami_id,
                                                       key_name=key_name)
        self.autoscale_connection.create_launch_configuration(
            self.launchConfiguration)
        print(">>> Created launch configuration: " + lc_name)

    def createAutoScaleGroup(self, asg_name):
        """Create a Auto scaling group for the auto scaling cluster."""
        autoScalingGroup = AutoScalingGroup(
            group_name=asg_name,
            load_balancers=[self.args.lb_name],
            launch_config=self.launchConfiguration,
            min_size=self.args.min_size,
            max_size=self.args.max_size,
            availability_zones=['us-east-1a'])
        self.autoscale_connection.create_auto_scaling_group(autoScalingGroup)
        print(">>> Created auto scaling group: " + asg_name)

    def createTrigger(self, trigger_name, measure, asg_name):
        """Trigger to spawn new instances as per specific metrics."""
        alarm_actions = []
        dimensions = {"AutoScalingGroupName": asg_name}
        # NOTE(review): createAutoScalePolicy() registers policies named
        # asp_name + '-up' / '-down', so a lookup by the bare asp_name may
        # return nothing and leave the alarm with no actions - confirm.
        policies = self.autoscale_connection.get_all_policies(
            as_group=self.args.asg_name, policy_names=[self.args.asp_name])
        for policy in policies:
            alarm_actions.append(policy.policy_arn)
        # Fire when average CPU >= 50% for two consecutive 60s periods.
        alarm = MetricAlarm(name=trigger_name, namespace="AWS/EC2",
                            metric=measure, statistic="Average",
                            comparison=">=", threshold=50, period=60,
                            unit="Percent", evaluation_periods=2,
                            alarm_actions=alarm_actions,
                            dimensions=dimensions)
        self.cw_connection.create_alarm(alarm)
        print(">>> Created trigger: " + self.args.trigger)

    def createAutoScalePolicy(self, asp_name):
        """
        Creates a Auto scaling policy to Add/Remove a instance from auto
        scaling cluster (one '-up' and one '-down' policy, 180s cooldown).
        """
        self.autoScalingUpPolicy = ScalingPolicy(
            name=asp_name + '-up',
            adjustment_type="ChangeInCapacity",
            as_name=self.args.asg_name,
            scaling_adjustment=1,
            cooldown=180)
        self.autoScalingDownPolicy = ScalingPolicy(
            name=asp_name + '-down',
            adjustment_type="ChangeInCapacity",
            as_name=self.args.asg_name,
            scaling_adjustment=-1,
            cooldown=180)
        self.autoscale_connection.create_scaling_policy(
            self.autoScalingUpPolicy)
        self.autoscale_connection.create_scaling_policy(
            self.autoScalingDownPolicy)
        print(">>> Created auto scaling policy: " + asp_name)

    def configureHealthCheck(self, target):
        """Configures health check for the cluster."""
        self.healthCheck = HealthCheck(target=target, timeout=5)
        print(">>> Configured health check for: " + target)

    def createLoadBalancer(self, lb_name, region, lb_port, instance_port,
                           protocol):
        """Creates a load balancer for cluster (if it does not exist)."""
        listener = (int(lb_port), int(instance_port), protocol)
        tuple_list = [listener]
        lbs = self.elb_connection.get_all_load_balancers()
        # BUG FIX: the original looped over all balancers and called
        # create_load_balancer once per *non-matching* balancer (and never
        # when the account had none).  Check existence once, create at most
        # once.
        if any(lb.name == lb_name for lb in lbs):
            print("Load balancer with name '" + lb_name + "' already exists")
        else:
            self.elb_connection.create_load_balancer(lb_name, [region],
                                                     tuple_list)
            self.elb_connection.configure_health_check(
                name=lb_name, health_check=self.healthCheck)
            print(">>> Created load balancer: " + lb_name)

    def startInstance(self, image_id, key_name, region, instance_type):
        """
        Starts the first instance which will be serving requests
        irrespective of auto scaling instances.
        """
        reservation = self.ec2_connection.run_instances(
            image_id=image_id, min_count=1, max_count=1, placement=region,
            key_name=key_name, instance_type=instance_type)
        # for instance in reservation.instances:
        #     instance.add_tag('node', '0')
        #     break
        self.firstInstance = reservation.instances[0].id.split('\'')[0]
        print(">>> Started instance: %s" % self.firstInstance)

    def registerInstanceToELB(self, lb_name):
        """Register the first instance started to the Elastic Load Balancer."""
        self.elb_connection.register_instances(load_balancer_name=lb_name,
                                               instances=[self.firstInstance])
        print(">>> Registered instance '%s' to load balancer '%s'"
              % (self.firstInstance, lb_name))

    def setUp(self):
        """Set's up the auto scaling for the application."""
        # STEP 1: Load the configurations
        self.loadConfigs()
        # STEP 2: Configure the health check for the instances
        self.configureHealthCheck(self.args.lb_target)
        # STEP 3: Create a load balancer
        self.createLoadBalancer(self.args.lb_name, self.args.region,
                                self.args.lb_port, self.args.instance_port,
                                self.args.protocol)
        # STEP 4: Start the first instance
        self.startInstance(self.args.ami_id, self.args.key_name,
                           self.args.region, self.args.instance_type)
        # STEP 5: Register the instance to the load balancer created in STEP 4
        self.registerInstanceToELB(self.args.lb_name)
        # STEP 6: Create launch configuration to launch instances by auto scale
        self.createLaunchConfiguration(self.args.lc_name, self.args.ami_id,
                                       self.args.key_name)
        # STEP 7: Create a auto scale group which will manage the instances
        # started by auto scaling
        self.createAutoScaleGroup(self.args.asg_name)
        # STEP 8: Create a auto scaling policy to say add/remove a node
        self.createAutoScalePolicy(self.args.asp_name)
        # STEP 9: Create a trigger, so that auto scaling can trigger it to
        # start or remove a instance from auto scaling group
        self.createTrigger(self.args.trigger, self.args.measure,
                           self.args.asg_name)
as_name=autoscaling_group['name'], scaling_adjustment=1, cooldown=180) scale_down_policy = ScalingPolicy(name='scale_down', adjustment_type='ChangeInCapacity', as_name=autoscaling_group['name'], scaling_adjustment=-1, cooldown=180) conn_as.create_scaling_policy(scale_up_policy) conn_as.create_scaling_policy(scale_down_policy) print 'scaling policies created!' sleep(10) # refresh and get back the scaling policies scale_up_policy = conn_as.get_all_policies(as_group=autoscaling_group['name'], policy_names=['scale_up'])[0] scale_down_policy = conn_as.get_all_policies( as_group=autoscaling_group['name'], policy_names=['scale_down'])[0] # ============================================== # # ============================================== # # create CloudWatch alarms for when to scale up and when to scale down alarm_dimensions = {"AutoScalingGroupName": autoscaling_group['name']} scale_up_alarm = MetricAlarm( name=metric_alarm['upAlarmName'], namespace=metric_alarm['nameSpace'], metric=metric_alarm['metric'], statistic=metric_alarm['statistic'], comparison='>',
scale_down_policy = ScalingPolicy( name='scale_down', adjustment_type='ChangeInCapacity', as_name=autoscaling_group['name'], scaling_adjustment=-1, cooldown=180 ) conn_as.create_scaling_policy(scale_up_policy) conn_as.create_scaling_policy(scale_down_policy) print 'scaling policies created!' sleep(10) # refresh and get back the scaling policies scale_up_policy = conn_as.get_all_policies( as_group=autoscaling_group['name'], policy_names=['scale_up'])[0] scale_down_policy = conn_as.get_all_policies( as_group=autoscaling_group['name'], policy_names=['scale_down'])[0] # ============================================== # # ============================================== # # create CloudWatch alarms for when to scale up and when to scale down alarm_dimensions = {"AutoScalingGroupName": autoscaling_group['name']} scale_up_alarm = MetricAlarm( name=metric_alarm['upAlarmName'],