class BotoBalanceInterface(BalanceInterface):
    """BalanceInterface implementation backed by a boto ELBConnection.

    When `saveclcdata` is True, query responses are also dumped to
    mockdata/*.json for later use as test fixtures.
    """

    conn = None          # ELBConnection, created in __init__
    saveclcdata = False  # dump responses as mock JSON when True

    def __init__(self, clc_host, access_id, secret_key, token):
        """Connect to `clc_host` with the given credentials/session token.

        A Eucalyptus CLC is assumed (port 8773); hosts ending in
        'amazonaws.com' are rewritten from the EC2 hostname to the ELB
        hostname and contacted on port 443 with a root path.
        """
        #boto.set_stream_logger('foo')
        # NOTE(review): sibling copies of this class use
        # '/services/LoadBalancing' here — confirm which path the target
        # cloud expects.
        path = '/services/elb'
        port = 8773
        if clc_host[len(clc_host) - 13:] == 'amazonaws.com':
            clc_host = clc_host.replace('ec2', 'elasticloadbalancing', 1)
            path = '/'
            port = 443
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        self.conn = ELBConnection(access_id, secret_key, region=reg,
                                  port=port, path=path, is_secure=True,
                                  security_token=token, debug=0)
        # Private clouds commonly run self-signed certificates.
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        """Dump `obj` as JSON to file `name` (mock-data capture)."""
        f = open(name, 'w')
        try:
            json.dump(obj, f, cls=BotoJsonBalanceEncoder, indent=2)
        finally:
            # Bug fix: close the file even if serialization fails.
            f.close()

    def create_load_balancer(self, name, zones, listeners, subnets=None,
                             security_groups=None, scheme='internet-facing'):
        """Create a load balancer; passes straight through to boto."""
        return self.conn.create_load_balancer(name, zones, listeners,
                                              subnets, security_groups,
                                              scheme)

    def delete_load_balancer(self, name):
        """Delete the named load balancer."""
        return self.conn.delete_load_balancer(name)

    def get_all_load_balancers(self, load_balancer_names=None):
        """Return all (or the named) load balancers.

        Bug fix: an unconditional 'return []' debug stub made the real
        query below unreachable, so callers always saw an empty list.
        """
        obj = self.conn.get_all_load_balancers(load_balancer_names)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Balancers.json")
        return obj

    def deregister_instances(self, load_balancer_name, instances):
        """Remove instances from the named load balancer."""
        return self.conn.deregister_instances(load_balancer_name, instances)

    def register_instances(self, load_balancer_name, instances):
        """Add instances to the named load balancer."""
        return self.conn.register_instances(load_balancer_name, instances)

    def create_load_balancer_listeners(self, name, listeners):
        """Create listeners on the named load balancer."""
        return self.conn.create_load_balancer_listeners(name, listeners)

    def delete_load_balancer_listeners(self, name, ports):
        """Delete the listeners on the given ports."""
        return self.conn.delete_load_balancer_listeners(name, ports)

    def configure_health_check(self, name, health_check):
        """Attach a health check to the named load balancer."""
        return self.conn.configure_health_check(name, health_check)

    def describe_instance_health(self, load_balancer_name, instances=None):
        """Return the health state of the balancer's instances."""
        obj = self.conn.describe_instance_health(load_balancer_name,
                                                 instances)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Instances.json")
        return obj
class BotoBalanceInterface(BalanceInterface):
    # ELBConnection proxied by this interface; (re)built in set_endpoint().
    conn = None
    # When True, each response is also dumped to mockdata/*.json.
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        """Store the credentials, then connect to the given CLC host."""
        self.access_id = access_id
        self.secret_key = secret_key
        self.token = token
        self.set_endpoint(clc_host)

    def set_endpoint(self, endpoint):
        """(Re)create self.conn against `endpoint`.

        A Eucalyptus CLC is assumed (port 8773, /services/LoadBalancing);
        endpoints ending in 'amazonaws.com' are rewritten from the EC2
        hostname to the ELB hostname and use HTTPS port 443, root path.
        """
        #boto.set_stream_logger('foo')
        reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
        path = '/services/LoadBalancing'
        port = 8773
        # len('amazonaws.com') == 13, so this slice test is an endswith().
        if endpoint[len(endpoint)-13:] == 'amazonaws.com':
            endpoint = endpoint.replace('ec2', 'elasticloadbalancing', 1)
            path = '/'
            reg = RegionInfo(endpoint=endpoint)
            port = 443
        self.conn = ELBConnection(self.access_id, self.secret_key, region=reg, port=port, path=path, is_secure=True, security_token=self.token, debug=0)
        # Self-signed certs are common on private clouds; skip validation.
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        """Dump `obj` to file `name` as JSON (mock-data capture helper)."""
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonBalanceEncoder, indent=2)
        f.close()

    def create_load_balancer(self, name, zones, listeners, subnets=None, security_groups=None, scheme='internet-facing'):
        """Create a load balancer; passes straight through to boto."""
        return self.conn.create_load_balancer(name, zones, listeners, subnets, security_groups, scheme)

    def delete_load_balancer(self, name):
        """Delete the named load balancer."""
        return self.conn.delete_load_balancer(name)

    def get_all_load_balancers(self, load_balancer_names=None):
        """Fetch all (or the named) load balancers by issuing the
        DescribeLoadBalancers query directly and parsing the XML body.

        Raises self.conn.ResponseError on an empty body or non-200 status.
        """
        params = {}
        if load_balancer_names:
            # NOTE(review): build_list_params is looked up on self, not on
            # self.conn — confirm BalanceInterface (or a base class)
            # actually provides it.
            self.build_list_params(params, load_balancer_names, 'LoadBalancerNames.member.%d')
        http_request = self.conn.build_base_http_request('GET', '/', None, params, {}, '', self.conn.server_name())
        http_request.params['Action'] = 'DescribeLoadBalancers'
        http_request.params['Version'] = self.conn.APIVersion
        response = self.conn._mexe(http_request, override_num_retries=2)
        body = response.read()
        boto.log.debug(body)
        if not body:
            boto.log.error('Null body %s' % body)
            raise self.conn.ResponseError(response.status, response.reason, body)
        elif response.status == 200:
            # Parse the XML into a ResultSet of LoadBalancer objects.
            obj = boto.resultset.ResultSet([('member', boto.ec2.elb.loadbalancer.LoadBalancer)])
            h = boto.handler.XmlHandler(obj, self.conn)
            import xml.sax
            xml.sax.parseString(body, h)
            if self.saveclcdata:
                self.__save_json__(obj, "mockdata/ELB_Balancers.json")
            return obj
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.conn.ResponseError(response.status, response.reason, body)

    def deregister_instances(self, load_balancer_name, instances):
        """Remove instances from the named load balancer."""
        return self.conn.deregister_instances(load_balancer_name, instances)

    def register_instances(self, load_balancer_name, instances):
        """Add instances to the named load balancer."""
        return self.conn.register_instances(load_balancer_name, instances)

    def create_load_balancer_listeners(self, name, listeners):
        """Create listeners on the named load balancer."""
        return self.conn.create_load_balancer_listeners(name, listeners)

    def delete_load_balancer_listeners(self, name, ports):
        """Delete the listeners on the given ports."""
        return self.conn.delete_load_balancer_listeners(name, ports)

    def configure_health_check(self, name, health_check):
        """Attach a health check to the named load balancer."""
        return self.conn.configure_health_check(name, health_check)

    def describe_instance_health(self, load_balancer_name, instances=None):
        """Return the health state of the balancer's instances."""
        obj = self.conn.describe_instance_health(load_balancer_name, instances)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Instances.json")
        return obj
class AutoScale: def __init__(self, args): """ Initializing basic variables needed for auto scaling """ self.configs = ConfigParser.RawConfigParser() self.args = args self.test_props = {} self.props = {} self.ec2_connection = EC2Connection(self.args.access_key, self.args.secret_key) self.autoscale_connection = AutoScaleConnection( self.args.access_key, self.args.secret_key) self.elb_connection = ELBConnection(self.args.access_key, self.args.secret_key) self.cw_connection = CloudWatchConnection(self.args.access_key, self.args.secret_key) self.firstInstance = None self.launchConfiguration = None self.healthCheck = None def loadConfigs(self): """ FIX ME: Currently doesnt do anything This method will load the configurations from boto config file if present else will accept parameters passed by user. """ if os.path.isfile("/etc/boto.cfg"): self.configs.read("/etc/boto.cfg") conf = self.configs.sections() self.populateConfigs(conf) if os.path.isfile("~/.boto"): self.configs.read("~/.boto") conf = self.configs.sections() self.populateConfigs(conf) print ">>> Loaded configs" def populateConfigs(self, sections): for section in sections: self.boto_props[section] = self.configs.items(section) for item in self.boto_props[section]: key, value = item if not self.props.has_key(key): self.props[key] = value def createLaunchConfiguration(self, lc_name, ami_id, key_name): """ Creates launch configuration for the auto scaling cluster """ self.launchConfiguration = LaunchConfiguration(name=lc_name, image_id=ami_id, key_name=key_name) self.autoscale_connection.create_launch_configuration( self.launchConfiguration) print ">>> Created launch configuration: " + lc_name def createAutoScaleGroup(self, asg_name): """ Create a Auto scaling group for the auto scaling cluster """ autoScalingGroup = AutoScalingGroup( group_name=asg_name, load_balancers=[self.args.lb_name], launch_config=self.launchConfiguration, min_size=self.args.min_size, max_size=self.args.max_size, 
availability_zones=['us-east-1a']) self.autoscale_connection.create_auto_scaling_group(autoScalingGroup) print ">>> Created auto scaling group: " + asg_name def createTrigger(self, trigger_name, measure, asg_name): """ Trigger to spawn new instances as per specific metrics """ alarm_actions = [] dimensions = {"AutoScalingGroupName": asg_name} policies = self.autoscale_connection.get_all_policies( as_group=self.args.asg_name, policy_names=[self.args.asp_name]) for policy in policies: alarm_actions.append(policy.policy_arn) alarm = MetricAlarm(name=trigger_name, namespace="AWS/EC2", metric=measure, statistic="Average", comparison=">=", threshold=50, period=60, unit="Percent", evaluation_periods=2, alarm_actions=alarm_actions, dimensions=dimensions) self.cw_connection.create_alarm(alarm) print ">>> Created trigger: " + self.args.trigger def createAutoScalePolicy(self, asp_name): """ Creates a Auto scaling policy to Add/Remove a instance from auto scaling cluster """ self.autoScalingUpPolicy = ScalingPolicy( name=asp_name + '-up', adjustment_type="ChangeInCapacity", as_name=self.args.asg_name, scaling_adjustment=1, cooldown=180) self.autoScalingDownPolicy = ScalingPolicy( name=asp_name + '-down', adjustment_type="ChangeInCapacity", as_name=self.args.asg_name, scaling_adjustment=-1, cooldown=180) self.autoscale_connection.create_scaling_policy( self.autoScalingUpPolicy) self.autoscale_connection.create_scaling_policy( self.autoScalingDownPolicy) print ">>> Created auto scaling policy: " + asp_name def configureHealthCheck(self, target): """ Configures health check for the cluster """ self.healthCheck = HealthCheck(target=target, timeout=5) print ">>> Configured health check for: " + target def createLoadBalancer(self, lb_name, region, lb_port, instance_port, protocol): """ Creates a load balancer for cluster """ listener = (int(lb_port), int(instance_port), protocol) tuple_list = [] tuple_list.append(listener) lbs = self.elb_connection.get_all_load_balancers() for lb in 
lbs: if lb.name != lb_name: self.elb_connection.create_load_balancer( lb_name, [region], tuple_list) self.elb_connection.configure_health_check( name=lb_name, health_check=self.healthCheck) print ">>> Created load balancer: " + lb_name else: print "Load balancer with name '" + lb_name + "' already exists" def startInstance(self, image_id, key_name, region, instance_type): """ Starts the first instance which will be serving requests irrespective of auto scaling instances. """ reservation = self.ec2_connection.run_instances( image_id=image_id, min_count=1, max_count=1, placement=region, key_name=key_name, instance_type=instance_type) # for instance in reservation.instances: # instance.add_tag('node', '0') # break self.firstInstance = reservation.instances[0].id.split('\'')[0] print ">>> Started instance: ", self.firstInstance def registerInstanceToELB(self, lb_name): """ Register the first instance started to the Elastic Load Balancer. """ self.elb_connection.register_instances(load_balancer_name=lb_name, instances=[self.firstInstance]) print ">>> Registered instance '", self.firstInstance, "' to load balancer '" + lb_name + "'" def setUp(self): """ Set's up the auto scaling for the application """ # STEP 1: Load the configurations self.loadConfigs() # STEP 2: Configure the health check for the instances self.configureHealthCheck(self.args.lb_target) # STEP 3: Create a load balancer self.createLoadBalancer(self.args.lb_name, self.args.region, self.args.lb_port, self.args.instance_port, self.args.protocol) # STEP 4: Start the first instance self.startInstance(self.args.ami_id, self.args.key_name, self.args.region, self.args.instance_type) # STEP 5: Register the instance to the load balancer created in STEP 4 self.registerInstanceToELB(self.args.lb_name) # STEP 6: Create launch configuration to launch instances by auto scale self.createLaunchConfiguration(self.args.lc_name, self.args.ami_id, self.args.key_name) # STEP 7: Create a auto scale group which will manage the 
instances started by auto scaling self.createAutoScaleGroup(self.args.asg_name) # STEP 8: Create a auto scaling policy to say add/remove a node self.createAutoScalePolicy(self.args.asp_name) # STEP 9: Create a trigger, so that auto scaling can trigger it to start # or remove a instance from auto scaling group self.createTrigger(self.args.trigger, self.args.measure, self.args.asg_name)
class BotoBalanceInterface(BalanceInterface):
    """Thin boto-based adapter that fulfils BalanceInterface by delegating
    every operation to an ELBConnection, optionally capturing responses as
    mock JSON when `saveclcdata` is enabled."""

    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        """Build the underlying ELBConnection for `clc_host`."""
        #boto.set_stream_logger('foo')
        host = clc_host
        endpoint_path = '/services/LoadBalancing'
        endpoint_port = 8773
        if host.endswith('amazonaws.com'):
            # AWS proper: use the ELB hostname, HTTPS port and root path.
            host = host.replace('ec2', 'elasticloadbalancing', 1)
            endpoint_path = '/'
            endpoint_port = 443
        region = RegionInfo(name='eucalyptus', endpoint=host)
        self.conn = ELBConnection(access_id, secret_key, region=region,
                                  port=endpoint_port, path=endpoint_path,
                                  is_secure=True, security_token=token,
                                  debug=0)
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        """Serialize `obj` to the file `name` for later use as mock data."""
        out = open(name, 'w')
        json.dump(obj, out, cls=BotoJsonBalanceEncoder, indent=2)
        out.close()

    def create_load_balancer(self, name, zones, listeners, subnets=None,
                             security_groups=None, scheme='internet-facing'):
        """Forward load-balancer creation to boto."""
        return self.conn.create_load_balancer(name, zones, listeners,
                                              subnets, security_groups,
                                              scheme)

    def delete_load_balancer(self, name):
        """Forward deletion of the named balancer to boto."""
        return self.conn.delete_load_balancer(name)

    def get_all_load_balancers(self, load_balancer_names=None):
        """List balancers, capturing the response when saveclcdata is on."""
        balancers = self.conn.get_all_load_balancers(load_balancer_names)
        if not self.saveclcdata:
            return balancers
        self.__save_json__(balancers, "mockdata/ELB_Balancers.json")
        return balancers

    def deregister_instances(self, load_balancer_name, instances):
        """Forward instance deregistration to boto."""
        return self.conn.deregister_instances(load_balancer_name, instances)

    def register_instances(self, load_balancer_name, instances):
        """Forward instance registration to boto."""
        return self.conn.register_instances(load_balancer_name, instances)

    def create_load_balancer_listeners(self, name, listeners):
        """Forward listener creation to boto."""
        return self.conn.create_load_balancer_listeners(name, listeners)

    def delete_load_balancer_listeners(self, name, ports):
        """Forward listener deletion to boto."""
        return self.conn.delete_load_balancer_listeners(name, ports)

    def configure_health_check(self, name, health_check):
        """Forward health-check configuration to boto."""
        return self.conn.configure_health_check(name, health_check)

    def describe_instance_health(self, load_balancer_name, instances=None):
        """Report per-instance health, capturing the response if asked."""
        states = self.conn.describe_instance_health(load_balancer_name,
                                                    instances)
        if not self.saveclcdata:
            return states
        self.__save_json__(states, "mockdata/ELB_Instances.json")
        return states
class BotoBalanceInterface(BalanceInterface):
    # ELBConnection proxied by this interface; built in __init__().
    conn = None
    # When True, each response is also dumped to mockdata/*.json.
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        """Connect to `clc_host` with the given credentials/session token.

        A Eucalyptus CLC is assumed (port 8773, /services/LoadBalancing);
        hosts ending in 'amazonaws.com' are rewritten from the EC2 hostname
        to the ELB hostname and contacted on 443 with a root path.
        """
        #boto.set_stream_logger('foo')
        path = '/services/LoadBalancing'
        port = 8773
        # len('amazonaws.com') == 13, so this slice test is an endswith().
        if clc_host[len(clc_host) - 13:] == 'amazonaws.com':
            clc_host = clc_host.replace('ec2', 'elasticloadbalancing', 1)
            path = '/'
            reg = None
            port = 443
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        self.conn = ELBConnection(access_id, secret_key, region=reg, port=port, path=path, is_secure=True, security_token=token, debug=0)
        # Self-signed certs are common on private clouds; skip validation.
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        """Dump `obj` to file `name` as JSON (mock-data capture helper)."""
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonBalanceEncoder, indent=2)
        f.close()

    def create_load_balancer(self, name, zones, listeners, subnets=None, security_groups=None, scheme='internet-facing'):
        """Create a load balancer; passes straight through to boto."""
        return self.conn.create_load_balancer(name, zones, listeners, subnets, security_groups, scheme)

    def delete_load_balancer(self, name):
        """Delete the named load balancer."""
        return self.conn.delete_load_balancer(name)

    def get_all_load_balancers(self, load_balancer_names=None):
        """Fetch all (or the named) load balancers by issuing the
        DescribeLoadBalancers query directly and parsing the XML body.

        Raises self.conn.ResponseError on an empty body or non-200 status.
        """
        params = {}
        if load_balancer_names:
            # NOTE(review): build_list_params is looked up on self, not on
            # self.conn — confirm BalanceInterface (or a base class)
            # actually provides it.
            self.build_list_params(params, load_balancer_names,
                                   'LoadBalancerNames.member.%d')
        http_request = self.conn.build_base_http_request(
            'GET', '/', None, params, {}, '', self.conn.server_name())
        http_request.params['Action'] = 'DescribeLoadBalancers'
        http_request.params['Version'] = self.conn.APIVersion
        response = self.conn._mexe(http_request, override_num_retries=2)
        body = response.read()
        boto.log.debug(body)
        if not body:
            boto.log.error('Null body %s' % body)
            raise self.conn.ResponseError(response.status, response.reason, body)
        elif response.status == 200:
            # Parse the XML into a ResultSet of LoadBalancer objects.
            obj = boto.resultset.ResultSet([
                ('member', boto.ec2.elb.loadbalancer.LoadBalancer)
            ])
            h = boto.handler.XmlHandler(obj, self.conn)
            import xml.sax
            xml.sax.parseString(body, h)
            if self.saveclcdata:
                self.__save_json__(obj, "mockdata/ELB_Balancers.json")
            return obj
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.conn.ResponseError(response.status, response.reason, body)

    def deregister_instances(self, load_balancer_name, instances):
        """Remove instances from the named load balancer."""
        return self.conn.deregister_instances(load_balancer_name, instances)

    def register_instances(self, load_balancer_name, instances):
        """Add instances to the named load balancer."""
        return self.conn.register_instances(load_balancer_name, instances)

    def create_load_balancer_listeners(self, name, listeners):
        """Create listeners on the named load balancer."""
        return self.conn.create_load_balancer_listeners(name, listeners)

    def delete_load_balancer_listeners(self, name, ports):
        """Delete the listeners on the given ports."""
        return self.conn.delete_load_balancer_listeners(name, ports)

    def configure_health_check(self, name, health_check):
        """Attach a health check to the named load balancer."""
        return self.conn.configure_health_check(name, health_check)

    def describe_instance_health(self, load_balancer_name, instances=None):
        """Return the health state of the balancer's instances."""
        obj = self.conn.describe_instance_health(load_balancer_name, instances)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Instances.json")
        return obj
# --- Tag the load-generator instance, then build the ELB and the ASG. ---
# NOTE(review): lg_instance, TAGK, TAGV, ZONE, DC_IMAGE, TYPE, sg and sgs
# are defined earlier in this script, outside this excerpt.
lg_instance.add_tag(TAGK, TAGV)
time.sleep(5)  # brief pause before reading back the instance state
print lg_instance.id
print lg_instance.dns_name
print lg_instance.tags
print 'Creating ELB'
# initialize elastic load balancer
conn2 = ELBConnection(os.environ['AWS_ACCESS_KEY_ID'],
                      os.environ['AWS_SECRET_KEY'])
# set heartbeat: the ELB health check polls the load generator's
# /heartbeat URL over HTTP on port 80
page = 'HTTP:80' + '/heartbeat?lg=' + lg_instance.dns_name
hc = HealthCheck(interval=20, healthy_threshold=3, unhealthy_threshold=5,
                 target=page)
# set port 80 (ELB port 80 -> instance port 80, plain HTTP)
elb = conn2.create_load_balancer('elb', [ZONE], [(80, 80, 'http')])
# allow all traffic
conn2.apply_security_groups_to_lb('elb', [sg.id])
conn2.configure_health_check('elb', hc)
print elb.dns_name
print 'Creating ASG'
# initialize launch config
conn3 = AutoScaleConnection(os.environ['AWS_ACCESS_KEY_ID'],
                            os.environ['AWS_SECRET_KEY'])
config = LaunchConfiguration(name='config', image_id=DC_IMAGE,
                             security_groups=sgs, instance_type=TYPE,
                             instance_monitoring=True)
conn3.create_launch_configuration(config)
# initialize auto scaling group: 2-5 instances behind 'elb', using ELB
# health checks, with the tag propagated to launched instances
ag = AutoScalingGroup(connection=conn3, name='gp', load_balancers=['elb'],
                      availability_zones=[ZONE], health_check_type='ELB',
                      health_check_period=60, launch_config=config,
                      min_size=2, max_size=5, desired_capacity=2,
                      tags=[Tag(key=TAGK, value=TAGV,
                                propagate_at_launch=True, resource_id='gp',
                                resource_type='auto-scaling-group')])
conn3.create_auto_scaling_group(ag)
# define the scaling policies
class AutoScale: def __init__(self, args): """ Initializing basic variables needed for auto scaling """ self.configs = ConfigParser.RawConfigParser() self.args = args self.test_props = {} self.props = {} self.ec2_connection = EC2Connection(self.args.access_key, self.args.secret_key) self.autoscale_connection = AutoScaleConnection(self.args.access_key, self.args.secret_key) self.elb_connection = ELBConnection(self.args.access_key, self.args.secret_key) self.cw_connection = CloudWatchConnection(self.args.access_key, self.args.secret_key) self.firstInstance = None self.launchConfiguration = None self.healthCheck = None def loadConfigs(self): """ FIX ME: Currently doesnt do anything This method will load the configurations from boto config file if present else will accept parameters passed by user. """ if os.path.isfile("/etc/boto.cfg"): self.configs.read("/etc/boto.cfg") conf = self.configs.sections() self.populateConfigs(conf) if os.path.isfile("~/.boto"): self.configs.read("~/.boto") conf = self.configs.sections() self.populateConfigs(conf) print ">>> Loaded configs" def populateConfigs(self, sections): for section in sections: self.boto_props[section] = self.configs.items(section) for item in self.boto_props[section]: key, value = item if not self.props.has_key(key): self.props[key] = value def createLaunchConfiguration(self, lc_name, ami_id, key_name): """ Creates launch configuration for the auto scaling cluster """ self.launchConfiguration = LaunchConfiguration(name = lc_name, image_id = ami_id, key_name = key_name) self.autoscale_connection.create_launch_configuration(self.launchConfiguration) print ">>> Created launch configuration: " + lc_name def createAutoScaleGroup(self, asg_name): """ Create a Auto scaling group for the auto scaling cluster """ autoScalingGroup = AutoScalingGroup(group_name = asg_name, load_balancers = [self.args.lb_name], launch_config = self.launchConfiguration, min_size = self.args.min_size, max_size = self.args.max_size, 
availability_zones = ['us-east-1a']) self.autoscale_connection.create_auto_scaling_group(autoScalingGroup) print ">>> Created auto scaling group: " + asg_name def createTrigger(self, trigger_name, measure, asg_name): """ Trigger to spawn new instances as per specific metrics """ alarm_actions = [] dimensions = {"AutoScalingGroupName" : asg_name} policies = self.autoscale_connection.get_all_policies(as_group=self.args.asg_name, policy_names=[self.args.asp_name]) for policy in policies: alarm_actions.append(policy.policy_arn) alarm = MetricAlarm(name = trigger_name, namespace = "AWS/EC2", metric = measure, statistic = "Average", comparison = ">=", threshold = 50, period = 60, unit = "Percent", evaluation_periods = 2, alarm_actions = alarm_actions, dimensions = dimensions) self.cw_connection.create_alarm(alarm) print ">>> Created trigger: "+self.args.trigger def createAutoScalePolicy(self, asp_name): """ Creates a Auto scaling policy to Add/Remove a instance from auto scaling cluster """ self.autoScalingUpPolicy = ScalingPolicy(name = asp_name+'-up', adjustment_type = "ChangeInCapacity", as_name = self.args.asg_name, scaling_adjustment = 1, cooldown = 180) self.autoScalingDownPolicy = ScalingPolicy(name = asp_name+'-down', adjustment_type = "ChangeInCapacity", as_name = self.args.asg_name, scaling_adjustment = -1, cooldown = 180) self.autoscale_connection.create_scaling_policy(self.autoScalingUpPolicy) self.autoscale_connection.create_scaling_policy(self.autoScalingDownPolicy) print ">>> Created auto scaling policy: " + asp_name def configureHealthCheck(self, target): """ Configures health check for the cluster """ self.healthCheck = HealthCheck(target = target, timeout = 5) print ">>> Configured health check for: " + target def createLoadBalancer(self, lb_name, region, lb_port, instance_port, protocol): """ Creates a load balancer for cluster """ listener = (int(lb_port), int(instance_port), protocol) tuple_list =[] tuple_list.append(listener) lbs = 
self.elb_connection.get_all_load_balancers() for lb in lbs: if lb.name != lb_name: self.elb_connection.create_load_balancer(lb_name, [region], tuple_list) self.elb_connection.configure_health_check(name = lb_name, health_check = self.healthCheck) print ">>> Created load balancer: " + lb_name else: print "Load balancer with name '"+lb_name+"' already exists" def startInstance(self, image_id, key_name, region, instance_type): """ Starts the first instance which will be serving requests irrespective of auto scaling instances. """ reservation = self.ec2_connection.run_instances(image_id=image_id, min_count=1, max_count=1, placement=region, key_name=key_name, instance_type=instance_type) # for instance in reservation.instances: # instance.add_tag('node', '0') # break self.firstInstance = reservation.instances[0].id.split('\'')[0] print ">>> Started instance: ", self.firstInstance def registerInstanceToELB(self, lb_name): """ Register the first instance started to the Elastic Load Balancer. """ self.elb_connection.register_instances(load_balancer_name = lb_name, instances = [self.firstInstance]) print ">>> Registered instance '",self.firstInstance,"' to load balancer '"+lb_name+"'" def setUp(self): """ Set's up the auto scaling for the application """ # STEP 1: Load the configurations self.loadConfigs() # STEP 2: Configure the health check for the instances self.configureHealthCheck(self.args.lb_target) # STEP 3: Create a load balancer self.createLoadBalancer(self.args.lb_name, self.args.region, self.args.lb_port, self.args.instance_port, self.args.protocol) # STEP 4: Start the first instance self.startInstance(self.args.ami_id, self.args.key_name, self.args.region, self.args.instance_type) # STEP 5: Register the instance to the load balancer created in STEP 4 self.registerInstanceToELB(self.args.lb_name) # STEP 6: Create launch configuration to launch instances by auto scale self.createLaunchConfiguration(self.args.lc_name, self.args.ami_id, self.args.key_name) # STEP 
7: Create a auto scale group which will manage the instances started by auto scaling self.createAutoScaleGroup(self.args.asg_name) # STEP 8: Create a auto scaling policy to say add/remove a node self.createAutoScalePolicy(self.args.asp_name) # STEP 9: Create a trigger, so that auto scaling can trigger it to start # or remove a instance from auto scaling group self.createTrigger(self.args.trigger, self.args.measure, self.args.asg_name)