def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None,
                      **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
    :return: A connection to Amazon's Auto Scaling Service
    """
    from boto.ec2.autoscale import AutoScaleConnection
    return AutoScaleConnection(aws_access_key_id, aws_secret_access_key,
                               **kwargs)
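A minimal usage sketch for the helper above. When the arguments are omitted, boto falls back to its usual credential chain (environment variables, ~/.boto, IAM instance role); nothing here is specific to this codebase.

# Hypothetical usage of connect_autoscale; credentials come from the
# standard boto credential chain when not passed explicitly.
conn = connect_autoscale()
for group in conn.get_all_groups():
    print group.name, group.desired_capacity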
def get_all_group_instances_and_conn(
        groups_names=get_autoscale_groups()['groups']):
    # NB: the default argument is evaluated once, at definition time,
    # so the group list is frozen when this module is imported.
    conn = AutoScaleConnection()
    global autoscale_conn
    autoscale_conn = conn
    ec2 = boto.ec2.connect_to_region('us-east-1')
    selected_group_name = random.choice(groups_names)
    logger.info('Selected autoscale group: %s' % selected_group_name)
    group = conn.get_all_groups(names=[selected_group_name])[0]
    if not group.instances:
        logger.info("No working instances in selected group %s"
                    % selected_group_name)
        upload_logs_to_s3()
        sys.exit()
    instance_ids = [i.instance_id for i in group.instances]
    instances = ec2.get_only_instances(instance_ids)
    return instances, conn
def autoscale_group_hosts(group_name):
    import boto.ec2
    from boto.ec2.autoscale import AutoScaleConnection
    ec2 = boto.connect_ec2()
    conn = AutoScaleConnection()
    groups = conn.get_all_groups(names=[])
    groups = [group for group in groups if group.name.startswith(group_name)]
    instance_ids = []
    for group in groups:
        print "group name:", group.name
        instance_ids.extend([i.instance_id for i in group.instances])
    # Fetch the instances once, after collecting ids from every group;
    # calling get_only_instances inside the loop re-fetched (and
    # duplicated) the instances of all earlier groups on each pass.
    instances = ec2.get_only_instances(instance_ids)
    return (set(i.private_ip_address for i in instances),
            instances[0].id,
            instances[0].tags.get("aws:autoscaling:groupName"))
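A short usage sketch for autoscale_group_hosts; the 'web' prefix is a placeholder for whatever your ASG names start with.

# 'web' is a placeholder prefix; any ASG whose name starts with it matches.
ips, first_id, asg_name = autoscale_group_hosts('web')
print "first instance %s in group %s" % (first_id, asg_name)
for ip in ips:
    print ip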
def __init__(self, args):
    """Initialize the basic variables needed for auto scaling."""
    self.configs = ConfigParser.RawConfigParser()
    self.args = args
    self.test_props = {}
    self.props = {}
    self.ec2_connection = EC2Connection(self.args.access_key,
                                        self.args.secret_key)
    self.autoscale_connection = AutoScaleConnection(self.args.access_key,
                                                    self.args.secret_key)
    self.elb_connection = ELBConnection(self.args.access_key,
                                        self.args.secret_key)
    self.cw_connection = CloudWatchConnection(self.args.access_key,
                                              self.args.secret_key)
    self.firstInstance = None
    self.launchConfiguration = None
    self.healthCheck = None
def find_unused_launch_configs():
    conn = AutoScaleConnection()
    autoscale_groups = conn.get_all_groups(max_records=100)
    launch_configs = conn.get_all_launch_configurations(max_records=100)

    launch_config_names = {lc.name for lc in launch_configs}
    used_launch_config_names = {asg.launch_config_name
                                for asg in autoscale_groups}
    unused_launch_config_names = launch_config_names - used_launch_config_names

    print "Autoscale Groups and Current Launch Configs:"
    print "{:<40}{:<40}".format("ASG", "LC")
    for asg in autoscale_groups:
        #print "asg:", asg.name, "-> lc:", asg.launch_config_name
        print "{:<40}{:<40}".format(asg.name, asg.launch_config_name)

    print "\nUnused Launch Configs: (launch configs without an autoscale group)"
    unused_launch_config_names = sorted(unused_launch_config_names)
    for unused_launch_config in unused_launch_config_names:
        print "\t", unused_launch_config

    return unused_launch_config_names
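A natural follow-up is deleting what the function reports. A minimal sketch built on find_unused_launch_configs(); delete_launch_configuration is stock boto, while the dry_run flag is an assumption added here for safety.

# Hypothetical cleanup pass; dry_run is our own guard, not a boto flag.
def delete_unused_launch_configs(dry_run=True):
    conn = AutoScaleConnection()
    for name in find_unused_launch_configs():
        if dry_run:
            print "would delete launch config:", name
        else:
            conn.delete_launch_configuration(name)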
def __init__(self, clc_host, access_id, secret_key, token):
    #boto.set_stream_logger('foo')
    path = '/services/AutoScaling'
    port = 8773
    if clc_host.endswith('amazonaws.com'):
        # Talking to AWS proper: swap the EC2 endpoint for the Auto
        # Scaling one and use the standard HTTPS port.
        clc_host = clc_host.replace('ec2', 'autoscaling', 1)
        path = '/'
        port = 443
    reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
    self.conn = AutoScaleConnection(access_id, secret_key, region=reg,
                                    port=port, path=path, is_secure=True,
                                    security_token=token, debug=0)
    self.conn.https_validate_certificates = False
    self.conn.http_connection_kwargs['timeout'] = 30
def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None,
                      **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
    :return: A connection to Amazon's Auto Scaling Service

    :type use_block_device_types: bool
    :param use_block_device_types: Specifies whether to return described
        Launch Configs with block device mappings containing block device
        types, or a list of old style block device mappings (deprecated).
        This defaults to false for compatibility with the old incorrect
        style.
    """
    from boto.ec2.autoscale import AutoScaleConnection
    return AutoScaleConnection(aws_access_key_id, aws_secret_access_key,
                               **kwargs)
def launch_auto_scaling(stage='development'):
    config = get_provider_dict()
    from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, \
        LaunchConfiguration, Trigger

    conn = AutoScaleConnection(
        fabric.api.env.conf['AWS_ACCESS_KEY_ID'],
        fabric.api.env.conf['AWS_SECRET_ACCESS_KEY'],
        host='%s.autoscaling.amazonaws.com' % config['location'][:-1])

    # Iterate over (name, values) pairs; iterating the dict directly
    # would only yield the keys.
    for name, values in config.get(stage, {}).get('autoscale', {}).items():
        if any(group.name == name for group in conn.get_all_groups()):
            # fabric.colors has no orange(); yellow is the closest built-in.
            fabric.api.warn(fabric.colors.yellow(
                'Autoscale group %s already exists' % name))
            continue

        lc = LaunchConfiguration(name='%s-launch-config' % name,
                                 image_id=values['image'],
                                 key_name=config['key'])
        conn.create_launch_configuration(lc)

        ag = AutoScalingGroup(group_name=name,
                              load_balancers=values.get('load-balancers'),
                              availability_zones=[config['location']],
                              launch_config=lc,
                              min_size=values['min-size'],
                              max_size=values['max-size'])
        conn.create_auto_scaling_group(ag)

        if 'min-cpu' in values and 'max-cpu' in values:
            tr = Trigger(name='%s-trigger' % name, autoscale_group=ag,
                         measure_name='CPUUtilization',
                         statistic='Average', unit='Percent',
                         dimensions=[('AutoScalingGroupName', ag.name)],
                         period=60,
                         lower_threshold=values['min-cpu'],
                         lower_breach_scale_increment='-1',
                         upper_threshold=values['max-cpu'],
                         upper_breach_scale_increment='2',
                         breach_duration=60)
            conn.create_trigger(tr)
def set_endpoint(self, endpoint):
    #boto.set_stream_logger('scale')
    path = '/services/AutoScaling'
    reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
    port = 8773
    if endpoint.endswith('amazonaws.com'):
        endpoint = endpoint.replace('ec2', 'autoscaling', 1)
        path = '/'
        reg = RegionInfo(endpoint=endpoint)
        port = 443
    self.conn = AutoScaleConnection(self.access_id, self.secret_key,
                                    region=reg, port=port, path=path,
                                    is_secure=True,
                                    security_token=self.token, debug=0)
    self.conn.APIVersion = '2011-01-01'
    if not endpoint.endswith('amazonaws.com'):
        self.conn.auth_region_name = 'Eucalyptus'
    self.conn.https_validate_certificates = False
    self.conn.http_connection_kwargs['timeout'] = 30
def main():
    # sys.exit()  # disable self-killer AGAIN
    conn = AutoScaleConnection()
    instance_id = get_instance_metadata()['instance-id']
    log_file = '/tmp/remote_instance_starter2.log'
    flag, reason = check_logs_status(log_file)
    if flag and reason:
        s3_conn = boto.connect_s3()
        bucket = s3_conn.get_bucket(BUCKET_NAME)
        k = Key(bucket)
        k.key = BUCKET_KEY
        global log_file_path
        time.sleep(70)
        # Try to upload logs prior to stopping the server
        os.system('python upload_logs_to_s3.py')
        os.system('rm %s' % log_file)
        k.get_contents_to_filename(log_file_path)
        logger.warning("Instance with id=%s was terminated"
                       " due to reason='%s'. "
                       "Instance was killed by itself.",
                       instance_id, reason)
        k.set_contents_from_filename(log_file_path)
        conn.terminate_instance(instance_id, decrement_capacity=True)
def create_AutoScaling():
    print "Creating AutoScaling..."
    # establish connection
    as_conn = AutoScaleConnection(AWSAccessKeyId, AWSSecretKey)

    # create launch configuration
    global lc
    lc = LaunchConfiguration(name='lc',
                             image_id=DATA_CEN_AMI,
                             key_name=ACCESS_KEY,
                             instance_monitoring=True,
                             security_groups=[SECURITY_GRP],
                             instance_type=MACHINE_TYPE)
    as_conn.create_launch_configuration(lc)

    # create a tag for the autoscaling group
    as_tag = Tag(key="Project", value="2.2",
                 propagate_at_launch=True, resource_id='my_group')

    # create the autoscaling group
    global ag
    ag = AutoScalingGroup(group_name='my_group',
                          load_balancers=['myELB'],
                          availability_zones=['us-east-1a'],
                          launch_config=lc,
                          min_size=MIN_SIZE, max_size=MAX_SIZE,
                          connection=as_conn,
                          tags=[as_tag])
    # associate the autoscaling group with the launch configuration
    as_conn.create_auto_scaling_group(ag)

    # build the scaling policies
    scale_up_policy = ScalingPolicy(name='scale_up',
                                    adjustment_type='ChangeInCapacity',
                                    as_name='my_group',
                                    scaling_adjustment=1, cooldown=60)
    scale_down_policy = ScalingPolicy(name='scale_down',
                                      adjustment_type='ChangeInCapacity',
                                      as_name='my_group',
                                      scaling_adjustment=-1, cooldown=60)
    # register the scaling policies
    as_conn.create_scaling_policy(scale_up_policy)
    as_conn.create_scaling_policy(scale_down_policy)

    # re-fetch the policies to pick up the generated ARNs
    scale_up_policy = as_conn.get_all_policies(
        as_group='my_group', policy_names=['scale_up'])[0]
    scale_down_policy = as_conn.get_all_policies(
        as_group='my_group', policy_names=['scale_down'])[0]

    # create the CloudWatch connection
    cloudwatch = CloudWatchConnection(aws_access_key_id=AWSAccessKeyId,
                                      aws_secret_access_key=AWSSecretKey,
                                      is_secure=True)
                                      # region='us-east-1a')

    # associate the CloudWatch alarms with the autoscaling group
    alarm_dimensions = {"AutoScalingGroupName": 'my_group'}

    # create the scale-up alarm
    scale_up_alarm = MetricAlarm(name='scale_up_on_cpu',
                                 namespace='AWS/EC2',
                                 metric='CPUUtilization',
                                 statistic='Average',
                                 comparison='>', threshold='50',
                                 period='60', evaluation_periods=2,
                                 alarm_actions=[scale_up_policy.policy_arn],
                                 dimensions=alarm_dimensions)
    cloudwatch.create_alarm(scale_up_alarm)

    # create the scale-down alarm
    scale_down_alarm = MetricAlarm(name='scale_down_on_cpu',
                                   namespace='AWS/EC2',
                                   metric='CPUUtilization',
                                   statistic='Average',
                                   comparison='<', threshold='20',
                                   period='60', evaluation_periods=1,
                                   alarm_actions=[scale_down_policy.policy_arn],
                                   dimensions=alarm_dimensions)
    cloudwatch.create_alarm(scale_down_alarm)

    print "AutoScaling created successfully"
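To verify the wiring without waiting for a CPU alarm to fire, the policies can be triggered by hand. A small sketch using boto's execute_policy (a real AutoScaleConnection method); the group and policy names match the function above.

# Manually trigger the scale-up policy to confirm the ASG reacts;
# honor_cooldown=False forces execution even inside the cooldown window.
as_conn = AutoScaleConnection(AWSAccessKeyId, AWSSecretKey)
as_conn.execute_policy('scale_up', as_group='my_group', honor_cooldown=False)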
print lg_instance.tags

print 'Creating ELB'
# initialize the elastic load balancer
conn2 = ELBConnection(os.environ['AWS_ACCESS_KEY_ID'],
                      os.environ['AWS_SECRET_KEY'])
# set the heartbeat health check
page = 'HTTP:80' + '/heartbeat?lg=' + lg_instance.dns_name
hc = HealthCheck(interval=20,
                 healthy_threshold=3,
                 unhealthy_threshold=5,
                 target=page)
# listen on port 80
elb = conn2.create_load_balancer('elb', [ZONE], [(80, 80, 'http')])
# allow all traffic
conn2.apply_security_groups_to_lb('elb', [sg.id])
conn2.configure_health_check('elb', hc)
print elb.dns_name

print 'Creating ASG'
# initialize the launch config
conn3 = AutoScaleConnection(os.environ['AWS_ACCESS_KEY_ID'],
                            os.environ['AWS_SECRET_KEY'])
config = LaunchConfiguration(name='config', image_id=DC_IMAGE,
                             security_groups=sgs, instance_type=TYPE,
                             instance_monitoring=True)
conn3.create_launch_configuration(config)
# initialize the auto scaling group
ag = AutoScalingGroup(connection=conn3, name='gp',
                      load_balancers=['elb'],
                      availability_zones=[ZONE],
                      health_check_type='ELB', health_check_period=60,
                      launch_config=config,
                      min_size=2, max_size=5, desired_capacity=2,
                      tags=[Tag(key=TAGK, value=TAGV,
                                propagate_at_launch=True,
                                resource_id='gp',
                                resource_type='auto-scaling-group')])
conn3.create_auto_scaling_group(ag)
# define the scaling policies
scale_up_policy = ScalingPolicy(name='scale_up',
                                adjustment_type='ChangeInCapacity',
                                as_name='gp',
                                scaling_adjustment=1, cooldown=60)
scale_down_policy = ScalingPolicy(name='scale_down',
                                  adjustment_type='ChangeInCapacity',
                                  as_name='gp',
                                  scaling_adjustment=-1, cooldown=60)
"--mothball", type=str, help="reduce the instances for all autoscaling group(s) " "in a stack to zero") parser.add_argument( "-r", "--reopen", nargs=2, help="increase the instances for all autoscaling group(s) " "in a stack to min:max:desired") args = parser.parse_args() # connect to AWS try: cfn = CloudFormationConnection() asg = AutoScaleConnection() except: print "AWS connect error" else: # get the key data data = getStackAutoscalingGroupData(cfn, asg) # list if explicitly listing or not doing anything else if args.list or args.mothball is None and args.reopen is None: for stackname in sorted(data, key=data.__getitem__): print "{s}:".format(s=stackname, ) for asginfo in data[stackname]: print " {n} {mn}:{mx}:{d}".format( n=asginfo['name'], mn=asginfo['min'], mx=asginfo['max'], d=asginfo['desired'],
def test_basic(self):
    # NB: as it says on the tin these are really basic tests that only
    # (lightly) exercise read-only behaviour - and that's only if you
    # have any autoscale groups to introspect. It's useful, however, to
    # catch simple errors
    print('--- running %s tests ---' % self.__class__.__name__)
    c = AutoScaleConnection()

    self.assertTrue(repr(c).startswith('AutoScaleConnection'))

    groups = c.get_all_groups()
    for group in groups:
        self.assertIsInstance(group, AutoScalingGroup)

        # get activities
        activities = group.get_activities()
        for activity in activities:
            self.assertIsInstance(activity, Activity)

    # get launch configs
    configs = c.get_all_launch_configurations()
    for config in configs:
        self.assertIsInstance(config, LaunchConfiguration)

    # get policies
    policies = c.get_all_policies()
    for policy in policies:
        self.assertIsInstance(policy, ScalingPolicy)

    # get scheduled actions
    actions = c.get_all_scheduled_actions()
    for action in actions:
        self.assertIsInstance(action, ScheduledUpdateGroupAction)

    # get instances
    instances = c.get_all_autoscaling_instances()
    for instance in instances:
        self.assertIsInstance(instance, Instance)

    # get all scaling process types
    ptypes = c.get_all_scaling_process_types()
    for ptype in ptypes:
        # assertTrue(ptype, ProcessType) only checked truthiness
        self.assertIsInstance(ptype, ProcessType)

    # get adjustment types
    adjustments = c.get_all_adjustment_types()
    for adjustment in adjustments:
        self.assertIsInstance(adjustment, AdjustmentType)

    # get metrics collection types
    types = c.get_all_metric_collection_types()
    self.assertIsInstance(types, MetricCollectionTypes)

    # create the simplest possible AutoScale group
    # first create the launch configuration
    time_string = '%d' % int(time.time())
    lc_name = 'lc-%s' % time_string
    lc = LaunchConfiguration(name=lc_name, image_id='ami-2272864b',
                             instance_type='t1.micro')
    c.create_launch_configuration(lc)
    found = False
    lcs = c.get_all_launch_configurations()
    for lc in lcs:
        if lc.name == lc_name:
            found = True
            break
    assert found

    # now create the autoscaling group
    group_name = 'group-%s' % time_string
    group = AutoScalingGroup(name=group_name, launch_config=lc,
                             availability_zones=['us-east-1a'],
                             min_size=1, max_size=1)
    c.create_auto_scaling_group(group)
    found = False
    groups = c.get_all_groups()
    for group in groups:
        if group.name == group_name:
            found = True
            break
    assert found

    # now create a tag
    tag = Tag(key='foo', value='bar', resource_id=group_name,
              propagate_at_launch=True)
    c.create_or_update_tags([tag])

    found = False
    tags = c.get_all_tags()
    for tag in tags:
        if tag.resource_id == group_name and tag.key == 'foo':
            found = True
            break
    assert found

    c.delete_tags([tag])

    # shutdown instances and wait for them to disappear
    group.shutdown_instances()
    instances = True
    while instances:
        time.sleep(5)
        groups = c.get_all_groups()
        for group in groups:
            if group.name == group_name:
                if not group.instances:
                    instances = False

    group.delete()
    lc.delete()

    found = True
    while found:
        found = False
        time.sleep(5)
        tags = c.get_all_tags()
        for tag in tags:
            if tag.resource_id == group_name and tag.key == 'foo':
                found = True

    assert not found

    print('--- tests completed ---')
def __init__(self, region, **kwargs):
    self.conn = AutoScaleConnection(region=get_region(region), **kwargs)
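For reference, if a get_region helper is not already defined, boto ships an equivalent one-call alternative; a minimal sketch using the stock boto.ec2.autoscale.connect_to_region.

import boto.ec2.autoscale

# Stock boto equivalent of the wrapper above: resolve the region by
# name and open the Auto Scaling connection in one call.
conn = boto.ec2.autoscale.connect_to_region('us-west-2')
print conn.get_all_groups()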
def setup(CONF):
    global out
    lookup_tbl = {
        'name': CONF['NAME'],
    }

    conn = AutoScaleConnection()
    out['conn'] = conn

    # Launch Configurations
    LC = CONF['LC']
    LC['name'] = LC['name'] % lookup_tbl
    lc = LaunchConfiguration(**LC)
    conn.create_launch_configuration(lc)
    out['lc'] = lc

    # Auto Scaling Group
    ASG = CONF['ASG']
    ASG['group_name'] = ASG['group_name'] % lookup_tbl
    ASG['launch_config'] = lc

    groups = conn.get_all_groups(names=[ASG['group_name']])
    if len(groups) > 0:
        # update the existing group
        asg = groups[0]
        for k in ASG:
            # asg is not subscriptable; the try/except makes sure the
            # attribute exists before it is overwritten
            try:
                asg.__getattribute__(k)
            except AttributeError:
                continue
            asg.__setattr__(k, ASG[k])
        asg.launch_config_name = LC['name']
        asg.update()
        out['asg'] = asg
    else:
        # create a new group
        asg = AutoScalingGroup(**ASG)
        conn.create_auto_scaling_group(asg)

    # ASG Tags
    ASG_TAGS = CONF['ASG_TAGS']
    for i in ASG_TAGS:
        if 'propagate_at_launch' not in i:
            i['propagate_at_launch'] = True
        i['key'] = i['key'] % lookup_tbl
        i['value'] = i['value'] % lookup_tbl
    tags = [Tag(**dict(x.items() + [('resource_id', ASG['group_name'])]))
            for x in ASG_TAGS]  # Python 2 dict merge
    conn.create_or_update_tags(tags)

    # Triggers (Scaling Policy / CloudWatch Alarm)
    conn_cw = connect_to_region(CONF['REGION'])

    TRIGGERS = CONF['TRIGGERS']
    for T in TRIGGERS:
        T['policy']['name'] = T['policy']['name'] % lookup_tbl
        T['policy']['as_name'] = ASG['group_name']
        T['alarm']['dimensions'] = {'AutoScalingGroupName': ASG['group_name']}
        T['alarm']['alarm_actions'] = None

        if 'name' in T['alarm']:
            T['alarm']['name'] = T['alarm']['name'] % lookup_tbl
        else:
            T['alarm']['name'] = T['policy']['name']

        # Policies are safely overwritten, so not checked for existence
        conn.create_scaling_policy(ScalingPolicy(**T['policy']))
        policy = conn.get_all_policies(as_group=ASG['group_name'],
                                       policy_names=[T['policy']['name']])[0]

        T['alarm']['alarm_actions'] = [policy.policy_arn]
        # existence check result is unused; create_alarm overwrites anyway
        hits = conn_cw.describe_alarms(alarm_names=[T['alarm']['name']])
        conn_cw.create_alarm(MetricAlarm(**T['alarm']))
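setup() expects a CONF dict of a particular shape. An illustrative, entirely hypothetical example, with names and values invented to match the lookups above (the kwargs mirror the LaunchConfiguration, AutoScalingGroup, ScalingPolicy, and MetricAlarm constructors).

# Hypothetical CONF for setup(); every name/value here is illustrative.
CONF = {
    'NAME': 'web',
    'REGION': 'us-east-1',
    'LC': {'name': '%(name)s-lc', 'image_id': 'ami-12345678',
           'instance_type': 'm1.small'},
    'ASG': {'group_name': '%(name)s-asg',
            'availability_zones': ['us-east-1a'],
            'min_size': 1, 'max_size': 4},
    'ASG_TAGS': [{'key': 'Name', 'value': '%(name)s'}],
    'TRIGGERS': [{
        'policy': {'name': '%(name)s-scale-up',
                   'adjustment_type': 'ChangeInCapacity',
                   'scaling_adjustment': 1, 'cooldown': 60},
        'alarm': {'metric': 'CPUUtilization', 'namespace': 'AWS/EC2',
                  'statistic': 'Average', 'comparison': '>',
                  'threshold': 60, 'period': 60, 'evaluation_periods': 2},
    }],
}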
import urllib
import urllib2
import sys
import json as jS

ec2_aws_region = RegionInfo(name=metadata["aws_region"],
                            endpoint=metadata["aws_region_endpoint_ec2"])
ec2_conn = EC2Connection(metadata["access_key"],
                         metadata["secret_key"],
                         region=ec2_aws_region)

autoscale_aws_region = RegionInfo(
    name=metadata["aws_region"],
    endpoint=metadata["aws_region_endpoint_autoscaling"])
autoscale_conn = AutoScaleConnection(metadata["access_key"],
                                     metadata["secret_key"],
                                     region=autoscale_aws_region)

reload_counter = 0

# First find the terminated instances
terminated_instances = []
for group in metadata['autoscaling_group_list']:
    activities = autoscale_conn.get_all_activities(
        max_records=metadata["number_of_activities"],
        autoscale_group=group['as_group_name'])
    for activity in activities:
        instance_id = activity.__dict__.get("description").split(":")[1].strip()
        if len(instance_id) > 10:
            break
        activity_type = activity.__dict__.get("description").split(" ")
}
######################### end parameter block ################################

######################### begin configuration ################################

# make the connections
conn_ec2 = boto.ec2.connect_to_region(
    regionName,
    aws_access_key_id=AWS_ACCESS_KEY,
    aws_secret_access_key=AWS_SECRET_KEY
)
conn_reg = boto.ec2.elb.connect_to_region(regionName)
conn_elb = ELBConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
conn_as = AutoScaleConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
conn_cw = boto.ec2.cloudwatch.connect_to_region(
    regionName,
    aws_access_key_id=AWS_ACCESS_KEY,
    aws_secret_access_key=AWS_SECRET_KEY
)
######################### end configuration ################################

# balancers = elb.get_all_load_balancers()
# print balancers[0]

# retrieve the instances in the autoscale group
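A minimal sketch of that retrieval step, using the conn_as and conn_ec2 connections set up above; the group name is a placeholder.

# Placeholder group name; substitute the real autoscaling group.
autoscaling_group = 'my-asg'
group = conn_as.get_all_groups(names=[autoscaling_group])[0]
instance_ids = [i.instance_id for i in group.instances]
instances = conn_ec2.get_only_instances(instance_ids)
for inst in instances:
    print inst.id, inst.state, inst.private_ip_address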
def add_ingress_rule(dry_run, go_agent_security_group,
                     go_agent_security_group_owner,
                     go_agent_security_group_name):
    """
    For each ASG (app) in each VPC, add a rule to each SG associated with
    the ASG's launch configuration that allows SSH ingress from the GoCD
    agents' SG.

    BEFORE RUNNING THIS SCRIPT!:
    - Use the assume_role bash script to assume the role in the proper
      account/VPC (edx, edge, mckinsey, etc.)
        - If you don't know what this is, ask someone in DevOps.
    - THEN run this script.
    """
    asg_conn = AutoScaleConnection()
    ec2_conn = boto.ec2.connect_to_region('us-east-1')
    asgs = []
    launch_configs = {}
    security_groups = {}

    logging.debug('All ASGs:')
    for group in asg_conn.get_all_groups():
        logging.debug('    {}'.format(group))
        asgs.append(group)

    logging.debug('All launch configurations:')
    for launch_config in asg_conn.get_all_launch_configurations():
        logging.debug('    {}'.format(launch_config))
        launch_configs[launch_config.name] = launch_config

    logging.debug('All security groups:')
    for sec_group in ec2_conn.get_all_security_groups():
        logging.debug('    {}'.format(sec_group))
        security_groups[sec_group.id] = sec_group

    # Validate that each ASG has a launch configuration.
    for group in asgs:
        try:
            logging.info("Launch configuration for ASG '{}' is '{}'.".format(
                group.name, launch_configs[group.launch_config_name]
            ))
        except KeyError:
            logging.error("Launch configuration '{}' for ASG '{}' was not found!".format(
                group.launch_config_name, group.name
            ))
            raise

    # Construct a fake security group for the prod-tools-goagent-sg security
    # group in the edx-tools account. This group will be used to grant the
    # go-agents ingress into the ASG's VPCs.
    go_agent_security_group = boto.ec2.securitygroup.SecurityGroup(
        name=go_agent_security_group_name,
        owner_id=go_agent_security_group_owner,
        id=go_agent_security_group
    )

    # For each launch config, check for the security group. Can support
    # multiple security groups, but the edX DevOps convention is to use a
    # single security group.
    for group in asgs:
        launch_config = launch_configs[group.launch_config_name]
        if len(launch_config.security_groups) > 1:
            err_msg = "Launch config '{}' for ASG '{}' has more than one security group!: {}".format(
                launch_config.name, group.name, launch_config.security_groups
            )
            logging.warning(err_msg)
            continue
        sg_name = launch_config.security_groups[0]
        try:
            # Find the security group.
            sec_group = security_groups[sg_name]
        except KeyError:
            logging.error("Security group '{}' for ASG '{}' was not found!".format(sg_name, group.name))
            # The original fell through here, which would reuse a stale
            # sec_group (or raise NameError on the first pass); skip instead.
            continue
        logging.info('BEFORE: Rules for security group {}:'.format(sec_group.name))
        logging.info(sec_group.rules)
        try:
            # Add the ingress rule to the security group.
            yes_no = six.moves.input("Apply the change to this security group? [Yes] ")
            if yes_no in ("", "y", "Y", "yes"):
                sec_group.authorize(
                    ip_protocol='tcp',
                    from_port=22,
                    to_port=22,
                    src_group=go_agent_security_group,
                    dry_run=dry_run
                )
        except boto.exception.EC2ResponseError as exc:
            if exc.status == 412:
                # If the dry_run flag is set, each rule addition raises
                # this exception. Log it and carry on.
                logging.info('Dry run is True but rule addition would have succeeded for security group {}.'.format(
                    sg_name
                ))
            elif exc.code == "InvalidPermission.Duplicate":
                logging.info("Rule already exists for {}.".format(sg_name))
            else:
                raise
        logging.info('AFTER: Rules for security group {}:'.format(sg_name))
        logging.info(sec_group.rules)
region_autoscale_endpoint = 'autoscaling.ap-southeast-1.amazonaws.com'

# Engine scaling conf names to clean
scaling_confs = ['scaling_conf_name_1', 'scaling_conf_name_2']

# Connect EC2
aws_region = RegionInfo(name=region_name, endpoint=region_endpoint)
conn = EC2Connection(aws_access_key_id, aws_secret_access_key,
                     region=aws_region)

# Connect autoscaling service
aws_region_as = RegionInfo(name=region_name,
                           endpoint=region_autoscale_endpoint)
conn_as = AutoScaleConnection(aws_access_key_id, aws_secret_access_key,
                              region=aws_region_as)

lcs = conn_as.get_all_launch_configurations(names=scaling_confs)
for lc in lcs:
    try:
        img = conn.get_image(lc.image_id)
        snaps = conn.get_all_snapshots(
            filters={"description": "*" + img.id + "*"})
        # deregister the AMI (the original referenced an undefined
        # `image` variable here), then delete its snapshots explicitly
        img.deregister(delete_snapshot=False)
        for snap in snaps:
            snap.delete()
        print "scaling configuration image and these related " + str(
            snaps) + " snapshots removed"
    except:
def __init__(self, name, application, configuration=None, resource=None):
    super(EC2AutoScale, self).__init__(application, configuration,
                                       resource)
    self.autoscale = AutoScaleConnection()
    self.name = name
#=================Construct a list of all availability zones for your region=========

get_reg = boto.ec2.connect_to_region(region_name=region)
print get_reg
zones = get_reg.get_all_zones()
zoneStrings = []
for zone in zones:
    zoneStrings.append(zone.name)
print zoneStrings

conn_elb = ELBConnection()
conn_as = AutoScaleConnection()

all_elb = conn_elb.get_all_load_balancers()

##=================Create a Load Balancer=============================================
#For a complete list of options see http://boto.cloudhackers.com/ref/ec2.html#module-boto.ec2.elb.healthcheck
hc = HealthCheck('healthCheck',
                 interval=elastic_load_balancer['interval'],
                 target=elastic_load_balancer['health_check_target'],
                 timeout=elastic_load_balancer['timeout'])

##For a complete list of options see http://boto.cloudhackers.com/ref/ec2.html#boto.ec2.elb.ELBConnection.create_load_balancer
lb = conn_elb.create_load_balancer(elastic_load_balancer['name'],
                                   sorted(zoneStrings, reverse=True),