# --- fragment: tail of a LaunchConfiguration(...) call that starts outside this view ---
instance_type=as_ami['instance_type'], instance_monitoring=as_ami['instance_monitoring'])
# Register the launch configuration with the autoscaling service.
conn_as.create_launch_configuration(lc)
#For a complete list of options see http://boto.cloudhackers.com/ref/ec2.html#boto.ec2.autoscale.group.AutoScalingGroup
# Autoscaling group attached to the ELB; name and size limits come from the
# autoscaling_group dict defined elsewhere in this file.
ag = AutoScalingGroup(group_name=autoscaling_group['name'], load_balancers=[elastic_load_balancer['name']], availability_zones=zoneStrings, launch_config=lc, min_size=autoscaling_group['min_size'], max_size=autoscaling_group['max_size'])
conn_as.create_auto_scaling_group(ag)
#=================Create Scaling Policies=============================================
#Policy for scaling the number of servers up and down
#For a complete list of options see http://boto.cloudhackers.com/ref/ec2.html#boto.ec2.autoscale.policy.ScalingPolicy
# Scale up adds 2 instances, scale down removes 1; both use a 180s cooldown.
scalingUpPolicy = ScalingPolicy(name='webserverScaleUpPolicy', adjustment_type='ChangeInCapacity', as_name=ag.name, scaling_adjustment=2, cooldown=180)
scalingDownPolicy = ScalingPolicy(name='webserverScaleDownPolicy', adjustment_type='ChangeInCapacity', as_name=ag.name, scaling_adjustment=-1, cooldown=180)
conn_as.create_scaling_policy(scalingUpPolicy)
conn_as.create_scaling_policy(scalingDownPolicy)
lc = LaunchConfiguration(name='m3.medium.non_spot', image_id=Data_Center, key_name='worker', instance_type=Data_Center_Type, security_groups=SECURITY_GROUP) # How about time to cool down ag = AutoScalingGroup(group_name='SmartGroup', load_balancers=['SmartELB'], availability_zones=['us-east-1a'], launch_config=lc, min_size=MIN_INSTANCE_SIZE, max_size=MAX_INSTANCE_SIZE, health_check_type='ELB', health_check_period='120', connection=conn) conn.create_auto_scaling_group(ag) scale_up_policy = ScalingPolicy( name='scale_up', adjustment_type='ChangeInCapacity', as_name=AUTO_SCALE_GROUP, scaling_adjustment=1, cooldown=SCALE_UP_COOLDOWN) scale_down_policy = ScalingPolicy( name='scale_down', adjustment_type='ChangeInCapacity', as_name=AUTO_SCALE_GROUP, scaling_adjustment=-1, cooldown=SCALE_DOWN_COOLDOWN) conn.create_scaling_policy(scale_up_policy) conn.create_scaling_policy(scale_down_policy) scale_up_policy = conn.get_all_policies( as_group=AUTO_SCALE_GROUP, policy_names=['scale_up'])[0] scale_down_policy = conn.get_all_policies( as_group=AUTO_SCALE_GROUP, policy_names=['scale_down'])[0] cloudwatch = boto.ec2.cloudwatch.connect_to_region("us-east-1", aws_access_key_id=ACCESS_KEY_ID, aws_secret_access_key=SECRET_ACCESS_KEY)
def setup(CONF):
    """Create or update an autoscaling stack described by the CONF dict.

    CONF is expected to provide: NAME, LC (LaunchConfiguration kwargs),
    ASG (AutoScalingGroup kwargs), ASG_TAGS (list of tag dicts), REGION,
    and TRIGGERS (list of {'policy': ..., 'alarm': ...} dicts).
    Created boto objects are stashed in the module-level `out` dict.
    """
    global out
    # '%(name)s' placeholders in CONF values are expanded with this table.
    lookup_tbl = {
        'name': CONF['NAME'],
    }

    conn = AutoScaleConnection()
    out['conn'] = conn

    # Launch Configurations
    LC = CONF['LC']
    LC['name'] = LC['name'] % lookup_tbl
    lc = LaunchConfiguration(**LC)
    conn.create_launch_configuration(lc)
    out['lc'] = lc

    # Auto Scaling Group
    ASG = CONF['ASG']
    ASG['group_name'] = ASG['group_name'] % lookup_tbl
    ASG['launch_config'] = lc
    groups = conn.get_all_groups(names=[ASG['group_name']])
    if len(groups) > 0:
        # Group already exists: update it in place. The boto group object is
        # not subscriptable, so only copy keys it actually exposes (this
        # replaces the previous bare try/except around __getattribute__).
        asg = groups[0]
        for k in ASG:
            if hasattr(asg, k):
                setattr(asg, k, ASG[k])
        asg.launch_config_name = LC['name']
        asg.update()
        out['asg'] = asg
    else:
        # create
        asg = AutoScalingGroup(**ASG)
        conn.create_auto_scaling_group(asg)

    # ASG Tags
    ASG_TAGS = CONF['ASG_TAGS']
    for i in ASG_TAGS:
        if 'propagate_at_launch' not in i:
            i['propagate_at_launch'] = True
        i['key'] = i['key'] % lookup_tbl
        i['value'] = i['value'] % lookup_tbl
    # Python 2 dict merge: copy each tag dict and pin it to this group.
    tags = [
        Tag(**dict(x.items() + [('resource_id', ASG['group_name'])]))
        for x in ASG_TAGS
    ]
    conn.create_or_update_tags(tags)

    # Triggers (Scaling Policy / Cloudwatch Alarm)
    conn_cw = connect_to_region(CONF['REGION'])
    TRIGGERS = CONF['TRIGGERS']
    for T in TRIGGERS:
        T['policy']['name'] = T['policy']['name'] % lookup_tbl
        T['policy']['as_name'] = ASG['group_name']
        T['alarm']['dimensions'] = {'AutoScalingGroupName': ASG['group_name']}
        T['alarm']['alarm_actions'] = None
        if 'name' in T['alarm']:
            T['alarm']['name'] = T['alarm']['name'] % lookup_tbl
        else:
            T['alarm']['name'] = T['policy']['name']

        # Policies are safely overwritten, so not checked for existence
        conn.create_scaling_policy(ScalingPolicy(**T['policy']))
        # Re-fetch the policy so its policy_arn is populated for the alarm.
        policy = conn.get_all_policies(as_group=ASG['group_name'],
                                       policy_names=[T['policy']['name']])[0]
        T['alarm']['alarm_actions'] = [policy.policy_arn]
        # create_alarm upserts, so no existence check is needed here either
        # (the previous describe_alarms() call was dead code and was removed).
        conn_cw.create_alarm(MetricAlarm(**T['alarm']))
def create_scaling_policy(connection, module):
    """Create the requested scaling policy, or update an existing one.

    Reads the policy parameters from the Ansible-style ``module.params``
    and always terminates through module.exit_json / module.fail_json.
    """
    sp_name = module.params.get('name')
    adjustment_type = module.params.get('adjustment_type')
    asg_name = module.params.get('asg_name')
    scaling_adjustment = module.params.get('scaling_adjustment')
    min_adjustment_step = module.params.get('min_adjustment_step')
    cooldown = module.params.get('cooldown')
    scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])
    if not scalingPolicies:
        # No policy with this name yet: create it from scratch.
        sp = ScalingPolicy(
            name=sp_name,
            adjustment_type=adjustment_type,
            as_name=asg_name,
            scaling_adjustment=scaling_adjustment,
            min_adjustment_step=min_adjustment_step,
            cooldown=cooldown)
        try:
            connection.create_scaling_policy(sp)
            # Re-fetch so the ARN and server-side defaults are populated.
            policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
            module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
        except BotoServerError as e:
            module.fail_json(msg=str(e))
    else:
        policy = scalingPolicies[0]
        changed = False
        # min_adjustment_step attribute is only relevant if the adjustment_type
        # is set to percentage change in capacity, so it is a special case
        if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity':
            if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
                changed = True
        # set the min adjustment step in case the user decided to change their
        # adjustment type to percentage
        setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
        # check the remaining attributes
        for attr in ('adjustment_type', 'scaling_adjustment', 'cooldown'):
            if getattr(policy, attr) != module.params.get(attr):
                changed = True
                setattr(policy, attr, module.params.get(attr))
        try:
            if changed:
                # create_scaling_policy also updates an existing policy;
                # re-fetch to report the stored values back to the caller.
                connection.create_scaling_policy(policy)
                policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
            module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
        except BotoServerError as e:
            module.fail_json(msg=str(e))
# --- fragment: tail of a LaunchConfiguration(...) call that starts outside this view ---
user_data=data_script)
conn.create_launch_configuration(lc)
# Two-AZ group of 1-2 instances built from the launch configuration above.
ag = AutoScalingGroup(group_name='my_autoscale_group', availability_zones=['us-east-1a', 'us-east-1b'], launch_config=lc, min_size=1, max_size=2, connection=conn)
conn.create_auto_scaling_group(ag)
# Simple +1/-1 capacity policies with a 180s cooldown each.
scale_up_policy = ScalingPolicy(name='scale_up', adjustment_type='ChangeInCapacity', as_name='my_autoscale_group', scaling_adjustment=1, cooldown=180)
scale_down_policy = ScalingPolicy(name='scale_down', adjustment_type='ChangeInCapacity', as_name='my_autoscale_group', scaling_adjustment=-1, cooldown=180)
conn.create_scaling_policy(scale_up_policy)
conn.create_scaling_policy(scale_down_policy)
# Re-fetch the policies so their policy_arn fields are populated.
scale_up_policy = conn.get_all_policies(as_group='my_autoscale_group', policy_names=['scale_up'])[0]
# --- fragment: this statement is cut off and continues outside this view ---
scale_down_policy = conn.get_all_policies(as_group='my_autoscale_group',
# CloudWatch metric identifiers for the Apache status alarm below.
metricname = "Apachestatus"
unitname = "Maximum"
apachemetrics = metrics.apacheMetrics()
# Alarm dimension keyed on this EC2 instance.
dimensions = {"instanceId" : thisInstanceId}
# Loop through all instances, find this instance and get it's aws:autoscaling:groupName
all_instances = ec2conn.get_all_instances()
instances = [i for r in all_instances for i in r.instances]
for instance in instances:
    if instance.__dict__['id'] == thisInstanceId:
        # NOTE(review): instance.id / instance.tags would be the idiomatic
        # spellings of these __dict__ lookups.
        thisAutoScalename = instance.__dict__['tags']['aws:autoscaling:groupName']
# Define the ScaleDownPolicy
ScalingDownPolicy = ScalingPolicy(name='ctScaleDown', adjustment_type='ChangeInCapacity', as_name=thisAutoScalename, scaling_adjustment=-1, cooldown=180)
asconn.create_scaling_policy(ScalingDownPolicy)
# Re-fetch the policy so its policy_arn is populated for the alarm action.
ScaleDownPolicy = asconn.get_all_policies(as_group=thisAutoScalename, policy_names=['ctScaleDown'])[0]
alarm_actions = []
alarm_actions.append(ScaleDownPolicy.policy_arn)
# --- fragment: this MetricAlarm(...) call continues outside this view ---
ApacheStatusAlarm = MetricAlarm(name=alarmname, namespace=namespace, metric=metricname, statistic=unitname, comparison='>',
def create_AutoScaling(): print "Creating AutoScaling..." # establish connection as_conn = AutoScaleConnection(AWSAccessKeyId, AWSSecretKey) # create launch configuration global lc lc = LaunchConfiguration(name='lc', image_id=DATA_CEN_AMI, key_name=ACCESS_KEY, instance_monitoring=True, security_groups=[SECURITY_GRP], instance_type=MACHINE_TYPE) as_conn.create_launch_configuration(lc) # create tag for autoscaling group as_tag = Tag(key="Project", value="2.3", propagate_at_launch=True, resource_id='my_group') # create aotoscaling group global ag ag = AutoScalingGroup(group_name='my_group', load_balancers=['myELB'], availability_zones=['us-east-1a'], launch_config=lc, min_size=MIN_SIZE, max_size=MAX_SIZE, connection=as_conn, tags=[as_tag]) # associate the autoscaling group with launch configuration as_conn.create_auto_scaling_group(ag) # build the scale policy scale_up_policy = ScalingPolicy(name='scale_up', adjustment_type='ChangeInCapacity', as_name='my_group', scaling_adjustment=1, cooldown=60) scale_down_policy = ScalingPolicy(name='scale_down', adjustment_type='ChangeInCapacity', as_name='my_group', scaling_adjustment=-1, cooldown=60) # register the scale policy as_conn.create_scaling_policy(scale_up_policy) as_conn.create_scaling_policy(scale_down_policy) # refresh the scale policy for extra information scale_up_policy = as_conn.get_all_policies(as_group='my_group', policy_names=['scale_up'])[0] scale_down_policy = as_conn.get_all_policies(as_group='my_group', policy_names=['scale_down' ])[0] # create cloudwatch alarm cloudwatch = CloudWatchConnection(aws_access_key_id=AWSAccessKeyId, aws_secret_access_key=AWSSecretKey, is_secure=True) # assocate cloudwatch with alarm alarm_dimensions = {"AutoScalingGroupName": 'my_group'} # create scale up alarm scale_up_alarm = MetricAlarm(name='scale_up_on_cpu', namespace='AWS/EC2', metric='CPUUtilization', statistic='Average', comparison='>', threshold='80', period='60', evaluation_periods=1, 
alarm_actions=[scale_up_policy.policy_arn], dimensions=alarm_dimensions) cloudwatch.create_alarm(scale_up_alarm) # create scale down alarm scale_down_alarm = MetricAlarm( name='scale_down_on_cpu', namespace='AWS/EC2', metric='CPUUtilization', statistic='Average', comparison='<', threshold='55', period='60', evaluation_periods=2, alarm_actions=[scale_down_policy.policy_arn], dimensions=alarm_dimensions) cloudwatch.create_alarm(scale_down_alarm) # configure autoscaling group with Simple Notification Service(SNS) as_conn.put_notification_configuration(ag, ARN_topic, [ 'autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' ]) print "AutoScaling created successfully"
def as_ensure(name, zones, instance, balancers=None, events=None,
              min_size=2, max_size=20, desired_size=None, force=False):
    """Create a new autoscale group.

    :type name: str
    :param name: the name of the autoscale group to be created

    :type zones: list of str
    :param zones: a list of the availability zones where autoscale group
        will be working on.

    :param instance: an instance config, created by :func:`as_config`.

    :type balancers: list of balancers
    :param balancers: a list of balancers where new instances will be
        autoattached (defaults to no balancers).

    :type events: list of events
    :param events: a list of events created with as_event, which define in
        what conditions the autoscale group will be grow up (defaults to
        no events).
    """
    # Avoid mutable default arguments: normalize None to fresh lists so a
    # caller can never accidentally share state between invocations.
    if balancers is None:
        balancers = []
    if events is None:
        events = []
    connection = as_connect()
    # With force=True a unique, timestamped group name is always created.
    if force:
        ag_name = "%s-%s" % (name, _as_get_timestamp())
    else:
        ag_name = name
    _obj = as_exists(ag_name)
    if _obj:
        if not force:
            ag = _obj[0]
            mico.output.info("use existent autoscaling group: %s" % ag_name)
            return ag
    # Balancers may be given as plain names or as boto balancer objects.
    _l = []
    for elb in balancers:
        if isinstance(elb, str):
            _l.append(elb)
        else:
            _l.append(elb.name)
    ag = AutoScalingGroup(name=ag_name,
                          availability_zones=zones,
                          launch_config=instance,
                          load_balancers=_l,
                          min_size=min_size,
                          max_size=max_size,
                          desired_capacity=desired_size)
    connection.create_auto_scaling_group(ag)
    mico.output.info("created new autoscaling group: %s" % ag_name)
    as_tag = Tag(key='Name',
                 value="%s" % name,
                 propagate_at_launch=True,
                 resource_id=ag_name)
    # Add the tag to the autoscale group
    connection.create_or_update_tags([as_tag])
    cw_connection = cw_connect()
    for condition, actions in events:
        if not isinstance(actions, list):
            actions = [actions]
        condition.dimensions = {"AutoScalingGroupName": ag_name}
        # XXX: boto does not handle very well the alarm_actions list when the
        # same connection is used for two different cloudwatch alarms, so the
        # actions appears to be duplicated in both alarms. We need to force the
        # internal list to be empty.
        condition.alarm_actions = []
        for action in actions:
            policy = ScalingPolicy(action["name"], as_name=ag_name, **action)
            mico.output.info("create policy %s" % policy.name)
            connection.create_scaling_policy(policy)
            # Re-fetch the policy so its policy_arn field is populated.
            action = connection.get_all_policies(as_group=ag_name,
                                                 policy_names=[action["name"]])[0]
            # NOTE(review): the condition is re-timestamped once per action;
            # confirm this is intended for conditions with several actions.
            condition.name = "%s-%s" % (condition.name, _as_get_timestamp())
            condition.add_alarm_action(action.policy_arn)
            mico.output.debug("add new alarm for condition %s: %s" %
                              (condition.name, action.name))
        cw_connection.create_alarm(condition)
        mico.output.info("create alarm %s" % condition.name)
    return ag
def start_elb(tag, user_data, region, auto_register, as_ami, subnet_id, security_groups, public_ip_address, iam_role, zone_strings, elastic_load_balancer): print "Using tag \"" + tag + "\"" conn_reg = boto.ec2.connect_to_region(region_name=region) # =================Construct a list of all availability zones for your region========= conn_elb = boto.ec2.elb.connect_to_region(region_name=region) conn_as = boto.ec2.autoscale.connect_to_region(region_name=region) # =================Create a Load Balancer============================================= # For a complete list of options see http://boto.cloudhackers.com/ref/ec2.html#module-boto.ec2.elb.healthcheck hc = HealthCheck('healthCheck', interval=elastic_load_balancer['interval'], target=elastic_load_balancer['health_check_target'], timeout=elastic_load_balancer['timeout']) # ELB does not accept any special characters elb_tag = tag elb_tag = elb_tag.replace("_", "") elb_tag = elb_tag.replace("-", "") elb_tag = elb_tag.replace(".", "") print "ELB name: \"" + elb_tag + "\"" # For a complete list of options see # http://boto.cloudhackers.com/ref/ec2.html#boto.ec2.elb.ELBConnection.create_load_balancer lb = conn_elb.create_load_balancer( name=elb_tag + 'Elb', zones=None, subnets=subnet_id, security_groups=security_groups, listeners=elastic_load_balancer['connection_forwarding']) if auto_register: aws_library.add_instances_to_lb(tag=tag, lb=lb, region=region) lb.configure_health_check(hc) # DNS name for your new load balancer print "Map the CNAME of your website to: %s" % lb.dns_name # =================Create a Auto Scaling Group and a Launch Configuration============================================ # For a complete list of options see # http://boto.cloudhackers.com/ref/ec2.html#boto.ec2.autoscale.launchconfig.LaunchConfiguration lc = LaunchConfiguration(name=elb_tag + "Lc", image_id=as_ami['id'], key_name=as_ami['access_key'], security_groups=as_ami['security_groups'], instance_type=as_ami['instance_type'], 
instance_monitoring=as_ami['instance_monitoring'], instance_profile_name=iam_role, user_data=user_data) conn_as.create_launch_configuration(lc) # For a complete list of options see # http://boto.cloudhackers.com/ref/ec2.html#boto.ec2.autoscale.group.AutoScalingGroup ag = AutoScalingGroup(group_name=elb_tag + "Sg", load_balancers=[elb_tag], availability_zones=zone_strings, launch_config=lc, min_size=autoscaling_group['min_size'], max_size=autoscaling_group['max_size'], associate_public_ip_address=public_ip_address, vpc_zone_identifier=subnet_id) conn_as.create_auto_scaling_group(ag) # =================Create Scaling Policies============================================= # Policy for scaling the number of servers up and down # For a complete list of options see # http://boto.cloudhackers.com/ref/ec2.html#boto.ec2.autoscale.policy.ScalingPolicy scaling_up_policy = ScalingPolicy(name=elb_tag + "webserverScaleUpPolicy", adjustment_type='ChangeInCapacity', as_name=ag.name, scaling_adjustment=1, cooldown=60) scaling_down_policy = ScalingPolicy(name=elb_tag + "webserverScaleDownPolicy", adjustment_type='ChangeInCapacity', as_name=ag.name, scaling_adjustment=-1, cooldown=180) conn_as.create_scaling_policy(scaling_up_policy) conn_as.create_scaling_policy(scaling_down_policy) scaling_up_policy = conn_as.get_all_policies( as_group=elb_tag + "Sg", policy_names=[elb_tag + "webserverScaleUpPolicy"])[0] scaling_down_policy = conn_as.get_all_policies( as_group=elb_tag + "Sg", policy_names=[elb_tag + "webserverScaleDownPolicy"])[0] cloudwatch = boto.ec2.cloudwatch.connect_to_region(region) alarm_dimensions = {"AutoScalingGroupName": 'my_group'} scale_up_alarm = MetricAlarm(name=elb_tag + 'scale_up_on_cpu', namespace='AWS/EC2', metric='CPUUtilization', statistic='Average', comparison='>', threshold='70', period='60', evaluation_periods=2, alarm_actions=[scaling_up_policy.policy_arn], dimensions=alarm_dimensions) scale_down_alarm = MetricAlarm( name=elb_tag + 'scale_down_on_cpu', 
namespace='AWS/EC2', metric='CPUUtilization', statistic='Average', comparison='<', threshold='40', period='60', evaluation_periods=2, alarm_actions=[scaling_down_policy.policy_arn], dimensions=alarm_dimensions) cloudwatch.create_alarm(scale_down_alarm) cloudwatch.create_alarm(scale_up_alarm)
# Launch configuration for the Alfresco servers (monitoring enabled, IAM role
# and block-device mapping attached).
launchConfig = LaunchConfiguration(name=asLCstr, image_id=AMIID, key_name=KEY, security_groups=[SECGRP], instance_type=TYPE, instance_monitoring=True, instance_profile_name=ROLE, block_device_mappings=blockDeviceMap, user_data=userData)
conn_as.create_launch_configuration(launchConfig)
# Give AWS a moment to register the launch configuration before the group
# references it.
time.sleep(20)
# BUG FIX: the keyword was misspelled `availabilty_zones`, so the group was
# created without its availability zones.
autoscaleGroup = AutoScalingGroup(group_name=asGrpStr, load_balancers=[ELB_NAME], availability_zones=[AZLIST], launch_config=launchConfig, vpc_zone_identifier=VPC_ZONE, min_size=2, max_size=6, health_check_period='360', health_check_type='ELB')
conn_as.create_auto_scaling_group(autoscaleGroup)
#setup tagging for the instances
# create a Tag for the austoscale group
as_tag = Tag(key='Name', value = 'Alfresco Server', propagate_at_launch=True, resource_id=asGrpStr)
# Add the tag to the autoscale group
conn_as.create_or_update_tags([as_tag])
#create scale up and scale down policies for the autoscale group
scaleUpPolicy = ScalingPolicy(name='alfrescoScaleUp-'+randomStr, adjustment_type='ChangeInCapacity', as_name=autoscaleGroup.name, scaling_adjustment=2, cooldown=1200)
scaleDownPolicy = ScalingPolicy(name='alfrescoScaleDown-'+randomStr, adjustment_type='ChangeInCapacity', as_name=autoscaleGroup.name, scaling_adjustment=-1, cooldown=600)
conn_as.create_scaling_policy(scaleUpPolicy)
conn_as.create_scaling_policy(scaleDownPolicy)
#redeclare policies to populate the ARN fields
policyResults = conn_as.get_all_policies(as_group=autoscaleGroup.name, policy_names=[scaleUpPolicy.name])
scaleUpPolicy = policyResults[0]
policyResults = conn_as.get_all_policies(as_group=autoscaleGroup.name, policy_names=[scaleDownPolicy.name])
scaleDownPolicy = policyResults[0]
#connect to Cloud Watch
cw_conn = boto.ec2.cloudwatch.connect_to_region(REGION)
#create the following alarms: ScaleUp @ Avg CPU >60% over 2 periods OR ELB latency >= 0.5sec. ScaleDown @ Avg CPU <30% over 2 periods
def main(): parser = optparse.OptionParser('Usage: %prog [options]') parser.add_option('-n', '--name', dest='name', help='The name of this configuration (e.g., TEST).') parser.add_option('-i', '--image', dest='image', default=Defaults.IMAGE, help='The Amazon Machine Image (AMI) ID that will be used to launch ' 'EC2 instances. The most recent Amazon Linux AMI 2013.09.2 (ami-' 'a43909e1) is used by default.') parser.add_option('-t', '--type', dest='type', default=Defaults.TYPE, help='The type of the Amazon EC2 instance. If not specified, micro ' 'instance (t1.micro) type will be used.') parser.add_option('-k', '--key', dest='key', help='The name of the key pair to use when creating EC2 instances. ' 'This options is required.') parser.add_option('-g', '--group', dest='group', help='Security group that will be used when creating EC2 instances. ' 'This option is required.') parser.add_option('-m', '--min', dest='min', default=Defaults.MIN_INSTANCES, help='The minimum number of EC2 instances in the auto scaling group. ' 'By default it is set to 2.') parser.add_option('-M', '--max', dest='max', default=Defaults.MAX_INSTANCES, help='The maximum size of the auto scaling group. By default it is ' 'set to 4.') parser.add_option('-z', '--zone', dest='zones', action='append', help='The availability zone for the auto scaling group. This option ' 'is required.') parser.add_option('-l', '--load-balancer', dest='lbs', action='append', help='The name of an existing AWS load balancer to use, if any.') parser.add_option('--min-threshold', dest='min_threshold', default=Defaults.MIN_THRESHOLD, help='The minimum CPU utilization ' 'threshold that triggers an alarm. This option is not required and ' 'is set to 40% by default.') parser.add_option('--max-threshold', dest='max_threshold', default=Defaults.MAX_THRESHOLD, help='The maximum CPU utilization ' 'threshold that triggers an alarm. 
This option is not required and ' 'is set to 60% by default.') parser.add_option('-a', '--adjustment', dest='adjustment', default=Defaults.ADJUSTMENT, help='The number of EC2 instances by ' 'which to scale up or down. This is set to 1 by default.') parser.add_option('-p', '--period', dest='period', default=Defaults.PERIOD, help='The evaluation period in seconds. This is optional and is set ' 'to 300 seconds by default.') (opts, args) = parser.parse_args() if (0 != len(args) or opts.name is None or opts.key is None or opts.group is None or opts.zones is None): parser.print_help() return 1 try: c = boto.connect_autoscale() lc = _create_launch_configuration(c, opts) g = _create_autoscaling_group(c, lc, opts) policy_up = ScalingPolicy(name=opts.name + s.POLICY_UP_SUFFIX, as_name=g.name, scaling_adjustment=opts.adjustment, adjustment_type='ChangeInCapacity') c.create_scaling_policy(policy_up) cloudwatch = boto.connect_cloudwatch() alarm_high = MetricAlarm(name=opts.name + s.ALARM_HIGH_SUFFIX, alarm_actions=[policy_up], metric='CPUUtilization', namespace='AWS/EC2', statistic='Average', dimensions={'AutoScalingGroupName': g.name}, period=opts.period, evaluation_periods=1, threshold=int(opts.max_threshold), comparison='>') cloudwatch.create_alarm(alarm_high) policy_down = ScalingPolicy(name=opts.name + s.POLICY_DOWN_SUFFIX, as_name=g.name, scaling_adjustment=-opts.adjustment, adjustment_type='ChangeInCapacity') autoscale.create_scaling_policy(policy_down) alarm_low = MetricAlarm(name=opts.name + s.ALARM_LOW_SUFFIX, alarm_actions=[policy_down], metric='CPUUtilization', namespace='AWS/EC2', statistic='Average', dimensions={'AutoScalingGroupName': g.name}, period=opts.period, evaluation_periods=1, threshold=int(opts.min_threshold), comparison='<') cloudwatch.create_alarm(alarm_low) except Error, err: sys.stderr.write('[ERROR] {0}\n'.format(err)) return 1
# Launch configuration for the LAB4 instances (detailed monitoring enabled).
launch=LaunchConfiguration(name="LAB4",image_id=AMI,key_name=KEYNAME,instance_type=TYPE, security_groups=[SECURITY],user_data=Data,instance_monitoring=True)
conn.create_launch_configuration(launch)
# Single-AZ group of 1-2 instances built from the launch configuration above.
asg=AutoScalingGroup(group_name="My_AutoScaling_Group",availability_zones=['us-east-2a'], launch_config=launch,min_size=1,max_size=2,connection=conn)
conn.create_auto_scaling_group(asg)
print('\nCreating scaling policies\n')
# +1/-1 capacity policies with a 180s cooldown each.
scalingUpPolicy = ScalingPolicy(name='ScalingUpPolicy', adjustment_type='ChangeInCapacity', as_name=asg.name, scaling_adjustment=1, cooldown=180)
scalingDownPolicy = ScalingPolicy(name='ScalingDownPolicy', adjustment_type='ChangeInCapacity', as_name=asg.name, scaling_adjustment=-1, cooldown=180)
conn.create_scaling_policy(scalingUpPolicy)
conn.create_scaling_policy(scalingDownPolicy)
# Re-fetch the policies so their policy_arn fields are populated.
scalingUpPolicy = conn.get_all_policies(as_group='My_AutoScaling_Group', policy_names=['ScalingUpPolicy'])[0]
scalingDownPolicy = conn.get_all_policies(as_group='My_AutoScaling_Group', policy_names=['ScalingDownPolicy'])[0]
# --- fragment: tail of an AutoScalingGroup(...) call that starts outside this view ---
default_cooldown=300, desired_capacity=2, min_size=2, max_size=6, load_balancers=[collectorLoadBalancerName], launch_config=collectorTierLaunchConfig)
as_con.create_auto_scaling_group(collectorTierScalingGroup)
# Now create the scaling conditions, under which the scaling will take place
# This is a two-stage process: create a scaleUp and scaleDown policy, and then tie these to metrics
# which we will fetch using the CloudWatch monitoring services
# Scale up adds 2 instances, scale down removes 1; both use a 180s cooldown.
collectionTierScalingUpPolicy = ScalingPolicy(name='ctScaleUp', adjustment_type='ChangeInCapacity', as_name=collectorTierScalingGroup.name, scaling_adjustment=2, cooldown=180)
collectionTierScalingDownPolicy = ScalingPolicy(name='ctScaleDown', adjustment_type='ChangeInCapacity', as_name=collectorTierScalingGroup.name, scaling_adjustment=-1, cooldown=180)
as_con.create_scaling_policy(collectionTierScalingUpPolicy)
as_con.create_scaling_policy(collectionTierScalingDownPolicy)
# It appears that we need to fetch the policies again, to make sure that the policy_arn is filled in for each policy
# We need the policy_arn to set the scaling alarm below
# Not sure if there is a way to avoid this? Should as_con.create_scaling_policy fill this in somehow?
# --- fragment: tail of a LaunchConfiguration(...) call that starts outside this view ---
instance_monitoring=True)
asg_conn.create_launch_configuration(launch_config)
# Single-AZ autoscaling group attached to the load balancer.
asg = AutoScalingGroup(group_name=ASG_NAME, load_balancers=[LB_NAME], availability_zones=[CONNECT_AVAILABILITY_ZONE], launch_config=launch_config, min_size=MIN_SIZE, max_size=MAX_SIZE, connection=asg_conn)
asg_conn.create_auto_scaling_group(asg)
# +1/-1 capacity policies with a 300s cooldown each.
scale_up_policy = ScalingPolicy(name='scale_up', adjustment_type='ChangeInCapacity', as_name=ASG_NAME, scaling_adjustment=1, cooldown=300)
scale_down_policy = ScalingPolicy(name='scale_down', adjustment_type='ChangeInCapacity', as_name=ASG_NAME, scaling_adjustment=-1, cooldown=300)
asg_conn.create_scaling_policy(scale_up_policy)
asg_conn.create_scaling_policy(scale_down_policy)
##################### cloud watch
# --- fragment: this connect_to_region(...) call continues outside this view ---
cw_conn = boto.ec2.cloudwatch.connect_to_region(
    CONNECT_REGION,
heath_check_type='ELB',health_check_period='120' ) print("Creating auto-scaling group") try: conn.create_auto_scaling_group(ag) print("") except: print("Oops!", sys.exc_info()[0], "occured.") print("The auto-scaling group may already exist.") print("") print("Creating auto-scaling policies") #create auto-scaling policies scale_up_policy = ScalingPolicy(name='scale_up',adjustment_type='ChangeInCapacity', as_name="Analystic-Group",scaling_adjustment=1,cooldown=180) scale_down_policy = ScalingPolicy(name='scale_down',adjustment_type='ChangeInCapacity', as_name="Analystic-Group",scaling_adjustment=-1,cooldown=180) conn.create_scaling_policy(scale_up_policy) conn.create_scaling_policy(scale_down_policy) scale_up_policy = conn.get_all_policies(as_group="Analystic-Group", policy_names=["scale_up"])[0] scale_down_policy = conn.get_all_policies(as_group="Analystic-Group", policy_names=["scale_down"])[0] print "Connecting to CloudWatch" cloudwatch = boto.ec2.cloudwatch.connect_to_region(REGION,aws_access_key_id=ACCESS_KEY,aws_secret_access_key=SECRET_KEY) alarm_dimensions = {"AutoScalingGroupName": "Analystic-Group", } print("Creating scale-up alarm")