Example #1
 def __init__(
     self,
     aws,
     wait_time_secs,
     app_name=None,
 ):
     """
     Creates the EbsHelper
     """
     self.aws = aws
     self.ebs = connect_to_region(aws.region,
                                  aws_access_key_id=aws.access_key,
                                  aws_secret_access_key=aws.secret_key,
                                  security_token=aws.security_token)
     self.autoscale = AutoScaleConnection(
         aws_access_key_id=aws.access_key,
         aws_secret_access_key=aws.secret_key,
         security_token=aws.security_token)
     self.s3 = S3Connection(
         aws_access_key_id=aws.access_key,
         aws_secret_access_key=aws.secret_key,
         security_token=aws.security_token,
         host=(lambda r: 's3.amazonaws.com'
               if r == 'us-east-1' else 's3-' + r + '.amazonaws.com')(
                   aws.region))
     self.app_name = app_name
     self.wait_time_secs = wait_time_secs
Example #2
 def __init__(self, clc_host, access_id, secret_key, token):
     #boto.set_stream_logger('foo')
     path = '/services/AutoScaling'
     port = 8773
     if clc_host[len(clc_host) - 13:] == 'amazonaws.com':
         clc_host = clc_host.replace('ec2', 'autoscaling', 1)
         path = '/'
         reg = None
         port = 443
     reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
     if boto.__version__ < '2.6':
         self.conn = AutoScaleConnection(access_id,
                                         secret_key,
                                         region=reg,
                                         port=port,
                                         path=path,
                                         is_secure=True,
                                         security_token=token,
                                         debug=0)
     else:
         self.conn = AutoScaleConnection(access_id,
                                         secret_key,
                                         region=reg,
                                         port=port,
                                         path=path,
                                         validate_certs=False,
                                         is_secure=True,
                                         security_token=token,
                                         debug=0)
     self.conn.http_connection_kwargs['timeout'] = 30
Example #3
def scale_instances(tasks_per_instance, group_name, total_groups):
    conn = AutoScaleConnection()
    group = conn.get_all_groups(names=[group_name])[0]

    if group.desired_capacity == group.max_size:
        logger.info('Maximum number of instances reached')
        return
    tasks_count = get_sqs_tasks_count()
    if not tasks_count:
        logger.info('No tasks left in queues')
        return
    logger.info('Num of tasks in queues %s', tasks_count)

    tasks_per_instance = float(tasks_per_instance)
    additional_instances_count = int(
        ceil(tasks_count / tasks_per_instance) / total_groups)
    updated_instances_count = \
        group.desired_capacity + additional_instances_count
    # consider max allowed instances
    if updated_instances_count > group.max_size:
        updated_instances_count = group.max_size

    logger.info('Updating group from %s to %s instances',
                group.desired_capacity, updated_instances_count)
    group.set_capacity(updated_instances_count)
    group.desired_capacity = updated_instances_count
    group.update()
    logger.info('Done\n')
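scale_instances relies on a get_sqs_tasks_count helper that is not shown here; a minimal sketch of what it might look like with boto.sqs (the queue names and region are assumptions) is:

import boto.sqs

def get_sqs_tasks_count(queue_names=('tasks',)):
    # Hypothetical helper: sum the approximate number of visible messages
    # across the given queues.
    sqs = boto.sqs.connect_to_region('us-east-1')
    total = 0
    for name in queue_names:
        queue = sqs.get_queue(name)
        if queue is not None:
            total += queue.count()
    return total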
Example #4
	def connect(self, groupname):
		self.ec2 = boto.connect_ec2()
		self.cw = CloudWatchConnection()
		self.autoscale = AutoScaleConnection()
		self.group = self.autoscale.get_all_groups(names=[groupname])[0]
		self.instances = len(self.group.instances)
		self.desired = self.group.desired_capacity
		self.name = groupname
Example #5
    def test_basic(self):
        # NB: as it says on the tin these are really basic tests that only
        # (lightly) exercise read-only behaviour - and that's only if you
        # have any autoscale groups to introspect. It's useful, however, to
        # catch simple errors

        print '--- running %s tests ---' % self.__class__.__name__
        c = AutoScaleConnection()

        self.assertTrue(repr(c).startswith('AutoScaleConnection'))

        groups = c.get_all_groups()
        for group in groups:
            self.assertEqual(type(group), AutoScalingGroup)

            # get activities
            activities = group.get_activities()

            for activity in activities:
                self.assertEqual(type(activity), Activity)

        # get launch configs
        configs = c.get_all_launch_configurations()
        for config in configs:
            self.assertEqual(type(config), LaunchConfiguration)

        # get policies
        policies = c.get_all_policies()
        for policy in policies:
            self.assertEqual(type(policy), ScalingPolicy)

        # get scheduled actions
        actions = c.get_all_scheduled_actions()
        for action in actions:
            self.assertEqual(type(action), ScheduledUpdateGroupAction)

        # get instances
        instances = c.get_all_autoscaling_instances()
        for instance in instances:
            self.assertEqual(type(instance), Instance)

        # get all scaling process types
        ptypes = c.get_all_scaling_process_types()
        for ptype in ptypes:
            self.assertEqual(type(ptype), ProcessType)

        # get adjustment types
        adjustments = c.get_all_adjustment_types()
        for adjustment in adjustments:
            self.assertEqual(type(adjustment), AdjustmentType)

        # get metrics collection types
        types = c.get_all_metric_collection_types()
        self.assertEqual(type(types), MetricCollectionTypes)

        print '--- tests completed ---'
Example #6
 def test_ebs_optimized_regression(self):
     c = AutoScaleConnection()
     time_string = '%d' % int(time.time())
     lc_name = 'lc-%s' % time_string
     lc = LaunchConfiguration(name=lc_name,
                              image_id='ami-2272864b',
                              instance_type='t1.micro',
                              ebs_optimized=True)
     # This failed due to the difference between native Python ``True/False``
     # & the expected string variants.
     c.create_launch_configuration(lc)
     self.addCleanup(c.delete_launch_configuration, lc_name)
Example #7
def cleanup_unused_launch_configs(unused_launch_config_names, delete=False):
    conn = AutoScaleConnection()
    configs = conn.get_all_launch_configurations(names=unused_launch_config_names)
    print "\nGetting ready to cleanup launch configs ... {}".format(delete and "FOR REAL" or "DRYRUN")
    for config in configs:
        if delete:
            print "deleting launch config: {} in {} seconds...".format(config.name, 5)
            time.sleep(5)
            print "deleting launch config: {}!".format(config.name)
            response = config.delete()
            print "deleted launch config: {} ({})!".format(config.name, response)
        else:
            print "dry run: not deleting config:", config.name
Example #8
 def test_ebs_optimized_regression(self):
     c = AutoScaleConnection()
     time_string = '%d' % int(time.time())
     lc_name = 'lc-%s' % time_string
     lc = LaunchConfiguration(
         name=lc_name,
         image_id='ami-2272864b',
         instance_type='t1.micro',
         ebs_optimized=True
     )
     # This failed due to the difference between native Python ``True/False``
     # & the expected string variants.
     c.create_launch_configuration(lc)
     self.addCleanup(c.delete_launch_configuration, lc_name)
Example #9
def get_all_group_instances_and_conn():
    conn = AutoScaleConnection()
    global autoscale_conn
    autoscale_conn = conn
    ec2 = boto.ec2.connect_to_region('us-east-1')
    groups = conn.get_all_groups(
        names=['SCCluster1', 'SCCluster2', 'SCCluster3',
               'SCCluster4'])  # TODO: update this list
    instances = [instance for group in groups for instance in group.instances]
    if not instances:
        sys.exit()
    instance_ids = [instance.instance_id for instance in instances]
    instances = ec2.get_only_instances(instance_ids)
    return instances, conn
Example #10
def _remove_from_worker_pool():
    """
    Ensures that this instance is shut down and unregistered from the worker
    pool.
    """

    # Retrieve the current state of the pool.
    pool = AutoScaleConnection().get_all_groups(["LSDA Worker Pool"])[0]

    if pool.desired_capacity <= pool.min_size:
        return

    # Reduce the pool size and shut ourself down.
    pool.desired_capacity -= 1
    pool.update()
Example #11
def as_connect(region=None, *args, **kwargs):
    """Helper to connect to Amazon Web Services EC2, using identify provided
    by environment, as also optional region in arguments.
    """
    if not os_environ.get("AWS_ACCESS_KEY_ID", None):
        raise EC2LibraryError(
            "Environment variable AWS_ACCESS_KEY_ID is not set.")
    if not os_environ.get("AWS_SECRET_ACCESS_KEY", None):
        raise EC2LibraryError(
            "Environment variable AWS_SECRET_ACCESS_KEY is not set.")

    if not region:
        region = env.get("ec2_region")

    for reg in boto.ec2.autoscale.regions():
        if reg.name == region:
            region = reg

    connection = AutoScaleConnection(os_environ.get("AWS_ACCESS_KEY_ID"),
                                     os_environ.get("AWS_SECRET_ACCESS_KEY"),
                                     region=region,
                                     *args,
                                     **kwargs)

    return connection
Example #12
def main():
    parser = optparse.OptionParser()
    parser.add_option( "-c", "--config", dest="config_file", help="AutoScale config INI", metavar="FILE" )
    (options, args) = parser.parse_args()
    logging.info( "Using config file [%s]" % options.config_file )

    config = parse_config( options.config_file ) 

    aws_access = config.get("AWS", 'access')
    aws_secret = config.get("AWS", 'secret')

    logging.debug( "Connecting to AWS with access [%s] and secret [%s]" % ( aws_access, aws_secret ) )
    aws_connection = AutoScaleConnection( aws_access, aws_secret )

    print "AutoScalingGroups:"
    print aws_connection.get_all_groups().__dict__
Example #13
def autoscale_group_hosts(group_name):
    import boto.ec2
    from boto.ec2.autoscale import AutoScaleConnection
    ec2 = boto.connect_ec2()
    conn = AutoScaleConnection()
    groups = conn.get_all_groups(names=[])
    groups = [ group for group in groups if group.name.startswith(group_name) ]

    instance_ids = []
    for group in groups:
        print group.name
        instance_ids.extend([i.instance_id for i in group.instances])
    # Fetch each instance once, after collecting ids from every group.
    instances = ec2.get_only_instances(instance_ids)

    return [i.private_ip_address for i in instances], instances[0].id, instances[0].tags.get("aws:autoscaling:groupName")
Example #14
 def __init__(self,PREFIX='tfound-',ENV='dev',AMI='',TYPE='',SIZE='',MIN=2,MAX=5,
              DOMAIN='tfound',SSHKEY='myprivatekey',AWSKEY='',AWSSECRET='',AVAIL_ZONES=["us-east-1a","us-east-1b","us-east-1c","us-east-1d"]):
     '''
     Shows examples
     Create load balancer group 'tfound-dev-web-lb' for web servers, in dev group for tfound:
         python control-lb-and-groups.py --createlb --env dev --aws SC --type web
     Add an instance to the load balancer group:
         python control-lb-and-groups.py --addtolb=true --env dev --aws SC --type web --instance=i-999999
     Create launch config using ami ami-fa6b8393 (default), medium sized instance, and Autoscale Group 'tfound-dev-web-group' with a min of 2 instances, max 5, with health check on port 80:
         python control-lb-and-groups.py  --createlc --ami ami-fa6b8393 --size c1.medium --env dev --aws SC --type web --createag --min 2 --max 5
     Triggers/Health checks are hard coded to spawn new instances when total cpu reaches 60 percent or health check fails.
     '''
     self.PREFIX=PREFIX+DOMAIN+'-'+ENV+'-'+TYPE
     self.ENV=ENV
     self.AMI=AMI
     self.TYPE=TYPE
     self.DOMAIN=DOMAIN
     self.SIZE=SIZE
     self.MIN=MIN
     self.MAX=MAX
     self.SSHKEY=SSHKEY
     self.AWSKEY=AWSKEY
     self.AWSSECRET=AWSSECRET
     self.AVAIL_ZONES=AVAIL_ZONES
     self.LBNAME=self.PREFIX+'-lb'
     self.AGNAME=self.PREFIX+'-group'
     self.TRNAME=self.PREFIX+'-trigger'
     self.LCNAME=self.PREFIX+'-launch_config'
     self.asconn=AutoScaleConnection(self.AWSKEY, self.AWSSECRET)
     self.elbconn = ELBConnection(aws_access_key_id=AWSKEY,aws_secret_access_key=AWSSECRET)
     self.lc = self._buildLaunchConfig()
     self.ag = self._buildAutoscaleGroup()
Example #15
def main():
    """
    Main entry point for the automated scaling daemon.
    """

    # Configure logging.
    logging.basicConfig(
        format = "%(asctime)-15s %(levelname)5s %(message)s",
        level = logging.INFO
    )

    # Read configuration.
    options = yaml.load(open("config.yaml"))

    # Connect to the RabbitMQ cluster.
    params = pika.ConnectionParameters(host=options["amqp"])
    conn = pika.BlockingConnection(params)

    channel = conn.channel()

    while True:
        # Ensure that we have things stuck in the queue for the given amount
        # of time.
        for i in xrange(DELAY / 5):
            queue_length = get_queue_length(channel, "stable")

            logging.info("Queue length: {}".format(queue_length))

            if queue_length == 0:
                break
            time.sleep(5)

        else:
            # Scale up!
            group = AutoScaleConnection().get_all_groups(["LSDA Worker Pool"])[0]
            group.desired_capacity = min(
              group.desired_capacity + 2, group.max_size)
            group.update()

            logging.info(
              "Triggering increase to {}".format(group.desired_capacity))

            time.sleep(300)

        # Wait until next polling event.
        time.sleep(30)
Example #16
def _is_up_to_date():
    """
    Returns True if this instance is up to date.
    """

    # Retrieve instance information.
    conn = AutoScaleConnection()
    pool = conn.get_all_groups(["LSDA Worker Pool"])[0]
    config = conn.get_all_launch_configurations(
      names=[pool.launch_config_name])[0]

    # Retrive the AMI for this instance and for others.
    config_ami = config.image_id
    my_ami = urllib.urlopen("http://169.254.169.254/latest/"
                            "meta-data/ami-id").read()

    return config_ami == my_ami
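Judging by the shared 'LSDA Worker Pool' group name, this helper presumably pairs with _remove_from_worker_pool from Example #10; a plausible (assumed) combination on a worker node would be:

# Retire this worker if its AMI no longer matches the launch configuration.
if not _is_up_to_date():
    _remove_from_worker_pool()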
Example #17
def get_all_group_instances_and_conn(
        groups_names=get_autoscale_groups()['groups']):
    conn = AutoScaleConnection()
    global autoscale_conn
    autoscale_conn = conn
    ec2 = boto.ec2.connect_to_region('us-east-1')
    selected_group_name = random.choice(groups_names)
    logger.info('Selected autoscale group: %s' % selected_group_name)
    group = conn.get_all_groups(names=[selected_group_name])[0]
    if not group.instances:
        logger.info("No working instances in selected group %s" %
                    selected_group_name)
        upload_logs_to_s3()
        sys.exit()
    instance_ids = [i.instance_id for i in group.instances]
    instances = ec2.get_only_instances(instance_ids)
    return instances, conn
Example #18
def autoscale_group_hosts(group_name):
    import boto.ec2
    from boto.ec2.autoscale import AutoScaleConnection
    ec2 = boto.connect_ec2()
    conn = AutoScaleConnection()
    groups = conn.get_all_groups(names=[])
    groups = [group for group in groups if group.name.startswith(group_name)]

    instance_ids = []
    for group in groups:
        print "group name:", group.name
        instance_ids.extend([i.instance_id for i in group.instances])
    # Fetch each instance once, after collecting ids from every group.
    instances = ec2.get_only_instances(instance_ids)

    return set([
        i.private_ip_address for i in instances
    ]), instances[0].id, instances[0].tags.get("aws:autoscaling:groupName")
Example #19
    def as_connection():
        """ Create and return an Auto Scale Connection """

        key_id = config.get('Credentials', 'aws_access_key_id')
        access_key = config.get('Credentials', 'aws_secret_access_key')

        conn = AutoScaleConnection(key_id, access_key)

        return conn
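The module-level config object used here is not shown; one plausible way to build it (the file path and section name are assumptions mirroring a standard boto credentials file) is:

import os
import ConfigParser

config = ConfigParser.ConfigParser()
config.read(os.path.expanduser('~/.boto'))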
Example #20
 def __init__(self, clc_host, access_id, secret_key, token):
     #boto.set_stream_logger('foo')
     path='/services/AutoScaling'
     port=8773
     if clc_host[len(clc_host)-13:] == 'amazonaws.com':
         clc_host = clc_host.replace('ec2', 'autoscaling', 1)
         path = '/'
         reg = None
         port=443
     reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
     if boto.__version__ < '2.6':
         self.conn = AutoScaleConnection(access_id, secret_key, region=reg,
                               port=port, path=path,
                               is_secure=True, security_token=token, debug=0)
     else:
         self.conn = AutoScaleConnection(access_id, secret_key, region=reg,
                               port=port, path=path, validate_certs=False,
                               is_secure=True, security_token=token, debug=0)
     self.conn.http_connection_kwargs['timeout'] = 30
Example #21
 def __init__(self, args):
     """
     Initializing basic variables needed for auto scaling
     """
     self.configs = ConfigParser.RawConfigParser()
     self.args = args
     self.test_props = {}
     self.props = {}
     self.ec2_connection = EC2Connection(self.args.access_key,
                                         self.args.secret_key)
     self.autoscale_connection = AutoScaleConnection(
         self.args.access_key, self.args.secret_key)
     self.elb_connection = ELBConnection(self.args.access_key,
                                         self.args.secret_key)
     self.cw_connection = CloudWatchConnection(self.args.access_key,
                                               self.args.secret_key)
     self.firstInstance = None
     self.launchConfiguration = None
     self.healthCheck = None
Example #22
def find_unused_launch_configs():
    conn = AutoScaleConnection()
    autoscale_groups = conn.get_all_groups(max_records=100)
    launch_configs = conn.get_all_launch_configurations(max_records=100)
    launch_config_names = {lc.name for lc in launch_configs}
    used_launch_config_names = {asg.launch_config_name for asg in autoscale_groups}
    unused_launch_config_names = launch_config_names - used_launch_config_names

    print "Autoscale Groups and Current Launch Configs:"
    print "{:<40}{:<40}".format("ASG", "LC")
    for asg in autoscale_groups:
        #print "asg:", asg.name, "-> lc:", asg.launch_config_name
        print "{:<40}{:<40}".format(asg.name, asg.launch_config_name)

    print "\nUnused Launch Configs: (launch configs without a autoscale group)"
    unused_launch_config_names = list(sorted(unused_launch_config_names))
    for unused_launch_config in unused_launch_config_names:
        print "\t", unused_launch_config
    return unused_launch_config_names
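Assuming this function lives in the same module as cleanup_unused_launch_configs from Example #7, a small driver tying the two together might look like:

import sys

def main():
    # Dry run by default; pass --delete to actually remove the launch configs.
    unused = find_unused_launch_configs()
    if unused:
        cleanup_unused_launch_configs(unused, delete='--delete' in sys.argv)

if __name__ == '__main__':
    main()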
Example #23
 def _create_autoscale_connection(self):
     LOG.debug("Creating autoscale connection for %s" % self.config.name)
     region = RegionInfo(name=self.config.cloud_type,
                         endpoint=self.config.as_uri)
     self._as_conn = AutoScaleConnection(
         aws_access_key_id=self.config.access_id,
         aws_secret_access_key=self.config.secret_key,
         is_secure=True,
         port=self.config.as_port,
         region=region,
         validate_certs=False)
Example #24
def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
    :return: A connection to Amazon's Auto Scaling Service
    """
    from boto.ec2.autoscale import AutoScaleConnection
    return AutoScaleConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
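For reference, a minimal usage sketch (the region shown is only an example; credentials fall back to the environment or boto config when omitted):

from boto.regioninfo import RegionInfo

# Default region, credentials from the environment / boto config.
conn = connect_autoscale()

# Extra keyword arguments are passed straight through to AutoScaleConnection,
# e.g. an explicit regional endpoint.
region = RegionInfo(name='us-west-2',
                    endpoint='autoscaling.us-west-2.amazonaws.com')
conn = connect_autoscale(region=region)
groups = conn.get_all_groups()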
Example #25
def main():
    parser = optparse.OptionParser()
    parser.add_option("-c", "--config", dest="config_file", help="AutoScale config INI", metavar="FILE")
    (options, args) = parser.parse_args()
    logging.info("Using config file [%s]" % options.config_file)

    config = parse_config(options.config_file)

    aws_access = config.get("AWS", "access")
    aws_secret = config.get("AWS", "secret")

    logging.debug("Connecting to AWS with access [%s] and secret [%s]" % (aws_access, aws_secret))
    aws_connection = AutoScaleConnection(aws_access, aws_secret)

    lc = boto.ec2.autoscale.launchconfig.LaunchConfiguration(
        name=config.get("LaunchConfig", "name"),
        image_id=config.get("LaunchConfig", "image"),
        key_name=config.get("LaunchConfig", "key"),
        user_data=config.get("LaunchConfig", "user_data"),
        security_groups=config.get("LaunchConfig", "security_groups"),
        instance_type=config.get("LaunchConfig", "instance_type"),
    )
    logging.info("LC CONFIG = %s" % lc.__dict__)

    asg = boto.ec2.autoscale.group.AutoScalingGroup(
        group_name=config.get("AutoScaleGroup", "group_name"),
        availability_zones=config.get("AutoScaleGroup", "zones"),
        min_size=config.get("AutoScaleGroup", "min_instances"),
        max_size=config.get("AutoScaleGroup", "max_instances"),
        launch_config=lc,
    )

    print "ASG dict: %s" % asg.__dict__

    asg.connection = aws_connection
    params = {"AutoScalingGroupName": asg.name}
    asg = aws_connection.get_object("DescribeAutoScalingGroups", params, boto.ec2.autoscale.group.AutoScalingGroup)
    print asg
Example #26
def main():
    # sys.exit()  # disable self-killer AGAIN
    conn = AutoScaleConnection()
    instance_id = get_instance_metadata()['instance-id']
    log_file = '/tmp/remote_instance_starter2.log'
    flag, reason = check_logs_status(log_file)
    if flag and reason:
        s3_conn = boto.connect_s3()
        bucket = s3_conn.get_bucket(BUCKET_NAME)
        k = Key(bucket)
        k.key = BUCKET_KEY
        global log_file_path
        time.sleep(70)
        # Try to upload logs prior to stop server
        os.system('python upload_logs_to_s3.py')
        os.system('rm %s' % log_file)
        k.get_contents_to_filename(log_file_path)
        logger.warning(
            "Instance with id=%s was terminated"
            " due to reason='%s'. "
            "Instance was killed by itself.", instance_id, reason)
        k.set_contents_from_filename(log_file_path)
        conn.terminate_instance(instance_id, decrement_capacity=True)
Example #27
 def __init__(self, clc_host, access_id, secret_key, token):
     #boto.set_stream_logger('foo')
     path = '/services/AutoScaling'
     reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
     port = 8773
     if clc_host[len(clc_host) - 13:] == 'amazonaws.com':
         clc_host = clc_host.replace('ec2', 'autoscaling', 1)
         path = '/'
         reg = None
         port = 443
     self.conn = AutoScaleConnection(access_id,
                                     secret_key,
                                     region=reg,
                                     port=port,
                                     path=path,
                                     is_secure=True,
                                     security_token=token,
                                     debug=0)
     self.conn.APIVersion = '2011-01-01'
     if not (clc_host[len(clc_host) - 13:] == 'amazonaws.com'):
         self.conn.auth_region_name = 'Eucalyptus'
     self.conn.https_validate_certificates = False
     self.conn.http_connection_kwargs['timeout'] = 30
Example #28
 def __init__(self, args):
     """
     Initializing basic variables needed for auto scaling
     """
     self.configs                = ConfigParser.RawConfigParser()
     self.args                   = args
     self.test_props             = {}
     self.props                  = {}
     self.ec2_connection         = EC2Connection(self.args.access_key, self.args.secret_key)
     self.autoscale_connection   = AutoScaleConnection(self.args.access_key, self.args.secret_key)
     self.elb_connection         = ELBConnection(self.args.access_key, self.args.secret_key)
     self.cw_connection          = CloudWatchConnection(self.args.access_key, self.args.secret_key)
     self.firstInstance          = None
     self.launchConfiguration    = None
     self.healthCheck            = None
Example #29
def launch_auto_scaling(stage = 'development'):
	config = get_provider_dict()
	from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, LaunchConfiguration, Trigger
	conn = AutoScaleConnection(fabric.api.env.conf['AWS_ACCESS_KEY_ID'], fabric.api.env.conf['AWS_SECRET_ACCESS_KEY'], host='%s.autoscaling.amazonaws.com' % config['location'][:-1])
	
	for name, values in config.get(stage, {}).get('autoscale', {}).items():
		if any(group.name == name for group in conn.get_all_groups()):
			fabric.api.warn(fabric.colors.orange('Autoscale group %s already exists' % name))
			continue
		lc = LaunchConfiguration(name = '%s-launch-config' % name, image_id = values['image'],  key_name = config['key'])
		conn.create_launch_configuration(lc)
		ag = AutoScalingGroup(group_name = name, load_balancers = values.get('load-balancers'), availability_zones = [config['location']], launch_config = lc, min_size = values['min-size'], max_size = values['max-size'])
		conn.create_auto_scaling_group(ag)
		if 'min-cpu' in values and 'max-cpu' in values:
			tr = Trigger(name = '%s-trigger' % name, autoscale_group = ag, measure_name = 'CPUUtilization', statistic = 'Average', unit = 'Percent', dimensions = [('AutoScalingGroupName', ag.name)],
						 period = 60, lower_threshold = values['min-cpu'], lower_breach_scale_increment = '-1', upper_threshold = values['max-cpu'], upper_breach_scale_increment = '2', breach_duration = 60)
			conn.create_trigger(tr)
Example #30
def connect_autoscale(aws_access_key_id=None,
                      aws_secret_access_key=None,
                      **kwargs):
    """
    @type aws_access_key_id: string
    @param aws_access_key_id: Your AWS Access Key ID

    @type aws_secret_access_key: string
    @param aws_secret_access_key: Your AWS Secret Access Key

    @rtype: L{AutoScaleConnection<boto.ec2.autoscale.AutoScaleConnection>}
    @return: A connection to Amazon's Auto Scaling Service
    """
    from boto.ec2.autoscale import AutoScaleConnection
    return AutoScaleConnection(aws_access_key_id, aws_secret_access_key,
                               **kwargs)
Example #31
def launch_auto_scaling(stage='development'):
    config = get_provider_dict()
    from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, LaunchConfiguration, Trigger
    conn = AutoScaleConnection(fabric.api.env.conf['AWS_ACCESS_KEY_ID'],
                               fabric.api.env.conf['AWS_SECRET_ACCESS_KEY'],
                               host='%s.autoscaling.amazonaws.com' %
                               config['location'][:-1])

    for name, values in config.get(stage, {}).get('autoscale', {}).items():
        if any(group.name == name for group in conn.get_all_groups()):
            fabric.api.warn(
                fabric.colors.orange('Autoscale group %s already exists' %
                                     name))
            continue
        lc = LaunchConfiguration(name='%s-launch-config' % name,
                                 image_id=values['image'],
                                 key_name=config['key'])
        conn.create_launch_configuration(lc)
        ag = AutoScalingGroup(group_name=name,
                              load_balancers=values.get('load-balancers'),
                              availability_zones=[config['location']],
                              launch_config=lc,
                              min_size=values['min-size'],
                              max_size=values['max-size'])
        conn.create_auto_scaling_group(ag)
        if 'min-cpu' in values and 'max-cpu' in values:
            tr = Trigger(name='%s-trigger' % name,
                         autoscale_group=ag,
                         measure_name='CPUUtilization',
                         statistic='Average',
                         unit='Percent',
                         dimensions=[('AutoScalingGroupName', ag.name)],
                         period=60,
                         lower_threshold=values['min-cpu'],
                         lower_breach_scale_increment='-1',
                         upper_threshold=values['max-cpu'],
                         upper_breach_scale_increment='2',
                         breach_duration=60)
            conn.create_trigger(tr)
Example #32
def delete_autoscaling():
    con = AutoScaleConnection(aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                              aws_access_key_id=AWS_ACCESS_KEY,
                              region=RegionInfo(name=REGION,
                                               endpoint='autoscaling.%s.amazonaws.com' % REGION))

    print "Deleting autoscaling group.."
    group = con.get_all_groups(names=[AUTOSCALING_GROUP_NAME])[0]
    print "shutting down instances"
    group.shutdown_instances()
    time.sleep(LONG_SLEEP_PERIOD)
    print "Deleting autoscaling group itself"
    con.delete_auto_scaling_group(AUTOSCALING_GROUP_NAME, force_delete=True)
    print "Deleting launch configuration"
    con.delete_launch_configuration(AUTOSCALING_GROUP_NAME)

    con.close()
Example #33
 def __init__(self, aws, wait_time_secs, app_name=None,):
     """
     Creates the EbsHelper
     """
     self.aws = aws
     self.ebs = connect_to_region(aws.region, aws_access_key_id=aws.access_key,
                                  aws_secret_access_key=aws.secret_key,
                                  security_token=aws.security_token)
     self.autoscale = AutoScaleConnection(aws_access_key_id=aws.access_key,
                                          aws_secret_access_key=aws.secret_key,
                                          security_token=aws.security_token)
     self.s3 = S3Connection(
         aws_access_key_id=aws.access_key, 
         aws_secret_access_key=aws.secret_key, 
         security_token=aws.security_token,
         host=(lambda r: 's3.amazonaws.com' if r == 'us-east-1' else 's3-' + r + '.amazonaws.com')(aws.region))
     self.app_name = app_name
     self.wait_time_secs = wait_time_secs
Example #34
 def set_endpoint(self, endpoint):
     #boto.set_stream_logger('scale')
     path = '/services/AutoScaling'
     reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
     port = 8773
     if endpoint[len(endpoint)-13:] == 'amazonaws.com':
         endpoint = endpoint.replace('ec2', 'autoscaling', 1)
         path = '/'
         reg = RegionInfo(endpoint=endpoint)
         port = 443
     self.conn = AutoScaleConnection(self.access_id, self.secret_key, region=reg,
                               port=port, path=path,
                               is_secure=True, security_token=self.token, debug=0)
     self.conn.APIVersion = '2011-01-01'
     if not(endpoint[len(endpoint)-13:] == 'amazonaws.com'):
         self.conn.auth_region_name = 'Eucalyptus'
     self.conn.https_validate_certificates = False
     self.conn.http_connection_kwargs['timeout'] = 30
Example #35
def create_autoscaling_group():
    global img
    conn = AutoScaleConnection(os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'])
    autoscale = boto.ec2.autoscale.connect_to_region('us-east-1')
    print conn.get_all_groups()
    timestamp = time.time()
    value = datetime.datetime.fromtimestamp(timestamp)
    humanreadabledate = value.strftime('%Y-%m-%d_%H.%M.%S')
    config_name = 'live_launch_config'+humanreadabledate
    init_script = "#!/bin/sh /home/ec2-user/sds/deployment_scripts/initialize_server.py"
    lc = LaunchConfiguration(name=config_name, image_id=img,
                             key_name='SDSEastKey',
                             security_groups=['sg-a7afb1c2'],
                             user_data=init_script)
    conn.create_launch_configuration(lc)
    ag = AutoScalingGroup(group_name=config_name, load_balancers=['SDSLiveLoadBalancer'], availability_zones=['us-east-1a'], launch_config=lc, min_size=2, max_size=2, connection=conn)
    conn.create_auto_scaling_group(ag)
Example #36
def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None,
                      **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
    :return: A connection to Amazon's Auto Scaling Service

    :type use_block_device_types: bool
    :param use_block_device_types: Specifies whether to return described Launch Configs
        with block device mappings containing block device types, or a list of old style
        block device mappings (deprecated). This defaults to False for compatibility with
        the old, incorrect style.
    """
    from boto.ec2.autoscale import AutoScaleConnection
    return AutoScaleConnection(aws_access_key_id, aws_secret_access_key,
                               **kwargs)
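A short usage sketch for the use_block_device_types flag described in the docstring (assuming it is simply forwarded to AutoScaleConnection as a keyword argument):

# Return launch configs whose block device mappings carry typed block
# devices instead of the legacy (deprecated) mapping format.
conn = connect_autoscale(use_block_device_types=True)
configs = conn.get_all_launch_configurations()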
Example #37
def get_asg_connection():
    conn = AutoScaleConnection()
    autoscale_groups = conn.get_all_groups(max_records=1)
    return conn
Example #38


 ######################### end parameter block ################################


######################### begin configuration ################################
# make the connections
conn_ec2 = boto.ec2.connect_to_region(
        regionName,
        aws_access_key_id = AWS_ACCESS_KEY,
        aws_secret_access_key = AWS_SECRET_KEY
    )
conn_reg = boto.ec2.elb.connect_to_region(regionName)
conn_elb = ELBConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
conn_as = AutoScaleConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
conn_cw = boto.ec2.cloudwatch.connect_to_region(
        regionName,
        aws_access_key_id = AWS_ACCESS_KEY,
        aws_secret_access_key = AWS_SECRET_KEY
    )
######################### end configuration ################################




# balancers = elb.get_all_load_balancers()
# print balancers[0]

# retrieve the instances in the autoscale group
Example #39
def create_AutoScaling():
    print "Creating AutoScaling..."
    # establish connection
    as_conn = AutoScaleConnection(AWSAccessKeyId, AWSSecretKey)
    # create launch configuration
    global lc
    lc = LaunchConfiguration(name='lc',
                             image_id=DATA_CEN_AMI,
                             key_name=ACCESS_KEY,
                             instance_monitoring=True,
                             security_groups=[SECURITY_GRP],
                             instance_type=MACHINE_TYPE)
    as_conn.create_launch_configuration(lc)

    # create tag for autoscaling group
    as_tag = Tag(key="Project",
                 value="2.2",
                 propagate_at_launch=True,
                 resource_id='my_group')

    # create autoscaling group
    global ag
    ag = AutoScalingGroup(group_name='my_group',
                          load_balancers=['myELB'],
                          availability_zones=['us-east-1a'],
                          launch_config=lc,
                          min_size=MIN_SIZE,
                          max_size=MAX_SIZE,
                          connection=as_conn,
                          tags=[as_tag])
    # associate the autoscaling group with launch configuration
    as_conn.create_auto_scaling_group(ag)

    # build the scale policy
    scale_up_policy = ScalingPolicy(name='scale_up',
                                    adjustment_type='ChangeInCapacity',
                                    as_name='my_group',
                                    scaling_adjustment=1,
                                    cooldown=60)
    scale_down_policy = ScalingPolicy(name='scale_down',
                                      adjustment_type='ChangeInCapacity',
                                      as_name='my_group',
                                      scaling_adjustment=-1,
                                      cooldown=60)

    # register the scale policy
    as_conn.create_scaling_policy(scale_up_policy)
    as_conn.create_scaling_policy(scale_down_policy)

    # refresh the scale policy for extra information
    scale_up_policy = as_conn.get_all_policies(as_group='my_group',
                                               policy_names=['scale_up'])[0]
    scale_down_policy = as_conn.get_all_policies(as_group='my_group',
                                                 policy_names=['scale_down'
                                                               ])[0]

    # create cloudwatch alarm
    cloudwatch = CloudWatchConnection(aws_access_key_id=AWSAccessKeyId,
                                      aws_secret_access_key=AWSSecretKey,
                                      is_secure=True)
    # region='us-east-1a')

    # associate the cloudwatch alarms with the autoscaling group
    alarm_dimensions = {"AutoScalingGroupName": 'my_group'}

    # create scale up alarm
    scale_up_alarm = MetricAlarm(name='scale_up_on_cpu',
                                 namespace='AWS/EC2',
                                 metric='CPUUtilization',
                                 statistic='Average',
                                 comparison='>',
                                 threshold='50',
                                 period='60',
                                 evaluation_periods=2,
                                 alarm_actions=[scale_up_policy.policy_arn],
                                 dimensions=alarm_dimensions)
    cloudwatch.create_alarm(scale_up_alarm)

    # create scale down alarm
    scale_down_alarm = MetricAlarm(
        name='scale_down_on_cpu',
        namespace='AWS/EC2',
        metric='CPUUtilization',
        statistic='Average',
        comparison='<',
        threshold='20',
        period='60',
        evaluation_periods=1,
        alarm_actions=[scale_down_policy.policy_arn],
        dimensions=alarm_dimensions)
    cloudwatch.create_alarm(scale_down_alarm)

    print "AutoScaling created successfully"
Example #40
def main():
    parser = argparse.ArgumentParser(
      description = "triggers a full LSDA rollout")
    
    parser.add_argument("--inspect", action = "store_true",
      help = "pause before baking AMI", default = False)
    parser.add_argument("--clean", action = "store_true",
      help = "reset from clean Ubuntu 12.04 image", default = False)
    parser.add_argument("--no-restart", action = "store_true",
      dest = "no_restart", help = "don't restart all nodes in ASG",
      default = False)
    
    options = parser.parse_args()
    
    logging.info("Starting rollout.")
    
    conn_ec2 = boto.ec2.connect_to_region("us-east-1")
    conn_ec2_as = AutoScaleConnection()
    
    if not options.clean:
        logging.info("Searching for existing images...")
        
        group = conn_ec2_as.get_all_groups(['LSDA Worker Pool'])[0]
        launch_config = conn_ec2_as.get_all_launch_configurations(
          names=[group.launch_config_name])[0]
        
        existing_images = conn_ec2.get_all_images(owners = ["self"])[0]
        
        ami_id = launch_config.image_id
        logging.info("Using existing image {0}".format(ami_id))
    
    else:
        ami_id = 'ami-59a4a230' # Clean Ubuntu 12.04.
        logging.info("Using base image {0}".format(ami_id))
    
    reservation = conn_ec2.run_instances(
        image_id = ami_id,
        key_name = 'jeremy-aws-key',
        instance_type = 't1.micro',
        security_groups = ['Worker Nodes'],
    )
    
    try:
        instance = reservation.instances[0]
        logging.info("Waiting for instance {} to start...".format(instance.id))
        
        instance.update()
        while instance.ip_address is None:
            logging.info("Not ready. Retrying in 10 seconds...")
            time.sleep(10)
            instance.update()
        
        while True:
            result = subprocess.call(["ssh", "-o",
              "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no",
              "ubuntu@{}".format(instance.ip_address), "uname -r"])
            if result != 0:
                logging.info("Not ready for SSH. Retrying in 10 seconds...")
                time.sleep(10)
            else:
                break
        
        logging.info("Instance has started; running setup script.")
        logging.info("(IP address is {})".format(instance.ip_address))
        
        subprocess.check_call(["ssh", "-o", "UserKnownHostsFile=/dev/null",
          "-o", "StrictHostKeyChecking=no",
          "ubuntu@{}".format(instance.ip_address),
          "sudo stop lsda; sleep 20; sudo rm worker.sh;"
          "wget https://raw.github.com/fatlotus/lsda-infrastructure/"
          "master/servers/worker.sh; sudo bash worker.sh"])
        
        if options.inspect:
            logging.info("Connect to ubuntu@{} to inspect the image."
              .format(instance.ip_address))
            logging.info("When you're done, press CTRL-C.")
            
            try:
                while True:
                    time.sleep(3600)
            except KeyboardInterrupt:
                pass
        
        logging.info("Creating AMI from existing image.")
        new_image = instance.create_image(
            name = ('Latest-{:%Y-%m-%d--%H-%M-%S}'.
              format(datetime.datetime.now())),
            description = "(automatically generated)"
        )
        
        time.sleep(10)
        
        image_object = conn_ec2.get_image(new_image)
        
        while image_object.state == "pending":
            logging.info("State is still pending. Retrying in 10 seconds.")
            time.sleep(10)
            image_object.update()
        
    finally:
        logging.warn("Stopping all nodes.")
        for node in reservation.instances:
            node.terminate()
    
    logging.info("Creating new LaunchConfiguration.")
    
    mapping = BlockDeviceMapping()
    mapping["/dev/sdb"] = BlockDeviceType(ephemeral_name = "ephemeral0")
    mapping["/dev/sdc"] = BlockDeviceType(ephemeral_name = "ephemeral1")
    
    new_launch_config = LaunchConfiguration(
        conn_ec2_as,
        name = ('Latest-{:%Y-%m-%d--%H-%M-%S}'.
          format(datetime.datetime.now())),
        image_id = new_image,
        security_groups = ['sg-f9a08492'],
        instance_type = 'c3.large',
        block_device_mappings = [mapping],
        instance_profile_name = ("arn:aws:iam::470084502640:instance-profile"
          "/dal-access"),
        spot_price = 0.02,
    )
    conn_ec2_as.create_launch_configuration(new_launch_config)
    
    logging.info("Setting launch configuration in existing ASG.")
    group.launch_config_name = new_launch_config.name
    group.update()
    
    logging.info("Cleaning up old launch configurations.")
    for config in conn_ec2_as.get_all_launch_configurations():
        if config.image_id != new_launch_config.image_id:
            conn_ec2_as.delete_launch_configuration(config.name)
    
    logging.info("Cleaning up old images.")
    for image in conn_ec2.get_all_images(filters={"name":["LatestImage"]}):
        if image.id != new_image:
            conn_ec2.deregister_image(image.id, True)
    
    logging.info("Rollout complete. New image is {}.".format(new_image))
    
    if not options.no_restart:
        logging.info("Triggering reload of all nodes in ASG.")
        for instance in group.instances:
            for reservation in conn_ec2.get_all_instances([instance.instance_id]):
                reservation.stop_all()
Example #41
class WatchData:
    datafile = "/var/tmp/watchdata.p"
    dry = False
    low_limit = 72
    high_limit = 90
    high_urgent = 95
    stats_period = 60
    history_size = 0

    def __init__(self):
        self.name = ''
        self.instances = 0
        self.new_desired = 0
        self.desired = 0
        self.instances_info = None
        self.previous_instances = 0
        self.action = ""
        self.action_ts = 0
        self.changed_ts = 0
        self.total_load = 0
        self.avg_load = 0
        self.max_load = 0
        self.up_ts = 0
        self.down_ts = 0
        self.max_loaded = None
        self.loads = {}
        self.measures = {}
        self.emergency = False
        self.history = None
        self.trend = 0
        self.exponential_average = 0
        self.ts = 0

    def __getstate__(self):
        """ Don't store these objets """
        d = self.__dict__.copy()
        del d['ec2']
        del d['cw']
        del d['autoscale']
        del d['group']
        del d['instances_info']
        return d

    def connect(self, groupname):
        self.ec2 = boto.connect_ec2()
        self.cw = CloudWatchConnection()
        self.autoscale = AutoScaleConnection()
        self.group = self.autoscale.get_all_groups(names=[groupname])[0]
        self.instances = len(self.group.instances)
        self.desired = self.group.desired_capacity
        self.name = groupname
        self.ts = int(time.time())

    def get_instances_info(self):
        ids = [i.instance_id for i in self.group.instances]
        self.instances_info = self.ec2.get_only_instances(instance_ids=ids)

    def get_CPU_loads(self):
        """ Read instances load and store in data """
        measures = 0
        for instance in self.group.instances:
            load = self.get_instance_CPU_load(instance.instance_id)
            if load is None:
                continue
            measures += 1
            self.total_load += load
            self.loads[instance.instance_id] = load
            if load > self.max_load:
                self.max_load = load
                self.max_loaded = instance.instance_id

        if measures > 0:
            self.avg_load = self.total_load / measures

    def get_instance_CPU_load(self, instance):
        end = datetime.datetime.now()
        start = end - datetime.timedelta(seconds=int(self.stats_period * 3))

        m = self.cw.get_metric_statistics(self.stats_period, start, end,
                                          "CPUUtilization", "AWS/EC2",
                                          ["Average"],
                                          {"InstanceId": instance})
        if len(m) > 0:
            measures = self.measures[instance] = len(m)
            ordered = sorted(m, key=lambda x: x['Timestamp'])
            averages = [x['Average'] for x in ordered]
            average = reduce(lambda x, y: 0.4 * x + 0.6 * y, averages[-2:])
            return average

        return None

    @classmethod
    def from_file(cls):
        try:
            data = pickle.load(open(cls.datafile, "rb"))
        except:
            data = WatchData()

        return data

    def store(self, annotation=False):
        if self.history_size > 0:
            if not self.history: self.history = []
            self.history.append([
                int(time.time()),
                len(self.group.instances),
                int(round(self.total_load)),
                int(round(self.avg_load))
            ])
            self.history = self.history[-self.history_size:]

        pickle.dump(self, open(self.datafile, "wb"))

        if annotation:
            import utils
            text = json.dumps(self.__getstate__(), skipkeys=True)
            utils.store_annotation("ec2_watch", text)

    def check_too_low(self):
        for instance, load in self.loads.iteritems():
            if load is not None and self.measures[
                    instance] > 1 and self.instances > 1 and load < self.avg_load * 0.2 and load < 4:
                self.emergency = True
                # Check if the desired instances can be decreased
                self.check_avg_low()
                self.action = "EMERGENCY LOW (%s %5.2f%%) " % (instance, load)
                self.kill_instance(instance)
                return True
        return self.emergency

    def check_too_high(self):
        for instance, load in self.loads.iteritems():
            if load is not None and self.measures[
                    instance] > 1 and load > self.high_urgent:
                self.emergency = True
                self.action = "EMERGENCY HIGH (%s %5.2f%%) " % (instance, load)
                if self.instances > 1 and load > self.avg_load * 1.5:
                    self.action += " killing bad instance"
                    self.kill_instance(instance)
                else:
                    self.action += " increasing instances to %d" % (
                        self.instances + 1, )
                    self.set_desired(self.instances + 1)
                return True

        return self.emergency

    def check_avg_high(self):
        threshold = self.high_limit
        if self.instances == 1:
            threshold = threshold * 0.9  # Increase faster if there is just one instance

        if self.avg_load > threshold:
            self.action = "WARN, high load: %d -> %d " % (self.instances,
                                                          self.instances + 1)
            self.set_desired(self.instances + 1)
            return True

    def check_avg_low(self):
        if self.instances <= self.group.min_size:
            return False

        if self.total_load / (self.instances - 1) < self.low_limit:
            self.action = "low load: %d -> %d " % (self.instances,
                                                   self.instances - 1)
            self.set_desired(self.instances - 1)

    def kill_instance(self, id):
        if self.action:
            print(self.action)
        print("Kill instance", id)
        syslog.syslog(
            syslog.LOG_INFO, "ec2_watch kill_instance: %s instances: %d (%s)" %
            (id, self.instances, self.action))
        if self.dry:
            return
        self.ec2.terminate_instances(instance_ids=[id])
        self.action_ts = time.time()

    def set_desired(self, desired):
        if self.action:
            print(self.action)
        print("Setting instances from %d to %d" % (self.instances, desired))
        syslog.syslog(
            syslog.LOG_INFO, "ec2_watch set_desired: %d -> %d (%s)" %
            (self.instances, desired, self.action))
        if self.dry:
            return
        if desired >= self.group.min_size:
            self.group.set_capacity(desired)
        self.action_ts = time.time()
        self.new_desired = desired
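A plausible polling driver for WatchData (the group name is an assumption), roughly in the order the checks appear to expect:

data = WatchData.from_file()
data.connect('my-web-asg')
data.get_CPU_loads()

# Handle emergencies first, then the average-load thresholds.
if not data.check_too_high() and not data.check_too_low():
    if not data.check_avg_high():
        data.check_avg_low()

data.store()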
Example #42
        "--mothball",
        type=str,
        help="reduce the instances for all autoscaling group(s) "
        "in a stack to zero")
    parser.add_argument(
        "-r",
        "--reopen",
        nargs=2,
        help="increase the instances for all autoscaling group(s) "
        "in a stack to min:max:desired")
    args = parser.parse_args()

    # connect to AWS
    try:
        cfn = CloudFormationConnection()
        asg = AutoScaleConnection()
    except:
        print "AWS connect error"
    else:
        # get the key data
        data = getStackAutoscalingGroupData(cfn, asg)
        # list if explicitly listing or not doing anything else
        if args.list or args.mothball is None and args.reopen is None:
            for stackname in sorted(data, key=data.__getitem__):
                print "{s}:".format(s=stackname, )
                for asginfo in data[stackname]:
                    print "    {n} {mn}:{mx}:{d}".format(
                        n=asginfo['name'],
                        mn=asginfo['min'],
                        mx=asginfo['max'],
                        d=asginfo['desired'],
Example #43
class AutoScale:
    def __init__(self, args):
        """
        Initializing basic variables needed for auto scaling
        """
        self.configs = ConfigParser.RawConfigParser()
        self.args = args
        self.test_props = {}
        self.props = {}
        self.boto_props = {}  # raw boto config sections, filled by populateConfigs
        self.ec2_connection = EC2Connection(self.args.access_key,
                                            self.args.secret_key)
        self.autoscale_connection = AutoScaleConnection(
            self.args.access_key, self.args.secret_key)
        self.elb_connection = ELBConnection(self.args.access_key,
                                            self.args.secret_key)
        self.cw_connection = CloudWatchConnection(self.args.access_key,
                                                  self.args.secret_key)
        self.firstInstance = None
        self.launchConfiguration = None
        self.healthCheck = None

    def loadConfigs(self):
        """
        FIX ME: Currently doesn't do anything
        This method will load the configurations from the boto config file if
        present; otherwise it will accept parameters passed by the user.
        """
        if os.path.isfile("/etc/boto.cfg"):
            self.configs.read("/etc/boto.cfg")
            conf = self.configs.sections()
            self.populateConfigs(conf)
        if os.path.isfile("~/.boto"):
            self.configs.read("~/.boto")
            conf = self.configs.sections()
            self.populateConfigs(conf)

        print ">>> Loaded configs"

    def populateConfigs(self, sections):
        for section in sections:
            self.boto_props[section] = self.configs.items(section)
            for item in self.boto_props[section]:
                key, value = item
                if not self.props.has_key(key):
                    self.props[key] = value

    def createLaunchConfiguration(self, lc_name, ami_id, key_name):
        """
        Creates launch configuration for the auto scaling cluster
        """
        self.launchConfiguration = LaunchConfiguration(name=lc_name,
                                                       image_id=ami_id,
                                                       key_name=key_name)
        self.autoscale_connection.create_launch_configuration(
            self.launchConfiguration)
        print ">>> Created launch configuration: " + lc_name

    def createAutoScaleGroup(self, asg_name):
        """
        Create an Auto Scaling group for the auto scaling cluster
        """
        autoScalingGroup = AutoScalingGroup(
            group_name=asg_name,
            load_balancers=[self.args.lb_name],
            launch_config=self.launchConfiguration,
            min_size=self.args.min_size,
            max_size=self.args.max_size,
            availability_zones=['us-east-1a'])
        self.autoscale_connection.create_auto_scaling_group(autoScalingGroup)
        print ">>> Created auto scaling group: " + asg_name

    def createTrigger(self, trigger_name, measure, asg_name):
        """
        Trigger to spawn new instances as per specific metrics
        """
        alarm_actions = []
        dimensions = {"AutoScalingGroupName": asg_name}
        policies = self.autoscale_connection.get_all_policies(
            as_group=self.args.asg_name, policy_names=[self.args.asp_name])
        for policy in policies:
            alarm_actions.append(policy.policy_arn)
        alarm = MetricAlarm(name=trigger_name,
                            namespace="AWS/EC2",
                            metric=measure,
                            statistic="Average",
                            comparison=">=",
                            threshold=50,
                            period=60,
                            unit="Percent",
                            evaluation_periods=2,
                            alarm_actions=alarm_actions,
                            dimensions=dimensions)

        self.cw_connection.create_alarm(alarm)
        print ">>> Created trigger: " + self.args.trigger

    def createAutoScalePolicy(self, asp_name):
        """
        Creates an Auto Scaling policy to add/remove an instance from the auto scaling cluster
        """
        self.autoScalingUpPolicy = ScalingPolicy(
            name=asp_name + '-up',
            adjustment_type="ChangeInCapacity",
            as_name=self.args.asg_name,
            scaling_adjustment=1,
            cooldown=180)
        self.autoScalingDownPolicy = ScalingPolicy(
            name=asp_name + '-down',
            adjustment_type="ChangeInCapacity",
            as_name=self.args.asg_name,
            scaling_adjustment=-1,
            cooldown=180)

        self.autoscale_connection.create_scaling_policy(
            self.autoScalingUpPolicy)
        self.autoscale_connection.create_scaling_policy(
            self.autoScalingDownPolicy)

        print ">>> Created auto scaling policy: " + asp_name

    def configureHealthCheck(self, target):
        """
        Configures health check for the cluster
        """
        self.healthCheck = HealthCheck(target=target, timeout=5)
        print ">>> Configured health check for: " + target

    def createLoadBalancer(self, lb_name, region, lb_port, instance_port,
                           protocol):
        """
        Creates a load balancer for the cluster unless one with the same name
        already exists
        """
        listeners = [(int(lb_port), int(instance_port), protocol)]
        existing = [lb.name for lb in
                    self.elb_connection.get_all_load_balancers()]
        if lb_name not in existing:
            self.elb_connection.create_load_balancer(
                lb_name, [region], listeners)
            self.elb_connection.configure_health_check(
                name=lb_name, health_check=self.healthCheck)
            print ">>> Created load balancer: " + lb_name
        else:
            print "Load balancer with name '" + lb_name + "' already exists"

    def startInstance(self, image_id, key_name, region, instance_type):
        """
        Starts the first instance, which will serve requests regardless of the
        auto scaling instances.
        """
        reservation = self.ec2_connection.run_instances(
            image_id=image_id,
            min_count=1,
            max_count=1,
            placement=region,
            key_name=key_name,
            instance_type=instance_type)
        #        for instance in reservation.instances:
        #            instance.add_tag('node', '0')
        #            break

        self.firstInstance = reservation.instances[0].id.split('\'')[0]
        print ">>> Started instance: ", self.firstInstance

    def registerInstanceToELB(self, lb_name):
        """
        Register the first instance started to the Elastic Load Balancer.
        """
        self.elb_connection.register_instances(load_balancer_name=lb_name,
                                               instances=[self.firstInstance])
        print ">>> Registered instance '", self.firstInstance, "' to load balancer '" + lb_name + "'"

    def setUp(self):
        """
        Sets up auto scaling for the application
        """
        # STEP 1: Load the configurations
        self.loadConfigs()
        # STEP 2: Configure the health check for the instances
        self.configureHealthCheck(self.args.lb_target)
        # STEP 3: Create a load balancer
        self.createLoadBalancer(self.args.lb_name, self.args.region,
                                self.args.lb_port, self.args.instance_port,
                                self.args.protocol)
        # STEP 4: Start the first instance
        self.startInstance(self.args.ami_id, self.args.key_name,
                           self.args.region, self.args.instance_type)
        # STEP 5: Register the instance with the load balancer created in STEP 3
        self.registerInstanceToELB(self.args.lb_name)
        # STEP 6: Create launch configuration to launch instances by auto scale
        self.createLaunchConfiguration(self.args.lc_name, self.args.ami_id,
                                       self.args.key_name)
        # STEP 7: Create an auto scaling group which will manage the instances started by auto scaling
        self.createAutoScaleGroup(self.args.asg_name)
        # STEP 8: Create auto scaling policies to add/remove a node
        self.createAutoScalePolicy(self.args.asp_name)
        # STEP 9: Create a trigger so that auto scaling can start or remove
        # an instance from the auto scaling group
        self.createTrigger(self.args.trigger, self.args.measure,
                           self.args.asg_name)
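
For reference, a minimal usage sketch of the setup flow above; the wrapper class name and every value below are hypothetical placeholders, and only the attribute names read by setUp() are taken from the code.

# Hypothetical usage sketch: AutoScaleClusterSetup and all values are
# placeholders; only the attribute names on `args` come from the methods above.
class FakeArgs(object):
    lb_name = 'my-lb'
    lb_target = 'HTTP:80/index.html'
    region = 'us-east-1a'
    lb_port = 80
    instance_port = 80
    protocol = 'http'
    ami_id = 'ami-12345678'
    key_name = 'my-keypair'
    instance_type = 't1.micro'
    lc_name = 'my-launch-config'
    asg_name = 'my-asg'
    asp_name = 'my-asp'
    trigger = 'my-cpu-trigger'
    measure = 'CPUUtilization'
    min_size = 1
    max_size = 4

setup = AutoScaleClusterSetup()  # hypothetical wrapper holding the methods above
setup.args = FakeArgs()
setup.setUp()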
Example #44
0
class WatchData:
	datafile = "/tmp/watchdata.p"
	dry = False
	low_limit = 70
	high_limit = 90
	high_urgent = 95
	stats_period = 120
	history_size = 0

	def __init__(self):
		self.name = ''
		self.instances = 0
		self.new_desired = 0
		self.desired = 0
		self.instances_info = None
		self.previous_instances = 0
		self.action = ""
		self.action_ts = 0
		self.changed_ts = 0
		self.total_load = 0
		self.avg_load = 0
		self.max_load = 0
		self.up_ts = 0
		self.down_ts = 0
		self.max_loaded = None
		self.loads = {}
		self.measures = {}
		self.emergency = False
		self.history = None

	def __getstate__(self):
		""" Don't store these objets """
		d = self.__dict__.copy()
		del d['ec2']
		del d['cw']
		del d['autoscale']
		del d['group']
		del d['instances_info']
		return d

	def connect(self, groupname):
		self.ec2 = boto.connect_ec2()
		self.cw = CloudWatchConnection()
		self.autoscale = AutoScaleConnection()
		self.group = self.autoscale.get_all_groups(names=[groupname])[0]
		self.instances = len(self.group.instances)
		self.desired = self.group.desired_capacity
		self.name = groupname

	def get_instances_info(self):
		ids = [i.instance_id for i in self.group.instances]
		self.instances_info = self.ec2.get_only_instances(instance_ids = ids)
	
	def get_CPU_loads(self):
		""" Read instances load and store in data """
		for instance in self.group.instances:
			load = self.get_instance_CPU_load(instance.instance_id)
			if load is None:
				continue
			self.total_load += load
			self.loads[instance.instance_id] = load
			if load > self.max_load:
				self.max_load = load
				self.max_loaded = instance.instance_id

		self.avg_load = self.total_load/self.instances

	def get_instance_CPU_load(self, instance):
		end = datetime.datetime.now()
		start = end - datetime.timedelta(seconds=300)

		m = self.cw.get_metric_statistics(self.stats_period, start, end, "CPUUtilization", "AWS/EC2", ["Average"], {"InstanceId": instance})
		if len(m) > 0:
			self.measures[instance] = len(m)
			ordered = sorted(m, key=lambda x: x['Timestamp'], reverse=True)
			return ordered[0]['Average']

		return None

	@classmethod
	def from_file(cls):
		try:
			data = pickle.load(open(cls.datafile, "rb"))
		except:
			data = WatchData()

		return data

	def store(self, annotation = False):
		if self.history_size > 0:
			if not self.history: self.history = []
			self.history.append([int(time.time()), len(self.group.instances), int(round(self.total_load))])
			self.history = self.history[-self.history_size:]

		pickle.dump(self, open(self.datafile, "wb" ))

		if annotation:
			import utils
			text = json.dumps(self.__getstate__(), skipkeys=True)
			utils.store_annotation("ec2_watch", text)

	def check_too_low(self):
		for instance, load in self.loads.iteritems():
			if load is not None and self.measures[instance] > 1 and self.instances > 1 and load < self.avg_load * 0.2 and load < 4:
				self.emergency = True
				self.check_avg_low() # Check if the desired instances can be decreased
				self.action = "EMERGENCY LOW (%s %5.2f%%) " % (instance, load)
				self.kill_instance(instance)
				return True
		return self.emergency

	def check_too_high(self):
		for instance, load in self.loads.iteritems():
			if load is not None and self.measures[instance] > 1 and load > self.high_urgent:
				self.emergency = True
				self.action = "EMERGENCY HIGH (%s %5.2f%%) " % (instance, load)
				if self.instances > 1 and load > self.avg_load * 1.5:
					self.action += " killing bad instance"
					self.kill_instance(instance)
				else:
					self.action += " increasing instances to %d" % (self.instances+1,)
					self.set_desired(self.instances+1)
				return True

		return self.emergency

	def check_avg_high(self):
		threshold = self.high_limit
		if self.instances == 1:
			threshold = threshold * 0.9 # Increase faster if there is just one instance
		
		if self.avg_load > threshold:
			self.action = "WARN, high load: %d -> %d " % (self.instances, self.instances + 1)
			self.set_desired(self.instances + 1)
			return True

	def check_avg_low(self):
		if self.instances <= self.group.min_size:
			return False
		
		if self.total_load/(self.instances-1) < self.low_limit:
			self.action = "low load: %d -> %d " % (self.instances, self.instances - 1)
			self.set_desired(self.instances - 1)

	def kill_instance(self, id):
		if self.action:
			print self.action
		print "Kill instance", id
		syslog.syslog(syslog.LOG_INFO, "ec2_watch kill_instance: %s instances: %d (%s)" % (id, self.instances, self.action))
		if self.dry:
			return
		self.ec2.terminate_instances(instance_ids=[id])
		self.action_ts = time.time()

	def set_desired(self, desired):
		if self.action:
			print self.action
		print "Setting instances from %d to %d" % (self.instances, desired)
		syslog.syslog(syslog.LOG_INFO, "ec2_watch set_desired: %d -> %d (%s)" % (self.instances, desired, self.action))
		if self.dry:
			return
		if desired >= self.group.min_size:
			self.group.set_capacity(desired)
		self.action_ts = time.time()
		self.new_desired = desired
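
A minimal sketch of one monitoring pass with WatchData, e.g. from a cron job; the group name is a placeholder.

# Minimal sketch of one monitoring pass (group name is a placeholder).
data = WatchData.from_file()           # restore previous state, if any
data.connect('my-autoscaling-group')
data.get_CPU_loads()

if not (data.check_too_high() or data.check_too_low()):
    # No emergency detected: fall back to the average-load rules.
    if not data.check_avg_high():
        data.check_avg_low()

data.store()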
Example #45
0
def create_autoscaling(ami_id, sns_arn):
    """
    Creates the autoscaling group for proxy instances
    Inspired by boto autoscaling tutorial.
    """
    con = AutoScaleConnection(aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                              aws_access_key_id=AWS_ACCESS_KEY,
                              region=RegionInfo(name=REGION,
                                               endpoint='autoscaling.%s.amazonaws.com' % REGION))

    print "Creating autoscaling configuration.."
    config = LaunchConfiguration(name=AUTOSCALING_GROUP_NAME,
                                 image_id=ami_id,
                                 key_name=KEY_NAME,
                                 security_groups=[EC2_SECURITY_GROUP_NAME],
                                 instance_type=INSTANCE_TYPE)

    con.create_launch_configuration(config)


    print "Create autoscaling group..."
    ag = AutoScalingGroup(name=AUTOSCALING_GROUP_NAME,
                          launch_config=config,
                          availability_zones=["{0}a".format(REGION)],
                          load_balancers=[ELB_NAME],
                          min_size=AUTOSCALING_MIN_INSTANCES,
                          max_size=AUTOSCALING_MAX_INSTANCES,
                          group_name=AUTOSCALING_GROUP_NAME)
    con.create_auto_scaling_group(ag)

    # fetch the autoscale group after it is created (unused but may be necessary)
    _ = con.get_all_groups(names=[AUTOSCALING_GROUP_NAME])[0]

    # Create tag name for autoscaling-created machines
    as_tag = Tag(key='Name', value=AUTOSCALING_GROUP_NAME, propagate_at_launch=True, resource_id=AUTOSCALING_GROUP_NAME)
    con.create_or_update_tags([as_tag])


    print "Creating autoscaling policy..."
    scaleup_policy = ScalingPolicy(name='scale_up',
                                   adjustment_type='ChangeInCapacity',
                                   as_name=AUTOSCALING_GROUP_NAME,
                                   scaling_adjustment=1,
                                   cooldown=AUTOSCALING_COOLDOWN_PERIOD)

    scaledown_policy = ScalingPolicy(name='scale_down',
                                     adjustment_type='ChangeInCapacity',
                                     as_name=AUTOSCALING_GROUP_NAME,
                                     scaling_adjustment=-1,
                                     cooldown=AUTOSCALING_COOLDOWN_PERIOD)

    con.create_scaling_policy(scaleup_policy)
    con.create_scaling_policy(scaledown_policy)

    # Get freshened policy objects
    scaleup_policy = con.get_all_policies(as_group=AUTOSCALING_GROUP_NAME, policy_names=['scale_up'])[0]
    scaledown_policy = con.get_all_policies(as_group=AUTOSCALING_GROUP_NAME, policy_names=['scale_down'])[0]

    print "Creating cloudwatch alarms"
    cloudwatch_con = CloudWatchConnection(aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                                      aws_access_key_id=AWS_ACCESS_KEY,
                                      region=RegionInfo(name=REGION,
                                                        endpoint='monitoring.%s.amazonaws.com' % REGION))


    alarm_dimensions = {"AutoScalingGroupName": AUTOSCALING_GROUP_NAME}
    scaleup_alarm = MetricAlarm(name='scale_up_on_cpu',
                                namespace='AWS/EC2',
                                metric='CPUUtilization',
                                statistic='Average',
                                comparison='>',
                                threshold=AUTOSCALING_CPU_MAX_THRESHOLD,
                                period='60',
                                evaluation_periods=1,
                                alarm_actions=[scaleup_policy.policy_arn, sns_arn],
                                dimensions=alarm_dimensions)

    # Don't send SNS on scaledown policy
    scaledown_alarm = MetricAlarm(name='scale_down_on_cpu',
                                 namespace='AWS/EC2',
                                 metric='CPUUtilization',
                                 statistic='Average',
                                 comparison='<',
                                 threshold=AUTOSCALING_CPU_MIN_THRESHOLD,
                                 period='60',
                                 evaluation_periods=1,
                                 alarm_actions=[scaledown_policy.policy_arn],
                                 dimensions=alarm_dimensions)
    cloudwatch_con.create_alarm(scaleup_alarm)
    cloudwatch_con.create_alarm(scaledown_alarm)
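
The function above relies on a handful of module-level settings; an illustrative set of placeholder values (none taken from the original source) could look like this:

# Illustrative module-level settings assumed by create_autoscaling()
# (all values are placeholders, not taken from the original source).
AWS_ACCESS_KEY = 'AKIA...'
AWS_SECRET_ACCESS_KEY = '...'
REGION = 'us-east-1'
KEY_NAME = 'proxy-keypair'
INSTANCE_TYPE = 't1.micro'
EC2_SECURITY_GROUP_NAME = 'proxy-sg'
ELB_NAME = 'proxy-elb'
AUTOSCALING_GROUP_NAME = 'proxy-asg'
AUTOSCALING_MIN_INSTANCES = 1
AUTOSCALING_MAX_INSTANCES = 4
AUTOSCALING_COOLDOWN_PERIOD = 180
AUTOSCALING_CPU_MAX_THRESHOLD = 80
AUTOSCALING_CPU_MIN_THRESHOLD = 20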
Example #46
0
    def test_basic(self):
        # NB: as it says on the tin these are really basic tests that only
        # (lightly) exercise read-only behaviour - and that's only if you
        # have any autoscale groups to introspect. It's useful, however, to
        # catch simple errors

        print('--- running %s tests ---' % self.__class__.__name__)
        c = AutoScaleConnection()

        self.assertTrue(repr(c).startswith('AutoScaleConnection'))

        groups = c.get_all_groups()
        for group in groups:
            self.assertIsInstance(group, AutoScalingGroup)

            # get activities
            activities = group.get_activities()

            for activity in activities:
                self.assertIsInstance(activity, Activity)

        # get launch configs
        configs = c.get_all_launch_configurations()
        for config in configs:
            self.assertIsInstance(config, LaunchConfiguration)

        # get policies
        policies = c.get_all_policies()
        for policy in policies:
            self.assertIsInstance(policy, ScalingPolicy)

        # get scheduled actions
        actions = c.get_all_scheduled_actions()
        for action in actions:
            self.assertIsInstance(action, ScheduledUpdateGroupAction)

        # get instances
        instances = c.get_all_autoscaling_instances()
        for instance in instances:
            self.assertIsInstance(instance, Instance)

        # get all scaling process types
        ptypes = c.get_all_scaling_process_types()
        for ptype in ptypes:
            self.assertIsInstance(ptype, ProcessType)

        # get adjustment types
        adjustments = c.get_all_adjustment_types()
        for adjustment in adjustments:
            self.assertIsInstance(adjustment, AdjustmentType)

        # get metrics collection types
        types = c.get_all_metric_collection_types()
        self.assertIsInstance(types, MetricCollectionTypes)

        # create the simplest possible AutoScale group
        # first create the launch configuration
        time_string = '%d' % int(time.time())
        lc_name = 'lc-%s' % time_string
        lc = LaunchConfiguration(name=lc_name, image_id='ami-2272864b',
                                 instance_type='t1.micro')
        c.create_launch_configuration(lc)
        found = False
        lcs = c.get_all_launch_configurations()
        for lc in lcs:
            if lc.name == lc_name:
                found = True
                break
        assert found

        # now create autoscaling group
        group_name = 'group-%s' % time_string
        group = AutoScalingGroup(name=group_name, launch_config=lc,
                                 availability_zones=['us-east-1a'],
                                 min_size=1, max_size=1)
        c.create_auto_scaling_group(group)
        found = False
        groups = c.get_all_groups()
        for group in groups:
            if group.name == group_name:
                found = True
                break
        assert found

        # now create a tag
        tag = Tag(key='foo', value='bar', resource_id=group_name,
                  propagate_at_launch=True)
        c.create_or_update_tags([tag])

        found = False
        tags = c.get_all_tags()
        for tag in tags:
            if tag.resource_id == group_name and tag.key == 'foo':
                found = True
                break
        assert found

        c.delete_tags([tag])

        # shutdown instances and wait for them to disappear
        group.shutdown_instances()
        instances = True
        while instances:
            time.sleep(5)
            groups = c.get_all_groups()
            for group in groups:
                if group.name == group_name:
                    if not group.instances:
                        instances = False

        group.delete()
        lc.delete()

        found = True
        while found:
            found = False
            time.sleep(5)
            tags = c.get_all_tags()
            for tag in tags:
                if tag.resource_id == group_name and tag.key == 'foo':
                    found = True

        assert not found

        print('--- tests completed ---')
Example #47
0
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path='/services/AutoScaling'
        port=8773
        if clc_host[len(clc_host)-13:] == 'amazonaws.com':
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = None
            port=443
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        self.conn = AutoScaleConnection(access_id, secret_key, region=reg,
                                  port=port, path=path,
                                  is_secure=True, security_token=token, debug=0)
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)
        f.close()

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # notice, honor_cooldown not supported.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records, next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj
Example #48
0
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        self.access_id = access_id
        self.secret_key = secret_key
        self.token = token
        self.set_endpoint(clc_host)

    def set_endpoint(self, endpoint):
        #boto.set_stream_logger('scale')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
        port = 8773
        if endpoint[len(endpoint)-13:] == 'amazonaws.com':
            endpoint = endpoint.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = RegionInfo(endpoint=endpoint)
            port = 443
        self.conn = AutoScaleConnection(self.access_id, self.secret_key, region=reg,
                                  port=port, path=path,
                                  is_secure=True, security_token=self.token, debug=0)
        self.conn.APIVersion = '2011-01-01'
        if not(endpoint[len(endpoint)-13:] == 'amazonaws.com'):
            self.conn.auth_region_name = 'Eucalyptus'
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)
        f.close()

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # notice, honor_cooldown not supported.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj

    # policy related
    def delete_policy(self, policy_name, autoscale_group=None):
        return self.conn.delete_policy(policy_name, autoscale_group)

    def get_all_policies(self, as_group=None, policy_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_policies(as_group, policy_names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Policies.json")
        return obj

    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
        return self.conn.execute_policy(policy_name, as_group, honor_cooldown)

    def create_scaling_policy(self, scaling_policy):
        return self.conn.create_scaling_policy(scaling_policy)

    def get_all_adjustment_types(self):
        return self.conn.get_all_adjustment_types()

    # tag related
    def delete_tags(self, tags):
        return self.conn.delete_tags(tags)

    def get_all_tags(self, filters=None, max_records=None, next_token=None):
        obj = self.conn.get_all_tags(filters, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Tags.json")
        return obj

    def create_or_update_tags(self, tags):
        return self.conn.create_or_update_tags(tags)
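
A hypothetical usage of the interface above against a Eucalyptus cloud controller; the host and credentials are placeholders.

# Hypothetical usage; host and credentials are placeholders.
iface = BotoScaleInterface('clc.example.com', 'AKIA...', 'secret...', token=None)
for group in iface.get_all_groups():
    print group.name, group.desired_capacity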
Example #49
0
class MSBManager:
    def __init__(self, aws_access_key, aws_secret_key):
        self.ec2_conn = EC2Connection(aws_access_key, aws_secret_key)
        self.elb_conn = ELBConnection(aws_access_key, aws_secret_key)
        self.auto_scale_conn = AutoScaleConnection(aws_access_key, aws_secret_key)
        self.cloud_watch_conn = CloudWatchConnection(aws_access_key, aws_secret_key)
        self.default_cooldown = 60

    def get_security_group(self, name):
        sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
        return sgs[0] if sgs else None

    def create_security_group(self, name, description):
        sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
        sg = sgs[0] if sgs else None
        if not sgs:
            sg = self.ec2_conn.create_security_group(name, description)

        try:
            sg.authorize(ip_protocol="-1", from_port=None, to_port=None, cidr_ip="0.0.0.0/0", dry_run=False)
        except EC2ResponseError:
            pass
        return sg

    def remove_security_group(self, name):
        self.ec2_conn.delete_security_group(name=name)

    def create_instance(self, image, instance_type, key_name, zone, security_groups, tags):
        instance = None
        reservations = self.ec2_conn.get_all_instances()
        for reservation in reservations:
            for i in reservation.instances:
                if "Name" in i.tags and i.tags["Name"] == tags["Name"] and i.state == "running":
                    instance = i
                    break

        if not instance:
            reservation = self.ec2_conn.run_instances(
                image,
                instance_type=instance_type,
                key_name=key_name,
                placement=zone,
                security_groups=security_groups,
                monitoring_enabled=True,
            )
            instance = reservation.instances[0]
            while not instance.update() == "running":
                time.sleep(5)
            time.sleep(10)
            self.ec2_conn.create_tags([instance.id], tags)

        return instance

    def request_spot_instance(self, bid, image, instance_type, key_name, zone, security_groups, tags):
        req = self.ec2_conn.request_spot_instances(
            price=bid,
            instance_type=instance_type,
            image_id=image,
            placement=zone,
            key_name=key_name,
            security_groups=security_groups,
        )
        instance_id = None

        while not instance_id:
            job_sir_id = req[0].id
            requests = self.ec2_conn.get_all_spot_instance_requests()
            for sir in requests:
                if sir.id == job_sir_id:
                    instance_id = sir.instance_id
                    break
            print "Job {} not ready".format(job_sir_id)
            time.sleep(60)

        self.ec2_conn.create_tags([instance_id], tags)

    def remove_instance(self, instance_id):
        self.remove_instances([instance_id])

    def remove_instances(self, instance_ids):
        self.ec2_conn.terminate_instances(instance_ids)

    def remove_instance_by_tag_name(self, name):
        reservations = self.ec2_conn.get_all_instances()
        data_centers_instance_ids = []
        for reservation in reservations:
            for instance in reservation.instances:
                if "Name" in instance.tags and instance.tags["Name"] == name and instance.state == "running":
                    data_centers_instance_ids.append(instance.id)
        if data_centers_instance_ids:
            self.remove_instances(data_centers_instance_ids)

    def create_elb(self, name, zone, project_tag_value, security_group_id, instance_ids=None):
        lbs = [l for l in self.elb_conn.get_all_load_balancers() if l.name == name]
        lb = lbs[0] if lbs else None
        if not lb:
            hc = HealthCheck(
                timeout=50, interval=60, healthy_threshold=2, unhealthy_threshold=8, target="HTTP:80/heartbeat"
            )
            ports = [(80, 80, "http")]
            zones = [zone]
            lb = self.elb_conn.create_load_balancer(name, zones, ports)

            self.elb_conn.apply_security_groups_to_lb(name, [security_group_id])
            lb.configure_health_check(hc)
            if instance_ids:
                lb.register_instances(instance_ids)

            params = {
                "LoadBalancerNames.member.1": lb.name,
                "Tags.member.1.Key": "15619project",
                "Tags.member.1.Value": project_tag_value,
            }
            lb.connection.get_status("AddTags", params, verb="POST")
        return lb

    def remove_elb(self, name):
        self.elb_conn.delete_load_balancer(name)

    def create_launch_configuration(self, name, image, key_name, security_groups, instance_type):
        lcs = [l for l in self.auto_scale_conn.get_all_launch_configurations() if l.name == name]
        lc = lcs[0] if lcs else None
        if not lc:
            lc = LaunchConfiguration(
                name=name,
                image_id=image,
                key_name=key_name,
                security_groups=[security_groups],
                instance_type=instance_type,
            )
            self.auto_scale_conn.create_launch_configuration(lc)
        return lc

    def remove_launch_configuration(self, name):
        self.auto_scale_conn.delete_launch_configuration(name)

    def create_autoscaling_group(self, name, lb_name, zone, tags, instance_ids=None):
        lc = self.create_launch_configuration()
        as_groups = [a for a in self.auto_scale_conn.get_all_groups() if a.name == name]
        as_group = as_groups[0] if as_groups else None
        if not as_group:
            as_group = AutoScalingGroup(
                group_name=name,
                load_balancers=[lb_name],
                availability_zones=[zone],
                launch_config=lc,
                min_size=4,
                max_size=4,
                health_check_type="ELB",
                health_check_period=120,
                connection=self.auto_scale_conn,
                default_cooldown=self.default_cooldown,
                desired_capacity=4,
                tags=tags,
            )

            self.auto_scale_conn.create_auto_scaling_group(as_group)
            if instance_ids:
                self.auto_scale_conn.attach_instances(name, instance_ids)

            scale_up_policy = ScalingPolicy(
                name="scale_up",
                adjustment_type="ChangeInCapacity",
                as_name=name,
                scaling_adjustment=1,
                cooldown=self.default_cooldown,
            )
            scale_down_policy = ScalingPolicy(
                name="scale_down",
                adjustment_type="ChangeInCapacity",
                as_name=name,
                scaling_adjustment=-1,
                cooldown=self.default_cooldown,
            )

            self.auto_scale_conn.create_scaling_policy(scale_up_policy)
            self.auto_scale_conn.create_scaling_policy(scale_down_policy)

            scale_up_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=["scale_up"])[0]
            scale_down_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=["scale_down"])[0]

            alarm_dimensions = {"AutoScalingGroupName": name}
            scale_up_alarm = MetricAlarm(
                name="scale_up_on_cpu",
                namespace="AWS/EC2",
                metric="CPUUtilization",
                statistic="Average",
                comparison=">",
                threshold=85,
                period=60,
                evaluation_periods=1,
                alarm_actions=[scale_up_policy.policy_arn],
                dimensions=alarm_dimensions,
            )
            self.cloud_watch_conn.create_alarm(scale_up_alarm)
            scale_down_alarm = MetricAlarm(
                name="scale_down_on_cpu",
                namespace="AWS/EC2",
                metric="CPUUtilization",
                statistic="Average",
                comparison="<",
                threshold=60,
                period=60,
                evaluation_periods=1,
                alarm_actions=[scale_down_policy.policy_arn],
                dimensions=alarm_dimensions,
            )
            self.cloud_watch_conn.create_alarm(scale_down_alarm)

        return as_group

    def update_autoscaling_group_max_size(self, as_group, max_size):
        setattr(as_group, "max_size", max_size)
        as_group.update()

    def update_autoscaling_group_min_size(self, as_group, min_size):
        setattr(as_group, "min_size", min_size)
        as_group.update()

    def remove_autoscaling_group(self, name):
        self.auto_scale_conn.delete_auto_scaling_group(name)
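
A hypothetical end-to-end usage of MSBManager; the credentials, AMI id, zone and resource names below are placeholders.

# Hypothetical usage; credentials, AMI id, zone and names are placeholders.
manager = MSBManager('AKIA...', 'secret...')
sg = manager.create_security_group('msb-sg', 'security group for the project')
instance = manager.create_instance('ami-12345678', 't1.micro', 'my-keypair',
                                   'us-east-1a', [sg.name], {'Name': 'msb-web'})
lb = manager.create_elb('msb-elb', 'us-east-1a', 'msb', sg.id,
                        instance_ids=[instance.id])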
Example #50
0
def add_ingress_rule(dry_run, go_agent_security_group, go_agent_security_group_owner, go_agent_security_group_name):
    """
    For each ASG (app) in each VPC, add a rule to each SG associated with the ASG's launch configuration
    that allows SSH ingress from the GoCD agents' SG.

    BEFORE RUNNING THIS SCRIPT!:
    - Use the assume_role bash script to assume the role in the proper account/VPC (edx, edge, mckinsey, etc.)
        - If you don't know what this is, ask someone in DevOps.
    - THEN run this script.
    """
    asg_conn = AutoScaleConnection()
    ec2_conn = boto.ec2.connect_to_region('us-east-1')
    asgs = []
    launch_configs = {}
    security_groups = {}

    logging.debug('All ASGs:')
    for group in asg_conn.get_all_groups():
        logging.debug('    {}'.format(group))
        asgs.append(group)

    logging.debug('All launch configurations:')
    for launch_config in asg_conn.get_all_launch_configurations():
        logging.debug('    {}'.format(launch_config))
        launch_configs[launch_config.name] = launch_config

    logging.debug('All security groups:')
    for sec_group in ec2_conn.get_all_security_groups():
        logging.debug('    {}'.format(sec_group))
        security_groups[sec_group.id] = sec_group

    # Validate that each ASG has a launch configuration.
    for group in asgs:
        try:
            logging.info("Launch configuration for ASG '{}' is '{}'.".format(
                group.name, launch_configs[group.launch_config_name]
            ))
        except KeyError:
            logging.error("Launch configuration '{}' for ASG '{}' was not found!".format(
                group.launch_config_name, group.name
            ))
            raise

    # Construct a fake security group for the prod-tools-goagent-sg security group in the edx-tools account.
    # This group will be used to grant the go-agents ingress into the ASG's VPCs.
    go_agent_security_group = boto.ec2.securitygroup.SecurityGroup(
        name=go_agent_security_group_name,
        owner_id=go_agent_security_group_owner,
        id=go_agent_security_group
    )

    # For each launch config, check for the security group. Can support multiple security groups
    # but the edX DevOps convention is to use a single security group.
    for group in asgs:
        launch_config = launch_configs[group.launch_config_name]
        if len(launch_config.security_groups) > 1:
            err_msg = "Launch config '{}' for ASG '{}' has more than one security group!: {}".format(
                launch_config.name, group.name, launch_config.security_groups
            )
            logging.warning(err_msg)
            continue
        sg_name = launch_config.security_groups[0]
        try:
            # Find the security group.
            sec_group = security_groups[sg_name]
        except KeyError:
            logging.error("Security group '{}' for ASG '{}' was not found!".format(sg_name, group.name))
            continue
        logging.info('BEFORE: Rules for security group {}:'.format(sec_group.name))
        logging.info(sec_group.rules)
        try:
            # Add the ingress rule to the security group.
            yes_no = six.moves.input("Apply the change to this security group? [Yes]")
            if yes_no in ("", "y", "Y", "yes"):
                sec_group.authorize(
                    ip_protocol='tcp',
                    from_port=22,
                    to_port=22,
                    src_group=go_agent_security_group,
                    dry_run=dry_run
                )
        except boto.exception.EC2ResponseError as exc:
            if exc.status == 412:
                # If the dry_run flag is set, then each rule addition will raise this exception.
                # Log it and carry on.
                logging.info('Dry run is True but rule addition would have succeeded for security group {}.'.format(
                    sg_name
                ))
            elif exc.code == "InvalidPermission.Duplicate":
                logging.info("Rule already exists for {}.".format(sg_name))
            else:
                raise
        logging.info('AFTER: Rules for security group {}:'.format(sg_name))
        logging.info(sec_group.rules)
Example #51
0
def add_ingress_rule(dry_run, go_agent_security_group, go_agent_security_group_owner, go_agent_security_group_name):
    """
    For each ASG (app) in each VPC, add a rule to each SG associated with the ASG's launch configuration
    that allows SSH ingress from the GoCD agents' SG.

    BEFORE RUNNING THIS SCRIPT!:
    - Use the assume_role bash script to assume the role in the proper account/VPC (edx, edge, mckinsey, etc.)
        - If you don't know what this is, ask someone in DevOps.
    - THEN run this script.
    """
    asg_conn = AutoScaleConnection()
    ec2_conn = boto.ec2.connect_to_region('us-east-1')
    asgs = []
    launch_configs = {}
    security_groups = {}

    logging.debug('All ASGs:')
    for group in asg_conn.get_all_groups():
        logging.debug('    {}'.format(group))
        asgs.append(group)

    logging.debug('All launch configurations:')
    for launch_config in asg_conn.get_all_launch_configurations():
        logging.debug('    {}'.format(launch_config))
        launch_configs[launch_config.name] = launch_config

    logging.debug('All security groups:')
    for sec_group in ec2_conn.get_all_security_groups():
        logging.debug('    {}'.format(sec_group))
        security_groups[sec_group.id] = sec_group

    # Validate that each ASG has a launch configuration.
    for group in asgs:
        try:
            logging.info("Launch configuration for ASG '{}' is '{}'.".format(
                group.name, launch_configs[group.launch_config_name]
            ))
        except KeyError:
            logging.error("Launch configuration '{}' for ASG '{}' was not found!".format(
                group.launch_config_name, group.name
            ))
            raise

    # Construct a fake security group for the prod-tools-goagent-sg security group in the edx-tools account.
    # This group will be used to grant the go-agents ingress into the ASG's VPCs.
    go_agent_security_group = boto.ec2.securitygroup.SecurityGroup(
        name=go_agent_security_group_name,
        owner_id=go_agent_security_group_owner,
        id=go_agent_security_group
    )

    # For each launch config, check for the security group. Can support multiple security groups
    # but the edX DevOps convention is to use a single security group.
    for group in asgs:
        launch_config = launch_configs[group.launch_config_name]
        if len(launch_config.security_groups) > 1:
            err_msg = "Launch config '{}' for ASG '{}' has more than one security group!: {}".format(
                launch_config.name, group.name, launch_config.security_groups
            )
            logging.warning(err_msg)
            continue
        sg_name = launch_config.security_groups[0]
        try:
            # Find the security group.
            sec_group = security_groups[sg_name]
        except KeyError:
            logging.error("Security group '{}' for ASG '{}' was not found!".format(sg_name, group.name))
            continue
        logging.info('BEFORE: Rules for security group {}:'.format(sec_group.name))
        logging.info(sec_group.rules)
        try:
            # Add the ingress rule to the security group.
            yes_no = raw_input("Apply the change to this security group? [Yes]")
            if yes_no in ("", "y", "Y", "yes"):
                sec_group.authorize(
                    ip_protocol='tcp',
                    from_port=22,
                    to_port=22,
                    src_group=go_agent_security_group,
                    dry_run=dry_run
                )
        except boto.exception.EC2ResponseError as exc:
            if exc.status == 412:
                # If the dry_run flag is set, then each rule addition will raise this exception.
                # Log it and carry on.
                logging.info('Dry run is True but rule addition would have succeeded for security group {}.'.format(
                    sg_name
                ))
            elif exc.code == "InvalidPermission.Duplicate":
                logging.info("Rule already exists for {}.".format(sg_name))
            else:
                raise
        logging.info('AFTER: Rules for security group {}:'.format(sg_name))
        logging.info(sec_group.rules)
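
A hypothetical invocation of the function above; the security group id and owner account are placeholders (in practice the function is driven by a CLI wrapper).

# Hypothetical invocation; the id and owner account are placeholders.
logging.basicConfig(level=logging.INFO)
add_ingress_rule(
    dry_run=True,
    go_agent_security_group='sg-0123abcd',
    go_agent_security_group_owner='123456789012',
    go_agent_security_group_name='prod-tools-goagent-sg',
)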
Example #52
0
                 healthy_threshold=10,
                 unhealthy_threshold=2,
                 target=elb['target'],
                 timeout=elb['time_out'])

lb.configure_health_check(hc)

params = {"LoadBalancerNames.member.1": lb.name,
                      "Tags.member.1.Key": 'Project',
                      "Tags.member.1.Value": '2.2'}
s=lb.connection.get_status('AddTags', params, verb='POST')

print 'Load Balancer DNS: ' + lb_dns

# -------------------------Create Auto Scaling Group----------------------------
con_as = AutoScaleConnection()
lc = LaunchConfiguration(name='Project2.2_Launch_Config',
                         image_id=DC_IMAGE,
                         key_name=KEY_NAME,
                         security_groups=SECURITY_GROUP2,
                         instance_type=DC_INSTANCE_TYPE,
                         instance_monitoring=DETAIL_MON)
con_as.create_launch_configuration(lc)

asg = AutoScalingGroup(name='Project2.2_AutoScaling_Group',
                       load_balancers=[elb['name']],
                       availability_zones=ZONE,
                       health_check_period='120',
                       health_check_type='ELB',
                       launch_config=lc,
                       min_size=1,
Example #53
0
 def __init__(self, aws_access_key, aws_secret_key):
     self.ec2_conn = EC2Connection(aws_access_key, aws_secret_key)
     self.elb_conn = ELBConnection(aws_access_key, aws_secret_key)
     self.auto_scale_conn = AutoScaleConnection(aws_access_key, aws_secret_key)
     self.cloud_watch_conn = CloudWatchConnection(aws_access_key, aws_secret_key)
     self.default_cooldown = 60
Example #54
0
 def __init__(self, region, **kwargs):
     self.conn = AutoScaleConnection(region=get_region(region), **kwargs)
Example #55
0
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        port = 8773
        if clc_host[len(clc_host) - 13:] == 'amazonaws.com':
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = None
            port = 443
        self.conn = AutoScaleConnection(access_id,
                                        secret_key,
                                        region=reg,
                                        port=port,
                                        path=path,
                                        is_secure=True,
                                        security_token=token,
                                        debug=0)
        self.conn.APIVersion = '2011-01-01'
        if not (clc_host[len(clc_host) - 13:] == 'amazonaws.com'):
            self.conn.auth_region_name = 'Eucalyptus'
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)
        f.close()

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self,
                                      instance_ids=None,
                                      max_records=None,
                                      next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids,
                                                      max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self,
                             group_name,
                             desired_capacity,
                             honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # notice, honor_cooldown not supported.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self,
                            instance_id,
                            health_status,
                            should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self,
                                      config_names=None,
                                      max_records=None,
                                      next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names,
                                                      max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj

    # policy related
    def delete_policy(self, policy_name, autoscale_group=None):
        return self.conn.delete_policy(policy_name, autoscale_group)

    def get_all_policies(self,
                         as_group=None,
                         policy_names=None,
                         max_records=None,
                         next_token=None):
        return self.conn.get_all_policies(as_group, policy_names, max_records,
                                          next_token)

    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
        return self.conn.execute_policy(policy_name, as_group, honor_cooldown)

    def create_scaling_policy(self, scaling_policy):
        return self.conn.create_scaling_policy(scaling_policy)

    def get_all_adjustment_types(self):
        return self.conn.get_all_adjustment_types()

    # tag related
    def delete_tags(self, tags):
        return self.conn.delete_tags(tags)

    def get_all_tags(self, filters=None, max_records=None, next_token=None):
        return self.conn.get_all_tags(filters, max_records, next_token)

    def create_or_update_tags(self, tags):
        return self.conn.create_or_update_tags(tags)
Example #56
0
    'instance_monitoring':
    True  # Indicates whether the instances will be launched with detailed monitoring enabled. Needed to enable CloudWatch
}

##############################END CONFIGURATION#######################################

#=================Construct a list of all availability zones for your region=========
conn_reg = boto.ec2.connect_to_region(region_name=region)
zones = conn_reg.get_all_zones()

zoneStrings = []
for zone in zones:
    zoneStrings.append(zone.name)

conn_elb = ELBConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
conn_as = AutoScaleConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY)

#=================Create a Load Balancer=============================================
#For a complete list of options see http://boto.cloudhackers.com/ref/ec2.html#module-boto.ec2.elb.healthcheck
hc = HealthCheck('healthCheck',
                 interval=elastic_load_balancer['interval'],
                 target=elastic_load_balancer['health_check_target'],
                 timeout=elastic_load_balancer['timeout'])

#For a complete list of options see http://boto.cloudhackers.com/ref/ec2.html#boto.ec2.elb.ELBConnection.create_load_balancer
lb = conn_elb.create_load_balancer(
    elastic_load_balancer['name'], zoneStrings,
    elastic_load_balancer['connection_forwarding'])

lb.configure_health_check(hc)
Example #57
0
def setup(CONF):
  global out

  lookup_tbl = {
    'name': CONF['NAME'],
  }

  conn = AutoScaleConnection()

  out['conn'] = conn

  # Launch Configurations
  LC = CONF['LC']
  LC['name'] = LC['name'] % lookup_tbl

  lc = LaunchConfiguration(**LC)
  conn.create_launch_configuration(lc)
  out['lc'] = lc

  # Auto Scaling Group
  ASG = CONF['ASG']
  ASG['group_name'] = ASG['group_name'] % lookup_tbl
  ASG['launch_config'] = lc

  groups = conn.get_all_groups(names=[ASG['group_name']])
  if groups:
    # update
    asg = groups[0]
    for k in ASG :
      # asg not iterable, try-except to make sure asg[k] exists
      try: asg.__getattribute__(k)
      except: continue
      asg.__setattr__(k, ASG[k])
    asg.launch_config_name = LC['name']
    asg.update()
    out['asg'] = asg
  else:
    # create
    asg = AutoScalingGroup(**ASG)
    conn.create_auto_scaling_group(asg)
    out['asg'] = asg

  # ASG Tags
  ASG_TAGS = CONF['ASG_TAGS']
  for i in ASG_TAGS:
    if 'propagate_at_launch' not in i:
      i['propagate_at_launch'] = True
    i['key'] = i['key'] % lookup_tbl
    i['value'] = i['value'] % lookup_tbl

  tags = [
      Tag(**dict(x.items() + [('resource_id', ASG['group_name'])])) for x in ASG_TAGS
  ]
  conn.create_or_update_tags(tags)

  # Triggers (Scaling Policy / Cloudwatch Alarm)
  conn_cw = connect_to_region(CONF['REGION'])

  TRIGGERS = CONF['TRIGGERS']
  for T in TRIGGERS:
    T['policy']['name'] = T['policy']['name'] % lookup_tbl
    T['policy']['as_name'] = ASG['group_name']
    T['alarm']['dimensions'] = {'AutoScalingGroupName': ASG['group_name']}
    T['alarm']['alarm_actions'] = None

    if 'name' in T['alarm']:
      T['alarm']['name'] = T['alarm']['name'] % lookup_tbl
    else:
      T['alarm']['name'] = T['policy']['name']

    # Policies are safely overwritten, so not checked for existence
    conn.create_scaling_policy(ScalingPolicy(**T['policy']))
    policy = conn.get_all_policies(as_group=ASG['group_name'], policy_names=[T['policy']['name']])[0]

    T['alarm']['alarm_actions'] = [policy.policy_arn]
    hits = conn_cw.describe_alarms(alarm_names=[T['alarm']['name']])

    conn_cw.create_alarm(MetricAlarm(**T['alarm']))
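
An illustrative CONF structure that setup() above can consume; every value is a placeholder and only the keys the function actually reads are assumed.

# Illustrative CONF dictionary for setup(); all values are placeholders.
CONF = {
    'NAME': 'web',
    'REGION': 'us-east-1',
    'LC': {
        'name': 'lc-%(name)s',
        'image_id': 'ami-12345678',
        'key_name': 'my-keypair',
        'instance_type': 't1.micro',
        'security_groups': ['web-sg'],
    },
    'ASG': {
        'group_name': 'asg-%(name)s',
        'availability_zones': ['us-east-1a'],
        'min_size': 1,
        'max_size': 4,
    },
    'ASG_TAGS': [
        {'key': 'Name', 'value': '%(name)s'},
    ],
    'TRIGGERS': [
        {
            'policy': {'name': 'scale-up-%(name)s',
                       'adjustment_type': 'ChangeInCapacity',
                       'scaling_adjustment': 1,
                       'cooldown': 180},
            'alarm': {'metric': 'CPUUtilization',
                      'namespace': 'AWS/EC2',
                      'statistic': 'Average',
                      'comparison': '>',
                      'threshold': 80,
                      'period': 60,
                      'evaluation_periods': 2},
        },
    ],
}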