示例#1
0
    def test_basic(self):
        # NB: as it says on the tin these are really basic tests that only
        # (lightly) exercise read-only behaviour - and that's only if you
        # have any autoscale groups to introspect. It's useful, however, to
        # catch simple errors

        print '--- running %s tests ---' % self.__class__.__name__
        c = AutoScaleConnection()

        self.assertTrue(repr(c).startswith('AutoScaleConnection'))

        groups = c.get_all_groups()
        for group in groups:
            self.assertTrue(type(group), AutoScalingGroup)

            # get activities
            activities = group.get_activities()

            for activity in activities:
                self.assertEqual(type(activity), Activity)

        # get launch configs
        configs = c.get_all_launch_configurations()
        for config in configs:
            self.assertTrue(type(config), LaunchConfiguration)

        # get policies
        policies = c.get_all_policies()
        for policy in policies:
            self.assertTrue(type(policy), ScalingPolicy)

        # get scheduled actions
        actions = c.get_all_scheduled_actions()
        for action in actions:
            self.assertTrue(type(action), ScheduledUpdateGroupAction)

        # get instances
        instances = c.get_all_autoscaling_instances()
        for instance in instances:
            self.assertTrue(type(instance), Instance)

        # get all scaling process types
        ptypes = c.get_all_scaling_process_types()
        for ptype in ptypes:
            self.assertTrue(type(ptype), ProcessType)

        # get adjustment types
        adjustments = c.get_all_adjustment_types()
        for adjustment in adjustments:
            self.assertTrue(type(adjustment), AdjustmentType)

        # get metrics collection types
        types = c.get_all_metric_collection_types()
        self.assertTrue(type(types), MetricCollectionTypes)

        print '--- tests completed ---'
示例#2
0
def cleanup_unused_launch_configs(unused_launch_config_names, delete=False):
    conn = AutoScaleConnection()
    configs = conn.get_all_launch_configurations(names=unused_launch_config_names)
    print "\nGetting ready to cleanup launch configs ... {}".format(delete and "FOR REAL" or "DRYRUN")
    for config in configs:
        if delete:
            print "deleting launch config: {} in {} seconds...".format(config.name, 5)
            time.sleep(5)
            print "deleting launch config: {}!".format(config.name)
            response = config.delete()
            print "deleted launch config: {} ({})!".format(config.name, response)
        else:
            print "dry run: not deleting config:", config.name
示例#3
0
def _is_up_to_date():
    """
    Returns True if this instance is up to date.
    """
    # Look up the launch configuration currently attached to the worker pool.
    conn = AutoScaleConnection()
    pool = conn.get_all_groups(["LSDA Worker Pool"])[0]
    launch_config = conn.get_all_launch_configurations(
      names=[pool.launch_config_name])[0]

    # Compare the pool's AMI against the one this instance booted from,
    # fetched via the EC2 instance metadata service.
    expected_ami = launch_config.image_id
    running_ami = urllib.urlopen(
        "http://169.254.169.254/latest/meta-data/ami-id").read()

    return expected_ami == running_ami
def main():
    parser = optparse.OptionParser()
    parser.add_option( "-c", "--config", dest="config_file", help="AutoScale config INI", metavar="FILE" )
    (options, args) = parser.parse_args()
    logging.info( "Using config file [%s]" % options.config_file )

    config = parse_config( options.config_file ) 

    aws_access = config.get("AWS", 'access')
    aws_secret = config.get("AWS", 'secret')

    logging.debug( "Connecting to AWS with access [%s] and secret [%s]" % ( aws_access, aws_secret ) )
    aws_connection = AutoScaleConnection( aws_access, aws_secret )

    print "AutoScalingGroups:"
    lcs = aws_connection.get_all_launch_configurations()
    for lc in lcs:
        print "%s -> %s" % (lc, lc.__dict__ )
示例#5
0
def find_unused_launch_configs():
    conn = AutoScaleConnection()
    autoscale_groups = conn.get_all_groups(max_records=100)
    launch_configs = conn.get_all_launch_configurations(max_records=100)
    launch_config_names = {lc.name for lc in launch_configs}
    used_launch_config_names = {asg.launch_config_name for asg in autoscale_groups}
    unused_launch_config_names = launch_config_names - used_launch_config_names

    print "Autoscale Groups and Current Launch Configs:"
    print "{:<40}{:<40}".format("ASG", "LC")
    for asg in autoscale_groups:
        #print "asg:", asg.name, "-> lc:", asg.launch_config_name
        print "{:<40}{:<40}".format(asg.name, asg.launch_config_name)

    print "\nUnused Launch Configs: (launch configs without a autoscale group)"
    unused_launch_config_names = list(sorted(unused_launch_config_names))
    for unused_launch_config in unused_launch_config_names:
        print "\t", unused_launch_config
    return unused_launch_config_names
class BotoScaleInterface(ScaleInterface):
    """Boto-backed ScaleInterface for Eucalyptus or AWS autoscaling.

    When ``saveclcdata`` is True, each get_all_* call also dumps the raw
    response to a mockdata/*.json file for mock-data capture.
    """
    conn = None          # AutoScaleConnection, (re)created by set_endpoint()
    saveclcdata = False  # dump query responses to mockdata/*.json when True

    def __init__(self, clc_host, access_id, secret_key, token):
        self.access_id = access_id
        self.secret_key = secret_key
        self.token = token
        self.set_endpoint(clc_host)

    def set_endpoint(self, endpoint):
        """(Re)build self.conn for the given endpoint.

        AWS endpoints (``*.amazonaws.com``) get the 'ec2' hostname component
        rewritten to 'autoscaling' and standard HTTPS settings; anything else
        is treated as a Eucalyptus cloud controller on port 8773.
        """
        #boto.set_stream_logger('scale')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
        port = 8773
        # Compute the AWS check once with endswith instead of repeating the
        # original 13-character slice comparison in two places.
        is_aws = endpoint.endswith('amazonaws.com')
        if is_aws:
            endpoint = endpoint.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = RegionInfo(endpoint=endpoint)
            port = 443
        self.conn = AutoScaleConnection(self.access_id, self.secret_key, region=reg,
                                        port=port, path=path,
                                        is_secure=True, security_token=self.token, debug=0)
        self.conn.APIVersion = '2011-01-01'
        if not is_aws:
            self.conn.auth_region_name = 'Eucalyptus'
        # Eucalyptus clouds commonly present self-signed certificates.
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        """Dump obj as JSON to the named file (mock-data capture helper)."""
        # 'with' closes the file even if json.dump raises (the original
        # leaked the handle on error).
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        """Create the given AutoScalingGroup."""
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        """Delete the named autoscaling group."""
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        """Return autoscaling groups (optionally captured to mock data)."""
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        """Return autoscaling instances (optionally captured to mock data)."""
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        """Set the desired capacity of the named group.

        honor_cooldown is accepted for interface compatibility only; it is
        not supported by group.set_capacity and is ignored.
        """
        group = self.conn.get_all_groups([group_name])[0]
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        """Set the health status of an autoscaling instance."""
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        """Terminate the instance, optionally decrementing group capacity."""
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        """Push local changes on as_group back to the service."""
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        """Create the given LaunchConfiguration."""
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        """Delete the named launch configuration."""
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names=None, max_records=None, next_token=None):
        """Return launch configurations (optionally captured to mock data)."""
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj

    # policy related
    def delete_policy(self, policy_name, autoscale_group=None):
        """Delete the named scaling policy."""
        return self.conn.delete_policy(policy_name, autoscale_group)

    def get_all_policies(self, as_group=None, policy_names=None, max_records=None, next_token=None):
        """Return scaling policies (optionally captured to mock data)."""
        obj = self.conn.get_all_policies(as_group, policy_names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Policies.json")
        return obj

    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
        """Execute the named scaling policy."""
        return self.conn.execute_policy(policy_name, as_group, honor_cooldown)

    def create_scaling_policy(self, scaling_policy):
        """Create the given ScalingPolicy."""
        return self.conn.create_scaling_policy(scaling_policy)

    def get_all_adjustment_types(self):
        """Return the adjustment types supported by the service."""
        return self.conn.get_all_adjustment_types()

    # tag related
    def delete_tags(self, tags):
        """Delete the given autoscaling tags."""
        return self.conn.delete_tags(tags)

    def get_all_tags(self, filters=None, max_records=None, next_token=None):
        """Return autoscaling tags (optionally captured to mock data)."""
        obj = self.conn.get_all_tags(filters, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Tags.json")
        return obj

    def create_or_update_tags(self, tags):
        """Create or update the given autoscaling tags."""
        return self.conn.create_or_update_tags(tags)
示例#7
0
def add_ingress_rule(dry_run, go_agent_security_group, go_agent_security_group_owner, go_agent_security_group_name):
    """
    For each ASG (app) in each VPC, add a rule to each SG associated with the ASG's launch configuration
    that allows SSH ingress from the GoCD agents' SG.

    BEFORE RUNNING THIS SCRIPT!:
    - Use the assume_role bash script to assume the role in the proper account/VPC (edx, edge, mckinsey, etc.)
        - If you don't know what this is, ask someone in DevOps.
    - THEN run this script.
    """
    asg_conn = AutoScaleConnection()
    ec2_conn = boto.ec2.connect_to_region('us-east-1')
    asgs = []
    launch_configs = {}
    security_groups = {}

    logging.debug('All ASGs:')
    for group in asg_conn.get_all_groups():
        logging.debug('    {}'.format(group))
        asgs.append(group)

    logging.debug('All launch configurations:')
    for launch_config in asg_conn.get_all_launch_configurations():
        logging.debug('    {}'.format(launch_config))
        launch_configs[launch_config.name] = launch_config

    logging.debug('All security groups:')
    for sec_group in ec2_conn.get_all_security_groups():
        logging.debug('    {}'.format(sec_group))
        security_groups[sec_group.id] = sec_group

    # Validate that each ASG has a launch configuration.
    for group in asgs:
        try:
            logging.info("Launch configuration for ASG '{}' is '{}'.".format(
                group.name, launch_configs[group.launch_config_name]
            ))
        except KeyError:
            logging.error("Launch configuration '{}' for ASG '{}' was not found!".format(
                group.launch_config_name, group.name
            ))
            raise

    # Construct a fake security group for the prod-tools-goagent-sg security group in the edx-tools account.
    # This group will be used to grant the go-agents ingress into the ASG's VPCs.
    go_agent_security_group = boto.ec2.securitygroup.SecurityGroup(
        name=go_agent_security_group_name,
        owner_id=go_agent_security_group_owner,
        id=go_agent_security_group
    )

    # For each launch config, check for the security group. Can support multiple security groups
    # but the edX DevOps convention is to use a single security group.
    for group in asgs:
        launch_config = launch_configs[group.launch_config_name]
        if len(launch_config.security_groups) > 1:
            err_msg = "Launch config '{}' for ASG '{}' has more than one security group!: {}".format(
                launch_config.name, group.name, launch_config.security_groups
            )
            logging.warning(err_msg)
            continue
        sg_name = launch_config.security_groups[0]
        # NOTE(review): security_groups is keyed by sec_group.id, while the
        # launch config may list group *names* — confirm the key space matches.
        try:
            # Find the security group.
            sec_group = security_groups[sg_name]
        except KeyError:
            logging.error("Security group '{}' for ASG '{}' was not found!.".format(sg_name, group.name))
            # BUG FIX: skip this ASG on a missed lookup. Without this
            # 'continue', the code below used an undefined sec_group
            # (NameError on the first miss) or a stale one from a previous
            # iteration.
            continue
        logging.info('BEFORE: Rules for security group {}:'.format(sec_group.name))
        logging.info(sec_group.rules)
        try:
            # Add the ingress rule to the security group.
            yes_no = six.moves.input("Apply the change to this security group? [Yes]")
            if yes_no in ("", "y", "Y", "yes"):
                sec_group.authorize(
                    ip_protocol='tcp',
                    from_port=22,
                    to_port=22,
                    src_group=go_agent_security_group,
                    dry_run=dry_run
                )
        except boto.exception.EC2ResponseError as exc:
            if exc.status == 412:
                # If the dry_run flag is set, then each rule addition will raise this exception.
                # Log it and carry on.
                logging.info('Dry run is True but rule addition would have succeeded for security group {}.'.format(
                    sg_name
                ))
            elif exc.code == "InvalidPermission.Duplicate":
                logging.info("Rule already exists for {}.".format(sg_name))
            else:
                raise
        logging.info('AFTER: Rules for security group {}:'.format(sg_name))
        logging.info(sec_group.rules)
示例#8
0
class BotoScaleInterface(ScaleInterface):
    """Boto-backed ScaleInterface for Eucalyptus or AWS autoscaling.

    When ``saveclcdata`` is True, each get_all_* call also dumps the raw
    response to a mockdata/*.json file for mock-data capture.
    """
    conn = None          # AutoScaleConnection, created in __init__
    saveclcdata = False  # dump query responses to mockdata/*.json when True

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        port = 8773
        # Compute the AWS check once with endswith instead of repeating the
        # original 13-character slice comparison in two places.
        is_aws = clc_host.endswith('amazonaws.com')
        if is_aws:
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = None  # fall back to boto's default AWS region
            port = 443
        self.conn = AutoScaleConnection(access_id,
                                        secret_key,
                                        region=reg,
                                        port=port,
                                        path=path,
                                        is_secure=True,
                                        security_token=token,
                                        debug=0)
        self.conn.APIVersion = '2011-01-01'
        if not is_aws:
            self.conn.auth_region_name = 'Eucalyptus'
        # Eucalyptus clouds commonly present self-signed certificates.
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        """Dump obj as JSON to the named file (mock-data capture helper)."""
        # 'with' closes the file even if json.dump raises (the original
        # leaked the handle on error).
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        """Create the given AutoScalingGroup."""
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        """Delete the named autoscaling group."""
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        """Return autoscaling groups (optionally captured to mock data)."""
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self,
                                      instance_ids=None,
                                      max_records=None,
                                      next_token=None):
        """Return autoscaling instances (optionally captured to mock data)."""
        obj = self.conn.get_all_autoscaling_instances(instance_ids,
                                                      max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self,
                             group_name,
                             desired_capacity,
                             honor_cooldown=False):
        """Set the desired capacity of the named group.

        honor_cooldown is accepted for interface compatibility only; it is
        not supported by group.set_capacity and is ignored.
        """
        group = self.conn.get_all_groups([group_name])[0]
        return group.set_capacity(desired_capacity)

    def set_instance_health(self,
                            instance_id,
                            health_status,
                            should_respect_grace_period=True):
        """Set the health status of an autoscaling instance."""
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        """Terminate the instance, optionally decrementing group capacity."""
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        """Push local changes on as_group back to the service."""
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        """Create the given LaunchConfiguration."""
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        """Delete the named launch configuration."""
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self,
                                      config_names=None,
                                      max_records=None,
                                      next_token=None):
        """Return launch configurations (optionally captured to mock data)."""
        obj = self.conn.get_all_launch_configurations(names=config_names,
                                                      max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj

    # policy related
    def delete_policy(self, policy_name, autoscale_group=None):
        """Delete the named scaling policy."""
        return self.conn.delete_policy(policy_name, autoscale_group)

    def get_all_policies(self,
                         as_group=None,
                         policy_names=None,
                         max_records=None,
                         next_token=None):
        """Return scaling policies (optionally captured to mock data).

        Honors saveclcdata like every other get_all_* method, for consistent
        mock-data capture (the original skipped the capture here).
        """
        obj = self.conn.get_all_policies(as_group, policy_names, max_records,
                                         next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Policies.json")
        return obj

    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
        """Execute the named scaling policy."""
        return self.conn.execute_policy(policy_name, as_group, honor_cooldown)

    def create_scaling_policy(self, scaling_policy):
        """Create the given ScalingPolicy."""
        return self.conn.create_scaling_policy(scaling_policy)

    def get_all_adjustment_types(self):
        """Return the adjustment types supported by the service."""
        return self.conn.get_all_adjustment_types()

    # tag related
    def delete_tags(self, tags):
        """Delete the given autoscaling tags."""
        return self.conn.delete_tags(tags)

    def get_all_tags(self, filters=None, max_records=None, next_token=None):
        """Return autoscaling tags (optionally captured to mock data).

        Honors saveclcdata like every other get_all_* method, for consistent
        mock-data capture (the original skipped the capture here).
        """
        obj = self.conn.get_all_tags(filters, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Tags.json")
        return obj

    def create_or_update_tags(self, tags):
        """Create or update the given autoscaling tags."""
        return self.conn.create_or_update_tags(tags)
示例#9
0
class MSBManager:
    """Provision and tear down EC2/ELB/autoscaling resources for a deployment.

    Holds one boto connection per AWS service. The create_* methods check for
    an existing resource with the same name (or Name tag) first and reuse it,
    so repeated calls are effectively idempotent.
    """
    def __init__(self, aws_access_key, aws_secret_key):
        """Open EC2, ELB, AutoScale and CloudWatch connections."""
        self.ec2_conn = EC2Connection(aws_access_key, aws_secret_key)
        self.elb_conn = ELBConnection(aws_access_key, aws_secret_key)
        self.auto_scale_conn = AutoScaleConnection(aws_access_key, aws_secret_key)
        self.cloud_watch_conn = CloudWatchConnection(aws_access_key, aws_secret_key)
        # Cooldown (seconds) shared by the ASG and its scaling policies.
        self.default_cooldown = 60

    def get_security_group(self, name):
        """Return the security group with the given name, or None."""
        sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
        return sgs[0] if sgs else None

    def create_security_group(self, name, description):
        """Return the named security group, creating it if it does not exist.

        The group is (re)authorized to accept ALL traffic from anywhere
        (every protocol, 0.0.0.0/0); the duplicate-rule error raised when the
        rule already exists is deliberately swallowed.
        """
        sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
        sg = sgs[0] if sgs else None
        if not sgs:
            sg = self.ec2_conn.create_security_group(name, description)

        try:
            # ip_protocol="-1" means all protocols/ports — wide open on purpose.
            sg.authorize(ip_protocol="-1", from_port=None, to_port=None, cidr_ip="0.0.0.0/0", dry_run=False)
        except EC2ResponseError:
            pass
        return sg

    def remove_security_group(self, name):
        """Delete the security group with the given name."""
        self.ec2_conn.delete_security_group(name=name)

    def create_instance(self, image, instance_type, key_name, zone, security_groups, tags):
        """Return a running instance whose Name tag matches tags["Name"],
        launching (and tagging) a new one if none is running.

        Blocks, polling every 5s, until a newly launched instance is running.
        """
        instance = None
        # Reuse an already-running instance with the same Name tag if present.
        reservations = self.ec2_conn.get_all_instances()
        for reservation in reservations:
            for i in reservation.instances:
                if "Name" in i.tags and i.tags["Name"] == tags["Name"] and i.state == "running":
                    instance = i
                    break

        if not instance:
            reservation = self.ec2_conn.run_instances(
                image,
                instance_type=instance_type,
                key_name=key_name,
                placement=zone,
                security_groups=security_groups,
                monitoring_enabled=True,
            )
            instance = reservation.instances[0]
            # Poll until running, then give the instance a short grace period
            # before tagging it.
            while not instance.update() == "running":
                time.sleep(5)
            time.sleep(10)
            self.ec2_conn.create_tags([instance.id], tags)

        return instance

    def request_spot_instance(self, bid, image, instance_type, key_name, zone, security_groups, tags):
        """Request a spot instance at the given bid and tag it once assigned.

        Polls the spot request every 60s until it reports an instance id.
        Returns None (the tagged instance id is not returned to the caller).
        """
        req = self.ec2_conn.request_spot_instances(
            price=bid,
            instance_type=instance_type,
            image_id=image,
            placement=zone,
            key_name=key_name,
            security_groups=security_groups,
        )
        instance_id = None

        while not instance_id:
            job_sir_id = req[0].id
            requests = self.ec2_conn.get_all_spot_instance_requests()
            for sir in requests:
                if sir.id == job_sir_id:
                    instance_id = sir.instance_id
                    break
            print "Job {} not ready".format(job_sir_id)
            time.sleep(60)

        self.ec2_conn.create_tags([instance_id], tags)

    def remove_instance(self, instance_id):
        """Terminate a single instance."""
        self.remove_instances([instance_id])

    def remove_instances(self, instance_ids):
        """Terminate all of the given instances."""
        self.ec2_conn.terminate_instances(instance_ids)

    def remove_instance_by_tag_name(self, name):
        """Terminate every running instance whose Name tag equals name."""
        reservations = self.ec2_conn.get_all_instances()
        data_centers_intance_ids = []
        for reservation in reservations:
            for instance in reservation.instances:
                if "Name" in instance.tags and instance.tags["Name"] == name and instance.state == "running":
                    data_centers_intance_ids.append(instance.id)
        if data_centers_intance_ids:
            self.remove_instances(data_centers_intance_ids)

    def create_elb(self, name, zone, project_tag_value, security_group_id, instance_ids=None):
        """Return the named load balancer, creating and configuring it if absent.

        A new ELB listens on HTTP:80, health-checks HTTP:80/heartbeat, gets
        the given security group, optionally registers instances, and is
        tagged 15619project=project_tag_value.
        """
        lbs = [l for l in self.elb_conn.get_all_load_balancers() if l.name == name]
        lb = lbs[0] if lbs else None
        if not lb:
            hc = HealthCheck(
                timeout=50, interval=60, healthy_threshold=2, unhealthy_threshold=8, target="HTTP:80/heartbeat"
            )
            ports = [(80, 80, "http")]
            zones = [zone]
            lb = self.elb_conn.create_load_balancer(name, zones, ports)

            self.elb_conn.apply_security_groups_to_lb(name, [security_group_id])
            lb.configure_health_check(hc)
            if instance_ids:
                lb.register_instances(instance_ids)

            # Tag the ELB via the raw AddTags API call.
            params = {
                "LoadBalancerNames.member.1": lb.name,
                "Tags.member.1.Key": "15619project",
                "Tags.member.1.Value": project_tag_value,
            }
            lb.connection.get_status("AddTags", params, verb="POST")
        return lb

    def remove_elb(self, name):
        """Delete the named load balancer."""
        self.elb_conn.delete_load_balancer(name)

    def create_launch_configuration(self, name, image, key_name, security_groups, instance_type):
        """Return the named launch configuration, creating it if absent."""
        lcs = [l for l in self.auto_scale_conn.get_all_launch_configurations() if l.name == name]
        lc = lcs[0] if lcs else None
        if not lc:
            lc = LaunchConfiguration(
                name=name,
                image_id=image,
                key_name=key_name,
                security_groups=[security_groups],
                instance_type=instance_type,
            )
            self.auto_scale_conn.create_launch_configuration(lc)
        return lc

    def remove_launch_configuration(self, name):
        """Delete the named launch configuration."""
        self.auto_scale_conn.delete_launch_configuration(name)

    def create_autoscaling_group(self, name, lb_name, zone, tags, instance_ids=None):
        """Return the named ASG, creating it with scaling policies and CPU
        alarms (scale up above 85%, down below 60%) if it does not exist.
        """
        # NOTE(review): create_launch_configuration() requires five arguments
        # (name, image, key_name, security_groups, instance_type), so this
        # no-arg call raises TypeError — confirm the intended launch config.
        lc = self.create_launch_configuration()
        as_groups = [a for a in self.auto_scale_conn.get_all_groups() if a.name == name]
        as_group = as_groups[0] if as_groups else None
        if not as_group:
            # Fixed-size group of 4 with ELB health checks.
            as_group = AutoScalingGroup(
                group_name=name,
                load_balancers=[lb_name],
                availability_zones=[zone],
                launch_config=lc,
                min_size=4,
                max_size=4,
                health_check_type="ELB",
                health_check_period=120,
                connection=self.auto_scale_conn,
                default_cooldown=self.default_cooldown,
                desired_capacity=4,
                tags=tags,
            )

            self.auto_scale_conn.create_auto_scaling_group(as_group)
            if instance_ids:
                self.auto_scale_conn.attach_instances(name, instance_ids)

            # +1 / -1 capacity policies, re-fetched below to obtain their ARNs.
            scale_up_policy = ScalingPolicy(
                name="scale_up",
                adjustment_type="ChangeInCapacity",
                as_name=name,
                scaling_adjustment=1,
                cooldown=self.default_cooldown,
            )
            scale_down_policy = ScalingPolicy(
                name="scale_down",
                adjustment_type="ChangeInCapacity",
                as_name=name,
                scaling_adjustment=-1,
                cooldown=self.default_cooldown,
            )

            self.auto_scale_conn.create_scaling_policy(scale_up_policy)
            self.auto_scale_conn.create_scaling_policy(scale_down_policy)

            scale_up_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=["scale_up"])[0]
            scale_down_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=["scale_down"])[0]

            # CloudWatch CPU alarms wired to the policies above.
            alarm_dimensions = {"AutoScalingGroupName": name}
            scale_up_alarm = MetricAlarm(
                name="scale_up_on_cpu",
                namespace="AWS/EC2",
                metric="CPUUtilization",
                statistic="Average",
                comparison=">",
                threshold=85,
                period=60,
                evaluation_periods=1,
                alarm_actions=[scale_up_policy.policy_arn],
                dimensions=alarm_dimensions,
            )
            self.cloud_watch_conn.create_alarm(scale_up_alarm)
            scale_down_alarm = MetricAlarm(
                name="scale_down_on_cpu",
                namespace="AWS/EC2",
                metric="CPUUtilization",
                statistic="Average",
                comparison="<",
                threshold=60,
                period=60,
                evaluation_periods=1,
                alarm_actions=[scale_down_policy.policy_arn],
                dimensions=alarm_dimensions,
            )
            self.cloud_watch_conn.create_alarm(scale_down_alarm)

        return as_group

    def update_autoscaling_group_max_size(self, as_group, max_size):
        """Set the group's max_size and push the change to the service."""
        setattr(as_group, "max_size", max_size)
        as_group.update()

    def update_autoscaling_group_min_size(self, as_group, min_size):
        """Set the group's min_size and push the change to the service."""
        setattr(as_group, "min_size", min_size)
        as_group.update()

    def remove_autoscaling_group(self, name):
        """Delete the named autoscaling group."""
        self.auto_scale_conn.delete_auto_scaling_group(name)
示例#10
0
class BotoScaleInterface(ScaleInterface):
    """boto-backed implementation of ScaleInterface.

    Wraps an AutoScaleConnection pointed either at a Eucalyptus cloud
    controller or at real AWS autoscaling (detected from the host name).
    When ``saveclcdata`` is True, every list-returning call also dumps its
    result to a JSON mock-data file for offline/UI testing.
    """
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        # Default to the Eucalyptus cloud-controller endpoint; switch to
        # the real AWS autoscaling endpoint when pointed at amazonaws.com.
        path = '/services/AutoScaling'
        port = 8773
        if clc_host.endswith('amazonaws.com'):
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            port = 443
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        self.conn = AutoScaleConnection(access_id, secret_key, region=reg,
                                        port=port, path=path,
                                        is_secure=True, security_token=token,
                                        debug=0)
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        """Serialize *obj* to the JSON file *name* (mock-data capture)."""
        # Context manager guarantees the handle is closed even if
        # serialization raises.
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        """Create the given AutoScalingGroup."""
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        """Delete the Auto Scaling group *name*."""
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        """List Auto Scaling groups, optionally saving them as mock data."""
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        """List autoscaling instances, optionally saving them as mock data."""
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        """Set the desired capacity of the named group.

        NOTE: honor_cooldown is accepted for interface compatibility but is
        not supported by AutoScalingGroup.set_capacity.
        """
        group = self.conn.get_all_groups([group_name])[0]
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        """Explicitly set the health status of one instance."""
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        """Terminate an instance, optionally shrinking the group size."""
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        """Push local changes on *as_group* back to the service."""
        # Re-bind the connection in case the group was built elsewhere.
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        """Create the given LaunchConfiguration."""
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        """Delete the launch configuration with the given name."""
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names, max_records, next_token):
        """List launch configurations, optionally saving them as mock data."""
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records, next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj
示例#11
0
    def test_basic(self):
        """Light smoke test: read-only listings plus a minimal
        create/tag/shutdown/delete round trip of an AutoScale group.

        NB: as it says on the tin these are really basic tests that only
        (lightly) exercise read-only behaviour - and that's only if you
        have any autoscale groups to introspect. It's useful, however, to
        catch simple errors.
        """
        print('--- running %s tests ---' % self.__class__.__name__)
        c = AutoScaleConnection()

        self.assertTrue(repr(c).startswith('AutoScaleConnection'))

        groups = c.get_all_groups()
        for group in groups:
            self.assertIsInstance(group, AutoScalingGroup)

            # get activities
            activities = group.get_activities()

            for activity in activities:
                self.assertIsInstance(activity, Activity)

        # get launch configs
        configs = c.get_all_launch_configurations()
        for config in configs:
            self.assertIsInstance(config, LaunchConfiguration)

        # get policies
        policies = c.get_all_policies()
        for policy in policies:
            self.assertIsInstance(policy, ScalingPolicy)

        # get scheduled actions
        actions = c.get_all_scheduled_actions()
        for action in actions:
            self.assertIsInstance(action, ScheduledUpdateGroupAction)

        # get instances
        instances = c.get_all_autoscaling_instances()
        for instance in instances:
            self.assertIsInstance(instance, Instance)

        # get all scaling process types
        ptypes = c.get_all_scaling_process_types()
        for ptype in ptypes:
            # BUG FIX: assertTrue(ptype, ProcessType) only checked that
            # ptype was truthy (the 2nd argument is the failure message);
            # use assertIsInstance like the other listing loops.
            self.assertIsInstance(ptype, ProcessType)

        # get adjustment types
        adjustments = c.get_all_adjustment_types()
        for adjustment in adjustments:
            self.assertIsInstance(adjustment, AdjustmentType)

        # get metrics collection types
        types = c.get_all_metric_collection_types()
        self.assertIsInstance(types, MetricCollectionTypes)

        # create the simplest possible AutoScale group
        # first create the launch configuration
        time_string = '%d' % int(time.time())
        lc_name = 'lc-%s' % time_string
        lc = LaunchConfiguration(name=lc_name, image_id='ami-2272864b',
                                 instance_type='t1.micro')
        c.create_launch_configuration(lc)
        found = False
        lcs = c.get_all_launch_configurations()
        for lc in lcs:
            if lc.name == lc_name:
                found = True
                break
        assert found

        # now create autoscaling group
        group_name = 'group-%s' % time_string
        group = AutoScalingGroup(name=group_name, launch_config=lc,
                                 availability_zones=['us-east-1a'],
                                 min_size=1, max_size=1)
        c.create_auto_scaling_group(group)
        found = False
        groups = c.get_all_groups()
        for group in groups:
            if group.name == group_name:
                found = True
                break
        assert found

        # now create a tag
        tag = Tag(key='foo', value='bar', resource_id=group_name,
                  propagate_at_launch=True)
        c.create_or_update_tags([tag])

        found = False
        tags = c.get_all_tags()
        for tag in tags:
            if tag.resource_id == group_name and tag.key == 'foo':
                found = True
                break
        assert found

        c.delete_tags([tag])

        # shutdown instances and wait for them to disappear
        group.shutdown_instances()
        instances = True
        while instances:
            time.sleep(5)
            groups = c.get_all_groups()
            for group in groups:
                if group.name == group_name:
                    if not group.instances:
                        instances = False

        group.delete()
        lc.delete()

        # tag deletion is asynchronous; poll until it is gone
        found = True
        while found:
            found = False
            time.sleep(5)
            tags = c.get_all_tags()
            for tag in tags:
                if tag.resource_id == group_name and tag.key == 'foo':
                    found = True

        assert not found

        print('--- tests completed ---')
示例#12
0
文件: clouds.py 项目: cu-csc/phorque
class Cloud(object):
    """One managed IaaS cloud: an EC2 connection plus an AutoScale launch
    configuration and group (all named in *cloud_config*), tracking the
    instances currently running in that group.
    """

    def __init__(self, cloud_config):
        self.config = cloud_config
        self.all_instances = []
        self.failed_launch = False
        self.failed_count = 0
        self.failed_last_valid_count = 0
        self._conn = None
        self._as_conn = None
        self._lc = None
        self._asg = None
        self._last_asg_launch_attempt = None
        self.maxed = False
        self._last_launch_attempt = datetime.datetime.utcnow()
        self._initialize()

    def _create_connection(self):
        """Open the EC2 connection described by the config."""
        LOG.debug("Creating connection for %s" % self.config.name)
        self._conn = boto.connect_ec2(self.config.access_id,
                                      self.config.secret_key,
                                      validate_certs=False)
        self._conn.host = self.config.cloud_uri
        self._conn.port = self.config.cloud_port

    def _create_autoscale_connection(self):
        """Open the AutoScale connection described by the config."""
        LOG.debug("Creating autoscale connection for %s" % self.config.name)
        region = RegionInfo(name=self.config.cloud_type,
                            endpoint=self.config.as_uri)
        self._as_conn = AutoScaleConnection(
            aws_access_key_id=self.config.access_id,
            aws_secret_access_key=self.config.secret_key,
            is_secure=True,
            port=self.config.as_port,
            region=region,
            validate_certs=False)

    def _create_or_set_launch_configuration(self):
        """Load the configured launch configuration, creating it if absent."""
        name = self.config.lc_name
        if not self._lc:
            LOG.debug("Attempting to load launch configuration: %s" % (name))
            lc = self._as_conn.get_all_launch_configurations(names=[name])
            if len(lc) == 1:
                LOG.debug("Launch configuration %s found." % (name))
                self._lc = lc[0]
        if not self._lc:
            #TODO(pdmars): key and security groups are hardcoded for now, gross
            if self.config.user_data_file is not None:
                user_data_file = self.config.user_data_file
                with open(user_data_file) as f:
                    user_data = f.read()
            else:
                user_data = None
            LOG.debug("Creating launch configuration %s" % name)
            LOG.debug("\tname: %s" % name)
            LOG.debug("\timage_id: %s" % self.config.image_id)
            LOG.debug("\tinstance_type: %s" % self.config.instance_type)
            LOG.debug("\tuser_data: %s" % user_data)
            self._lc = LaunchConfiguration(
                name=name,
                image_id=self.config.image_id,
                key_name="phantomkey",
                security_groups=['default'],
                instance_type=self.config.instance_type,
                user_data=user_data)
            self._as_conn.create_launch_configuration(self._lc)

    def _create_or_set_autoscale_group(self):
        """Load the configured autoscale group, creating it if absent."""
        name = self.config.asg_name
        if not self._asg:
            LOG.debug("Attempting to load autoscale group: %s" % name)
            asg = self._as_conn.get_all_groups(names=[name])
            LOG.debug("Autoscale group: %s" % asg)
            if len(asg) == 1:
                LOG.debug("Autoscale group %s found." % name)
                self._asg = asg[0]
        if not self._asg:
            # TODO(pdmars): more hard coded grossness, for now
            # Phantom encodes its scaling policy in tags on the group;
            # guess the cloud name from the "lc_name@cloud" convention.
            try:
                cloud_guess = self.config.lc_name.split("@")[1].strip()
            except Exception as e:
                LOG.warn("Unable to guess cloud for auto scale tags")
                LOG.warn("Setting cloud to hotel")
                cloud_guess = "hotel"
            policy_name_key = "PHANTOM_DEFINITION"
            policy_name = "error_overflow_n_preserving"
            ordered_clouds_key = "clouds"
            n_preserve_key = "minimum_vms"
            ordered_clouds = cloud_guess + ":-1"
            n_preserve = 0
            policy_tag = Tag(connection=self._as_conn, key=policy_name_key,
                             value=policy_name, resource_id=name)
            clouds_tag = Tag(connection=self._as_conn, key=ordered_clouds_key,
                             value=ordered_clouds, resource_id=name)
            npreserve_tag = Tag(connection=self._as_conn, key=n_preserve_key,
                                value=n_preserve, resource_id=name)
            tags = [policy_tag, clouds_tag, npreserve_tag]
            zones = [self.config.az]
            LOG.debug("Creating autoscale group %s" % name)
            LOG.debug("\tname: %s" % name)
            LOG.debug("\tavailability_zones: %s" % zones)
            LOG.debug("\tlaunch_config: %s" % self._lc)
            self._asg = AutoScalingGroup(group_name=name,
                                         availability_zones=zones,
                                         min_size=0,
                                         max_size=0,
                                         launch_config=self._lc,
                                         tags=tags)
            self._as_conn.create_auto_scaling_group(self._asg)

    def _initialize(self):
        """Set up connections, launch configuration and autoscale group."""
        LOG.debug("Initializing %s" % self.config.name)
        self._create_connection()
        self._create_autoscale_connection()
        self._create_or_set_launch_configuration()
        self._create_or_set_autoscale_group()
        LOG.debug("Initialization complete for %s" % self.config.name)

    def get_valid_instances(self):
        """Return the instances considered valid as of the last refresh."""
        return self.all_instances

    def _refresh_instances(self):
        """Rebuild all_instances from EC2, keeping only members of the ASG
        that are in a valid run state, and update the maxed flag."""
        LOG.debug("%s: getting instance information" % self.config.name)
        as_instance_ids = [i.instance_id for i in self._asg.instances]
        instances = []
        for reservation in self._conn.get_all_instances():
            for instance in reservation.instances:
                if (instance.id in as_instance_ids and
                        instance.state in VALID_RUN_STATES):
                    instances.append(instance)
        self.all_instances = instances
        num_instances = len(self.all_instances)
        LOG.debug("%s: updated %d instances" % (self.config.name,
                                                num_instances))
        if num_instances >= self.config.max_instances:
            LOG.warn("%s reached the max (%s) instances: %s" % (
                self.config.name, self.config.max_instances,
                num_instances))
            self.maxed = True
        else:
            self.maxed = False

    def _refresh_asg(self):
        """Re-fetch the autoscale group object from the service."""
        LOG.debug("%s: refreshing autoscale group" % self.config.name)
        asg_name = self.config.asg_name
        asgs = self._as_conn.get_all_groups(names=[asg_name])
        if len(asgs) == 1:
            self._asg = asgs[0]
            LOG.debug("\trefreshed autoscale group: %s" % asg_name)
        else:
            LOG.warn("\tunable to refresh autoscale group: %s" % asg_name)

    def refresh(self, cluster):
        """Refresh the ASG and then the instance list."""
        self._refresh_asg()
        self._refresh_instances()

    def get_total_num_valid_cores(self):
        """Return the total core count across currently-valid instances,
        logging a mismatch against the ASG's desired capacity."""
        LOG.debug("%s: getting number of valid cores" % self.config.name)
        num_valid_instances = len(self.get_valid_instances())
        total_valid_cores = num_valid_instances * self.config.instance_cores
        num_desired_instances = self._asg.desired_capacity
        num_desired_cores = num_desired_instances * self.config.instance_cores
        # BUG FIX: previously compared against a dead variable that was
        # always 0, so the mismatch message fired whenever any cores were
        # desired; compare against the actual valid-core count.
        if num_desired_cores != total_valid_cores:
            LOG.debug("\tmismatching core counts")
            LOG.debug("\tnum_desired_cores: %d" % (num_desired_cores))
            LOG.debug("\ttotal_valid_cores: %d" % (total_valid_cores))
        return total_valid_cores

    def get_instance_by_id(self, id):
        """Return the tracked instance whose EC2 id equals *id*, or None."""
        LOG.debug("Searching for instance %s" % id)
        # BUG FIX: the loop variable was named "instances" while the body
        # referenced "instance", raising NameError on any non-empty list.
        for instance in self.all_instances:
            if instance.id == id:
                LOG.debug("Found instance %s" % id)
                return instance
        return None

    def get_instance_ids_for_public_dns_names(self, public_dns_names):
        """Map a list of public DNS names to the matching instance ids."""
        instance_ids = []
        for instance in self.all_instances:
            if instance.public_dns_name in public_dns_names:
                instance_ids.append(instance.id)
        return instance_ids

    def get_public_dns_names_close_to_charge(self):
        """Return DNS names of instances nearing their next billing charge
        (within three polling intervals of the charge boundary)."""
        instances_close_to_charge = []
        sleep_secs = self.config.get_loop_sleep_secs()
        cur_utc_time = datetime.datetime.utcnow()
        valid_instances = self.get_valid_instances()
        time_fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
        for instance in valid_instances:
            launch_time = datetime.datetime.strptime(instance.launch_time,
                                                     time_fmt)
            time_diff = cur_utc_time - launch_time
            # Ignores microseconds
            time_diff_secs = time_diff.seconds + time_diff.days * 24 * 3600
            cur_charge_secs = time_diff_secs % self.config.charge_time_secs
            secs_to_charge = self.config.charge_time_secs - cur_charge_secs
            LOG.debug("%s:%s: charge: %d; current: %d; to charge: %d" % (
                instance.id, instance.public_dns_name,
                self.config.charge_time_secs,
                cur_charge_secs, secs_to_charge))
            if secs_to_charge < (3 * sleep_secs):
                instances_close_to_charge.append(instance.public_dns_name)
        return instances_close_to_charge

    def delete_instances(self, instance_ids=None):
        """Terminate the given instances via the autoscale service.

        BUG FIX: the default used to be a shared mutable list ([]); use
        None and treat it as "nothing to delete".
        """
        if not instance_ids:
            return
        LOG.debug("Deleting instances: %s" % instance_ids)
        # TODO(pdmars): this has the potential to kill instances running jobs
        # maybe I should err on the side of having extra instances if the
        # capacity is higher than the cloud can currently support
        num_instances = len(self.all_instances)
        if ((self._asg.desired_capacity > num_instances) and
                (num_instances > 0)):
            LOG.warn("Desired capacity is greater than num_instances running")
            LOG.warn("Adjusting desired capacity to match")
            self.set_capacity(num_instances)
        for instance_id in instance_ids:
            self._as_conn.terminate_instance(instance_id)
            # TODO(pdmars): due to a bug in phantom, maybe this will help
            # 2013/04/05: this might not be relevant anymore
            time.sleep(.1)

    def launch_autoscale_instances(self, num_instances=1):
        """Grow the ASG's desired capacity by *num_instances*, capped at
        the configured maximum."""
        new_capacity = self._asg.desired_capacity + int(num_instances)
        if new_capacity > self.config.max_instances:
            new_capacity = self.config.max_instances
            LOG.warn("%s can launch %s total instances" % (self.config.name,
                                                           new_capacity))
        self._last_launch_attempt = datetime.datetime.utcnow()
        LOG.debug("Setting cloud capacity for %s to %s" % (self.config.name,
                                                           new_capacity))
        self.set_capacity(new_capacity)

    def set_capacity(self, new_capacity):
        """Set the ASG's desired capacity."""
        self._asg.set_capacity(new_capacity)
示例#13
0
def add_ingress_rule(dry_run, go_agent_security_group, go_agent_security_group_owner, go_agent_security_group_name):
    """
    For each ASG (app) in each VPC, add a rule to each SG associated with the ASG's launch configuration
    that allows SSH ingress from the GoCD agents' SG.

    BEFORE RUNNING THIS SCRIPT!:
    - Use the assume_role bash script to assume the role in the proper account/VPC (edx, edge, mckinsey, etc.)
        - If you don't know what this is, ask someone in DevOps.
    - THEN run this script.
    """
    asg_conn = AutoScaleConnection()
    ec2_conn = boto.ec2.connect_to_region('us-east-1')
    asgs = []
    launch_configs = {}
    security_groups = {}

    logging.debug('All ASGs:')
    for group in asg_conn.get_all_groups():
        logging.debug('    {}'.format(group))
        asgs.append(group)

    logging.debug('All launch configurations:')
    for launch_config in asg_conn.get_all_launch_configurations():
        logging.debug('    {}'.format(launch_config))
        launch_configs[launch_config.name] = launch_config

    logging.debug('All security groups:')
    for sec_group in ec2_conn.get_all_security_groups():
        logging.debug('    {}'.format(sec_group))
        security_groups[sec_group.id] = sec_group

    # Validate that each ASG has a launch configuration.
    for group in asgs:
        try:
            logging.info("Launch configuration for ASG '{}' is '{}'.".format(
                group.name, launch_configs[group.launch_config_name]
            ))
        except KeyError:
            logging.error("Launch configuration '{}' for ASG '{}' was not found!".format(
                group.launch_config_name, group.name
            ))
            raise

    # Construct a fake security group for the prod-tools-goagent-sg security group in the edx-tools account.
    # This group will be used to grant the go-agents ingress into the ASG's VPCs.
    go_agent_security_group = boto.ec2.securitygroup.SecurityGroup(
        name=go_agent_security_group_name,
        owner_id=go_agent_security_group_owner,
        id=go_agent_security_group
    )

    # For each launch config, check for the security group. Can support multiple security groups
    # but the edX DevOps convention is to use a single security group.
    for group in asgs:
        launch_config = launch_configs[group.launch_config_name]
        if len(launch_config.security_groups) > 1:
            err_msg = "Launch config '{}' for ASG '{}' has more than one security group!: {}".format(
                launch_config.name, group.name, launch_config.security_groups
            )
            logging.warning(err_msg)
            continue
        sg_name = launch_config.security_groups[0]
        try:
            # Find the security group.
            sec_group = security_groups[sg_name]
        except KeyError:
            logging.error("Security group '{}' for ASG '{}' was not found!.".format(sg_name, group.name))
            # BUG FIX: previously fell through after the failed lookup and
            # used an undefined (or stale, from a previous iteration)
            # sec_group below; skip this ASG instead.
            continue
        logging.info('BEFORE: Rules for security group {}:'.format(sec_group.name))
        logging.info(sec_group.rules)
        try:
            # Add the ingress rule to the security group.
            yes_no = raw_input("Apply the change to this security group? [Yes]")
            if yes_no in ("", "y", "Y", "yes"):
                sec_group.authorize(
                    ip_protocol='tcp',
                    from_port=22,
                    to_port=22,
                    src_group=go_agent_security_group,
                    dry_run=dry_run
                )
        except boto.exception.EC2ResponseError as exc:
            if exc.status == 412:
                # If the dry_run flag is set, then each rule addition will raise this exception.
                # Log it and carry on.
                logging.info('Dry run is True but rule addition would have succeeded for security group {}.'.format(
                    sg_name
                ))
            elif exc.code == "InvalidPermission.Duplicate":
                logging.info("Rule already exists for {}.".format(sg_name))
            else:
                raise
        logging.info('AFTER: Rules for security group {}:'.format(sg_name))
        logging.info(sec_group.rules)
# Launch configuration names whose AMIs/snapshots should be cleaned up.
scaling_confs = ['scaling_conf_name_1', 'scaling_conf_name_2']

# Connect EC2
# NOTE(review): region_name/region_endpoint/credentials are assumed to be
# defined earlier in the file - verify before running.
aws_region = RegionInfo(name=region_name, endpoint=region_endpoint)
conn = EC2Connection(aws_access_key_id,
                     aws_secret_access_key,
                     region=aws_region)

# Connect autoscaling service
aws_region_as = RegionInfo(name=region_name,
                           endpoint=region_autoscale_endpoint)
conn_as = AutoScaleConnection(aws_access_key_id,
                              aws_secret_access_key,
                              region=aws_region_as)

lcs = conn_as.get_all_launch_configurations(names=scaling_confs)

# For each launch configuration: deregister its AMI, delete the AMI's
# snapshots, then delete the launch configuration itself.
for lc in lcs:
    try:
        img = conn.get_image(lc.image_id)
        snaps = conn.get_all_snapshots(
            filters={"description": "*" + img.id + "*"})
        # BUG FIX: "image" was undefined (NameError); deregister the image
        # object fetched above.
        img.deregister(delete_snapshot=False)
        for snap in snaps:
            snap.delete()
        print("scaling configuration image and these related " + str(
            snaps) + " snapshots removed")
    except Exception:
        # Best-effort cleanup: the AMI may already be gone.
        print("ami not found " + lc.image_id)
    conn_as.delete_launch_configuration(lc.name)
示例#15
0
class IcsAS(object):

    """
    ICS Library for AutoScale
    """

    def __init__(self, region, **kwargs):
        """Open an AutoScale connection for the given region name."""
        self.conn = AutoScaleConnection(region=get_region(region), **kwargs)

    def to_list(self, input):
        """
        Normalize *input* for the boto APIs: None and lists pass through
        untouched, a bare string is wrapped in a one-element list, and
        anything else is rejected.
        """
        if input is None or isinstance(input, list):
            return input
        if isinstance(input, basestring):
            return [input]
        raise IcsASException("Need the type '%s' but '%s' found"
                             % ('list', type(input)))

    def get_group_name_from_instance(self, instance_id):
        """
        Look up which ASG owns the given EC2 instance.

        :type instance_id: string
        :param instance_id: EC2 instance id startwith 'i-xxxxxxx'

        :rtype: string
        :return: name of the ASG, this instance belongs to
        """
        found = self.conn.get_all_autoscaling_instances(
            instance_ids=self.to_list(instance_id))
        return found[0].group_name if found else None

    def get_instances_from_group_name(self, name):
        """
        Collect every instance belonging to the named ASG(s).

        :type name: string
        :param name: the specific ASG name

        :rtype: list
        :return: a list contains all the instances
        """
        members = []
        for asg in self.conn.get_all_groups(names=self.to_list(name)):
            members += asg.instances
        return members

    def get_group_from_name(self, name):
        """
        Fetch the ASG object(s) matching a name.

        :type name: string
        :param name: the ASG name

        :rtype: list
        :return: a list represents the specific ASG(s)
        """
        return self.conn.get_all_groups(names=self.to_list(name))

    def get_launch_config_from_name(self, name):
        """
        Fetch the Launch Configuration object(s) matching a name.

        :type name: string
        :param name: the Launch Configuration name

        :rtype: list
        :return: a list represents the specific Launch Configuration(s)
        """
        return self.conn.get_all_launch_configurations(
            names=self.to_list(name))

    def create_launch_config(self, launch_config):
        """
        Register a new Launch Configuration.

        :type launch_config: class
        :param launch_config: boto launch_config object

        :rtype: string
        :return: AWS request Id
        """
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_config_from_name(self, name):
        """
        Remove the Launch Configuration with the given name.

        :type name: string
        :param name: the name of launch configuration

        :rtype: string
        :return: AWS request Id
        """
        log.info("delete the launch configuration:")
        log.info(">> %s" % name)
        return self.conn.delete_launch_configuration(name)

    def update_launch_config(self, name, launch_config):
        """
        Swap the named ASG onto a new Launch Configuration, deleting the
        old one once the switch is confirmed.

        :type name: string
        :param name: the name of Auto-Scaling Group

        :type launch_config: class
        :param launch_config: boto launch_config object

        :rtype: string
        :return: AWS request Id
        """
        matching = self.get_group_from_name(name)
        if not matching:
            raise IcsASException("no such Auto-Scaling Group '%s' found"
                                 % name)
        group = matching[0]

        self.create_launch_config(launch_config)
        old_lc_name = group.launch_config_name
        new_lc_name = launch_config.name
        group.__dict__["launch_config_name"] = launch_config.name
        group.update()

        if not self.get_launch_config_from_name(new_lc_name):
            raise IcsASException("no such new launch config '%s'"
                                 % new_lc_name)
        group = self.get_group_from_name(name)[0]
        if group.launch_config_name != new_lc_name:
            raise IcsASException("failed to update "
                                 "launch config for ASG '%s'" % name)
        return self.delete_launch_config_from_name(old_lc_name)

    def suspend_scaling_group(self, name, scaling_processes=None):
        """
        Suspends Auto Scaling processes for an Auto Scaling group.

        :type name: string
        :param name: the ASG name

        :type scaling_processes: string or list
        :param scaling_processes: scaling process names

         * Launch
         * Terminate
         * HealthCheck
         * ReplaceUnhealthy
         * AZRebalance
         * AlarmNotification
         * ScheduledActions
         * AddToLoadBalancer
        """
        if not isinstance(name, basestring):
            return None
        asg = self.get_group_from_name(self.to_list(name))[0]
        return asg.suspend_processes(self.to_list(scaling_processes))

    def resume_scaling_group(self, name, scaling_processes=None):
        """
        Resumes Auto Scaling processes for an Auto Scaling group.

        :type name: string
        :param name: the ASG name

        :type scaling_processes: string or list
        :param scaling_processes: scaling process names

         * Launch
         * Terminate
         * HealthCheck
         * ReplaceUnhealthy
         * AZRebalance
         * AlarmNotification
         * ScheduledActions
         * AddToLoadBalancer
        """
        if not isinstance(name, basestring):
            return None
        asg = self.get_group_from_name(self.to_list(name))[0]
        return asg.resume_processes(self.to_list(scaling_processes))

    def terminate_group_instance(self, instance_id, decrement_capacity=True):
        """
        Terminates the specified instance. The desired group size can
        also be adjusted, if desired.

        :type instance_id: str
        :param instance_id: The ID of the instance to be terminated.

        :type decrement_capability: bool
        :param decrement_capacity: Whether to decrement the size of the
            autoscaling group or not.
        """
        return self.conn.terminate_instance(
            instance_id=instance_id,
            decrement_capacity=decrement_capacity)

    def update_instance_health(self, instance_id, health_status,
                               grace_period=False):
        """
        Explicitly set the health status of an instance.

        :type instance_id: str
        :param instance_id: The identifier of the EC2 instance

        :type health_status: str
        :param health_status: The health status of the instance.

        * Healthy: the instance is healthy and should remain in service.
        * Unhealthy: the instance is unhealthy. \
            Auto Scaling should terminate and replace it.

        :type grace_period: bool
        :param grace_period: If True, this call should respect
            the grace period associated with the group.
        """
        self.conn.set_instance_health(instance_id, health_status,
                                      should_respect_grace_period=grace_period)
示例#16
0
region_name = 'ap-southeast-1'
region_ec2_endpoint = 'ec2.ap-southeast-1.amazonaws.com'
region_autoscale_endpoint = 'autoscaling.ap-southeast-1.amazonaws.com'

# Engine Scaling conf names to clean
scaling_confs = ['scaling_conf_name_1', 'scaling_conf_name_2']

# Connect EC2
# BUG FIX: previously referenced an undefined "region_endpoint"; the EC2
# endpoint defined above is "region_ec2_endpoint".
aws_region = RegionInfo(name=region_name, endpoint=region_ec2_endpoint)
conn = EC2Connection(aws_access_key_id, aws_secret_access_key, region=aws_region)

# Connect autoscaling service
aws_region_as = RegionInfo(name=region_name, endpoint=region_autoscale_endpoint)
conn_as = AutoScaleConnection(aws_access_key_id, aws_secret_access_key, region=aws_region_as)

lcs = conn_as.get_all_launch_configurations(names=scaling_confs)

# For each launch configuration: deregister its AMI, delete the AMI's
# snapshots, then delete the launch configuration itself.
for lc in lcs:
    try:
        img = conn.get_image(lc.image_id)
        snaps = conn.get_all_snapshots(filters={"description": "*" + img.id + "*"})
        # BUG FIX: "image" was undefined (NameError); deregister the image
        # object fetched above.
        img.deregister(delete_snapshot=False)
        for snap in snaps:
            snap.delete()
        print("scaling configuration image and these related " + str(snaps) + " snapshots removed")
    except Exception:
        # Best-effort cleanup: the AMI may already be gone.
        print("ami not found " + lc.image_id)
    conn_as.delete_launch_configuration(lc.name)
    print("\ndeleted scaling configuration " + str(lc.name))
示例#17
0
def main():
    """Trigger a full LSDA rollout.

    Boots a staging instance from either the current ASG image or a clean
    Ubuntu 12.04 base image, provisions it over SSH, bakes a new AMI from
    it, installs that AMI in a fresh LaunchConfiguration on the
    'LSDA Worker Pool' group, prunes stale launch configurations and
    images, and (unless --no-restart) stops all nodes in the group so the
    ASG relaunches them from the new configuration.
    """
    parser = argparse.ArgumentParser(
      description = "triggers a full LSDA rollout")

    parser.add_argument("--inspect", action = "store_true",
      help = "pause before baking AMI", default = False)
    parser.add_argument("--clean", action = "store_true",
      help = "reset from clean Ubuntu 12.04 image", default = False)
    parser.add_argument("--no-restart", action = "store_true",
      dest = "no_restart", help = "don't restart all nodes in ASG",
      default = False)

    options = parser.parse_args()

    logging.info("Starting rollout.")

    conn_ec2 = boto.ec2.connect_to_region("us-east-1")
    conn_ec2_as = AutoScaleConnection()

    # BUG FIX: `group` is needed later (to install the new launch config
    # and to restart nodes) on BOTH paths, but the original only fetched
    # it on the non---clean path, raising NameError under --clean.
    group = conn_ec2_as.get_all_groups(['LSDA Worker Pool'])[0]

    if not options.clean:
        logging.info("Searching for existing images...")

        launch_config = conn_ec2_as.get_all_launch_configurations(
          names=[group.launch_config_name])[0]

        ami_id = launch_config.image_id
        logging.info("Using existing image {0}".format(ami_id))

    else:
        ami_id = 'ami-59a4a230' # Clean Ubuntu 12.04.
        logging.info("Using base image {0}".format(ami_id))

    reservation = conn_ec2.run_instances(
        image_id = ami_id,
        key_name = 'jeremy-aws-key',
        instance_type = 't1.micro',
        security_groups = ['Worker Nodes'],
    )

    try:
        instance = reservation.instances[0]
        logging.info("Waiting for instance {} to start...".format(instance.id))

        instance.update()
        while instance.ip_address is None:
            logging.info("Not ready. Retrying in 10 seconds...")
            time.sleep(10)
            instance.update()

        # Poll until sshd accepts connections; host-key checks are disabled
        # because the instance is freshly booted and throwaway.
        while True:
            result = subprocess.call(["ssh", "-o",
              "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no",
              "ubuntu@{}".format(instance.ip_address), "uname -r"])
            if result != 0:
                logging.info("Not ready for SSH. Retrying in 10 seconds...")
                time.sleep(10)
            else:
                break

        logging.info("Instance has started; running setup script.")
        logging.info("(IP address is {})".format(instance.ip_address))

        subprocess.check_call(["ssh", "-o", "UserKnownHostsFile=/dev/null",
          "-o", "StrictHostKeyChecking=no",
          "ubuntu@{}".format(instance.ip_address),
          "sudo stop lsda; sleep 20; sudo rm worker.sh;"
          "wget https://raw.github.com/fatlotus/lsda-infrastructure/"
          "master/servers/worker.sh; sudo bash worker.sh"])

        if options.inspect:
            logging.info("Connect to ubuntu@{} to inspect the image."
              .format(instance.ip_address))
            logging.info("When you're done, press CTRL-C.")

            try:
                while True:
                    time.sleep(3600)
            except KeyboardInterrupt:
                pass

        logging.info("Creating AMI from existing image.")
        new_image = instance.create_image(
            name = ('Latest-{:%Y-%m-%d--%H-%M-%S}'.
              format(datetime.datetime.now())),
            description = "(automatically generated)"
        )

        # Give the API a moment to register the new AMI before polling it.
        time.sleep(10)

        image_object = conn_ec2.get_image(new_image)

        while image_object.state == "pending":
            logging.info("State is still pending. Retrying in 10 seconds.")
            time.sleep(10)
            image_object.update()

    finally:
        # Always tear the staging instance down, even if baking failed.
        logging.warn("Stopping all nodes.")
        for node in reservation.instances:
            node.terminate()

    logging.info("Creating new LaunchConfiguration.")

    mapping = BlockDeviceMapping()
    mapping["/dev/sdb"] = BlockDeviceType(ephemeral_name = "ephemeral0")
    mapping["/dev/sdc"] = BlockDeviceType(ephemeral_name = "ephemeral1")

    new_launch_config = LaunchConfiguration(
        conn_ec2_as,
        name = ('Latest-{:%Y-%m-%d--%H-%M-%S}'.
          format(datetime.datetime.now())),
        image_id = new_image,
        security_groups = ['sg-f9a08492'],
        instance_type = 'c3.large',
        block_device_mappings = [mapping],
        instance_profile_name = ("arn:aws:iam::470084502640:instance-profile"
          "/dal-access"),
        spot_price = 0.02,
    )
    conn_ec2_as.create_launch_configuration(new_launch_config)

    logging.info("Setting launch configuration in existing ASG.")
    group.launch_config_name = new_launch_config.name
    group.update()

    logging.info("Cleaning up old launch configurations.")
    for config in conn_ec2_as.get_all_launch_configurations():
        if config.image_id != new_launch_config.image_id:
            conn_ec2_as.delete_launch_configuration(config.name)

    logging.info("Cleaning up old images.")
    for image in conn_ec2.get_all_images(filters={"name":["LatestImage"]}):
        if image.id != new_image:
            conn_ec2.deregister_image(image.id, True)

    logging.info("Rollout complete. New image is {}.".format(new_image))

    if not options.no_restart:
        logging.info("Triggering reload of all nodes in ASG.")
        for instance in group.instances:
            # BUG FIX: get_all_instances expects a list of instance ids;
            # the original passed a bare string.
            for reservation in conn_ec2.get_all_instances([instance.instance_id]):
                reservation.stop_all()
示例#18
0
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        """Open an AutoScaling connection to a Eucalyptus cloud controller,
        or to AWS proper when *clc_host* is an amazonaws.com endpoint.

        :param clc_host: cloud-controller (or AWS EC2) hostname
        :param access_id: AWS/Eucalyptus access key id
        :param secret_key: AWS/Eucalyptus secret key
        :param token: security (session) token passed through to boto
        """
        #boto.set_stream_logger('foo')
        path = '/services/AutoScaling'
        port = 8773
        # Real AWS endpoint: swap the ec2 hostname for the autoscaling one
        # and use the standard HTTPS port and root path.
        if clc_host[len(clc_host) - 13:] == 'amazonaws.com':
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            port = 443
        # (A dead `reg = None` assignment inside the branch above was
        # removed; `reg` is assigned unconditionally here.)
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        # NOTE(review): this is a lexicographic string comparison, so e.g.
        # '2.10' < '2.6' is True; it only behaves while boto stays below
        # 2.10 -- confirm against the deployed boto version.
        if boto.__version__ < '2.6':
            self.conn = AutoScaleConnection(access_id,
                                            secret_key,
                                            region=reg,
                                            port=port,
                                            path=path,
                                            is_secure=True,
                                            security_token=token,
                                            debug=0)
        else:
            # boto >= 2.6 validates SSL certificates by default; disable
            # that for private-cloud endpoints with self-signed certs.
            self.conn = AutoScaleConnection(access_id,
                                            secret_key,
                                            region=reg,
                                            port=port,
                                            path=path,
                                            validate_certs=False,
                                            is_secure=True,
                                            security_token=token,
                                            debug=0)
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        """Serialize *obj* to the file *name* as indented JSON, using the
        boto-aware encoder so AWS/AutoScaling objects serialize cleanly."""
        # `with` guarantees the file is closed even if json.dump raises.
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        """Create the auto-scaling group *as_group* through the
        underlying boto connection and return boto's result."""
        conn = self.conn
        return conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        """Delete the auto-scaling group called *name*.

        *force_delete* is passed straight through to boto's
        delete_auto_scaling_group.
        """
        conn = self.conn
        return conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        # NOTE(review): this early return stubs the method out so it always
        # reports no groups; everything below it is unreachable dead code.
        # Presumably a temporary mock/disable left in place -- confirm
        # whether the live lookup below should be restored.
        return []
        # Unreachable: real lookup plus optional mock-data capture.
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self,
                                      instance_ids=None,
                                      max_records=None,
                                      next_token=None):
        # NOTE(review): this early return stubs the method out so it always
        # reports no instances; everything below it is unreachable dead
        # code (same pattern as get_all_groups). Confirm whether the live
        # lookup below should be restored.
        return []
        # Unreachable: real lookup plus optional mock-data capture.
        obj = self.conn.get_all_autoscaling_instances(instance_ids,
                                                      max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self,
                             group_name,
                             desired_capacity,
                             honor_cooldown=False):
        """Set the desired instance count of the group *group_name*.

        When *honor_cooldown* is True the service rejects the change while
        the group's cooldown period is still in effect.
        """
        return self.conn.set_desired_capacity(
            group_name, desired_capacity, honor_cooldown)

    def set_instance_health(self,
                            instance_id,
                            health_status,
                            should_respect_grace_period=True):
        """Explicitly set the health status of instance *instance_id*,
        optionally honoring the group's grace period."""
        return self.conn.set_instance_health(
            instance_id, health_status, should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        """Terminate *instance_id*; when *decrement_capacity* is True the
        group's desired capacity is reduced instead of replacing it."""
        conn = self.conn
        return conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        """Push pending changes on *as_group* to the service.

        The group is rebound to this interface's connection first so the
        update is issued through our endpoint.
        """
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        """Register the launch configuration *launch_config* through the
        underlying boto connection."""
        conn = self.conn
        return conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        """Delete the launch configuration named *launch_config_name*."""
        conn = self.conn
        return conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names, max_records,
                                      next_token):
        """Fetch launch configurations matching *config_names*, dumping
        the result to mock-data JSON when *saveclcdata* is set."""
        configs = self.conn.get_all_launch_configurations(
            names=config_names,
            max_records=max_records,
            next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(configs, "mockdata/AS_LaunchConfigs.json")
        return configs