def delete_autoscaling():
    con = AutoScaleConnection(aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                              aws_access_key_id=AWS_ACCESS_KEY,
                              region=RegionInfo(name=REGION,
                                               endpoint='autoscaling.%s.amazonaws.com' % REGION))

    print "Deleting autoscaling group.."
    group = con.get_all_groups(names=[AUTOSCALING_GROUP_NAME])[0]
    print "shutting down instances"
    group.shutdown_instances()
    time.sleep(LONG_SLEEP_PERIOD)
    print "Deleting autoscaling group itself"
    con.delete_auto_scaling_group(AUTOSCALING_GROUP_NAME, force_delete=True)
    print "Deleting launch configuration"
    con.delete_launch_configuration(AUTOSCALING_GROUP_NAME)
    con.close()
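
# The delete_autoscaling() helper above relies on module-level imports and
# configuration constants that are not shown here. A minimal sketch of that
# setup follows; the values are placeholders, not the original configuration.
import time

from boto.ec2.autoscale import AutoScaleConnection
from boto.regioninfo import RegionInfo

AWS_ACCESS_KEY = 'AKIA...'                        # placeholder access key id
AWS_SECRET_ACCESS_KEY = '...'                     # placeholder secret key
REGION = 'us-east-1'                              # placeholder region name
AUTOSCALING_GROUP_NAME = 'my-autoscaling-group'   # placeholder group / launch config name
LONG_SLEEP_PERIOD = 120                           # seconds to wait for instances to shut down
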
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        self.access_id = access_id
        self.secret_key = secret_key
        self.token = token
        self.set_endpoint(clc_host)

    def set_endpoint(self, endpoint):
        #boto.set_stream_logger('scale')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
        port = 8773
        if endpoint.endswith('amazonaws.com'):
            endpoint = endpoint.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = RegionInfo(endpoint=endpoint)
            port = 443
        self.conn = AutoScaleConnection(self.access_id, self.secret_key, region=reg,
                                  port=port, path=path,
                                  is_secure=True, security_token=self.token, debug=0)
        self.conn.APIVersion = '2011-01-01'
        if not endpoint.endswith('amazonaws.com'):
            self.conn.auth_region_name = 'Eucalyptus'
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)
        f.close()

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # notice, honor_cooldown not supported.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj

    # policy related
    def delete_policy(self, policy_name, autoscale_group=None):
        return self.conn.delete_policy(policy_name, autoscale_group)

    def get_all_policies(self, as_group=None, policy_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_policies(as_group, policy_names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Policies.json")
        return obj

    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
        return self.conn.execute_policy(policy_name, as_group, honor_cooldown)

    def create_scaling_policy(self, scaling_policy):
        return self.conn.create_scaling_policy(scaling_policy)

    def get_all_adjustment_types(self):
        return self.conn.get_all_adjustment_types()

    # tag related
    def delete_tags(self, tags):
        return self.conn.delete_tags(tags)

    def get_all_tags(self, filters=None, max_records=None, next_token=None):
        obj = self.conn.get_all_tags(filters, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Tags.json")
        return obj

    def create_or_update_tags(self, tags):
        return self.conn.create_or_update_tags(tags)
print "shutdown the instances in the autoscaling group"
ag.shutdown_instances()
sleep(20)

# # delete the autoscale group
print "delete the autoscaling group"
ag.delete()
# # delete the launch configuration
# lc.delete()


# clear the last execution results
# print 'Deleting the auto scaling group'
# conn_as.delete_auto_scaling_group('my_group')
print 'Deleting the launch configuration'
conn_as.delete_launch_configuration('my_launch_config')
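
# Note: the fragment above is not self-contained. It assumes an existing
# AutoScaleConnection (conn_as), an AutoScalingGroup object (ag) previously
# fetched from it, and sleep imported from the time module, roughly:
#
#   from time import sleep
#   from boto.ec2.autoscale import AutoScaleConnection
#   conn_as = AutoScaleConnection(aws_access_key_id, aws_secret_access_key)
#   ag = conn_as.get_all_groups(names=['my_group'])[0]
#
# The credentials and the 'my_group' name in this sketch are placeholders.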





# if __name__ == '__main__':
#     main(sys.argv)







Example #4
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        port = 8773
        if clc_host.endswith('amazonaws.com'):
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = None
            port = 443
        self.conn = AutoScaleConnection(access_id,
                                        secret_key,
                                        region=reg,
                                        port=port,
                                        path=path,
                                        is_secure=True,
                                        security_token=token,
                                        debug=0)
        self.conn.APIVersion = '2011-01-01'
        if not clc_host.endswith('amazonaws.com'):
            self.conn.auth_region_name = 'Eucalyptus'
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)
        f.close()

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self,
                                      instance_ids=None,
                                      max_records=None,
                                      next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids,
                                                      max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self,
                             group_name,
                             desired_capacity,
                             honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # notice, honor_cooldown not supported.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self,
                            instance_id,
                            health_status,
                            should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self,
                                      config_names=None,
                                      max_records=None,
                                      next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names,
                                                      max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj

    # policy related
    def delete_policy(self, policy_name, autoscale_group=None):
        return self.conn.delete_policy(policy_name, autoscale_group)

    def get_all_policies(self,
                         as_group=None,
                         policy_names=None,
                         max_records=None,
                         next_token=None):
        return self.conn.get_all_policies(as_group, policy_names, max_records,
                                          next_token)

    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
        return self.conn.execute_policy(policy_name, as_group, honor_cooldown)

    def create_scaling_policy(self, scaling_policy):
        return self.conn.create_scaling_policy(scaling_policy)

    def get_all_adjustment_types(self):
        return self.conn.get_all_adjustment_types()

    # tag related
    def delete_tags(self, tags):
        return self.conn.delete_tags(tags)

    def get_all_tags(self, filters=None, max_records=None, next_token=None):
        return self.conn.get_all_tags(filters, max_records, next_token)

    def create_or_update_tags(self, tags):
        return self.conn.create_or_update_tags(tags)
Example #5
class MSBManager:
    def __init__(self, aws_access_key, aws_secret_key):
        self.ec2_conn = EC2Connection(aws_access_key, aws_secret_key)
        self.elb_conn = ELBConnection(aws_access_key, aws_secret_key)
        self.auto_scale_conn = AutoScaleConnection(aws_access_key, aws_secret_key)
        self.cloud_watch_conn = CloudWatchConnection(aws_access_key, aws_secret_key)
        self.default_cooldown = 60

    def get_security_group(self, name):
        sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
        return sgs[0] if sgs else None

    def create_security_group(self, name, description):
        sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
        sg = sgs[0] if sgs else None
        if not sgs:
            sg = self.ec2_conn.create_security_group(name, description)

        try:
            sg.authorize(ip_protocol="-1", from_port=None, to_port=None, cidr_ip="0.0.0.0/0", dry_run=False)
        except EC2ResponseError:
            pass
        return sg

    def remove_security_group(self, name):
        self.ec2_conn.delete_security_group(name=name)

    def create_instance(self, image, instance_type, key_name, zone, security_groups, tags):
        instance = None
        reservations = self.ec2_conn.get_all_instances()
        for reservation in reservations:
            for i in reservation.instances:
                if "Name" in i.tags and i.tags["Name"] == tags["Name"] and i.state == "running":
                    instance = i
                    break

        if not instance:
            reservation = self.ec2_conn.run_instances(
                image,
                instance_type=instance_type,
                key_name=key_name,
                placement=zone,
                security_groups=security_groups,
                monitoring_enabled=True,
            )
            instance = reservation.instances[0]
            while instance.update() != "running":
                time.sleep(5)
            time.sleep(10)
            self.ec2_conn.create_tags([instance.id], tags)

        return instance

    def request_spot_instance(self, bid, image, instance_type, key_name, zone, security_groups, tags):
        req = self.ec2_conn.request_spot_instances(
            price=bid,
            instance_type=instance_type,
            image_id=image,
            placement=zone,
            key_name=key_name,
            security_groups=security_groups,
        )
        instance_id = None

        while not instance_id:
            job_sir_id = req[0].id
            requests = self.ec2_conn.get_all_spot_instance_requests()
            for sir in requests:
                if sir.id == job_sir_id:
                    instance_id = sir.instance_id
                    break
            print "Job {} not ready".format(job_sir_id)
            time.sleep(60)

        self.ec2_conn.create_tags([instance_id], tags)

    def remove_instance(self, instance_id):
        self.remove_instances([instance_id])

    def remove_instances(self, instance_ids):
        self.ec2_conn.terminate_instances(instance_ids)

    def remove_instance_by_tag_name(self, name):
        reservations = self.ec2_conn.get_all_instances()
        data_center_instance_ids = []
        for reservation in reservations:
            for instance in reservation.instances:
                if "Name" in instance.tags and instance.tags["Name"] == name and instance.state == "running":
                    data_center_instance_ids.append(instance.id)
        if data_center_instance_ids:
            self.remove_instances(data_center_instance_ids)

    def create_elb(self, name, zone, project_tag_value, security_group_id, instance_ids=None):
        lbs = [l for l in self.elb_conn.get_all_load_balancers() if l.name == name]
        lb = lbs[0] if lbs else None
        if not lb:
            hc = HealthCheck(
                timeout=50, interval=60, healthy_threshold=2, unhealthy_threshold=8, target="HTTP:80/heartbeat"
            )
            ports = [(80, 80, "http")]
            zones = [zone]
            lb = self.elb_conn.create_load_balancer(name, zones, ports)

            self.elb_conn.apply_security_groups_to_lb(name, [security_group_id])
            lb.configure_health_check(hc)
            if instance_ids:
                lb.register_instances(instance_ids)

            params = {
                "LoadBalancerNames.member.1": lb.name,
                "Tags.member.1.Key": "15619project",
                "Tags.member.1.Value": project_tag_value,
            }
            lb.connection.get_status("AddTags", params, verb="POST")
        return lb

    def remove_elb(self, name):
        self.elb_conn.delete_load_balancer(name)

    def create_launch_configuration(self, name, image, key_name, security_groups, instance_type):
        lcs = [l for l in self.auto_scale_conn.get_all_launch_configurations() if l.name == name]
        lc = lcs[0] if lcs else None
        if not lc:
            lc = LaunchConfiguration(
                name=name,
                image_id=image,
                key_name=key_name,
                security_groups=[security_groups],
                instance_type=instance_type,
            )
            self.auto_scale_conn.create_launch_configuration(lc)
        return lc

    def remove_launch_configuration(self, name):
        self.auto_scale_conn.delete_launch_configuration(name)

    def create_autoscaling_group(self, name, lb_name, zone, tags, launch_config_args, instance_ids=None):
        # launch_config_args: keyword arguments for create_launch_configuration
        # (name, image, key_name, security_groups, instance_type), supplied by the caller
        lc = self.create_launch_configuration(**launch_config_args)
        as_groups = [a for a in self.auto_scale_conn.get_all_groups() if a.name == name]
        as_group = as_groups[0] if as_groups else None
        if not as_group:
            as_group = AutoScalingGroup(
                group_name=name,
                load_balancers=[lb_name],
                availability_zones=[zone],
                launch_config=lc,
                min_size=4,
                max_size=4,
                health_check_type="ELB",
                health_check_period=120,
                connection=self.auto_scale_conn,
                default_cooldown=self.default_cooldown,
                desired_capacity=4,
                tags=tags,
            )

            self.auto_scale_conn.create_auto_scaling_group(as_group)
            if instance_ids:
                self.auto_scale_conn.attach_instances(name, instance_ids)

            scale_up_policy = ScalingPolicy(
                name="scale_up",
                adjustment_type="ChangeInCapacity",
                as_name=name,
                scaling_adjustment=1,
                cooldown=self.default_cooldown,
            )
            scale_down_policy = ScalingPolicy(
                name="scale_down",
                adjustment_type="ChangeInCapacity",
                as_name=name,
                scaling_adjustment=-1,
                cooldown=self.default_cooldown,
            )

            self.auto_scale_conn.create_scaling_policy(scale_up_policy)
            self.auto_scale_conn.create_scaling_policy(scale_down_policy)

            scale_up_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=["scale_up"])[0]
            scale_down_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=["scale_down"])[0]

            alarm_dimensions = {"AutoScalingGroupName": name}
            scale_up_alarm = MetricAlarm(
                name="scale_up_on_cpu",
                namespace="AWS/EC2",
                metric="CPUUtilization",
                statistic="Average",
                comparison=">",
                threshold=85,
                period=60,
                evaluation_periods=1,
                alarm_actions=[scale_up_policy.policy_arn],
                dimensions=alarm_dimensions,
            )
            self.cloud_watch_conn.create_alarm(scale_up_alarm)
            scale_down_alarm = MetricAlarm(
                name="scale_down_on_cpu",
                namespace="AWS/EC2",
                metric="CPUUtilization",
                statistic="Average",
                comparison="<",
                threshold=60,
                period=60,
                evaluation_periods=1,
                alarm_actions=[scale_down_policy.policy_arn],
                dimensions=alarm_dimensions,
            )
            self.cloud_watch_conn.create_alarm(scale_down_alarm)

        return as_group

    def update_autoscaling_group_max_size(self, as_group, max_size):
        setattr(as_group, "max_size", max_size)
        as_group.update()

    def update_autoscaling_group_min_size(self, as_group, min_size):
        setattr(as_group, "min_size", min_size)
        as_group.update()

    def remove_autoscaling_group(self, name):
        self.auto_scale_conn.delete_auto_scaling_group(name)
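
# A short usage sketch for MSBManager (illustrative only; the credentials,
# AMI id, key name, availability zone and names below are placeholders, not
# values from the example above):
#
#   manager = MSBManager('access-key-id', 'secret-key')
#   sg = manager.create_security_group('msb-web', 'web tier security group')
#   lc = manager.create_launch_configuration('msb-lc', 'ami-12345678',
#                                            'my-key', 'msb-web', 'm1.small')
#   elb = manager.create_elb('msb-elb', 'us-east-1a', 'project-phase-1', sg.id)
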
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/AutoScaling'
        port = 8773
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        if clc_host.endswith('amazonaws.com'):
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = None
            port = 443
        self.conn = AutoScaleConnection(access_id, secret_key, region=reg,
                                  port=port, path=path,
                                  is_secure=True, security_token=token, debug=0)
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)
        f.close()

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # notice, honor_cooldown not supported.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj
def main():
    parser = argparse.ArgumentParser(
      description = "triggers a full LSDA rollout")
    
    parser.add_argument("--inspect", action = "store_true",
      help = "pause before baking AMI", default = False)
    parser.add_argument("--clean", action = "store_true",
      help = "reset from clean Ubuntu 12.04 image", default = False)
    parser.add_argument("--no-restart", action = "store_true",
      dest = "no_restart", help = "don't restart all nodes in ASG",
      default = False)
    
    options = parser.parse_args()
    
    logging.info("Starting rollout.")
    
    conn_ec2 = boto.ec2.connect_to_region("us-east-1")
    conn_ec2_as = AutoScaleConnection()
    
    if not options.clean:
        logging.info("Searching for existing images...")
        
        group = conn_ec2_as.get_all_groups(['LSDA Worker Pool'])[0]
        launch_config = conn_ec2_as.get_all_launch_configurations(
          names=[group.launch_config_name])[0]
        
        existing_images = conn_ec2.get_all_images(owners = ["self"])[0]
        
        ami_id = launch_config.image_id
        logging.info("Using existing image {0}".format(ami_id))
    
    else:
        ami_id = 'ami-59a4a230' # Clean Ubuntu 12.04.
        logging.info("Using base image {0}".format(ami_id))
    
    reservation = conn_ec2.run_instances(
        image_id = ami_id,
        key_name = 'jeremy-aws-key',
        instance_type = 't1.micro',
        security_groups = ['Worker Nodes'],
    )
    
    try:
        instance = reservation.instances[0]
        logging.info("Waiting for instance {} to start...".format(instance.id))
        
        instance.update()
        while instance.ip_address is None:
            logging.info("Not ready. Retrying in 10 seconds...")
            time.sleep(10)
            instance.update()
        
        while True:
            result = subprocess.call(["ssh", "-o",
              "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no",
              "ubuntu@{}".format(instance.ip_address), "uname -r"])
            if result != 0:
                logging.info("Not ready for SSH. Retrying in 10 seconds...")
                time.sleep(10)
            else:
                break
        
        logging.info("Instance has started; running setup script.")
        logging.info("(IP address is {})".format(instance.ip_address))
        
        subprocess.check_call(["ssh", "-o", "UserKnownHostsFile=/dev/null",
          "-o", "StrictHostKeyChecking=no",
          "ubuntu@{}".format(instance.ip_address),
          "sudo stop lsda; sleep 20; sudo rm worker.sh;"
          "wget https://raw.github.com/fatlotus/lsda-infrastructure/"
          "master/servers/worker.sh; sudo bash worker.sh"])
        
        if options.inspect:
            logging.info("Connect to ubuntu@{} to inspect the image."
              .format(instance.ip_address))
            logging.info("When you're done, press CTRL-C.")
            
            try:
                while True:
                    time.sleep(3600)
            except KeyboardInterrupt:
                pass
        
        logging.info("Creating AMI from existing image.")
        new_image = instance.create_image(
            name = ('Latest-{:%Y-%m-%d--%H-%M-%S}'.
              format(datetime.datetime.now())),
            description = "(automatically generated)"
        )
        
        time.sleep(10)
        
        image_object = conn_ec2.get_image(new_image)
        
        while image_object.state == "pending":
            logging.info("State is still pending. Retrying in 10 seconds.")
            time.sleep(10)
            image_object.update()
        
    finally:
        logging.warn("Stopping all nodes.")
        for node in reservation.instances:
            node.terminate()
    
    logging.info("Creating new LaunchConfiguration.")
    
    mapping = BlockDeviceMapping()
    mapping["/dev/sdb"] = BlockDeviceType(ephemeral_name = "ephemeral0")
    mapping["/dev/sdc"] = BlockDeviceType(ephemeral_name = "ephemeral1")
    
    new_launch_config = LaunchConfiguration(
        conn_ec2_as,
        name = ('Latest-{:%Y-%m-%d--%H-%M-%S}'.
          format(datetime.datetime.now())),
        image_id = new_image,
        security_groups = ['sg-f9a08492'],
        instance_type = 'c3.large',
        block_device_mappings = [mapping],
        instance_profile_name = ("arn:aws:iam::470084502640:instance-profile"
          "/dal-access"),
        spot_price = 0.02,
    )
    conn_ec2_as.create_launch_configuration(new_launch_config)
    
    logging.info("Setting launch configuration in existing ASG.")
    group.launch_config_name = new_launch_config.name
    group.update()
    
    logging.info("Cleaning up old launch configurations.")
    for config in conn_ec2_as.get_all_launch_configurations():
        if config.image_id != new_launch_config.image_id:
            conn_ec2_as.delete_launch_configuration(config.name)
    
    logging.info("Cleaning up old images.")
    for image in conn_ec2.get_all_images(filters={"name":["LatestImage"]}):
        if image.id != new_image:
            conn_ec2.deregister_image(image.id, True)
    
    logging.info("Rollout complete. New image is {}.".format(new_image))
    
    if not options.no_restart:
        logging.info("Triggering reload of all nodes in ASG.")
        for instance in group.instances:
            for reservation in conn_ec2.get_all_instances([instance.instance_id]):
                reservation.stop_all()
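
# main() above assumes the following module-level imports (a sketch of the
# setup that is not shown in this snippet):
#
#   import argparse, datetime, logging, subprocess, time
#   import boto.ec2
#   from boto.ec2.autoscale import AutoScaleConnection, LaunchConfiguration
#   from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
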
# Connect EC2
aws_region = RegionInfo(name=region_name, endpoint=region_endpoint)
conn = EC2Connection(aws_access_key_id,
                     aws_secret_access_key,
                     region=aws_region)

# Connect autoscaling service
aws_region_as = RegionInfo(name=region_name,
                           endpoint=region_autoscale_endpoint)
conn_as = AutoScaleConnection(aws_access_key_id,
                              aws_secret_access_key,
                              region=aws_region_as)

lcs = conn_as.get_all_launch_configurations(names=scaling_confs)

for lc in lcs:
    try:
        img = conn.get_image(lc.image_id)
        snaps = conn.get_all_snapshots(
            filters={"description": "*" + img.id + "*"})
        img.deregister(delete_snapshot=False)
        for snap in snaps:
            snap.delete()
        print "scaling configuration image and these related " + str(
            snaps) + " snapshots removed"
    except:
        print "ami not found " + lc.image_id
        pass
    conn_as.delete_launch_configuration(lc.name)
    print "\ndeleted scaling configuration " + str(lc.name)
Example #9
# ---------------------------Clean UP-------------------------------------------
res = conn.get_all_instances()
ids = []
for r in res:
    ids.append(r.instances[0].id)

for s in ids:
    if s == lg_id:
        continue
    try:
        conn.terminate_instances(instance_ids=[s])
    except:
        continue

time.sleep(100)

con_elb.delete_load_balancer('ELB')
time.sleep(100)

con_as.delete_auto_scaling_group('Project2.2_AutoSacling_Group', force_delete=True)
con_as.delete_launch_configuration('Project2.2_Lauch_Config')

while True:
    try:
        conn.delete_security_group(name='LBAS')
        conn.delete_security_group(name='Load_Generator')
        break
    except:
        time.sleep(5)

Example #10
class IcsAS(object):

    """
    ICS Library for AutoScale
    """

    def __init__(self, region, **kwargs):
        self.conn = AutoScaleConnection(region=get_region(region), **kwargs)

    def to_list(self, input):
        """
        Validate the input: if it is a string rather than a list, wrap it in a list
        """
        if input is None:
            return input
        elif isinstance(input, list):
            return input
        elif isinstance(input, basestring):
            return [input]
        else:
            raise IcsASException("Need the type '%s' but '%s' found"
                                 % ('list', type(input)))

    def get_group_name_from_instance(self, instance_id):
        """
        Get the ASG name from the specific instance id

        :type instance_id: string
        :param instance_id: EC2 instance id starting with 'i-xxxxxxx'

        :rtype: string
        :return: name of the ASG this instance belongs to
        """
        instances = self.conn.get_all_autoscaling_instances(
            instance_ids=self.to_list(instance_id))
        if instances:
            return instances[0].group_name
        else:
            return None

    def get_instances_from_group_name(self, name):
        """
        Get the instances from the specific ASG name

        :type name: string
        :param name: the specific ASG name

        :rtype: list
        :return: a list containing all the instances
        """
        instances = []
        for group in self.conn.get_all_groups(names=self.to_list(name)):
            instances.extend(group.instances)
        return instances

    def get_group_from_name(self, name):
        """
        Get the ASG from its name

        :type name: string
        :param name: the ASG name

        :rtype: list
        :return: a list representing the specific ASG(s)
        """
        return self.conn.get_all_groups(names=self.to_list(name))

    def get_launch_config_from_name(self, name):
        """
        Get the Launch Configuration from its name

        :type name: string
        :param name: the Launch Configuration name

        :rtype: list
        :return: a list representing the specific Launch Configuration(s)
        """
        return self.conn.get_all_launch_configurations(
            names=self.to_list(name))

    def create_launch_config(self, launch_config):
        """
        Create the Launch Configuration

        :type launch_config: class
        :param launch_config: boto launch_config object

        :rtype: string
        :return: AWS request Id
        """
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_config_from_name(self, name):
        """
        Delete the Launch Configuration from its name

        :type name: string
        :param name: the name of launch configuration

        :rtype: string
        :return: AWS request Id
        """
        log.info("delete the launch configuration:")
        log.info(">> %s" % name)
        return self.conn.delete_launch_configuration(name)

    def update_launch_config(self, name, launch_config):
        """
        Update the Launch Configuration for specific ASG

        :type name: string
        :param name: the name of Auto-Scaling Group

        :type launch_config: class
        :param launch_config: boto launch_config object

        :rtype: string
        :return: AWS request Id
        """
        groups = self.get_group_from_name(name)
        if groups:
            group = groups[0]
        else:
            raise IcsASException("no such Auto-Scaling Group '%s' found"
                                 % name)

        self.create_launch_config(launch_config)
        old_lc_name = group.launch_config_name
        new_lc_name = launch_config.name
        group.__dict__["launch_config_name"] = launch_config.name
        group.update()

        if self.get_launch_config_from_name(new_lc_name):
            group = self.get_group_from_name(name)[0]
            if group.launch_config_name == new_lc_name:
                return self.delete_launch_config_from_name(old_lc_name)
            else:
                raise IcsASException("failed to update " +
                                     "launch config for ASG '%s'"
                                     % name)
        else:
            raise IcsASException("no such new launch config '%s'"
                                 % new_lc_name)

    def suspend_scaling_group(self, name, scaling_processes=None):
        """
        Suspends Auto Scaling processes for an Auto Scaling group.

        :type name: string
        :param name: the ASG name

        :type scaling_processes: string or list
        :param scaling_processes: scaling process names

         * Launch
         * Terminate
         * HealthCheck
         * ReplaceUnhealthy
         * AZRebalance
         * AlarmNotification
         * ScheduledActions
         * AddToLoadBalancer
        """
        if not isinstance(name, basestring):
            return None
        group = self.get_group_from_name(self.to_list(name))[0]
        return group.suspend_processes(self.to_list(scaling_processes))

    def resume_scaling_group(self, name, scaling_processes=None):
        """
        Resumes Auto Scaling processes for an Auto Scaling group.

        :type name: string
        :param name: the ASG name

        :type scaling_processes: string or list
        :param scaling_processes: scaling process names

         * Launch
         * Terminate
         * HealthCheck
         * ReplaceUnhealthy
         * AZRebalance
         * AlarmNotification
         * ScheduledActions
         * AddToLoadBalancer
        """
        if not isinstance(name, basestring):
            return None
        group = self.get_group_from_name(self.to_list(name))[0]
        return group.resume_processes(self.to_list(scaling_processes))

    def terminate_group_instance(self, instance_id, decrement_capacity=True):
        """
        Terminates the specified instance. The desired group size can
        also be adjusted, if desired.

        :type instance_id: str
        :param instance_id: The ID of the instance to be terminated.

        :type decrement_capacity: bool
        :param decrement_capacity: Whether to decrement the size of the
            autoscaling group or not.
        """
        return self.conn.terminate_instance(
            instance_id=instance_id,
            decrement_capacity=decrement_capacity)

    def update_instance_health(self, instance_id, health_status,
                               grace_period=False):
        """
        Explicitly set the health status of an instance.

        :type instance_id: str
        :param instance_id: The identifier of the EC2 instance

        :type health_status: str
        :param health_status: The health status of the instance.

        * Healthy: the instance is healthy and should remain in service.
        * Unhealthy: the instance is unhealthy. \
            Auto Scaling should terminate and replace it.

        :type grace_period: bool
        :param grace_period: If True, this call should respect
            the grace period associated with the group.
        """

        self.conn.set_instance_health(instance_id, health_status,
                                      should_respect_grace_period=grace_period)
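
# Example use of IcsAS (illustrative only; it assumes get_region, log and
# IcsASException are provided by the surrounding module, and the region,
# instance id and group name below are placeholders):
#
#   autoscale = IcsAS('us-east-1')
#   group_name = autoscale.get_group_name_from_instance('i-0123abcd')
#   instances = autoscale.get_instances_from_group_name(group_name)
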
region_name='ap-southeast-1'
region_ec2_endpoint='ec2.ap-southeast-1.amazonaws.com'
region_autoscale_endpoint='autoscaling.ap-southeast-1.amazonaws.com'

# Engine Scaling conf names to clean
scaling_confs = ['scaling_conf_name_1','scaling_conf_name_2']

# Connect EC2
aws_region = RegionInfo(name=region_name, endpoint=region_ec2_endpoint)
conn = EC2Connection(aws_access_key_id,aws_secret_access_key,region=aws_region)

# Connect autoscaling service
aws_region_as = RegionInfo(name=region_name, endpoint=region_autoscale_endpoint)
conn_as = AutoScaleConnection(aws_access_key_id, aws_secret_access_key,region=aws_region_as)

lcs = conn_as.get_all_launch_configurations(names=scaling_confs)

for lc in lcs:
    try:
        img = conn.get_image(lc.image_id)
        snaps = conn.get_all_snapshots(filters={"description":"*"+img.id+"*"})
        img.deregister(delete_snapshot=False)
        for snap in snaps:
            snap.delete()
        print "scaling configuration image and these related "+str(snaps)+ " snapshots removed"
    except:
        print "ami not found " + lc.image_id
        pass
    conn_as.delete_launch_configuration(lc.name)
    print "\ndeleted scaling configuration "+ str(lc.name)
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/AutoScaling'
        port = 8773
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        if clc_host.endswith('amazonaws.com'):
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = None
            port = 443
        if boto.__version__ < '2.6':
            self.conn = AutoScaleConnection(access_id,
                                            secret_key,
                                            region=reg,
                                            port=port,
                                            path=path,
                                            is_secure=True,
                                            security_token=token,
                                            debug=0)
        else:
            self.conn = AutoScaleConnection(access_id,
                                            secret_key,
                                            region=reg,
                                            port=port,
                                            path=path,
                                            validate_certs=False,
                                            is_secure=True,
                                            security_token=token,
                                            debug=0)
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)
        f.close()

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        return []
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self,
                                      instance_ids=None,
                                      max_records=None,
                                      next_token=None):
        return []
        obj = self.conn.get_all_autoscaling_instances(instance_ids,
                                                      max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self,
                             group_name,
                             desired_capacity,
                             honor_cooldown=False):
        return self.conn.set_desired_capacity(group_name, desired_capacity,
                                              honor_cooldown)

    def set_instance_health(self,
                            instance_id,
                            health_status,
                            should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names=None, max_records=None,
                                      next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names,
                                                      max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj