def create_autoscaling_group(autoscale, cluster_name, master_node, opts,
                             slave_group, min_size=2, max_size=8):
    """Ensure a worker launch configuration and autoscaling group exist.

    Idempotent: reuses "<cluster_name>-lc" / "<cluster_name>-ag" when they
    already exist, otherwise creates them.  The worker Name tag is
    (re)applied on every call.

    :param autoscale: boto autoscale connection for the target region.
    :param cluster_name: prefix for the "-lc" / "-ag" resource names.
    :param master_node: master instance; its private DNS name is passed to
        workers through user_data as SPARK_MASTER.
    :param opts: parsed options (ami, key_pair, instance_type, spot_price,
        subnet_id, zone are read here).
    :param slave_group: security group attached to worker instances.
    :param min_size: minimum ASG size (default 2, the historical value).
    :param max_size: maximum ASG size (default 8, the historical value).
    """
    lclist = autoscale.get_all_launch_configurations(
        names=[cluster_name + "-lc"])
    if lclist:
        lc = lclist[0]
    else:
        lc = LaunchConfiguration(
            name=cluster_name + "-lc",
            image_id=opts.ami,
            key_name=opts.key_pair,
            security_groups=[slave_group.id],
            instance_type=opts.instance_type,
            # Workers discover the master via this user_data variable.
            user_data="SPARK_MASTER=" + master_node.private_dns_name + "\n",
            instance_monitoring=True,
            spot_price=opts.spot_price)
        autoscale.create_launch_configuration(lc)
    aglist = autoscale.get_all_groups(names=[cluster_name + "-ag"])
    if aglist:
        ag = aglist[0]
    else:
        ag = AutoScalingGroup(group_name=cluster_name + "-ag",
                              launch_config=lc,
                              min_size=min_size,
                              max_size=max_size,
                              connection=autoscale,
                              vpc_zone_identifier=opts.subnet_id,
                              availability_zones=[opts.zone])
        autoscale.create_auto_scaling_group(ag)
    # Tag is applied (or refreshed) whether or not the group pre-existed,
    # and propagates to every instance the group launches.
    as_tag = boto.ec2.autoscale.Tag(key='Name',
                                    value=cluster_name + '-worker',
                                    propagate_at_launch=True,
                                    resource_id=cluster_name + "-ag")
    autoscale.create_or_update_tags([as_tag])
def main():
    """Entry point: launch or destroy a Spark cluster on EC2.

    "launch": ensure security groups, a master instance and a worker
    autoscaling group exist, then wait for SSH and the Spark WebUI port.
    "destroy": terminate the master, shut down the autoscaling group's
    instances and delete the group and its launch configuration.
    """
    (opts, action, cluster_name) = parse_options()
    conn = boto.ec2.connect_to_region(opts.region)
    opts = validate_opts(conn, opts, action)
    if action == "launch":
        (master_group, slave_group) = setup_security_groups(
            conn, cluster_name, opts)
        # Reuse an already-running master when one is tagged, else start one.
        master_node = find_instance_by_name(conn, cluster_name + '-master')
        if not master_node:
            master_node = start_master(conn, opts, cluster_name, master_group)
        print("Master node: {m}".format(m=master_node))
        wait_for_cluster_state(
            conn=conn,
            cluster_instances=([master_node]),
        )
        autoscale = boto.ec2.autoscale.connect_to_region(opts.region)
        create_autoscaling_group(autoscale, cluster_name, master_node, opts,
                                 slave_group)
        create_autoscaling_policy(autoscale, cluster_name, opts)
        wait_for_tcp_port(master_node.public_dns_name)
        print("SSH ready:")
        print("ssh ubuntu@{h}".format(h=master_node.public_dns_name))
        # 18080 is the Spark master WebUI port (see message below).
        wait_for_tcp_port(master_node.public_dns_name, port=18080)
        print("Spark master ready:")
        print(
            "Spark WebUI: http://{h}:18080".format(
                h=master_node.public_dns_name))
    if action == "destroy":
        master_node = find_instance_by_name(conn, cluster_name + '-master')
        if master_node:
            print("Terminating master...")
            # Retag so a later "launch" does not find the dead instance.
            conn.create_tags(
                [master_node.id],
                {"Name": "{c}-master-terminated".format(c=cluster_name)})
            master_node.terminate()
        print("Shutting down autoscaling group...")
        autoscale = boto.ec2.autoscale.connect_to_region(opts.region)
        aglist = autoscale.get_all_groups(names=[cluster_name + "-ag"])
        ag = None
        if aglist:
            ag = aglist[0]
            ag.shutdown_instances()
            instances_ids = [i.instance_id for i in ag.instances]
            instances = conn.get_only_instances(instances_ids)
        else:
            instances = []
        lclist = autoscale.get_all_launch_configurations(
            names=[cluster_name + "-lc"])
        lc = None
        if lclist:
            lc = lclist[0]
        wait_for_cluster_state(
            conn, instances, cluster_state="terminated", name="instances")
        # Give AWS a moment to register the terminations before deleting.
        time.sleep(10)
        if ag:
            try:
                ag.delete()
            # "except Exception as e" replaces Py2-only "except Exception, e"
            except Exception as e:
                print("Couldn't delete autoscaling group: %s" % e)
        if lc:
            try:
                lc.delete()
            except Exception as e:
                print("Couldn't delete launch configuration: %s" % e)
def create_autoscaling_group(autoscale, cluster_name, master_node, opts,
                             slave_group):
    """Create (or reuse) the worker launch configuration and autoscaling
    group for this cluster, then apply the worker Name tag.

    Resources are named "<cluster_name>-lc" and "<cluster_name>-ag"; if a
    resource with that name already exists it is reused unchanged.
    """
    lc_name = cluster_name + "-lc"
    ag_name = cluster_name + "-ag"

    existing_configs = autoscale.get_all_launch_configurations(
        names=[lc_name])
    if existing_configs:
        launch_config = existing_configs[0]
    else:
        # Workers learn the master's address via the SPARK_MASTER variable
        # injected through user_data.
        worker_user_data = ("SPARK_MASTER=" +
                            master_node.private_dns_name + "\n")
        launch_config = LaunchConfiguration(
            name=lc_name,
            image_id=opts.ami,
            key_name=opts.key_pair,
            security_groups=[slave_group.id],
            instance_type=opts.instance_type,
            user_data=worker_user_data,
            instance_monitoring=True,
            spot_price=opts.max_spot_price)
        autoscale.create_launch_configuration(launch_config)

    existing_groups = autoscale.get_all_groups(names=[ag_name])
    if existing_groups:
        group = existing_groups[0]
    else:
        group = AutoScalingGroup(group_name=ag_name,
                                 launch_config=launch_config,
                                 min_size=opts.min_instances,
                                 max_size=opts.max_instances,
                                 connection=autoscale,
                                 vpc_zone_identifier=opts.subnet_id,
                                 availability_zones=[opts.zone])
        autoscale.create_auto_scaling_group(group)

    # Always (re)apply the Name tag; it propagates to launched workers.
    worker_tag = boto.ec2.autoscale.Tag(key='Name',
                                        value=cluster_name + '-worker',
                                        propagate_at_launch=True,
                                        resource_id=ag_name)
    autoscale.create_or_update_tags([worker_tag])
def process_launch_configuration():
    """Return {launch_config_name: [instance_type, spot_price]} for every
    launch configuration in the account.

    Connects to the module-level REGION.  Replaces the manual
    loop-and-assign with a single dict() construction (Py2.6-safe).
    """
    autoscale = boto.ec2.autoscale.connect_to_region(REGION)
    return dict(
        (conf.name, [conf.instance_type, conf.spot_price])
        for conf in autoscale.get_all_launch_configurations())
def process_launch_configuration():
    """Return a dict mapping each launch configuration's name to its
    [instance_type, spot_price] pair for every launch configuration
    in the account.
    """
    # Connection uses the module-level REGION constant.
    autoscale = boto.ec2.autoscale.connect_to_region(REGION)
    existing_confs = autoscale.get_all_launch_configurations()
    options = {}
    # One entry per configuration: name -> [instance_type, spot_price].
    for conf in existing_confs:
        options[conf.name] = [conf.instance_type, conf.spot_price]
    return options
def update_asg(ami, name='proxxy', region='us-east-1'):
    """Update Proxxy autoscaling group with a fresh AMI.

    Clones the group's current launch configuration with the new image id
    ('proxxy-<ami-id>'), points the group at the clone, then deletes the
    old launch configuration.  Exits early (status 0) if the group already
    uses the target configuration.

    Py2-only ``print`` statements replaced with ``print(...)`` calls,
    which behave identically for a single argument on Python 2 and 3.
    """
    if ami is None:
        print("AMI not specified")
        exit(1)
    ec2 = boto.ec2.connect_to_region(region)
    autoscale = boto.ec2.autoscale.connect_to_region(
        region, use_block_device_types=True)
    # get AMI metadata (rebinds `ami` from an id string to an Image object)
    ami = ec2.get_all_images(image_ids=[ami])[0]
    new_launch_config_name = 'proxxy-' + ami.id
    # get autoscaling group
    autoscale_group = autoscale.get_all_groups(names=[name])[0]
    # get old launch configuration
    old_launch_config_name = autoscale_group.launch_config_name
    if old_launch_config_name == new_launch_config_name:
        print("Autoscale Group '%s' already uses launch config '%s'"
              % (name, new_launch_config_name))
        exit(0)
    old_launch_config = autoscale.get_all_launch_configurations(
        names=[old_launch_config_name])[0]
    print("Old Launch Configuration: %s" % old_launch_config)
    # create new launch configuration based on the old one
    new_launch_config = _copy_launch_config(old_launch_config)
    new_launch_config.name = new_launch_config_name
    new_launch_config.image_id = ami.id
    autoscale.create_launch_configuration(new_launch_config)
    print("New Launch Configuration: %s" % new_launch_config)
    # switch autoscaling group from old LC to new LC
    autoscale_group.launch_config_name = new_launch_config_name
    result = autoscale_group.update()
    print(vars(result))
    # delete old launch configuration now that nothing references it
    old_launch_config.delete()
    print("Done")
# Require at least one property to change before touching AWS.
if not args.image_id and not args.instance_type:
    print("Specify at least one of image_id or instance_type")
    sys.exit(0)
ec2 = ec2.connect_to_region(args.region)
try:
    # Validate the AMI id up front; get_all_images raises on unknown ids.
    if args.image_id:
        ec2.get_all_images(image_ids=[args.image_id])
except exception.EC2ResponseError:
    print("It seems that '{0}' is not a valid image_id name or it does not exist ".format(args.image_id))
    sys.exit(1)
autoscale = autoscale.connect_to_region(args.region)
try:
    as_launch_config = autoscale.get_all_launch_configurations(names=[args.launch_config_name]).pop()
except IndexError:
    # Message grammar fixed: "Couldn't found" -> "Couldn't find".
    print("Couldn't find AutoScaling Launch Configuration")
    sys.exit(1)
try:
    as_group = autoscale.get_all_groups(names=[args.autoscale_group_name])[0]
except IndexError:
    print("Couldn't find autoscale group '{0}'".format(args.autoscale_group_name))
    sys.exit(1)
# Two working copies: "-tmp" bridges the rename, "new" becomes the final LC.
as_launch_config_tmp = copy(as_launch_config)
as_launch_config_new = copy(as_launch_config)
as_launch_config_tmp.name = "{0}-tmp".format(as_launch_config.name)
print("Creating temporary AutoScaling Launch Config named: {0}".format(as_launch_config_tmp.name))
if stack.stack_status not in ('ROLLBACK_COMPLETE') and stack.stack_name.startswith('tcm-'): stacks.append(stack) except BotoServerError, e: # log return None clusters = [] for stack in stacks: cluster = {'stack': stack} for resource in stack.list_resources(): if resource.resource_type == 'AWS::ElasticLoadBalancing::LoadBalancer': cluster['elb'] = elb.get_all_load_balancers(load_balancer_names=[resource.physical_resource_id])[0] elif resource.resource_type == 'AWS::AutoScaling::LaunchConfiguration': kwargs = {'names': [resource.physical_resource_id]} cluster['launch_config'] = autoscale.get_all_launch_configurations(**kwargs)[0] elif resource.resource_type == 'AWS::AutoScaling::AutoScalingGroup': cluster['group'] = autoscale.get_all_groups(names=[resource.physical_resource_id])[0] else: raise Exception("Unkonw resource type '%s'" % resource.resource_type) clusters.append(cluster) # sort list by stack creation time clusters.sort(key=lambda x: x['stack'].creation_time, reverse=True) return clusters def get_autoscaling_group(group_name): autoscale = get_aws_connection('autoscale')
sys.exit(0) ec2 = ec2.connect_to_region(args.region) try: if args.image_id: ec2.get_all_images(image_ids=[args.image_id]) except exception.EC2ResponseError: print( "It seems that '{0}' is not a valid image_id name or it does not exist " .format(args.image_id)) sys.exit(1) autoscale = autoscale.connect_to_region(args.region) try: as_launch_config = autoscale.get_all_launch_configurations( names=[args.launch_config_name]).pop() except IndexError: print("Couldn't found AutoScaling Launch Configuration") sys.exit(1) try: as_group = autoscale.get_all_groups(names=[args.autoscale_group_name])[0] except IndexError: print("Couldn't found autoscale group '{0}'".format( args.autoscale_group_name)) sys.exit(1) as_launch_config_tmp = copy(as_launch_config) as_launch_config_new = copy(as_launch_config) as_launch_config_tmp.name = "{0}-tmp".format(as_launch_config.name)
def main():
    """CLI entry point for managing a Spark cluster on EC2.

    ``launch``: set up security groups, ensure a master instance exists,
    create the worker autoscaling group and policy, and wait for the SSH
    and Spark WebUI (18080) ports to open.
    ``destroy``: terminate the master, drain the autoscaling group, then
    delete the group and its launch configuration.
    """
    (opts, action, cluster_name) = parse_options()
    conn = boto.ec2.connect_to_region(opts.region)
    opts = validate_opts(conn, opts, action)
    if action == "launch":
        (master_group, slave_group) = setup_security_groups(
            conn, cluster_name, opts)
        # Only start a master when none is tagged "<cluster>-master" yet.
        master_node = find_instance_by_name(conn, cluster_name + '-master')
        if not master_node:
            master_node = start_master(conn, opts, cluster_name, master_group)
        print("Master node: {m}".format(m=master_node))
        wait_for_cluster_state(
            conn=conn,
            cluster_instances=([master_node]),
        )
        autoscale = boto.ec2.autoscale.connect_to_region(opts.region)
        create_autoscaling_group(autoscale, cluster_name, master_node, opts,
                                 slave_group)
        create_autoscaling_policy(autoscale, cluster_name, opts)
        wait_for_tcp_port(master_node.public_dns_name)
        print("SSH ready:")
        print("ssh ubuntu@{h}".format(h=master_node.public_dns_name))
        # Spark master WebUI listens on 18080 (advertised below).
        wait_for_tcp_port(master_node.public_dns_name, port=18080)
        print("Spark master ready:")
        print("Spark WebUI: http://{h}:18080".format(
            h=master_node.public_dns_name))
    if action == "destroy":
        master_node = find_instance_by_name(conn, cluster_name + '-master')
        if master_node:
            print("Terminating master...")
            # Retag first so later lookups skip the terminated instance.
            conn.create_tags(
                [master_node.id],
                {"Name": "{c}-master-terminated".format(c=cluster_name)})
            master_node.terminate()
        print("Shutting down autoscaling group...")
        autoscale = boto.ec2.autoscale.connect_to_region(opts.region)
        aglist = autoscale.get_all_groups(names=[cluster_name + "-ag"])
        ag = None
        if aglist:
            ag = aglist[0]
            ag.shutdown_instances()
            instances_ids = [i.instance_id for i in ag.instances]
            instances = conn.get_only_instances(instances_ids)
        else:
            instances = []
        lclist = autoscale.get_all_launch_configurations(
            names=[cluster_name + "-lc"])
        lc = None
        if lclist:
            lc = lclist[0]
        wait_for_cluster_state(conn, instances,
                               cluster_state="terminated",
                               name="instances")
        # Brief pause so AWS registers the terminations before deletion.
        time.sleep(10)
        if ag:
            try:
                ag.delete()
            # Modernized from Py2-only "except Exception, e" syntax.
            except Exception as e:
                print("Couldn't delete autoscaling group: %s" % e)
        if lc:
            try:
                lc.delete()
            except Exception as e:
                print("Couldn't delete launch configuration: %s" % e)