Example #1
def get_machines(num_instances_to_use, aws_group_name):
    machines = []
    # connect to AWS
    ec2 = boto.ec2.connect_to_region("us-west-2")
    autoscale = boto.ec2.autoscale.AutoScaleConnection()

    # how many machines are currently running?
    group = autoscale.get_all_groups(names=[aws_group_name])[0]
    num_instances = len(group.instances)
    print(GetTime(), "Number of instances online:", len(group.instances))

    # switch on more machines if we need them
    if num_instances < num_instances_to_use:
        print(GetTime(), "Launching instances...")
        autoscale.set_desired_capacity(aws_group_name, num_instances_to_use)

        # tell us status every few seconds
        group = None
        while num_instances < num_instances_to_use:
            group = autoscale.get_all_groups(names=[aws_group_name])[0]
            num_instances = len(group.instances)
            print(GetTime(), "Number of instances online:", len(group.instances))
            sleep(3)

    # grab instance IDs
    instance_ids = [i.instance_id for i in group.instances]
    print(GetTime(), "These instances are online:", instance_ids)

    instances = ec2.get_only_instances(instance_ids)
    for instance in instances:
        print(GetTime(), "Waiting for instance", instance.id, "to boot...")
        while 1:
            instance.update()
            if instance.state == "running":
                print(GetTime(), instance.id, "is running!")
                break
            sleep(3)
    for instance_id in instance_ids:
        print(GetTime(), "Waiting for instance", instance_id, "to report OK...")
        while 1:
            statuses = ec2.get_all_instance_status([instance_id])
            if len(statuses) < 1:
                sleep(3)
                continue
            status = statuses[0]
            if status.instance_status.status == "ok":
                print(GetTime(), instance_id, "reported OK!")
                break
            sleep(3)
    for instance in instances:
        machines.append(Machine(instance.ip_address))
    return machines
def update_group(launch_configuration):
    """Update auto scalling group with the new launch configuration"""
    autoscale = boto.ec2.autoscale.connect_to_region(REGION)
    group = autoscale.get_all_groups(names=[SCALE_GROUP])[0]
    setattr(group, 'launch_config_name', launch_configuration)
    group.update()
    print "Updated autoscale group: " + group.name
Example #3
def get_autoscaling_group(group_id):
    autoscale = boto.ec2.autoscale.connect_to_region(region)
    groups = autoscale.get_all_groups()
    for group in groups:
        if group.name == group_id:
            return group
    return None
def create_autoscaling_group(autoscale, cluster_name, master_node, opts, slave_group):
    lclist = autoscale.get_all_launch_configurations(
        names=[cluster_name + "-lc"])
    if lclist:
        lc = lclist[0]
    else:
        lc = LaunchConfiguration(
            name=cluster_name + "-lc",
            image_id=opts.ami,
            key_name=opts.key_pair,
            security_groups=[slave_group.id],
            instance_type=opts.instance_type,
            user_data="SPARK_MASTER=" + master_node.private_dns_name + "\n",
            instance_monitoring=True,
            spot_price=opts.spot_price)
        autoscale.create_launch_configuration(lc)
    aglist = autoscale.get_all_groups(names=[cluster_name + "-ag"])
    if aglist:
        ag = aglist[0]
    else:
        ag = AutoScalingGroup(group_name=cluster_name + "-ag",
                              launch_config=lc,
                              min_size=2,
                              max_size=8,
                              connection=autoscale,
                              vpc_zone_identifier=opts.subnet_id,
                              availability_zones=[opts.zone])
        autoscale.create_auto_scaling_group(ag)
    as_tag = boto.ec2.autoscale.Tag(key='Name',
                                    value=cluster_name + '-worker',
                                    propagate_at_launch=True,
                                    resource_id=cluster_name + "-ag")
    autoscale.create_or_update_tags([as_tag])
def revive_environment(autoscale, environment):
    """Revive Environment on demand
    :type autoscale: boto.ec2.autoscale.AutoScaleConnection
    :param autoscale: AutoScale connection object
    :type environment: str
    :param environment: Environment Name
    """

    logging.info("Started Reviving Environment: %s" % environment)
    groups = autoscale.get_all_groups()
    for group in groups:
        try:
            group_tags = dict()
            for tag in group.tags:
                group_tags[tag.key] = tag.value

            group_environment = group_tags['environment']

            if environment == group_environment:
                desired_scaling_state = get_auto_scaling_group_state(group, group_tags)
                logging.info("Reviving Auto-Scaling Group: %s. Current state: { min: %s, desired: %s }." %
                             (group.name, group.min_size, group.desired_capacity))
                autoscale.create_scheduled_group_action(
                    as_group=group.name,
                    name="reviving now %s" % group.name,
                    time=(datetime.utcnow() + timedelta(seconds=1*60)),
                    desired_capacity=desired_scaling_state['desired'],
                    min_size=desired_scaling_state['min'],
                    max_size=desired_scaling_state['max']
                )

                group.update()
        except Exception as e:
            logging.error("Error while reviving environment: %s with Auto-Scaling Group %s: %s" %
                          (environment, group.name, e))
Example #6
def create_autoscaling_group(autoscale, cluster_name, master_node, opts,
                             slave_group):
    lclist = autoscale.get_all_launch_configurations(
        names=[cluster_name + "-lc"])
    if lclist:
        lc = lclist[0]
    else:
        lc = LaunchConfiguration(name=cluster_name + "-lc",
                                 image_id=opts.ami,
                                 key_name=opts.key_pair,
                                 security_groups=[slave_group.id],
                                 instance_type=opts.instance_type,
                                 user_data="SPARK_MASTER=" +
                                 master_node.private_dns_name + "\n",
                                 instance_monitoring=True,
                                 spot_price=opts.max_spot_price)
        autoscale.create_launch_configuration(lc)
    aglist = autoscale.get_all_groups(names=[cluster_name + "-ag"])
    if aglist:
        ag = aglist[0]
    else:
        ag = AutoScalingGroup(group_name=cluster_name + "-ag",
                              launch_config=lc,
                              min_size=opts.min_instances,
                              max_size=opts.max_instances,
                              connection=autoscale,
                              vpc_zone_identifier=opts.subnet_id,
                              availability_zones=[opts.zone])
        autoscale.create_auto_scaling_group(ag)
    as_tag = boto.ec2.autoscale.Tag(key='Name',
                                    value=cluster_name + '-worker',
                                    propagate_at_launch=True,
                                    resource_id=cluster_name + "-ag")
    autoscale.create_or_update_tags([as_tag])
def main():
    (opts, action, cluster_name) = parse_options()
    conn = boto.ec2.connect_to_region(opts.region)
    opts = validate_opts(conn, opts, action)

    if action == "launch":
        (master_group, slave_group) = setup_security_groups(conn, cluster_name, opts)
        master_node = find_instance_by_name(conn, cluster_name + '-master')
        if not master_node:
            master_node = start_master(conn, opts, cluster_name, master_group)
        print("Master node: {m}".format(m=master_node))
        wait_for_cluster_state(
            conn=conn,
            cluster_instances=([master_node]),
        )
        autoscale = boto.ec2.autoscale.connect_to_region(opts.region)
        create_autoscaling_group(autoscale, cluster_name, master_node, opts, slave_group)
        create_autoscaling_policy(autoscale, cluster_name, opts)

        wait_for_tcp_port(master_node.public_dns_name)
        print("SSH ready:")
        print("ssh ubuntu@{h}".format(h=master_node.public_dns_name))
        wait_for_tcp_port(master_node.public_dns_name, port=18080)
        print("Spark master ready:")
        print(
            "Spark WebUI: http://{h}:18080".format(h=master_node.public_dns_name))
    if action == "destroy":
        master_node = find_instance_by_name(conn, cluster_name + '-master')
        if master_node:
            print("Terminating master...")
            conn.create_tags([master_node.id], {"Name": "{c}-master-terminated".format(c=cluster_name)})
            master_node.terminate()
        print("Shutting down autoscaling group...")
        autoscale = boto.ec2.autoscale.connect_to_region(opts.region)
        aglist = autoscale.get_all_groups(names=[cluster_name + "-ag"])
        ag = None
        if aglist:
            ag = aglist[0]
            ag.shutdown_instances()
            instances_ids = [i.instance_id for i in ag.instances]
            instances = conn.get_only_instances(instances_ids)
        else:
            instances = []
        lclist = autoscale.get_all_launch_configurations(names=[cluster_name + "-lc"])
        lc = None
        if lclist:
            lc = lclist[0]
        wait_for_cluster_state(
            conn, instances, cluster_state="terminated", name="instances")
        time.sleep(10)
        if ag:
            try:
                ag.delete()
            except Exception, e:
                print("Couldn't delete autoscaling group: %s" % e)
        if lc:
            try:
                lc.delete()
            except Exception, e:
                print("Couldn't delete launch configuration: %s" % e)
Example #8
def get_autoscaling_group(group_name):
    autoscale = get_aws_connection('autoscale')
    groups = autoscale.get_all_groups(names=[group_name])
    if len(groups) == 1:
        return groups[0]
    else:
        raise Exception('%i autoscaling groups found for name %s' % (len(groups), group_name))
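A short, hypothetical caller for the lookup above (the group name is a placeholder); the exception path reports a missing or ambiguous name:

# Hypothetical usage of get_autoscaling_group from Example #8; the name is illustrative.
try:
    group = get_autoscaling_group('my-web-asg')
    print("Found group %s with desired capacity %s" % (group.name, group.desired_capacity))
except Exception as e:
    print("Lookup failed: %s" % e)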
Example #9
def get_autoscaling_group(group_id):
    autoscale = boto.ec2.autoscale.connect_to_region(region)
    groups = autoscale.get_all_groups()
    for group in groups:
        if group.name == group_id:
            return group
    return None
def is_first_of_asg_group():
    """
    Returns True if the current instance is the first instance (sorted by
    instance_id) in its ASG group.

    XXX: some methods can be generalized and moved to a common.py file
    """
    # Collect together instance data
    try:
        instance_identity = get_instance_identity()
        instance_id = instance_identity['document']['instanceId']
        instance_region = instance_identity['document']['availabilityZone'].strip()[:-1]
        conn = boto.ec2.connect_to_region(instance_region)
        instance_data = conn.get_all_instances(
            instance_ids=[instance_id])[0].instances[0]
    except boto.exception.AWSConnectionError as e:
        log.error("There was a problem collecting instance data, '{}'").format(e.message)
        return False

    # my autoscaling group
    asg_group = instance_data.tags['aws:autoscaling:groupName']

    try:
        autoscale = boto.ec2.autoscale.connect_to_region(instance_region)
        group = autoscale.get_all_groups(names=[asg_group])[0]
        sorted_instances = sorted(group.instances, key=operator.attrgetter('instance_id'))
    except boto.exception.AWSConnectionError as e:
        log.error("There was a problem collecting instance data, '{}'").format(e.message)
        return False

    if sorted_instances[0].instance_id == instance_id:
        return True
    else:
        return False
def update_group(launch_configuration):
    """Update auto scalling group with the new launch configuration"""
    autoscale = boto.ec2.autoscale.connect_to_region(REGION)
    group = autoscale.get_all_groups(names=[SCALE_GROUP])[0]
    setattr(group, 'launch_config_name', launch_configuration)
    group.update()
    print "Updated autoscale group: " + group.name
Example #12
def increaseCap():
	group = autoscale.get_all_groups(names=['sw-ASG'])[0]
	numInstances = group.desired_capacity
	maxInstances = group.max_size
	if maxInstances >= (numInstances + 1):
		autoscale.set_desired_capacity('sw-ASG', numInstances + 1)
		print("Size increased from "+ str(numInstances) + " to " + str(numInstances+1))
	else: 
		print("You are already at Max capacity")
Example #13
def decreaseCap():
	group = autoscale.get_all_groups(names=['sw-ASG'])[0]
	numInstances = group.desired_capacity
	minInstances = group.min_size
	if numInstances -1 >= minInstances:
		autoscale.set_desired_capacity('sw-ASG', numInstances -1)
		print("Size increased from "+ str(numInstances) + " to " + str(numInstances-1))
	else: 
		print("You are already at Min capacity")
Example #14
def index():
    global reservation
    autoscale = boto.ec2.autoscale.AutoScaleConnection()
    ec2 = boto.ec2.connect_to_region('us-west-2')
    group = autoscale.get_all_groups(names=['Daala'])[0]
    num_instances = len(group.instances)
    instance_ids = [i.instance_id for i in group.instances]
    if len(instance_ids) > 0:
      instances = ec2.get_only_instances(instance_ids)
    else:
      instances = []
    return render_template('index.html',instances=instances,num_instances=num_instances, reservation=reservation)
Example #15
def sync_group():
    id = "i-f4bd6ea9"
    i = ec2.get_only_instances([id])[0]

    g = i.tags['aws:autoscaling:groupName']

    s = autoscale.get_all_groups(names=[g])[0]

    for ai in s.instances:
        #ai_id = ai.instance_id
        # get instance
        # get public dns
        # check r53
        pass
def handle_auto_scaling_group(autoscale, environment, roles, state, scheduled_time):
    """ Handle Auto-Scaling-Group Instances
    :type autoscale: boto.ec2.autoscale.AutoScaleConnection
    :param autoscale: AutoScale connection object
    :type environment: str
    :param environment: Environment name
    :type roles: list
    :param roles: Role Name
    :type state: str
    :param state: Desired State
    :type scheduled_time: datetime
    :param scheduled_time: Scheduled Time
    :returns: None
    """

    logging.info("Started Processing Auto Scaling Groups")
    groups = autoscale.get_all_groups()

    all_roles = get_roles(autoscale)
    roles_size = len(roles)
    kept_role = None

    if roles_size >= len(all_roles):
        kept_role = roles.pop()

    is_role_legal = any(role in all_roles for role in roles)

    if not is_role_legal:
        logging.error("Role: %s is not a legal role" % roles[0])
        sys.exit(1)

    for group in groups:
        try:
            group_tags = dict()
            for tag in group.tags:
                group_tags[tag.key] = tag.value

            group_role = group_tags['role']
            group_environment = group_tags['environment']

            if kept_role == group_role and environment == group_environment:
                _bring_auto_scaling_group_to_desired_state(autoscale, group, group_tags, state, scheduled_time, 1)

            if environment == group_environment and group_role in roles:
                logging.info("Found AutoScaling Group %s with role: %s." %
                             (group.name, group_role))
                _bring_auto_scaling_group_to_desired_state(autoscale, group, group_tags, state, scheduled_time)
        except Exception, e:
            logging.error("Error while processing Auto-Scaling Group %s: %s" %
                          (group.name, e))
Example #17
def wait_for_healthy_instances(autoscale, asg_name, min_count = 2):
    while True:
        asg = autoscale.get_all_groups(names=[asg_name])[0]
        print "Instances: %s" % asg.instances

        healthy_instances = [instance for instance in asg.instances if instance.lifecycle_state == 'InService']
        print "Healthy Instances: %s" % healthy_instances

        count = len(healthy_instances)
        if count < min_count:
            print "Need at least {} instances in service to continue, got {}, waiting...".format(min_count, count)
            time.sleep(30)
            asg.update()
        else:
            break
Example #18
def usage():
    group = autoscale.get_all_groups(names=['sw-ASG'])[0]
    reservations = ec2.get_all_instances(filters={
        "tag:SteveWalsh": "SteveWalsh",
        "instance-state-name": "running"
    })
    numInstances = 1  # set to one to stop errors
    avgCPU = 0

    for x in range(numInstances):
        try:
            instance = reservations[x].instances[0]
            num = instance.id
            stats = cw.get_metric_statistics(
                300,
                datetime.datetime.now() - datetime.timedelta(seconds=600),
                datetime.datetime.now(),
                'CPUUtilization',
                'AWS/EC2',
                'Average',
                dimensions={'InstanceId': [instance.id]})
            print(instance.state)
            #print(stats)
            dict = stats[0]
            if instance.state == "running":
                numInstances += 1
                print(numInstances)
            avgCPU = avgCPU + dict['Average']
            #print ("Instance " + str(x) + " CPU usage is :" + str(dict['Average']) + " state : " +instance.state)
            pass
        except Exception as e:
            raise e
    #print(str(avgCPU) +" : " + str(numInstances-1))
    avgCPU = avgCPU / (numInstances - 1)
    print("The average CPU usage across all instances is : " + str(avgCPU))

    if (avgCPU > 40):
        print("Average CPU usage is above 40")
        print("Want to add another Instance to ASG")
        correct = True
        while (correct):
            ans = input("Input (y or n) : ")
            if ans.lower() == 'y':
                increaseCap()
                correct = False
            if ans.lower() == 'n':
                correct = False
Example #19
	def aws_autoscaling_discovery(self, region=settings['DEFAULT_REGION']):
		as_group_info = {}
		as_groups = []
		result = {}

		self.logger.debug("Discovering autoscaling group names")

		try:
			autoscale = boto.ec2.autoscale.connect_to_region(region)

			for as_group in autoscale.get_all_groups():
				as_group_info['{#ASGROUPNAME}'] = as_group.name
				as_groups.append(as_group_info.copy())
			
			result['data'] = as_groups

		except Exception, e:
			result = {}
			self.logger.exception("Error discovering autoscaling group names. Details: %s" % (e))
Example #21
def get_machine_info():
    autoscale = boto.ec2.autoscale.AutoScaleConnection()
    ec2 = boto.ec2.connect_to_region('us-west-2')
    group = autoscale.get_all_groups(names=['Daala'])[0]
    num_instances = len(group.instances)
    instance_ids = [i.instance_id for i in group.instances]
    if len(instance_ids) > 0:
      instances = ec2.get_only_instances(instance_ids)
    else:
      instances = []
    machines = []
    for instance in instances:
        machine = {}
        machine['id'] = instance.id
        machine['ip_address'] = instance.ip_address
        machine['state'] = instance.state
        machine['status'] = ec2.get_all_instance_status([instance.id])[0].instance_status.status
        machines.append(machine)
    return json.dumps(machines)
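get_machine_info returns a JSON string, so a hypothetical caller would decode it before use:

# Hypothetical usage of get_machine_info from Example #21.
import json
machines = json.loads(get_machine_info())
for m in machines:
    print(m['id'], m['ip_address'], m['state'], m['status'])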
Example #22
def update_asg(ami, name='proxxy', region='us-east-1'):
    """Update Proxxy autoscaling group with a fresh AMI"""

    if ami is None:
        print "AMI not specified"
        exit(1)

    ec2 = boto.ec2.connect_to_region(region)
    autoscale = boto.ec2.autoscale.connect_to_region(region, use_block_device_types=True)

    # get AMI metadata
    ami = ec2.get_all_images(image_ids=[ami])[0]
    new_launch_config_name = 'proxxy-'+ami.id

    # get autoscaling group
    autoscale_group = autoscale.get_all_groups(names=[name])[0]

    # get old launch configuration
    old_launch_config_name = autoscale_group.launch_config_name
    if old_launch_config_name == new_launch_config_name:
        print "Autoscale Group '%s' already uses launch config '%s'" % (name, new_launch_config_name)
        exit(0)

    old_launch_config = autoscale.get_all_launch_configurations(names=[old_launch_config_name])[0]
    print "Old Launch Configuration: %s" % old_launch_config

    # create new launch configuration based on the old one
    new_launch_config = _copy_launch_config(old_launch_config)
    new_launch_config.name = new_launch_config_name
    new_launch_config.image_id = ami.id
    autoscale.create_launch_configuration(new_launch_config)
    print "New Launch Configuration: %s" % new_launch_config

    # switch autoscaling group from old LC to new LC
    autoscale_group.launch_config_name = new_launch_config_name
    result = autoscale_group.update()
    print vars(result)

    # delete old launch configuration
    old_launch_config.delete()

    print "Done"
Example #23
def usage():
	roundNum =1 
	turn = 0
	while (True):
		print("This is round : " + str(roundNum))
		roundNum += 1
		group = autoscale.get_all_groups(names=['sw-ASG'])[0]
		reservations = ec2.get_all_instances(filters={"tag:SteveWalsh":"SteveWalsh", "instance-state-name" : "running"})
		numInstances = 1 # set to one to stop errors
		avgCPU = 1 # set to one to stop errors
		print("There is a total of " + str(len(reservations)) + " instance/s running")
	
		for x in range(numInstances):
			instance = reservations[x].instances[0]
			num = instance.id
			stats = cw.get_metric_statistics(300,datetime.datetime.now()-datetime.timedelta(seconds=600),datetime.datetime.now(),'CPUUtilization','AWS/EC2','Average',dimensions={'InstanceId':[instance.id]})
				
			if instance.state == "running":
				dict = stats[0]
				numInstances += 1
				avgCPU = avgCPU + dict['Average']
		avgCPU = (avgCPU - 1) / (numInstances - 1)  # drop the initial seed of 1, then average over the counted instances
		print("The average CPU usage across all instances is : " + str(avgCPU))

		if (avgCPU > 40):
			turn +=1
		if avgCPU < 15:
			turn -=1
		
		if turn > 2:
			increaseCap()
			turn=0

		if turn < -1:
			decreaseCap()
			turn = 0

		print("Sleep for 30 seconds")
		time.sleep(30)	
Example #24
def rotate_asg(name='proxxy', region='us-east-1'):
    """Perform a rolling restart on Proxxy autoscaling group"""

    ec2 = boto.ec2.connect_to_region(region)
    elb = boto.ec2.elb.connect_to_region(region)
    autoscale = boto.ec2.autoscale.connect_to_region(region)

    autoscale_group = autoscale.get_all_groups(names=[name])[0]
    load_balancers = elb.get_all_load_balancers(load_balancer_names=autoscale_group.load_balancers)

    old_instances = copy.copy(autoscale_group.instances)

    original_min_size = autoscale_group.min_size
    original_desired_capacity = autoscale_group.desired_capacity
    min_instances_in_service = max(2, original_min_size)
    if original_min_size < min_instances_in_service:
        print "Temporarily increasing min size to %s" % min_instances_in_service
        autoscale_group.min_size = min_instances_in_service
        autoscale_group.desired_capacity = max(autoscale_group.min_size, autoscale_group.desired_capacity)
        autoscale_group.update()
        time.sleep(5)

    wait_for_instances_in_service(load_balancers, min_count=min_instances_in_service)

    for old_instance in old_instances:
        print "Terminating instance %s" % old_instance.instance_id
        autoscale.terminate_instance(old_instance.instance_id, decrement_capacity=False)
        time.sleep(30)
        wait_for_instances_in_service(load_balancers, min_count=min_instances_in_service)

    if original_min_size < min_instances_in_service:
        print "Decreasing min size back to %s" % original_min_size
        autoscale_group.min_size = original_min_size
        autoscale_group.desired_capacity = original_desired_capacity
        autoscale_group.update()
        time.sleep(5)

    print "Done"
def usage():
	group = autoscale.get_all_groups(names=['sw-ASG'])[0]
	reservations = ec2.get_all_instances(filters={"tag:SteveWalsh":"SteveWalsh", "instance-state-name" : "running"})
	numInstances = 1 # set to one to stop errors
	avgCPU = 0
	
	for x in range(numInstances):
		try:
			instance = reservations[x].instances[0]
			num = instance.id
			stats = cw.get_metric_statistics(300,datetime.datetime.now()-datetime.timedelta(seconds=600),datetime.datetime.now(),'CPUUtilization','AWS/EC2','Average',dimensions={'InstanceId':[instance.id]})
			print(instance.state)
			#print(stats)
			dict = stats[0]
			if instance.state == "running":
				numInstances += 1
				print(numInstances)
			avgCPU = avgCPU + dict['Average']
			#print ("Instance " + str(x) + " CPU usage is :" + str(dict['Average']) + " state : " +instance.state)
			pass
		except Exception as e:
			raise e
	#print(str(avgCPU) +" : " + str(numInstances-1))
	avgCPU = avgCPU / (numInstances-1)
	print("The average CPU usage across all instances is : " + str(avgCPU))

	if (avgCPU > 40):
		print("Average CPU usage is above 40")
		print("Want to add another Instance to ASG")
		correct = True
		while(correct):
			ans = input("Input (y or n) : ")
			if ans.lower() == 'y':
				increaseCap()
				correct = False
			if ans.lower() == 'n':
				correct = False
  if args.image_id:
    ec2.get_all_images(image_ids=[args.image_id])
except exception.EC2ResponseError:
  print("It seems that '{0}' is not a valid image_id name or it does not exist  ".format(args.image_id))
  sys.exit(1)

autoscale = autoscale.connect_to_region(args.region)

try:
  as_launch_config = autoscale.get_all_launch_configurations(names = [args.launch_config_name]).pop()
except IndexError:
  print ("Couldn't found AutoScaling Launch Configuration")
  sys.exit(1)

try:
  as_group = autoscale.get_all_groups(names=[args.autoscale_group_name])[0]
except IndexError:
  print("Couldn't found autoscale group '{0}'".format(args.autoscale_group_name))
  sys.exit(1)

as_launch_config_tmp = copy(as_launch_config)
as_launch_config_new = copy(as_launch_config)

as_launch_config_tmp.name = "{0}-tmp".format(as_launch_config.name)
print("Creating temporary AutoScaling Launch Config named: {0}".format(as_launch_config_tmp.name))
autoscale.create_launch_configuration(as_launch_config_tmp)

print("Setting AutoScaling Group Launch Configuration to {0}".format(as_launch_config_tmp.name))
setattr(as_group, 'launch_config_name', as_launch_config_tmp.name)
as_group.update()
Example #27
def info():
    group = autoscale.get_all_groups(names=['sw-ASG'])[0]
    print("Currnt running instances are : " + str(group.desired_capacity))
    print("Max : " + str(group.max_size) + "   Min : " + str(group.min_size))
Example #28
def stop_instances():
    autoscale = boto.ec2.autoscale.AutoScaleConnection()
    group = autoscale.get_all_groups(names=['Daala'])[0]
    autoscale.set_desired_capacity('Daala',0)
    return 'ok'
def get_current_stats(region,asgroup):
    """takes region and asgroup as argument returns the group object"""
    autoscale = boto.ec2.autoscale.connect_to_region(region)
    group = autoscale.get_all_groups([str(asgroup)])[0]
    return group
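A short, hypothetical use of the get_current_stats helper above (the region and group name are borrowed from the Daala examples elsewhere on this page, not from this snippet):

# Hypothetical usage of get_current_stats.
group = get_current_stats('us-west-2', 'Daala')
print(group.name, group.desired_capacity, group.min_size, group.max_size)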
Example #30
num_instances_to_use = (31 + total_num_of_jobs) / 32

#...but lock AWS to a max number of instances
max_num_instances_to_use = 8

if num_instances_to_use > max_num_instances_to_use:
  print(GetTime(),'Ideally, we should use',num_instances_to_use,
    'AWS instances, but the max is',max_num_instances_to_use,'.')
  num_instances_to_use = max_num_instances_to_use

#connect to AWS
ec2 = boto.ec2.connect_to_region('us-west-2')
autoscale = boto.ec2.autoscale.AutoScaleConnection()

#how many machines are currently running?
group = autoscale.get_all_groups(names=['Daala'])[0]
num_instances = len(group.instances)
print(GetTime(),'Number of instances online:',len(group.instances))

#switch on more machines if we need them
if num_instances < num_instances_to_use:
    print(GetTime(),'Launching instances...')
    autoscale.set_desired_capacity('Daala',num_instances_to_use)

    #tell us status every few seconds
    group = None
    while num_instances < num_instances_to_use:
        group = autoscale.get_all_groups(names=['Daala'])[0]
        num_instances = len(group.instances)
        print(GetTime(),'Number of instances online:',len(group.instances))
        sleep(3)
Example #31
    except BotoServerError, e:
        # log
        return None

    clusters = []
    for stack in stacks:
            cluster = {'stack': stack}
            for resource in stack.list_resources():
                if resource.resource_type == 'AWS::ElasticLoadBalancing::LoadBalancer':
                    cluster['elb'] = elb.get_all_load_balancers(load_balancer_names=[resource.physical_resource_id])[0]

                elif resource.resource_type == 'AWS::AutoScaling::LaunchConfiguration':
                    kwargs = {'names': [resource.physical_resource_id]}
                    cluster['launch_config'] = autoscale.get_all_launch_configurations(**kwargs)[0]
                elif  resource.resource_type == 'AWS::AutoScaling::AutoScalingGroup':
                    cluster['group'] = autoscale.get_all_groups(names=[resource.physical_resource_id])[0]
                else:
                    raise Exception("Unkonw resource type '%s'" % resource.resource_type)

            clusters.append(cluster)

    # sort list by stack creation time
    clusters.sort(key=lambda x: x['stack'].creation_time, reverse=True)

    return clusters


def get_autoscaling_group(group_name):
    autoscale = get_aws_connection('autoscale')
    groups = autoscale.get_all_groups(names=[group_name])
    if len(groups) == 1:
Example #32
    sys.exit(1)

parser = argparse.ArgumentParser(
    description='Dynamic inventory for autoscaling groups')
parser.add_argument('--list', help="list hosts", action="store_true")
parser.add_argument('--host', help="list host vars")
args = parser.parse_args()

if args.host:
    print "{}"

if not args.list:
    sys.exit(1)

autoscale = boto.ec2.autoscale.connect_to_region(region)
ec2 = boto.ec2.connect_to_region(region)

inventory = {"_meta": {"hostvars": {}}}
for autoscaling_group in autoscale.get_all_groups():
    instance_ids = [i.instance_id for i in autoscaling_group.instances]
    instance_dns_names = [
        i.private_dns_name for r in ec2.get_all_instances(instance_ids)
        for i in r.instances
    ]
    name = get_tag(autoscaling_group.tags, 'Name')
    if name not in inventory:
        inventory[name] = {"hosts": []}
    inventory[name]['hosts'] += instance_dns_names

print json.dumps(inventory)
Example #33
def main():
    (opts, action, cluster_name) = parse_options()
    conn = boto.ec2.connect_to_region(opts.region)
    opts = validate_opts(conn, opts, action)

    if action == "launch":
        (master_group,
         slave_group) = setup_security_groups(conn, cluster_name, opts)
        master_node = find_instance_by_name(conn, cluster_name + '-master')
        if not master_node:
            master_node = start_master(conn, opts, cluster_name, master_group)
        print("Master node: {m}".format(m=master_node))
        wait_for_cluster_state(
            conn=conn,
            cluster_instances=([master_node]),
        )
        autoscale = boto.ec2.autoscale.connect_to_region(opts.region)
        create_autoscaling_group(autoscale, cluster_name, master_node, opts,
                                 slave_group)
        create_autoscaling_policy(autoscale, cluster_name, opts)

        wait_for_tcp_port(master_node.public_dns_name)
        print("SSH ready:")
        print("ssh ubuntu@{h}".format(h=master_node.public_dns_name))
        wait_for_tcp_port(master_node.public_dns_name, port=18080)
        print("Spark master ready:")
        print("Spark WebUI: http://{h}:18080".format(
            h=master_node.public_dns_name))
    if action == "destroy":
        master_node = find_instance_by_name(conn, cluster_name + '-master')
        if master_node:
            print("Terminating master...")
            conn.create_tags(
                [master_node.id],
                {"Name": "{c}-master-terminated".format(c=cluster_name)})
            master_node.terminate()
        print("Shutting down autoscaling group...")
        autoscale = boto.ec2.autoscale.connect_to_region(opts.region)
        aglist = autoscale.get_all_groups(names=[cluster_name + "-ag"])
        ag = None
        if aglist:
            ag = aglist[0]
            ag.shutdown_instances()
            instances_ids = [i.instance_id for i in ag.instances]
            instances = conn.get_only_instances(instances_ids)
        else:
            instances = []
        lclist = autoscale.get_all_launch_configurations(
            names=[cluster_name + "-lc"])
        lc = None
        if lclist:
            lc = lclist[0]
        wait_for_cluster_state(conn,
                               instances,
                               cluster_state="terminated",
                               name="instances")
        time.sleep(10)
        if ag:
            try:
                ag.delete()
            except Exception, e:
                print("Couldn't delete autoscaling group: %s" % e)
        if lc:
            try:
                lc.delete()
            except Exception, e:
                print("Couldn't delete launch configuration: %s" % e)
def info():
	group = autoscale.get_all_groups(names=['sw-ASG'])[0]
	print("Currnt running instances are : " + str(group.desired_capacity))
	print("Max : " + str(group.max_size) + "   Min : " + str(group.min_size))
    print(
        "It seems that '{0}' is not a valid image_id name or it does not exist  "
        .format(args.image_id))
    sys.exit(1)

autoscale = autoscale.connect_to_region(args.region)

try:
    as_launch_config = autoscale.get_all_launch_configurations(
        names=[args.launch_config_name]).pop()
except IndexError:
    print("Couldn't found AutoScaling Launch Configuration")
    sys.exit(1)

try:
    as_group = autoscale.get_all_groups(names=[args.autoscale_group_name])[0]
except IndexError:
    print("Couldn't found autoscale group '{0}'".format(
        args.autoscale_group_name))
    sys.exit(1)

as_launch_config_tmp = copy(as_launch_config)
as_launch_config_new = copy(as_launch_config)

as_launch_config_tmp.name = "{0}-tmp".format(as_launch_config.name)
print("Creating temporary AutoScaling Launch Config named: {0}".format(
    as_launch_config_tmp.name))
autoscale.create_launch_configuration(as_launch_config_tmp)

print("Setting AutoScaling Group Launch Configuration to {0}".format(
    as_launch_config_tmp.name))
Example #36
configs = {
    'default': 'lstn.config.DefaultConfig',
    'production': 'lstn.config.ProductionConfig'
}

env = 'default'
try:
    print 'Detecting if this is an ec2 instance...'
    instance_id = urllib2.urlopen(
        'http://169.254.169.254/latest/meta-data/instance-id',
        timeout=5).read()
    autoscale = boto.ec2.autoscale.connect_to_region('us-west-2')

    print 'Reading autoscaling groups...'
    groups = autoscale.get_all_groups(names=['Lstn'])

    if groups:
        print 'Found autoscale group'
        group = groups[0]

        if group.tags:
            print 'Reading autoscale group tags...'
            tag = group.tags[0]

            if tag.key == 'lstn:environment':
                print 'Found environment tag'
                env = tag.value
except:
    pass
Example #37
cloudwatch.create_alarm(scale_up_alarm)
#scale down
scale_down_alarm = MetricAlarm(
    name='scale_down_on_cpu', namespace='AWS/EC2', metric='CPUUtilization',
    statistic='Average', comparison='<', threshold='60', period='60',
    evaluation_periods=1, alarm_actions=[scale_down_policy.policy_arn],
    dimensions=alarm_dimensions)
cloudwatch.create_alarm(scale_down_alarm)
print 'both CloudWatch alarms created'

#load generator
ec2=boto.ec2.connect_to_region("us-east-1")
reservation=ec2.run_instances('ami-7aba0c12',key_name='jj',instance_type='m3.medium',security_groups=['http'])
time.sleep(60)
instance=reservation.instances[0]
id=instance.id

#loadDns=instance.public_dns_name
#get load dns
groupw=autoscale.get_all_groups(names=['jianGroup'])[0]
instances=ec2.get_only_instances(instance_ids=[id])
loadDns=instances[0].public_dns_name
print 'load generator dns is :%s'%(loadDns)

time.sleep(10)
#authenticate
response = urllib2.urlopen('http://'+loadDns+'/username?username=jianj')
print 'load generator authenticated'


#warm up
response=urllib2.urlopen('http://'+loadDns+'/warmup?dns='+lbdns+'&testId=jian')
print 'warm up started'
time.sleep(310)
print 'warmed up first time'
Example #38
import os
import datetime

configs = {
  'default': 'screener.config.DefaultConfig',
  'production': 'screener.config.ProductionConfig',
}

env = 'default'
try:
  print 'Detecting if this is an ec2 instance...'
  instance_id = urllib2.urlopen('http://169.254.169.254/latest/meta-data/instance-id', timeout=5).read()
  autoscale = boto.ec2.autoscale.connect_to_region('us-west-2')

  print 'Reading autoscaling groups...'
  groups = autoscale.get_all_groups(names=['Lstn'])

  if groups:
    for group in groups:
      instances = [instance.instance_id for instance in group.instances]
      if instance_id in instances:
        print 'Found autoscale group'

        if group.tags:
          print 'Reading autoscale group tags...'

          for tag in group.tags:
            if tag.key == 'screener:environment':
              print 'Found environment tag'
              env = tag.value
except:
Example #39
import boto.ec2.cloudwatch
import boto.ec2.autoscale
import datetime
import subprocess
import os
import time


ec2 = boto.ec2.connect_to_region('eu-west-1')
reservations = ec2.get_all_instances(filters={"tag:SteveWalsh":"SteveWalsh", "instance-state-name" : "running"})

cw = boto.ec2.cloudwatch.connect_to_region('eu-west-1') #Setting cloudWatch

autoscale = boto.ec2.autoscale.connect_to_region('eu-west-1') #Setting Autoscale

group = autoscale.get_all_groups(names=['sw-ASG'])[0] #Setting group



def usage():
	roundNum =1 
	turn = 0
	while (True):
		print("This is round : " + str(roundNum))
		roundNum += 1
		group = autoscale.get_all_groups(names=['sw-ASG'])[0]
		reservations = ec2.get_all_instances(filters={"tag:SteveWalsh":"SteveWalsh", "instance-state-name" : "running"})
		numInstances = 1 # set to one to stop errors
		avgCPU = 1 # set to one to stop errors
		print("There is a total of " + str(len(reservations)) + " instance/s running")
	
Example #40
def main():

    # pseudocode (repeats in code comments below)
    # check for autoscale group
    # if autoscale group not present, create it
    # else read launchconfig name from asg
    # define new launchconfig
    # assign launchconfig
    # delete old launchconfig - we can only have so many



    # read config
    print "reading configuration ..."
    config = ConfigParser.SafeConfigParser(allow_no_value=True)
    # This assumes that the file is either in the local directory being ran, or in the home/aws/ folder
    # We have to use the second option because XLD does not run the script in the same location that it's located
    config.read(['ec2-deploy.conf', os.path.expanduser('/var/lib/jenkins/workspace/AWS-Demo/cm/ec2-deploy.conf')])
    # check for autoscale group
    # FIXME: Should connect to region there
    # FIXME: proxy information?
    print "connecting to ec2..."
    #asconn = boto.ec2.autoscale.AutoScaleConnection(aws_access_key_id=config.get('auth', 'AWS_ACCESS_KEY_ID'), 
        #aws_secret_access_key=config.get('auth', 'AWS_SECRET_ACCESS_KEY'), security_token=config.get('auth', 'AWS_SECURITY_TOKEN'))
    #boto.set_stream_logger('boto')
    asconn = boto.ec2.autoscale.AutoScaleConnection()

    print "validating autoscaling group ..."
    asg = get_autoscale_group(config.get('autoscalegroup','name'), asconn)
    oldlc = None
    # read userdata
    userdata = ""
    with open(config.get('launchconfig', 'userdata_filename'), 'r') as udf:
        userdata=udf.read() 
    
    # define new launchconfig

    timenow = str(datetime.now()).split(".")[0]
    timenow = timenow.replace(" ", "").replace("-", "").replace(":", "")

    lcname = config.get('autoscalegroup', 'name') + "-lc-" + timenow
    print "Creating new launch config '{}'".format(lcname)
    newlc = LaunchConfiguration(
        name = lcname,
        image_id = config.get('launchconfig', 'ami'),
        key_name = config.get('launchconfig', 'keypair'),
        instance_type = config.get('launchconfig', 'instancetype'),
        # security_groups = sgnames_to_list( config.get('launchconfig', 'sgnames') , config.get('ec2', 'region')),
        security_groups = str(config.get('launchconfig', 'security_groups')).split(','),
        # classic_link_vpc_security_groups = str(config.get('launchconfig', 'security_groups')).split(','),         
        user_data = userdata,
        associate_public_ip_address = True,
        delete_on_termination = True,
        instance_monitoring = False,
        instance_profile_name = config.get('launchconfig', 'instance_profile_name')
        )
    print "new lc created"
    asconn.create_launch_configuration(newlc)
    print "lc associated, now checking if asg exists"
    # if autoscale group not present, create it
    if asg is None:
        print "Autoscaling Group '{}' not found, creating...".format(config.get('autoscalegroup', 'name'))
        azlist = str(config.get('autoscalegroup', 'azs')).split(',')
        elblist = str(config.get('autoscalegroup', 'elbs')).split(',')
        vpclist = str(config.get('launchconfig', 'subnet')).split(',')
        asg = AutoScalingGroup(
            connection = asconn,
            name = config.get('autoscalegroup', 'name'),
            load_balancers = elblist,
            availability_zones = azlist,
            desired_capacity = config.getint('autoscalegroup','desired_capacity'),
            launch_config = newlc,
            max_size = config.getint('autoscalegroup','max_size'),
            min_size = config.getint('autoscalegroup','min_size'),
            vpc_zone_identifier = vpclist
            )
        asconn.create_auto_scaling_group(asg)
        
    else:
        # else read launchconfig name from asg
        # Note that the oldlc is just the name of the lc we're about to delete
        oldlc = asg.launch_config_name
        print "Replacing launch configuration '{}' with new lc '{}'.".format(oldlc, lcname)
        asg.endElement("LaunchConfigurationName", lcname, asconn)
        asg.update()
        # this part now terminates each instance individually
        autoscale = boto.connect_autoscale()
        ec2 = boto.connect_ec2()
        group = autoscale.get_all_groups([config.get('autoscalegroup', 'name')])[0]
        instance_ids = [i.instance_id for i in group.instances]
        # reservations = ec2.get_all_instances(instance_ids)
        # instances = [i for r in reservations for i in r.instances]
        for i in instance_ids:
            asconn.terminate_instance(i,decrement_capacity=False)

    
    # delete old launchconfig - we can only have so many

    if oldlc is not None:
        print "Deleting old launch configuration ... "
        asconn.delete_launch_configuration(oldlc)
        print "done."
    
    # end main
    print "Now injecting the Name Tag"
    # can't figure out a better way to inject the boto Tag tag class - will need to fix later to make it look better
    taglist = Tag(key='Name', value=config.get('tags', 'name'), propagate_at_launch=True, resource_id=config.get('autoscalegroup', 'name'))
    asconn.create_or_update_tags([taglist])
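Example #40 uses LaunchConfiguration, AutoScalingGroup, Tag, ConfigParser and datetime without showing its imports; a plausible header for the script, assuming the boto 2 API, would be:

# Assumed imports for Example #40, inferred from usage (boto 2 era).
import os
import ConfigParser
from datetime import datetime
import boto
import boto.ec2.autoscale
from boto.ec2.autoscale import LaunchConfiguration, AutoScalingGroup, Tag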