def test_reduce_by_freshness_jacuzzi():
    """Jacuzzi mode: 100% of the 10 fresh instances and 0% of the 30 old
    ones are subtracted, so a demand of 100 is reduced to 90."""
    fresh_launch = "2014-11-01T02:44:03.000Z"
    stale_launch = "2013-11-01T02:44:03.000Z"
    instances = []
    for launch_time in [fresh_launch] * 10 + [stale_launch] * 30:
        instance = mock.Mock()
        instance.launch_time = launch_time
        instances.append(instance)
    # The fresh launch_time corresponds to 1414809843 UNIX time; pick
    # "now" so those instances land just inside the jacuzzi freshness
    # window while the 2013 ones stay far outside it.
    now = 1414809843 - FRESH_INSTANCE_DELAY_JACUZZI + 10
    with mock.patch("time.time") as m_time:
        m_time.return_value = now
        assert reduce_by_freshness(
            100, instances, "meh_type", "fake_slaveset") == 90
def test_reduce_by_freshness():
    """Non-jacuzzi mode: 100% of the 10 fresh instances and 10% of the
    30 old ones (3) are subtracted, 13 in total, so 100 becomes 87."""
    fresh_launch = "2014-11-01T02:44:03.000Z"
    stale_launch = "2013-11-01T02:44:03.000Z"
    instances = []
    # 10 fresh instances (100% to be reduced) followed by 30 stale
    # instances (10% to be reduced).
    for launch_time in [fresh_launch] * 10 + [stale_launch] * 30:
        instance = mock.Mock()
        instance.launch_time = launch_time
        instances.append(instance)
    # The fresh launch_time corresponds to 1414809843 UNIX time; pick
    # "now" so those instances land just inside the freshness window.
    now = 1414809843 - FRESH_INSTANCE_DELAY + 10
    with mock.patch("time.time") as m_time:
        m_time.return_value = now
        assert reduce_by_freshness(100, instances, "meh_type") == 87
def aws_watch_pending(dburl, regions, builder_map, region_priorities,
                      spot_config, ondemand_config, dryrun,
                      latest_ami_percentage):
    """Start AWS instances to service pending buildbot jobs.

    Pending jobs from the scheduler db are mapped to demand counts keyed
    by (moz_instance_type, slaveset).  Demand is trimmed by instance
    freshness and already-running slaves, spot instances are requested
    first, and any shortfall is handed to on-demand (stopped instances
    are resumed).  Returns nothing; progress is reported via ``log`` and
    ``gr_log``.

    NOTE(review): indentation below is reconstructed from a flattened
    source; the nesting of the ``continue`` guards inside the
    global-limit branches should be verified against VCS history.
    """
    # First find pending jobs in the db
    pending = find_pending(dburl)
    if not pending:
        gr_log.add("pending", 0)
        log.debug("no pending jobs! all done!")
        return
    log.debug("processing %i pending jobs", len(pending))
    gr_log.add("pending", len(pending))

    # Mapping of (instance types, slaveset) to # of instances we want to
    # create.
    # Map pending builder names to instance types
    pending_builder_map = map_builders(pending, builder_map)
    gr_log.add("aws_pending", sum(pending_builder_map.values()))
    if not pending_builder_map:
        log.debug("no pending jobs we can do anything about! all done!")
        return

    to_create_spot = pending_builder_map
    to_create_ondemand = defaultdict(int)

    # For each moz_instance_type, slaveset, find how many are currently
    # running, and scale our count accordingly
    all_instances = aws_get_all_instances(regions)
    cloudtools.graphite.generate_instance_stats(all_instances)

    # Reduce the requirements, pay attention to freshness and running
    # instances
    to_delete = set()
    for (moz_instance_type, slaveset), count in to_create_spot.iteritems():
        running = filter_instances_by_slaveset(
            aws_get_running_instances(all_instances, moz_instance_type),
            slaveset)
        spot_running = filter_spot_instances(running)
        # Rewriting an existing key during iteritems() is safe in
        # Python 2 because the dict is not resized.
        to_create_spot[moz_instance_type, slaveset] = reduce_by_freshness(
            count, spot_running, moz_instance_type, slaveset)

        if to_create_spot[moz_instance_type, slaveset] == 0:
            log.debug("removing requirement for %s %s %s", "spot",
                      moz_instance_type, slaveset)
            to_delete.add((moz_instance_type, slaveset))

        # If slaveset is not None, and all our slaves are running, we should
        # remove it from the set of things to try and start instances for
        if slaveset and \
                slaveset.issubset(
                    set(i.tags.get('Name') for i in spot_running)):
            log.debug("removing %s %s since all the slaves are running",
                      moz_instance_type, slaveset)
            to_delete.add((moz_instance_type, slaveset))

    # Deletions are deferred until after iteration so the dict is not
    # mutated while being traversed.
    for moz_instance_type, slaveset in to_delete:
        del to_create_spot[moz_instance_type, slaveset]

    for (moz_instance_type, slaveset), count in to_create_spot.iteritems():
        log.debug("need %i spot %s for slaveset %s", count,
                  moz_instance_type, slaveset)
        # Cap by our global limits if applicable
        if spot_config and 'global' in spot_config.get('limits', {}):
            global_limit = spot_config['limits']['global'].get(
                moz_instance_type)
            # How many of this type of spot instance are running?
            n = len(
                filter_spot_instances(
                    aws_get_running_instances(all_instances,
                                              moz_instance_type)))
            log.debug("%i %s spot instances running globally", n,
                      moz_instance_type)
            if global_limit and n + count > global_limit:
                new_count = max(0, global_limit - n)
                log.debug(
                    "decreasing requested number of %s from %i to %i (%i out of %i running)",
                    moz_instance_type, count, new_count, n, global_limit)
                count = new_count
                if count <= 0:
                    continue

        started = request_spot_instances(
            all_instances, moz_instance_type=moz_instance_type,
            start_count=count, regions=regions,
            region_priorities=region_priorities, spot_config=spot_config,
            dryrun=dryrun, slaveset=slaveset,
            latest_ami_percentage=latest_ami_percentage)
        count -= started
        log.debug("%s - started %i spot instances for slaveset %s; need %i",
                  moz_instance_type, started, slaveset, count)
        gr_log.add("need.{moz_instance_type}.{jacuzzi_type}".format(
            moz_instance_type=moz_instance_type,
            jacuzzi_type=jacuzzi_suffix(slaveset)),
            count, collect=True)

        # Add leftover to ondemand
        to_create_ondemand[moz_instance_type, slaveset] += count

    for (moz_instance_type, slaveset), count in \
            to_create_ondemand.iteritems():
        log.debug("need %i ondemand %s for slaveset %s", count,
                  moz_instance_type, slaveset)
        # Cap by our global limits if applicable
        if ondemand_config and 'global' in ondemand_config.get('limits', {}):
            global_limit = ondemand_config['limits']['global'].get(
                moz_instance_type)
            # How many of this type of ondemand instance are running?
            n = len(
                filter_ondemand_instances(
                    aws_get_running_instances(all_instances,
                                              moz_instance_type)))
            log.debug("%i %s ondemand instances running globally", n,
                      moz_instance_type)
            if global_limit and n + count > global_limit:
                new_count = max(0, global_limit - n)
                log.debug(
                    "decreasing requested number of %s from %i to %i (%i out of %i running)",
                    moz_instance_type, count, new_count, n, global_limit)
                count = new_count
                if count <= 0:
                    continue

        # Spot may already have covered the full demand (count == 0 from
        # the += above), so skip before attempting a resume.
        if count < 1:
            continue

        # Check for stopped instances in the given regions and start them if
        # there are any
        started = aws_resume_instances(all_instances, moz_instance_type,
                                       count, regions, region_priorities,
                                       dryrun, slaveset)
        count -= started
        log.debug("%s - started %i instances for slaveset %s; need %i",
                  moz_instance_type, started, slaveset, count)
def aws_watch_pending(dburl, regions, builder_map, region_priorities,
                      spot_config, ondemand_config, dryrun,
                      latest_ami_percentage):
    """Start AWS instances to service pending buildbot jobs.

    Slaveset-free variant: demand is keyed by moz_instance_type only.
    Pending jobs are mapped to demand counts, trimmed by instance
    freshness and global limits, spot instances are requested first, and
    any shortfall is handed to on-demand (stopped instances are
    resumed).  Returns nothing; progress goes to ``log``/``gr_log``.

    NOTE(review): this redefines ``aws_watch_pending`` from earlier in
    this file and shadows it at import time -- confirm which of the two
    versions is intended to survive.

    NOTE(review): indentation below is reconstructed from a flattened
    source; the nesting of the ``continue`` guards inside the
    global-limit branches should be verified against VCS history.
    """
    # First find pending jobs in the db
    pending = find_pending(dburl)
    if not pending:
        gr_log.add("pending", 0)
        log.debug("no pending jobs! all done!")
        return
    log.debug("processing %i pending jobs", len(pending))
    gr_log.add("pending", len(pending))

    # Mapping of instance types to # of instances we want to
    # create.
    # Map pending builder names to instance types
    pending_builder_map = map_builders(pending, builder_map)
    gr_log.add("aws_pending", sum(pending_builder_map.values()))
    if not pending_builder_map:
        log.debug("no pending jobs we can do anything about! all done!")
        return

    to_create_spot = pending_builder_map
    to_create_ondemand = defaultdict(int)

    # For each moz_instance_type find how many are currently
    # running, and scale our count accordingly
    all_instances = aws_get_all_instances(regions)
    cloudtools.graphite.generate_instance_stats(all_instances)

    # Reduce the requirements, pay attention to freshness and running
    # instances
    to_delete = set()
    for moz_instance_type, count in to_create_spot.iteritems():
        running = aws_get_running_instances(all_instances, moz_instance_type)
        spot_running = filter_spot_instances(running)
        # Rewriting an existing key during iteritems() is safe in
        # Python 2 because the dict is not resized.
        to_create_spot[moz_instance_type] = reduce_by_freshness(
            count, spot_running, moz_instance_type)

        if to_create_spot[moz_instance_type] == 0:
            log.debug("removing requirement for %s %s", "spot",
                      moz_instance_type)
            to_delete.add((moz_instance_type))

    # Deletions are deferred until after iteration so the dict is not
    # mutated while being traversed.
    for moz_instance_type in to_delete:
        del to_create_spot[moz_instance_type]

    for (moz_instance_type), count in to_create_spot.iteritems():
        log.debug("need %i spot %s", count, moz_instance_type)
        # Cap by our global limits if applicable
        if spot_config and 'global' in spot_config.get('limits', {}):
            global_limit = spot_config['limits']['global'].get(
                moz_instance_type)
            # How many of this type of spot instance are running?
            n = len(filter_spot_instances(
                aws_get_running_instances(all_instances, moz_instance_type)))
            log.debug("%i %s spot instances running globally", n,
                      moz_instance_type)
            if global_limit and n + count > global_limit:
                new_count = max(0, global_limit - n)
                log.debug(
                    "decreasing requested number of %s from %i to %i (%i out of %i running)",
                    moz_instance_type, count, new_count, n, global_limit)
                count = new_count
                if count <= 0:
                    continue

        started = request_spot_instances(
            all_instances, moz_instance_type=moz_instance_type,
            start_count=count, regions=regions,
            region_priorities=region_priorities, spot_config=spot_config,
            dryrun=dryrun,
            latest_ami_percentage=latest_ami_percentage)
        count -= started
        log.debug("%s - started %i spot instances; need %i",
                  moz_instance_type, started, count)

        # Add leftover to ondemand
        to_create_ondemand[moz_instance_type] += count

    for moz_instance_type, count in to_create_ondemand.iteritems():
        log.debug("need %i ondemand %s", count, moz_instance_type)
        # Cap by our global limits if applicable
        if ondemand_config and 'global' in ondemand_config.get('limits', {}):
            global_limit = ondemand_config['limits']['global'].get(
                moz_instance_type)
            # How many of this type of ondemand instance are running?
            n = len(filter_ondemand_instances(
                aws_get_running_instances(all_instances, moz_instance_type)))
            log.debug("%i %s ondemand instances running globally", n,
                      moz_instance_type)
            if global_limit and n + count > global_limit:
                new_count = max(0, global_limit - n)
                log.debug(
                    "decreasing requested number of %s from %i to %i (%i out of %i running)",
                    moz_instance_type, count, new_count, n, global_limit)
                count = new_count
                if count <= 0:
                    continue

        # Spot may already have covered the full demand (count == 0 from
        # the += above), so skip before attempting a resume.
        if count < 1:
            continue

        # Check for stopped instances in the given regions and start them if
        # there are any
        started = aws_resume_instances(all_instances, moz_instance_type,
                                       count, regions, region_priorities,
                                       dryrun)
        count -= started
        log.debug("%s - started %i instances; need %i",
                  moz_instance_type, started, count)