def _remove_from_worker_pool():
    """
    Ensure this instance is shut down and unregistered from the worker pool.

    Decrements the desired capacity of the "LSDA Worker Pool" autoscaling
    group by one, unless the pool is already at (or below) its minimum
    size, in which case nothing is changed.
    """
    # Look up the autoscaling group backing the worker pool.
    group = AutoScaleConnection().get_all_groups(["LSDA Worker Pool"])[0]

    # Only shrink while we remain above the configured minimum size.
    if group.desired_capacity > group.min_size:
        group.desired_capacity -= 1
        group.update()
def main():
    """
    Main entry point for the automated scaling daemon.

    Polls the length of the "stable" RabbitMQ queue every 5 seconds. If the
    queue stays non-empty for the full DELAY window, increases the desired
    capacity of the "LSDA Worker Pool" autoscaling group by two instances
    (capped at the group's max size), then backs off to let the new
    instances boot. Loops forever.
    """
    # Configure logging.
    logging.basicConfig(
        format = "%(asctime)-15s %(levelname)5s %(message)s",
        level = logging.INFO
    )

    # Read configuration. safe_load prevents arbitrary-object construction
    # from YAML tags, and the context manager closes the handle (the
    # previous code leaked the open file).
    with open("config.yaml") as config_file:
        options = yaml.safe_load(config_file)

    # Connect to the RabbitMQ cluster.
    params = pika.ConnectionParameters(host=options["amqp"])
    conn = pika.BlockingConnection(params)
    channel = conn.channel()

    while True:
        # Require the queue to remain non-empty for the whole DELAY window
        # before scaling up; a single empty reading aborts the window.
        for _ in xrange(DELAY / 5):
            queue_length = get_queue_length(channel, "stable")
            logging.info("Queue length: {}".format(queue_length))

            if queue_length == 0:
                break
            time.sleep(5)
        else:
            # Queue stayed busy for the entire window -- scale up!
            group = AutoScaleConnection().get_all_groups(
                ["LSDA Worker Pool"])[0]
            group.desired_capacity = min(
                group.desired_capacity + 2, group.max_size)
            group.update()

            logging.info(
                "Triggering increase to {}".format(group.desired_capacity))

            # Give the new instances time to come up before polling again.
            time.sleep(300)

        # Wait until next polling event.
        time.sleep(30)