def apply_lease_times_after_pool_fulfilled(self, appliance_pool_id, time_minutes):
    """Finalize a pool once all its appliances are ready, or keep retrying.

    When the pool is fulfilled, fan out ``apply_lease_times`` for every
    appliance, then mark the pool finished. When it is not fulfilled yet, try
    to swap still-provisioning appliances for ready ones from the shepherd and
    retry this task; after the retry budget is exhausted, destroy the pool.

    Args:
        appliance_pool_id: Primary key of the :class:`AppliancePool` to check.
        time_minutes: Lease duration to apply to each appliance.

    Returns:
        ``False`` if the pool does not exist; otherwise ``None`` (the Celery
        ``self.retry`` call raises to reschedule).
    """
    try:
        pool = AppliancePool.objects.get(id=appliance_pool_id)
    except ObjectDoesNotExist as e:
        # Pool vanished (e.g. killed meanwhile); nothing left to do.
        self.logger.error(
            "It seems such appliance pool %s doesn't exist: %s", appliance_pool_id, e)
        return False
    if pool.fulfilled:
        pool.logger.info("Applying lease time and renaming appliances")
        for appliance in pool.appliances:
            apply_lease_times.delay(appliance.id, time_minutes)
        with transaction.atomic():
            pool.finished = True
            pool.save(update_fields=['finished'])
        # Lazy %-formatting for consistency with the other log calls here.
        pool.logger.info("Pool %s setup is finished", appliance_pool_id)
    else:
        # Look whether we can swap any provisioning appliance with some in shepherd
        pool.logger.info("Pool isn't fulfilled yet")
        unfinished = list(
            Appliance.objects.filter(
                appliance_pool=pool, ready=False, marked_for_deletion=False).all())
        # Shuffle so repeated retries don't always release the same appliances.
        random.shuffle(unfinished)
        if unfinished:
            pool.logger.info('There are %s unfinished appliances', len(unfinished))
            # Take as many ready shepherd appliances as possible, then release
            # an equal number of still-provisioning ones back to the shepherd.
            n = Appliance.give_to_pool(pool, len(unfinished))
            with transaction.atomic():
                for _ in range(n):
                    appl = unfinished.pop()
                    appl.appliance_pool = None
                    appl.save(update_fields=['appliance_pool'])
        try:
            pool.logger.info("Retrying to apply lease again")
            self.retry(args=(appliance_pool_id, time_minutes), countdown=30, max_retries=120)
        except MaxRetriesExceededError:
            # Bad luck, pool fulfillment failed. So destroy it.
            pool.logger.error(
                "Waiting for fulfillment failed. Initiating the destruction process.")
            pool.kill()
def process_delayed_provision_tasks(self):
    """This picks up the provisioning tasks that were delayed due to
    concurrency limit of provision.

    Goes one task by one and when some of them can be provisioned, it starts
    the provisioning and then deletes the task.
    """
    for task in DelayedProvisionTask.objects.order_by("id"):
        # Pool was cancelled/killed meanwhile — drop the stale task.
        if task.pool.not_needed_anymore:
            task.delete()
            continue
        # Try retrieve from shepherd
        appliances_given = Appliance.give_to_pool(task.pool, 1)
        if appliances_given == 0:
            # No free appliance in shepherd, so do it on our own
            tpls = task.pool.possible_provisioning_templates
            if task.provider_to_avoid is not None:
                filtered_tpls = [
                    tpl for tpl in tpls if tpl.provider != task.provider_to_avoid]
                if filtered_tpls:
                    # There are other providers to provision on, so try one of them
                    tpls = filtered_tpls
                # If there is no other provider to provision on, we will use the
                # original list. This will cause additional rejects until the
                # provider quota is met
            if tpls:
                # Kick off a clone on the first eligible template and retire the task.
                clone_template_to_pool(tpls[0].id, task.pool.id, task.lease_time)
                task.delete()
            else:
                # Try freeing up some space in provider
                for provider in task.pool.possible_providers:
                    # Shepherd appliances that this pool cannot use are fair
                    # game to kill to make room.
                    appliances = provider.free_shepherd_appliances.exclude(
                        **task.pool.appliance_filter_params)
                    if appliances:
                        appl = random.choice(appliances)
                        self.logger.info(
                            'Freeing some space in provider by '
                            'killing appliance {}/{}'.format(appl.id, appl.name))
                        Appliance.kill(appl)
                        break  # Just one
        else:
            # There was a free appliance in shepherd, so we took it and we
            # don't need this task more
            task.delete()
def request_appliance_pool(self, appliance_pool_id, time_minutes):
    """Fill an appliance pool, provisioning new appliances where needed.

    Gives the maximum possible amount of spinned-up appliances to the
    specified pool and then, if there is a need to spin up more appliances,
    spins them up via the ``clone_template_to_pool`` task (or records a
    :class:`DelayedProvisionTask` when no template is currently available).
    Finally schedules ``apply_lease_times_after_pool_fulfilled`` to finish
    the pool setup.

    Args:
        appliance_pool_id: Primary key of the :class:`AppliancePool` to fill.
        time_minutes: Lease duration for the pool's appliances.
    """
    # Lazy %-formatting keeps log calls consistent and defers string building.
    self.logger.info(
        "Appliance pool %s requested for %s minutes.", appliance_pool_id, time_minutes)
    pool = AppliancePool.objects.get(id=appliance_pool_id)
    # First satisfy as much of the request as possible from the shepherd.
    n = Appliance.give_to_pool(pool)
    # Provision the remainder; the index itself is unused.
    for _ in range(pool.total_count - n):
        tpls = pool.possible_provisioning_templates
        if tpls:
            template_id = tpls[0].id
            clone_template_to_pool(template_id, pool.id, time_minutes)
        else:
            # No usable template right now — defer for the periodic
            # delayed-provision processor to pick up later.
            with transaction.atomic():
                task = DelayedProvisionTask(pool=pool, lease_time=time_minutes)
                task.save()
    apply_lease_times_after_pool_fulfilled.delay(appliance_pool_id, time_minutes)