def on_nodegroup_delete(cls, ng):
    """React to a nodegroup removal by re-running the dnsmasq update task.

    :param ng: the nodegroup that was deleted (not inspected here;
        the whole dnsmasq configuration is regenerated)
    :raises errors.TaskAlreadyRunning: if an update task is in flight
    :raises ValueError: if the update task finished in the error state
    """
    try:
        update_task = UpdateDnsmasqTaskManager().execute()
    except errors.TaskAlreadyRunning:
        # Re-raise with the user-facing message that names the
        # conflicting dnsmasq-update task.
        raise errors.TaskAlreadyRunning(
            errors.UpdateDnsmasqTaskIsRunning.message
        )
    if update_task.status != consts.TASK_STATUSES.error:
        return
    raise ValueError(update_task.message)
def check_running_task(self, task_name):
    """Fail if a task named *task_name* is running; purge finished ones.

    :param task_name: name of the task(s) to look up
    :raises errors.TaskAlreadyRunning: if any matching task is running
    """
    for existing in db().query(Task).filter_by(name=task_name):
        status = existing.status
        if status == "running":
            raise errors.TaskAlreadyRunning()
        if status in ("ready", "error"):
            # Finished tasks are obsolete — drop them so a new run
            # of the same task can be created.
            objects.Task.delete(existing)
            db().flush()
def check_running_task(self, task_names=None, delete_obsolete=None):
    """Checks running tasks and delete obsolete tasks.

    If there is no cluster, task_names should be specified.
    NOTE: Also this method removes already finished tasks
    unless delete_obsolete is False.

    :param task_names: the name (or iterable of names) of tasks to filter;
        if there is no cluster and task_names is not specified,
        ValueError will be raised
    :param delete_obsolete: callable or False; the callable will be invoked
        for each obsolete (non-running, non-pending) task.
        By default objects.Task.delete is used.
    :raises errors.TaskAlreadyRunning: if a matching task is running/pending
    :raises ValueError: if neither a cluster nor task_names is available
    """
    # Accept a single task name as a convenience; normalize to a tuple.
    if isinstance(task_names, six.string_types):
        task_names = (task_names,)
    if delete_obsolete is None:
        delete_obsolete = objects.Task.delete
    all_tasks = objects.TaskCollection.all_not_deleted()
    if hasattr(self, 'cluster'):
        # Lock the cluster row so concurrent managers serialize on this
        # check (prevents two tasks from passing the check at once).
        cluster = objects.Cluster.get_by_uid(
            self.cluster.id, lock_for_update=True, fail_if_not_found=True
        )
        all_tasks = objects.TaskCollection.filter_by(
            all_tasks, cluster_id=cluster.id
        )
    elif not task_names:
        # TODO(bgaifullin) there should not be tasks which is not linked
        # to cluster
        raise ValueError(
            "Either cluster or task_names should be specified."
        )
    if task_names:
        all_tasks = objects.TaskCollection.filter_by_list(
            all_tasks, 'name', task_names
        )
    # Deterministic order so obsolete tasks are deleted oldest-first.
    all_tasks = objects.TaskCollection.order_by(all_tasks, 'id')
    in_progress_status = (
        consts.TASK_STATUSES.running,
        consts.TASK_STATUSES.pending
    )
    for task in all_tasks:
        if task.status in in_progress_status:
            raise errors.TaskAlreadyRunning()
        elif delete_obsolete:
            delete_obsolete(task)
def execute(self, **kwargs):
    """Create and launch a stop_deployment task for self.cluster.

    Refuses to start if a stop/reset/deletion task is already running,
    and if neither a deployment nor a provisioning task has ever started
    (nothing to stop).

    :returns: the newly created stop_deployment Task
    :raises errors.TaskAlreadyRunning: if a conflicting task is running
    :raises errors.DeploymentNotRunning: if there is nothing to stop
    """
    try:
        self.check_running_task([
            consts.TASK_NAMES.stop_deployment,
            consts.TASK_NAMES.reset_environment,
            consts.TASK_NAMES.cluster_deletion,
        ])
    except errors.TaskAlreadyRunning:
        # Re-raise with a user-facing message.
        raise errors.TaskAlreadyRunning(
            "Stopping deployment task is already launched"
        )
    # Latest non-pending deployment task for this cluster ('-id' = newest).
    deployment_task = objects.TaskCollection.filter_by(
        None,
        cluster_id=self.cluster.id,
        name=consts.TASK_NAMES.deployment,
    )
    deployment_task = deployment_task.filter(
        Task.status != consts.TASK_STATUSES.pending
    )
    deployment_task = objects.TaskCollection.order_by(
        deployment_task, '-id'
    ).first()
    # Latest non-pending provisioning task, same pattern.
    provisioning_task = objects.TaskCollection.filter_by(
        None,
        cluster_id=self.cluster.id,
        name=consts.TASK_NAMES.provision,
    )
    provisioning_task = provisioning_task.filter(
        Task.status != consts.TASK_STATUSES.pending
    )
    provisioning_task = objects.TaskCollection.order_by(
        provisioning_task, '-id'
    ).first()
    if not deployment_task and not provisioning_task:
        # Undo the row lock taken in check_running_task before bailing.
        db().rollback()
        raise errors.DeploymentNotRunning(
            u"Nothing to stop - deployment is "
            u"not running on environment '{0}'".format(
                self.cluster.id
            )
        )
    # Updating action logs for deploy task
    # NOTE(review): ordered by 'id' ascending, so this picks the OLDEST
    # deploy task — presumably intentional, but worth confirming.
    deploy_task = objects.TaskCollection.filter_by(
        None, cluster_id=self.cluster.id,
        name=consts.TASK_NAMES.deploy
    )
    deploy_task = objects.TaskCollection.order_by(
        deploy_task, 'id').first()
    if deploy_task:
        TaskHelper.set_ready_if_not_finished(deploy_task)
        db().commit()
    task = Task(
        name=consts.TASK_NAMES.stop_deployment,
        cluster=self.cluster
    )
    db().add(task)
    # Commit before dispatching so the task row is visible to workers.
    db().commit()
    self._call_silently(
        task,
        tasks.StopDeploymentTask,
        deploy_task=deployment_task,
        provision_task=provisioning_task
    )
    return task
def execute(self, nodes_to_delete, mclient_remove=True, **kwargs):
    """Create and launch a node_deletion task for the given nodes.

    Nodes that do not belong to any cluster are deleted immediately and a
    ready task is returned. For cluster nodes, remaining controllers may be
    implicitly redeployed to keep an HA cluster consistent.

    :param nodes_to_delete: node objects to remove
    :param mclient_remove: forwarded to DeletionTask.prepare_nodes_for_task;
        presumably controls removal from the provisioning client — confirm
    :returns: the node_deletion Task (or the error-state deployment subtask
        if building the redeployment message failed)
    :raises errors.TaskAlreadyRunning: if another task is running
    :raises errors.ControllerInErrorState: if a controller to redeploy
        is in error state
    """
    cluster = None
    if hasattr(self, 'cluster'):
        cluster = self.cluster
    logger.info("Trying to execute node deletion task with nodes %s",
                ', '.join(str(node.id) for node in nodes_to_delete))
    self.verify_nodes_with_cluster(nodes_to_delete)
    objects.NodeCollection.lock_nodes(nodes_to_delete)
    if cluster is None:
        # DeletionTask operates on cluster's nodes.
        # Nodes that are not in cluster are simply deleted.
        objects.NodeCollection.delete_by_ids([
            n.id for n in nodes_to_delete])
        db().flush()
        # No orchestrator involvement needed: report an already-ready task.
        task = Task(name=consts.TASK_NAMES.node_deletion,
                    progress=100,
                    status=consts.TASK_STATUSES.ready)
        db().add(task)
        db().flush()
        return task
    try:
        self.check_running_task()
    except errors.TaskAlreadyRunning:
        # Re-raise with a user-facing message.
        raise errors.TaskAlreadyRunning(
            'Cannot perform the actions because there are running tasks.'
        )
    task = Task(name=consts.TASK_NAMES.node_deletion,
                cluster=self.cluster)
    db().add(task)
    # Mark the nodes so the UI/API reflect the in-progress removal.
    for node in nodes_to_delete:
        objects.Node.update(node,
                            {'status': consts.NODE_STATUSES.removing,
                             'pending_deletion': True})
    db().flush()
    # Filled in-place by adjust_nodes_lists_on_controller_removing with
    # controllers that must be redeployed after the removal.
    nodes_to_deploy = []
    objects.Cluster.adjust_nodes_lists_on_controller_removing(
        self.cluster, nodes_to_delete, nodes_to_deploy)
    # NOTE(aroma): in case of removing of a controller node we do
    # implicit redeployment of all left controllers here in
    # order to preserve consistency of a HA cluster.
    # The reason following filtering is added is that we must
    # redeploy only controllers in ready status. Also in case
    # one of the nodes is in error state we must cancel the whole
    # operation as result of the redeployment in this case is unpredictable
    # and user may end up with not working cluster
    controllers_with_ready_status = []
    for controller in nodes_to_deploy:
        if controller.status == consts.NODE_STATUSES.error:
            raise errors.ControllerInErrorState()
        elif controller.status == consts.NODE_STATUSES.ready:
            controllers_with_ready_status.append(controller)
    if controllers_with_ready_status:
        logger.debug("There are nodes to deploy: %s",
                     " ".join([objects.Node.get_node_fqdn(n)
                               for n in controllers_with_ready_status]))
        task_deployment = task.create_subtask(
            consts.TASK_NAMES.deployment)
        deployment_message = self._call_silently(
            task_deployment,
            self.get_deployment_task(),
            controllers_with_ready_status,
            method_name='message'
        )
        db().flush()
        # if failed to generate task message for orchestrator
        # then task is already set to error
        if task_deployment.status == consts.TASK_STATUSES.error:
            return task_deployment
        # Commit before the RPC cast so workers see the subtask row.
        db().commit()
        rpc.cast('naily', [deployment_message])
        db().commit()
    self._call_silently(
        task,
        tasks.DeletionTask,
        nodes=tasks.DeletionTask.prepare_nodes_for_task(
            nodes_to_delete, mclient_remove=mclient_remove))
    return task
def dump_task_with_bad_model(*args, **kwargs):
    """Stub that unconditionally signals a conflicting running task.

    Accepts and ignores any arguments so it can stand in for a real
    dump-task entry point.

    :raises errors.TaskAlreadyRunning: always
    """
    raise errors.TaskAlreadyRunning()