def execute(self, force=False, **kwargs):
    try:
        self.clear_tasks_history(force=force)
    except errors.TaskAlreadyRunning:
        raise errors.DeploymentAlreadyStarted(
            "Can't reset environment '{0}' when "
            "running deployment task exists.".format(
                self.cluster.id
            )
        )

    # FIXME(aroma): remove updating of 'deployed_before'
    # when stop action is reworked. 'deployed_before'
    # flag identifies whether stop action is allowed for the
    # cluster. Please, refer to [1] for more details.
    # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
    objects.Cluster.set_deployed_before_flag(self.cluster, value=False)

    nodes = objects.Cluster.get_nodes_by_role(
        self.cluster, consts.VIRTUAL_NODE_TYPES.virt
    )
    for node in nodes:
        objects.Node.reset_vms_created_state(node)

    objects.ClusterPluginLinkCollection.delete_by_cluster_id(
        self.cluster.id)

    db().commit()

    supertask = Task(
        name=consts.TASK_NAMES.reset_environment,
        cluster=self.cluster
    )
    db().add(supertask)
    al = TaskHelper.create_action_log(supertask)

    reset_nodes = supertask.create_subtask(
        consts.TASK_NAMES.reset_nodes
    )
    remove_keys_task = supertask.create_subtask(
        consts.TASK_NAMES.remove_keys
    )
    remove_ironic_bootstrap_task = supertask.create_subtask(
        consts.TASK_NAMES.remove_ironic_bootstrap
    )

    db().commit()

    rpc.cast('naily', [
        tasks.ResetEnvironmentTask.message(reset_nodes),
        tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task),
        tasks.RemoveClusterKeys.message(remove_keys_task)
    ])

    TaskHelper.update_action_log(supertask, al)

    return supertask
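# --- Illustrative usage (not from the source): a hedged sketch of how a
# handler might drive the reset manager above. The execute() signature
# matches the code; the ResetEnvironmentTaskManager constructor argument,
# the 'cluster' variable, and the print are assumptions for illustration.
manager = ResetEnvironmentTaskManager(cluster_id=cluster.id)
supertask = manager.execute(force=False)
# The RPC workers report progress back asynchronously; a client would
# poll the supertask until it leaves the 'pending'/'running' states.
print(supertask.uuid, supertask.status)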
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None,
            force=False, graph_type=None, **kwargs):
    logger.info(
        u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id
        )
    )

    try:
        self.check_running_task()
    except errors.TaskAlreadyRunning:
        raise errors.DeploymentAlreadyStarted(
            'Cannot perform the actions because '
            'there are other running tasks.'
        )

    supertask = Task(name=self.deployment_type, cluster=self.cluster,
                     dry_run=is_dry_run(kwargs),
                     status=consts.TASK_STATUSES.pending)
    db().add(supertask)

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = nodes_to_provision_deploy or \
        TaskHelper.nodes_to_deploy(self.cluster, force)
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

    self.ensure_nodes_changed(
        nodes_to_provision, nodes_to_deploy, nodes_to_delete
    )

    db().flush()
    TaskHelper.create_action_log(supertask)

    current_cluster_status = self.cluster.status
    # update cluster status
    if not is_dry_run(kwargs):
        self.cluster.status = consts.CLUSTER_STATUSES.deployment

    # we should have task committed for processing in other threads
    db().commit()
    nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy]
                           if nodes_to_provision_deploy else None)
    # the task will be executed asynchronously
    mule.call_task_manager_async(
        self.__class__,
        '_execute_async',
        self.cluster.id,
        supertask.id,
        nodes_to_provision_deploy=nodes_ids_to_deploy,
        deployment_tasks=deployment_tasks,
        force=force,
        graph_type=graph_type,
        current_cluster_status=current_cluster_status,
        **kwargs
    )
    return supertask
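# A minimal sketch of what is_dry_run() may look like, assuming it simply
# reads a 'dry_run' flag out of the request kwargs (the source does not
# show its definition, so this is an assumption):
def is_dry_run(kwargs):
    return kwargs.get('dry_run', False)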
@classmethod
def check_no_running_deployment(cls, cluster):
    tasks_q = objects.TaskCollection.get_by_name_and_cluster(
        cluster, cls.deployment_tasks).filter_by(
        status=consts.TASK_STATUSES.running)

    tasks_exists = db().query(tasks_q.exists()).scalar()
    if tasks_exists:
        raise errors.DeploymentAlreadyStarted(
            'Cannot perform the actions because there are '
            'running tasks {0}'.format(tasks_q.all()))
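# Generic SQLAlchemy illustration of the EXISTS pattern used above: wrapping
# a filtered query in exists() lets the database answer "is there at least
# one running task?" without fetching any rows. 'session' and 'Task' here
# are stand-ins, not the source's objects.
running_q = session.query(Task).filter_by(status='running')
has_running = session.query(running_q.exists()).scalar()  # True or False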
def _acquire_cluster(self):
    cluster = objects.Cluster.get_by_uid(self.cluster_id,
                                         fail_if_not_found=True,
                                         lock_for_update=True)
    running_tasks = objects.TaskCollection.all_in_progress(
        cluster_id=cluster.id)
    # TODO(bgaifullin) need new lock approach for cluster
    if objects.TaskCollection.count(running_tasks):
        raise errors.DeploymentAlreadyStarted()
    return cluster
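# Rough plain-SQLAlchemy equivalent of the lock_for_update=True flag above,
# under the assumption that it maps to SELECT ... FOR UPDATE ('session' and
# 'Cluster' are stand-ins, not the source's helpers):
cluster = (session.query(Cluster)
           .filter_by(id=cluster_id)
           .with_for_update()  # row lock serializes concurrent managers
           .one())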
def execute(self, **kwargs):
    # FIXME(aroma): remove updating of 'deployed_before'
    # when stop action is reworked. 'deployed_before'
    # flag identifies whether stop action is allowed for the
    # cluster. Please, refer to [1] for more details.
    # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
    objects.Cluster.set_deployed_before_flag(self.cluster, value=False)

    deploy_running = db().query(Task).filter_by(
        cluster=self.cluster,
        name=consts.TASK_NAMES.deploy,
        status=consts.TASK_STATUSES.running
    ).first()
    if deploy_running:
        raise errors.DeploymentAlreadyStarted(
            u"Can't reset environment '{0}' when "
            u"deployment is running".format(self.cluster.id))

    obsolete_tasks = db().query(Task).filter_by(
        cluster_id=self.cluster.id,
    ).filter(
        Task.name.in_([
            consts.TASK_NAMES.deploy,
            consts.TASK_NAMES.deployment,
            consts.TASK_NAMES.stop_deployment
        ])
    )
    for task in obsolete_tasks:
        db().delete(task)

    nodes = objects.Cluster.get_nodes_by_role(
        self.cluster, consts.VIRTUAL_NODE_TYPES.virt)
    for node in nodes:
        objects.Node.reset_vms_created_state(node)

    db().commit()

    supertask = Task(name=consts.TASK_NAMES.reset_environment,
                     cluster=self.cluster)
    db().add(supertask)
    al = TaskHelper.create_action_log(supertask)

    remove_keys_task = supertask.create_subtask(
        consts.TASK_NAMES.remove_keys)
    remove_ironic_bootstrap_task = supertask.create_subtask(
        consts.TASK_NAMES.remove_ironic_bootstrap)

    db().commit()

    rpc.cast('naily', [
        tasks.ResetEnvironmentTask.message(supertask),
        tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task),
        tasks.RemoveClusterKeys.message(remove_keys_task)
    ])

    TaskHelper.update_action_log(supertask, al)

    return supertask
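# The per-task delete loop above could also be written as a single bulk
# delete (a sketch, not the source's approach; note a bulk delete bypasses
# ORM-level cascades, which may matter for Task relationships):
db().query(Task).filter(
    Task.cluster_id == self.cluster.id,
    Task.name.in_([consts.TASK_NAMES.deploy,
                   consts.TASK_NAMES.deployment,
                   consts.TASK_NAMES.stop_deployment])
).delete(synchronize_session='fetch')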
def _acquire_cluster(self):
    cluster = objects.Cluster.get_by_uid(
        self.cluster_id, fail_if_not_found=True, lock_for_update=True
    )
    cluster_tasks = objects.TaskCollection.get_by_cluster_id(
        cluster_id=cluster.id
    )
    cluster_tasks = objects.TaskCollection.filter_by(
        cluster_tasks, name=self.task_name
    )
    cluster_tasks = objects.TaskCollection.filter_by_list(
        cluster_tasks, 'status',
        [consts.TASK_STATUSES.pending, consts.TASK_STATUSES.running]
    )
    # TODO(bgaifullin) need new lock approach for cluster
    if objects.TaskCollection.count(cluster_tasks):
        raise errors.DeploymentAlreadyStarted()
    return cluster
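# Generic SQLAlchemy equivalent of the name/status filtering above (a
# sketch; 'session' and 'Task' are stand-ins for the source's collection
# helpers):
busy_count = (session.query(Task)
              .filter(Task.cluster_id == cluster.id,
                      Task.name == task_name,
                      Task.status.in_([consts.TASK_STATUSES.pending,
                                       consts.TASK_STATUSES.running]))
              .count())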
def execute(self, force=False, **kwargs):
    try:
        self.clear_tasks_history(force=force)
    except errors.TaskAlreadyRunning:
        raise errors.DeploymentAlreadyStarted(
            u"Can't delete environment '{0}' when "
            u"running deployment task exists.".format(
                self.cluster.id
            )
        )

    # locking nodes
    nodes = objects.NodeCollection.filter_by(
        None, cluster_id=self.cluster.id
    )
    nodes = objects.NodeCollection.order_by(nodes, 'id')
    objects.NodeCollection.lock_for_update(nodes).all()

    logger.debug("Labeling cluster nodes to delete")
    for node in self.cluster.nodes:
        node.pending_deletion = True
        db().add(node)

    db().flush()

    self.cluster.status = consts.CLUSTER_STATUSES.remove
    db().add(self.cluster)

    logger.debug("Creating cluster deletion task")
    task = Task(name=consts.TASK_NAMES.cluster_deletion,
                cluster=self.cluster)
    db().add(task)
    db().commit()

    self._call_silently(
        task,
        tasks.ClusterDeletionTask
    )

    return task
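# Why order_by('id') before lock_for_update: acquiring row locks in a
# consistent order prevents deadlocks when two transactions try to lock
# overlapping node sets. A plain-SQLAlchemy sketch of the same idea
# ('session' and 'Node' are stand-ins):
nodes = (session.query(Node)
         .filter_by(cluster_id=cluster_id)
         .order_by(Node.id)
         .with_for_update()
         .all())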
def execute(self, nodes_to_delete, mclient_remove=True, **kwargs):
    cluster = None
    if hasattr(self, 'cluster'):
        cluster = self.cluster

    logger.info("Trying to execute node deletion task with nodes %s",
                ', '.join(str(node.id) for node in nodes_to_delete))

    self.verify_nodes_with_cluster(nodes_to_delete)
    objects.NodeCollection.lock_nodes(nodes_to_delete)

    if cluster is None:
        # DeletionTask operates on a cluster's nodes.
        # Nodes that are not in a cluster are simply deleted.
        objects.NodeCollection.delete_by_ids([
            n.id for n in nodes_to_delete])
        db().flush()

        task = Task(name=consts.TASK_NAMES.node_deletion,
                    progress=100,
                    status=consts.TASK_STATUSES.ready)
        db().add(task)
        db().flush()

        return task

    try:
        self.check_running_task()
    except errors.TaskAlreadyRunning:
        raise errors.DeploymentAlreadyStarted(
            'Cannot perform the actions because there are running tasks.'
        )

    task = Task(name=consts.TASK_NAMES.node_deletion,
                cluster=self.cluster)
    db().add(task)
    for node in nodes_to_delete:
        objects.Node.update(node,
                            {'status': consts.NODE_STATUSES.removing,
                             'pending_deletion': True})
    db().flush()

    nodes_to_deploy = []
    objects.Cluster.adjust_nodes_lists_on_controller_removing(
        self.cluster, nodes_to_delete, nodes_to_deploy)

    # NOTE(aroma): when a controller node is removed, all remaining
    # controllers are implicitly redeployed here to preserve the
    # consistency of an HA cluster. Only controllers in 'ready' status
    # are redeployed; if any controller is in 'error' state, the whole
    # operation must be cancelled, since the result of redeployment in
    # that case is unpredictable and the user may end up with a broken
    # cluster.
    controllers_with_ready_status = []
    for controller in nodes_to_deploy:
        if controller.status == consts.NODE_STATUSES.error:
            raise errors.ControllerInErrorState()
        elif controller.status == consts.NODE_STATUSES.ready:
            controllers_with_ready_status.append(controller)

    if controllers_with_ready_status:
        logger.debug("There are nodes to deploy: %s",
                     " ".join([objects.Node.get_node_fqdn(n)
                               for n in controllers_with_ready_status]))
        task_deployment = task.create_subtask(
            consts.TASK_NAMES.deployment)

        deployment_message = self._call_silently(
            task_deployment,
            self.get_deployment_task(),
            controllers_with_ready_status,
            method_name='message'
        )
        db().flush()

        # if generating the task message for the orchestrator failed,
        # the subtask has already been set to 'error'
        if task_deployment.status == consts.TASK_STATUSES.error:
            return task_deployment

        db().commit()
        rpc.cast('naily', [deployment_message])

    db().commit()

    self._call_silently(
        task,
        tasks.DeletionTask,
        nodes=tasks.DeletionTask.prepare_nodes_for_task(
            nodes_to_delete, mclient_remove=mclient_remove))

    return task
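# Hypothetical caller sketch (node objects and the bare constructor are
# assumptions): deleting discovered nodes that belong to no cluster takes
# the short-circuit branch above and returns an already-finished task.
manager = NodeDeletionTaskManager()
task = manager.execute(nodes_to_delete=[node_a, node_b],
                       mclient_remove=False)
assert task.status == consts.TASK_STATUSES.ready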