def execute(self): logger.info(u"Trying to start deployment at cluster '{0}'".format( self.cluster.name or self.cluster.id, )) current_tasks = orm().query(Task).filter_by(cluster_id=self.cluster.id, name="deploy") for task in current_tasks: if task.status == "running": raise errors.DeploymentAlreadyStarted() elif task.status in ("ready", "error"): for subtask in task.subtasks: orm().delete(subtask) orm().delete(task) orm().commit() nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster) nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster) if not any([nodes_to_deploy, nodes_to_delete]): raise errors.WrongNodeStatus("No changes to deploy") self.cluster.status = 'deployment' orm().add(self.cluster) orm().commit() supertask = Task(name="deploy", cluster=self.cluster) orm().add(supertask) orm().commit() task_deletion, task_provision, task_deployment = None, None, None if nodes_to_delete: task_deletion = supertask.create_subtask("node_deletion") self._call_silently(task_deletion, tasks.DeletionTask) if nodes_to_deploy: TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy) task_provision = supertask.create_subtask("provision") # we assume here that task_provision just adds system to # cobbler and reboots systems, so it has extreamly small weight task_provision.weight = 0.05 provision_message = self._call_silently(task_provision, tasks.ProvisionTask, method_name='message') task_provision.cache = provision_message orm().add(task_provision) orm().commit() task_deployment = supertask.create_subtask("deployment") deployment_message = self._call_silently(task_deployment, tasks.DeploymentTask, method_name='message') task_deployment.cache = deployment_message orm().add(task_deployment) orm().commit() rpc.cast('naily', [provision_message, deployment_message]) logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format( self.cluster.name or self.cluster.id, supertask.uuid)) return supertask
def _call_silently(self, task, instance, *args, **kwargs): # create action_log for task al = TaskHelper.create_action_log(task) method = getattr(instance, kwargs.pop('method_name', 'execute')) if task.status == consts.TASK_STATUSES.error: TaskHelper.update_action_log(task, al) return try: to_return = method(task, *args, **kwargs) # update action_log instance for task # for asynchronous tasks this is not the final update, # as they are also updated in the rpc receiver TaskHelper.update_action_log(task, al) return to_return except errors.NoChanges as e: self._finish_task(task, al, consts.TASK_STATUSES.ready, str(e)) except Exception as exc: if any([ not hasattr(exc, "log_traceback"), hasattr(exc, "log_traceback") and exc.log_traceback ]): logger.error(traceback.format_exc()) self._finish_task(task, al, consts.TASK_STATUSES.error, str(exc))
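# --- Illustrative aside (not part of the original manager code) ---
# The _call_silently() helpers in this module resolve the callee with
# getattr(instance, kwargs.pop('method_name', 'execute')), i.e. they call
# instance.execute() by default and instance.message() when
# method_name='message' is passed. Below is a minimal, self-contained sketch
# of that dispatch pattern; the demo classes and names are made up purely
# for illustration.

class _DemoTaskClass(object):
    @classmethod
    def execute(cls, task, *args):
        return ('execute', task, args)

    @classmethod
    def message(cls, task, *args):
        return ('message', task, args)


def _demo_call(task, instance, *args, **kwargs):
    # default to 'execute' unless a method_name keyword is supplied
    method = getattr(instance, kwargs.pop('method_name', 'execute'))
    return method(task, *args, **kwargs)


assert _demo_call('t1', _DemoTaskClass)[0] == 'execute'
assert _demo_call('t2', _DemoTaskClass, method_name='message')[0] == 'message'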
def execute(self, nodes_to_deployment): TaskHelper.update_slave_nodes_fqdn(nodes_to_deployment) logger.debug('Nodes to deploy: {0}'.format(' '.join( [n.fqdn for n in nodes_to_deployment]))) task_deployment = Task(name='deployment', cluster=self.cluster) db().add(task_deployment) db().commit() deployment_message = self._call_silently(task_deployment, tasks.DeploymentTask, nodes_to_deployment, method_name='message') db().refresh(task_deployment) task_deployment.cache = deployment_message for node in nodes_to_deployment: node.status = 'deploying' node.progress = 0 db().commit() rpc.cast('naily', deployment_message) return task_deployment
def execute(self, nodes_to_deployment): TaskHelper.update_slave_nodes_fqdn(nodes_to_deployment) logger.debug('Nodes to deploy: {0}'.format( ' '.join([n.fqdn for n in nodes_to_deployment]))) task_deployment = Task(name='deployment', cluster=self.cluster) db().add(task_deployment) db().commit() deployment_message = self._call_silently( task_deployment, tasks.DeploymentTask, nodes_to_deployment, method_name='message') db().refresh(task_deployment) task_deployment.cache = deployment_message for node in nodes_to_deployment: node.status = 'deploying' node.progress = 0 db().commit() rpc.cast('naily', deployment_message) return task_deployment
def execute(self, data, check_admin_untagged=False): # Make a copy of original 'data' due to being changed by # 'tasks.CheckNetworksTask' data_copy = copy.deepcopy(data) locked_tasks = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=consts.TASK_NAMES.check_networks) locked_tasks = objects.TaskCollection.order_by(locked_tasks, 'id') check_networks = objects.TaskCollection.lock_for_update( locked_tasks).first() if check_networks: TaskHelper.set_ready_if_not_finished(check_networks) db().delete(check_networks) db().flush() task = Task(name=consts.TASK_NAMES.check_networks, cluster=self.cluster) db().add(task) db().commit() self._call_silently(task, tasks.CheckNetworksTask, data_copy, check_admin_untagged) task = objects.Task.get_by_uid(task.id, fail_if_not_found=True, lock_for_update=True) if task.status == consts.TASK_STATUSES.running: # update task status with given data objects.Task.update(task, { 'status': consts.TASK_STATUSES.ready, 'progress': 100 }) db().commit() return task
def execute(self, data, check_admin_untagged=False): check_networks = db().query(Task).filter_by( cluster=self.cluster, name="check_networks" ).first() if check_networks: db().delete(check_networks) db().commit() task = Task( name="check_networks", cluster=self.cluster ) db().add(task) db().commit() self._call_silently( task, tasks.CheckNetworksTask, data, check_admin_untagged ) db().refresh(task) if task.status == 'running': TaskHelper.update_task_status( task.uuid, status="ready", progress=100 ) return task
def execute(self): logger.info(u"Trying to start deployment at cluster '{0}'".format( self.cluster.name or self.cluster.id)) network_info = self.serialize_network_cfg(self.cluster) logger.info(u"Network info:\n{0}".format( jsonutils.dumps(network_info, indent=4))) self._remove_obsolete_tasks() supertask = Task(name=consts.TASK_NAMES.deploy, cluster=self.cluster) db().add(supertask) nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster) nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster) nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster) if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]): db().rollback() raise errors.WrongNodeStatus("No changes to deploy") # we should have task committed for processing in other threads db().commit() TaskHelper.create_action_log(supertask) mule.call_task_manager_async(self.__class__, '_execute_async', self.cluster.id, supertask.id) return supertask
def check_before_deployment(self, supertask): # checking admin intersection with untagged network_info = self.serialize_network_cfg(self.cluster) network_info["networks"] = [n for n in network_info["networks"] if n["name"] != "fuelweb_admin"] check_networks = supertask.create_subtask(TASK_NAMES.check_networks) self._call_silently(check_networks, tasks.CheckNetworksTask, data=network_info, check_admin_untagged=True) if check_networks.status == TASK_STATUSES.error: logger.warning("Checking networks failed: %s", check_networks.message) raise errors.CheckBeforeDeploymentError(check_networks.message) TaskHelper.set_ready_if_not_finished(check_networks) db().delete(check_networks) db().refresh(supertask) db().flush() # checking prerequisites check_before = supertask.create_subtask(TASK_NAMES.check_before_deployment) logger.debug("Checking prerequisites task: %s", check_before.uuid) self._call_silently(check_before, tasks.CheckBeforeDeploymentTask) # if failed to check prerequisites # then task is already set to error if check_before.status == TASK_STATUSES.error: logger.warning("Checking prerequisites failed: %s", check_before.message) raise errors.CheckBeforeDeploymentError(check_before.message) logger.debug("Checking prerequisites is successful, starting deployment...") TaskHelper.set_ready_if_not_finished(check_before) db().delete(check_before) db().refresh(supertask) db().flush()
def execute(self): # locking required tasks locked_tasks = objects.TaskCollection.lock_cluster_tasks( self.cluster.id) # locking cluster objects.Cluster.get_by_uid(self.cluster.id, fail_if_not_found=True, lock_for_update=True) # locking nodes nodes = objects.NodeCollection.filter_by(None, cluster_id=self.cluster.id) nodes = objects.NodeCollection.order_by(nodes, 'id') objects.NodeCollection.lock_for_update(nodes).all() current_cluster_tasks = objects.TaskCollection.filter_by_list( locked_tasks, 'name', (consts.TASK_NAMES.cluster_deletion, )) deploy_running = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=consts.TASK_NAMES.deploy, status=consts.TASK_STATUSES.running) deploy_running = objects.TaskCollection.order_by(deploy_running, 'id').first() if deploy_running: logger.error(u"Deleting cluster '{0}' " "while deployment is still running".format( self.cluster.name)) # Updating action logs for deploy task TaskHelper.set_ready_if_not_finished(deploy_running) logger.debug("Removing cluster tasks") for task in current_cluster_tasks: if task.status == consts.TASK_STATUSES.running: db().rollback() raise errors.DeletionAlreadyStarted() elif task.status in (consts.TASK_STATUSES.ready, consts.TASK_STATUSES.error): for subtask in task.subtasks: db().delete(subtask) db().delete(task) db().flush() logger.debug("Labeling cluster nodes to delete") for node in self.cluster.nodes: node.pending_deletion = True db().add(node) db().flush() self.cluster.status = consts.CLUSTER_STATUSES.remove db().add(self.cluster) logger.debug("Creating cluster deletion task") task = Task(name=consts.TASK_NAMES.cluster_deletion, cluster=self.cluster) db().add(task) db().commit() self._call_silently(task, tasks.ClusterDeletionTask) return task
def check_before_deployment(self, supertask): """Performs checks before deployment :param supertask: task SqlAlchemy object """ # checking admin intersection with untagged network_info = self.serialize_network_cfg(self.cluster) network_info["networks"] = [ n for n in network_info["networks"] if n["name"] != "fuelweb_admin" ] check_repo_connect = supertask.create_subtask( consts.TASK_NAMES.check_networks) self._call_silently( check_repo_connect, tasks.CheckRepositoryConnectionFromMasterNodeTask, ) if check_repo_connect.status == consts.TASK_STATUSES.error: logger.warning("Checking connectivity to repositories failed: %s", check_repo_connect.message) raise errors.CheckBeforeDeploymentError(check_repo_connect.message) check_networks = supertask.create_subtask( consts.TASK_NAMES.check_networks) self._call_silently(check_networks, tasks.CheckNetworksTask, data=network_info, check_admin_untagged=True) if check_networks.status == consts.TASK_STATUSES.error: logger.warning("Checking networks failed: %s", check_networks.message) raise errors.CheckBeforeDeploymentError(check_networks.message) TaskHelper.set_ready_if_not_finished(check_networks) db().delete(check_networks) db().refresh(supertask) db().flush() # checking prerequisites check_before = supertask.create_subtask( consts.TASK_NAMES.check_before_deployment) logger.debug("Checking prerequisites task: %s", check_before.uuid) self._call_silently(check_before, tasks.CheckBeforeDeploymentTask) # if failed to check prerequisites # then task is already set to error if check_before.status == consts.TASK_STATUSES.error: logger.warning("Checking prerequisites failed: %s", check_before.message) raise errors.CheckBeforeDeploymentError(check_before.message) logger.debug( "Checking prerequisites is successful, starting deployment...") TaskHelper.set_ready_if_not_finished(check_before) db().delete(check_before) db().refresh(supertask) db().flush()
def check_before_deployment(self, supertask): """Performs checks before deployment :param supertask: task SqlAlchemy object """ try: # if there are VIPs with same names in the network configuration # the error will be raised. Such situation may occur when, for # example, enabled plugins contain conflicting network # configuration network_info = self.serialize_network_cfg(self.cluster) except (errors.DuplicatedVIPNames, errors.NetworkRoleConflict) as e: raise errors.CheckBeforeDeploymentError(e.message) logger.info(u"Network info:\n{0}".format( jsonutils.dumps(network_info, indent=4))) # checking admin intersection with untagged network_info["networks"] = [ n for n in network_info["networks"] if n["name"] != "fuelweb_admin" ] check_networks = supertask.create_subtask( consts.TASK_NAMES.check_networks) self._call_silently(check_networks, tasks.CheckNetworksTask, data=network_info, check_all_parameters=True) if check_networks.status == consts.TASK_STATUSES.error: logger.warning("Checking networks failed: %s", check_networks.message) raise errors.CheckBeforeDeploymentError(check_networks.message) TaskHelper.set_ready_if_not_finished(check_networks) db().delete(check_networks) db().refresh(supertask) db().flush() # checking prerequisites check_before = supertask.create_subtask( consts.TASK_NAMES.check_before_deployment) logger.debug("Checking prerequisites task: %s", check_before.uuid) self._call_silently(check_before, tasks.CheckBeforeDeploymentTask) # if failed to check prerequisites # then task is already set to error if check_before.status == consts.TASK_STATUSES.error: logger.warning("Checking prerequisites failed: %s", check_before.message) raise errors.CheckBeforeDeploymentError(check_before.message) logger.debug( "Checking prerequisites is successful, starting deployment...") TaskHelper.set_ready_if_not_finished(check_before) db().delete(check_before) db().refresh(supertask) db().flush()
def execute(self, force=False, **kwargs): try: self.clear_tasks_history(force=force) except errors.TaskAlreadyRunning: raise errors.DeploymentAlreadyStarted( "Can't reset environment '{0}' when " "running deployment task exists.".format( self.cluster.id ) ) # FIXME(aroma): remove updating of 'deployed_before' # when stop action is reworked. 'deployed_before' # flag identifies whether stop action is allowed for the # cluster. Please, refer to [1] for more details. # [1]: https://bugs.launchpad.net/fuel/+bug/1529691 objects.Cluster.set_deployed_before_flag(self.cluster, value=False) nodes = objects.Cluster.get_nodes_by_role( self.cluster, consts.VIRTUAL_NODE_TYPES.virt ) for node in nodes: objects.Node.reset_vms_created_state(node) objects.ClusterPluginLinkCollection.delete_by_cluster_id( self.cluster.id) db().commit() supertask = Task( name=consts.TASK_NAMES.reset_environment, cluster=self.cluster ) db().add(supertask) al = TaskHelper.create_action_log(supertask) reset_nodes = supertask.create_subtask( consts.TASK_NAMES.reset_nodes ) remove_keys_task = supertask.create_subtask( consts.TASK_NAMES.remove_keys ) remove_ironic_bootstrap_task = supertask.create_subtask( consts.TASK_NAMES.remove_ironic_bootstrap ) db.commit() rpc.cast('naily', [ tasks.ResetEnvironmentTask.message(reset_nodes), tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task), tasks.RemoveClusterKeys.message(remove_keys_task) ]) TaskHelper.update_action_log(supertask, al) return supertask
def execute(self, data, check_admin_untagged=False): task = Task(name="check_networks", cluster=self.cluster) db().add(task) db().commit() self._call_silently(task, tasks.CheckNetworksTask, data, check_admin_untagged) db().refresh(task) if task.status == "running": TaskHelper.update_task_status(task.uuid, status="ready", progress=100) return task
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None, force=False, graph_type=None, **kwargs): logger.info( u"Trying to start deployment at cluster '{0}'".format( self.cluster.name or self.cluster.id ) ) try: self.check_running_task() except errors.TaskAlreadyRunning: raise errors.DeploymentAlreadyStarted( 'Cannot perform the actions because ' 'there are other running tasks.' ) supertask = Task(name=self.deployment_type, cluster=self.cluster, dry_run=is_dry_run(kwargs), status=consts.TASK_STATUSES.pending) db().add(supertask) nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster) nodes_to_deploy = nodes_to_provision_deploy or \ TaskHelper.nodes_to_deploy(self.cluster, force) nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster) self.ensure_nodes_changed( nodes_to_provision, nodes_to_deploy, nodes_to_delete ) db().flush() TaskHelper.create_action_log(supertask) current_cluster_status = self.cluster.status # update cluster status if not is_dry_run(kwargs): self.cluster.status = consts.CLUSTER_STATUSES.deployment # we should have task committed for processing in other threads db().commit() nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy] if nodes_to_provision_deploy else None) # the task will be executed asynchronously mule.call_task_manager_async( self.__class__, '_execute_async', self.cluster.id, supertask.id, nodes_to_provision_deploy=nodes_ids_to_deploy, deployment_tasks=deployment_tasks, force=force, graph_type=graph_type, current_cluster_status=current_cluster_status, **kwargs ) return supertask
def execute(self): logger.debug("Creating redhat_setup task") current_tasks = db().query(Task).filter_by(name="redhat_setup") for task in current_tasks: for subtask in task.subtasks: db().delete(subtask) db().delete(task) db().commit() supertask = Task(name="redhat_setup") supertask.result = { "release_info": { "release_id": self.data["release_id"] } } db().add(supertask) db().commit() subtasks_to_create = [ ('redhat_check_credentials', tasks.RedHatCheckCredentialsTask, 0.01), ('redhat_check_licenses', tasks.RedHatCheckLicensesTask, 0.01), ('redhat_download_release', tasks.RedHatDownloadReleaseTask, 1) ] messages = [] for task_name, task_class, weight in subtasks_to_create: task = supertask.create_subtask(task_name) task.weight = weight db().add(task) db().commit() msg = self._call_silently(task, task_class, self.data, method_name='message') db().refresh(task) if task.status == 'error': TaskHelper.update_task_status(supertask.uuid, status="error", progress=100, msg=task.message) return supertask task.cache = msg db().add(task) db().commit() messages.append(msg) db().refresh(supertask) if supertask.status == 'error': return supertask rpc.cast('naily', messages) return supertask
def execute(self, **kwargs): # FIXME(aroma): remove updating of 'deployed_before' # when stop action is reworked. 'deployed_before' # flag identifies whether stop action is allowed for the # cluster. Please, refer to [1] for more details. # [1]: https://bugs.launchpad.net/fuel/+bug/1529691 objects.Cluster.set_deployed_before_flag(self.cluster, value=False) deploy_running = db().query(Task).filter_by( cluster=self.cluster, name=consts.TASK_NAMES.deploy, status='running').first() if deploy_running: raise errors.DeploymentAlreadyStarted( u"Can't reset environment '{0}' when " u"deployment is running".format(self.cluster.id)) obsolete_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, ).filter( Task.name.in_([ consts.TASK_NAMES.deploy, consts.TASK_NAMES.deployment, consts.TASK_NAMES.stop_deployment ])) for task in obsolete_tasks: db().delete(task) nodes = objects.Cluster.get_nodes_by_role( self.cluster, consts.VIRTUAL_NODE_TYPES.virt) for node in nodes: objects.Node.reset_vms_created_state(node) db().commit() supertask = Task(name=consts.TASK_NAMES.reset_environment, cluster=self.cluster) db().add(supertask) al = TaskHelper.create_action_log(supertask) remove_keys_task = supertask.create_subtask( consts.TASK_NAMES.reset_environment) remove_ironic_bootstrap_task = supertask.create_subtask( consts.TASK_NAMES.reset_environment) db.commit() rpc.cast('naily', [ tasks.ResetEnvironmentTask.message(supertask), tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task), tasks.RemoveClusterKeys.message(remove_keys_task) ]) TaskHelper.update_action_log(supertask, al) return supertask
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None, force=False, graph_type=None, **kwargs): logger.info( u"Trying to start deployment at cluster '{0}'".format( self.cluster.name or self.cluster.id ) ) try: self.check_running_task() except errors.TaskAlreadyRunning: raise errors.DeploymentAlreadyStarted( 'Cannot perform the actions because ' 'there are other running tasks.' ) supertask = Task(name=self.deployment_type, cluster=self.cluster, dry_run=is_dry_run(kwargs), status=consts.TASK_STATUSES.pending) db().add(supertask) nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster) nodes_to_deploy = nodes_to_provision_deploy or \ TaskHelper.nodes_to_deploy(self.cluster, force) nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster) self.ensure_nodes_changed( nodes_to_provision, nodes_to_deploy, nodes_to_delete ) db().flush() TaskHelper.create_action_log(supertask) current_cluster_status = self.cluster.status # update cluster status if not is_dry_run(kwargs): self.cluster.status = consts.CLUSTER_STATUSES.deployment # we should have task committed for processing in other threads db().commit() nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy] if nodes_to_provision_deploy else None) mule.call_task_manager_async( self.__class__, '_execute_async', self.cluster.id, supertask.id, nodes_to_provision_deploy=nodes_ids_to_deploy, deployment_tasks=deployment_tasks, force=force, graph_type=graph_type, current_cluster_status=current_cluster_status, **kwargs ) return supertask
def _call_silently(self, task, instance, *args, **kwargs): method = getattr(instance, kwargs.pop("method_name", "execute")) if task.status == "error": return try: return method(task, *args, **kwargs) except Exception as exc: err = str(exc) if any([not hasattr(exc, "log_traceback"), hasattr(exc, "log_traceback") and exc.log_traceback]): logger.error(traceback.format_exc()) TaskHelper.update_task_status(task.uuid, status="error", progress=100, msg=err)
def execute(self, data): task = Task(name="check_networks", cluster=self.cluster) orm().add(task) orm().commit() self._call_silently(task, tasks.CheckNetworksTask, data) orm().refresh(task) if task.status == 'running': TaskHelper.update_task_status(task.uuid, status="ready", progress=100) return task
def execute(self): deploy_running = db().query(Task).filter_by( cluster=self.cluster, name=consts.TASK_NAMES.deploy, status='running' ).first() if deploy_running: raise errors.DeploymentAlreadyStarted( u"Can't reset environment '{0}' when " u"deployment is running".format( self.cluster.id ) ) obsolete_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, ).filter( Task.name.in_([ consts.TASK_NAMES.deploy, consts.TASK_NAMES.deployment, consts.TASK_NAMES.stop_deployment ]) ) for task in obsolete_tasks: db().delete(task) nodes = objects.Cluster.get_nodes_by_role( self.cluster, consts.VIRTUAL_NODE_TYPES.virt) for node in nodes: objects.Node.reset_vms_created_state(node) db().commit() supertask = Task( name=consts.TASK_NAMES.reset_environment, cluster=self.cluster ) db().add(supertask) al = TaskHelper.create_action_log(supertask) remove_keys_task = supertask.create_subtask( consts.TASK_NAMES.reset_environment ) db.commit() rpc.cast('naily', [ tasks.ResetEnvironmentTask.message(supertask), tasks.RemoveClusterKeys.message(remove_keys_task) ]) TaskHelper.update_action_log(supertask, al) return supertask
def _redhat_messages(self, supertask, nodes_info): account = db().query(RedHatAccount).first() if not account: TaskHelper.update_task_status(supertask.uuid, status="error", progress=100, msg="RHEL account is not found") return supertask rhel_data = { 'release_id': supertask.cluster.release.id, 'release_name': supertask.cluster.release.name, 'redhat': { 'license_type': account.license_type, 'username': account.username, 'password': account.password, 'satellite': account.satellite, 'activation_key': account.activation_key } } subtasks = [ supertask.create_subtask('redhat_check_credentials'), supertask.create_subtask('redhat_check_licenses') ] map(lambda t: setattr(t, "weight", 0.01), subtasks) db().commit() subtask_messages = [ self._call_silently(subtasks[0], tasks.RedHatCheckCredentialsTask, rhel_data, method_name='message'), self._call_silently(subtasks[1], tasks.RedHatCheckLicensesTask, rhel_data, nodes_info, method_name='message') ] for task, message in zip(subtasks, subtask_messages): task.cache = message db().commit() map(db().refresh, subtasks) for task in subtasks: if task.status == 'error': raise errors.RedHatSetupError(task.message) return subtask_messages
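# --- Illustrative aside (not part of the original manager code) ---
# The map(lambda t: setattr(t, "weight", 0.01), subtasks) idiom used in the
# _redhat_messages() variants relies on Python 2's eager map(); on Python 3,
# map() is lazy and the setattr calls would never run. A small, self-contained
# demonstration of the pitfall plus a version-agnostic replacement follows;
# the demo class and variable names are made up for illustration.

class _DemoSubtask(object):
    def __init__(self):
        self.weight = None

demo_subtasks = [_DemoSubtask(), _DemoSubtask()]

# Lazy on Python 3 -- nothing is assigned until the iterator is consumed:
map(lambda t: setattr(t, "weight", 0.01), demo_subtasks)

# An explicit loop behaves the same on Python 2 and Python 3:
for subtask in demo_subtasks:
    subtask.weight = 0.01

assert all(t.weight == 0.01 for t in demo_subtasks)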
def _finish_task(self, task, log_item, status, message): data = {'status': status, 'progress': 100, 'message': message} # update task entity with given data objects.Task.update(task, data) # NOTE(romcheg): Flushing the data is required to unlock # tasks in order to temporarily fix issues with # the deadlock detection query in tests and let the tests pass. # TODO(akislitsky): Get rid of this flush as soon as # task locking issues are resolved. db().flush() TaskHelper.update_action_log(task, log_item) db().commit()
def execute(self): task = Task( name='check_before_deployment', cluster=self.cluster ) db().add(task) db().commit() self._call_silently(task, tasks.CheckBeforeDeploymentTask) db().refresh(task) if task.status == 'running': TaskHelper.update_task_status( task.uuid, status="ready", progress=100) return task
def create_action_log(self, task_instance, operation_nodes): create_kwargs = TaskHelper.prepare_action_log_kwargs( task_instance, operation_nodes ) objects.ActionLog.create(create_kwargs)
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None): logger.info( u"Trying to start deployment at cluster '{0}'".format( self.cluster.name or self.cluster.id ) ) network_info = self.serialize_network_cfg(self.cluster) logger.info( u"Network info:\n{0}".format( jsonutils.dumps(network_info, indent=4) ) ) self.check_no_running_deployment(self.cluster) self._remove_obsolete_tasks() supertask = Task(name=self.deployment_type, cluster=self.cluster) db().add(supertask) nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster) nodes_to_deploy = nodes_to_provision_deploy or \ TaskHelper.nodes_to_deploy(self.cluster) nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster) if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]): db().rollback() raise errors.WrongNodeStatus("No changes to deploy") db().flush() TaskHelper.create_action_log(supertask) # we should have task committed for processing in other threads db().commit() nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy] if nodes_to_provision_deploy else None) mule.call_task_manager_async( self.__class__, '_execute_async', self.cluster.id, supertask.id, nodes_to_provision_deploy=nodes_ids_to_deploy, deployment_tasks=deployment_tasks ) return supertask
def _call_silently(self, task, instance, *args, **kwargs): method = getattr(instance, kwargs.pop('method_name', 'execute')) if task.status == 'error': return try: return method(task, *args, **kwargs) except Exception as exc: err = str(exc) if any([ not hasattr(exc, "log_traceback"), hasattr(exc, "log_traceback") and exc.log_traceback ]): logger.error(traceback.format_exc()) TaskHelper.update_task_status(task.uuid, status="error", progress=100, msg=err)
def make_deploy_msgs(self, cluster, supertask, deploymsg, status): #ebs_rolelist=["gangliasrv","nagiossrv","gangliacli","nagioscli"] task_messages=[] ebs_rolelist=["keepalived","onecmdb","activemq","redis","nagios","ganglia","mysql","cloudmaster","clmrs","octopus","cmagent","x86master","rabbitmq","cmgather","monitor","AlarmApp","autoAlarm","ceilometer","PerformancePM","notify"] if status == 1: ebs_rolelist=["notify","PerformancePM","ceilometer","autoAlarm","AlarmApp","monitor","cmgather","rabbitmq","x86master","cmagent","octopus","clmrs","cloudmaster","mysql","ganglia","nagios","redis","activemq","onecmdb","keepalived"] # get all nodes of the current cluster that are in the ready state nodes_to_startorstop=TaskHelper.nodes_to_startorstop(cluster) # collect the set of all roles across these nodes nodes_roles=[] for node in nodes_to_startorstop: nodes_roles.extend(node.roles) nodes_roles=list(set(nodes_roles)) logger.info(deploymsg) for role in nodes_roles: if role in ebs_rolelist: task_deployment = supertask.create_subtask(TASK_NAMES.deployment) db().commit() newdeploymsg=copy.deepcopy(deploymsg) newdeploymsg['respond_to']="start_stop_resp" newdeploymsg['args']['task_uuid']=task_deployment.uuid deployment_infos=[] for deployment_info in deploymsg['args']['deployment_info']: if deployment_info["role"] != role: newdeploymsg['args']['deployment_info'].remove(deployment_info) else: if status == 2: deployment_info[role]['action']="start" logger.info(u"Matched role {0} on node {1}, starting...".format(role, deployment_info["ip"])) else: deployment_info[role]['action']="stop" logger.info(u"Matched role {0} on node {1}, stopping...".format(role, deployment_info["ip"])) deployment_infos.append(deployment_info) newdeploymsg['args']['deployment_info']=deployment_infos task_messages.append(newdeploymsg) task_deployment = objects.Task.get_by_uid( task_deployment.id, fail_if_not_found=True, lock_for_update=True ) # if failed to generate task message for orchestrator # then task is already set to error if task_deployment.status == TASK_STATUSES.error: return supertask task_deployment.cache = newdeploymsg db().commit() self.update_cluster_role_status(cluster, role, status) new_task_messages=[] logger.info(len(task_messages)) for ebsrole in ebs_rolelist: for task in task_messages: if task['args']['deployment_info'][0]['role'] == ebsrole: new_task_messages.append(task) return new_task_messages
def execute(self): logger.debug("Creating redhat_setup task") current_tasks = db().query(Task).filter_by(name="redhat_setup") for task in current_tasks: for subtask in task.subtasks: db().delete(subtask) db().delete(task) db().commit() supertask = Task(name="redhat_setup") supertask.result = {"release_info": {"release_id": self.data["release_id"]}} db().add(supertask) db().commit() subtasks_to_create = [ ("redhat_check_credentials", tasks.RedHatCheckCredentialsTask, 0.01), ("redhat_check_licenses", tasks.RedHatCheckLicensesTask, 0.01), ("redhat_download_release", tasks.RedHatDownloadReleaseTask, 1), ] messages = [] for task_name, task_class, weight in subtasks_to_create: task = supertask.create_subtask(task_name) task.weight = weight db().add(task) db().commit() msg = self._call_silently(task, task_class, self.data, method_name="message") db().refresh(task) if task.status == "error": TaskHelper.update_task_status(supertask.uuid, status="error", progress=100, msg=task.message) return supertask task.cache = msg db().add(task) db().commit() messages.append(msg) db().refresh(supertask) if supertask.status == "error": return supertask rpc.cast("naily", messages) return supertask
def _call_silently(self, task, instance, *args, **kwargs): # create action_log for task al = TaskHelper.create_action_log(task) method = getattr(instance, kwargs.pop('method_name', 'execute')) if task.status == TASK_STATUSES.error: TaskHelper.update_action_log(task, al) return try: to_return = method(task, *args, **kwargs) # update action_log instance for task # for asynchronous tasks this is not the final update, # as they are also updated in the rpc receiver TaskHelper.update_action_log(task, al) return to_return except Exception as exc: err = str(exc) if any([ not hasattr(exc, "log_traceback"), hasattr(exc, "log_traceback") and exc.log_traceback ]): logger.error(traceback.format_exc()) # update task entity with given data data = {'status': 'error', 'progress': 100, 'message': err} objects.Task.update(task, data) TaskHelper.update_action_log(task, al)
def execute(self, data, check_admin_untagged=False): check_networks = db().query(Task).filter_by( cluster=self.cluster, name="check_networks").first() if check_networks: db().delete(check_networks) db().commit() task = Task(name="check_networks", cluster=self.cluster) db().add(task) db().commit() self._call_silently(task, tasks.CheckNetworksTask, data, check_admin_untagged) db().refresh(task) if task.status == 'running': TaskHelper.update_task_status(task.uuid, status="ready", progress=100) return task
def _redhat_messages(self, supertask, nodes_info): account = db().query(RedHatAccount).first() if not account: TaskHelper.update_task_status(supertask.uuid, status="error", progress=100, msg="RHEL account is not found") return supertask rhel_data = { "release_id": supertask.cluster.release.id, "release_name": supertask.cluster.release.name, "redhat": { "license_type": account.license_type, "username": account.username, "password": account.password, "satellite": account.satellite, "activation_key": account.activation_key, }, } subtasks = [ supertask.create_subtask("redhat_check_credentials"), supertask.create_subtask("redhat_check_licenses"), ] map(lambda t: setattr(t, "weight", 0.01), subtasks) db().commit() subtask_messages = [ self._call_silently(subtasks[0], tasks.RedHatCheckCredentialsTask, rhel_data, method_name="message"), self._call_silently( subtasks[1], tasks.RedHatCheckLicensesTask, rhel_data, nodes_info, method_name="message" ), ] for task, message in zip(subtasks, subtask_messages): task.cache = message db().commit() map(db().refresh, subtasks) for task in subtasks: if task.status == "error": raise errors.RedHatSetupError(task.message) return subtask_messages
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None, force=False, graph_type=None, **kwargs): logger.info( u"Trying to start deployment at cluster '{0}'".format( self.cluster.name or self.cluster.id ) ) self.check_no_running_deployment(self.cluster) self._remove_obsolete_tasks() supertask = Task(name=self.deployment_type, cluster=self.cluster, status=consts.TASK_STATUSES.pending) db().add(supertask) nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster) nodes_to_deploy = nodes_to_provision_deploy or \ TaskHelper.nodes_to_deploy(self.cluster, force) nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster) self.ensure_nodes_changed( nodes_to_provision, nodes_to_deploy, nodes_to_delete ) db().flush() TaskHelper.create_action_log(supertask) # we should have task committed for processing in other threads db().commit() nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy] if nodes_to_provision_deploy else None) mule.call_task_manager_async( self.__class__, '_execute_async', self.cluster.id, supertask.id, nodes_to_provision_deploy=nodes_ids_to_deploy, deployment_tasks=deployment_tasks, force=force, graph_type=graph_type ) return supertask
def execute(self, data, check_all_parameters=False, **kwargs): # Make a copy of original 'data' due to being changed by # 'tasks.CheckNetworksTask' data_copy = copy.deepcopy(data) locked_tasks = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=consts.TASK_NAMES.check_networks ) locked_tasks = objects.TaskCollection.order_by(locked_tasks, 'id') check_networks = objects.TaskCollection.lock_for_update( locked_tasks ).first() if check_networks: TaskHelper.set_ready_if_not_finished(check_networks) db().delete(check_networks) db().flush() task = Task( name=consts.TASK_NAMES.check_networks, cluster=self.cluster ) db().add(task) db().commit() self._call_silently( task, tasks.CheckNetworksTask, data_copy, check_all_parameters ) task = objects.Task.get_by_uid( task.id, fail_if_not_found=True, lock_for_update=True ) if task.status == consts.TASK_STATUSES.running: # update task status with given data objects.Task.update( task, {'status': consts.TASK_STATUSES.ready, 'progress': 100}) db().commit() return task
def execute(self): deploy_running = db().query(Task).filter_by( cluster=self.cluster, name=consts.TASK_NAMES.deploy, status='running').first() if deploy_running: raise errors.DeploymentAlreadyStarted( u"Can't reset environment '{0}' when " u"deployment is running".format(self.cluster.id)) obsolete_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, ).filter( Task.name.in_([ consts.TASK_NAMES.deploy, consts.TASK_NAMES.deployment, consts.TASK_NAMES.stop_deployment ])) for task in obsolete_tasks: db().delete(task) nodes = objects.Cluster.get_nodes_by_role( self.cluster, consts.VIRTUAL_NODE_TYPES.virt) for node in nodes: objects.Node.reset_vms_created_state(node) db().commit() supertask = Task(name=consts.TASK_NAMES.reset_environment, cluster=self.cluster) db().add(supertask) al = TaskHelper.create_action_log(supertask) remove_keys_task = supertask.create_subtask( consts.TASK_NAMES.reset_environment) db.commit() rpc.cast('naily', [ tasks.ResetEnvironmentTask.message(supertask), tasks.RemoveClusterKeys.message(remove_keys_task) ]) TaskHelper.update_action_log(supertask, al) return supertask
def execute(self, data): task = Task( name="check_networks", cluster=self.cluster ) orm().add(task) orm().commit() self._call_silently( task, tasks.CheckNetworksTask, data ) orm().refresh(task) if task.status == 'running': TaskHelper.update_task_status( task.uuid, status="ready", progress=100 ) return task
def execute(self): if not self.cluster.pending_release_id: raise errors.InvalidReleaseId( u"Can't update environment '{0}' when " u"new release Id is invalid".format(self.cluster.name)) running_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, status='running' ).filter( Task.name.in_([ 'deploy', 'deployment', 'reset_environment', 'stop_deployment' ]) ) if running_tasks.first(): raise errors.TaskAlreadyRunning( u"Can't update environment '{0}' when " u"other task is running".format( self.cluster.id ) ) nodes_to_change = TaskHelper.nodes_to_upgrade(self.cluster) objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_change) logger.debug('Nodes to update: {0}'.format( ' '.join([n.fqdn for n in nodes_to_change]))) task_update = Task(name='update', cluster=self.cluster) db().add(task_update) self.cluster.status = 'update' db().flush() deployment_message = self._call_silently( task_update, tasks.UpdateTask, nodes_to_change, method_name='message') db().refresh(task_update) task_update.cache = deployment_message for node in nodes_to_change: node.status = 'deploying' node.progress = 0 db().commit() rpc.cast('naily', deployment_message) return task_update
def execute(self, data, check_admin_untagged=False): locked_tasks = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=consts.TASK_NAMES.check_networks ) locked_tasks = objects.TaskCollection.order_by(locked_tasks, 'id') check_networks = objects.TaskCollection.lock_for_update( locked_tasks ).first() if check_networks: TaskHelper.set_ready_if_not_finished(check_networks) db().delete(check_networks) db().flush() task = Task( name=consts.TASK_NAMES.check_networks, cluster=self.cluster ) db().add(task) db().commit() self._call_silently( task, tasks.CheckNetworksTask, data, check_admin_untagged ) task = objects.Task.get_by_uid( task.id, fail_if_not_found=True, lock_for_update=True ) if task.status == consts.TASK_STATUSES.running: # update task status with given data data = {'status': consts.TASK_STATUSES.ready, 'progress': 100} objects.Task.update(task, data) db().commit() return task
def execute(self, nodes_to_provision): """Run provisioning task on specified nodes Constraints: currently this task cannot deploy RedHat. For RedHat, additional tasks should be added here, i.e. checking credentials, checking licenses and downloading the release. You can track the status of this work here: https://blueprints.launchpad.net/fuel/+spec /nailgun-separate-provisioning-for-redhat """ TaskHelper.update_slave_nodes_fqdn(nodes_to_provision) logger.debug('Nodes to provision: {0}'.format( ' '.join([n.fqdn for n in nodes_to_provision]))) task_provision = Task(name='provision', cluster=self.cluster) db().add(task_provision) db().commit() provision_message = self._call_silently( task_provision, tasks.ProvisionTask, nodes_to_provision, method_name='message' ) db().refresh(task_provision) task_provision.cache = provision_message for node in nodes_to_provision: node.pending_addition = False node.status = 'provisioning' node.progress = 0 db().commit() rpc.cast('naily', provision_message) return task_provision
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None): logger.info(u"Trying to start deployment at cluster '{0}'".format( self.cluster.name or self.cluster.id)) network_info = self.serialize_network_cfg(self.cluster) logger.info(u"Network info:\n{0}".format( jsonutils.dumps(network_info, indent=4))) self.check_no_running_deployment(self.cluster) self._remove_obsolete_tasks() supertask = Task(name=self.deployment_type, cluster=self.cluster) db().add(supertask) nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster) nodes_to_deploy = nodes_to_provision_deploy or \ TaskHelper.nodes_to_deploy(self.cluster) nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster) if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]): db().rollback() raise errors.WrongNodeStatus("No changes to deploy") db().flush() TaskHelper.create_action_log(supertask) # we should have task committed for processing in other threads db().commit() nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy] if nodes_to_provision_deploy else None) mule.call_task_manager_async( self.__class__, '_execute_async', self.cluster.id, supertask.id, nodes_to_provision_deploy=nodes_ids_to_deploy, deployment_tasks=deployment_tasks) return supertask
def execute(self): logger.info( u"Trying to start deployment at cluster '{0}'".format( self.cluster.name or self.cluster.id ) ) network_info = self.serialize_network_cfg(self.cluster) logger.info( u"Network info:\n{0}".format( jsonutils.dumps(network_info, indent=4) ) ) self._remove_obsolete_tasks() supertask = Task(name=consts.TASK_NAMES.deploy, cluster=self.cluster) db().add(supertask) nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster) nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster) nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster) if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]): db().rollback() raise errors.WrongNodeStatus("No changes to deploy") # we should have task committed for processing in other threads db().commit() TaskHelper.create_action_log(supertask) mule.call_task_manager_async( self.__class__, '_execute_async', self.cluster.id, supertask.id, ) return supertask
def clear_tasks_history(self, force=False): try: self.check_running_task(delete_obsolete=False) except errors.TaskAlreadyRunning: if not force: raise logger.error( u"Force stop running tasks for cluster %s", self.cluster.name ) running_tasks = objects.TaskCollection.all_in_progress( self.cluster.id ) for task in running_tasks: # Force set task to finished state and update action log TaskHelper.set_ready_if_not_finished(task) # clear tasks history cluster_tasks = objects.TaskCollection.get_cluster_tasks( self.cluster.id ) cluster_tasks.delete(synchronize_session='fetch')
def clear_tasks_history(self, force=False): try: self.check_running_task(delete_obsolete=False) except errors.TaskAlreadyRunning: if not force: raise logger.error( u"Force stop running tasks for cluster %s", self.cluster.name ) running_tasks = objects.TaskCollection.all_in_progress( self.cluster.id ) for task in running_tasks: # Force set task to finished state and update action log TaskHelper.set_ready_if_not_finished(task) # clear tasks history cluster_tasks = objects.TransactionCollection.get_transactions( self.cluster.id ) cluster_tasks.delete(synchronize_session='fetch')
def execute(self): if not self.cluster.pending_release_id: raise errors.InvalidReleaseId( u"Can't update environment '{0}' when " u"new release Id is invalid".format(self.cluster.name)) running_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, status='running' ).filter( Task.name.in_([ consts.TASK_NAMES.deploy, consts.TASK_NAMES.deployment, consts.TASK_NAMES.reset_environment, consts.TASK_NAMES.stop_deployment ]) ) if running_tasks.first(): raise errors.TaskAlreadyRunning( u"Can't update environment '{0}' when " u"other task is running".format( self.cluster.id ) ) nodes_to_change = TaskHelper.nodes_to_upgrade(self.cluster) objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_change) logger.debug('Nodes to update: {0}'.format( ' '.join([n.fqdn for n in nodes_to_change]))) task_update = Task(name=consts.TASK_NAMES.update, cluster=self.cluster) db().add(task_update) self.cluster.status = 'update' db().flush() deployment_message = self._call_silently( task_update, tasks.UpdateTask, nodes_to_change, method_name='message') db().refresh(task_update) for node in nodes_to_change: node.status = 'deploying' node.progress = 0 db().commit() rpc.cast('naily', deployment_message) return task_update
def _call_silently(self, task, instance, *args, **kwargs): # create action_log for task al = TaskHelper.create_action_log(task) method = getattr(instance, kwargs.pop('method_name', 'execute')) if task.status == consts.TASK_STATUSES.error: TaskHelper.update_action_log(task, al) return try: to_return = method(task, *args, **kwargs) # update action_log instance for task # for asynchronous tasks this is not the final update, # as they are also updated in the rpc receiver TaskHelper.update_action_log(task, al) return to_return except Exception as exc: err = str(exc) if any([ not hasattr(exc, "log_traceback"), hasattr(exc, "log_traceback") and exc.log_traceback ]): logger.error(traceback.format_exc()) # update task entity with given data data = {'status': 'error', 'progress': 100, 'message': err} objects.Task.update(task, data) # NOTE(romcheg): Flushing the data is required to unlock # tasks in order to temporarily fix issues with # the deadlock detection query in tests and let the tests pass. # TODO(akislitsky): Get rid of this flush as soon as # task locking issues are resolved. db().flush() TaskHelper.update_action_log(task, al) db().commit()
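# --- Illustrative aside (not part of the original manager code) ---
# The except blocks above decide whether to log a traceback with
# any([not hasattr(exc, "log_traceback"),
#      hasattr(exc, "log_traceback") and exc.log_traceback]).
# As a truth test this is equivalent to the simpler expression sketched
# below; the helper and exception names here are made up for demonstration.

def _should_log_traceback(exc):
    # Log unless the exception explicitly sets log_traceback to a falsy value.
    return bool(getattr(exc, "log_traceback", True))


class _QuietError(Exception):
    log_traceback = False


assert _should_log_traceback(ValueError("boom")) is True
assert _should_log_traceback(_QuietError("quiet")) is False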
def execute(self): logger.info( u"Trying to start deployment at cluster '{0}'".format( self.cluster.name or self.cluster.id, ) ) current_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, name="deploy" ) for task in current_tasks: if task.status == "running": raise errors.DeploymentAlreadyStarted() elif task.status in ("ready", "error"): for subtask in task.subtasks: db().delete(subtask) db().delete(task) db().commit() task_messages = [] nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster) nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster) nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster) if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]): raise errors.WrongNodeStatus("No changes to deploy") self.cluster.status = 'deployment' db().add(self.cluster) db().commit() supertask = Task( name="deploy", cluster=self.cluster ) db().add(supertask) db().commit() if not self.cluster.replaced_provisioning_info \ and not self.cluster.replaced_deployment_info: try: self.check_before_deployment(supertask) except errors.CheckBeforeDeploymentError: return supertask # in case of Red Hat if self.cluster.release.operating_system == "RHEL": try: redhat_messages = self._redhat_messages( supertask, # provision only? [ {"uid": n.id, "platform_name": n.platform_name} for n in nodes_to_provision ] ) except Exception as exc: TaskHelper.update_task_status( supertask.uuid, status='error', progress=100, msg=str(exc) ) return supertask task_messages.extend(redhat_messages) # /in case of Red Hat task_deletion, task_provision, task_deployment = None, None, None if nodes_to_delete: task_deletion = supertask.create_subtask("node_deletion") logger.debug("Launching deletion task: %s", task_deletion.uuid) self._call_silently( task_deletion, tasks.DeletionTask ) if nodes_to_provision: TaskHelper.update_slave_nodes_fqdn(nodes_to_provision) logger.debug("There are nodes to provision: %s", " ".join([n.fqdn for n in nodes_to_provision])) task_provision = supertask.create_subtask("provision") # we assume here that task_provision just adds system to # cobbler and reboots it, so it has extremely small weight task_provision.weight = 0.05 provision_message = self._call_silently( task_provision, tasks.ProvisionTask, method_name='message' ) db().refresh(task_provision) # if failed to generate task message for orchestrator # then task is already set to error if task_provision.status == 'error': return supertask task_provision.cache = provision_message db().add(task_provision) db().commit() task_messages.append(provision_message) if nodes_to_deploy: TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy) logger.debug("There are nodes to deploy: %s", " ".join([n.fqdn for n in nodes_to_deploy])) task_deployment = supertask.create_subtask("deployment") deployment_message = self._call_silently( task_deployment, tasks.DeploymentTask, method_name='message' ) # if failed to generate task message for orchestrator # then task is already set to error if task_deployment.status == 'error': return supertask task_deployment.cache = deployment_message db().add(task_deployment) db().commit() task_messages.append(deployment_message) if nodes_to_provision: for node in nodes_to_provision: node.status = 'provisioning' db().commit() if task_messages: rpc.cast('naily', task_messages) logger.debug( u"Deployment: task to deploy cluster '{0}' is {1}".format( self.cluster.name or self.cluster.id, supertask.uuid ) ) return supertask
def _execute_async_content(self, supertask, deployment_tasks=None, nodes_to_provision_deploy=None): """Processes supertask async in mule :param supertask: SqlAlchemy task object """ nodes_to_delete = [] if nodes_to_provision_deploy: nodes_to_deploy = objects.NodeCollection.get_by_ids( nodes_to_provision_deploy) nodes_to_provision = filter( lambda n: any([n.pending_addition, n.needs_reprovision]), nodes_to_deploy) else: nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster) nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster) nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster) objects.Cluster.adjust_nodes_lists_on_controller_removing( self.cluster, nodes_to_delete, nodes_to_deploy) task_messages = [] # Run validation if user didn't redefine # provisioning and deployment information if not(nodes_to_provision_deploy) and \ (not objects.Cluster.get_provisioning_info(self.cluster) and not objects.Cluster.get_deployment_info(self.cluster)): try: self.check_before_deployment(supertask) except errors.CheckBeforeDeploymentError: db().commit() return task_deletion, task_provision, task_deployment = None, None, None if nodes_to_delete: task_deletion = self.delete_nodes(supertask, nodes_to_delete) if nodes_to_provision: objects.TaskCollection.lock_cluster_tasks(self.cluster.id) logger.debug( "There are nodes to provision: %s", " ".join([ objects.Node.get_node_fqdn(n) for n in nodes_to_provision ])) # For more accurate progress calculation task_weight = 0.4 task_provision = supertask.create_subtask( consts.TASK_NAMES.provision, weight=task_weight) # we should have task committed for processing in other threads db().commit() provision_message = self._call_silently(task_provision, tasks.ProvisionTask, nodes_to_provision, method_name='message') db().commit() task_provision = objects.Task.get_by_uid(task_provision.id, fail_if_not_found=True, lock_for_update=True) # if failed to generate task message for orchestrator # then task is already set to error if task_provision.status == consts.TASK_STATUSES.error: return task_provision.cache = provision_message db().commit() task_messages.append(provision_message) if nodes_to_deploy: objects.TaskCollection.lock_cluster_tasks(self.cluster.id) logger.debug( "There are nodes to deploy: %s", " ".join( [objects.Node.get_node_fqdn(n) for n in nodes_to_deploy])) task_deployment = supertask.create_subtask( name=consts.TASK_NAMES.deployment) # we should have task committed for processing in other threads db().commit() deployment_message = self._call_silently( task_deployment, tasks.DeploymentTask, nodes_to_deploy, deployment_tasks=deployment_tasks, method_name='message') db().commit() task_deployment = objects.Task.get_by_uid(task_deployment.id, fail_if_not_found=True, lock_for_update=True) # if failed to generate task message for orchestrator # then task is already set to error if task_deployment.status == consts.TASK_STATUSES.error: return task_deployment.cache = deployment_message db().commit() task_messages.append(deployment_message) if nodes_to_provision: nodes_to_provision = objects.NodeCollection.lock_nodes( nodes_to_provision) for node in nodes_to_provision: node.status = consts.NODE_STATUSES.provisioning db().commit() objects.Cluster.get_by_uid(self.cluster.id, fail_if_not_found=True, lock_for_update=True) self.cluster.status = consts.CLUSTER_STATUSES.deployment db().add(self.cluster) db().commit() # We have to execute node deletion task only when provision, # deployment and other tasks are in the database. 
# Otherwise, # it may be executed too quickly (e.g. in our tests) and this # will affect parent task calculation - it will be marked # as 'ready' because by that time it has only two subtasks # - network_check and node_deletion - and they're ready. # In order to avoid that wrong behavior, let's send the # deletion task for execution only when the other subtasks are in # the database. if task_deletion: self._call_silently(task_deletion, tasks.DeletionTask, tasks.DeletionTask.get_task_nodes_for_cluster( self.cluster), check_ceph=True) db().commit() if task_messages: rpc.cast('naily', task_messages) logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format( self.cluster.name or self.cluster.id, supertask.uuid))
def execute(self, **kwargs): # FIXME(aroma): remove updating of 'deployed_before' # when stop action is reworked. 'deployed_before' # flag identifies whether stop action is allowed for the # cluster. Please, refer to [1] for more details. # [1]: https://bugs.launchpad.net/fuel/+bug/1529691 objects.Cluster.set_deployed_before_flag(self.cluster, value=False) deploy_running = db().query(Task).filter_by( cluster=self.cluster, name=consts.TASK_NAMES.deploy, status='running' ).first() if deploy_running: raise errors.DeploymentAlreadyStarted( u"Can't reset environment '{0}' when " u"deployment is running".format( self.cluster.id ) ) obsolete_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, ).filter( Task.name.in_([ consts.TASK_NAMES.deploy, consts.TASK_NAMES.deployment, consts.TASK_NAMES.stop_deployment ]) ) for task in obsolete_tasks: db().delete(task) nodes = objects.Cluster.get_nodes_by_role( self.cluster, consts.VIRTUAL_NODE_TYPES.virt) for node in nodes: objects.Node.reset_vms_created_state(node) db().commit() supertask = Task( name=consts.TASK_NAMES.reset_environment, cluster=self.cluster ) db().add(supertask) al = TaskHelper.create_action_log(supertask) remove_keys_task = supertask.create_subtask( consts.TASK_NAMES.reset_environment ) remove_ironic_bootstrap_task = supertask.create_subtask( consts.TASK_NAMES.reset_environment ) db.commit() rpc.cast('naily', [ tasks.ResetEnvironmentTask.message(supertask), tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task), tasks.RemoveClusterKeys.message(remove_keys_task) ]) TaskHelper.update_action_log(supertask, al) return supertask
def execute(self): # locking tasks for processing names = (consts.TASK_NAMES.deploy, consts.TASK_NAMES.stop_deployment, consts.TASK_NAMES.deployment, consts.TASK_NAMES.provision) objects.TaskCollection.lock_cluster_tasks(self.cluster.id, names=names) stop_running = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=consts.TASK_NAMES.stop_deployment, ) stop_running = objects.TaskCollection.order_by(stop_running, 'id').first() if stop_running: if stop_running.status == consts.TASK_STATUSES.running: raise errors.StopAlreadyRunning("Stopping deployment task " "is already launched") else: db().delete(stop_running) db().flush() deployment_task = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=consts.TASK_NAMES.deployment, ) deployment_task = objects.TaskCollection.order_by( deployment_task, 'id').first() provisioning_task = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=consts.TASK_NAMES.provision, ) provisioning_task = objects.TaskCollection.order_by( provisioning_task, 'id').first() if not deployment_task and not provisioning_task: db().rollback() raise errors.DeploymentNotRunning( u"Nothing to stop - deployment is " u"not running on environment '{0}'".format(self.cluster.id)) # Updating action logs for deploy task deploy_task = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=consts.TASK_NAMES.deploy) deploy_task = objects.TaskCollection.order_by(deploy_task, 'id').first() if deploy_task: TaskHelper.set_ready_if_not_finished(deploy_task) task = Task(name=consts.TASK_NAMES.stop_deployment, cluster=self.cluster) db().add(task) db().commit() self._call_silently(task, tasks.StopDeploymentTask, deploy_task=deployment_task, provision_task=provisioning_task) return task
def execute(self, **kwargs): stop_running = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=consts.TASK_NAMES.stop_deployment ) stop_running = objects.TaskCollection.order_by( stop_running, 'id' ).first() if stop_running: if stop_running.status in ( consts.TASK_STATUSES.running, consts.TASK_STATUSES.pending): raise errors.StopAlreadyRunning( "Stopping deployment task " "is already launched" ) else: db().delete(stop_running) db().commit() deployment_task = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=consts.TASK_NAMES.deployment, ) deployment_task = deployment_task.filter( Task.status != consts.TASK_STATUSES.pending ) deployment_task = objects.TaskCollection.order_by( deployment_task, '-id' ).first() provisioning_task = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=consts.TASK_NAMES.provision, ) provisioning_task = provisioning_task.filter( Task.status != consts.TASK_STATUSES.pending ) provisioning_task = objects.TaskCollection.order_by( provisioning_task, '-id' ).first() if not deployment_task and not provisioning_task: db().rollback() raise errors.DeploymentNotRunning( u"Nothing to stop - deployment is " u"not running on environment '{0}'".format( self.cluster.id ) ) # Updating action logs for deploy task deploy_task = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=consts.TASK_NAMES.deploy ) deploy_task = objects.TaskCollection.order_by( deploy_task, 'id').first() if deploy_task: TaskHelper.set_ready_if_not_finished(deploy_task) db().commit() task = Task( name=consts.TASK_NAMES.stop_deployment, cluster=self.cluster ) db().add(task) db().commit() self._call_silently( task, tasks.StopDeploymentTask, deploy_task=deployment_task, provision_task=provisioning_task ) return task
def check_before_deployment(self, supertask):
    """Performs checks before deployment

    :param supertask: task SqlAlchemy object
    """
    try:
        # If the network configuration contains VIPs with duplicated
        # names, an error is raised. This may happen when, for example,
        # enabled plugins carry conflicting network configuration.
        network_info = self.serialize_network_cfg(self.cluster)
    except (errors.DuplicatedVIPNames, errors.NetworkRoleConflict) as e:
        raise errors.CheckBeforeDeploymentError(e.message)

    logger.info(
        u"Network info:\n{0}".format(
            jsonutils.dumps(network_info, indent=4)
        )
    )

    # checking admin intersection with untagged
    network_info["networks"] = [
        n for n in network_info["networks"] if n["name"] != "fuelweb_admin"
    ]

    check_networks = supertask.create_subtask(
        consts.TASK_NAMES.check_networks)

    self._call_silently(
        check_networks,
        tasks.CheckNetworksTask,
        data=network_info,
        check_all_parameters=True
    )

    if check_networks.status == consts.TASK_STATUSES.error:
        logger.warning(
            "Checking networks failed: %s", check_networks.message
        )
        raise errors.CheckBeforeDeploymentError(check_networks.message)
    TaskHelper.set_ready_if_not_finished(check_networks)
    db().delete(check_networks)
    db().refresh(supertask)
    db().flush()

    # checking prerequisites
    check_before = supertask.create_subtask(
        consts.TASK_NAMES.check_before_deployment
    )
    logger.debug("Checking prerequisites task: %s", check_before.uuid)

    self._call_silently(
        check_before,
        tasks.CheckBeforeDeploymentTask
    )

    # if the prerequisites check failed,
    # the task is already set to error
    if check_before.status == consts.TASK_STATUSES.error:
        logger.warning(
            "Checking prerequisites failed: %s", check_before.message
        )
        raise errors.CheckBeforeDeploymentError(check_before.message)
    logger.debug(
        "Checking prerequisites is successful, starting deployment..."
    )
    TaskHelper.set_ready_if_not_finished(check_before)
    db().delete(check_before)
    db().refresh(supertask)
    db().flush()
def get_nodes_to_deploy(self, force=False):
    if objects.Release.is_lcm_supported(self.cluster.release):
        return list(
            objects.Cluster.get_nodes_not_for_deletion(self.cluster).all()
        )
    return TaskHelper.nodes_to_deploy(self.cluster, force)
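# Illustrative usage sketch only: the manager class name, the helper name
# and the way the cluster is obtained are assumptions, not part of the
# original code. It shows the effect of the LCM branch above: on
# LCM-enabled releases every node not scheduled for deletion is returned,
# otherwise only the nodes that actually need (re)deployment.
def _log_nodes_to_deploy(cluster):
    manager = ApplyChangesTaskManager(cluster_id=cluster.id)
    nodes = manager.get_nodes_to_deploy(force=False)
    logger.debug("Nodes selected for deployment: %s",
                 " ".join(objects.Node.get_node_fqdn(n) for n in nodes))
    return nodes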
def _execute_async_content(self, supertask, deployment_tasks=None,
                           nodes_to_provision_deploy=None, force=False,
                           graph_type=None, current_cluster_status=None,
                           **kwargs):
    """Processes supertask async in mule

    :param supertask: SqlAlchemy task object
    :param deployment_tasks: the list of task names to execute
    :param nodes_to_provision_deploy: the list of selected node ids
    :param force: the boolean flag, if True all nodes will be deployed
    :param graph_type: the name of deployment graph to use
    :param current_cluster_status: the status of the cluster before this
           operation was started
    """
    nodes_to_delete = []
    affected_nodes = []

    if nodes_to_provision_deploy:
        nodes_to_deploy = objects.NodeCollection.get_by_ids(
            nodes_to_provision_deploy)
        nodes_to_provision = filter(lambda n: any([
            n.pending_addition,
            n.needs_reprovision]),
            nodes_to_deploy)
    else:
        nodes_to_deploy = self.get_nodes_to_deploy(force=force)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)

    objects.Cluster.adjust_nodes_lists_on_controller_removing(
        self.cluster, nodes_to_delete, nodes_to_deploy)

    task_messages = []
    # Run validation if user didn't redefine
    # provisioning and deployment information
    if not (nodes_to_provision_deploy or
            objects.Cluster.get_provisioning_info(self.cluster) or
            objects.Cluster.get_deployment_info(self.cluster)):
        try:
            self.check_before_deployment(supertask)
        except errors.CheckBeforeDeploymentError:
            if current_cluster_status is not None:
                self.cluster.status = current_cluster_status
                db().commit()
            return

    if current_cluster_status == consts.CLUSTER_STATUSES.operational:
        # rerun particular tasks on all deployed nodes
        modified_node_ids = {n.id for n in nodes_to_deploy}
        modified_node_ids.update(n.id for n in nodes_to_provision)
        modified_node_ids.update(n.id for n in nodes_to_delete)
        affected_nodes = objects.Cluster.get_nodes_by_status(
            self.cluster,
            status=consts.NODE_STATUSES.ready,
            exclude=modified_node_ids
        ).all()

    task_deletion, task_provision, task_deployment = None, None, None
    dry_run = is_dry_run(kwargs)

    if nodes_to_delete and not dry_run:
        task_deletion = self.delete_nodes(supertask, nodes_to_delete)
        self.reset_error_message(nodes_to_delete, dry_run)

    if nodes_to_provision and not dry_run:
        logger.debug("There are nodes to provision: %s",
                     " ".join([objects.Node.get_node_fqdn(n)
                               for n in nodes_to_provision]))

        # For more accurate progress calculation
        task_weight = 0.4
        task_provision = supertask.create_subtask(
            consts.TASK_NAMES.provision,
            status=consts.TASK_STATUSES.pending,
            weight=task_weight)

        # we should have task committed for processing in other threads
        db().commit()
        provision_message = self._call_silently(
            task_provision,
            tasks.ProvisionTask,
            nodes_to_provision,
            method_name='message'
        )
        db().commit()

        task_provision = objects.Task.get_by_uid(
            task_provision.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        # if generating the task message for the orchestrator failed,
        # the task is already set to error
        if task_provision.status == consts.TASK_STATUSES.error:
            return

        self.reset_error_message(nodes_to_provision, dry_run)
        task_provision.cache = provision_message
        db().commit()
        task_messages.append(provision_message)

    deployment_message = None

    if (nodes_to_deploy or affected_nodes or
            objects.Release.is_lcm_supported(self.cluster.release)):
        if nodes_to_deploy:
            logger.debug("There are nodes to deploy: %s",
                         " ".join(objects.Node.get_node_fqdn(n)
                                  for n in nodes_to_deploy))
        if affected_nodes:
            logger.debug("There are nodes affected by deployment: %s",
                         " ".join(objects.Node.get_node_fqdn(n)
                                  for n in affected_nodes))

        deployment_task_provider = self.get_deployment_task()

        transaction_name = self.get_deployment_transaction_name(dry_run)

        task_deployment = supertask.create_subtask(
            name=transaction_name,
            dry_run=dry_run,
            status=consts.TASK_STATUSES.pending
        )
        # we should have task committed for processing in other threads
        db().commit()

        # build the RPC message to be sent to the orchestrator side
        deployment_message = self._call_silently(
            task_deployment,
            deployment_task_provider,
            nodes_to_deploy,
            affected_nodes=affected_nodes,
            deployment_tasks=deployment_tasks,
            method_name='message',
            reexecutable_filter=consts.TASKS_TO_RERUN_ON_DEPLOY_CHANGES,
            graph_type=graph_type,
            force=force,
            **kwargs
        )

        db().commit()
        task_deployment = objects.Task.get_by_uid(
            task_deployment.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        # if generating the task message for the orchestrator failed,
        # the task is already set to error
        if task_deployment.status == consts.TASK_STATUSES.error:
            return

        task_deployment.cache = deployment_message
        self.reset_error_message(nodes_to_deploy, dry_run)
        db().commit()

    if deployment_message:
        task_messages.append(deployment_message)

    # Even if we don't have nodes to deploy, the deployment task
    # should be created. Why? Because we need to update both
    # nodes.yaml and /etc/hosts on all slaves. Since we need only
    # those two tasks, let's create a stripped version of
    # deployment.
    if (nodes_to_delete and not nodes_to_deploy and not dry_run and
            not objects.Release.is_lcm_supported(self.cluster.release)):
        logger.debug(
            "No nodes to deploy, just update nodes.yaml everywhere.")

        task_deployment = supertask.create_subtask(
            name=consts.TASK_NAMES.deployment,
            status=consts.TASK_STATUSES.pending
        )
        task_message = tasks.UpdateNodesInfoTask.message(task_deployment)
        task_deployment.cache = task_message
        task_messages.append(task_message)
        db().commit()

    if nodes_to_provision and not dry_run:
        nodes_to_provision = objects.NodeCollection.lock_nodes(
            nodes_to_provision
        )
        for node in nodes_to_provision:
            node.status = consts.NODE_STATUSES.provisioning
        db().commit()

    if not dry_run:
        objects.Cluster.get_by_uid(
            self.cluster.id,
            fail_if_not_found=True
        )
        self.cluster.status = consts.CLUSTER_STATUSES.deployment
        db().commit()

    # We have to execute the node deletion task only when the provision,
    # deployment and other tasks are already in the database. Otherwise,
    # it may be executed too quickly (e.g. in our tests), which affects
    # parent task calculation - the supertask would be marked as 'ready'
    # because by that time it would have only two subtasks,
    # network_check and node_deletion, and both would be ready.
    # To avoid that wrong behavior, the deletion task is sent for
    # execution only when the other subtasks are in the database.
    if task_deletion and not dry_run:
        self._call_silently(
            task_deletion,
            tasks.DeletionTask,
            tasks.DeletionTask.get_task_nodes_for_cluster(self.cluster),
            check_ceph=True)

    if task_messages:
        db().commit()
        rpc.cast('naily', task_messages)

    logger.debug(
        u"Deployment: task to deploy cluster '{0}' is {1}".format(
            self.cluster.name or self.cluster.id, supertask.uuid
        )
    )
def execute(self, **kwargs):
    try:
        self.check_running_task([
            consts.TASK_NAMES.stop_deployment,
            consts.TASK_NAMES.reset_environment,
            consts.TASK_NAMES.cluster_deletion,
        ])
    except errors.TaskAlreadyRunning:
        raise errors.TaskAlreadyRunning(
            "Stopping deployment task is already launched"
        )

    deployment_task = objects.TaskCollection.filter_by(
        None,
        cluster_id=self.cluster.id,
        name=consts.TASK_NAMES.deployment,
    )
    deployment_task = deployment_task.filter(
        Task.status != consts.TASK_STATUSES.pending
    )
    deployment_task = objects.TaskCollection.order_by(
        deployment_task, '-id'
    ).first()

    provisioning_task = objects.TaskCollection.filter_by(
        None,
        cluster_id=self.cluster.id,
        name=consts.TASK_NAMES.provision,
    )
    provisioning_task = provisioning_task.filter(
        Task.status != consts.TASK_STATUSES.pending
    )
    provisioning_task = objects.TaskCollection.order_by(
        provisioning_task, '-id'
    ).first()

    if not deployment_task and not provisioning_task:
        db().rollback()
        raise errors.DeploymentNotRunning(
            u"Nothing to stop - deployment is "
            u"not running on environment '{0}'".format(
                self.cluster.id
            )
        )

    # Updating action logs for deploy task
    deploy_task = objects.TaskCollection.filter_by(
        None,
        cluster_id=self.cluster.id,
        name=consts.TASK_NAMES.deploy
    )
    deploy_task = objects.TaskCollection.order_by(
        deploy_task, 'id').first()
    if deploy_task:
        TaskHelper.set_ready_if_not_finished(deploy_task)
        db().commit()

    task = Task(
        name=consts.TASK_NAMES.stop_deployment,
        cluster=self.cluster
    )
    db().add(task)
    db().commit()
    self._call_silently(
        task,
        tasks.StopDeploymentTask,
        deploy_task=deployment_task,
        provision_task=provisioning_task
    )
    return task
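# `check_running_task` is called above but not included in this snippet.
# A rough sketch of the behaviour implied by its usage (an assumption,
# not the verified implementation): raise errors.TaskAlreadyRunning when
# a task with one of the given names is still active for this cluster.
def check_running_task(self, task_names):
    for name in task_names:
        cluster_tasks = objects.TaskCollection.filter_by(
            None, cluster_id=self.cluster.id, name=name)
        for task in cluster_tasks:
            if task.status in (consts.TASK_STATUSES.pending,
                               consts.TASK_STATUSES.running):
                raise errors.TaskAlreadyRunning()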