def execute(self, nodes_to_provision, **kwargs):
    """Run provisioning task on specified nodes.

    Creates a pending 'provision' task, flips the cluster into the
    'deployment' state, commits so other processes can see the task,
    and schedules the actual work in a uWSGI mule process.

    :param nodes_to_provision: iterable of Node DB objects to provision
    :returns: the newly created provision Task
    """
    # Lazy %-style logging args: the message is only rendered when the
    # DEBUG level is actually enabled.
    logger.debug(
        'Nodes to provision: %s',
        ' '.join(objects.Node.get_node_fqdn(n)
                 for n in nodes_to_provision))
    self.check_running_task()

    task_provision = Task(name=consts.TASK_NAMES.provision,
                          status=consts.TASK_STATUSES.pending,
                          cluster=self.cluster)
    db().add(task_provision)

    # update cluster status
    self.cluster.status = consts.CLUSTER_STATUSES.deployment

    # commit so the task is visible to the mule process
    db().commit()

    # The mule runs in a separate process with its own DB session, so
    # pass plain node ids instead of DB instances.
    nodes_ids_to_provision = [node.id for node in nodes_to_provision]

    # perform async call of _execute_async
    mule.call_task_manager_async(
        self.__class__,
        '_execute_async',
        self.cluster.id,
        task_provision.id,
        nodes_ids_to_provision=nodes_ids_to_provision,
        **kwargs
    )

    return task_provision
def execute(self, nodes_to_provision, **kwargs):
    """Run provisioning task on specified nodes."""
    fqdns = [objects.Node.get_node_fqdn(n) for n in nodes_to_provision]
    logger.debug('Nodes to provision: {0}'.format(' '.join(fqdns)))

    self.check_running_task()

    provision_task = Task(
        name=consts.TASK_NAMES.provision,
        status=consts.TASK_STATUSES.pending,
        cluster=self.cluster,
    )
    db().add(provision_task)

    # Mark the cluster as being deployed before handing off the work.
    self.cluster.status = consts.CLUSTER_STATUSES.deployment
    db().commit()

    # The mule process has its own DB session; pass ids, not instances.
    node_ids = [node.id for node in nodes_to_provision]
    mule.call_task_manager_async(
        self.__class__,
        '_execute_async',
        self.cluster.id,
        provision_task.id,
        nodes_ids_to_provision=node_ids,
        **kwargs
    )

    return provision_task
def continue_(self, transaction):
    """Pick next pending task and send it to execution.

    Transaction may consist of a number of sub-transactions. We should
    execute them one-by-one. This method picks the first pending
    sub-transaction and sends it to execution.

    :param transaction: a top-level transaction to continue
    :return: True if sub transaction will be started, otherwise False
    """
    pending = None
    for candidate in transaction.subtasks:
        if candidate.status == consts.TASK_STATUSES.pending:
            pending = candidate
            break

    if pending is None:
        # No pending sub-transaction left -- close this transaction.
        self.success(transaction)
        return False

    with try_transaction(transaction, self.fail):
        # uWSGI mule is a separate process, and that means it won't share
        # our DB session. Hence, we can't pass fetched DB instances to the
        # function we want to be executed in mule, so let's proceed with
        # unique identifiers.
        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster_id,
            pending.id,
        )

    return True
def continue_(self, transaction):
    """Send the first pending sub-transaction to execution.

    A transaction may be built from several sub-transactions which
    must run one-by-one; this picks the first one that is still
    pending and schedules it. When none is pending the whole
    transaction is marked as successful.

    :param transaction: a top-level transaction to continue
    :return: True if sub transaction will be started, otherwise False
    """
    pending_subs = [sub for sub in transaction.subtasks
                    if sub.status == consts.TASK_STATUSES.pending]

    if not pending_subs:
        # Nothing left to run -- the whole transaction is done.
        self.success(transaction)
        return False

    with try_transaction(transaction, self.fail):
        # The mule runs in another process with a separate DB session,
        # so we hand over plain identifiers rather than DB instances.
        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster_id,
            pending_subs[0].id,
        )

    return True
def execute(self):
    """Start a full cluster deployment as an asynchronous supertask."""
    cluster_label = self.cluster.name or self.cluster.id
    logger.info(u"Trying to start deployment at cluster '{0}'".format(
        cluster_label))

    network_info = self.serialize_network_cfg(self.cluster)
    logger.info(u"Network info:\n{0}".format(
        jsonutils.dumps(network_info, indent=4)))

    self._remove_obsolete_tasks()

    supertask = Task(name=consts.TASK_NAMES.deploy, cluster=self.cluster)
    db().add(supertask)

    to_delete = TaskHelper.nodes_to_delete(self.cluster)
    to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
    to_provision = TaskHelper.nodes_to_provision(self.cluster)

    if not (to_provision or to_deploy or to_delete):
        # Nothing changed since the last deployment -- abort.
        db().rollback()
        raise errors.WrongNodeStatus("No changes to deploy")

    # we should have task committed for processing in other threads
    db().commit()
    TaskHelper.create_action_log(supertask)

    mule.call_task_manager_async(
        self.__class__, '_execute_async', self.cluster.id, supertask.id)

    return supertask
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None,
            force=False, graph_type=None, **kwargs):
    """Start a cluster deployment and schedule it asynchronously.

    Creates a pending supertask, logs an action record, commits so the
    task is visible to other processes, then hands execution off to a
    uWSGI mule.

    :param nodes_to_provision_deploy: optional explicit list of Node
        objects; overrides the nodes computed by TaskHelper
    :param deployment_tasks: optional deployment task names, forwarded
        to the async execution
    :param force: forwarded to TaskHelper.nodes_to_deploy and the
        async execution
    :param graph_type: deployment graph type, forwarded as-is
    :raises errors.DeploymentAlreadyStarted: if another task is running
    :returns: the newly created pending supertask
    """
    logger.info(
        u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id
        )
    )
    try:
        self.check_running_task()
    except errors.TaskAlreadyRunning:
        raise errors.DeploymentAlreadyStarted(
            'Cannot perform the actions because '
            'there are another running tasks.'
        )

    supertask = Task(name=self.deployment_type, cluster=self.cluster,
                     dry_run=is_dry_run(kwargs),
                     status=consts.TASK_STATUSES.pending)
    db().add(supertask)

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = nodes_to_provision_deploy or \
        TaskHelper.nodes_to_deploy(self.cluster, force)
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

    self.ensure_nodes_changed(
        nodes_to_provision, nodes_to_deploy, nodes_to_delete
    )

    db().flush()
    TaskHelper.create_action_log(supertask)

    # capture the status before it is flipped below; it is forwarded
    # to the async call
    current_cluster_status = self.cluster.status
    # update cluster status (only for a real, non-dry run)
    if not is_dry_run(kwargs):
        self.cluster.status = consts.CLUSTER_STATUSES.deployment

    # we should have task committed for processing in other threads
    db().commit()

    # the mule runs in a separate process with its own DB session,
    # so pass plain node ids rather than DB instances
    nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy]
                           if nodes_to_provision_deploy else None)
    # the task will be executed asynchronously
    mule.call_task_manager_async(
        self.__class__,
        '_execute_async',
        self.cluster.id,
        supertask.id,
        nodes_to_provision_deploy=nodes_ids_to_deploy,
        deployment_tasks=deployment_tasks,
        force=force,
        graph_type=graph_type,
        current_cluster_status=current_cluster_status,
        **kwargs
    )
    return supertask
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None,
            force=False, graph_type=None, **kwargs):
    """Create a pending deployment supertask and run it via a mule."""
    logger.info(
        u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id
        )
    )
    try:
        self.check_running_task()
    except errors.TaskAlreadyRunning:
        raise errors.DeploymentAlreadyStarted(
            'Cannot perform the actions because '
            'there are another running tasks.'
        )

    dry_run = is_dry_run(kwargs)
    supertask = Task(
        name=self.deployment_type,
        cluster=self.cluster,
        dry_run=dry_run,
        status=consts.TASK_STATUSES.pending,
    )
    db().add(supertask)

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = (nodes_to_provision_deploy or
                       TaskHelper.nodes_to_deploy(self.cluster, force))
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

    self.ensure_nodes_changed(
        nodes_to_provision, nodes_to_deploy, nodes_to_delete
    )

    db().flush()
    TaskHelper.create_action_log(supertask)

    current_cluster_status = self.cluster.status
    if not dry_run:
        # A real run flips the cluster into the 'deployment' state.
        self.cluster.status = consts.CLUSTER_STATUSES.deployment

    # Commit so that other processes (the mule) can see the task.
    db().commit()

    # The mule owns a separate DB session; hand over plain ids.
    if nodes_to_provision_deploy:
        nodes_ids_to_deploy = [node.id
                               for node in nodes_to_provision_deploy]
    else:
        nodes_ids_to_deploy = None

    mule.call_task_manager_async(
        self.__class__,
        '_execute_async',
        self.cluster.id,
        supertask.id,
        nodes_to_provision_deploy=nodes_ids_to_deploy,
        deployment_tasks=deployment_tasks,
        force=force,
        graph_type=graph_type,
        current_cluster_status=current_cluster_status,
        **kwargs
    )
    return supertask
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None):
    """Kick off a deployment supertask for this cluster asynchronously."""
    logger.info(
        u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id
        )
    )

    network_info = self.serialize_network_cfg(self.cluster)
    logger.info(
        u"Network info:\n{0}".format(
            jsonutils.dumps(network_info, indent=4)
        )
    )

    self.check_no_running_deployment(self.cluster)
    self._remove_obsolete_tasks()

    supertask = Task(name=self.deployment_type, cluster=self.cluster)
    db().add(supertask)

    to_delete = TaskHelper.nodes_to_delete(self.cluster)
    to_deploy = (nodes_to_provision_deploy or
                 TaskHelper.nodes_to_deploy(self.cluster))
    to_provision = TaskHelper.nodes_to_provision(self.cluster)

    if not (to_provision or to_deploy or to_delete):
        # Nothing to do -- undo the pending task and bail out.
        db().rollback()
        raise errors.WrongNodeStatus("No changes to deploy")

    db().flush()
    TaskHelper.create_action_log(supertask)

    # we should have task committed for processing in other threads
    db().commit()

    # The mule process owns a separate DB session, so hand over ids.
    if nodes_to_provision_deploy:
        deploy_node_ids = [n.id for n in nodes_to_provision_deploy]
    else:
        deploy_node_ids = None

    mule.call_task_manager_async(
        self.__class__,
        '_execute_async',
        self.cluster.id,
        supertask.id,
        nodes_to_provision_deploy=deploy_node_ids,
        deployment_tasks=deployment_tasks
    )
    return supertask
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None,
            force=False, graph_type=None, **kwargs):
    """Create a pending deployment supertask and execute it in a mule."""
    logger.info(
        u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id
        )
    )
    self.check_no_running_deployment(self.cluster)
    self._remove_obsolete_tasks()

    supertask = Task(
        name=self.deployment_type,
        cluster=self.cluster,
        status=consts.TASK_STATUSES.pending,
    )
    db().add(supertask)

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    if nodes_to_provision_deploy:
        nodes_to_deploy = nodes_to_provision_deploy
    else:
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster, force)
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

    self.ensure_nodes_changed(
        nodes_to_provision, nodes_to_deploy, nodes_to_delete
    )

    db().flush()
    TaskHelper.create_action_log(supertask)

    # we should have task committed for processing in other threads
    db().commit()

    # Pass plain ids: the mule process uses its own DB session.
    deploy_ids = None
    if nodes_to_provision_deploy:
        deploy_ids = [node.id for node in nodes_to_provision_deploy]

    mule.call_task_manager_async(
        self.__class__,
        '_execute_async',
        self.cluster.id,
        supertask.id,
        nodes_to_provision_deploy=deploy_ids,
        deployment_tasks=deployment_tasks,
        force=force,
        graph_type=graph_type
    )
    return supertask
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None,
            force=False, graph_type=None, **kwargs):
    """Start cluster deployment as an asynchronous supertask.

    :param nodes_to_provision_deploy: optional explicit list of Node
        objects; overrides the nodes computed by TaskHelper
    :param deployment_tasks: optional deployment task names, forwarded
        to the async execution
    :param force: forwarded to TaskHelper.nodes_to_deploy and the
        async execution
    :param graph_type: deployment graph type, forwarded as-is
    :returns: the newly created pending supertask

    NOTE(review): extra **kwargs are accepted but not forwarded to
    '_execute_async' -- confirm this is intentional.
    """
    logger.info(
        u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id
        )
    )
    self.check_no_running_deployment(self.cluster)
    self._remove_obsolete_tasks()

    supertask = Task(name=self.deployment_type, cluster=self.cluster,
                     status=consts.TASK_STATUSES.pending)
    db().add(supertask)

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = nodes_to_provision_deploy or \
        TaskHelper.nodes_to_deploy(self.cluster, force)
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

    # raises if there is nothing to provision, deploy or delete
    self.ensure_nodes_changed(
        nodes_to_provision, nodes_to_deploy, nodes_to_delete
    )

    db().flush()
    TaskHelper.create_action_log(supertask)

    # we should have task committed for processing in other threads
    db().commit()

    # the mule runs in a separate process with its own DB session,
    # so pass plain node ids rather than DB instances
    nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy]
                           if nodes_to_provision_deploy else None)
    mule.call_task_manager_async(
        self.__class__,
        '_execute_async',
        self.cluster.id,
        supertask.id,
        nodes_to_provision_deploy=nodes_ids_to_deploy,
        deployment_tasks=deployment_tasks,
        force=force,
        graph_type=graph_type
    )
    return supertask
def execute(self, nodes_to_deployment, deployment_tasks=None,
            graph_type=None, force=False, **kwargs):
    """Run deployment task on specified nodes via an async mule call."""
    deployment_tasks = deployment_tasks or []
    self.check_running_task()

    fqdns = [objects.Node.get_node_fqdn(n) for n in nodes_to_deployment]
    logger.debug('Nodes to deploy: {0}'.format(' '.join(fqdns)))

    node_ids = [n.id for n in nodes_to_deployment]

    dry_run = is_dry_run(kwargs)
    transaction_name = self.get_deployment_transaction_name(dry_run)
    task_deployment = Task(
        name=transaction_name,
        cluster=self.cluster,
        dry_run=dry_run,
        status=consts.TASK_STATUSES.pending
    )
    db().add(task_deployment)

    if not dry_run:
        # A real deployment flips the cluster into 'deployment' state.
        self.cluster.status = consts.CLUSTER_STATUSES.deployment

    db().commit()

    # The mule runs in a separate process; pass ids, not DB objects.
    mule.call_task_manager_async(
        self.__class__,
        '_execute_async',
        self.cluster.id,
        task_deployment.id,
        nodes_ids_to_deployment=node_ids,
        deployment_tasks=deployment_tasks,
        graph_type=graph_type,
        force=force,
        dry_run=kwargs.get('dry_run', False),
        noop_run=kwargs.get('noop_run', False)
    )
    return task_deployment
def execute(self, nodes_to_deployment, deployment_tasks=None,
            graph_type=None, force=False, **kwargs):
    """Schedule deployment of the given nodes as a pending async task."""
    if not deployment_tasks:
        deployment_tasks = []
    self.check_running_task()

    logger.debug('Nodes to deploy: {0}'.format(' '.join(
        objects.Node.get_node_fqdn(n) for n in nodes_to_deployment)))

    nodes_ids = [node.id for node in nodes_to_deployment]

    is_dry = is_dry_run(kwargs)
    task_deployment = Task(
        name=self.get_deployment_transaction_name(is_dry),
        cluster=self.cluster,
        dry_run=is_dry,
        status=consts.TASK_STATUSES.pending
    )
    db().add(task_deployment)

    # Only a real (non-dry) run changes the cluster state.
    if not is_dry:
        self.cluster.status = consts.CLUSTER_STATUSES.deployment

    db().commit()

    # Hand off plain identifiers: the mule has its own DB session.
    mule.call_task_manager_async(
        self.__class__,
        '_execute_async',
        self.cluster.id,
        task_deployment.id,
        nodes_ids_to_deployment=nodes_ids,
        deployment_tasks=deployment_tasks,
        graph_type=graph_type,
        force=force,
        dry_run=kwargs.get('dry_run', False),
        noop_run=kwargs.get('noop_run', False)
    )
    return task_deployment
def continue_(self, transaction):
    """Schedule continuation of *transaction* in a uWSGI mule.

    A transaction may consist of several sub-transactions that run
    one-by-one; '_continue_async' performs the actual picking of the
    first pending one and sends it to execution.

    :param transaction: a top-level transaction to continue
    """
    with try_transaction(transaction, suppress=True):
        # The mule is a separate process without our DB session, so we
        # pass plain identifiers instead of fetched DB instances.
        mule.call_task_manager_async(
            self.__class__,
            '_continue_async',
            self.cluster_id,
            transaction.id,
        )
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None):
    """Validate pending changes and launch deployment asynchronously."""
    cluster = self.cluster
    logger.info(u"Trying to start deployment at cluster '{0}'".format(
        cluster.name or cluster.id))

    network_info = self.serialize_network_cfg(cluster)
    logger.info(u"Network info:\n{0}".format(
        jsonutils.dumps(network_info, indent=4)))

    self.check_no_running_deployment(cluster)
    self._remove_obsolete_tasks()

    supertask = Task(name=self.deployment_type, cluster=cluster)
    db().add(supertask)

    nodes_to_delete = TaskHelper.nodes_to_delete(cluster)
    nodes_to_deploy = (nodes_to_provision_deploy
                       or TaskHelper.nodes_to_deploy(cluster))
    nodes_to_provision = TaskHelper.nodes_to_provision(cluster)

    if not any((nodes_to_provision, nodes_to_deploy, nodes_to_delete)):
        # No pending changes -- drop the task we just added.
        db().rollback()
        raise errors.WrongNodeStatus("No changes to deploy")

    db().flush()
    TaskHelper.create_action_log(supertask)

    # we should have task committed for processing in other threads
    db().commit()

    # The mule has its own DB session; pass plain node ids.
    nodes_ids_to_deploy = None
    if nodes_to_provision_deploy:
        nodes_ids_to_deploy = [n.id for n in nodes_to_provision_deploy]

    mule.call_task_manager_async(
        self.__class__, '_execute_async', cluster.id, supertask.id,
        nodes_to_provision_deploy=nodes_ids_to_deploy,
        deployment_tasks=deployment_tasks)
    return supertask
def execute(self):
    """Launch a full cluster deployment supertask asynchronously."""
    logger.info(u"Trying to start deployment at cluster '{0}'".format(
        self.cluster.name or self.cluster.id))

    network_info = self.serialize_network_cfg(self.cluster)
    logger.info(u"Network info:\n{0}".format(
        jsonutils.dumps(network_info, indent=4)))

    self._remove_obsolete_tasks()

    supertask = Task(name=consts.TASK_NAMES.deploy, cluster=self.cluster)
    db().add(supertask)

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

    has_changes = any(
        (nodes_to_provision, nodes_to_deploy, nodes_to_delete))
    if not has_changes:
        # Nothing to do: drop the pending task we just added.
        db().rollback()
        raise errors.WrongNodeStatus("No changes to deploy")

    # we should have task committed for processing in other threads
    db().commit()
    TaskHelper.create_action_log(supertask)

    mule.call_task_manager_async(
        self.__class__,
        '_execute_async',
        self.cluster.id,
        supertask.id,
    )
    return supertask