def _execute_sync(self, sub_transaction):
    cluster = sub_transaction.cluster
    graph = objects.Cluster.get_deployment_graph(
        cluster, sub_transaction.graph_type)
    nodes = _get_nodes_to_run(
        cluster, graph.get("node_filter"), sub_transaction.cache.get("nodes"))

    for node in nodes:
        # set progress to show that node is in progress state
        node.progress = 1
        if not sub_transaction.dry_run:
            node.error_type = None
            node.error_msg = None

    resolver = role_resolver.RoleResolver(nodes)
    _adjust_graph_tasks(
        graph, cluster, resolver, sub_transaction.cache.get("tasks"))

    context = lcm.TransactionContext(
        _get_expected_state(cluster, nodes),
        _get_current_state(
            cluster, nodes, graph["tasks"], sub_transaction.cache.get("force")),
    )

    # Attach the desired state to the sub-transaction, so that when we
    # continue our top-level transaction the new state is calculated on
    # top of this one.
    _dump_expected_state(sub_transaction, context.new, graph["tasks"])

    message = make_astute_message(sub_transaction, context, graph, resolver)

    # Once rpc.cast() is called, the message is sent to Astute. By that
    # moment all transaction instances must exist in the database,
    # otherwise we may get a wrong result because the RPC receiver will
    # not find the entry to update.
    db().commit()
    rpc.cast("naily", [message])
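# The closing comment above captures an ordering constraint that recurs in
# almost every snippet in this collection: the task/transaction rows are
# committed before rpc.cast() so that the RPC receiver can find them when a
# response arrives. Below is a minimal, self-contained sketch of that
# ordering; FakeSession and FakeRPC are hypothetical stand-ins for db() and
# rpc, not part of Nailgun.

class FakeSession(object):
    def __init__(self):
        self.committed = set()
        self.pending = set()

    def add(self, task_uuid):
        self.pending.add(task_uuid)

    def commit(self):
        self.committed |= self.pending
        self.pending = set()


class FakeRPC(object):
    def __init__(self, session):
        self.session = session

    def cast(self, topic, message):
        # The receiver can only update task rows that are already committed.
        assert message['task_uuid'] in self.session.committed, \
            "commit the task before casting the message"


fake_session = FakeSession()
fake_rpc = FakeRPC(fake_session)

fake_session.add('deadbeef')                           # new task row
fake_session.commit()                                  # flush it first ...
fake_rpc.cast('naily', {'task_uuid': 'deadbeef'})      # ... then notify Astute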
def execute(cls, task): logger.debug("ProvisionTask.execute(task=%s)" % task.uuid) message = cls.message(task) task.cache = message db().add(task) db().commit() rpc.cast('naily', message)
def execute(self, nodes_to_provision):
    """Run provisioning task on specified nodes."""
    # locking nodes
    nodes_ids = [node.id for node in nodes_to_provision]
    nodes = objects.NodeCollection.filter_by_list(
        None, "id", nodes_ids, order_by="id")
    objects.NodeCollection.lock_for_update(nodes).all()

    objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
    logger.debug("Nodes to provision: {0}".format(
        " ".join([n.fqdn for n in nodes_to_provision])))

    task_provision = Task(name="provision")
    # node_ids is not used when installation succeeds, but it is needed
    # when installation fails
    task_provision.node_ids = nodes_ids
    db().add(task_provision)
    db().commit()

    provision_message = self._call_silently(
        task_provision,
        tasks.InstallosTask,
        nodes_to_provision,
        method_name="message"
    )

    task_provision = objects.Task.get_by_uid(
        task_provision.id, fail_if_not_found=True, lock_for_update=True)
    task_provision.cache = provision_message

    objects.NodeCollection.lock_for_update(nodes).all()
    for node in nodes_to_provision:
        node.pending_addition = False
        node.status = NODE_STATUSES.provisioning
        node.progress = 0
    db().commit()

    rpc.cast("naily", provision_message)
    logger.info(u"Provisioning message has been sent")
    return task_provision
def execute(self, nodes_to_deployment): TaskHelper.update_slave_nodes_fqdn(nodes_to_deployment) logger.debug('Nodes to deploy: {0}'.format( ' '.join([n.fqdn for n in nodes_to_deployment]))) task_deployment = Task(name='deployment', cluster=self.cluster) db().add(task_deployment) db().commit() deployment_message = self._call_silently( task_deployment, tasks.DeploymentTask, nodes_to_deployment, method_name='message') db().refresh(task_deployment) task_deployment.cache = deployment_message for node in nodes_to_deployment: node.status = 'deploying' node.progress = 0 db().commit() rpc.cast('naily', deployment_message) return task_deployment
def execute(self, nodes_to_deployment): # locking nodes for update objects.NodeCollection.lock_nodes(nodes_to_deployment) objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deployment) logger.debug("Nodes to deploy: {0}".format(" ".join([n.fqdn for n in nodes_to_deployment]))) task_deployment = Task(name="deployment", cluster=self.cluster) db().add(task_deployment) deployment_message = self._call_silently( task_deployment, tasks.DeploymentTask, nodes_to_deployment, method_name="message" ) db().refresh(task_deployment) # locking task task_deployment = objects.Task.get_by_uid(task_deployment.id, fail_if_not_found=True, lock_for_update=True) # locking nodes objects.NodeCollection.lock_nodes(nodes_to_deployment) task_deployment.cache = deployment_message for node in nodes_to_deployment: node.status = "deploying" node.progress = 0 db().commit() rpc.cast("naily", deployment_message) return task_deployment
def execute(self, nodes_to_deployment): # locking nodes for update objects.NodeCollection.lock_nodes(nodes_to_deployment) objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deployment) logger.debug('Nodes to deploy: {0}'.format(' '.join( [n.fqdn for n in nodes_to_deployment]))) task_deployment = Task(name='deployment', cluster=self.cluster) db().add(task_deployment) deployment_message = self._call_silently(task_deployment, tasks.DeploymentTask, nodes_to_deployment, method_name='message') db().refresh(task_deployment) # locking task task_deployment = objects.Task.get_by_uid(task_deployment.id, fail_if_not_found=True, lock_for_update=True) # locking nodes objects.NodeCollection.lock_nodes(nodes_to_deployment) task_deployment.cache = deployment_message for node in nodes_to_deployment: node.status = 'deploying' node.progress = 0 db().commit() rpc.cast('naily', deployment_message) return task_deployment
def execute(self):
    logger.info(u"Trying to start deployment at cluster '{0}'".format(
        self.cluster.name or self.cluster.id,
    ))

    current_tasks = orm().query(Task).filter_by(
        cluster_id=self.cluster.id, name="deploy")
    for task in current_tasks:
        if task.status == "running":
            raise errors.DeploymentAlreadyStarted()
        elif task.status in ("ready", "error"):
            for subtask in task.subtasks:
                orm().delete(subtask)
            orm().delete(task)
            orm().commit()

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
    if not any([nodes_to_deploy, nodes_to_delete]):
        raise errors.WrongNodeStatus("No changes to deploy")

    self.cluster.status = 'deployment'
    orm().add(self.cluster)
    orm().commit()

    supertask = Task(name="deploy", cluster=self.cluster)
    orm().add(supertask)
    orm().commit()

    task_deletion, task_provision, task_deployment = None, None, None

    if nodes_to_delete:
        task_deletion = supertask.create_subtask("node_deletion")
        self._call_silently(task_deletion, tasks.DeletionTask)

    if nodes_to_deploy:
        TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy)

        task_provision = supertask.create_subtask("provision")
        # we assume here that task_provision just adds systems to
        # cobbler and reboots them, so it has an extremely small weight
        task_provision.weight = 0.05
        provision_message = self._call_silently(
            task_provision, tasks.ProvisionTask, method_name='message')
        task_provision.cache = provision_message
        orm().add(task_provision)
        orm().commit()

        task_deployment = supertask.create_subtask("deployment")
        deployment_message = self._call_silently(
            task_deployment, tasks.DeploymentTask, method_name='message')
        task_deployment.cache = deployment_message
        orm().add(task_deployment)
        orm().commit()

        rpc.cast('naily', [provision_message, deployment_message])

    logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format(
        self.cluster.name or self.cluster.id, supertask.uuid))
    return supertask
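# The weight of 0.05 above (and task_weight = 0.4 in the larger apply-changes
# flow later in this collection) exists so the parent task's progress is not
# skewed by cheap steps such as adding systems to Cobbler. A rough,
# hypothetical sketch of how such weights could be folded into an aggregate
# progress figure; the helper below is illustrative only and is not
# Nailgun's API.

def weighted_progress(subtasks):
    """Aggregate (weight, progress) pairs into a single 0-100 figure."""
    total_weight = sum(weight for weight, _ in subtasks) or 1.0
    return sum(weight * progress
               for weight, progress in subtasks) / total_weight


# provisioning is cheap (weight 0.05), deployment dominates (weight 1.0)
print(weighted_progress([(0.05, 100), (1.0, 40)]))   # ~42.9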
def execute(cls, task): logger.debug("DeploymentTask.execute(task=%s)" % task.uuid) message = cls.message(task) task.cache = message orm().add(task) orm().commit() rpc.cast('naily', message)
def execute(self, nodes_to_deployment): TaskHelper.update_slave_nodes_fqdn(nodes_to_deployment) logger.debug('Nodes to deploy: {0}'.format(' '.join( [n.fqdn for n in nodes_to_deployment]))) task_deployment = Task(name='deployment', cluster=self.cluster) db().add(task_deployment) db().commit() deployment_message = self._call_silently(task_deployment, tasks.DeploymentTask, nodes_to_deployment, method_name='message') db().refresh(task_deployment) task_deployment.cache = deployment_message for node in nodes_to_deployment: node.status = 'deploying' node.progress = 0 db().commit() rpc.cast('naily', deployment_message) return task_deployment
def execute(self, task, data): task_uuid = task.uuid nodes = [] for n in task.cluster.nodes: node_json = {'uid': n.id, 'networks': []} for nic in n.interfaces: vlans = [] for ng in nic.assigned_networks: # Handle FuelWeb admin network first. if not ng.cluster_id: vlans.append(0) continue data_ng = filter( lambda i: i['name'] == ng.name, data )[0] vlans.extend(data_ng['vlans']) if not vlans: continue node_json['networks'].append( {'iface': nic.name, 'vlans': vlans} ) nodes.append(node_json) message = {'method': 'verify_networks', 'respond_to': 'verify_networks_resp', 'args': {'task_uuid': task.uuid, 'nodes': nodes}} logger.debug("Network verification is called with: %s", message) task.cache = message orm().add(task) orm().commit() rpc.cast('naily', message)
def execute(cls, task, conf=None): logger.debug("DumpTask: task={0}".format(task.uuid)) message = make_astute_message(task, 'dump_environment', 'dump_environment_resp', {'settings': conf or cls.conf()}) db().flush() rpc.cast('naily', message)
def execute(self, nodes_to_provision): """Run provisioning task on specified nodes """ objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision) logger.debug('Nodes to provision: {0}'.format( ' '.join([n.fqdn for n in nodes_to_provision]))) task_provision = Task(name='provision', cluster=self.cluster) db().add(task_provision) db().commit() provision_message = self._call_silently( task_provision, tasks.ProvisionTask, nodes_to_provision, method_name='message' ) db().refresh(task_provision) task_provision.cache = provision_message for node in nodes_to_provision: node.pending_addition = False node.status = 'provisioning' node.progress = 0 db().commit() rpc.cast('naily', provision_message) return task_provision
def execute(cls, task, data): logger.debug("%s(uuid=%s) is running" % (cls.__name__, task.uuid)) message = cls.message(task, data) task.cache = message task.result = {'release_info': data} db().add(task) db().commit() rpc.cast('naily', message)
def execute(cls, task, data): message = cls._message(task, data) logger.debug("%s method is called with: %s", task.name, message) task.cache = message db().add(task) db().commit() rpc.cast('naily', message)
def execute(cls, task, data): message = cls._message(task, data) logger.debug("%s method is called with: %s", task.name, message) task.cache = message db().add(task) db().commit() rpc.cast("naily", message)
def execute(self, task=None): # task is there for prev compatibility message = self.get_message() logger.debug("%s method is called with: %s", self.task.name, message) db().commit() rpc.cast('naily', message)
def execute(cls, task, data): logger.debug("%s(uuid=%s) is running" % (cls.__name__, task.uuid)) message = cls.message(task, data) task.cache = message task.result = {"release_info": data} db().add(task) db().commit() rpc.cast("naily", message)
def execute(self, force=False, **kwargs): try: self.clear_tasks_history(force=force) except errors.TaskAlreadyRunning: raise errors.DeploymentAlreadyStarted( "Can't reset environment '{0}' when " "running deployment task exists.".format( self.cluster.id ) ) # FIXME(aroma): remove updating of 'deployed_before' # when stop action is reworked. 'deployed_before' # flag identifies whether stop action is allowed for the # cluster. Please, refer to [1] for more details. # [1]: https://bugs.launchpad.net/fuel/+bug/1529691 objects.Cluster.set_deployed_before_flag(self.cluster, value=False) nodes = objects.Cluster.get_nodes_by_role( self.cluster, consts.VIRTUAL_NODE_TYPES.virt ) for node in nodes: objects.Node.reset_vms_created_state(node) objects.ClusterPluginLinkCollection.delete_by_cluster_id( self.cluster.id) db().commit() supertask = Task( name=consts.TASK_NAMES.reset_environment, cluster=self.cluster ) db().add(supertask) al = TaskHelper.create_action_log(supertask) reset_nodes = supertask.create_subtask( consts.TASK_NAMES.reset_nodes ) remove_keys_task = supertask.create_subtask( consts.TASK_NAMES.remove_keys ) remove_ironic_bootstrap_task = supertask.create_subtask( consts.TASK_NAMES.remove_ironic_bootstrap ) db.commit() rpc.cast('naily', [ tasks.ResetEnvironmentTask.message(reset_nodes), tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task), tasks.RemoveClusterKeys.message(remove_keys_task) ]) TaskHelper.update_action_log(supertask, al) return supertask
def execute(self): logger.debug("Creating redhat_setup task") current_tasks = db().query(Task).filter_by(name="redhat_setup") for task in current_tasks: for subtask in task.subtasks: db().delete(subtask) db().delete(task) db().commit() supertask = Task(name="redhat_setup") supertask.result = { "release_info": { "release_id": self.data["release_id"] } } db().add(supertask) db().commit() subtasks_to_create = [ ('redhat_check_credentials', tasks.RedHatCheckCredentialsTask, 0.01), ('redhat_check_licenses', tasks.RedHatCheckLicensesTask, 0.01), ('redhat_download_release', tasks.RedHatDownloadReleaseTask, 1) ] messages = [] for task_name, task_class, weight in subtasks_to_create: task = supertask.create_subtask(task_name) task.weight = weight db().add(task) db().commit() msg = self._call_silently(task, task_class, self.data, method_name='message') db().refresh(task) if task.status == 'error': TaskHelper.update_task_status(supertask.uuid, status="error", progress=100, msg=task.message) return supertask task.cache = msg db().add(task) db().commit() messages.append(msg) db().refresh(supertask) if supertask.status == 'error': return supertask rpc.cast('naily', messages) return supertask
def execute(self, **kwargs): # FIXME(aroma): remove updating of 'deployed_before' # when stop action is reworked. 'deployed_before' # flag identifies whether stop action is allowed for the # cluster. Please, refer to [1] for more details. # [1]: https://bugs.launchpad.net/fuel/+bug/1529691 objects.Cluster.set_deployed_before_flag(self.cluster, value=False) deploy_running = db().query(Task).filter_by( cluster=self.cluster, name=consts.TASK_NAMES.deploy, status='running').first() if deploy_running: raise errors.DeploymentAlreadyStarted( u"Can't reset environment '{0}' when " u"deployment is running".format(self.cluster.id)) obsolete_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, ).filter( Task.name.in_([ consts.TASK_NAMES.deploy, consts.TASK_NAMES.deployment, consts.TASK_NAMES.stop_deployment ])) for task in obsolete_tasks: db().delete(task) nodes = objects.Cluster.get_nodes_by_role( self.cluster, consts.VIRTUAL_NODE_TYPES.virt) for node in nodes: objects.Node.reset_vms_created_state(node) db().commit() supertask = Task(name=consts.TASK_NAMES.reset_environment, cluster=self.cluster) db().add(supertask) al = TaskHelper.create_action_log(supertask) remove_keys_task = supertask.create_subtask( consts.TASK_NAMES.reset_environment) remove_ironic_bootstrap_task = supertask.create_subtask( consts.TASK_NAMES.reset_environment) db.commit() rpc.cast('naily', [ tasks.ResetEnvironmentTask.message(supertask), tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task), tasks.RemoveClusterKeys.message(remove_keys_task) ]) TaskHelper.update_action_log(supertask, al) return supertask
def _execute_sync(self, sub_transaction):
    cluster = sub_transaction.cluster
    graph = objects.Cluster.get_deployment_graph(
        cluster, sub_transaction.graph_type
    )
    nodes = _get_nodes_to_run(
        cluster,
        graph.get('node_filter'),
        sub_transaction.cache.get('nodes')
    )
    logger.debug(
        "execute graph %s on nodes %s",
        sub_transaction.graph_type, [n.id for n in nodes]
    )

    for node in nodes:
        # set progress to show that node is in progress state
        node.progress = 1
        if not sub_transaction.dry_run:
            node.error_type = None
            node.error_msg = None

    # primary roles for the cluster must be initialized before the role
    # resolver is created
    objects.Cluster.set_primary_roles(cluster, nodes)
    resolver = role_resolver.RoleResolver(nodes)
    _adjust_graph_tasks(
        graph, cluster, resolver, sub_transaction.cache.get('tasks'))

    context = lcm.TransactionContext(
        _get_expected_state(cluster, nodes),
        _get_current_state(
            cluster, nodes, graph['tasks'],
            sub_transaction.cache.get('force')
        ))

    # Attach the desired state to the sub-transaction, so that when we
    # continue our top-level transaction the new state is calculated on
    # top of this one.
    _dump_expected_state(sub_transaction, context.new, graph['tasks'])

    message = make_astute_message(
        sub_transaction, context, graph, resolver
    )

    objects.Transaction.on_start(sub_transaction)
    helpers.TaskHelper.create_action_log(sub_transaction)

    # Once rpc.cast() is called, the message is sent to Astute. By that
    # moment all transaction instances must exist in the database,
    # otherwise we may get a wrong result because the RPC receiver will
    # not find the entry to update.
    db().commit()
    rpc.cast('naily', [message])
def _execute_sync(self, sub_transaction):
    cluster = sub_transaction.cluster
    graph = objects.Cluster.get_deployment_graph(
        cluster, sub_transaction.graph_type
    )
    nodes = _get_nodes_to_run(
        cluster,
        graph.get('node_filter'),
        sub_transaction.cache.get('nodes')
    )
    logger.debug(
        "execute graph %s on nodes %s",
        sub_transaction.graph_type, [n.id for n in nodes]
    )

    for node in nodes:
        # set progress to show that node is in progress state
        node.progress = 1
        if not sub_transaction.dry_run:
            node.error_type = None
            node.error_msg = None

    # primary tags for the cluster must be initialized before the tag
    # resolver is created
    objects.Cluster.set_primary_tags(cluster, nodes)
    resolver = resolvers.TagResolver(nodes)
    _adjust_graph_tasks(
        graph, cluster, resolver, sub_transaction.cache.get('tasks'))

    context = lcm.TransactionContext(
        _get_expected_state(cluster, nodes),
        _get_current_state(
            cluster, nodes, graph['tasks'],
            sub_transaction.cache.get('force')
        ))

    # Attach the desired state to the sub-transaction, so that when we
    # continue our top-level transaction the new state is calculated on
    # top of this one.
    _dump_expected_state(sub_transaction, context.new, graph['tasks'])

    message = make_astute_message(
        sub_transaction, context, graph, resolver
    )

    objects.Transaction.on_start(sub_transaction)
    helpers.TaskHelper.create_action_log(sub_transaction)

    # Once rpc.cast() is called, the message is sent to Astute. By that
    # moment all transaction instances must exist in the database,
    # otherwise we may get a wrong result because the RPC receiver will
    # not find the entry to update.
    db().commit()
    rpc.cast('naily', [message])
def execute(cls, task): logger.debug("DumpTask: task={0}".format(task.uuid)) message = make_astute_message('dump_environment', 'dump_environment_resp', { 'task_uuid': task.uuid, 'settings': cls.conf() }) task.cache = message db().add(task) db().commit() rpc.cast('naily', message)
def execute(cls, task): logger.debug("DumpTask: task=%s" % task.uuid) message = { "method": "dump_environment", "respond_to": "dump_environment_resp", "args": {"task_uuid": task.uuid, "lastdump": settings.DUMP["lastdump"]}, } task.cache = message db().add(task) db().commit() rpc.cast("naily", message)
def execute(self): deploy_running = db().query(Task).filter_by( cluster=self.cluster, name=consts.TASK_NAMES.deploy, status='running' ).first() if deploy_running: raise errors.DeploymentAlreadyStarted( u"Can't reset environment '{0}' when " u"deployment is running".format( self.cluster.id ) ) obsolete_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, ).filter( Task.name.in_([ consts.TASK_NAMES.deploy, consts.TASK_NAMES.deployment, consts.TASK_NAMES.stop_deployment ]) ) for task in obsolete_tasks: db().delete(task) nodes = objects.Cluster.get_nodes_by_role( self.cluster, consts.VIRTUAL_NODE_TYPES.virt) for node in nodes: objects.Node.reset_vms_created_state(node) db().commit() supertask = Task( name=consts.TASK_NAMES.reset_environment, cluster=self.cluster ) db().add(supertask) al = TaskHelper.create_action_log(supertask) remove_keys_task = supertask.create_subtask( consts.TASK_NAMES.reset_environment ) db.commit() rpc.cast('naily', [ tasks.ResetEnvironmentTask.message(supertask), tasks.RemoveClusterKeys.message(remove_keys_task) ]) TaskHelper.update_action_log(supertask, al) return supertask
def _continue_sync(self, transaction):
    sub_transaction = next((
        sub_transaction
        for sub_transaction in transaction.subtasks
        if sub_transaction.status == consts.TASK_STATUSES.pending), None)

    if sub_transaction is None:
        return False

    cluster = sub_transaction.cluster
    graph = objects.Cluster.get_deployment_graph(
        cluster, sub_transaction.graph_type
    )
    nodes = _get_nodes_to_run(
        cluster,
        graph.get('node_filter'),
        sub_transaction.cache.get('nodes')
    )
    for node in nodes:
        node.roles = list(set(node.roles + node.pending_roles))
        node.pending_roles = []
        node.error_type = None
        node.progress = 1

    resolver = role_resolver.RoleResolver(nodes)
    _adjust_graph_tasks(
        graph, cluster, resolver, sub_transaction.cache.get('tasks'))

    context = lcm.TransactionContext(
        _get_expected_state(cluster, nodes),
        _get_current_state(
            cluster, nodes, graph['tasks'],
            sub_transaction.cache.get('force')
        ))

    # Attach the desired state to the sub-transaction, so that when we
    # continue our top-level transaction the new state is calculated on
    # top of this one.
    _dump_expected_state(sub_transaction, context.new, graph['tasks'])

    with try_transaction(sub_transaction):
        message = make_astute_message(
            sub_transaction, context, graph, resolver)

        # Once rpc.cast() is called, the message is sent to Astute. By
        # that moment all transaction instances must exist in the
        # database, otherwise we may get a wrong result because the RPC
        # receiver will not find the entry to update.
        db().commit()
        rpc.cast('naily', [message])
def _execute_async(self, task_deployment_id, nodes_ids_to_deployment, deployment_tasks=None, graph_type=None, force=False, dry_run=False, noop_run=False): """Supposed to be executed inside separate process. :param task_deployment_id: id of task :param nodes_ids_to_deployment: node ids :param graph_type: graph type :param force: force :param dry_run: the dry run flag :param noop_run: the noop run flag """ task_deployment = objects.Task.get_by_uid( task_deployment_id, fail_if_not_found=True, lock_for_update=False ) nodes_to_deployment = objects.NodeCollection.filter_by_list( None, 'id', nodes_ids_to_deployment, order_by='id' ) self.reset_error_message(nodes_to_deployment, dry_run) deployment_message = self._call_silently( task_deployment, self.get_deployment_task(), nodes_to_deployment, deployment_tasks=deployment_tasks, method_name='message', graph_type=graph_type, force=force, dry_run=dry_run, noop_run=noop_run ) db().refresh(task_deployment) # locking task task_deployment = objects.Task.get_by_uid( task_deployment_id, fail_if_not_found=True, lock_for_update=True ) task_deployment.cache = deployment_message db().commit() rpc.cast('naily', deployment_message) return task_deployment
def execute(cls, task): logger.debug("DumpTask: task={0}".format(task.uuid)) message = make_astute_message( task, 'dump_environment', 'dump_environment_resp', { 'settings': cls.conf() } ) db().commit() rpc.cast('naily', message)
def execute(self): if not self.cluster.pending_release_id: raise errors.InvalidReleaseId( u"Can't update environment '{0}' when " u"new release Id is invalid".format(self.cluster.name)) running_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, status='running' ).filter( Task.name.in_([ 'deploy', 'deployment', 'reset_environment', 'stop_deployment' ]) ) if running_tasks.first(): raise errors.TaskAlreadyRunning( u"Can't update environment '{0}' when " u"other task is running".format( self.cluster.id ) ) nodes_to_change = TaskHelper.nodes_to_upgrade(self.cluster) objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_change) logger.debug('Nodes to update: {0}'.format( ' '.join([n.fqdn for n in nodes_to_change]))) task_update = Task(name='update', cluster=self.cluster) db().add(task_update) self.cluster.status = 'update' db().flush() deployment_message = self._call_silently( task_update, tasks.UpdateTask, nodes_to_change, method_name='message') db().refresh(task_update) task_update.cache = deployment_message for node in nodes_to_change: node.status = 'deploying' node.progress = 0 db().commit() rpc.cast('naily', deployment_message) return task_update
def execute(cls, task, deploy_task=None, provision_task=None): if provision_task: rpc.cast( 'naily', cls.message(task, provision_task), service=True ) if deploy_task: rpc.cast( 'naily', cls.message(task, deploy_task), service=True )
def execute(cls, task, deploy_task, provision_task): if provision_task: rpc.cast( 'naily', cls.message(task, provision_task), service=True ) if deploy_task: rpc.cast( 'naily', cls.message(task, deploy_task), service=True )
def execute(cls, task): logger.debug("DumpTask: task=%s" % task.uuid) message = { 'method': 'dump_environment', 'respond_to': 'dump_environment_resp', 'args': { 'task_uuid': task.uuid, 'lastdump': settings.DUMP["lastdump"] } } task.cache = message db().add(task) db().commit() rpc.cast('naily', message)
def execute(self, nodes_to_provision, **kwargs): """Run provisioning task on specified nodes.""" # locking nodes nodes_ids = [node.id for node in nodes_to_provision] nodes = objects.NodeCollection.filter_by_list( None, 'id', nodes_ids, order_by='id' ) logger.debug('Nodes to provision: {0}'.format( ' '.join([objects.Node.get_node_fqdn(n) for n in nodes_to_provision]))) task_provision = Task(name=consts.TASK_NAMES.provision, status=consts.TASK_STATUSES.pending, cluster=self.cluster) db().add(task_provision) for node in nodes: objects.Node.reset_vms_created_state(node) db().commit() provision_message = self._call_silently( task_provision, tasks.ProvisionTask, nodes_to_provision, method_name='message' ) task_provision = objects.Task.get_by_uid( task_provision.id, fail_if_not_found=True, lock_for_update=True ) task_provision.cache = provision_message objects.NodeCollection.lock_for_update(nodes).all() for node in nodes_to_provision: node.pending_addition = False node.status = consts.NODE_STATUSES.provisioning node.progress = 0 db().commit() rpc.cast('naily', provision_message) return task_provision
def delete_node_by_astute(task, node): node_to_delete = tasks.DeletionTask.format_node_to_delete(node) msg_delete = tasks.make_astute_message( task, 'remove_nodes', 'remove_nodes_resp', { 'nodes': [node_to_delete], 'check_ceph': False, 'engine': { 'url': settings.COBBLER_URL, 'username': settings.COBBLER_USER, 'password': settings.COBBLER_PASSWORD, 'master_ip': settings.MASTER_IP, } }) rpc.cast('naily', msg_delete)
def execute(self): if not self.cluster.pending_release_id: raise errors.InvalidReleaseId( u"Can't update environment '{0}' when " u"new release Id is invalid".format(self.cluster.name)) running_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, status='running' ).filter( Task.name.in_([ consts.TASK_NAMES.deploy, consts.TASK_NAMES.deployment, consts.TASK_NAMES.reset_environment, consts.TASK_NAMES.stop_deployment ]) ) if running_tasks.first(): raise errors.TaskAlreadyRunning( u"Can't update environment '{0}' when " u"other task is running".format( self.cluster.id ) ) nodes_to_change = TaskHelper.nodes_to_upgrade(self.cluster) objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_change) logger.debug('Nodes to update: {0}'.format( ' '.join([n.fqdn for n in nodes_to_change]))) task_update = Task(name=consts.TASK_NAMES.update, cluster=self.cluster) db().add(task_update) self.cluster.status = 'update' db().flush() deployment_message = self._call_silently( task_update, tasks.UpdateTask, nodes_to_change, method_name='message') db().refresh(task_update) for node in nodes_to_change: node.status = 'deploying' node.progress = 0 db().commit() rpc.cast('naily', deployment_message) return task_update
def _execute_async(self, task_provision_id, nodes_ids_to_provision, **kwargs): nodes = objects.NodeCollection.filter_by_list( None, 'id', nodes_ids_to_provision, order_by='id' ) for node in nodes: objects.Node.reset_vms_created_state(node) db().commit() task_provision = objects.Task.get_by_uid( task_provision_id, fail_if_not_found=True, lock_for_update=True ) provision_message = self._call_silently( task_provision, tasks.ProvisionTask, nodes, method_name='message' ) task_provision = objects.Task.get_by_uid( task_provision_id, fail_if_not_found=True, lock_for_update=True ) task_provision.cache = provision_message objects.NodeCollection.lock_for_update(nodes).all() for node in nodes: node.pending_addition = False node.status = consts.NODE_STATUSES.provisioning node.progress = 0 node.error_msg = None node.error_type = None db().commit() rpc.cast('naily', provision_message) return task_provision
def execute(cls, task, data): logger.debug("Download release task(uuid=%s) is running" % task.uuid) message = { 'method': 'download_release', 'respond_to': 'download_release_resp', 'args': { 'task_uuid': task.uuid, 'release_info': data } } task.cache = message task.result = {'release_info': data} db().add(task) db().commit() rpc.cast('naily', message)
def execute(self, nodes_to_provision):
    """Run provisioning task on specified nodes."""
    # locking nodes
    nodes_ids = [node.id for node in nodes_to_provision]
    nodes = objects.NodeCollection.filter_by_list(
        None, 'id', nodes_ids, order_by='id'
    )
    objects.NodeCollection.lock_for_update(nodes).all()

    objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
    logger.debug('Nodes to provision: {0}'.format(
        ' '.join([n.fqdn for n in nodes_to_provision])))

    task_provision = Task(name='provision')
    db().add(task_provision)
    db().commit()

    provision_message = self._call_silently(
        task_provision,
        tasks.InstallosTask,
        nodes_to_provision,
        method_name='message'
    )

    task_provision = objects.Task.get_by_uid(
        task_provision.id,
        fail_if_not_found=True,
        lock_for_update=True
    )
    task_provision.cache = provision_message

    objects.NodeCollection.lock_for_update(nodes).all()
    for node in nodes_to_provision:
        node.pending_addition = False
        node.status = NODE_STATUSES.provisioning
        node.progress = 0
    db().commit()

    rpc.cast('naily', provision_message)
    logger.info(u'Provisioning message has been sent')
    return task_provision
def execute(self, filters, force=False, graph_type=None, **kwargs): self.check_running_task(consts.TASK_NAMES.deployment) task = Task(name=consts.TASK_NAMES.deployment, cluster=self.cluster, status=consts.TASK_STATUSES.pending) db().add(task) nodes_to_update = objects.Cluster.get_nodes_to_update_config( self.cluster, filters.get('node_ids'), filters.get('node_role')) message = self._call_silently( task, self.get_deployment_task(), nodes_to_update, graph_type=graph_type, method_name='message', force=force ) # locking task task = objects.Task.get_by_uid( task.id, fail_if_not_found=True, lock_for_update=True ) if task.is_completed(): return task # locking nodes objects.NodeCollection.lock_nodes(nodes_to_update) task.cache = copy.copy(message) task.cache['nodes'] = [n.id for n in nodes_to_update] for node in nodes_to_update: node.status = consts.NODE_STATUSES.deploying node.progress = 0 db().commit() rpc.cast('naily', message) return task
def execute(self): logger.debug("Creating redhat_setup task") current_tasks = db().query(Task).filter_by(name="redhat_setup") for task in current_tasks: for subtask in task.subtasks: db().delete(subtask) db().delete(task) db().commit() supertask = Task(name="redhat_setup") supertask.result = {"release_info": {"release_id": self.data["release_id"]}} db().add(supertask) db().commit() subtasks_to_create = [ ("redhat_check_credentials", tasks.RedHatCheckCredentialsTask, 0.01), ("redhat_check_licenses", tasks.RedHatCheckLicensesTask, 0.01), ("redhat_download_release", tasks.RedHatDownloadReleaseTask, 1), ] messages = [] for task_name, task_class, weight in subtasks_to_create: task = supertask.create_subtask(task_name) task.weight = weight db().add(task) db().commit() msg = self._call_silently(task, task_class, self.data, method_name="message") db().refresh(task) if task.status == "error": TaskHelper.update_task_status(supertask.uuid, status="error", progress=100, msg=task.message) return supertask task.cache = msg db().add(task) db().commit() messages.append(msg) db().refresh(supertask) if supertask.status == "error": return supertask rpc.cast("naily", messages) return supertask
def delete_node_by_astute(task, node):
    node_to_delete = tasks.DeletionTask.format_node_to_delete(node)

    msg_delete = tasks.make_astute_message(
        task,
        'remove_nodes',
        'remove_nodes_resp',
        {
            'nodes': [node_to_delete],
            'check_ceph': False,
            'engine': {
                'url': settings.COBBLER_URL,
                'username': settings.COBBLER_USER,
                'password': settings.COBBLER_PASSWORD,
                'master_ip': settings.MASTER_IP,
            }
        }
    )

    rpc.cast('naily', msg_delete)
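# Older snippets in this collection build the Astute payload by hand as
# {'method': ..., 'respond_to': ..., 'args': {'task_uuid': ...}}, while the
# newer ones delegate to make_astute_message(task, method, respond_to, args).
# Below is a plausible reconstruction of that helper, inferred only from the
# hand-built messages shown here; the real implementation may add more fields.

def make_astute_message(task, method, respond_to, args):
    """Assemble the RPC payload consumed by Astute."""
    return {
        'method': method,
        'respond_to': respond_to,
        'args': dict(args, task_uuid=task.uuid),
    }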
def execute(self, nodes_to_deployment, deployment_tasks=None, graph_type=None, force=False, **kwargs): deployment_tasks = deployment_tasks or [] logger.debug('Nodes to deploy: {0}'.format( ' '.join([objects.Node.get_node_fqdn(n) for n in nodes_to_deployment]))) task_deployment = Task( name=consts.TASK_NAMES.deployment, cluster=self.cluster, status=consts.TASK_STATUSES.pending ) db().add(task_deployment) deployment_message = self._call_silently( task_deployment, self.get_deployment_task(), nodes_to_deployment, deployment_tasks=deployment_tasks, method_name='message', graph_type=graph_type, force=force) db().refresh(task_deployment) # locking task task_deployment = objects.Task.get_by_uid( task_deployment.id, fail_if_not_found=True, lock_for_update=True ) # locking nodes objects.NodeCollection.lock_nodes(nodes_to_deployment) task_deployment.cache = deployment_message for node in nodes_to_deployment: node.status = 'deploying' node.progress = 0 db().commit() rpc.cast('naily', deployment_message) return task_deployment
def execute(self, filters, force=False, **kwargs): self.check_running_task(consts.TASK_NAMES.deployment) task = Task(name=consts.TASK_NAMES.deployment, cluster=self.cluster, status=consts.TASK_STATUSES.pending) db().add(task) nodes_to_update = objects.Cluster.get_nodes_to_update_config( self.cluster, filters.get('node_ids'), filters.get('node_role')) message = self._call_silently( task, self.get_deployment_task(), nodes_to_update, method_name='message', force=force ) # locking task task = objects.Task.get_by_uid( task.id, fail_if_not_found=True, lock_for_update=True ) if task.is_completed(): return task # locking nodes objects.NodeCollection.lock_nodes(nodes_to_update) task.cache = copy.copy(message) task.cache['nodes'] = [n.id for n in nodes_to_update] for node in nodes_to_update: node.status = consts.NODE_STATUSES.deploying node.progress = 0 db().commit() rpc.cast('naily', message) return task
def execute(self): deploy_running = db().query(Task).filter_by( cluster=self.cluster, name=consts.TASK_NAMES.deploy, status='running').first() if deploy_running: raise errors.DeploymentAlreadyStarted( u"Can't reset environment '{0}' when " u"deployment is running".format(self.cluster.id)) obsolete_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, ).filter( Task.name.in_([ consts.TASK_NAMES.deploy, consts.TASK_NAMES.deployment, consts.TASK_NAMES.stop_deployment ])) for task in obsolete_tasks: db().delete(task) nodes = objects.Cluster.get_nodes_by_role( self.cluster, consts.VIRTUAL_NODE_TYPES.virt) for node in nodes: objects.Node.reset_vms_created_state(node) db().commit() supertask = Task(name=consts.TASK_NAMES.reset_environment, cluster=self.cluster) db().add(supertask) al = TaskHelper.create_action_log(supertask) remove_keys_task = supertask.create_subtask( consts.TASK_NAMES.reset_environment) db.commit() rpc.cast('naily', [ tasks.ResetEnvironmentTask.message(supertask), tasks.RemoveClusterKeys.message(remove_keys_task) ]) TaskHelper.update_action_log(supertask, al) return supertask
def execute(self, nodes_to_provision):
    """Run provisioning task on specified nodes."""
    # locking nodes
    nodes_ids = [node.id for node in nodes_to_provision]
    nodes = objects.NodeCollection.filter_by_list(
        None, 'id', nodes_ids, order_by='id')
    objects.NodeCollection.lock_for_update(nodes).all()

    objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
    logger.debug('Nodes to provision: {0}'.format(' '.join(
        [n.fqdn for n in nodes_to_provision])))

    task_provision = Task(name='provision')
    # node_ids is not used when installation succeeds, but it is needed
    # when installation fails
    task_provision.node_ids = nodes_ids
    db().add(task_provision)
    db().commit()

    provision_message = self._call_silently(
        task_provision,
        tasks.InstallosTask,
        nodes_to_provision,
        method_name='message')

    task_provision = objects.Task.get_by_uid(
        task_provision.id, fail_if_not_found=True, lock_for_update=True)
    task_provision.cache = provision_message

    objects.NodeCollection.lock_for_update(nodes).all()
    for node in nodes_to_provision:
        node.pending_addition = False
        node.status = NODE_STATUSES.provisioning
        node.progress = 0
    db().commit()

    rpc.cast('naily', provision_message)
    logger.info(u'Provisioning message has been sent')
    return task_provision
def execute(self, nodes_to_deployment, deployment_tasks=None): deployment_tasks = deployment_tasks or [] # locking nodes for update objects.NodeCollection.lock_nodes(nodes_to_deployment) objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deployment) logger.debug('Nodes to deploy: {0}'.format( ' '.join([n.fqdn for n in nodes_to_deployment]))) task_deployment = Task( name=consts.TASK_NAMES.deployment, cluster=self.cluster) db().add(task_deployment) deployment_message = self._call_silently( task_deployment, tasks.DeploymentTask, nodes_to_deployment, deployment_tasks=deployment_tasks, method_name='message') db().refresh(task_deployment) # locking task task_deployment = objects.Task.get_by_uid( task_deployment.id, fail_if_not_found=True, lock_for_update=True ) # locking nodes objects.NodeCollection.lock_nodes(nodes_to_deployment) task_deployment.cache = deployment_message for node in nodes_to_deployment: node.status = 'deploying' node.progress = 0 db().commit() rpc.cast('naily', deployment_message) return task_deployment
def execute(self, nodes_to_provision):
    """Run provisioning task on specified nodes.

    Constraints: currently this task cannot provision RedHat. To support
    RedHat, additional tasks would have to be added here, e.g. checking
    credentials, checking licenses and downloading the RedHat release.
    The status of this work can be tracked here:
    https://blueprints.launchpad.net/fuel/+spec/nailgun-separate-provisioning-for-redhat
    """
    TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
    logger.debug('Nodes to provision: {0}'.format(
        ' '.join([n.fqdn for n in nodes_to_provision])))

    task_provision = Task(name='provision', cluster=self.cluster)
    db().add(task_provision)
    db().commit()

    provision_message = self._call_silently(
        task_provision,
        tasks.ProvisionTask,
        nodes_to_provision,
        method_name='message'
    )
    db().refresh(task_provision)

    task_provision.cache = provision_message

    for node in nodes_to_provision:
        node.pending_addition = False
        node.status = 'provisioning'
        node.progress = 0

    db().commit()

    rpc.cast('naily', provision_message)

    return task_provision
def _execute_async_content(self, supertask, deployment_tasks=None,
                           nodes_to_provision_deploy=None, force=False,
                           graph_type=None, current_cluster_status=None,
                           **kwargs):
    """Process the supertask asynchronously in a mule worker.

    :param supertask: SqlAlchemy task object
    :param deployment_tasks: the list of task names to execute
    :param nodes_to_provision_deploy: the list of selected node ids
    :param force: boolean flag; if True, all nodes will be deployed
    :param graph_type: the name of the deployment graph to use
    :param current_cluster_status: the status the cluster had before this
           operation was started
    """
    nodes_to_delete = []
    affected_nodes = []

    if nodes_to_provision_deploy:
        nodes_to_deploy = objects.NodeCollection.get_by_ids(
            nodes_to_provision_deploy)
        nodes_to_provision = filter(lambda n: any([
            n.pending_addition,
            n.needs_reprovision]),
            nodes_to_deploy)
    else:
        nodes_to_deploy = self.get_nodes_to_deploy(force=force)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)

    objects.Cluster.adjust_nodes_lists_on_controller_removing(
        self.cluster, nodes_to_delete, nodes_to_deploy)

    task_messages = []
    # Run validation if the user didn't redefine provisioning and
    # deployment information
    if not (nodes_to_provision_deploy or
            objects.Cluster.get_provisioning_info(self.cluster) or
            objects.Cluster.get_deployment_info(self.cluster)):
        try:
            self.check_before_deployment(supertask)
        except errors.CheckBeforeDeploymentError:
            if current_cluster_status is not None:
                self.cluster.status = current_cluster_status
            db().commit()
            return

    if current_cluster_status == consts.CLUSTER_STATUSES.operational:
        # rerun particular tasks on all deployed nodes
        modified_node_ids = {n.id for n in nodes_to_deploy}
        modified_node_ids.update(n.id for n in nodes_to_provision)
        modified_node_ids.update(n.id for n in nodes_to_delete)
        affected_nodes = objects.Cluster.get_nodes_by_status(
            self.cluster,
            status=consts.NODE_STATUSES.ready,
            exclude=modified_node_ids
        ).all()

    task_deletion, task_provision, task_deployment = None, None, None
    dry_run = is_dry_run(kwargs)

    if nodes_to_delete and not dry_run:
        task_deletion = self.delete_nodes(supertask, nodes_to_delete)
        self.reset_error_message(nodes_to_delete, dry_run)

    if nodes_to_provision and not dry_run:
        logger.debug("There are nodes to provision: %s",
                     " ".join([objects.Node.get_node_fqdn(n)
                               for n in nodes_to_provision]))

        # For more accurate progress calculation
        task_weight = 0.4
        task_provision = supertask.create_subtask(
            consts.TASK_NAMES.provision,
            status=consts.TASK_STATUSES.pending,
            weight=task_weight)

        # the task must be committed so that other threads can see it
        db().commit()
        provision_message = self._call_silently(
            task_provision,
            tasks.ProvisionTask,
            nodes_to_provision,
            method_name='message'
        )
        db().commit()

        task_provision = objects.Task.get_by_uid(
            task_provision.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        # if generating the task message for the orchestrator failed,
        # the task is already set to error
        if task_provision.status == consts.TASK_STATUSES.error:
            return

        self.reset_error_message(nodes_to_provision, dry_run)
        task_provision.cache = provision_message
        db().commit()
        task_messages.append(provision_message)

    deployment_message = None

    if (nodes_to_deploy or affected_nodes or
            objects.Release.is_lcm_supported(self.cluster.release)):
        if nodes_to_deploy:
            logger.debug("There are nodes to deploy: %s",
                         " ".join((objects.Node.get_node_fqdn(n)
                                   for n in nodes_to_deploy)))
        if affected_nodes:
            logger.debug("There are nodes affected by deployment: %s",
                         " ".join((objects.Node.get_node_fqdn(n)
                                   for n in affected_nodes)))

        deployment_task_provider = self.get_deployment_task()
        transaction_name = self.get_deployment_transaction_name(dry_run)

        task_deployment = supertask.create_subtask(
            name=transaction_name,
            dry_run=dry_run,
            status=consts.TASK_STATUSES.pending
        )
        # the task must be committed so that other threads can see it
        db().commit()

        # build the RPC message that will be sent to the other side
        deployment_message = self._call_silently(
            task_deployment,
            deployment_task_provider,
            nodes_to_deploy,
            affected_nodes=affected_nodes,
            deployment_tasks=deployment_tasks,
            method_name='message',
            reexecutable_filter=consts.TASKS_TO_RERUN_ON_DEPLOY_CHANGES,
            graph_type=graph_type,
            force=force,
            **kwargs
        )
        db().commit()

        task_deployment = objects.Task.get_by_uid(
            task_deployment.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        # if generating the task message for the orchestrator failed,
        # the task is already set to error
        if task_deployment.status == consts.TASK_STATUSES.error:
            return

        task_deployment.cache = deployment_message
        self.reset_error_message(nodes_to_deploy, dry_run)
        db().commit()

    if deployment_message:
        task_messages.append(deployment_message)

    # Even if we don't have nodes to deploy, the deployment task should be
    # created. Why? Because we need to update both nodes.yaml and
    # /etc/hosts on all slaves. Since we need only those two tasks, let's
    # create a stripped version of deployment.
    if (nodes_to_delete and
            not nodes_to_deploy and
            not dry_run and
            not objects.Release.is_lcm_supported(self.cluster.release)):
        logger.debug(
            "No nodes to deploy, just update nodes.yaml everywhere.")

        task_deployment = supertask.create_subtask(
            name=consts.TASK_NAMES.deployment,
            status=consts.TASK_STATUSES.pending
        )
        task_message = tasks.UpdateNodesInfoTask.message(task_deployment)
        task_deployment.cache = task_message
        task_messages.append(task_message)
        db().commit()

    if nodes_to_provision and not dry_run:
        nodes_to_provision = objects.NodeCollection.lock_nodes(
            nodes_to_provision
        )
        for node in nodes_to_provision:
            node.status = consts.NODE_STATUSES.provisioning
        db().commit()

    if not dry_run:
        objects.Cluster.get_by_uid(
            self.cluster.id,
            fail_if_not_found=True
        )
        self.cluster.status = consts.CLUSTER_STATUSES.deployment
        db().commit()

    # We have to execute the node deletion task only when the provision,
    # deployment and other tasks are already in the database. Otherwise it
    # may be executed too quickly (e.g. in our tests), which would affect
    # the parent task calculation: the parent would be marked as 'ready'
    # because by that time it would have only two subtasks - network_check
    # and node_deletion - and both of them would be ready. To avoid that
    # wrong behaviour, the deletion task is sent for execution only when
    # the other subtasks are in the database.
    if task_deletion and not dry_run:
        self._call_silently(
            task_deletion,
            tasks.DeletionTask,
            tasks.DeletionTask.get_task_nodes_for_cluster(self.cluster),
            check_ceph=True)

    if task_messages:
        db().commit()
        rpc.cast('naily', task_messages)

    logger.debug(
        u"Deployment: task to deploy cluster '{0}' is {1}".format(
            self.cluster.name or self.cluster.id, supertask.uuid
        )
    )
def execute(self, nodes_to_delete, mclient_remove=True, **kwargs):
    cluster = None
    if hasattr(self, 'cluster'):
        cluster = self.cluster

    logger.info("Trying to execute node deletion task with nodes %s",
                ', '.join(str(node.id) for node in nodes_to_delete))

    self.verify_nodes_with_cluster(nodes_to_delete)
    objects.NodeCollection.lock_nodes(nodes_to_delete)

    if cluster is None:
        # DeletionTask operates on a cluster's nodes.
        # Nodes that are not in a cluster are simply deleted.
        objects.NodeCollection.delete_by_ids([
            n.id for n in nodes_to_delete])
        db().flush()

        task = Task(name=consts.TASK_NAMES.node_deletion,
                    progress=100,
                    status=consts.TASK_STATUSES.ready)
        db().add(task)
        db().flush()

        return task

    try:
        self.check_running_task()
    except errors.TaskAlreadyRunning:
        raise errors.TaskAlreadyRunning(
            'Cannot perform the actions because there are running tasks.'
        )

    task = Task(name=consts.TASK_NAMES.node_deletion,
                cluster=self.cluster)
    db().add(task)
    for node in nodes_to_delete:
        objects.Node.update(node,
                            {'status': consts.NODE_STATUSES.removing,
                             'pending_deletion': True})
    db().flush()

    nodes_to_deploy = []
    objects.Cluster.adjust_nodes_lists_on_controller_removing(
        self.cluster, nodes_to_delete, nodes_to_deploy)

    # NOTE(aroma): when a controller node is removed, all remaining
    # controllers are implicitly redeployed here in order to preserve the
    # consistency of an HA cluster.
    # The filtering below is needed because only controllers in the
    # 'ready' status may be redeployed. If one of the nodes is in the
    # 'error' state, the whole operation must be cancelled, since the
    # result of the redeployment in that case is unpredictable and the
    # user may end up with a non-working cluster.
    controllers_with_ready_status = []
    for controller in nodes_to_deploy:
        if controller.status == consts.NODE_STATUSES.error:
            raise errors.ControllerInErrorState()
        elif controller.status == consts.NODE_STATUSES.ready:
            controllers_with_ready_status.append(controller)

    if controllers_with_ready_status:
        logger.debug("There are nodes to deploy: %s",
                     " ".join([objects.Node.get_node_fqdn(n)
                               for n in controllers_with_ready_status]))
        task_deployment = task.create_subtask(
            consts.TASK_NAMES.deployment)
        deployment_message = self._call_silently(
            task_deployment,
            self.get_deployment_task(),
            controllers_with_ready_status,
            method_name='message'
        )

        db().flush()

        # if generating the task message for the orchestrator failed,
        # the task is already set to error
        if task_deployment.status == consts.TASK_STATUSES.error:
            return task_deployment

        db().commit()
        rpc.cast('naily', [deployment_message])

    db().commit()

    self._call_silently(
        task,
        tasks.DeletionTask,
        nodes=tasks.DeletionTask.prepare_nodes_for_task(
            nodes_to_delete, mclient_remove=mclient_remove))

    return task