Example #1
    def execute(self):
        logger.info(u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id))

        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(u"Network info:\n{0}".format(
            jsonutils.dumps(network_info, indent=4)))

        self._remove_obsolete_tasks()

        supertask = Task(name=consts.TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)

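        # Collect the sets of nodes that need to be deleted, deployed, or provisioned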
        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")

        # we should have task committed for processing in other threads
        db().commit()
        TaskHelper.create_action_log(supertask)

        mule.call_task_manager_async(self.__class__, '_execute_async',
                                     self.cluster.id, supertask.id)

        return supertask
Example #2
File: manager.py Project: anlaneg/fuel-web
    def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None,
                force=False, graph_type=None, **kwargs):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )
        try:
            self.check_running_task()
        except errors.TaskAlreadyRunning:
            raise errors.DeploymentAlreadyStarted(
                'Cannot perform the actions because '
                'there is another running task.'
            )

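        # Create the parent "supertask" that all deployment subtasks will be attached to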
        supertask = Task(name=self.deployment_type, cluster=self.cluster,
                         dry_run=is_dry_run(kwargs),
                         status=consts.TASK_STATUSES.pending)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = nodes_to_provision_deploy or \
            TaskHelper.nodes_to_deploy(self.cluster, force)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        self.ensure_nodes_changed(
            nodes_to_provision, nodes_to_deploy, nodes_to_delete
        )

        db().flush()
        TaskHelper.create_action_log(supertask)

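        # Remember the pre-deployment cluster status; it is passed on to the async worker below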
        current_cluster_status = self.cluster.status
        # update cluster status
        if not is_dry_run(kwargs):
            self.cluster.status = consts.CLUSTER_STATUSES.deployment

        # we should have task committed for processing in other threads
        db().commit()
        nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy]
                               if nodes_to_provision_deploy else None)
        # The task will be executed asynchronously
        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster.id,
            supertask.id,
            nodes_to_provision_deploy=nodes_ids_to_deploy,
            deployment_tasks=deployment_tasks,
            force=force,
            graph_type=graph_type,
            current_cluster_status=current_cluster_status,
            **kwargs
        )

        return supertask
Example #3
    def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None,
                force=False, graph_type=None, **kwargs):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )
        try:
            self.check_running_task()
        except errors.TaskAlreadyRunning:
            raise errors.DeploymentAlreadyStarted(
                'Cannot perform the actions because '
                'there is another running task.'
            )

        supertask = Task(name=self.deployment_type, cluster=self.cluster,
                         dry_run=is_dry_run(kwargs),
                         status=consts.TASK_STATUSES.pending)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = nodes_to_provision_deploy or \
            TaskHelper.nodes_to_deploy(self.cluster, force)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        self.ensure_nodes_changed(
            nodes_to_provision, nodes_to_deploy, nodes_to_delete
        )

        db().flush()
        TaskHelper.create_action_log(supertask)

        current_cluster_status = self.cluster.status
        # update cluster status
        if not is_dry_run(kwargs):
            self.cluster.status = consts.CLUSTER_STATUSES.deployment

        # we should have task committed for processing in other threads
        db().commit()
        nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy]
                               if nodes_to_provision_deploy else None)
        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster.id,
            supertask.id,
            nodes_to_provision_deploy=nodes_ids_to_deploy,
            deployment_tasks=deployment_tasks,
            force=force,
            graph_type=graph_type,
            current_cluster_status=current_cluster_status,
            **kwargs
        )

        return supertask
Example #4
    def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )

        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(
            u"Network info:\n{0}".format(
                jsonutils.dumps(network_info, indent=4)
            )
        )

        self.check_no_running_deployment(self.cluster)
        self._remove_obsolete_tasks()

        supertask = Task(name=self.deployment_type, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = nodes_to_provision_deploy or \
            TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")

        db().flush()
        TaskHelper.create_action_log(supertask)

        # we should have task committed for processing in other threads
        db().commit()
        nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy]
                               if nodes_to_provision_deploy else None)
        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster.id,
            supertask.id,
            nodes_to_provision_deploy=nodes_ids_to_deploy,
            deployment_tasks=deployment_tasks
        )

        return supertask
Example #5
    def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None,
                force=False, graph_type=None, **kwargs):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )

        self.check_no_running_deployment(self.cluster)
        self._remove_obsolete_tasks()

        supertask = Task(name=self.deployment_type, cluster=self.cluster,
                         status=consts.TASK_STATUSES.pending)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = nodes_to_provision_deploy or \
            TaskHelper.nodes_to_deploy(self.cluster, force)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        self.ensure_nodes_changed(
            nodes_to_provision, nodes_to_deploy, nodes_to_delete
        )

        db().flush()
        TaskHelper.create_action_log(supertask)

        # we should have task committed for processing in other threads
        db().commit()
        nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy]
                               if nodes_to_provision_deploy else None)
        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster.id,
            supertask.id,
            nodes_to_provision_deploy=nodes_ids_to_deploy,
            deployment_tasks=deployment_tasks,
            force=force,
            graph_type=graph_type
        )

        return supertask
Example #6
    def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None,
                force=False, graph_type=None, **kwargs):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )

        self.check_no_running_deployment(self.cluster)
        self._remove_obsolete_tasks()

        supertask = Task(name=self.deployment_type, cluster=self.cluster,
                         status=consts.TASK_STATUSES.pending)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = nodes_to_provision_deploy or \
            TaskHelper.nodes_to_deploy(self.cluster, force)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        self.ensure_nodes_changed(
            nodes_to_provision, nodes_to_deploy, nodes_to_delete
        )

        db().flush()
        TaskHelper.create_action_log(supertask)

        # we should have task committed for processing in other threads
        db().commit()
        nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy]
                               if nodes_to_provision_deploy else None)
        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster.id,
            supertask.id,
            nodes_to_provision_deploy=nodes_ids_to_deploy,
            deployment_tasks=deployment_tasks,
            force=force,
            graph_type=graph_type
        )

        return supertask
Example #7
File: manager.py Project: SergK/fuel-web
    def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None):
        logger.info(u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id))

        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(u"Network info:\n{0}".format(
            jsonutils.dumps(network_info, indent=4)))

        self.check_no_running_deployment(self.cluster)
        self._remove_obsolete_tasks()

        supertask = Task(name=self.deployment_type, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = nodes_to_provision_deploy or \
            TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")

        db().flush()
        TaskHelper.create_action_log(supertask)

        # we should have task committed for processing in other threads
        db().commit()
        nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy]
                               if nodes_to_provision_deploy else None)
        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster.id,
            supertask.id,
            nodes_to_provision_deploy=nodes_ids_to_deploy,
            deployment_tasks=deployment_tasks)

        return supertask
Example #8
    def execute(self):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )

        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(
            u"Network info:\n{0}".format(
                jsonutils.dumps(network_info, indent=4)
            )
        )

        self._remove_obsolete_tasks()

        supertask = Task(name=consts.TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")

        # we should have task committed for processing in other threads
        db().commit()
        TaskHelper.create_action_log(supertask)

        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster.id,
            supertask.id,
        )

        return supertask
Example #9
File: manager.py Project: SergK/fuel-web
    def _execute_async_content(self,
                               supertask,
                               deployment_tasks=None,
                               nodes_to_provision_deploy=None):
        """Processes supertask async in mule
        :param supertask: SqlAlchemy task object
        """

        nodes_to_delete = []

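        # If an explicit list of node ids was passed in, provision/deploy only
        # those nodes; otherwise derive the node sets from the cluster state.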
        if nodes_to_provision_deploy:
            nodes_to_deploy = objects.NodeCollection.get_by_ids(
                nodes_to_provision_deploy)
            nodes_to_provision = filter(
                lambda n: any([n.pending_addition, n.needs_reprovision]),
                nodes_to_deploy)
        else:
            nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
            nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
            nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)

        objects.Cluster.adjust_nodes_lists_on_controller_removing(
            self.cluster, nodes_to_delete, nodes_to_deploy)

        task_messages = []
        # Run validation if user didn't redefine
        # provisioning and deployment information

        if not nodes_to_provision_deploy and \
            (not objects.Cluster.get_provisioning_info(self.cluster) and
                not objects.Cluster.get_deployment_info(self.cluster)):
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = self.delete_nodes(supertask, nodes_to_delete)

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)

            logger.debug(
                "There are nodes to provision: %s", " ".join([
                    objects.Node.get_node_fqdn(n) for n in nodes_to_provision
                ]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(
                consts.TASK_NAMES.provision, weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(task_provision,
                                                    tasks.ProvisionTask,
                                                    nodes_to_provision,
                                                    method_name='message')
            db().commit()

            task_provision = objects.Task.get_by_uid(task_provision.id,
                                                     fail_if_not_found=True,
                                                     lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == consts.TASK_STATUSES.error:
                return

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)

            logger.debug(
                "There are nodes to deploy: %s", " ".join(
                    [objects.Node.get_node_fqdn(n) for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(
                name=consts.TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                nodes_to_deploy,
                deployment_tasks=deployment_tasks,
                method_name='message')

            db().commit()
            task_deployment = objects.Task.get_by_uid(task_deployment.id,
                                                      fail_if_not_found=True,
                                                      lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == consts.TASK_STATUSES.error:
                return

            task_deployment.cache = deployment_message
            db().commit()
            task_messages.append(deployment_message)

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision)
            for node in nodes_to_provision:
                node.status = consts.NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(self.cluster.id,
                                   fail_if_not_found=True,
                                   lock_for_update=True)
        self.cluster.status = consts.CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        # We have to execute the node deletion task only when the provision,
        # deployment and other tasks are in the database. Otherwise,
        # it may be executed too quickly (e.g. in our tests) and this
        # will affect the parent task calculation - it will be marked
        # as 'ready' because by that time it has only two subtasks
        # - network_check and node_deletion - and they're ready.
        # To avoid that wrong behavior, let's send the deletion
        # task for execution only when the other subtasks are in
        # the database.
        if task_deletion:
            self._call_silently(task_deletion,
                                tasks.DeletionTask,
                                tasks.DeletionTask.get_task_nodes_for_cluster(
                                    self.cluster),
                                check_ceph=True)
            db().commit()

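        # Send all prepared task messages to the orchestrator in a single RPC cast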
        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format(
            self.cluster.name or self.cluster.id, supertask.uuid))
Example #10
    def execute(self):
        logger.info(u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id))

        current_tasks = db().query(Task).filter_by(cluster_id=self.cluster.id,
                                                   name="deploy")
        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                for subtask in task.subtasks:
                    db().delete(subtask)
                db().delete(task)
                db().commit()

        task_messages = []

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        self.cluster.status = 'deployment'
        db().add(self.cluster)
        db().commit()

        supertask = Task(name="deploy", cluster=self.cluster)
        db().add(supertask)
        db().commit()

        # checking admin intersection with untagged
        network_info = NetworkConfigurationSerializer.serialize_for_cluster(
            self.cluster)
        check_networks = supertask.create_subtask('check_networks')
        self._call_silently(check_networks,
                            tasks.CheckNetworksTask,
                            data=network_info,
                            check_admin_untagged=True)
        db().refresh(check_networks)
        if check_networks.status == 'error':
            return supertask
        db().delete(check_networks)
        db().commit()

        # checking prerequisites
        check_before = supertask.create_subtask('check_before_deployment')
        logger.debug("Checking prerequisites task: %s", check_before.uuid)
        self._call_silently(check_before, tasks.CheckBeforeDeploymentTask)
        db().refresh(check_before)
        # if failed to check prerequisites
        # then task is already set to error
        if check_before.status == 'error':
            logger.debug("Checking prerequisites failed: %s",
                         check_before.message)
            return supertask
        logger.debug(
            "Checking prerequisites is successful, starting deployment...")
        db().delete(check_before)
        db().commit()

        # in case of Red Hat
        if self.cluster.release.operating_system == "RHEL":
            try:
                redhat_messages = self._redhat_messages(
                    supertask,
                    # provision only?
                    [{
                        "uid": n.id,
                        "platform_name": n.platform_name
                    } for n in nodes_to_provision])
            except Exception as exc:
                TaskHelper.update_task_status(supertask.uuid,
                                              status='error',
                                              progress=100,
                                              msg=str(exc))
                return supertask
            task_messages.extend(redhat_messages)
        # /in case of Red Hat

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = supertask.create_subtask("node_deletion")
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            self._call_silently(task_deletion, tasks.DeletionTask)

        if nodes_to_provision:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))
            task_provision = supertask.create_subtask("provision")
            # we assume here that task_provision just adds the system to
            # cobbler and reboots it, so it has an extremely small weight
            task_provision.weight = 0.05
            provision_message = self._call_silently(task_provision,
                                                    tasks.ProvisionTask,
                                                    method_name='message')
            db().refresh(task_provision)

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == 'error':
                return supertask

            task_provision.cache = provision_message
            db().add(task_provision)
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask("deployment")
            deployment_message = self._call_silently(task_deployment,
                                                     tasks.DeploymentTask,
                                                     method_name='message')

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == 'error':
                return supertask

            task_deployment.cache = deployment_message
            db().add(task_deployment)
            db().commit()
            task_messages.append(deployment_message)

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format(
            self.cluster.name or self.cluster.id, supertask.uuid))
        return supertask
Example #11
    def execute(self):
        logger.info(u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id))

        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(u"Network info:\n{0}".format(
            jsonutils.dumps(network_info, indent=4)))

        self._remove_obsolete_tasks()

        supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")

        # we should have task committed for processing in other threads
        db().commit()

        # Run validation if user didn't redefine
        # provisioning and deployment information

        if (not objects.Cluster.get_provisioning_info(self.cluster)
                and not objects.Cluster.get_deployment_info(self.cluster)):
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion,
                                                     weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            # we should have task committed for processing in other threads
            db().commit()
            self._call_silently(task_deletion, tasks.DeletionTask)

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision)
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(TASK_NAMES.provision,
                                                      weight=task_weight)
            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(task_provision,
                                                    tasks.ProvisionTask,
                                                    nodes_to_provision,
                                                    method_name='message')

            task_provision = objects.Task.get_by_uid(task_provision.id,
                                                     fail_if_not_found=True,
                                                     lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == TASK_STATUSES.error:
                return supertask

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)
            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(task_deployment,
                                                     tasks.DeploymentTask,
                                                     nodes_to_deploy,
                                                     method_name='message')

            task_deployment = objects.Task.get_by_uid(task_deployment.id,
                                                      fail_if_not_found=True,
                                                      lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == TASK_STATUSES.error:
                return supertask

            task_deployment.cache = deployment_message
            db().commit()
            task_messages.append(deployment_message)

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision)
            for node in nodes_to_provision:
                node.status = NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(self.cluster.id,
                                   fail_if_not_found=True,
                                   lock_for_update=True)
        self.cluster.status = CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format(
            self.cluster.name or self.cluster.id, supertask.uuid))
        return supertask
Example #12
    def execute(self):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )

        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(
            u"Network info:\n{0}".format(
                jsonutils.dumps(network_info, indent=4)
            )
        )

        current_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id,
            name='deploy')

        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                db().delete(task)
                db().commit()

        obsolete_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id,
        ).filter(
            Task.name.in_([
                'stop_deployment',
                'reset_environment'
            ])
        )
        for task in obsolete_tasks:
            db().delete(task)
        db().commit()

        task_messages = []

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        supertask = Task(name='deploy', cluster=self.cluster)
        db().add(supertask)
        db().commit()

        # Run validation if user didn't redefine
        # provisioning and deployment information
        if not self.cluster.replaced_provisioning_info \
           and not self.cluster.replaced_deployment_info:
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask("node_deletion",
                                                     weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            self._call_silently(task_deletion, tasks.DeletionTask)

        if nodes_to_provision:
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask("provision",
                                                      weight=task_weight)
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )
            db().refresh(task_provision)

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == 'error':
                return supertask

            task_provision.cache = provision_message
            db().add(task_provision)
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask("deployment")
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                nodes_to_deploy,
                method_name='message'
            )

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == 'error':
                return supertask

            task_deployment.cache = deployment_message
            db().add(task_deployment)
            db().commit()
            task_messages.append(deployment_message)

        if nodes_to_provision:
            for node in nodes_to_provision:
                node.status = 'provisioning'
                db().commit()

        self.cluster.status = 'deployment'
        db().add(self.cluster)
        db().commit()

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id,
                supertask.uuid
            )
        )
        return supertask
Example #13
    def _execute_async_content(self, supertask):
        """Processes supertask async in mule
        :param supertask: SqlAlchemy task object
        """

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        # Run validation if user didn't redefine
        # provisioning and deployment information

        if (not objects.Cluster.get_provisioning_info(self.cluster)
                and not objects.Cluster.get_deployment_info(self.cluster)):
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(
                consts.TASK_NAMES.node_deletion, weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)

            self._call_silently(
                task_deletion, tasks.DeletionTask,
                tasks.DeletionTask.get_task_nodes_for_cluster(self.cluster))
            # we should have task committed for processing in other threads
            db().commit()

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision)
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(
                consts.TASK_NAMES.provision, weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(task_provision,
                                                    tasks.ProvisionTask,
                                                    nodes_to_provision,
                                                    method_name='message')

            task_provision = objects.Task.get_by_uid(task_provision.id,
                                                     fail_if_not_found=True,
                                                     lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == consts.TASK_STATUSES.error:
                return

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(
                consts.TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(task_deployment,
                                                     tasks.DeploymentTask,
                                                     nodes_to_deploy,
                                                     method_name='message')

            task_deployment = objects.Task.get_by_uid(task_deployment.id,
                                                      fail_if_not_found=True,
                                                      lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == consts.TASK_STATUSES.error:
                return

            task_deployment.cache = deployment_message
            db().commit()
            task_messages.append(deployment_message)

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision)
            for node in nodes_to_provision:
                node.status = consts.NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(self.cluster.id,
                                   fail_if_not_found=True,
                                   lock_for_update=True)
        self.cluster.status = consts.CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format(
            self.cluster.name or self.cluster.id, supertask.uuid))
Example #14
    def execute(self):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id,
            )
        )
        current_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id,
            name="deploy"
        )
        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                for subtask in task.subtasks:
                    db().delete(subtask)
                db().delete(task)
                db().commit()

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        self.cluster.status = 'deployment'
        db().add(self.cluster)
        db().commit()

        supertask = Task(
            name="deploy",
            cluster=self.cluster
        )
        db().add(supertask)
        db().commit()
        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = supertask.create_subtask("node_deletion")
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            self._call_silently(
                task_deletion,
                tasks.DeletionTask
            )

        task_messages = []
        if nodes_to_provision:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))
            task_provision = supertask.create_subtask("provision")
            # we assume here that task_provision just adds the system to
            # cobbler and reboots it, so it has an extremely small weight
            task_provision.weight = 0.05
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                method_name='message'
            )
            task_provision.cache = provision_message
            db().add(task_provision)
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask("deployment")
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                method_name='message'
            )
            task_deployment.cache = deployment_message
            db().add(task_deployment)
            db().commit()
            task_messages.append(deployment_message)

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id,
                supertask.uuid
            )
        )
        return supertask
Example #15
    def execute(self):
        # Start applying the deployment changes
        logger.info(u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id))
        # Log the network info (network verification runs before an OpenStack deployment)
        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(u"Network info:\n{0}".format(
            jsonutils.dumps(network_info, indent=4)))

        self._remove_obsolete_tasks()  # drop obsolete (stale) tasks

        supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        # For an OpenStack environment, keep the original flow and check whether any nodes in the cluster have changed
        if self.cluster.cluster_type == 1:
            if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
                db().rollback()
                raise errors.WrongNodeStatus("No changes to deploy")

        # we should have task committed for processing in other threads
        db().commit()
        TaskHelper.create_action_log(supertask)

        # Run validation if user didn't redefine
        # provisioning and deployment information

        if (not objects.Cluster.get_provisioning_info(self.cluster)
                and not objects.Cluster.get_deployment_info(self.cluster)):
            try:
                if self.cluster.cluster_type == 1:
                    self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion,
                                                     weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)

            self._call_silently(task_deletion, tasks.DeletionTask)
            # we should have task committed for processing in other threads
            db().commit()

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision)
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(TASK_NAMES.provision,
                                                      weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(task_provision,
                                                    tasks.ProvisionTask,
                                                    nodes_to_provision,
                                                    method_name='message')

            task_provision = objects.Task.get_by_uid(task_provision.id,
                                                     fail_if_not_found=True,
                                                     lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == TASK_STATUSES.error:
                return supertask

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)
        else:
            pass

        #nodes_to_deploy=self.cluster.nodes
        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(task_deployment,
                                                     tasks.DeploymentTask,
                                                     nodes_to_deploy,
                                                     method_name='message')

            # clusterdeploymsg = ClusterdeployMsg(cluster_id=self.cluster.id,cluster_deploymsg='deployment_message')
            # db().add(clusterdeploymsg)
            logger.info(u'Applying deployment changes; updating the cluster_deploy_msg table')
            data = {}
            data['cluster_id'] = self.cluster.id
            data['cluster_deploymsg'] = jsonutils.dumps(deployment_message)
            clusterdeploymsg = db().query(ClusterdeployMsg).filter_by(
                cluster_id=self.cluster.id).first()

            if clusterdeploymsg:
                objects.ClusterdeployMsgObject.update(
                    clusterdeploymsg,
                    {'cluster_deploymsg': data['cluster_deploymsg']})
            else:
                objects.ClusterdeployMsgCollection.create(data)

            task_deployment = objects.Task.get_by_uid(task_deployment.id,
                                                      fail_if_not_found=True,
                                                      lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == TASK_STATUSES.error:
                return supertask

            task_deployment.cache = deployment_message
            db().commit()
            task_messages.append(deployment_message)
        else:
            pass
        if len(nodes_to_delete) <= 0 and len(nodes_to_deploy) <= 0:
            # We cannot tell whether this is a start or a stop just by checking
            # whether nodes_to_deploy is empty: if nodes_to_delete is non-empty
            # while nodes_to_deploy is empty, the code below would still run
            # and the program would raise an exception in that case.
            logger.info(u'Starting or stopping the deployment here')
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            # In a customized OpenStack environment the "deploy changes" and
            # start/stop buttons do not go through this code; /opt/start.py is
            # invoked directly instead. Currently only the cloudmaster and ebs
            # environments reach this code.
            db().commit()
            deployment_message = self.get_task_deploy_msg()
            deploymsg = jsonutils.loads(deployment_message)
            deploymsg['args']['task_uuid'] = task_deployment.uuid
            # deployment_info[] is a list whose elements are dicts.
            # The 'label' parameter under each role is added and modified only here.
            deployment_info_list = deploymsg['args']['deployment_info']
            oprolename = web.cookies().get("oprolename")
            opaction = web.cookies().get("opaction")

            # Drop role entries that are unrelated to the start/stop operation
            for deployment_info in deployment_info_list:
                if deployment_info["role"] == oprolename:
                    deploymsg['args']['deployment_info'] = []
                    deploymsg['args']['deployment_info'].append(
                        deployment_info)

            for deployment_info in deployment_info_list:  # this is a list
                deployment_info_keys = deployment_info.keys()
                changelable_keys = []
                operationalrole = ""
                for key in deployment_info_keys:
                    if key.lower() == oprolename.lower():
                        operationalrole = key
                    else:
                        changelable_keys.append(key)

                deployment_info[operationalrole]['action'] = opaction
                deployment_info[operationalrole]['label'] = '0'
                for key in changelable_keys:
                    if (type(deployment_info[key]) == dict and
                            deployment_info[key].get('label') is not None):
                        deployment_info[key]['label'] = '1'

            logger.info(deployment_info[operationalrole]['action'])
            logger.info(oprolename)

            task_deployment = objects.Task.get_by_uid(task_deployment.id,
                                                      fail_if_not_found=True,
                                                      lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == TASK_STATUSES.error:
                return supertask

            task_deployment.cache = deploymsg
            db().commit()
            task_messages.append(deploymsg)

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision)
            for node in nodes_to_provision:
                node.status = NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(self.cluster.id,
                                   fail_if_not_found=True,
                                   lock_for_update=True)
        self.cluster.status = CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format(
            self.cluster.name or self.cluster.id, supertask.uuid))
        return supertask
Example #16
    def _execute_async_content(self, supertask, deployment_tasks=None,
                               nodes_to_provision_deploy=None, force=False,
                               graph_type=None):
        """Processes supertask async in mule

        :param supertask: SqlAlchemy task object
        """

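        # affected_nodes collects nodes that are already deployed (status 'ready')
        # and may need certain tasks re-run when the cluster is operational.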
        nodes_to_delete = []
        affected_nodes = []

        if nodes_to_provision_deploy:
            nodes_to_deploy = objects.NodeCollection.get_by_ids(
                nodes_to_provision_deploy)
            nodes_to_provision = filter(lambda n: any([
                n.pending_addition,
                n.needs_reprovision]),
                nodes_to_deploy)
        else:
            nodes_to_deploy = self.get_nodes_to_deploy(force=force)
            nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
            nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)

        objects.Cluster.adjust_nodes_lists_on_controller_removing(
            self.cluster, nodes_to_delete, nodes_to_deploy)

        task_messages = []
        # Run validation if user didn't redefine
        # provisioning and deployment information

        if not (nodes_to_provision_deploy or
                objects.Cluster.get_provisioning_info(self.cluster) or
                objects.Cluster.get_deployment_info(self.cluster)):
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return

        if self.cluster.status == consts.CLUSTER_STATUSES.operational:
            # rerun particular tasks on all deployed nodes
            modified_node_ids = {n.id for n in nodes_to_deploy}
            modified_node_ids.update(n.id for n in nodes_to_provision)
            modified_node_ids.update(n.id for n in nodes_to_delete)
            affected_nodes = objects.Cluster.get_nodes_by_status(
                self.cluster,
                status=consts.NODE_STATUSES.ready,
                exclude=modified_node_ids
            ).all()

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = self.delete_nodes(supertask, nodes_to_delete)

        if nodes_to_provision:
            logger.debug("There are nodes to provision: %s",
                         " ".join([objects.Node.get_node_fqdn(n)
                                   for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(
                consts.TASK_NAMES.provision,
                status=consts.TASK_STATUSES.pending,
                weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )
            db().commit()

            task_provision = objects.Task.get_by_uid(
                task_provision.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == consts.TASK_STATUSES.error:
                return

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        deployment_message = None
        if (nodes_to_deploy or affected_nodes or
                objects.Release.is_lcm_supported(self.cluster.release)):
            if nodes_to_deploy:
                logger.debug("There are nodes to deploy: %s",
                             " ".join((objects.Node.get_node_fqdn(n)
                                       for n in nodes_to_deploy)))
            if affected_nodes:
                logger.debug("There are nodes affected by deployment: %s",
                             " ".join((objects.Node.get_node_fqdn(n)
                                       for n in affected_nodes)))

            task_deployment = supertask.create_subtask(
                name=consts.TASK_NAMES.deployment,
                status=consts.TASK_STATUSES.pending
            )

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(
                task_deployment,
                self.get_deployment_task(),
                nodes_to_deploy,
                affected_nodes=affected_nodes,
                deployment_tasks=deployment_tasks,
                method_name='message',
                reexecutable_filter=consts.TASKS_TO_RERUN_ON_DEPLOY_CHANGES,
                graph_type=graph_type,
                force=force
            )

            db().commit()
            task_deployment = objects.Task.get_by_uid(
                task_deployment.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == consts.TASK_STATUSES.error:
                return

            task_deployment.cache = deployment_message
            db().commit()

        if deployment_message:
            task_messages.append(deployment_message)

        # Even if we don't have nodes to deploy, the deployment task
        # should be created. Why? Because we need to update both
        # nodes.yaml and /etc/hosts on all slaves. Since we need only
        # those two tasks, let's create a stripped version of the
        # deployment.
        if (nodes_to_delete and not nodes_to_deploy and
                not objects.Release.is_lcm_supported(self.cluster.release)):
            logger.debug(
                "No nodes to deploy, just update nodes.yaml everywhere.")

            task_deployment = supertask.create_subtask(
                name=consts.TASK_NAMES.deployment,
                status=consts.TASK_STATUSES.pending
            )
            task_message = tasks.UpdateNodesInfoTask.message(task_deployment)
            task_deployment.cache = task_message
            task_messages.append(task_message)
            db().commit()

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            for node in nodes_to_provision:
                node.status = consts.NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(
            self.cluster.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        self.cluster.status = consts.CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        # We have to execute the node deletion task only when the provision,
        # deployment and other tasks are in the database. Otherwise,
        # it may be executed too quickly (e.g. in our tests) and this
        # will affect the parent task calculation - it will be marked
        # as 'ready' because by that time it has only two subtasks
        # - network_check and node_deletion - and they're ready.
        # In order to avoid that wrong behavior, let's send the
        # deletion task for execution only when the other subtasks are in
        # the database.
        if task_deletion:
            self._call_silently(
                task_deletion,
                tasks.DeletionTask,
                tasks.DeletionTask.get_task_nodes_for_cluster(self.cluster),
                check_ceph=True)
            db().commit()

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id,
                supertask.uuid
            )
        )
예제 #17
0
파일: manager.py 프로젝트: anlaneg/fuel-web
    def _execute_async_content(self, supertask, deployment_tasks=None,
                               nodes_to_provision_deploy=None, force=False,
                               graph_type=None, current_cluster_status=None,
                               **kwargs):
        """Processes supertask async in mule

        :param supertask: SqlAlchemy task object
        :param deployment_tasks: the list of task names to execute
        :param nodes_to_provision_deploy: the list of selected node ids
        :param force: the boolean flag, if True all nodes will be deployed
        :param graph_type: the name of deployment graph to use
        :param current_cluster_status: the status of cluster that was
                                       before starting this operation
        """

        nodes_to_delete = []
        affected_nodes = []

        if nodes_to_provision_deploy:
            nodes_to_deploy = objects.NodeCollection.get_by_ids(
                nodes_to_provision_deploy)
            nodes_to_provision = filter(lambda n: any([
                n.pending_addition,
                n.needs_reprovision]),
                nodes_to_deploy)
        else:
            nodes_to_deploy = self.get_nodes_to_deploy(force=force)
            nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
            nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)

        objects.Cluster.adjust_nodes_lists_on_controller_removing(
            self.cluster, nodes_to_delete, nodes_to_deploy)

        task_messages = []
        # Run validation if user didn't redefine
        # provisioning and deployment information

        if not (nodes_to_provision_deploy or
                objects.Cluster.get_provisioning_info(self.cluster) or
                objects.Cluster.get_deployment_info(self.cluster)):
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                if current_cluster_status is not None:
                    self.cluster.status = current_cluster_status
                db().commit()
                return

        if current_cluster_status == consts.CLUSTER_STATUSES.operational:
            # rerun particular tasks on all deployed nodes
            modified_node_ids = {n.id for n in nodes_to_deploy}
            modified_node_ids.update(n.id for n in nodes_to_provision)
            modified_node_ids.update(n.id for n in nodes_to_delete)
            affected_nodes = objects.Cluster.get_nodes_by_status(
                self.cluster,
                status=consts.NODE_STATUSES.ready,
                exclude=modified_node_ids
            ).all()

        task_deletion, task_provision, task_deployment = None, None, None

        dry_run = is_dry_run(kwargs)
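        # In a dry run only the orchestrator messages are built and cached;
        # node deletion, provisioning and the cluster status change below are
        # skipped.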

        if nodes_to_delete and not dry_run:
            task_deletion = self.delete_nodes(supertask, nodes_to_delete)
            self.reset_error_message(nodes_to_delete, dry_run)

        if nodes_to_provision and not dry_run:
            logger.debug("There are nodes to provision: %s",
                         " ".join([objects.Node.get_node_fqdn(n)
                                   for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(
                consts.TASK_NAMES.provision,
                status=consts.TASK_STATUSES.pending,
                weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )
            db().commit()

            task_provision = objects.Task.get_by_uid(
                task_provision.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == consts.TASK_STATUSES.error:
                return

            self.reset_error_message(nodes_to_provision, dry_run)
            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        deployment_message = None

        if (nodes_to_deploy or affected_nodes or
                objects.Release.is_lcm_supported(self.cluster.release)):
            if nodes_to_deploy:
                logger.debug("There are nodes to deploy: %s",
                             " ".join((objects.Node.get_node_fqdn(n)
                                       for n in nodes_to_deploy)))
            if affected_nodes:
                logger.debug("There are nodes affected by deployment: %s",
                             " ".join((objects.Node.get_node_fqdn(n)
                                       for n in affected_nodes)))

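            # get_deployment_task() returns the task class used to build the
            # deployment message below; the exact class may depend on the
            # cluster's release (e.g. whether LCM/task-based deployment is
            # supported).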
            deployment_task_provider = self.get_deployment_task()

            transaction_name = self.get_deployment_transaction_name(dry_run)

            task_deployment = supertask.create_subtask(
                name=transaction_name,
                dry_run=dry_run,
                status=consts.TASK_STATUSES.pending
            )
            # we should have task committed for processing in other threads
            db().commit()
            # Build the RPC message that will be sent to the orchestrator
            deployment_message = self._call_silently(
                task_deployment,
                deployment_task_provider,
                nodes_to_deploy,
                affected_nodes=affected_nodes,
                deployment_tasks=deployment_tasks,
                method_name='message',
                reexecutable_filter=consts.TASKS_TO_RERUN_ON_DEPLOY_CHANGES,
                graph_type=graph_type,
                force=force,
                **kwargs
            )

            db().commit()
            task_deployment = objects.Task.get_by_uid(
                task_deployment.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == consts.TASK_STATUSES.error:
                return

            task_deployment.cache = deployment_message
            self.reset_error_message(nodes_to_deploy, dry_run)
            db().commit()

        if deployment_message:
            task_messages.append(deployment_message)

        # Even if we don't have nodes to deploy, the deployment task
        # should be created. Why? Because we need to update both
        # nodes.yaml and /etc/hosts on all slaves. Since we need only
        # those two tasks, let's create a stripped version of the
        # deployment.
        if (nodes_to_delete and not nodes_to_deploy and
                not dry_run and
                not objects.Release.is_lcm_supported(self.cluster.release)):
            logger.debug(
                "No nodes to deploy, just update nodes.yaml everywhere.")

            task_deployment = supertask.create_subtask(
                name=consts.TASK_NAMES.deployment,
                status=consts.TASK_STATUSES.pending
            )
            task_message = tasks.UpdateNodesInfoTask.message(task_deployment)
            task_deployment.cache = task_message
            task_messages.append(task_message)
            db().commit()

        if nodes_to_provision and not dry_run:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            for node in nodes_to_provision:
                node.status = consts.NODE_STATUSES.provisioning
            db().commit()

        if not dry_run:
            objects.Cluster.get_by_uid(
                self.cluster.id,
                fail_if_not_found=True
            )
            self.cluster.status = consts.CLUSTER_STATUSES.deployment
            db().commit()

        # We have to execute the node deletion task only when the provision,
        # deployment and other tasks are in the database. Otherwise,
        # it may be executed too quickly (e.g. in our tests) and this
        # will affect the parent task calculation - it will be marked
        # as 'ready' because by that time it has only two subtasks
        # - network_check and node_deletion - and they're ready.
        # In order to avoid that wrong behavior, let's send the
        # deletion task for execution only when the other subtasks are in
        # the database.
        if task_deletion and not dry_run:
            self._call_silently(
                task_deletion,
                tasks.DeletionTask,
                tasks.DeletionTask.get_task_nodes_for_cluster(self.cluster),
                check_ceph=True)

        if task_messages:
            db().commit()
            rpc.cast('naily', task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id,
                supertask.uuid
            )
        )
예제 #18
0
    def execute(self):
        # Start executing the deployment changes
        logger.info(u"Trying to start deployment at cluster '{0}'".format(self.cluster.name or self.cluster.id))
        # Log the network info (network verification runs before an OpenStack deployment)
        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(u"Network info:\n{0}".format(jsonutils.dumps(network_info, indent=4)))

        self._remove_obsolete_tasks()  # remove obsolete (out-of-date) tasks

        supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        # For an OpenStack environment, keep the original check for whether any nodes in the cluster have changed
        if self.cluster.cluster_type == 1:
            if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
                db().rollback()
                raise errors.WrongNodeStatus("No changes to deploy")

        # we should have task committed for processing in other threads
        db().commit()
        TaskHelper.create_action_log(supertask)

        # Run validation if user didn't redefine
        # provisioning and deployment information

        if not objects.Cluster.get_provisioning_info(self.cluster) and not objects.Cluster.get_deployment_info(
            self.cluster
        ):
            try:
                if self.cluster.cluster_type == 1:
                    self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion, weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)

            self._call_silently(task_deletion, tasks.DeletionTask)
            # we should have task committed for processing in other threads
            db().commit()

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(nodes_to_provision)
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s", " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(TASK_NAMES.provision, weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision, tasks.ProvisionTask, nodes_to_provision, method_name="message"
            )

            task_provision = objects.Task.get_by_uid(task_provision.id, fail_if_not_found=True, lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == TASK_STATUSES.error:
                return supertask

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)
        else:
            pass

        # nodes_to_deploy=self.cluster.nodes
        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s", " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(
                task_deployment, tasks.DeploymentTask, nodes_to_deploy, method_name="message"
            )

            # clusterdeploymsg = ClusterdeployMsg(cluster_id=self.cluster.id,cluster_deploymsg='deployment_message')
            # db().add(clusterdeploymsg)
            # clusterdeploymsg must only keep the deployment info saved while the
            # cluster status is 'new'. If the first deployment fails and leaves
            # error nodes, the next "deploy changes" sends messages only to those
            # error nodes; letting that partial message overwrite the original,
            # complete one would break cluster start and stop (messages would
            # then reach the error nodes only).
            logger.info(u"Deploy changes: start updating the cluster_deploy_msg table")
            data = {}
            data["cluster_id"] = self.cluster.id
            data["cluster_deploymsg"] = jsonutils.dumps(deployment_message)
            clusterdeploymsg = db().query(ClusterdeployMsg).filter_by(cluster_id=self.cluster.id).first()

            if clusterdeploymsg:
                if self.cluster.status == CLUSTER_STATUSES.new:
                    objects.ClusterdeployMsgObject.update(
                        clusterdeploymsg, {"cluster_deploymsg": data["cluster_deploymsg"]}
                    )
            else:
                objects.ClusterdeployMsgCollection.create(data)

            task_deployment = objects.Task.get_by_uid(task_deployment.id, fail_if_not_found=True, lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == TASK_STATUSES.error:
                return supertask

            task_deployment.cache = deployment_message
            db().commit()
            task_messages.append(deployment_message)
        else:
            pass
        if len(nodes_to_delete) <= 0 and len(nodes_to_deploy) <= 0:
            # We cannot decide between start and stop just by checking whether
            # nodes_to_deploy is empty: if nodes_to_delete is non-empty while
            # nodes_to_deploy is empty, the code below would still run and the
            # program would raise an exception.

            oprolename = web.cookies().get("oprolename")
            opaction = web.cookies().get("opaction")
            nodes_to_startorstop = TaskHelper.nodes_to_startorstop(self.cluster)
            if oprolename != "all":
                logger.info(u"Starting or stopping deployment for a single role")
                task_deployment = supertask.create_subtask(TASK_NAMES.deployment)

                # we should have task committed for processing in other threads
                # In the customized OpenStack environment the "deploy changes",
                # start and stop buttons do not run this code; they call
                # /opt/start.py directly. Currently only the cloudmaster and ebs
                # environments reach this branch.
                db().commit()

                deployment_message = self._call_silently(
                    task_deployment, tasks.DeploymentTask, nodes_to_startorstop, method_name="message"
                )
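                # Patch the generated message so the orchestrator's reply is
                # routed to the start/stop handler and tied to the new
                # deployment subtask.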
                deploymsg = deployment_message
                deploymsg["respond_to"] = "start_stop_resp"
                deploymsg["args"]["task_uuid"] = task_deployment.uuid
                # deployment_info is a list whose elements are dicts; the 'label'
                # parameter under each role is only added and modified here.
                deployment_info_old_list = deploymsg["args"]["deployment_info"]
                deployment_info_list = []
                nodeuids = []
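                # groupby() yields one group per run of consecutive entries that
                # share the same 'uid', so this collects the distinct node uids
                # (assuming entries for the same node are adjacent in the list).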
                for key, item in groupby(deployment_info_old_list, itemgetter("uid")):
                    nodeuids.append(key)

                deployment_info_list = deployment_info_old_list[0 : len(nodeuids)]
                # Drop the role info that is unrelated to the start/stop operation
                deploymsg["args"]["deployment_info"] = []
                for i in range(len(deployment_info_list)):
                    deployment_info = deployment_info_list[i]
                    deployment_info["role"] = oprolename
                    deployment_info["uid"] = nodeuids[i]
                    deploymsg["args"]["deployment_info"].append(deployment_info)

                for deployment_info in deployment_info_list:  # this is a list
                    deployment_info_keys = deployment_info.keys()
                    changelable_keys = []
                    operationalrole = ""
                    for key in deployment_info_keys:
                        if key.lower() == oprolename.lower():
                            operationalrole = key
                        else:
                            changelable_keys.append(key)

                    deployment_info[operationalrole]["action"] = opaction
                    deployment_info[operationalrole]["label"] = "0"
                    for key in changelable_keys:
                        if type(deployment_info[key]) == dict and deployment_info[key].get("label") is not None:
                            deployment_info[key]["label"] = "1"

                logger.info(deployment_info[operationalrole]["action"])
                logger.info(oprolename)

                task_deployment = objects.Task.get_by_uid(
                    task_deployment.id, fail_if_not_found=True, lock_for_update=True
                )
                # if failed to generate task message for orchestrator
                # then task is already set to error
                if task_deployment.status == TASK_STATUSES.error:
                    return supertask

                task_deployment.cache = deploymsg
                db().commit()
                task_messages.append(deploymsg)

            else:
                logger.info(u"Performing the one-click start/stop operation")
                serialized_cluster = deployment_serializers.serialize(self.cluster, nodes_to_startorstop)
                pre_deployment = plugins_serializers.pre_deployment_serialize(self.cluster, nodes_to_startorstop)
                post_deployment = plugins_serializers.post_deployment_serialize(self.cluster, nodes_to_startorstop)
                deployment_message = self.make_astute_message(
                    "deploy",
                    "deploy_resp",
                    {
                        "deployment_info": serialized_cluster,
                        "pre_deployment": pre_deployment,
                        "post_deployment": post_deployment,
                    },
                )
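                # Dispatch on cluster type: ebs (3) and cloudmaster (2) wrap the
                # base message via their StartAllRole helpers; the last argument
                # appears to select stop (1) vs. start (2) message generation.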
                if self.cluster.cluster_type == 3:
                    ebs_start = ebs.StartAllRole()
                    if opaction == "stop":
                        task_messages = ebs_start.make_deploy_msgs(self.cluster, supertask, deployment_message, 1)
                    else:
                        task_messages = ebs_start.make_deploy_msgs(self.cluster, supertask, deployment_message, 2)
                elif self.cluster.cluster_type == 2:
                    cloud_start = cld.StartAllRole()
                    if opaction == "stop":
                        task_messages = cloud_start.make_deploy_msgs(self.cluster, supertask, deployment_message, 1)
                    else:
                        task_messages = cloud_start.make_deploy_msgs(self.cluster, supertask, deployment_message, 2)
                else:
                    pass

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(nodes_to_provision)
            for node in nodes_to_provision:
                node.status = NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(self.cluster.id, fail_if_not_found=True, lock_for_update=True)
        self.cluster.status = CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        if task_messages:
            rpc.cast("naily", task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id, supertask.uuid
            )
        )
        return supertask
예제 #19
0
파일: manager.py 프로젝트: igajsin/fuel-web
    def execute(self):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )

        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(
            u"Network info:\n{0}".format(
                jsonutils.dumps(network_info, indent=4)
            )
        )

        self._remove_obsolete_tasks()

        supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)
        # we should have task committed for processing in other threads
        db().commit()

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")

        # Run validation if user didn't redefine
        # provisioning and deployment information
        if not self.cluster.replaced_provisioning_info and \
                not self.cluster.replaced_deployment_info:
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion,
                                                     weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            # we should have task committed for processing in other threads
            db().commit()
            self._call_silently(task_deletion, tasks.DeletionTask)

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(TASK_NAMES.provision,
                                                      weight=task_weight)
            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )

            task_provision = objects.Task.get_by_uid(
                task_provision.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == TASK_STATUSES.error:
                return supertask

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)
            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                nodes_to_deploy,
                method_name='message'
            )

            task_deployment = objects.Task.get_by_uid(
                task_deployment.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == TASK_STATUSES.error:
                return supertask

            task_deployment.cache = deployment_message
            db().commit()
            task_messages.append(deployment_message)

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            for node in nodes_to_provision:
                node.status = NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(
            self.cluster.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        self.cluster.status = CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id,
                supertask.uuid
            )
        )
        return supertask
예제 #20
0
    def execute(self):
        # Start executing the deployment changes
        logger.info(u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id))
        # Log the network info
        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(u"Network info:\n{0}".format(
            jsonutils.dumps(network_info, indent=4)))

        self._remove_obsolete_tasks()  # remove obsolete (out-of-date) tasks

        supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        # For an OpenStack environment, keep the original check for whether any nodes in the cluster have changed
        if self.cluster.cluster_type == 1:
            if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
                db().rollback()
                raise errors.WrongNodeStatus("No changes to deploy")

        # we should have task committed for processing in other threads
        db().commit()
        TaskHelper.create_action_log(supertask)

        # Run validation if user didn't redefine
        # provisioning and deployment information

        if (not objects.Cluster.get_provisioning_info(self.cluster)
                and not objects.Cluster.get_deployment_info(self.cluster)):
            try:
                if self.cluster.cluster_type == 1:
                    self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion,
                                                     weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)

            self._call_silently(task_deletion, tasks.DeletionTask)
            # we should have task committed for processing in other threads
            db().commit()

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision)
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(TASK_NAMES.provision,
                                                      weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(task_provision,
                                                    tasks.ProvisionTask,
                                                    nodes_to_provision,
                                                    method_name='message')

            task_provision = objects.Task.get_by_uid(task_provision.id,
                                                     fail_if_not_found=True,
                                                     lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == TASK_STATUSES.error:
                return supertask

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)
        else:
            pass

        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(task_deployment,
                                                     tasks.DeploymentTask,
                                                     nodes_to_deploy,
                                                     method_name='message')

            # clusterdeploymsg = ClusterdeployMsg(cluster_id=self.cluster.id,cluster_deploymsg='deployment_message')
            # db().add(clusterdeploymsg)
            logger.info(u'Start updating the cluster_deploy_msg table')
            data = {}
            data['cluster_id'] = self.cluster.id
            data['cluster_deploymsg'] = jsonutils.dumps(deployment_message)
            clusterdeploymsg = db().query(ClusterdeployMsg).filter_by(
                cluster_id=self.cluster.id).first()

            if clusterdeploymsg:
                objects.ClusterdeployMsgObject.update(
                    clusterdeploymsg,
                    {'cluster_deploymsg': data['cluster_deploymsg']})
            else:
                objects.ClusterdeployMsgCollection.create(data)

            task_deployment = objects.Task.get_by_uid(task_deployment.id,
                                                      fail_if_not_found=True,
                                                      lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == TASK_STATUSES.error:
                return supertask

            task_deployment.cache = deployment_message
            db().commit()
            task_messages.append(deployment_message)
        else:
            logger.info('there is only start or stop cluster role...')
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self.get_task_deploy_msg()
            deploymsg = jsonutils.loads(deployment_message)
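            # The cached deployment message is stored as JSON; load it back into
            # a dict so the task uuid and per-role action/label flags can be
            # patched below.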
            #logger.info(ClusterStartRole.Rolename)
            deploymsg['args']['task_uuid'] = task_deployment.uuid
            cluster_roles = []
            rolelist = db().query(Role).filter_by(
                release_id=self.cluster.release_id).filter_by(
                    cluster_type=self.cluster.cluster_type)
            #logger.debug("rolelist is s%",rolelist)
            for role in rolelist:
                cluster_roles.append(role.name)
            logger.info(cluster_roles)
            logger.info(ClusterStartRole.Rolename)
            deployment_info = deploymsg['args']['deployment_info']
            deployment_info_keys = deployment_info.keys()
            operationalrole = ""
            for key in deployment_info_keys:
                if key == ClusterStartRole.Rolename:
                    operationalrole = key

            for item in deployment_info:
                item[ClusterStartRole.Rolename]['action'] = \
                    ClusterStartRole.action
                #logger.debug("cluster_roles is s%",cluster_roles)
                for role in cluster_roles:
                    if item.get(role) is not None:  # item.get(role) returns the role object here
                        if item.get(role) != ClusterStartRole.Rolename:
                            logger.info(item.get(role))
                            item[role]['label'] = "1"
                        else:
                            logger.info(item[role])
                            item[role]['label'] = "0"

            task_deployment = objects.Task.get_by_uid(task_deployment.id,
                                                      fail_if_not_found=True,
                                                      lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == TASK_STATUSES.error:
                return supertask

            task_deployment.cache = deploymsg
            db().commit()
            task_messages.append(deploymsg)

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision)
            for node in nodes_to_provision:
                node.status = NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(self.cluster.id,
                                   fail_if_not_found=True,
                                   lock_for_update=True)
        self.cluster.status = CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        # if task_messages:
        #    rpc.cast('naily', task_messages)

        logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format(
            self.cluster.name or self.cluster.id, supertask.uuid))
        return supertask
예제 #21
0
    def _execute_async_content(self, supertask, deployment_tasks=None,
                               nodes_to_provision_deploy=None):
        """Processes supertask async in mule
        :param supertask: SqlAlchemy task object
        """

        nodes_to_delete = []

        if nodes_to_provision_deploy:
            nodes_to_deploy = objects.NodeCollection.get_by_ids(
                nodes_to_provision_deploy)
            nodes_to_provision = filter(lambda n: any([
                n.pending_addition,
                n.needs_reprovision]),
                nodes_to_deploy)
        else:
            nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
            nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
            nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)

        objects.Cluster.adjust_nodes_lists_on_controller_removing(
            self.cluster, nodes_to_delete, nodes_to_deploy)

        task_messages = []
        # Run validation if user didn't redefine
        # provisioning and deployment information

        if not nodes_to_provision_deploy and \
            (not objects.Cluster.get_provisioning_info(self.cluster) and
                not objects.Cluster.get_deployment_info(self.cluster)):
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = self.delete_nodes(supertask, nodes_to_delete)

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(
                consts.TASK_NAMES.provision,
                weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )
            db().commit()

            task_provision = objects.Task.get_by_uid(
                task_provision.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == consts.TASK_STATUSES.error:
                return

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(
                name=consts.TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                nodes_to_deploy,
                deployment_tasks=deployment_tasks,
                method_name='message'
            )

            db().commit()
            task_deployment = objects.Task.get_by_uid(
                task_deployment.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == consts.TASK_STATUSES.error:
                return

            task_deployment.cache = deployment_message
            db().commit()
            task_messages.append(deployment_message)

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            for node in nodes_to_provision:
                node.status = consts.NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(
            self.cluster.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        self.cluster.status = consts.CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        # We have to execute the node deletion task only when the provision,
        # deployment and other tasks are in the database. Otherwise,
        # it may be executed too quickly (e.g. in our tests) and this
        # will affect the parent task calculation - it will be marked
        # as 'ready' because by that time it has only two subtasks
        # - network_check and node_deletion - and they're ready.
        # In order to avoid that wrong behavior, let's send the
        # deletion task for execution only when the other subtasks are in
        # the database.
        if task_deletion:
            self._call_silently(
                task_deletion,
                tasks.DeletionTask,
                tasks.DeletionTask.get_task_nodes_for_cluster(self.cluster),
                check_ceph=True)
            db().commit()

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id,
                supertask.uuid
            )
        )
예제 #22
0
파일: manager.py 프로젝트: nfschina/fuelweb
    def execute(self):
        logger.info(u"Trying to start deployment at cluster '{0}'".format(self.cluster.name or self.cluster.id))

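        # Refuse to start if a 'deploy' task is already running; otherwise purge
        # finished deploy tasks and their subtasks before creating a new one.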
        current_tasks = db().query(Task).filter_by(cluster_id=self.cluster.id, name="deploy")
        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                for subtask in task.subtasks:
                    db().delete(subtask)
                db().delete(task)
                db().commit()

        task_messages = []

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        self.cluster.status = "deployment"
        db().add(self.cluster)
        db().commit()

        supertask = Task(name="deploy", cluster=self.cluster)
        db().add(supertask)
        db().commit()

        # checking admin intersection with untagged
        network_info = NetworkConfigurationSerializer.serialize_for_cluster(self.cluster)
        check_networks = supertask.create_subtask("check_networks")
        self._call_silently(check_networks, tasks.CheckNetworksTask, data=network_info, check_admin_untagged=True)
        db().refresh(check_networks)
        if check_networks.status == "error":
            return supertask
        db().delete(check_networks)
        db().commit()

        # checking prerequisites
        check_before = supertask.create_subtask("check_before_deployment")
        logger.debug("Checking prerequisites task: %s", check_before.uuid)
        self._call_silently(check_before, tasks.CheckBeforeDeploymentTask)
        db().refresh(check_before)
        # if failed to check prerequisites
        # then task is already set to error
        if check_before.status == "error":
            logger.debug("Checking prerequisites failed: %s", check_before.message)
            return supertask
        logger.debug("Checking prerequisites is successful, starting deployment...")
        db().delete(check_before)
        db().commit()

        # in case of Red Hat
        if self.cluster.release.operating_system == "RHEL":
            try:
                redhat_messages = self._redhat_messages(
                    supertask,
                    # provision only?
                    [{"uid": n.id, "platform_name": n.platform_name} for n in nodes_to_provision],
                )
            except Exception as exc:
                TaskHelper.update_task_status(supertask.uuid, status="error", progress=100, msg=str(exc))
                return supertask
            task_messages.extend(redhat_messages)
        # /in case of Red Hat

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = supertask.create_subtask("node_deletion")
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            self._call_silently(task_deletion, tasks.DeletionTask)

        if nodes_to_provision:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s", " ".join([n.fqdn for n in nodes_to_provision]))
            task_provision = supertask.create_subtask("provision")
            # we assume here that task_provision just adds the system to
            # cobbler and reboots it, so it has an extremely small weight
            task_provision.weight = 0.05
            provision_message = self._call_silently(task_provision, tasks.ProvisionTask, method_name="message")
            db().refresh(task_provision)

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == "error":
                return supertask

            task_provision.cache = provision_message
            db().add(task_provision)
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s", " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask("deployment")
            deployment_message = self._call_silently(task_deployment, tasks.DeploymentTask, method_name="message")

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == "error":
                return supertask

            task_deployment.cache = deployment_message
            db().add(task_deployment)
            db().commit()
            task_messages.append(deployment_message)

        if task_messages:
            rpc.cast("naily", task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id, supertask.uuid
            )
        )
        return supertask
예제 #23
0
파일: manager.py 프로젝트: xglhjk6/fuel-web
    def _execute_async_content(self,
                               supertask,
                               deployment_tasks=None,
                               nodes_to_provision_deploy=None):
        """Processes supertask async in mule

        :param supertask: SqlAlchemy task object
        """

        nodes_to_delete = []
        nodes_to_resetup = []

        if nodes_to_provision_deploy:
            nodes_to_deploy = objects.NodeCollection.get_by_ids(
                nodes_to_provision_deploy)
            nodes_to_provision = filter(
                lambda n: any([n.pending_addition, n.needs_reprovision]),
                nodes_to_deploy)
        else:
            nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
            nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
            nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)

        objects.Cluster.adjust_nodes_lists_on_controller_removing(
            self.cluster, nodes_to_delete, nodes_to_deploy)

        task_messages = []
        # Run validation if user didn't redefine
        # provisioning and deployment information

        if not (nodes_to_provision_deploy
                or objects.Cluster.get_provisioning_info(self.cluster)
                or objects.Cluster.get_deployment_info(self.cluster)):
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return

        if self.cluster.status == consts.CLUSTER_STATUSES.operational:
            # rerun particular tasks on all deployed nodes
            affected_nodes = set(nodes_to_deploy + nodes_to_provision +
                                 nodes_to_delete)
            ready_nodes = set(
                objects.Cluster.get_nodes_by_status(
                    self.cluster, consts.NODE_STATUSES.ready))
            nodes_to_resetup = ready_nodes.difference(affected_nodes)
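            # Ready nodes untouched by this operation will only get the
            # re-executable tasks (TASKS_TO_RERUN_ON_DEPLOY_CHANGES) re-applied
            # below.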

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = self.delete_nodes(supertask, nodes_to_delete)

        if nodes_to_provision:
            logger.debug(
                "There are nodes to provision: %s", " ".join([
                    objects.Node.get_node_fqdn(n) for n in nodes_to_provision
                ]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(
                consts.TASK_NAMES.provision,
                status=consts.TASK_STATUSES.pending,
                weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(task_provision,
                                                    tasks.ProvisionTask,
                                                    nodes_to_provision,
                                                    method_name='message')
            db().commit()

            task_provision = objects.Task.get_by_uid(task_provision.id,
                                                     fail_if_not_found=True,
                                                     lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == consts.TASK_STATUSES.error:
                return

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        deployment_message = None
        if nodes_to_deploy:
            logger.debug(
                "There are nodes to deploy: %s", " ".join(
                    [objects.Node.get_node_fqdn(n) for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(
                name=consts.TASK_NAMES.deployment,
                status=consts.TASK_STATUSES.pending)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                nodes_to_deploy,
                deployment_tasks=deployment_tasks,
                method_name='message')

            db().commit()
            task_deployment = objects.Task.get_by_uid(task_deployment.id,
                                                      fail_if_not_found=True,
                                                      lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == consts.TASK_STATUSES.error:
                return

            task_deployment.cache = deployment_message
            db().commit()

        if nodes_to_resetup:
            logger.debug(
                "There are nodes to resetup: %s", ", ".join(
                    [objects.Node.get_node_fqdn(n) for n in nodes_to_resetup]))

            if not deployment_message:
                task_deployment = supertask.create_subtask(
                    name=consts.TASK_NAMES.deployment,
                    status=consts.TASK_STATUSES.pending)
                # we should have task committed for processing in other threads
                db().commit()

            resetup_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                nodes_to_resetup,
                reexecutable_filter=consts.TASKS_TO_RERUN_ON_DEPLOY_CHANGES,
                method_name='message')
            db().commit()

            task_deployment = objects.Task.get_by_uid(task_deployment.id,
                                                      fail_if_not_found=True,
                                                      lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == consts.TASK_STATUSES.error:
                return

            if deployment_message:
                deployment_message['args']['deployment_info'].extend(
                    resetup_message['args']['deployment_info'])
            else:
                deployment_message = resetup_message
            task_deployment.cache = deployment_message
            db().commit()

        if deployment_message:
            task_messages.append(deployment_message)

        # Even if we don't have nodes to deploy, the deployment task
        # should be created, because we need to update both nodes.yaml
        # and /etc/hosts on all slaves. Since we need only those two
        # tasks, let's create a stripped version of the deployment.
        if nodes_to_delete and not nodes_to_deploy:
            logger.debug(
                "No nodes to deploy, just update nodes.yaml everywhere.")

            task_deployment = supertask.create_subtask(
                name=consts.TASK_NAMES.deployment,
                status=consts.TASK_STATUSES.pending)
            task_message = tasks.UpdateNodesInfoTask.message(task_deployment)
            task_deployment.cache = task_message
            task_messages.append(task_message)
            db().commit()

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision)
            for node in nodes_to_provision:
                node.status = consts.NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(self.cluster.id,
                                   fail_if_not_found=True,
                                   lock_for_update=True)
        self.cluster.status = consts.CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        # We have to execute the node deletion task only when the
        # provision, deployment and other tasks are in the database.
        # Otherwise, it may be executed too quickly (e.g. in our tests)
        # and this will affect the parent task calculation - it will be
        # marked as 'ready' because by that time it has only two
        # subtasks - network_check and node_deletion - and they're
        # ready. In order to avoid that wrong behavior, let's send the
        # deletion task for execution only when the other subtasks are
        # in the database.
        if task_deletion:
            self._call_silently(task_deletion,
                                tasks.DeletionTask,
                                tasks.DeletionTask.get_task_nodes_for_cluster(
                                    self.cluster),
                                check_ceph=True)
            db().commit()

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format(
            self.cluster.name or self.cluster.id, supertask.uuid))
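Every subtask above follows the same sequence: build the orchestrator message, re-read the subtask under a row lock, stop if message generation already flipped it to error, then cache the message and queue it. A minimal sketch of that sequence factored into one helper follows; the helper name _prepare_subtask_message is hypothetical and not part of the example, but it only uses calls that appear in the code above.

    def _prepare_subtask_message(self, task, task_cls, *args, **kwargs):
        """Hypothetical helper: build and cache one orchestrator message.

        Returns the message dict, or None when message generation failed
        and the subtask was already switched to the error status.
        """
        message = self._call_silently(
            task, task_cls, *args, method_name='message', **kwargs)
        db().commit()

        # re-read the subtask with a row lock to see its current status
        task = objects.Task.get_by_uid(task.id,
                                       fail_if_not_found=True,
                                       lock_for_update=True)
        # if message generation failed, the task is already set to error
        if task.status == consts.TASK_STATUSES.error:
            return None

        task.cache = message
        db().commit()
        return message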
예제 #24
0
    def _execute_async_content(self, supertask):

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        # Run validation if user didn't redefine
        # provisioning and deployment information

        if (not objects.Cluster.get_provisioning_info(self.cluster) and
                not objects.Cluster.get_deployment_info(self.cluster)):
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(
                consts.TASK_NAMES.node_deletion,
                weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)

            self._call_silently(
                task_deletion,
                tasks.DeletionTask,
                tasks.DeletionTask.get_task_nodes_for_cluster(self.cluster))
            # we should have task committed for processing in other threads
            db().commit()

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(
                consts.TASK_NAMES.provision,
                weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )

            task_provision = objects.Task.get_by_uid(
                task_provision.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == consts.TASK_STATUSES.error:
                return

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(
                consts.TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                nodes_to_deploy,
                method_name='message'
            )

            task_deployment = objects.Task.get_by_uid(
                task_deployment.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == consts.TASK_STATUSES.error:
                return

            task_deployment.cache = deployment_message
            db().commit()
            task_messages.append(deployment_message)

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            for node in nodes_to_provision:
                node.status = consts.NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(
            self.cluster.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        self.cluster.status = consts.CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id,
                supertask.uuid
            )
        )
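
The subtasks above are created with explicit weights ("For more accurate progress calculation", task_weight = 0.4). One plausible reading, sketched below, is that the parent task's progress is a weighted average of its subtasks' progress; this is an illustration only, not the actual TaskHelper implementation.

def weighted_progress(subtasks):
    # Illustrative only: roll per-subtask progress (0-100) and weight
    # up into a single parent progress value.
    total_weight = sum(t['weight'] for t in subtasks) or 1.0
    return sum(t['progress'] * t['weight'] for t in subtasks) / total_weight

# e.g. deletion finished, provisioning halfway, deployment not started
print(weighted_progress([
    {'weight': 0.4, 'progress': 100},  # node_deletion
    {'weight': 0.4, 'progress': 50},   # provision
    {'weight': 1.0, 'progress': 0},    # deployment (default weight assumed)
]))  # -> ~33.3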
예제 #25
0
    def execute(self):
        # Start executing the deployment changes
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )
        # Log the network information
        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(
            u"Network info:\n{0}".format(
                jsonutils.dumps(network_info, indent=4)
            )
        )

        self._remove_obsolete_tasks()  # remove obsolete (stale) tasks

        supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        # If this is an OpenStack environment, run the original check for
        # whether the cluster has any node changes
        if self.cluster.cluster_type == 1:
            if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
                db().rollback()
                raise errors.WrongNodeStatus("No changes to deploy")

        # we should have task committed for processing in other threads
        db().commit()
        TaskHelper.create_action_log(supertask)

        # Run validation if user didn't redefine
        # provisioning and deployment information

        if (not objects.Cluster.get_provisioning_info(self.cluster) and
                not objects.Cluster.get_deployment_info(self.cluster)):
            try:
                if self.cluster.cluster_type == 1:
                    self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion,
                                                     weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)

            self._call_silently(task_deletion, tasks.DeletionTask)
            # we should have task committed for processing in other threads
            db().commit()

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(TASK_NAMES.provision,
                                                      weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )

            task_provision = objects.Task.get_by_uid(
                task_provision.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == TASK_STATUSES.error:
                return supertask

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                nodes_to_deploy,
                method_name='message'
            )
            
            # clusterdeploymsg = ClusterdeployMsg(cluster_id=self.cluster.id,cluster_deploymsg='deployment_message')
            # db().add(clusterdeploymsg)
            logger.info(u"Start updating the cluster_deploy_msg table")
            data = {}
            data['cluster_id'] = self.cluster.id
            data['cluster_deploymsg'] = jsonutils.dumps(deployment_message)
            clusterdeploymsg = db().query(ClusterdeployMsg).filter_by(
                cluster_id=self.cluster.id).first()

            if clusterdeploymsg:
                objects.ClusterdeployMsgObject.update(
                    clusterdeploymsg,
                    {'cluster_deploymsg': data['cluster_deploymsg']})
            else:
                objects.ClusterdeployMsgCollection.create(data)
            

            task_deployment = objects.Task.get_by_uid(
                task_deployment.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == TASK_STATUSES.error:
                return supertask

            task_deployment.cache = deployment_message
            db().commit()
            task_messages.append(deployment_message)
        else:
            logger.info('there is only a start or stop of a cluster role...')
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self.get_task_deploy_msg()
            deploymsg = jsonutils.loads(deployment_message)
            #logger.info(ClusterStartRole.Rolename)
            deploymsg['args']['task_uuid'] = task_deployment.uuid
            cluster_roles = []
            rolelist = db().query(Role).filter_by(
                release_id=self.cluster.release_id).filter_by(
                cluster_type=self.cluster.cluster_type)
            #logger.debug("rolelist is %s", rolelist)
            for role in rolelist:
                cluster_roles.append(role.name)
            logger.info(cluster_roles)
            logger.info(ClusterStartRole.Rolename)
            deployment_info = deploymsg['args']['deployment_info']
            operationalrole = ""
            for item in deployment_info:
                if ClusterStartRole.Rolename in item:
                    operationalrole = ClusterStartRole.Rolename

            for item in deployment_info:
                item[ClusterStartRole.Rolename]['action'] = \
                    ClusterStartRole.action
                #logger.debug("cluster_roles is %s", cluster_roles)
                for role in cluster_roles:
                    # item.get(role) returns the role's config dict here
                    if item.get(role) is not None:
                        if role != ClusterStartRole.Rolename:
                            logger.info(item.get(role))
                            item[role]['label'] = "1"
                        else:
                            logger.info(item[role])
                            item[role]['label'] = "0"

            task_deployment = objects.Task.get_by_uid(
                task_deployment.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == TASK_STATUSES.error:
                return supertask

            task_deployment.cache = deploymsg
            db().commit()
            task_messages.append(deploymsg)

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            for node in nodes_to_provision:
                node.status = NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(
            self.cluster.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        self.cluster.status = CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()


        # if task_messages:
        #     rpc.cast('naily', task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id,
                supertask.uuid
            )
        )
        return supertask
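
The start/stop branch above rewrites a previously saved deployment message: the role being started or stopped is tagged with its action and label "0", and every other cluster role with label "1". The standalone sketch below replays that transformation on sample data; the ClusterStartRole class here is a stand-in exposing only the Rolename and action attributes the example uses, and the shape of deployment_info (a list with one role-keyed dict per node) is an assumption.

class ClusterStartRole(object):
    # stand-in for the real object; only these two attributes are assumed
    Rolename = 'compute'
    action = 'start'

cluster_roles = ['controller', 'compute']
deployment_info = [
    {'controller': {}, 'compute': {}},  # one role-keyed dict per node
]

for item in deployment_info:
    item[ClusterStartRole.Rolename]['action'] = ClusterStartRole.action
    for role in cluster_roles:
        if item.get(role) is not None:
            # "0" marks the role being started/stopped, "1" all others
            label = "0" if role == ClusterStartRole.Rolename else "1"
            item[role]['label'] = label

print(deployment_info)
# [{'controller': {'label': '1'},
#   'compute': {'action': 'start', 'label': '0'}}]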
예제 #26
0
    def execute(self):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id,
            )
        )

        current_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id,
            name="deploy"
        )
        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                for subtask in task.subtasks:
                    db().delete(subtask)
                db().delete(task)
                db().commit()

        task_messages = []

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        self.cluster.status = 'deployment'
        db().add(self.cluster)
        db().commit()

        supertask = Task(
            name="deploy",
            cluster=self.cluster
        )
        db().add(supertask)
        db().commit()
        if not self.cluster.replaced_provisioning_info \
           and not self.cluster.replaced_deployment_info:
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                return supertask
        # in case of Red Hat
        if self.cluster.release.operating_system == "RHEL":
            try:
                redhat_messages = self._redhat_messages(
                    supertask,
                    # provision only?
                    [
                        {"uid": n.id, "platform_name": n.platform_name}
                        for n in nodes_to_provision
                    ]
                )
            except Exception as exc:
                TaskHelper.update_task_status(
                    supertask.uuid,
                    status='error',
                    progress=100,
                    msg=str(exc)
                )
                return supertask
            task_messages.extend(redhat_messages)
        # /in case of Red Hat

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = supertask.create_subtask("node_deletion")
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            self._call_silently(
                task_deletion,
                tasks.DeletionTask
            )

        if nodes_to_provision:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))
            task_provision = supertask.create_subtask("provision")
            # we assume here that task_provision just adds the system to
            # cobbler and reboots it, so it has an extremely small weight
            task_provision.weight = 0.05
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                method_name='message'
            )
            db().refresh(task_provision)

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == 'error':
                return supertask

            task_provision.cache = provision_message
            db().add(task_provision)
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask("deployment")
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                method_name='message'
            )

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == 'error':
                return supertask

            task_deployment.cache = deployment_message
            db().add(task_deployment)
            db().commit()
            task_messages.append(deployment_message)

        if nodes_to_provision:
            for node in nodes_to_provision:
                node.status = 'provisioning'
                db().commit()

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id,
                supertask.uuid
            )
        )
        return supertask
예제 #27
0
    def execute(self):
        logger.info(u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id))

        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(u"Network info:\n{0}".format(
            json.dumps(network_info, indent=4)))

        current_tasks = db().query(Task).filter_by(cluster_id=self.cluster.id,
                                                   name='deploy')

        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                db().delete(task)
                db().commit()

        obsolete_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id, ).filter(
                Task.name.in_(['stop_deployment', 'reset_environment']))
        for task in obsolete_tasks:
            db().delete(task)
        db().commit()

        task_messages = []

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        supertask = Task(name='deploy', cluster=self.cluster)
        db().add(supertask)
        db().commit()

        # Run validation if user didn't redefine
        # provisioning and deployment information
        if not self.cluster.replaced_provisioning_info \
           and not self.cluster.replaced_deployment_info:
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                return supertask

        # in case of Red Hat
        if self.cluster.release.operating_system == "RHEL":
            try:
                redhat_messages = self._redhat_messages(
                    supertask,
                    # provision only?
                    [{
                        "uid": n.id,
                        "platform_name": n.platform_name
                    } for n in nodes_to_provision])
            except Exception as exc:
                TaskHelper.update_task_status(supertask.uuid,
                                              status='error',
                                              progress=100,
                                              msg=str(exc))
                return supertask
            task_messages.extend(redhat_messages)
        # /in case of Red Hat

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask("node_deletion",
                                                     weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            self._call_silently(task_deletion, tasks.DeletionTask)

        if nodes_to_provision:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask("provision",
                                                      weight=task_weight)
            provision_message = self._call_silently(task_provision,
                                                    tasks.ProvisionTask,
                                                    nodes_to_provision,
                                                    method_name='message')
            db().refresh(task_provision)

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == 'error':
                return supertask

            task_provision.cache = provision_message
            db().add(task_provision)
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask("deployment")
            deployment_message = self._call_silently(task_deployment,
                                                     tasks.DeploymentTask,
                                                     nodes_to_deploy,
                                                     method_name='message')

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == 'error':
                return supertask

            task_deployment.cache = deployment_message
            db().add(task_deployment)
            db().commit()
            task_messages.append(deployment_message)

        if nodes_to_provision:
            for node in nodes_to_provision:
                node.status = 'provisioning'
                db().commit()

        self.cluster.status = 'deployment'
        db().add(self.cluster)
        db().commit()

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format(
            self.cluster.name or self.cluster.id, supertask.uuid))
        return supertask
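
Every variant ends by handing task_messages to rpc.cast('naily', ...). The examples only reveal the 'args' keys they touch (task_uuid, deployment_info), so the sketch below of a complete message is mostly an assumption about the orchestrator protocol rather than the real serializer output.

provision_message = {
    'method': 'provision',           # assumed
    'respond_to': 'provision_resp',  # assumed
    'args': {
        'task_uuid': 'aaaa-bbbb',    # set per subtask, as in the examples
        'provisioning_info': {},     # assumed key
    },
}
deployment_message = {
    'method': 'deploy',              # assumed
    'respond_to': 'deploy_resp',     # assumed
    'args': {
        'task_uuid': 'cccc-dddd',
        'deployment_info': [],       # per-node role dicts, as extended above
    },
}
task_messages = [provision_message, deployment_message]
# rpc.cast('naily', task_messages) would then hand both messages to the
# orchestrator in a single call.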