Example #1
    def __init__(self, task, data):
        """Collect Network Groups data
        """
        self.cluster = task.cluster
        self.task = task
        self.data = data
        self.net_man = self.cluster.network_manager()
        self.net_provider = self.cluster.net_provider
        admin_ng = self.net_man.get_admin_network_group()
        fields = NetworkGroup.__mapper__.columns.keys()
        net = NetworkConfigurationSerializer.serialize_network_group(admin_ng,
                                                                     fields)
        # change Admin name for UI
        net.update(name='admin (PXE)')
        self.networks = [net]
        for ng in self.cluster.network_groups:
            net = NetworkConfigurationSerializer.serialize_network_group(
                ng,
                fields)
            self.networks.append(net)
        # merge with data['networks']
        if 'networks' in data:
            for data_net in data['networks']:
                for net in self.networks:
                    if data_net['id'] == net['id']:
                        net.update(data_net)
                        break
                else:
                    raise errors.NetworkCheckError(
                        u"Invalid network ID: {0}".format(data_net['id']),
                        add_client=False)

        self.result = []
        self.err_msgs = []
Example #2
    def __init__(self, task, data):
        """Collect Network Groups data
        """
        self.cluster = task.cluster
        self.task = task
        self.data = data
        self.net_man = self.cluster.network_manager()
        self.net_provider = self.cluster.net_provider
        admin_ng = self.net_man.get_admin_network_group()
        fields = NetworkGroup.__mapper__.columns.keys()
        net = NetworkConfigurationSerializer.serialize_network_group(
            admin_ng, fields)
        # change Admin name for UI
        net.update(name='admin (PXE)')
        self.networks = [net]
        for ng in self.cluster.network_groups:
            net = NetworkConfigurationSerializer.serialize_network_group(
                ng, fields)
            self.networks.append(net)
        # merge with data['networks']
        if 'networks' in data:
            for data_net in data['networks']:
                for net in self.networks:
                    if data_net['id'] == net['id']:
                        net.update(data_net)
                        break
                else:
                    raise errors.NetworkCheckError(
                        u"Invalid network ID: {0}".format(data_net['id']),
                        add_client=False)

        self.result = []
        self.err_msgs = []
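
Both constructors above rely on Python's for/else to validate the submitted network IDs: the else branch of the inner loop runs only when no break fired, i.e. when no stored network group matched the incoming ID. A minimal, self-contained sketch of that merge-or-fail pattern (the dicts and the exception class are illustrative stand-ins, not the project's real NetworkGroup objects or errors module):

class NetworkCheckError(Exception):
    """Stand-in for errors.NetworkCheckError."""

# serialized network groups already known to the cluster
networks = [{'id': 1, 'name': 'admin (PXE)'}, {'id': 2, 'name': 'public'}]
# networks submitted by the client; ID 99 does not exist
incoming = [{'id': 2, 'cidr': '172.16.0.0/24'}, {'id': 99, 'cidr': '10.0.0.0/24'}]

for data_net in incoming:
    for net in networks:
        if data_net['id'] == net['id']:
            net.update(data_net)   # merge submitted fields into the stored group
            break
    else:
        # reached only when the inner loop found no matching ID
        raise NetworkCheckError("Invalid network ID: {0}".format(data_net['id']))

Running this merges the fields for network 2 and then raises on the unknown ID 99, which is exactly the behavior the constructors depend on.
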
Example #3
    def PUT(self, cluster_id):
        cluster = self.get_object_or_404(
            Cluster,
            cluster_id,
            log_404=(
                "warning",
                "Error: there is no cluster "
                "with id '{0}' in DB.".format(cluster_id)))

        try:
            network_info = \
                NetworkConfigurationSerializer.serialize_for_cluster(
                    cluster
                )
            logger.info(
                u"Network info:\n{0}".format(
                    json.dumps(network_info, indent=4)
                )
            )
            task_manager = DeploymentTaskManager(
                cluster_id=cluster.id
            )
            task = task_manager.execute()
        except Exception as exc:
            logger.warn(u'ClusterChangesHandler: error while executing'
                        ' deploy task: {0}'.format(str(exc)))
            raise web.badrequest(str(exc))

        return TaskHandler.render(task)
Example #4
    def PUT(self, cluster_id):
        """:returns: JSONized Task object.
        :http: * 200 (task successfully executed)
               * 404 (cluster not found in db)
               * 400 (failed to execute task)
        """
        cluster = self.get_object_or_404(
            Cluster,
            cluster_id,
            log_404=(
                "warning",
                "Error: there is no cluster "
                "with id '{0}' in DB.".format(cluster_id)))

        try:
            network_info = \
                NetworkConfigurationSerializer.serialize_for_cluster(
                    cluster
                )
            logger.info(
                u"Network info:\n{0}".format(
                    json.dumps(network_info, indent=4)
                )
            )
            task_manager = DeploymentTaskManager(
                cluster_id=cluster.id
            )
            task = task_manager.execute()
        except Exception as exc:
            logger.warn(u'ClusterChangesHandler: error while executing'
                        ' deploy task: {0}'.format(str(exc)))
            raise web.badrequest(str(exc))

        return TaskHandler.render(task)
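
The handler in Examples 3 and 4 follows one shape: serialize the cluster's network configuration purely for logging, start a DeploymentTaskManager, and translate any exception into an HTTP 400. A hedged sketch of that try/except flow, with a plain exception standing in for web.badrequest and the serializer and task-manager classes passed in as parameters rather than imported (they are Nailgun-specific):

import json
import logging

logger = logging.getLogger(__name__)


class BadRequest(Exception):
    """Stand-in for web.badrequest; carries the error text to the client."""


def put_cluster_changes(cluster, serializer, task_manager_cls):
    try:
        # serialized only so the full network layout ends up in the log
        network_info = serializer.serialize_for_cluster(cluster)
        logger.info(u"Network info:\n%s", json.dumps(network_info, indent=4))
        task_manager = task_manager_cls(cluster_id=cluster.id)
        return task_manager.execute()
    except Exception as exc:
        logger.warning('error while executing deploy task: %s', exc)
        raise BadRequest(str(exc))
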
Example #5
    def __init__(self, task, data):
        """Collect Network Groups data
        """
        self.cluster = task.cluster
        self.task = task
        self.data = data
        self.net_man = objects.Cluster.get_network_manager(self.cluster)
        self.net_provider = self.cluster.net_provider
        admin_ng = self.net_man.get_admin_network_group()
        fields = NetworkGroup.__mapper__.columns.keys() + ['meta']
        net = NetworkConfigurationSerializer.serialize_network_group(
            admin_ng, fields)
        # change Admin name for UI
        net.update(name='admin (PXE)')
        self.networks = [net]
        for ng in self.cluster.network_groups:
            net = NetworkConfigurationSerializer.serialize_network_group(
                ng, fields)
            self.networks.append(net)
        # merge with data['networks']
        if 'networks' in data:
            for data_net in data['networks']:
                for net in self.networks:
                    if data_net['id'] == net['id']:
                        if data_net.get('meta'):
                            data_net.pop('meta')
                        net.update(data_net)
                        break
                else:
                    raise errors.NetworkCheckError(
                        u"Invalid network ID: {0}".format(data_net['id']))
        # get common networking parameters
        serializer = {
            'neutron': NeutronNetworkConfigurationSerializer,
            'nova_network': NovaNetworkConfigurationSerializer
        }
        self.network_config = serializer[self.net_provider].\
            serialize_network_params(self.cluster)
        self.network_config.update(data.get('networking_parameters', {}))

        self.result = []
        self.err_msgs = []
Example #6
    def __init__(self, task, data):
        """Collect Network Groups data
        """
        self.cluster = task.cluster
        self.task = task
        self.data = data
        self.net_man = objects.Cluster.get_network_manager(self.cluster)
        self.net_provider = self.cluster.net_provider
        admin_ng = self.net_man.get_admin_network_group()
        fields = NetworkGroup.__mapper__.columns.keys() + ['meta']
        net = NetworkConfigurationSerializer.serialize_network_group(admin_ng,
                                                                     fields)
        # change Admin name for UI
        net.update(name='admin (PXE)')
        self.networks = [net]
        for ng in self.cluster.network_groups:
            net = NetworkConfigurationSerializer.serialize_network_group(
                ng,
                fields)
            self.networks.append(net)
        # merge with data['networks']
        if 'networks' in data:
            for data_net in data['networks']:
                for net in self.networks:
                    if data_net['id'] == net['id']:
                        if data_net.get('meta'):
                            data_net.pop('meta')
                        net.update(data_net)
                        break
                else:
                    raise errors.NetworkCheckError(
                        u"Invalid network ID: {0}".format(data_net['id']))
        # get common networking parameters
        serializer = {'neutron': NeutronNetworkConfigurationSerializer,
                      'nova_network': NovaNetworkConfigurationSerializer}
        self.network_config = serializer[self.net_provider].\
            serialize_network_params(self.cluster)
        self.network_config.update(data.get('networking_parameters', {}))

        self.result = []
        self.err_msgs = []
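
Examples 5 and 6 add a second step to the constructor: the provider-specific serializer is looked up in a dict keyed by cluster.net_provider, and the serialized defaults are then overridden by any networking_parameters the client submitted. A small sketch of that dispatch-and-merge, with placeholder serializer classes and dummy defaults in place of the real Neutron/Nova ones:

class NeutronParamsSerializer(object):
    # stand-in for NeutronNetworkConfigurationSerializer
    @classmethod
    def serialize_network_params(cls, cluster):
        return {'segmentation_type': 'vlan', 'floating_ranges': []}


class NovaParamsSerializer(object):
    # stand-in for NovaNetworkConfigurationSerializer
    @classmethod
    def serialize_network_params(cls, cluster):
        return {'net_manager': 'FlatDHCPManager'}


SERIALIZERS = {
    'neutron': NeutronParamsSerializer,
    'nova_network': NovaParamsSerializer,
}


def build_network_config(cluster, data):
    # pick the serializer for this provider, then let client-supplied
    # networking_parameters override the serialized defaults
    config = SERIALIZERS[cluster.net_provider].serialize_network_params(cluster)
    config.update(data.get('networking_parameters', {}))
    return config
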
Example #7
    def execute(self):
        logger.info(u"Trying to start deployment at cluster '{0}'".format(self.cluster.name or self.cluster.id))

        current_tasks = db().query(Task).filter_by(cluster_id=self.cluster.id, name="deploy")
        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                for subtask in task.subtasks:
                    db().delete(subtask)
                db().delete(task)
                db().commit()

        task_messages = []

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        self.cluster.status = "deployment"
        db().add(self.cluster)
        db().commit()

        supertask = Task(name="deploy", cluster=self.cluster)
        db().add(supertask)
        db().commit()

        # checking admin intersection with untagged
        network_info = NetworkConfigurationSerializer.serialize_for_cluster(self.cluster)
        check_networks = supertask.create_subtask("check_networks")
        self._call_silently(check_networks, tasks.CheckNetworksTask, data=network_info, check_admin_untagged=True)
        db().refresh(check_networks)
        if check_networks.status == "error":
            return supertask
        db().delete(check_networks)
        db().commit()

        # checking prerequisites
        check_before = supertask.create_subtask("check_before_deployment")
        logger.debug("Checking prerequisites task: %s", check_before.uuid)
        self._call_silently(check_before, tasks.CheckBeforeDeploymentTask)
        db().refresh(check_before)
        # if failed to check prerequisites
        # then task is already set to error
        if check_before.status == "error":
            logger.debug("Checking prerequisites failed: %s", check_before.message)
            return supertask
        logger.debug("Checking prerequisites is successful, starting deployment...")
        db().delete(check_before)
        db().commit()

        # in case of Red Hat
        if self.cluster.release.operating_system == "RHEL":
            try:
                redhat_messages = self._redhat_messages(
                    supertask,
                    # provision only?
                    [{"uid": n.id, "platform_name": n.platform_name} for n in nodes_to_provision],
                )
            except Exception as exc:
                TaskHelper.update_task_status(supertask.uuid, status="error", progress=100, msg=str(exc))
                return supertask
            task_messages.extend(redhat_messages)
        # /in case of Red Hat

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = supertask.create_subtask("node_deletion")
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            self._call_silently(task_deletion, tasks.DeletionTask)

        if nodes_to_provision:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s", " ".join([n.fqdn for n in nodes_to_provision]))
            task_provision = supertask.create_subtask("provision")
            # we assume here that task_provision just adds system to
            # cobbler and reboots it, so it has extremely small weight
            task_provision.weight = 0.05
            provision_message = self._call_silently(task_provision, tasks.ProvisionTask, method_name="message")
            db().refresh(task_provision)

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == "error":
                return supertask

            task_provision.cache = provision_message
            db().add(task_provision)
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s", " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask("deployment")
            deployment_message = self._call_silently(task_deployment, tasks.DeploymentTask, method_name="message")

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == "error":
                return supertask

            task_deployment.cache = deployment_message
            db().add(task_deployment)
            db().commit()
            task_messages.append(deployment_message)

        if task_messages:
            rpc.cast("naily", task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id, supertask.uuid
            )
        )
        return supertask
Example #8
    def execute(self):
        logger.info(u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id, ))

        current_tasks = db().query(Task).filter_by(cluster_id=self.cluster.id,
                                                   name="deploy")
        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                for subtask in task.subtasks:
                    db().delete(subtask)
                db().delete(task)
                db().commit()

        task_messages = []

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        self.cluster.status = 'deployment'
        db().add(self.cluster)
        db().commit()

        supertask = Task(name="deploy", cluster=self.cluster)
        db().add(supertask)
        db().commit()

        # checking admin intersection with untagged
        network_info = NetworkConfigurationSerializer.serialize_for_cluster(
            self.cluster)
        check_networks = supertask.create_subtask('check_networks')
        self._call_silently(check_networks,
                            tasks.CheckNetworksTask,
                            data=network_info,
                            check_admin_untagged=True)
        db().refresh(check_networks)
        if check_networks.status == 'error':
            return supertask
        db().delete(check_networks)
        db().commit()

        # checking prerequisites
        check_before = supertask.create_subtask('check_before_deployment')
        logger.debug("Checking prerequisites task: %s", check_before.uuid)
        self._call_silently(check_before, tasks.CheckBeforeDeploymentTask)
        db().refresh(check_before)
        # if failed to check prerequisites
        # then task is already set to error
        if check_before.status == 'error':
            logger.debug("Checking prerequisites failed: %s",
                         check_before.message)
            return supertask
        logger.debug(
            "Checking prerequisites is successful, starting deployment...")
        db().delete(check_before)
        db().commit()

        # in case of Red Hat
        if self.cluster.release.operating_system == "RHEL":
            try:
                redhat_messages = self._redhat_messages(
                    supertask,
                    # provision only?
                    [{
                        "uid": n.id,
                        "platform_name": n.platform_name
                    } for n in nodes_to_provision])
            except Exception as exc:
                TaskHelper.update_task_status(supertask.uuid,
                                              status='error',
                                              progress=100,
                                              msg=str(exc))
                return supertask
            task_messages.extend(redhat_messages)
        # /in case of Red Hat

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = supertask.create_subtask("node_deletion")
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            self._call_silently(task_deletion, tasks.DeletionTask)

        if nodes_to_provision:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))
            task_provision = supertask.create_subtask("provision")
            # we assume here that task_provision just adds system to
            # cobbler and reboots it, so it has extremely small weight
            task_provision.weight = 0.05
            provision_message = self._call_silently(task_provision,
                                                    tasks.ProvisionTask,
                                                    method_name='message')
            db().refresh(task_provision)

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == 'error':
                return supertask

            task_provision.cache = provision_message
            db().add(task_provision)
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask("deployment")
            deployment_message = self._call_silently(task_deployment,
                                                     tasks.DeploymentTask,
                                                     method_name='message')

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == 'error':
                return supertask

            task_deployment.cache = deployment_message
            db().add(task_deployment)
            db().commit()
            task_messages.append(deployment_message)

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format(
            self.cluster.name or self.cluster.id, supertask.uuid))
        return supertask
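
The execute() method in Examples 7 and 8 repeats one orchestration pattern for every phase: create a subtask under the "deploy" supertask, run it through _call_silently so a failure lands on the task's status instead of propagating, then either return the supertask early (the subtask is already marked 'error') or drop the finished check and move on. A stripped-down sketch of that check-and-bail loop, with in-memory stand-ins for the Task model and the silent runner:

class Task(object):
    def __init__(self, name):
        self.name = name
        self.status = 'running'
        self.message = None
        self.subtasks = []

    def create_subtask(self, name):
        sub = Task(name)
        self.subtasks.append(sub)
        return sub


def call_silently(task, func):
    # analogue of _call_silently: turn exceptions into an 'error' status
    try:
        func()
        task.status = 'ready'
    except Exception as exc:
        task.status = 'error'
        task.message = str(exc)


def run_pre_deploy_checks(supertask, checks):
    # checks is a list of (name, callable); stop at the first failure
    for name, func in checks:
        subtask = supertask.create_subtask(name)
        call_silently(subtask, func)
        if subtask.status == 'error':
            return supertask                    # caller inspects the failed subtask
        supertask.subtasks.remove(subtask)      # analogue of db().delete(check_...)
    return supertask
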