Code example #1
    def test_deployment_serialization_ignore_customized(self, _):
        cluster = self._create_cluster_with_extensions()

        data = [{"uid": n.uid} for n in cluster.nodes]
        mserializer = mock.MagicMock()
        mserializer.return_value = mock.MagicMock()
        mserializer.return_value.serialize.return_value = data

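        # get_serializer_for_cluster is patched below so the real serializer
        # is bypassed and serialize() returns `data` verbatim; the callback
        # should then receive it unchanged despite node 0's customization.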
        with mock.patch(
                'nailgun.orchestrator.deployment_serializers.'
                'get_serializer_for_cluster',
                return_value=mserializer):
            with mock.patch('nailgun.orchestrator.deployment_serializers.'
                            'fire_callback_on_deployment_data_serialization'
                            ) as mfire_callback:

                replaced_data = ["it's", "something"]
                with mock.patch.object(
                        cluster.nodes[0], 'replaced_deployment_info',
                        new_callable=mock.Mock(return_value=replaced_data)):

                    graph = orchestrator_graph.AstuteGraph(cluster)
                    deployment_serializers.serialize(
                        graph, cluster, cluster.nodes, ignore_customized=True)

        mfire_callback.assert_called_once_with(data, cluster, cluster.nodes)
Code example #2
File: test_extensions.py Project: ogelbukh/fuel-web
    def test_deployment_serialization_ignore_customized_false(self, _):
        cluster = self._create_cluster_with_extensions(nodes_kwargs=[
            {
                'roles': ['controller'],
                'pending_addition': True
            },
            {
                'roles': ['controller'],
                'pending_addition': True
            },
            {
                'roles': ['controller'],
                'pending_addition': True
            },
            {
                'roles': ['controller'],
                'pending_addition': True
            },
        ])

        data = [{"uid": n.uid} for n in cluster.nodes]
        expected_data = copy.deepcopy(data[1:])
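        # Node 0 gets replaced_deployment_info below, so with
        # ignore_customized=False its data comes from the customization and
        # the fire callback should only see the remaining nodes.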

        mserializer = mock.MagicMock()
        mserializer.return_value = mock.MagicMock()
        mserializer.return_value.serialize.return_value = data

        with mock.patch(
                'nailgun.orchestrator.deployment_serializers.'
                'get_serializer_for_cluster',
                return_value=mserializer):
            with mock.patch(
                    'nailgun.orchestrator.deployment_serializers.'
                    'fire_callback_on_deployment_data_serialization',
            ) as mfire_callback:

                replaced_data = ["it's", "something"]
                with mock.patch.object(
                        cluster.nodes[0],
                        'replaced_deployment_info',
                        new_callable=mock.Mock(return_value=replaced_data)):

                    graph = orchestrator_graph.AstuteGraph(cluster)
                    deployment_serializers.serialize(graph,
                                                     cluster,
                                                     cluster.nodes,
                                                     ignore_customized=False)

        self.assertEqual(mfire_callback.call_args[0][0], expected_data)
        self.assertIs(mfire_callback.call_args[0][1], cluster)
        self.assertItemsEqual(mfire_callback.call_args[0][2],
                              cluster.nodes[1:])
Code example #3
File: task.py Project: mrasskazov/fuelweb
    def message(cls, task):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        task.cluster.prepare_for_deployment()
        nodes = TaskHelper.nodes_to_deploy(task.cluster)
        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(
                cluster=task.cluster).order_by(Node.id):
            # However, we must not pass nodes which are set to be deleted.
            if n.pending_deletion:
                continue

            if n.id in nodes_ids:  # It's a node which we need to redeploy
                n.pending_addition = False
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []
                if n.status in ('deploying',):
                    n.status = 'provisioned'
                n.progress = 0
                db().add(n)
                db().commit()

        # here we replace provisioning data if user redefined them
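        # replaced_deployment_info takes precedence; a fresh serialization
        # runs only when the user has not overridden the data.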
        serialized_cluster = task.cluster.replaced_deployment_info or \
            deployment_serializers.serialize(task.cluster)

        return {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'deployment_info': serialized_cluster}}
Code example #4
File: test_pipelines.py Project: sebrandon1/fuel-web
    def test_block_device_disks(self):
        self.env.create(
            release_kwargs={'version': self.env_version},
            cluster_kwargs={
                'mode': consts.CLUSTER_MODES.ha_compact,
                'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
                'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan})
        self.cluster_db = self.env.clusters[0]

        self.env.create_node(
            cluster_id=self.cluster_db.id,
            roles=['cinder-block-device']
        )
        self.env.create_node(
            cluster_id=self.cluster_db.id,
            roles=['controller']
        )
        serialized_for_astute = deployment_serializers.serialize(
            AstuteGraph(self.cluster_db),
            self.cluster_db,
            self.cluster_db.nodes)
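        # Every serialized node should expose its volume layout; only the
        # 'cinder-block-device' volume group is expected to be empty.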
        for node in serialized_for_astute['nodes']:
            self.assertIn("node_volumes", node)
            for node_volume in node["node_volumes"]:
                if node_volume["id"] == "cinder-block-device":
                    self.assertEqual(node_volume["volumes"], [])
                else:
                    self.assertNotEqual(node_volume["volumes"], [])
Code example #5
File: task.py Project: blkart/fuel-web
    def message(cls, task, nodes):
        logger.debug("%s.message(task=%s)", cls.__class__.__name__, task.uuid)

        for n in nodes:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []
            n.status = 'provisioned'
            n.progress = 0

        orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)

        serialized_cluster = deployment_serializers.serialize(
            orchestrator_graph, task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False

        rpc_message = make_astute_message(
            task,
            'deploy',
            'deploy_resp',
            {
                'deployment_info': serialized_cluster
            }
        )
        db().flush()
        return rpc_message
Code example #6
File: task.py Project: cxb811201/fuel-web
    def message(cls, task, nodes):
        logger.debug("%s.message(task=%s)", cls.__class__.__name__, task.uuid)

        for n in nodes:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []
            n.status = 'provisioned'
            n.progress = 0

        # here we replace deployment data if user redefined them
        serialized_cluster = deployment_serializers.serialize(
            task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False

        rpc_message = make_astute_message(
            task,
            'deploy',
            'deploy_resp',
            {
                'deployment_info': serialized_cluster
            }
        )
        db().commit()
        return rpc_message
Code example #7
    def message(cls, task):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        task.cluster.prepare_for_deployment()
        nodes = TaskHelper.nodes_to_deploy(task.cluster)
        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(cluster=task.cluster).order_by(
                Node.id):
            # However, we must not pass nodes which are set to be deleted.
            if n.pending_deletion:
                continue

            if n.id in nodes_ids:  # It's a node which we need to redeploy
                n.pending_addition = False
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []
                if n.status in ('deploying',):
                    n.status = 'provisioned'
                n.progress = 0
                db().add(n)
                db().commit()

        # here we replace provisioning data if user redefined them
        serialized_cluster = task.cluster.replaced_deployment_info or \
            deployment_serializers.serialize(task.cluster)

        return {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'deployment_info': serialized_cluster
            }
        }
Code example #8
File: task.py Project: vefimova/fuel-web
    def message(cls, task, nodes):
        logger.debug("%s.message(task=%s)", cls.__class__.__name__, task.uuid)

        for n in nodes:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []
            n.status = 'provisioned'
            n.progress = 0

        orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)

        serialized_cluster = deployment_serializers.serialize(
            orchestrator_graph, task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False

        rpc_message = make_astute_message(
            task,
            'deploy',
            'deploy_resp',
            {
                'deployment_info': serialized_cluster
            }
        )
        db().flush()
        return rpc_message
Code example #9
File: task.py Project: koder-ua/fuel-cert
    def message(cls, task, nodes):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(cluster=task.cluster).order_by(
                Node.id):

            if n.id in nodes_ids:
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []

                # If the receiver for some reason didn't update
                # the node's status to provisioned when deployment
                # started, we should do it in nailgun
                if n.status in ('deploying',):
                    n.status = 'provisioned'
                n.progress = 0
                db().add(n)
                db().commit()

        # here we replace deployment data if user redefined them
        serialized_cluster = task.cluster.replaced_deployment_info or \
            deployment_serializers.serialize(task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False
        db().commit()

        return make_astute_message('deploy', 'deploy_resp', {
            'task_uuid': task.uuid,
            'deployment_info': serialized_cluster
        })
Code example #10
    def test_block_device_disks(self):
        self.env.create(release_kwargs={'version': self.env_version},
                        cluster_kwargs={
                            'mode': consts.CLUSTER_MODES.ha_compact,
                            'net_provider':
                            consts.CLUSTER_NET_PROVIDERS.neutron,
                            'net_segment_type':
                            consts.NEUTRON_SEGMENT_TYPES.vlan
                        })
        self.cluster_db = self.env.clusters[0]

        self.env.create_node(cluster_id=self.cluster_db.id,
                             roles=['cinder-block-device'])
        self.env.create_node(cluster_id=self.cluster_db.id,
                             roles=['controller'])
        serialized_for_astute = deployment_serializers.serialize(
            AstuteGraph(self.cluster_db), self.cluster_db,
            self.cluster_db.nodes)
        for node in serialized_for_astute:
            self.assertIn("node_volumes", node)
            for node_volume in node["node_volumes"]:
                if node_volume["id"] == "cinder-block-device":
                    self.assertEqual(node_volume["volumes"], [])
                else:
                    self.assertNotEqual(node_volume["volumes"], [])
Code example #11
 def _serialize(self, cluster, nodes):
     if objects.Release.is_lcm_supported(cluster.release):
         return deployment_serializers.serialize_for_lcm(
             cluster, nodes, ignore_customized=True
         )
     graph = orchestrator_graph.AstuteGraph(cluster)
     return deployment_serializers.serialize(
         graph, cluster, nodes, ignore_customized=True)
Code example #12
 def _serialize(self, cluster, nodes):
     if objects.Release.is_lcm_supported(cluster.release):
         return deployment_serializers.serialize_for_lcm(
             cluster, nodes, ignore_customized=True)
     graph = orchestrator_graph.AstuteGraph(cluster)
     return deployment_serializers.serialize(graph,
                                             cluster,
                                             nodes,
                                             ignore_customized=True)
Code example #13
File: orchestrator.py Project: zhanghui9700/fuel-web
    def _serialize(self, cluster, nodes):
        if objects.Release.is_lcm_supported(cluster.release):
            serialized = deployment_serializers.serialize_for_lcm(
                cluster, nodes, ignore_customized=True)
        else:
            graph = orchestrator_graph.AstuteGraph(cluster)
            serialized = deployment_serializers.serialize(
                graph, cluster, nodes, ignore_customized=True)

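        # The `split` query parameter (default '0') appears to toggle between
        # the legacy merged format and the split common/nodes format.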
        return _deployment_info_in_compatible_format(
            serialized, utils.parse_bool(web.input(split='0').split))
Code example #14
File: orchestrator.py Project: openstack/fuel-web
    def _serialize(self, cluster, nodes):
        if objects.Release.is_lcm_supported(cluster.release):
            serialized = deployment_serializers.serialize_for_lcm(
                cluster, nodes, ignore_customized=True
            )
        else:
            graph = orchestrator_graph.AstuteGraph(cluster)
            serialized = deployment_serializers.serialize(
                graph, cluster, nodes, ignore_customized=True)

        return _deployment_info_in_compatible_format(
            serialized, utils.parse_bool(web.input(split='0').split)
        )
Code example #15
File: task.py Project: yxh1990/fuel-cloudmaster
    def message(cls, task, nodes):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(
                cluster=task.cluster).order_by(Node.id):

            if n.id in nodes_ids:
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []

                # If the receiver for some reason didn't update
                # the node's status to provisioned when deployment
                # started, we should do it in nailgun
                if n.status in (NODE_STATUSES.deploying,):
                    n.status = NODE_STATUSES.provisioned
                n.progress = 0
                db().add(n)
        db().flush()
        
        deployment_tasks = []
        orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)
        orchestrator_graph.only_tasks(deployment_tasks)

        # serialized_cluster = deployment_serializers.serialize(
        #     orchestrator_graph,task.cluster, nodes)
        serialized_cluster = deployment_serializers.serialize(
            task.cluster, nodes)

        pre_deployment = plugins_serializers.pre_deployment_serialize(
            task.cluster, nodes)
        post_deployment = plugins_serializers.post_deployment_serialize(
            task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False

        rpc_message = make_astute_message(
            task,
            'deploy',
            'deploy_resp',
            {
                'deployment_info': serialized_cluster,
                'pre_deployment': pre_deployment,
                'post_deployment': post_deployment
            }
        )
        db().commit()
        return rpc_message
Code example #16
File: task.py Project: vefimova/fuel-web
    def message(cls, task, nodes, deployment_tasks=None):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
        deployment_tasks = deployment_tasks or []

        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(
                cluster=task.cluster).order_by(Node.id):

            if n.id in nodes_ids:
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []

                # If the receiver for some reason didn't update
                # the node's status to provisioned when deployment
                # started, we should do it in nailgun
                if n.status in (consts.NODE_STATUSES.deploying,):
                    n.status = consts.NODE_STATUSES.provisioned
                n.progress = 0
                db().add(n)
        db().flush()

        orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)
        orchestrator_graph.only_tasks(deployment_tasks)

        #NOTE(dshulyak) At this point parts of the orchestration can be empty,
        # it should not cause any issues with deployment/progress and was
        # done by design
        serialized_cluster = deployment_serializers.serialize(
            orchestrator_graph, task.cluster, nodes)
        pre_deployment = stages.pre_deployment_serialize(
            orchestrator_graph, task.cluster, nodes)
        post_deployment = stages.post_deployment_serialize(
            orchestrator_graph, task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False

        rpc_message = make_astute_message(
            task,
            cls._get_deployment_method(task.cluster),
            'deploy_resp',
            {
                'deployment_info': serialized_cluster,
                'pre_deployment': pre_deployment,
                'post_deployment': post_deployment
            }
        )
        db().flush()
        return rpc_message
Code example #17
File: task.py Project: blkart/fuel-web
    def message(cls, task, nodes, deployment_tasks=None):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
        deployment_tasks = deployment_tasks or []

        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(
                cluster=task.cluster).order_by(Node.id):

            if n.id in nodes_ids:
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []

                # If the receiver for some reason didn't update
                # the node's status to provisioned when deployment
                # started, we should do it in nailgun
                if n.status in (consts.NODE_STATUSES.deploying,):
                    n.status = consts.NODE_STATUSES.provisioned
                n.progress = 0
                db().add(n)
        db().flush()

        orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)
        orchestrator_graph.only_tasks(deployment_tasks)

        #NOTE(dshulyak) At this point parts of the orchestration can be empty,
        # it should not cause any issues with deployment/progress and was
        # done by design
        serialized_cluster = deployment_serializers.serialize(
            orchestrator_graph, task.cluster, nodes)
        pre_deployment = stages.pre_deployment_serialize(
            orchestrator_graph, task.cluster, nodes)
        post_deployment = stages.post_deployment_serialize(
            orchestrator_graph, task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False

        rpc_message = make_astute_message(
            task,
            cls._get_deployment_method(task.cluster),
            'deploy_resp',
            {
                'deployment_info': serialized_cluster,
                'pre_deployment': pre_deployment,
                'post_deployment': post_deployment
            }
        )
        db().flush()
        return rpc_message
Code example #18
File: test_extensions.py Project: huyupeng/fuel-web
    def test_deployment_serialization_ignore_customized_false(self, _):
        cluster = self._create_cluster_with_extensions(
            nodes_kwargs=[
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller'], 'pending_addition': True},
            ]
        )

        data = [{"uid": n.uid} for n in cluster.nodes]
        expected_data = copy.deepcopy(data[1:])

        mserializer = mock.MagicMock()
        mserializer.return_value = mock.MagicMock()
        mserializer.return_value.serialize.return_value = data

        with mock.patch(
                'nailgun.orchestrator.deployment_serializers.'
                'get_serializer_for_cluster',
                return_value=mserializer):
            with mock.patch('nailgun.orchestrator.deployment_serializers.'
                            'fire_callback_on_deployment_data_serialization',
                            ) as mfire_callback:

                replaced_data = ["it's", "something"]
                with mock.patch.object(
                        cluster.nodes[0], 'replaced_deployment_info',
                        new_callable=mock.Mock(return_value=replaced_data)):

                    graph = orchestrator_graph.AstuteGraph(cluster)
                    deployment_serializers.serialize(
                        graph, cluster, cluster.nodes, ignore_customized=False)

        self.assertEqual(mfire_callback.call_args[0][0], expected_data)
        self.assertIs(mfire_callback.call_args[0][1], cluster)
        self.assertItemsEqual(
            mfire_callback.call_args[0][2], cluster.nodes[1:])
Code example #19
File: task.py Project: yxh1990/deployfuel
    def message(cls, task, nodes):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(cluster=task.cluster).order_by(
                Node.id):

            if n.id in nodes_ids:
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []

                # If the receiver for some reason didn't update
                # the node's status to provisioned when deployment
                # started, we should do it in nailgun
                if n.status in (NODE_STATUSES.deploying, ):
                    n.status = NODE_STATUSES.provisioned
                n.progress = 0
                db().add(n)
        db().flush()

        deployment_tasks = []
        orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)
        orchestrator_graph.only_tasks(deployment_tasks)

        # serialized_cluster = deployment_serializers.serialize(
        #     orchestrator_graph,task.cluster, nodes)
        serialized_cluster = deployment_serializers.serialize(
            task.cluster, nodes)

        pre_deployment = plugins_serializers.pre_deployment_serialize(
            task.cluster, nodes)
        post_deployment = plugins_serializers.post_deployment_serialize(
            task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False

        rpc_message = make_astute_message(
            task, 'deploy', 'deploy_resp', {
                'deployment_info': serialized_cluster,
                'pre_deployment': pre_deployment,
                'post_deployment': post_deployment
            })
        db().commit()
        return rpc_message
Code example #20
File: task.py Project: rsokolkov/fuel-web
    def message(cls, task):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        task.cluster.prepare_for_deployment()
        nodes = TaskHelper.nodes_to_deploy(task.cluster)
        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(cluster=task.cluster).order_by(Node.id):
            # However, we must not pass nodes which are set to be deleted.
            if n.pending_deletion:
                continue

            if n.id in nodes_ids:
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []

                # If the receiver for some reason didn't update
                # the node's status to provisioned when deployment
                # started, we should do it in nailgun
                if n.status in ("deploying",):
                    n.status = "provisioned"
                n.progress = 0
                db().add(n)
                db().commit()

        # here we replace provisioning data if user redefined them
        serialized_cluster = task.cluster.replaced_deployment_info or deployment_serializers.serialize(task.cluster)

        # After serialization set pending_addition to False
        for node in db().query(Node).filter(Node.id.in_(nodes_ids)):
            node.pending_addition = False
        db().commit()

        return {
            "method": "deploy",
            "respond_to": "deploy_resp",
            "args": {"task_uuid": task.uuid, "deployment_info": serialized_cluster},
        }
Code example #21
File: task.py Project: Axam/nsx-web
    def message(cls, task, nodes):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(
                cluster=task.cluster).order_by(Node.id):

            if n.id in nodes_ids:
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []

                # If the receiver for some reason didn't update
                # the node's status to provisioned when deployment
                # started, we should do it in nailgun
                if n.status in (NODE_STATUSES.deploying,):
                    n.status = NODE_STATUSES.provisioned
                n.progress = 0
                db().add(n)
        db().flush()

        # here we replace deployment data if user redefined them
        serialized_cluster = deployment_serializers.serialize(
            task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False
        db().commit()

        return make_astute_message(
            'deploy',
            'deploy_resp',
            {
                'task_uuid': task.uuid,
                'deployment_info': serialized_cluster
            }
        )
Code example #22
File: task.py Project: yxh1990/deployfuel
    def message(cls, task, nodes):
        logger.debug("%s.message(task=%s)", cls.__class__.__name__, task.uuid)

        for n in nodes:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []
            n.status = 'provisioned'
            n.progress = 0

        # here we replace deployment data if user redefined them
        serialized_cluster = deployment_serializers.serialize(
            task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False

        rpc_message = make_astute_message(
            task, 'deploy', 'deploy_resp',
            {'deployment_info': serialized_cluster})
        db().commit()
        return rpc_message
Code example #23
File: task.py Project: e0ne/fuel-web
    def message(cls, task, nodes):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
        TaskHelper.raise_if_node_offline(nodes)

        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(
                cluster=task.cluster).order_by(Node.id):

            if n.id in nodes_ids:
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []

                # If the receiver for some reason didn't update
                # the node's status to provisioned when deployment
                # started, we should do it in nailgun
                if n.status in ('deploying',):
                    n.status = 'provisioned'
                n.progress = 0
                db().add(n)
                db().commit()

        # here we replace provisioning data if user redefined them
        serialized_cluster = task.cluster.replaced_deployment_info or \
            deployment_serializers.serialize(task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False
        db().commit()

        return {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'deployment_info': serialized_cluster}}
Code example #24
File: test_pipelines.py Project: sebrandon1/fuel-web
 def serialize(cluster):
     return deployment_serializers.serialize(
         AstuteGraph(cluster),
         cluster,
         cluster.nodes)['common']
Code example #25
File: manager.py Project: yxh1990/deployfuel
    def execute(self):
        # Start executing the deployment change
        logger.info(u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id))
        # Log the network info (network verification runs before an OpenStack deployment)
        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(u"Network info:\n{0}".format(
            jsonutils.dumps(network_info, indent=4)))

        self._remove_obsolete_tasks()

        supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        # For an OpenStack environment, follow the original flow and check
        # whether the cluster has any node changes
        if self.cluster.cluster_type == 1:
            if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
                db().rollback()
                raise errors.WrongNodeStatus("No changes to deploy")

        # we should have task committed for processing in other threads
        db().commit()
        TaskHelper.create_action_log(supertask)

        # Run validation if user didn't redefine
        # provisioning and deployment information

        if (not objects.Cluster.get_provisioning_info(self.cluster)
                and not objects.Cluster.get_deployment_info(self.cluster)):
            try:
                if self.cluster.cluster_type == 1:
                    self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion,
                                                     weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)

            self._call_silently(task_deletion, tasks.DeletionTask)
            # we should have task committed for processing in other threads
            db().commit()

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision)
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(TASK_NAMES.provision,
                                                      weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(task_provision,
                                                    tasks.ProvisionTask,
                                                    nodes_to_provision,
                                                    method_name='message')

            task_provision = objects.Task.get_by_uid(task_provision.id,
                                                     fail_if_not_found=True,
                                                     lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == TASK_STATUSES.error:
                return supertask

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)
        else:
            pass

        #nodes_to_deploy=self.cluster.nodes
        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(task_deployment,
                                                     tasks.DeploymentTask,
                                                     nodes_to_deploy,
                                                     method_name='message')

            # clusterdeploymsg = ClusterdeployMsg(cluster_id=self.cluster.id,cluster_deploymsg='deployment_message')
            # db().add(clusterdeploymsg)
            # clusterdeploymsg should only store deployment info for a cluster
            # in the 'new' state: if the first deployment fails with error
            # nodes, the next "deploy changes" only messages the error nodes,
            # and letting that partial info overwrite the first complete
            # deployment info would break cluster start/stop (since it was
            # sent to the error nodes only).
            logger.info(u'Deploy changes requested; updating the '
                        u'cluster_deploy_msg table')
            data = {}
            data['cluster_id'] = self.cluster.id
            data['cluster_deploymsg'] = jsonutils.dumps(deployment_message)
            clusterdeploymsg = db().query(ClusterdeployMsg).filter_by(
                cluster_id=self.cluster.id).first()

            if clusterdeploymsg:
                if self.cluster.status == CLUSTER_STATUSES.new:
                    objects.ClusterdeployMsgObject.update(
                        clusterdeploymsg,
                        {'cluster_deploymsg': data['cluster_deploymsg']})
            else:
                objects.ClusterdeployMsgCollection.create(data)

            task_deployment = objects.Task.get_by_uid(task_deployment.id,
                                                      fail_if_not_found=True,
                                                      lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == TASK_STATUSES.error:
                return supertask

            task_deployment.cache = deployment_message
            db().commit()
            task_messages.append(deployment_message)
        else:
            pass
        if len(nodes_to_delete) <= 0 and len(nodes_to_deploy) <= 0:
            # We cannot decide between start and stop just by whether
            # nodes_to_deploy is empty: if nodes_to_delete is non-empty while
            # nodes_to_deploy is empty, the code below still runs and the
            # program would raise an exception.

            oprolename = web.cookies().get("oprolename")
            opaction = web.cookies().get("opaction")
            nodes_to_startorstop = TaskHelper.nodes_to_startorstop(
                self.cluster)
            if oprolename != "all":
                logger.info(u'Running a single-role start or stop deployment')
                task_deployment = supertask.create_subtask(
                    TASK_NAMES.deployment)

                # we should have task committed for processing in other threads
                # In a customized OpenStack environment the deploy-changes and
                # start/stop buttons do not go through this code; they call
                # /opt/start.py directly. Only the cloudmaster and ebs
                # environments currently reach this branch.
                db().commit()

                deployment_message = self._call_silently(task_deployment,
                                                         tasks.DeploymentTask,
                                                         nodes_to_startorstop,
                                                         method_name='message')
                deploymsg = deployment_message
                deploymsg['respond_to'] = "start_stop_resp"
                deploymsg['args']['task_uuid'] = task_deployment.uuid
                # deployment_info is a list whose elements are dicts; the
                # per-role 'label' parameter is added and modified only here.
                deployment_info_old_list = deploymsg['args']['deployment_info']
                deployment_info_list = []
                nodeuids = []
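                # groupby() only merges consecutive runs, so this relies on
                # deployment_info_old_list being ordered by uid.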
                for key, item in groupby(deployment_info_old_list,
                                         itemgetter('uid')):
                    nodeuids.append(key)

                deployment_info_list = deployment_info_old_list[
                    :len(nodeuids)]
                # Drop role info unrelated to the start/stop operation
                deploymsg['args']['deployment_info'] = []
                for i in range(len(deployment_info_list)):
                    deployment_info = deployment_info_list[i]
                    deployment_info["role"] = oprolename
                    deployment_info["uid"] = nodeuids[i]
                    deploymsg['args']['deployment_info'].append(
                        deployment_info)

                for deployment_info in deployment_info_list:  # this is a list
                    deployment_info_keys = deployment_info.keys()
                    changelable_keys = []
                    operationalrole = ""
                    for key in deployment_info_keys:
                        if key.lower() == oprolename.lower():
                            operationalrole = key
                        else:
                            changelable_keys.append(key)

                    deployment_info[operationalrole]['action'] = opaction
                    deployment_info[operationalrole]['label'] = '0'
                    for key in changelable_keys:
                        if (type(deployment_info[key]) == dict and
                                deployment_info[key].get('label')
                                is not None):
                            deployment_info[key]['label'] = '1'

                logger.info(deployment_info[operationalrole]['action'])
                logger.info(oprolename)

                task_deployment = objects.Task.get_by_uid(
                    task_deployment.id,
                    fail_if_not_found=True,
                    lock_for_update=True)
                # if failed to generate task message for orchestrator
                # then task is already set to error
                if task_deployment.status == TASK_STATUSES.error:
                    return supertask

                task_deployment.cache = deploymsg
                db().commit()
                task_messages.append(deploymsg)

            else:
                logger.info(u'Running the one-click start/stop operation')
                serialized_cluster = deployment_serializers.serialize(
                    self.cluster, nodes_to_startorstop)
                pre_deployment = plugins_serializers.pre_deployment_serialize(
                    self.cluster, nodes_to_startorstop)
                post_deployment = plugins_serializers.post_deployment_serialize(
                    self.cluster, nodes_to_startorstop)
                deployment_message = self.make_astute_message(
                    'deploy', 'deploy_resp', {
                        'deployment_info': serialized_cluster,
                        'pre_deployment': pre_deployment,
                        'post_deployment': post_deployment
                    })
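                # cluster_type appears to encode the environment kind:
                # 1 = OpenStack (per the check above), 3 = ebs; other
                # variants of this code also handle 2 = cloudmaster.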
                if self.cluster.cluster_type == 3:
                    ebs_start = ebs.StartAllRole()
                    if opaction == "stop":
                        task_messages = ebs_start.make_deploy_msgs(
                            self.cluster, supertask, deployment_message, 1)
                    else:
                        task_messages = ebs_start.make_deploy_msgs(
                            self.cluster, supertask, deployment_message, 2)
                else:
                    pass

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision)
            for node in nodes_to_provision:
                node.status = NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(self.cluster.id,
                                   fail_if_not_found=True,
                                   lock_for_update=True)
        self.cluster.status = CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format(
            self.cluster.name or self.cluster.id, supertask.uuid))
        return supertask
Code example #26
    def test_disks_attrs(self):
        self.cluster = self.env.create(
            release_kwargs={
                'version': self.env_version,
                'operating_system': consts.RELEASE_OS.ubuntu
            },
            cluster_kwargs={
                'mode': consts.CLUSTER_MODES.ha_compact,
                'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
                'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan
            })
        self.cluster_db = self.env.clusters[0]

        disks = [
            {
                "model": "TOSHIBA MK1002TS",
                "name": "sda",
                "disk": "sda",
                "size": 1004886016
            },
        ]
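        # 1004886016 bytes is roughly 958 MiB, matching the disk's
        # u'size': 958 entry in the expected hash below.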
        expected_node_volumes_hash = [
            {
                u'name': u'sda',
                u'extra': [],
                u'free_space': 330,
                u'volumes': [
                    {u'type': u'boot', u'size': 300},
                    {u'mount': u'/boot', u'type': u'partition',
                     u'file_system': u'ext2', u'name': u'Boot', u'size': 200},
                    {u'type': u'lvm_meta_pool', u'size': 64},
                    {u'vg': u'os', u'type': u'pv',
                     u'lvm_meta_size': 64, u'size': 394},
                    {u'vg': u'vm', u'type': u'pv',
                     u'lvm_meta_size': 0, u'size': 0}
                ],
                u'type': u'disk',
                u'id': u'sda',
                u'bootable': True,
                u'size': 958
            },
            {
                u'_allocate_size': u'min',
                u'label': u'Base System',
                u'min_size': 19456,
                u'volumes': [
                    {u'mount': u'/', u'size': -3766, u'type': u'lv',
                     u'name': u'root', u'file_system': u'ext4'},
                    {u'mount': u'swap', u'size': 4096, u'type': u'lv',
                     u'name': u'swap', u'file_system': u'swap'}
                ],
                u'type': u'vg',
                u'id': u'os'
            },
            {
                u'_allocate_size': u'all',
                u'label': u'Virtual Storage',
                u'min_size': 5120,
                u'volumes': [
                    {u'mount': u'/var/lib/nova', u'size': 0, u'type': u'lv',
                     u'name': u'nova', u'file_system': u'xfs'}
                ],
                u'type': u'vg',
                u'id': u'vm'
            }
        ]
        self.env.create_node(
            cluster_id=self.cluster_db.id,
            roles=['compute'],
            meta={"disks": disks},
        )
        serialized_for_astute = deployment_serializers.serialize(
            AstuteGraph(self.cluster_db), self.cluster_db,
            self.cluster_db.nodes)

        for node in serialized_for_astute['nodes']:
            self.assertIn("node_volumes", node)
            self.assertItemsEqual(expected_node_volumes_hash,
                                  node["node_volumes"])
Code example #27
 def get_deployment_info(cluster, nodes):
     return deployment_serializers.serialize(AstuteGraph(cluster), cluster,
                                             nodes)
Code example #28
 def serialize(cluster):
     return deployment_serializers.serialize(AstuteGraph(cluster), cluster,
                                             cluster.nodes)['common']
Code example #29
 def _serialize(self, cluster, nodes):
     return deployment_serializers.serialize(cluster,
                                             nodes,
                                             ignore_customized=True)
Code example #30
File: test_pipelines.py Project: openstack/fuel-web
 def get_deployment_info(cluster, nodes):
     return deployment_serializers.serialize(
         AstuteGraph(cluster), cluster, nodes)
Code example #31
File: orchestrator.py Project: andrei4ka/fuel-web
 def _serialize(self, cluster, nodes):
     graph = deployment_graph.AstuteGraph(cluster)
     return deployment_serializers.serialize(graph,
                                             cluster,
                                             nodes,
                                             ignore_customized=True)
Code example #32
File: orchestrator.py Project: TorstenS73/fuel-web
 def _serialize(self, cluster, nodes):
     graph = deployment_graph.AstuteGraph(cluster)
     return deployment_serializers.serialize(
         graph, cluster, nodes, ignore_customized=True)
Code example #33
File: orchestrator.py Project: Zipfer/fuel-web
 def _serialize(self, cluster, nodes):
     return deployment_serializers.serialize(
         cluster, nodes, ignore_customized=True)
Code example #34
File: manager.py Project: yxh1990/fuel-cloudmaster
    def execute(self):
        # Start executing the deployment change
        logger.info(u"Trying to start deployment at cluster '{0}'".format(self.cluster.name or self.cluster.id))
        # Log the network info (network verification runs before an OpenStack deployment)
        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(u"Network info:\n{0}".format(jsonutils.dumps(network_info, indent=4)))

        self._remove_obsolete_tasks()

        supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        # For an OpenStack environment, follow the original flow and check
        # whether the cluster has any node changes
        if self.cluster.cluster_type == 1:
            if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
                db().rollback()
                raise errors.WrongNodeStatus("No changes to deploy")

        # we should have task committed for processing in other threads
        db().commit()
        TaskHelper.create_action_log(supertask)

        # Run validation if user didn't redefine
        # provisioning and deployment information

        if not objects.Cluster.get_provisioning_info(self.cluster) and not objects.Cluster.get_deployment_info(
            self.cluster
        ):
            try:
                if self.cluster.cluster_type == 1:
                    self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion, weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)

            self._call_silently(task_deletion, tasks.DeletionTask)
            # we should have task committed for processing in other threads
            db().commit()

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(nodes_to_provision)
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s", " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(TASK_NAMES.provision, weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision, tasks.ProvisionTask, nodes_to_provision, method_name="message"
            )

            task_provision = objects.Task.get_by_uid(task_provision.id, fail_if_not_found=True, lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == TASK_STATUSES.error:
                return supertask

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)
        else:
            pass

        # nodes_to_deploy=self.cluster.nodes
        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s", " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(
                task_deployment, tasks.DeploymentTask, nodes_to_deploy, method_name="message"
            )

            # clusterdeploymsg = ClusterdeployMsg(cluster_id=self.cluster.id,cluster_deploymsg='deployment_message')
            # db().add(clusterdeploymsg)
            # clusterdeploymsg should only store deployment info for a cluster
            # in the 'new' state: if the first deployment fails with error
            # nodes, the next "deploy changes" only messages the error nodes,
            # and letting that partial info overwrite the first complete
            # deployment info would break cluster start/stop (since it was
            # sent to the error nodes only).
            logger.info(u"执行部署变更操作,开始操作cluster_deploy_msg表")
            data = {}
            data["cluster_id"] = self.cluster.id
            data["cluster_deploymsg"] = jsonutils.dumps(deployment_message)
            clusterdeploymsg = db().query(ClusterdeployMsg).filter_by(cluster_id=self.cluster.id).first()

            if clusterdeploymsg:
                if self.cluster.status == CLUSTER_STATUSES.new:
                    objects.ClusterdeployMsgObject.update(
                        clusterdeploymsg, {"cluster_deploymsg": data["cluster_deploymsg"]}
                    )
            else:
                objects.ClusterdeployMsgCollection.create(data)

            task_deployment = objects.Task.get_by_uid(task_deployment.id, fail_if_not_found=True, lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == TASK_STATUSES.error:
                return supertask

            task_deployment.cache = deployment_message
            db().commit()
            task_messages.append(deployment_message)
        else:
            pass
        if len(nodes_to_delete) <= 0 and len(nodes_to_deploy) <= 0:
            # We cannot decide between start and stop just by whether
            # nodes_to_deploy is empty: if nodes_to_delete is non-empty while
            # nodes_to_deploy is empty, the code below still runs and the
            # program would raise an exception.

            oprolename = web.cookies().get("oprolename")
            opaction = web.cookies().get("opaction")
            nodes_to_startorstop = TaskHelper.nodes_to_startorstop(self.cluster)
            if oprolename != "all":
                logger.info(u"这里执行的是单角色启动或者停止部署")
                task_deployment = supertask.create_subtask(TASK_NAMES.deployment)

                # we should have task committed for processing in other threads
                # In a customized OpenStack environment the deploy-changes and
                # start/stop buttons do not go through this code; they call
                # /opt/start.py directly. Only the cloudmaster and ebs
                # environments currently reach this branch.
                db().commit()

                deployment_message = self._call_silently(
                    task_deployment, tasks.DeploymentTask, nodes_to_startorstop, method_name="message"
                )
                deploymsg = deployment_message
                deploymsg["respond_to"] = "start_stop_resp"
                deploymsg["args"]["task_uuid"] = task_deployment.uuid
                # deployment_info is a list whose elements are dicts; the
                # per-role "label" parameter is added and modified only here.
                deployment_info_old_list = deploymsg["args"]["deployment_info"]
                deployment_info_list = []
                nodeuids = []
                for key, item in groupby(deployment_info_old_list, itemgetter("uid")):
                    nodeuids.append(key)

                deployment_info_list = deployment_info_old_list[0 : len(nodeuids)]
                # Drop role info unrelated to the start/stop operation
                deploymsg["args"]["deployment_info"] = []
                for i in range(len(deployment_info_list)):
                    deployment_info = deployment_info_list[i]
                    deployment_info["role"] = oprolename
                    deployment_info["uid"] = nodeuids[i]
                    deploymsg["args"]["deployment_info"].append(deployment_info)

                for deployment_info in deployment_info_list:  # this is a list
                    deployment_info_keys = deployment_info.keys()
                    changelable_keys = []
                    operationalrole = ""
                    for key in deployment_info_keys:
                        if key.lower() == oprolename.lower():
                            operationalrole = key
                        else:
                            changelable_keys.append(key)

                    deployment_info[operationalrole]["action"] = opaction
                    deployment_info[operationalrole]["label"] = "0"
                    for key in changelable_keys:
                        if type(deployment_info[key]) == dict and deployment_info[key].get("label") is not None:
                            deployment_info[key]["label"] = "1"

                logger.info(deployment_info[operationalrole]["action"])
                logger.info(oprolename)

                task_deployment = objects.Task.get_by_uid(
                    task_deployment.id, fail_if_not_found=True, lock_for_update=True
                )
                # if failed to generate task message for orchestrator
                # then task is already set to error
                if task_deployment.status == TASK_STATUSES.error:
                    return supertask

                task_deployment.cache = deploymsg
                db().commit()
                task_messages.append(deploymsg)

            else:
                logger.info(u"这里执行的是一键启动和停止操作")
                serialized_cluster = deployment_serializers.serialize(self.cluster, nodes_to_startorstop)
                pre_deployment = plugins_serializers.pre_deployment_serialize(self.cluster, nodes_to_startorstop)
                post_deployment = plugins_serializers.post_deployment_serialize(self.cluster, nodes_to_startorstop)
                deployment_message = self.make_astute_message(
                    "deploy",
                    "deploy_resp",
                    {
                        "deployment_info": serialized_cluster,
                        "pre_deployment": pre_deployment,
                        "post_deployment": post_deployment,
                    },
                )
                if self.cluster.cluster_type == 3:
                    ebs_start = ebs.StartAllRole()
                    if opaction == "stop":
                        task_messages = ebs_start.make_deploy_msgs(self.cluster, supertask, deployment_message, 1)
                    else:
                        task_messages = ebs_start.make_deploy_msgs(self.cluster, supertask, deployment_message, 2)
                elif self.cluster.cluster_type == 2:
                    cloud_start = cld.StartAllRole()
                    if opaction == "stop":
                        task_messages = cloud_start.make_deploy_msgs(self.cluster, supertask, deployment_message, 1)
                    else:
                        task_messages = cloud_start.make_deploy_msgs(self.cluster, supertask, deployment_message, 2)
                else:
                    pass

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(nodes_to_provision)
            for node in nodes_to_provision:
                node.status = NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(self.cluster.id, fail_if_not_found=True, lock_for_update=True)
        self.cluster.status = CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        if task_messages:
            rpc.cast("naily", task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id, supertask.uuid
            )
        )
        return supertask
Code example #35
File: test_pipelines.py Project: sebrandon1/fuel-web
    def test_disks_attrs(self):
        self.cluster = self.env.create(
            release_kwargs={
                'version': self.env_version,
                'operating_system': consts.RELEASE_OS.ubuntu},
            cluster_kwargs={
                'mode': consts.CLUSTER_MODES.ha_compact,
                'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
                'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan})
        self.cluster_db = self.env.clusters[0]

        disks = [
            {
                "model": "TOSHIBA MK1002TS",
                "name": "sda",
                "disk": "sda",
                "size": 1004886016
            },
        ]
        expected_node_volumes_hash = [
            {
                u'name': u'sda',
                u'extra': [],
                u'free_space': 330,
                u'volumes': [
                    {
                        u'type': u'boot',
                        u'size': 300
                    },
                    {
                        u'mount': u'/boot',
                        u'type': u'partition',
                        u'file_system': u'ext2',
                        u'name': u'Boot',
                        u'size': 200
                    },
                    {
                        u'type': u'lvm_meta_pool',
                        u'size': 64
                    },
                    {
                        u'vg': u'os',
                        u'type': u'pv',
                        u'lvm_meta_size': 64,
                        u'size': 394
                    },
                    {
                        u'vg': u'vm',
                        u'type': u'pv',
                        u'lvm_meta_size': 0,
                        u'size': 0
                    }
                ],
                u'type': u'disk',
                u'id': u'sda',
                u'bootable': True,
                u'size': 958
            },
            {
                u'_allocate_size': u'min',
                u'label': u'Base System',
                u'min_size': 19456,
                u'volumes': [
                    {
                        u'mount': u'/',
                        u'size': -3766,
                        u'type': u'lv',
                        u'name': u'root',
                        u'file_system': u'ext4'
                    },
                    {
                        u'mount': u'swap',
                        u'size': 4096,
                        u'type': u'lv',
                        u'name': u'swap',
                        u'file_system': u'swap'
                    }
                ],
                u'type': u'vg',
                u'id': u'os'
            },
            {
                u'_allocate_size': u'all',
                u'label': u'Virtual Storage',
                u'min_size': 5120,
                u'volumes': [
                    {
                        u'mount': u'/var/lib/nova',
                        u'size': 0,
                        u'type': u'lv',
                        u'name': u'nova',
                        u'file_system': u'xfs'
                    }
                ],
                u'type': u'vg',
                u'id': u'vm'
            }
        ]
        self.env.create_node(
            cluster_id=self.cluster_db.id,
            roles=['compute'],
            meta={"disks": disks},
        )
        serialized_for_astute = deployment_serializers.serialize(
            AstuteGraph(self.cluster_db),
            self.cluster_db,
            self.cluster_db.nodes)

        for node in serialized_for_astute['nodes']:
            self.assertIn("node_volumes", node)
            self.assertItemsEqual(
                expected_node_volumes_hash, node["node_volumes"])