Example #1
0
    def test_redeploy_with_critial_roles(self):
        """One errored controller forces every provisioned node to redeploy."""
        specs = [
            {'roles': ['controller'], 'status': 'error'},
            {'roles': ['controller'], 'status': 'provisioned'},
            {'roles': ['controller'], 'status': 'provisioned'},
            {'roles': ['compute', 'cinder'], 'status': 'provisioned'},
            {'roles': ['compute'], 'status': 'provisioned'},
            {'roles': ['cinder'], 'status': 'provisioned'},
        ]
        cluster = self.create_env(specs)

        deploy_nodes = TaskHelper.nodes_to_deploy(cluster)
        self.assertEqual(len(deploy_nodes), 6)

        # Per-role breakdown of the redeployment set.
        for role, expected in (('controller', 3), ('cinder', 2),
                               ('compute', 2)):
            self.assertEqual(
                len(self.filter_by_role(deploy_nodes, role)), expected)
Example #2
0
    def test_deploy_grow_controllers(self):
        """Scaling to three controllers: two provision, all three deploy."""
        pending = {"roles": ["controller"], "pending_addition": True}
        cluster = self.create_env(
            nodes_kwargs=[{"roles": ["controller"]},
                          dict(pending), dict(pending)])

        # Only the two pending nodes need provisioning.
        self.assertEqual(len(TaskHelper.nodes_to_provision(cluster)), 2)

        # Every controller has to run puppet again.
        self.assertEqual(len(TaskHelper.nodes_to_deploy(cluster)), 3)

        supertask = self.env.launch_deployment()
        self.assertEqual(supertask.name, "deploy")

        self.env.wait_ready(supertask)
        self.assertEqual(supertask.status, "ready")

        self.assertEqual(
            len(self.filter_by_role(cluster.nodes, "controller")), 3)
Example #3
0
    def message(cls, task):
        """Build the astute 'deploy' RPC payload for *task*.

        Prepares the cluster for deployment, flushes pending roles on the
        nodes about to be redeployed, and returns the serialized payload.
        """
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        task.cluster.prepare_for_deployment()
        nodes = TaskHelper.nodes_to_deploy(task.cluster)
        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(
                cluster=task.cluster).order_by(Node.id):
            # However, we must not pass nodes which are set to be deleted.
            if n.pending_deletion:
                continue

            if n.id in nodes_ids:  # It's node which we need to redeploy
                n.pending_addition = False
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []
                # BUG FIX: `n.status in ('deploying')` tested substring
                # membership in a plain string (parentheses don't make a
                # tuple); compare with equality instead.
                if n.status == 'deploying':
                    n.status = 'provisioned'
                n.progress = 0
                db().add(n)
                db().commit()

        # here we replace provisioning data if user redefined them
        serialized_cluster = task.cluster.replaced_deployment_info or \
            deployment_serializers.serialize(task.cluster)

        return {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'deployment_info': serialized_cluster}}
Example #4
0
    def test_redeploy_all_controller_and_compute_cinder(self):
        """Errored nodes plus all controllers (one failed) must redeploy."""
        cluster = self.create_env([
            {'roles': ['controller'], 'status': 'error'},
            {'roles': ['controller']},
            {'roles': ['controller', 'cinder']},
            {'roles': ['compute', 'cinder']},
            {'roles': ['compute'], 'status': 'error'},
            {'roles': ['cinder'], 'status': 'error'}])

        nodes = TaskHelper.nodes_to_deploy(cluster)
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(len(nodes), 5)

        controllers = self.filter_by_role(nodes, 'controller')
        self.assertEqual(len(controllers), 3)

        cinders = self.filter_by_role(nodes, 'cinder')
        self.assertEqual(len(cinders), 2)

        computes = self.filter_by_role(nodes, 'compute')
        self.assertEqual(len(computes), 1)
Example #5
0
    def test_deploy_grow_controllers(self):
        """Two pending controllers provision; all three controllers deploy."""
        pending_controller = {'roles': ['controller'],
                              'pending_addition': True}
        cluster = self.create_env(nodes_kwargs=[
            {'roles': ['controller']},
            dict(pending_controller),
            dict(pending_controller),
        ])

        # Only the two new controllers need provisioning.
        self.assertEqual(len(TaskHelper.nodes_to_provision(cluster)), 2)

        # Every controller re-runs deployment (puppet).
        self.assertEqual(len(TaskHelper.nodes_to_deploy(cluster)), 3)

        supertask = self.env.launch_deployment()
        self.assertEqual(supertask.name, 'deploy')

        self.env.wait_ready(supertask)
        self.assertEqual(supertask.status, 'ready')

        self.assertEqual(
            len(self.filter_by_role(cluster.nodes, 'controller')), 3)
Example #6
0
    def test_redeploy_with_stopped_nodes(self):
        """Stopped and errored nodes are all picked up for redeployment."""
        cluster = self.create_env([
            {'roles': ['controller'], 'status': 'error'},
            {'roles': ['controller'], 'status': 'stopped'},
            {'roles': ['controller'], 'status': 'stopped'},
            {'roles': ['compute', 'cinder'], 'status': 'stopped'},
            {'roles': ['compute'], 'status': 'error',
             'error_type': 'stop_deployment'},
            {'roles': ['cinder'], 'status': 'error',
             'error_type': 'deploy'},
        ])

        deploy_nodes = TaskHelper.nodes_to_deploy(cluster)
        self.assertEqual(len(deploy_nodes), 6)

        # Per-role breakdown of the redeployment set.
        for role, count in (('controller', 3), ('cinder', 2),
                            ('compute', 2)):
            self.assertEqual(len(self.filter_by_role(deploy_nodes, role)),
                             count)
Example #7
0
    def message(cls, task):
        """Build the astute 'deploy' RPC payload for *task*.

        Flushes pending roles on nodes scheduled for redeployment and
        returns the serialized deployment info.
        """
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        task.cluster.prepare_for_deployment()
        nodes = TaskHelper.nodes_to_deploy(task.cluster)
        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(cluster=task.cluster).order_by(
                Node.id):
            # However, we must not pass nodes which are set to be deleted.
            if n.pending_deletion:
                continue

            if n.id in nodes_ids:  # It's node which we need to redeploy
                n.pending_addition = False
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []
                # BUG FIX: `in ('deploying')` performed a substring test on
                # a plain string (no tuple); use equality instead.
                if n.status == 'deploying':
                    n.status = 'provisioned'
                n.progress = 0
                db().add(n)
                db().commit()

        # here we replace provisioning data if user redefined them
        serialized_cluster = task.cluster.replaced_deployment_info or \
            deployment_serializers.serialize(task.cluster)

        return {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'deployment_info': serialized_cluster
            }
        }
Example #8
0
    def test_nodes_to_deploy_if_lcm(self):
        """Under LCM, nodes_to_deploy selects every deployable node."""
        node_specs = [
            {'status': consts.NODE_STATUSES.ready},
            {'status': consts.NODE_STATUSES.discover},
            {'status': consts.NODE_STATUSES.provisioning},
            {'status': consts.NODE_STATUSES.provisioned},
            {'status': consts.NODE_STATUSES.deploying},
            {'status': consts.NODE_STATUSES.error,
             'error_type': consts.NODE_ERRORS.deploy},
            {'status': consts.NODE_STATUSES.error,
             'error_type': consts.NODE_ERRORS.provision},
            {'status': consts.NODE_STATUSES.stopped},
            {'status': consts.NODE_STATUSES.removing},
            {'status': consts.NODE_STATUSES.ready,
             'pending_deletion': True},
        ]
        cluster = self.env.create(
            nodes_kwargs=node_specs,
            release_kwargs={'version': 'mitaka-9.0',
                            'operating_system': consts.RELEASE_OS.ubuntu})

        nodes_to_deploy = TaskHelper.nodes_to_deploy(cluster)
        self.assertEqual(5, len(nodes_to_deploy))

        acceptable_statuses = [
            consts.NODE_STATUSES.provisioned,
            consts.NODE_STATUSES.stopped,
            consts.NODE_STATUSES.ready,
            consts.NODE_STATUSES.error,
            consts.NODE_STATUSES.deploying,
        ]
        for node in nodes_to_deploy:
            self.assertIn(node.status, acceptable_statuses)
            self.assertIn(node.error_type, [None, consts.NODE_ERRORS.deploy])
            self.assertFalse(node.pending_deletion)
Example #9
0
    def test_redeploy_all_controller_if_single_controller_failed(self):
        """A single failed controller forces redeploy of all controllers."""
        cluster = self.create_env([
            {'roles': ['controller'], 'status': 'error'},
            {'roles': ['controller']},
            {'roles': ['controller', 'cinder']},
            {'roles': ['compute', 'cinder']},
            {'roles': ['compute']},
            {'roles': ['cinder']}])

        nodes = TaskHelper.nodes_to_deploy(cluster)
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(len(nodes), 3)

        controllers = self.filter_by_role(nodes, 'controller')
        self.assertEqual(len(controllers), 3)
Example #10
0
    def prepare_for_deployment(self):
        """Assign FQDNs and network IPs to every node touched by deployment."""
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        deploying = TaskHelper.nodes_to_deploy(self)
        provisioning = TaskHelper.nodes_in_provisioning(self)
        nodes = set(deploying + provisioning)

        TaskHelper.update_slave_nodes_fqdn(nodes)

        nodes_ids = sorted([node.id for node in nodes])
        netmanager = NetworkManager()
        if nodes_ids:
            for network in ('management', 'public', 'storage'):
                netmanager.assign_ips(nodes_ids, network)
Example #11
0
    def prepare_for_deployment(self):
        """Prepare cluster nodes: refresh FQDNs and allocate network IPs."""
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        nodes = set(
            TaskHelper.nodes_to_deploy(self) +
            TaskHelper.nodes_in_provisioning(self))

        TaskHelper.update_slave_nodes_fqdn(nodes)

        ids = sorted(node.id for node in nodes)
        manager = NetworkManager()
        if ids:
            manager.assign_ips(ids, 'management')
            manager.assign_ips(ids, 'public')
            manager.assign_ips(ids, 'storage')
Example #12
0
    def test_redeploy_only_compute_cinder(self):
        """Only the errored compute/cinder nodes are scheduled to redeploy."""
        cluster = self.create_env([
            {'roles': ['controller']},
            {'roles': ['controller']},
            {'roles': ['controller', 'cinder']},
            {'roles': ['compute', 'cinder']},
            {'roles': ['compute'], 'status': 'error'},
            {'roles': ['cinder'], 'status': 'error'}])

        nodes = TaskHelper.nodes_to_deploy(cluster)
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(len(nodes), 2)

        cinders = self.filter_by_role(nodes, 'cinder')
        self.assertEqual(len(cinders), 1)

        computes = self.filter_by_role(nodes, 'compute')
        self.assertEqual(len(computes), 1)
Example #13
0
    def prepare_for_deployment(self):
        """Update node FQDNs, then assign per-network and admin IPs."""
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        candidates = set(TaskHelper.nodes_to_deploy(self) +
                         TaskHelper.nodes_in_provisioning(self))
        nodes = sorted(candidates, key=lambda node: node.id)

        TaskHelper.update_slave_nodes_fqdn(nodes)

        nodes_ids = [node.id for node in nodes]
        netmanager = NetworkManager()
        if nodes_ids:
            for net_name in ("management", "public", "storage"):
                netmanager.assign_ips(nodes_ids, net_name)

            for node in nodes:
                iface_count = len(node.meta.get("interfaces", []))
                netmanager.assign_admin_ips(node.id, iface_count)
Example #14
0
    def test_redeploy_with_critial_roles(self):
        """An errored controller forces the whole cluster to redeploy."""
        cluster = self.create_env([
            {'roles': ['controller'], 'status': 'error'},
            {'roles': ['controller'], 'status': 'provisioned'},
            {'roles': ['controller'], 'status': 'provisioned'},
            {'roles': ['compute', 'cinder'], 'status': 'provisioned'},
            {'roles': ['compute'], 'status': 'provisioned'},
            {'roles': ['cinder'], 'status': 'provisioned'},
        ])

        deploy_set = TaskHelper.nodes_to_deploy(cluster)
        self.assertEqual(len(deploy_set), 6)

        # Role counts inside the redeployment set.
        for role, count in (('controller', 3), ('cinder', 2),
                            ('compute', 2)):
            self.assertEqual(len(self.filter_by_role(deploy_set, role)),
                             count)
Example #15
0
    def _check_nodes_are_online(cls, task):
        """Raise NodeOffline if offline nodes would block this deployment.

        An offline node blocks deployment when it is not ready, or when it
        is ready but scheduled for redeployment.
        """
        # Materialize the query once: the original iterated the Query
        # object twice, issuing the same SQL statement twice.
        offline_nodes = db().query(Node).\
            filter(Node.cluster == task.cluster).\
            filter_by(online=False).\
            filter_by(pending_deletion=False).all()

        offline_nodes_not_ready = [n for n in offline_nodes
                                   if n.status != consts.NODE_STATUSES.ready]
        nodes_to_deploy = TaskHelper.nodes_to_deploy(task.cluster)
        offline_nodes_to_redeploy = [
            n for n in offline_nodes
            if n.status == consts.NODE_STATUSES.ready and n in nodes_to_deploy]

        if offline_nodes_not_ready or offline_nodes_to_redeploy:
            node_names = ','.join(
                n.full_name for n in
                offline_nodes_not_ready + offline_nodes_to_redeploy)
            raise errors.NodeOffline(
                u'Nodes "{0}" are offline.'
                ' Remove them from environment '
                'and try again.'.format(node_names))
Example #16
0
    def prepare_for_deployment(self):
        """Set node FQDNs and hand out network plus admin IP addresses."""
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        to_deploy = TaskHelper.nodes_to_deploy(self)
        in_provisioning = TaskHelper.nodes_in_provisioning(self)
        nodes = sorted(set(to_deploy + in_provisioning),
                       key=lambda node: node.id)

        TaskHelper.update_slave_nodes_fqdn(nodes)

        node_ids = [node.id for node in nodes]
        netmanager = NetworkManager()
        if node_ids:
            netmanager.assign_ips(node_ids, 'management')
            netmanager.assign_ips(node_ids, 'public')
            netmanager.assign_ips(node_ids, 'storage')

            for node in nodes:
                netmanager.assign_admin_ips(
                    node.id, len(node.meta.get('interfaces', [])))
Example #17
0
    def _check_nodes_are_online(cls, task):
        """Raise NodeOffline if offline nodes would block this deployment.

        An offline node blocks deployment when it is not ready, or when it
        is ready but scheduled for redeployment.
        """
        # Materialize the query once: the original iterated the Query
        # object twice, issuing the same SQL statement twice.
        offline_nodes = db().query(Node).\
            filter(Node.cluster == task.cluster).\
            filter_by(online=False).\
            filter_by(pending_deletion=False).all()

        offline_nodes_not_ready = [n for n in offline_nodes
                                   if n.status != consts.NODE_STATUSES.ready]
        nodes_to_deploy = TaskHelper.nodes_to_deploy(task.cluster)
        offline_nodes_to_redeploy = [
            n for n in offline_nodes
            if n.status == consts.NODE_STATUSES.ready and n in nodes_to_deploy]

        if offline_nodes_not_ready or offline_nodes_to_redeploy:
            node_names = ','.join(
                n.full_name for n in
                offline_nodes_not_ready + offline_nodes_to_redeploy)
            raise errors.NodeOffline(
                u'Nodes "{0}" are offline.'
                ' Remove them from environment '
                'and try again.'.format(node_names))
    def test_deploy_grow_controllers(self):
        """Scaling controllers from one to three redeploys all of them."""
        cluster = self.create_env(nodes_kwargs=[
            {'roles': ['controller']},
            {'roles': ['controller'], 'pending_addition': True},
            {'roles': ['controller'], 'pending_addition': True},
        ])

        # Two brand-new controllers need provisioning.
        self.assertEqual(len(TaskHelper.nodes_to_provision(cluster)), 2)

        # All three controllers must be (re)deployed.
        self.assertEqual(len(TaskHelper.nodes_to_deploy(cluster)), 3)

        supertask = self.env.launch_deployment()
        self.assertEqual(supertask.name, consts.TASK_NAMES.deploy)
        self.assertNotEqual(supertask.status, consts.TASK_STATUSES.error)

        self.assertEqual(
            len(self.filter_by_role(cluster.nodes, 'controller')), 3)
Example #19
0
    def test_redeploy_with_stopped_nodes(self):
        """All stopped and errored nodes end up in the redeployment set."""
        node_specs = [
            {'roles': ['controller'], 'status': 'error'},
            {'roles': ['controller'], 'status': 'stopped'},
            {'roles': ['controller'], 'status': 'stopped'},
            {'roles': ['compute', 'cinder'], 'status': 'stopped'},
            {'roles': ['compute'], 'status': 'error',
             'error_type': 'stop_deployment'},
            {'roles': ['cinder'], 'status': 'error',
             'error_type': 'deploy'},
        ]
        cluster = self.create_env(node_specs)

        deploy_set = TaskHelper.nodes_to_deploy(cluster)
        self.assertEqual(len(deploy_set), 6)

        for role, count in (('controller', 3), ('cinder', 2),
                            ('compute', 2)):
            self.assertEqual(len(self.filter_by_role(deploy_set, role)),
                             count)
Example #20
0
    def message(cls, task):
        """Build the astute 'deploy' RPC payload for *task*.

        Updates node roles/status for nodes being redeployed, serializes
        the cluster, then clears pending_addition on the deployed nodes.
        """
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        task.cluster.prepare_for_deployment()
        nodes = TaskHelper.nodes_to_deploy(task.cluster)
        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(cluster=task.cluster).order_by(Node.id):
            # However, we must not pass nodes which are set to be deleted.
            if n.pending_deletion:
                continue

            if n.id in nodes_ids:
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []

                # If receiver for some reasons didn't update
                # node's status to provisioned when deployment
                # started, we should do it in nailgun.
                # BUG FIX: `in ("deploying")` was a substring test on a
                # plain string (no tuple); use equality instead.
                if n.status == "deploying":
                    n.status = "provisioned"
                n.progress = 0
                db().add(n)
                db().commit()

        # here we replace provisioning data if user redefined them
        serialized_cluster = task.cluster.replaced_deployment_info or deployment_serializers.serialize(task.cluster)

        # After serialization set pending_addition to False
        for node in db().query(Node).filter(Node.id.in_(nodes_ids)):
            node.pending_addition = False
        db().commit()

        return {
            "method": "deploy",
            "respond_to": "deploy_resp",
            "args": {"task_uuid": task.uuid, "deployment_info": serialized_cluster},
        }
Example #21
0
    def test_deploy_grow_controllers(self):
        """Growing a cluster by two controllers redeploys all three."""
        cluster = self.create_env(
            nodes_kwargs=[
                {'roles': ['controller']},
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller'], 'pending_addition': True}])

        # We have to build 2 new controllers.
        # assertEqual: assertEquals is a deprecated unittest alias.
        n_nodes = TaskHelper.nodes_to_provision(cluster)
        self.assertEqual(len(n_nodes), 2)

        # All controllers must re-deploy (run puppet)
        r_nodes = TaskHelper.nodes_to_deploy(cluster)
        self.assertEqual(len(r_nodes), 3)

        supertask = self.env.launch_deployment()
        self.assertEqual(supertask.name, 'deploy')

        self.env.wait_ready(supertask)
        self.assertEqual(supertask.status, 'ready')

        controllers = self.filter_by_role(cluster.nodes, 'controller')
        self.assertEqual(len(controllers), 3)
Example #22
0
    def message(cls, task):
        """Build the astute 'deploy' message, preferring cached cluster facts."""
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        task.cluster.prepare_for_deployment()
        nodes = TaskHelper.nodes_to_deploy(task.cluster)
        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(
                cluster=task.cluster).order_by(Node.id):
            # However, we must not pass nodes which are set to be deleted.
            if n.pending_deletion:
                continue

            if n.id in nodes_ids:  # It's node which we need to redeploy
                n.pending_addition = False
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []
                # BUG FIX: `in ('deploying')` performed a substring test on
                # a plain string (no tuple); use equality instead.
                if n.status == 'deploying':
                    n.status = 'provisioned'
                n.progress = 0
                db().add(n)
                db().commit()

        message = {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                # if task.cluster.facts not empty dict, it will be used
                # instead of computing cluster facts through serialize
                'deployment_info': task.cluster.facts
                or serialize(task.cluster)
            }
        }

        return message
Example #23
0
    def test_nodes_to_deploy_if_lcm(self):
        """LCM clusters schedule every deployable node for deployment."""
        statuses = consts.NODE_STATUSES
        cluster = self.env.create(
            nodes_kwargs=[
                {'status': statuses.ready},
                {'status': statuses.discover},
                {'status': statuses.provisioning},
                {'status': statuses.provisioned},
                {'status': statuses.deploying},
                {'status': statuses.error,
                 'error_type': consts.NODE_ERRORS.deploy},
                {'status': statuses.error,
                 'error_type': consts.NODE_ERRORS.provision},
                {'status': statuses.stopped},
                {'status': statuses.removing},
                {'status': statuses.ready, 'pending_deletion': True},
            ],
            release_kwargs={
                'version': 'mitaka-9.0',
                'operating_system': consts.RELEASE_OS.ubuntu,
            },
        )
        deploy_set = TaskHelper.nodes_to_deploy(cluster)
        self.assertEqual(5, len(deploy_set))

        allowed = [statuses.provisioned, statuses.stopped, statuses.ready,
                   statuses.error, statuses.deploying]
        for node in deploy_set:
            self.assertIn(node.status, allowed)
            self.assertIn(node.error_type, [None, consts.NODE_ERRORS.deploy])
            self.assertFalse(node.pending_deletion)
Example #24
0
    def message(cls, task):
        """Build the astute 'deploy' message for *task*.

        Assigns IP addresses to nodes being deployed, marks them
        provisioned, and collects cluster and network attributes.
        Removed the unused `task_uuid` local and the unused `nets_db`
        query, which fetched rows that were never read.
        """
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
        cluster_id = task.cluster.id
        netmanager = NetworkManager()

        nodes = TaskHelper.nodes_to_deploy(task.cluster)

        logger.info("Associated FQDNs to nodes: %s" %
                    ', '.join([n.fqdn for n in nodes]))

        nodes_ids = [n.id for n in nodes]
        if nodes_ids:
            logger.info("Assigning IP addresses to nodes..")
            netmanager.assign_ips(nodes_ids, "management")
            netmanager.assign_ips(nodes_ids, "public")
            netmanager.assign_ips(nodes_ids, "storage")

        nodes_with_attrs = []
        for n in nodes:
            n.pending_addition = False
            if n.status in ('ready', 'deploying'):
                n.status = 'provisioned'
            n.progress = 0
            orm().add(n)
            orm().commit()
            nodes_with_attrs.append(cls.__format_node_for_naily(n))

        cluster_attrs = task.cluster.attributes.merged_attrs_values()
        cluster_attrs['controller_nodes'] = cls.__controller_nodes(cluster_id)

        ng_db = orm().query(NetworkGroup).filter_by(
            cluster_id=cluster_id).all()
        for net in ng_db:
            net_name = net.name + '_network_range'
            if net.name == 'floating':
                cluster_attrs[net_name] = \
                    cls.__get_ip_addresses_in_ranges(net)
            elif net.name == 'public':
                # We shouldn't pass public_network_range attribute
                continue
            else:
                cluster_attrs[net_name] = net.cidr

        cluster_attrs['network_manager'] = task.cluster.net_manager

        fixed_net = orm().query(NetworkGroup).filter_by(
            cluster_id=cluster_id).filter_by(name='fixed').first()
        # network_size is required for all managers, otherwise
        #  puppet will use default (255)
        cluster_attrs['network_size'] = fixed_net.network_size
        if cluster_attrs['network_manager'] == 'VlanManager':
            cluster_attrs['num_networks'] = fixed_net.amount
            cluster_attrs['vlan_start'] = fixed_net.vlan_start
            cls.__add_vlan_interfaces(nodes_with_attrs)

        if task.cluster.mode == 'ha':
            logger.info("HA mode chosen, creating VIP addresses for it..")
            cluster_attrs['management_vip'] = netmanager.assign_vip(
                cluster_id, "management")
            cluster_attrs['public_vip'] = netmanager.assign_vip(
                cluster_id, "public")

        cluster_attrs['deployment_mode'] = task.cluster.mode
        cluster_attrs['deployment_id'] = cluster_id

        message = {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'nodes': nodes_with_attrs,
                'attributes': cluster_attrs
            }
        }

        return message
 def test_related_pending_deletion_nodes_not_present(self):
     """Only controller nodes end up in the deployment set."""
     cluster = self.cluster
     expected = [node for node in cluster.nodes
                 if 'controller' in node.all_roles]
     self.assertItemsEqual(expected, TaskHelper.nodes_to_deploy(cluster))
Example #26
0
    def message(cls, task):
        """Build the astute 'deploy' message for *task*.

        Assigns IPs to nodes scheduled for deployment, marks them
        provisioned, and collects cluster, network and node attributes
        for naily. Removed the unused `task_uuid` local and the unused
        `nets_db` query (its rows were never read).
        """
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
        cluster_id = task.cluster.id
        netmanager = NetworkManager()

        nodes = TaskHelper.nodes_to_deploy(task.cluster)

        logger.info("Associated FQDNs to nodes: %s" %
                    ', '.join([n.fqdn for n in nodes]))

        nodes_ids = [n.id for n in nodes]
        if nodes_ids:
            logger.info("Assigning IP addresses to nodes..")
            netmanager.assign_ips(nodes_ids, "management")
            netmanager.assign_ips(nodes_ids, "public")
            netmanager.assign_ips(nodes_ids, "storage")

        nodes_with_attrs = []
        # FIXME(mihgen): We need to pass all other nodes, so astute
        #  can know about all the env, not only about added nodes.
        for n in db().query(Node).filter_by(cluster=task.cluster).order_by(
                Node.id):
            # However, we must not pass nodes which are set to be deleted.
            if n.pending_deletion:
                continue
            if n.id in nodes_ids:  # It's node which we need to redeploy
                n.pending_addition = False
                # BUG FIX: `in ('deploying')` was a substring test on a
                # plain string (no tuple); compare with equality instead.
                if n.status == 'deploying':
                    n.status = 'provisioned'
                n.progress = 0
                db().add(n)
                db().commit()
            nodes_with_attrs.append(cls.__format_node_for_naily(n))

        cluster_attrs = task.cluster.attributes.merged_attrs_values()
        cluster_attrs['master_ip'] = settings.MASTER_IP
        cluster_attrs['controller_nodes'] = cls.__controller_nodes(cluster_id)

        ng_db = db().query(NetworkGroup).filter_by(cluster_id=cluster_id).all()
        for net in ng_db:
            net_name = net.name + '_network_range'
            if net.name == 'floating':
                cluster_attrs[net_name] = \
                    cls.__get_ip_ranges_first_last(net)
            elif net.name == 'public':
                # We shouldn't pass public_network_range attribute
                continue
            else:
                cluster_attrs[net_name] = net.cidr

        net_params = {}
        net_params['network_manager'] = task.cluster.net_manager

        fixed_net = db().query(NetworkGroup).filter_by(
            cluster_id=cluster_id).filter_by(name='fixed').first()
        # network_size is required for all managers, otherwise
        #  puppet will use default (255)
        net_params['network_size'] = fixed_net.network_size
        if net_params['network_manager'] == 'VlanManager':
            net_params['num_networks'] = fixed_net.amount
            net_params['vlan_start'] = fixed_net.vlan_start
            cls.__add_vlan_interfaces(nodes_with_attrs)

        cluster_attrs['novanetwork_parameters'] = net_params

        if task.cluster.mode == 'ha':
            logger.info("HA mode chosen, creating VIP addresses for it..")
            cluster_attrs['management_vip'] = netmanager.assign_vip(
                cluster_id, "management")
            cluster_attrs['public_vip'] = netmanager.assign_vip(
                cluster_id, "public")

        cluster_attrs['deployment_mode'] = task.cluster.mode
        cluster_attrs['deployment_id'] = cluster_id

        message = {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'nodes': nodes_with_attrs,
                'attributes': cluster_attrs
            }
        }

        return message
Example #27
0
 def get_default_nodes(self, cluster):
     """Return the nodes that should be deployed by default.

     BUG FIX: the computed node list was previously discarded — the
     method implicitly returned None.
     """
     return TaskHelper.nodes_to_deploy(cluster)
Example #28
0
 def get_default_nodes(self, cluster):
     """Default node set: every non-deleted node under LCM, otherwise
     the classic nodes-to-deploy selection."""
     if objects.Release.is_lcm_supported(cluster.release):
         not_deleting = objects.Cluster.get_nodes_not_for_deletion(cluster)
         return not_deleting.all()
     return TaskHelper.nodes_to_deploy(cluster)
 def test_related_pending_deletion_nodes_not_present_with_force(self):
     """With force=True only controller nodes are scheduled for deployment."""
     cluster = self.env.clusters[0]
     expected_controllers = [
         node for node in cluster.nodes if 'controller' in node.all_roles]
     actual = TaskHelper.nodes_to_deploy(cluster, force=True)
     self.assertItemsEqual(expected_controllers, actual)
 def get_nodes_to_deployment(cls, cluster):
     """Nodes which need to deploy, ordered by node id."""
     deploy_nodes = TaskHelper.nodes_to_deploy(cluster)
     return sorted(deploy_nodes, key=lambda node: node.id)
Example #31
0
    def message(cls, task):
        """Build the 'deploy' RPC message for *task*'s cluster.

        Assigns IPs (and VIPs in HA mode) to the nodes being deployed,
        flips their status bookkeeping in the DB, and returns the dict
        that is sent to the orchestrator.
        """
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
        # NOTE(review): task_uuid is never used below; task.uuid is read
        # directly when the message is built.
        task_uuid = task.uuid
        cluster_id = task.cluster.id
        netmanager = NetworkManager()

        nodes = TaskHelper.nodes_to_deploy(task.cluster)

        logger.info("Associated FQDNs to nodes: %s" %
                    ', '.join([n.fqdn for n in nodes]))

        nodes_ids = [n.id for n in nodes]
        if nodes_ids:
            logger.info("Assigning IP addresses to nodes..")
            netmanager.assign_ips(nodes_ids, "management")
            netmanager.assign_ips(nodes_ids, "public")
            netmanager.assign_ips(nodes_ids, "storage")

        # Reset each node's deployment bookkeeping and serialize it for
        # the orchestrator.  Note the commit runs once per node.
        nodes_with_attrs = []
        for n in nodes:
            n.pending_addition = False
            # Nodes already (or currently being) deployed are rewound to
            # 'provisioned' so they get re-deployed from scratch.
            if n.status in ('ready', 'deploying'):
                n.status = 'provisioned'
            n.progress = 0
            orm().add(n)
            orm().commit()
            nodes_with_attrs.append(cls.__format_node_for_naily(n))

        # Cluster-wide attributes passed alongside the per-node data.
        cluster_attrs = task.cluster.attributes.merged_attrs_values()
        cluster_attrs['controller_nodes'] = cls.__controller_nodes(cluster_id)

        # NOTE(review): nets_db is queried but never read afterwards.
        nets_db = orm().query(Network).join(NetworkGroup).\
            filter(NetworkGroup.cluster_id == cluster_id).all()

        # Expose every network group's CIDR as '<name>_network_range'.
        ng_db = orm().query(NetworkGroup).filter_by(
            cluster_id=cluster_id).all()
        for net in ng_db:
            cluster_attrs[net.name + '_network_range'] = net.cidr

        cluster_attrs['network_manager'] = task.cluster.net_manager

        # VlanManager needs the fixed network's sizing/VLAN parameters and
        # per-node VLAN interface entries.
        if cluster_attrs['network_manager'] == 'VlanManager':
            fixed_net = orm().query(NetworkGroup).filter_by(
                cluster_id=cluster_id).filter_by(name='fixed').first()

            cluster_attrs['network_size'] = fixed_net.network_size
            cluster_attrs['num_networks'] = fixed_net.amount
            cluster_attrs['vlan_start'] = fixed_net.vlan_start
            cls.__add_vlan_interfaces(nodes_with_attrs)

        # HA clusters additionally get virtual IPs for management/public.
        if task.cluster.mode == 'ha':
            logger.info("HA mode chosen, creating VIP addresses for it..")
            cluster_attrs['management_vip'] = netmanager.assign_vip(
                cluster_id, "management")
            cluster_attrs['public_vip'] = netmanager.assign_vip(
                cluster_id, "public")

        cluster_attrs['deployment_mode'] = task.cluster.mode
        cluster_attrs['deployment_id'] = cluster_id

        # Final RPC payload consumed by the deployment orchestrator.
        message = {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'nodes': nodes_with_attrs,
                'attributes': cluster_attrs
            }
        }

        return message
Example #32
0
    def message(cls, task):
        """Build the 'deploy' RPC message for *task*'s cluster.

        Serializes every non-deleted node in the cluster (not only the
        ones being redeployed), assigns IPs (and VIPs in HA mode), and
        returns the dict sent to the orchestrator.
        """
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
        cluster_id = task.cluster.id
        netmanager = NetworkManager()

        nodes = TaskHelper.nodes_to_deploy(task.cluster)

        logger.info("Associated FQDNs to nodes: %s" %
                    ', '.join([n.fqdn for n in nodes]))

        nodes_ids = [n.id for n in nodes]
        if nodes_ids:
            logger.info("Assigning IP addresses to nodes..")
            netmanager.assign_ips(nodes_ids, "management")
            netmanager.assign_ips(nodes_ids, "public")
            netmanager.assign_ips(nodes_ids, "storage")

        nodes_with_attrs = []
        # FIXME(mihgen): We need to pass all other nodes, so astute
        #  can know about all the env, not only about added nodes.
        for n in db().query(Node).filter_by(
            cluster=task.cluster
        ).order_by(Node.id):
            # However, we must not pass nodes which are set to be deleted.
            if n.pending_deletion:
                continue
            if n.id in nodes_ids:  # It's node which we need to redeploy
                n.pending_addition = False
                # BUG FIX: was ``n.status in ('deploying')``. Without a
                # trailing comma ``('deploying')`` is just the string, so
                # the test was substring membership and also matched
                # statuses like 'deploy'. Use equality instead.
                if n.status == 'deploying':
                    n.status = 'provisioned'
                n.progress = 0
                db().add(n)
                db().commit()
            nodes_with_attrs.append(cls.__format_node_for_naily(n))

        # Cluster-wide attributes passed alongside the per-node data.
        # (Removed two dead locals from the original: ``task_uuid`` and a
        # ``nets_db`` query whose result was never read.)
        cluster_attrs = task.cluster.attributes.merged_attrs_values()
        cluster_attrs['master_ip'] = settings.MASTER_IP
        cluster_attrs['controller_nodes'] = cls.__controller_nodes(cluster_id)

        # Expose each network group's range, except 'public' which must
        # not be passed; 'floating' is sent as first/last IP ranges.
        ng_db = db().query(NetworkGroup).filter_by(
            cluster_id=cluster_id).all()
        for net in ng_db:
            net_name = net.name + '_network_range'
            if net.name == 'floating':
                cluster_attrs[net_name] = \
                    cls.__get_ip_ranges_first_last(net)
            elif net.name == 'public':
                # We shouldn't pass public_network_range attribute
                continue
            else:
                cluster_attrs[net_name] = net.cidr

        net_params = {}
        net_params['network_manager'] = task.cluster.net_manager

        fixed_net = db().query(NetworkGroup).filter_by(
            cluster_id=cluster_id).filter_by(name='fixed').first()
        # network_size is required for all managers, otherwise
        #  puppet will use default (255)
        net_params['network_size'] = fixed_net.network_size
        if net_params['network_manager'] == 'VlanManager':
            net_params['num_networks'] = fixed_net.amount
            net_params['vlan_start'] = fixed_net.vlan_start
            cls.__add_vlan_interfaces(nodes_with_attrs)

        cluster_attrs['novanetwork_parameters'] = net_params

        # HA clusters additionally get virtual IPs for management/public.
        if task.cluster.mode == 'ha':
            logger.info("HA mode chosen, creating VIP addresses for it..")
            cluster_attrs['management_vip'] = netmanager.assign_vip(
                cluster_id, "management")
            cluster_attrs['public_vip'] = netmanager.assign_vip(
                cluster_id, "public")

        cluster_attrs['deployment_mode'] = task.cluster.mode
        cluster_attrs['deployment_id'] = cluster_id

        # Final RPC payload consumed by the deployment orchestrator.
        message = {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'nodes': nodes_with_attrs,
                'attributes': cluster_attrs
            }
        }

        return message
 def get_default_nodes(self, cluster):
     """Pick the node set a deployment acts on by default.

     For LCM-capable releases this is every node not marked for
     deletion; otherwise the legacy deploy helper decides.
     """
     lcm = objects.Release.is_lcm_supported(cluster.release)
     return (objects.Cluster.get_nodes_not_for_deletion(cluster).all()
             if lcm else TaskHelper.nodes_to_deploy(cluster))
Example #34
0
 def get_nodes_to_deployment(cls, cluster):
     """Nodes which need to deploy, in ascending node-id order."""
     to_deploy = TaskHelper.nodes_to_deploy(cluster)
     return sorted(to_deploy, key=lambda node: node.id)