Example #1
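prepare_for_deployment as an instance method on the cluster model: it gathers the nodes queued for deployment together with those still provisioning, refreshes their FQDNs, and then asks NetworkManager to assign an address to every node in each of the management, public and storage network groups.
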
    def prepare_for_deployment(self):
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        nodes = set(TaskHelper.nodes_to_deploy(self) +
                    TaskHelper.nodes_in_provisioning(self))

        TaskHelper.update_slave_nodes_fqdn(nodes)

        nodes_ids = sorted([n.id for n in nodes])
        netmanager = NetworkManager()
        if nodes_ids:
            netmanager.assign_ips(nodes_ids, 'management')
            netmanager.assign_ips(nodes_ids, 'public')
            netmanager.assign_ips(nodes_ids, 'storage')
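
All of the prepare_for_deployment variants on this page reduce to the same call shape. A minimal standalone sketch (the import path and assign_ips signature are the ones used above; the node ids themselves are hypothetical):

    # Minimal sketch of the shared call shape; node ids are hypothetical,
    # real code collects them from the database as in the examples.
    from nailgun.network.manager import NetworkManager

    netmanager = NetworkManager()
    node_ids = [1, 2, 3]  # hypothetical ids
    if node_ids:
        # One assign_ips call per network group, as in the examples above.
        for network in ('management', 'public', 'storage'):
            netmanager.assign_ips(node_ids, network)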
Example #2
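A classmethod variant that receives the node list as an argument. In addition to the three network groups, it assigns admin-network addresses per node, sized by the number of interfaces reported in the node's metadata.
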
    def prepare_for_deployment(cls, nodes):
        """Prepare environment for deployment,
        assign management, public, storage ips
        """
        cls.update_slave_nodes_fqdn(nodes)

        nodes_ids = [n.id for n in nodes]
        netmanager = NetworkManager()
        if nodes_ids:
            netmanager.assign_ips(nodes_ids, 'management')
            netmanager.assign_ips(nodes_ids, 'public')
            netmanager.assign_ips(nodes_ids, 'storage')

            for node in nodes:
                netmanager.assign_admin_ips(
                    node.id, len(node.meta.get('interfaces', [])))
Example #3
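An instance-method variant that combines both behaviors above: it sorts the node objects themselves by id so the FQDN update and IP assignment run in deterministic order, and finishes with per-node admin-network assignment.
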
    def prepare_for_deployment(self):
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        nodes = sorted(
            set(TaskHelper.nodes_to_deploy(self) + TaskHelper.nodes_in_provisioning(self)), key=lambda node: node.id
        )

        TaskHelper.update_slave_nodes_fqdn(nodes)

        nodes_ids = [n.id for n in nodes]
        netmanager = NetworkManager()
        if nodes_ids:
            netmanager.assign_ips(nodes_ids, "management")
            netmanager.assign_ips(nodes_ids, "public")
            netmanager.assign_ips(nodes_ids, "storage")

            for node in nodes:
                netmanager.assign_admin_ips(node.id, len(node.meta.get("interfaces", [])))
Example #4
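Functionally identical to Example #3; only the line wrapping and quoting style differ.
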
    def prepare_for_deployment(self):
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        nodes = sorted(set(
            TaskHelper.nodes_to_deploy(self) +
            TaskHelper.nodes_in_provisioning(self)),
                       key=lambda node: node.id)

        TaskHelper.update_slave_nodes_fqdn(nodes)

        nodes_ids = [n.id for n in nodes]
        netmanager = NetworkManager()
        if nodes_ids:
            netmanager.assign_ips(nodes_ids, 'management')
            netmanager.assign_ips(nodes_ids, 'public')
            netmanager.assign_ips(nodes_ids, 'storage')

            for node in nodes:
                netmanager.assign_admin_ips(
                    node.id, len(node.meta.get('interfaces', [])))
Example #5
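assign_ips is also called from DeploymentTask.message, which builds the RPC payload for a 'deploy' call: it addresses the nodes to deploy, marks them provisioned, merges the cluster attributes (network ranges, nova-network settings, HA VIPs) and returns the message dict.
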
    def message(cls, task):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
        task_uuid = task.uuid
        cluster_id = task.cluster.id
        netmanager = NetworkManager()

        nodes = TaskHelper.nodes_to_deploy(task.cluster)

        logger.info("Associated FQDNs to nodes: %s" %
                    ', '.join([n.fqdn for n in nodes]))

        nodes_ids = [n.id for n in nodes]
        if nodes_ids:
            logger.info("Assigning IP addresses to nodes..")
            netmanager.assign_ips(nodes_ids, "management")
            netmanager.assign_ips(nodes_ids, "public")
            netmanager.assign_ips(nodes_ids, "storage")

        nodes_with_attrs = []
        for n in nodes:
            n.pending_addition = False
            if n.status in ('ready', 'deploying'):
                n.status = 'provisioned'
            n.progress = 0
            orm().add(n)
            orm().commit()
            nodes_with_attrs.append(cls.__format_node_for_naily(n))

        cluster_attrs = task.cluster.attributes.merged_attrs_values()
        cluster_attrs['controller_nodes'] = cls.__controller_nodes(cluster_id)

        nets_db = orm().query(Network).join(NetworkGroup).\
            filter(NetworkGroup.cluster_id == cluster_id).all()

        ng_db = orm().query(NetworkGroup).filter_by(
            cluster_id=cluster_id).all()
        for net in ng_db:
            net_name = net.name + '_network_range'
            if net.name == 'floating':
                cluster_attrs[net_name] = \
                    cls.__get_ip_addresses_in_ranges(net)
            elif net.name == 'public':
                # We shouldn't pass public_network_range attribute
                continue
            else:
                cluster_attrs[net_name] = net.cidr

        cluster_attrs['network_manager'] = task.cluster.net_manager

        fixed_net = orm().query(NetworkGroup).filter_by(
            cluster_id=cluster_id).filter_by(name='fixed').first()
        # network_size is required for all managers, otherwise
        #  puppet will use default (255)
        cluster_attrs['network_size'] = fixed_net.network_size
        if cluster_attrs['network_manager'] == 'VlanManager':
            cluster_attrs['num_networks'] = fixed_net.amount
            cluster_attrs['vlan_start'] = fixed_net.vlan_start
            cls.__add_vlan_interfaces(nodes_with_attrs)

        if task.cluster.mode == 'ha':
            logger.info("HA mode chosen, creating VIP addresses for it..")
            cluster_attrs['management_vip'] = netmanager.assign_vip(
                cluster_id, "management")
            cluster_attrs['public_vip'] = netmanager.assign_vip(
                cluster_id, "public")

        cluster_attrs['deployment_mode'] = task.cluster.mode
        cluster_attrs['deployment_id'] = cluster_id

        message = {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'nodes': nodes_with_attrs,
                'attributes': cluster_attrs
            }
        }

        return message
Example #6
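A later revision of the same method. It serializes every node in the cluster except those pending deletion (not only the nodes being redeployed), adds master_ip from settings, and nests the nova-network settings under a separate novanetwork_parameters key.
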
    def message(cls, task):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
        task_uuid = task.uuid
        cluster_id = task.cluster.id
        netmanager = NetworkManager()

        nodes = TaskHelper.nodes_to_deploy(task.cluster)

        logger.info("Associated FQDNs to nodes: %s" %
                    ', '.join([n.fqdn for n in nodes]))

        nodes_ids = [n.id for n in nodes]
        if nodes_ids:
            logger.info("Assigning IP addresses to nodes..")
            netmanager.assign_ips(nodes_ids, "management")
            netmanager.assign_ips(nodes_ids, "public")
            netmanager.assign_ips(nodes_ids, "storage")

        nodes_with_attrs = []
        # FIXME(mihgen): We need to pass all other nodes, so astute
        #  can know about all the env, not only about added nodes.
        for n in db().query(Node).filter_by(
            cluster=task.cluster
        ).order_by(Node.id):
            # However, we must not pass nodes which are set to be deleted.
            if n.pending_deletion:
                continue
            if n.id in nodes_ids:  # It's node which we need to redeploy
                n.pending_addition = False
                if n.status == 'deploying':
                    n.status = 'provisioned'
                n.progress = 0
                db().add(n)
                db().commit()
            nodes_with_attrs.append(cls.__format_node_for_naily(n))

        cluster_attrs = task.cluster.attributes.merged_attrs_values()
        cluster_attrs['master_ip'] = settings.MASTER_IP
        cluster_attrs['controller_nodes'] = cls.__controller_nodes(cluster_id)

        nets_db = db().query(Network).join(NetworkGroup).\
            filter(NetworkGroup.cluster_id == cluster_id).all()

        ng_db = db().query(NetworkGroup).filter_by(
            cluster_id=cluster_id).all()
        for net in ng_db:
            net_name = net.name + '_network_range'
            if net.name == 'floating':
                cluster_attrs[net_name] = \
                    cls.__get_ip_ranges_first_last(net)
            elif net.name == 'public':
                # We shouldn't pass public_network_range attribute
                continue
            else:
                cluster_attrs[net_name] = net.cidr

        net_params = {}
        net_params['network_manager'] = task.cluster.net_manager

        fixed_net = db().query(NetworkGroup).filter_by(
            cluster_id=cluster_id).filter_by(name='fixed').first()
        # network_size is required for all managers, otherwise
        #  puppet will use default (255)
        net_params['network_size'] = fixed_net.network_size
        if net_params['network_manager'] == 'VlanManager':
            net_params['num_networks'] = fixed_net.amount
            net_params['vlan_start'] = fixed_net.vlan_start
            cls.__add_vlan_interfaces(nodes_with_attrs)

        cluster_attrs['novanetwork_parameters'] = net_params

        if task.cluster.mode == 'ha':
            logger.info("HA mode chosen, creating VIP addresses for it..")
            cluster_attrs['management_vip'] = netmanager.assign_vip(
                cluster_id, "management")
            cluster_attrs['public_vip'] = netmanager.assign_vip(
                cluster_id, "public")

        cluster_attrs['deployment_mode'] = task.cluster.mode
        cluster_attrs['deployment_id'] = cluster_id

        message = {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'nodes': nodes_with_attrs,
                'attributes': cluster_attrs
            }
        }

        return message
Example #7
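An earlier revision: every network group is exported as <name>_network_range set to its CIDR, with no special handling for the floating or public ranges, and the fixed-network parameters are only looked up when VlanManager is in use.
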
    def message(cls, task):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
        task_uuid = task.uuid
        cluster_id = task.cluster.id
        netmanager = NetworkManager()

        nodes = TaskHelper.nodes_to_deploy(task.cluster)

        logger.info("Associated FQDNs to nodes: %s" %
                    ', '.join([n.fqdn for n in nodes]))

        nodes_ids = [n.id for n in nodes]
        if nodes_ids:
            logger.info("Assigning IP addresses to nodes..")
            netmanager.assign_ips(nodes_ids, "management")
            netmanager.assign_ips(nodes_ids, "public")
            netmanager.assign_ips(nodes_ids, "storage")

        nodes_with_attrs = []
        for n in nodes:
            n.pending_addition = False
            if n.status in ('ready', 'deploying'):
                n.status = 'provisioned'
            n.progress = 0
            orm().add(n)
            orm().commit()
            nodes_with_attrs.append(cls.__format_node_for_naily(n))

        cluster_attrs = task.cluster.attributes.merged_attrs_values()
        cluster_attrs['controller_nodes'] = cls.__controller_nodes(cluster_id)

        nets_db = orm().query(Network).join(NetworkGroup).\
            filter(NetworkGroup.cluster_id == cluster_id).all()

        ng_db = orm().query(NetworkGroup).filter_by(
            cluster_id=cluster_id).all()
        for net in ng_db:
            cluster_attrs[net.name + '_network_range'] = net.cidr

        cluster_attrs['network_manager'] = task.cluster.net_manager

        if cluster_attrs['network_manager'] == 'VlanManager':
            fixed_net = orm().query(NetworkGroup).filter_by(
                cluster_id=cluster_id).filter_by(name='fixed').first()

            cluster_attrs['network_size'] = fixed_net.network_size
            cluster_attrs['num_networks'] = fixed_net.amount
            cluster_attrs['vlan_start'] = fixed_net.vlan_start
            cls.__add_vlan_interfaces(nodes_with_attrs)

        if task.cluster.mode == 'ha':
            logger.info("HA mode chosen, creating VIP addresses for it..")
            cluster_attrs['management_vip'] = netmanager.assign_vip(
                cluster_id, "management")
            cluster_attrs['public_vip'] = netmanager.assign_vip(
                cluster_id, "public")

        cluster_attrs['deployment_mode'] = task.cluster.mode
        cluster_attrs['deployment_id'] = cluster_id

        message = {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'nodes': nodes_with_attrs,
                'attributes': cluster_attrs
            }
        }

        return message
Example #8
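The tests exercise assign_ips as well: TestOSTFHandler addresses every node on the management and public networks (plus the admin network) before querying the OSTF handler and matching the returned controller IPs against the admin network CIDR.
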
class TestOSTFHandler(BaseHandlers):

    def setUp(self):
        super(TestOSTFHandler, self).setUp()
        self.netmanager = NetworkManager()

    def get(self, cluster_id):
        url = reverse(
            'OSTFHandler',
            kwargs={'cluster_id': cluster_id})
        return self.app.get(url, headers=self.default_headers)

    def assign_ip_to_nodes(self):
        nodes_ids = [node.id for node in self.db.query(Node).all()]
        self.netmanager.assign_ips(nodes_ids, 'management')
        self.netmanager.assign_ips(nodes_ids, 'public')
        map(self.netmanager.assign_admin_ips, nodes_ids)

    def get_admin_network_cidr(self):
        return db().query(Network).filter_by(
            name="fuelweb_admin"
        ).first().cidr

    def test_get_multinode_mode(self):
        fqdn = 'fqdn.com'
        self.env.create(
            nodes_kwargs=[
                {'role': 'compute'},
                {'role': 'controller', 'fqdn': fqdn}])
        self.assign_ip_to_nodes()
        cluster_id = self.env.clusters[0].id

        result = json.loads(self.get(cluster_id).body)

        end_point_ip = self.netmanager.get_end_point_ip(cluster_id)
        subnet = self.get_admin_network_cidr()
        expected = {
            'controller_nodes_ips': [SubnetMatcher(subnet)],
            'horizon_url': 'http://%s/' % end_point_ip,
            'controller_nodes_names': [fqdn],
            'admin_tenant_name': 'admin',
            'admin_username': '******',
            'keystone_url': 'http://%s:5000/' % end_point_ip,
            'admin_password': '******'}

        self.assertDictContainsSubset(expected, result)

    def test_get_ha_mode(self):
        fqdns = ['fqdn1.com', 'fqdn2.com', 'fqdn3.com']
        self.env.create(
            cluster_kwargs={
                'mode': 'ha',
                'type': 'compute'},
            nodes_kwargs=[
                {'role': 'controller', 'fqdn': fqdns[0]},
                {'role': 'controller', 'fqdn': fqdns[1]},
                {'role': 'controller', 'fqdn': fqdns[2]}])

        self.assign_ip_to_nodes()

        cluster_id = self.env.clusters[0].id
        result = json.loads(self.get(cluster_id).body)
        end_point_ip = self.netmanager.get_end_point_ip(cluster_id)

        subnets = [self.get_admin_network_cidr() for _ in range(3)]
        expected = {
            'controller_nodes_ips': map(SubnetMatcher, subnets),
            'horizon_url': 'http://%s/' % end_point_ip,
            'controller_nodes_names': sorted(fqdns),
            'admin_tenant_name': 'admin',
            'admin_username': '******',
            'keystone_url': 'http://%s:5000/' % end_point_ip,
            'admin_password': '******'}

        self.assertDictContainsSubset(expected, result)
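
Both tests compare controller_nodes_ips against SubnetMatcher instances, a helper whose implementation is not shown on this page. A minimal sketch of one plausible shape, assuming equality-based matching and the netaddr library (both assumptions, not the real helper):

    # Hypothetical sketch of SubnetMatcher; the real implementation is not
    # shown in these examples. Assumes the netaddr library is available.
    from netaddr import IPAddress, IPNetwork

    class SubnetMatcher(object):
        def __init__(self, cidr):
            self.network = IPNetwork(cidr)

        def __eq__(self, other):
            # Dict assertions compare with ==, so a matcher that equals any
            # IP inside its subnet lets the expected dict match controller
            # IPs without knowing the exact addresses assigned.
            return IPAddress(other) in self.network

        def __repr__(self):
            return '<SubnetMatcher %s>' % self.network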