    def test_floating_ranges_generation(self):
        # Set ip ranges for floating ips
        ranges = [['172.16.0.2', '172.16.0.4'], ['172.16.0.3', '172.16.0.5'],
                  ['172.16.0.10', '172.16.0.12']]

        floating_network_group = self.db.query(NetworkGroup).filter(
            NetworkGroup.name == 'floating').filter(
                NetworkGroup.cluster_id == self.cluster.id).first()

        # Remove floating ip addr ranges
        self.db.query(IPAddrRange).filter(IPAddrRange.network_group_id ==
                                          floating_network_group.id).delete()

        # Add new ranges
        for ip_range in ranges:
            new_ip_range = IPAddrRange(
                first=ip_range[0],
                last=ip_range[1],
                network_group_id=floating_network_group.id)

            self.db.add(new_ip_range)
        self.db.commit()
        facts = self.serializer.serialize(self.cluster)

        for fact in facts:
            self.assertEquals(fact['floating_network_range'], [
                '172.16.0.2-172.16.0.4', '172.16.0.3-172.16.0.5',
                '172.16.0.10-172.16.0.12'
            ])
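
A minimal sketch (not taken from the serializer itself) of how the IPAddrRange rows are expected to be rendered: each range collapses to a "first-last" string, which is exactly what the assertion above checks. The helper name format_ranges is hypothetical.

def format_ranges(ip_ranges):
    # Hypothetical helper: turn (first, last) pairs into "first-last" strings.
    return ["{0}-{1}".format(first, last) for first, last in ip_ranges]

print(format_ranges([('172.16.0.2', '172.16.0.4'),
                     ('172.16.0.10', '172.16.0.12')]))
# ['172.16.0.2-172.16.0.4', '172.16.0.10-172.16.0.12']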
Example #2
    def test_assign_admin_ips_large_range(self):
        map(self.db.delete, self.db.query(IPAddrRange).all())
        admin_net_id = self.env.network_manager.get_admin_network_id()
        admin_ng = self.db.query(Network).get(admin_net_id).network_group
        mock_range = IPAddrRange(
            first='10.0.0.1',
            last='10.255.255.254',
            network_group_id=admin_ng.id
        )
        self.db.add(mock_range)
        self.db.commit()
        # Creating two nodes
        n1 = self.env.create_node()
        n2 = self.env.create_node()
        nc = zip([n1.id, n2.id], [2048, 2])

        # Assigning admin IPs to the created nodes
        map(lambda (n, c): self.env.network_manager.assign_admin_ips(n, c), nc)

        # Asserting count of admin node IPs
        def asserter(x):
            n, c = x
            count = len(self.db.query(IPAddr).filter_by(network=admin_net_id).
                        filter_by(node=n).all())
            self.assertEquals(count, c)
        map(asserter, nc)
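
For scale, a quick check with netaddr (the same library the network manager relies on) of how many addresses the mocked range holds, which is why requesting 2048 and 2 admin IPs cannot exhaust it. Illustration only.

from netaddr import IPRange

print(IPRange('10.0.0.1', '10.255.255.254').size)  # 16777214 addresses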
Example #3
    def create_network_groups(self, cluster_id):
        """Method for creation of network groups for cluster.

        :param cluster_id: Cluster database ID.
        :type  cluster_id: int
        :returns: None
        :raises: errors.OutOfVLANs, errors.OutOfIPs,
            errors.NoSuitableCIDR
        """
        cluster_db = db().query(Cluster).get(cluster_id)
        networks_metadata = \
            cluster_db.release.networks_metadata["nova_network"]

        for network in networks_metadata["networks"]:
            new_ip_range = IPAddrRange(first=network["ip_range"][0],
                                       last=network["ip_range"][1])

            nw_group = NetworkGroup(release=cluster_db.release.id,
                                    name=network['name'],
                                    cidr=network['cidr'],
                                    netmask=network['netmask'],
                                    gateway=network['gateway'],
                                    cluster_id=cluster_id,
                                    vlan_start=network['vlan_start'],
                                    amount=1,
                                    network_size=network.get(
                                        'network_size', 256))
            db().add(nw_group)
            db().commit()
            nw_group.ip_ranges.append(new_ip_range)
            db().commit()
            self.create_networks(nw_group)
Example #4
    def update_ranges_from_cidr(self, network_group, cidr):
        """Update network ranges for cidr
        """
        db().query(IPAddrRange).filter_by(
            network_group_id=network_group.id).delete()

        new_cidr = IPNetwork(cidr)
        ip_range = IPAddrRange(network_group_id=network_group.id,
                               first=str(new_cidr[2]),
                               last=str(new_cidr[-2]))

        db().add(ip_range)
        db().commit()
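
A small sketch of the netaddr indexing used here: index 2 skips the network address and the address conventionally reserved for the gateway, and -2 stops short of the broadcast address. The /24 below is only an illustration.

from netaddr import IPNetwork

net = IPNetwork('10.0.0.0/24')
print(net[2])    # 10.0.0.2   -> first address of the generated range
print(net[-2])   # 10.0.0.254 -> last address of the generated range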
Example #5
    def test_assign_admin_ips_only_one(self):
        map(self.db.delete, self.db.query(IPAddrRange).all())
        admin_net_id = self.env.network_manager.get_admin_network_id()
        admin_ng = self.db.query(Network).get(admin_net_id).network_group
        mock_range = IPAddrRange(first='10.0.0.1',
                                 last='10.0.0.1',
                                 network_group_id=admin_ng.id)
        self.db.add(mock_range)
        self.db.commit()

        node = self.env.create_node()
        self.env.network_manager.assign_admin_ips(node.id, 1)

        admin_net_id = self.env.network_manager.get_admin_network_id()

        admin_ips = self.db.query(IPAddr).\
            filter_by(node=node.id).\
            filter_by(network=admin_net_id).all()
        self.assertEquals(len(admin_ips), 1)
        self.assertEquals(admin_ips[0].ip_addr, '10.0.0.1')
Example #6
    def create_network_groups(self, cluster_id):
        '''Method for creation of network groups for cluster.

        :param cluster_id: Cluster database ID.
        :type  cluster_id: int
        :returns: None
        :raises: errors.OutOfVLANs, errors.OutOfIPs,
            errors.NoSuitableCIDR
        '''
        used_nets = [n.cidr for n in self.db.query(Network).all()]
        used_vlans = [v.id for v in self.db.query(Vlan).all()]
        cluster_db = self.db.query(Cluster).get(cluster_id)

        networks_metadata = cluster_db.release.networks_metadata

        free_vlans = set(
            range(int(settings.VLANS_RANGE_START), int(
                settings.VLANS_RANGE_END))) - set(used_vlans)

        if not free_vlans or len(free_vlans) < len(networks_metadata):
            raise errors.OutOfVLANs()

        for network in networks_metadata:
            vlan_start = sorted(list(free_vlans))[0]
            logger.debug(u"Found free vlan: %s", vlan_start)
            pool = settings.NETWORK_POOLS.get(network['access'])
            if not pool:
                raise errors.InvalidNetworkAccess(
                    u"Invalid access '{0}' for network '{1}'".format(
                        network['access'], network['name']))
            nets_free_set = IPSet(pool) -\
                IPSet(settings.NET_EXCLUDE) -\
                IPSet(
                    IPRange(
                        settings.ADMIN_NETWORK["first"],
                        settings.ADMIN_NETWORK["last"]
                    )
                ) -\
                IPSet(used_nets)
            if not nets_free_set:
                raise errors.OutOfIPs()

            free_cidrs = sorted(list(nets_free_set._cidrs))
            new_net = None
            for fcidr in free_cidrs:
                for n in fcidr.subnet(24, count=1):
                    new_net = n
                    break
                if new_net:
                    break
            if not new_net:
                raise errors.NoSuitableCIDR()

            new_ip_range = IPAddrRange(first=str(new_net[2]),
                                       last=str(new_net[-2]))

            nw_group = NetworkGroup(release=cluster_db.release.id,
                                    name=network['name'],
                                    access=network['access'],
                                    cidr=str(new_net),
                                    netmask=str(new_net.netmask),
                                    gateway=str(new_net[1]),
                                    cluster_id=cluster_id,
                                    vlan_start=vlan_start,
                                    amount=1)
            self.db.add(nw_group)
            self.db.commit()
            nw_group.ip_ranges.append(new_ip_range)
            self.db.commit()
            self.create_networks(nw_group)

            free_vlans = free_vlans - set([vlan_start])
            used_nets.append(str(new_net))
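
A rough, self-contained sketch (illustrative addresses only) of the CIDR selection above: subtract used and excluded networks from the pool with IPSet arithmetic, then carve the first available /24 out of what remains. iter_cidrs() is used here as the public counterpart of the private _cidrs attribute accessed by the original code.

from netaddr import IPNetwork, IPSet

pool = IPSet([IPNetwork('10.0.0.0/16')])
used = IPSet([IPNetwork('10.0.0.0/24'), IPNetwork('10.0.1.0/24')])
free = pool - used

new_net = None
for free_cidr in sorted(free.iter_cidrs()):
    for subnet in free_cidr.subnet(24, count=1):
        new_net = subnet
        break
    if new_net:
        break

print(new_net)  # 10.0.2.0/24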
Example #7
    def create_network_groups(self, cluster_id):
        '''Method for creation of network groups for cluster.

        :param cluster_id: Cluster database ID.
        :type  cluster_id: int
        :returns: None
        :raises: errors.OutOfVLANs, errors.OutOfIPs,
            errors.NoSuitableCIDR
        '''
        used_nets = []
        used_vlans = []

        global_params = db().query(GlobalParameters).first()

        cluster_db = db().query(Cluster).get(cluster_id)

        networks_metadata = cluster_db.release.networks_metadata

        admin_network_range = db().query(IPAddrRange).filter_by(
            network_group_id=self.get_admin_network_group_id()).all()[0]

        def _free_vlans():
            free_vlans = set(range(
                *global_params.parameters["vlan_range"])) - set(used_vlans)
            if not free_vlans or len(free_vlans) < len(networks_metadata):
                raise errors.OutOfVLANs()
            return sorted(list(free_vlans))

        public_vlan = _free_vlans()[0]
        used_vlans.append(public_vlan)
        for network in networks_metadata:
            free_vlans = _free_vlans()
            vlan_start = public_vlan if network.get("use_public_vlan") \
                else free_vlans[0]

            logger.debug("Found free vlan: %s", vlan_start)
            pool = network.get('pool')
            if not pool:
                raise errors.InvalidNetworkPool(
                    u"Invalid pool '{0}' for network '{1}'".format(
                        pool, network['name']))

            nets_free_set = IPSet(pool) -\
                IPSet(
                    IPNetwork(global_params.parameters["net_exclude"])
                ) -\
                IPSet(
                    IPRange(
                        admin_network_range.first,
                        admin_network_range.last
                    )
                ) -\
                IPSet(used_nets)
            if not nets_free_set:
                raise errors.OutOfIPs()

            free_cidrs = sorted(list(nets_free_set._cidrs))
            new_net = None
            for fcidr in free_cidrs:
                for n in fcidr.subnet(24, count=1):
                    new_net = n
                    break
                if new_net:
                    break
            if not new_net:
                raise errors.NoSuitableCIDR()

            new_ip_range = IPAddrRange(first=str(new_net[2]),
                                       last=str(new_net[-2]))

            nw_group = NetworkGroup(release=cluster_db.release.id,
                                    name=network['name'],
                                    cidr=str(new_net),
                                    netmask=str(new_net.netmask),
                                    gateway=str(new_net[1]),
                                    cluster_id=cluster_id,
                                    vlan_start=vlan_start,
                                    amount=1)
            db().add(nw_group)
            db().commit()
            nw_group.ip_ranges.append(new_ip_range)
            db().commit()
            self.create_networks(nw_group)

            used_vlans.append(vlan_start)
            used_nets.append(str(new_net))
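
The free-VLAN bookkeeping in this variant reads its bounds from the global parameters; a tiny sketch with a made-up vlan_range shows the arithmetic behind _free_vlans.

# Illustrative values only; the real bounds come from GlobalParameters.
vlan_range = [100, 110]
used_vlans = [100, 102]

free_vlans = sorted(set(range(*vlan_range)) - set(used_vlans))
print(free_vlans[0])  # 101 -> next vlan_start handed out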
Example #8
    def test_deploy_cast_with_right_args(self, mocked_rpc):
        self.env.create(
            cluster_kwargs={
                "mode": "ha",
                "type": "compute"
            },
            nodes_kwargs=[
                {"role": "controller", "pending_addition": True},
                {"role": "controller", "pending_addition": True},
                {"role": "controller", "pending_addition": True},
            ]
        )
        cluster_db = self.env.clusters[0]
        cluster_depl_mode = 'ha'

        # Set ip ranges for floating ips
        ranges = [['172.16.0.2', '172.16.0.4'],
                  ['172.16.0.3', '172.16.0.5'],
                  ['172.16.0.10', '172.16.0.12']]

        floating_network_group = self.db.query(NetworkGroup).filter(
            NetworkGroup.name == 'floating').filter(
                NetworkGroup.cluster_id == cluster_db.id).first()

        # Remove floating ip addr ranges
        self.db.query(IPAddrRange).filter(
            IPAddrRange.network_group_id == floating_network_group.id).delete()

        # Add new ranges
        for ip_range in ranges:
            new_ip_range = IPAddrRange(
                first=ip_range[0],
                last=ip_range[1],
                network_group_id=floating_network_group.id)

            self.db.add(new_ip_range)
        self.db.commit()

        # Update netmask for public network
        public_network_group = self.db.query(NetworkGroup).filter(
            NetworkGroup.name == 'public').filter(
                NetworkGroup.cluster_id == cluster_db.id).first()
        public_network_group.netmask = '255.255.255.128'
        self.db.commit()

        supertask = self.env.launch_deployment()
        deploy_task_uuid = [x.uuid for x in supertask.subtasks
                            if x.name == 'deployment'][0]

        msg = {'method': 'deploy', 'respond_to': 'deploy_resp',
               'args': {}}
        self.db.add(cluster_db)
        cluster_attrs = cluster_db.attributes.merged_attrs_values()

        nets_db = self.db.query(Network).join(NetworkGroup).\
            filter(NetworkGroup.cluster_id == cluster_db.id).all()

        for net in nets_db:
            if net.name != 'public':
                cluster_attrs[net.name + '_network_range'] = net.cidr

        cluster_attrs['floating_network_range'] = [
            '172.16.0.2-172.16.0.4',
            '172.16.0.3-172.16.0.5',
            '172.16.0.10-172.16.0.12'
        ]

        management_vip = self.env.network_manager.assign_vip(
            cluster_db.id,
            'management'
        )
        public_vip = self.env.network_manager.assign_vip(
            cluster_db.id,
            'public'
        )

        net_params = {}
        net_params['network_manager'] = "FlatDHCPManager"
        net_params['network_size'] = 256

        cluster_attrs['novanetwork_parameters'] = net_params

        cluster_attrs['management_vip'] = management_vip
        cluster_attrs['public_vip'] = public_vip
        cluster_attrs['master_ip'] = '127.0.0.1'
        cluster_attrs['deployment_mode'] = cluster_depl_mode
        cluster_attrs['deployment_id'] = cluster_db.id

        msg['args']['attributes'] = cluster_attrs
        msg['args']['task_uuid'] = deploy_task_uuid
        nodes = []
        provision_nodes = []

        admin_net_id = self.env.network_manager.get_admin_network_id()

        for n in sorted(self.env.nodes, key=lambda n: n.id):

            q = self.db.query(IPAddr).join(Network).\
                filter(IPAddr.node == n.id).filter(
                    not_(IPAddr.network == admin_net_id)
                )

            """
            Here we want to get node IP addresses which belong
            to storage and management networks respectively
            """
            node_ip_management, node_ip_storage = map(
                lambda x: q.filter_by(name=x).first().ip_addr
                + "/" + cluster_attrs[x + '_network_range'].split('/')[1],
                ('management', 'storage')
            )
            node_ip_public = q.filter_by(name='public').first().ip_addr + '/25'

            nodes.append({'uid': n.id, 'status': n.status, 'ip': n.ip,
                          'error_type': n.error_type, 'mac': n.mac,
                          'role': n.role, 'id': n.id, 'fqdn':
                          '%s-%d.%s' % (n.role, n.id, settings.DNS_DOMAIN),
                          'progress': 0, 'meta': n.meta, 'online': True,
                          'network_data': [{'brd': '192.168.0.255',
                                            'ip': node_ip_management,
                                            'vlan': 101,
                                            'gateway': '192.168.0.1',
                                            'netmask': '255.255.255.0',
                                            'dev': 'eth0',
                                            'name': 'management'},
                                           {'brd': '172.16.1.255',
                                            'ip': node_ip_public,
                                            'vlan': 100,
                                            'gateway': '172.16.1.1',
                                            'netmask': '255.255.255.128',
                                            'dev': 'eth0',
                                            'name': u'public'},
                                           {'name': u'storage',
                                            'ip': node_ip_storage,
                                            'vlan': 102,
                                            'dev': 'eth0',
                                            'netmask': '255.255.255.0',
                                            'brd': '192.168.1.255',
                                            'gateway': u'192.168.1.1'},
                                           {'vlan': 100,
                                            'name': 'floating',
                                            'dev': 'eth0'},
                                           {'vlan': 103,
                                            'name': 'fixed',
                                            'dev': 'eth0'},
                                           {'name': u'admin',
                                            'dev': 'eth0'}]})

            pnd = {
                'profile': cluster_attrs['cobbler']['profile'],
                'power_type': 'ssh',
                'power_user': '******',
                'power_address': n.ip,
                'power_pass': settings.PATH_TO_BOOTSTRAP_SSH_KEY,
                'name': TaskHelper.make_slave_name(n.id, n.role),
                'hostname': n.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'ks_spaces': "\"%s\"" % json.dumps(
                        n.attributes.volumes).replace("\"", "\\\""),
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                }
            }

            netmanager = NetworkManager()
            netmanager.assign_admin_ips(
                n.id,
                len(n.meta.get('interfaces', []))
            )

            admin_ips = set([i.ip_addr for i in self.db.query(IPAddr).
                            filter_by(node=n.id).
                            filter_by(network=admin_net_id)])

            for i in n.meta.get('interfaces', []):
                if 'interfaces' not in pnd:
                    pnd['interfaces'] = {}
                pnd['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': settings.ADMIN_NETWORK['netmask'],
                    'ip_address': admin_ips.pop(),
                }
                if 'interfaces_extra' not in pnd:
                    pnd['interfaces_extra'] = {}
                pnd['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                if i['mac'] == n.mac:
                    pnd['interfaces'][i['name']]['dns_name'] = n.fqdn
                    pnd['interfaces_extra'][i['name']]['onboot'] = 'yes'

            provision_nodes.append(pnd)

        controller_nodes = filter(
            lambda node: node['role'] == 'controller',
            nodes)
        msg['args']['attributes']['controller_nodes'] = controller_nodes
        msg['args']['nodes'] = nodes

        provision_task_uuid = [x.uuid for x in supertask.subtasks
                               if x.name == 'provision'][0]
        provision_msg = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': provision_task_uuid,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                },
                'nodes': provision_nodes,
            }
        }

        args, kwargs = nailgun.task.manager.rpc.cast.call_args
        self.assertEquals(len(args), 2)
        self.assertEquals(len(args[1]), 2)

        self.datadiff(args[1][0], provision_msg)
        self.datadiff(args[1][1], msg)
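
A minimal sketch (using the mock library, as the mocked_rpc argument implies) of why the final assertions read call_args this way: cast receives two positional arguments, and the second is a list holding the provision message followed by the deploy message. The queue name below is a placeholder.

from mock import Mock  # unittest.mock on Python 3

rpc = Mock()
rpc.cast('some-queue', [{'method': 'provision'}, {'method': 'deploy'}])

args, kwargs = rpc.cast.call_args
assert len(args) == 2      # queue name plus the list of messages
assert len(args[1]) == 2   # provision message, then deploy message
assert args[1][0]['method'] == 'provision'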