def serialize_for_cluster(cls, cluster):
    """Build a JSON-serializable dict of *cluster*'s Neutron network
    configuration.

    Includes the provider metadata, all of the cluster's network groups
    plus the admin network group, and the Neutron-specific parameters.

    :param cluster: cluster DB object with ``neutron_config`` set
    :returns: dict ready for JSON serialization
    """
    result = {}
    result['net_provider'] = cluster.net_provider
    result['net_l23_provider'] = cluster.net_l23_provider
    result['net_segment_type'] = cluster.net_segment_type
    # A list comprehension (instead of map()) guarantees a real list on
    # both Python 2 and 3, so the append() below is always valid.
    result['networks'] = [
        cls.serialize_network_group(ng) for ng in cluster.network_groups
    ]

    net_manager = NeutronManager()
    # The admin network group is not part of cluster.network_groups, so
    # it is serialized and appended explicitly.
    result['networks'].append(
        cls.serialize_network_group(net_manager.get_admin_network_group()))

    result['neutron_parameters'] = {
        'predefined_networks': cluster.neutron_config.predefined_networks,
        'L2': cluster.neutron_config.L2,
        'segmentation_type': cluster.neutron_config.segmentation_type
    }
    return result
Beispiel #2
0
    def get_default(self, node):
        """Return the default network-to-NIC assignment for *node*.

        Uses the Neutron manager when the node's cluster runs the
        'neutron' provider, otherwise the nova-network manager.
        """
        use_neutron = (node.cluster and
                       node.cluster.net_provider == 'neutron')
        manager = NeutronManager() if use_neutron else NetworkManager()
        return manager.get_default_networks_assignment(node)
Beispiel #3
0
    def update(cls, cluster, network_configuration):
        """Apply *network_configuration* to *cluster*.

        Updates each network group row (IP ranges, CIDR-derived ranges,
        plain attributes), re-creates networks where needed, and stores
        any Neutron-specific parameters on the cluster's config.

        :param cluster: cluster DB object
        :param network_configuration: dict with optional 'networks' and
            'neutron_parameters' keys
        """
        from nailgun.network.neutron import NeutronManager
        network_manager = NeutronManager()
        if 'networks' in network_configuration:
            for ng in network_configuration['networks']:
                ng_db = db().query(NetworkGroup).get(ng['id'])

                # .items() (not the Py2-only .iteritems()) matches the
                # neutron_parameters loop below and works on Python 3.
                for key, value in ng.items():
                    if key == "ip_ranges":
                        cls._set_ip_ranges(ng['id'], value)
                    else:
                        # The 'private' network keeps its existing
                        # ranges even when its CIDR changes.
                        if key == 'cidr' and ng['name'] not in ('private',):
                            network_manager.update_ranges_from_cidr(
                                ng_db, value)

                        setattr(ng_db, key, value)

                if ng['name'] != 'private':
                    network_manager.create_networks(ng_db)
                ng_db.cluster.add_pending_changes('networks')

        if 'neutron_parameters' in network_configuration:
            for key, value in network_configuration['neutron_parameters'] \
                    .items():
                setattr(cluster.neutron_config, key, value)
            db().add(cluster.neutron_config)
            db().commit()
Beispiel #4
0
    def get_default(self, node):
        """Default network assignment for *node*, provider-aware."""
        if node.cluster and node.cluster.net_provider == 'neutron':
            manager_cls = NeutronManager
        else:
            manager_cls = NetworkManager
        return manager_cls().get_default_networks_assignment(node)
Beispiel #5
0
    def verify_data_correctness(cls, node):
        """Validate a node's interface/network assignment payload
        against the DB.

        :param node: dict with 'id' and 'interfaces' keys
        :raises errors.InvalidData: when the node is unknown, the
            interface count differs, an interface or network ID is not
            in the DB, or some cluster network is left unassigned
        """
        db_node = db().query(Node).filter_by(id=node['id']).first()
        if not db_node:
            raise errors.InvalidData("There is no node with ID '%d' in DB" %
                                     node['id'],
                                     log_message=True)
        interfaces = node['interfaces']
        db_interfaces = db_node.interfaces
        if len(interfaces) != len(db_interfaces):
            raise errors.InvalidData(
                "Node '%d' has different amount of interfaces" % node['id'],
                log_message=True)
        # FIXIT: we should use not all networks but appropriate for this
        # node only.
        db_network_groups = db().query(NetworkGroup).filter_by(
            cluster_id=db_node.cluster_id).all()
        if not db_network_groups:
            raise errors.InvalidData("There are no networks related to"
                                     " node '%d' in DB" % node['id'],
                                     log_message=True)
        network_group_ids = set(ng.id for ng in db_network_groups)

        if db_node.cluster and db_node.cluster.net_provider == 'neutron':
            net_manager = NeutronManager()
        else:
            net_manager = NetworkManager()

        admin_ng_id = net_manager.get_admin_network_group_id()

        for iface in interfaces:
            # next(...) replaces the Py2-only filter(...)[0] pattern:
            # on Python 3 filter() returns a non-indexable iterator.
            db_iface = next(
                (i for i in db_interfaces if i.id == iface['id']), None)
            if db_iface is None:
                raise errors.InvalidData("There is no interface with ID '%d'"
                                         " for node '%d' in DB" %
                                         (iface['id'], node['id']),
                                         log_message=True)

            for net in iface['assigned_networks']:
                if net['id'] not in network_group_ids and \
                        net['id'] != admin_ng_id:
                    raise errors.InvalidData(
                        "Node '%d' shouldn't be connected to"
                        " network with ID '%d'" % (node['id'], net['id']),
                        log_message=True)
                elif net['id'] != admin_ng_id:
                    # Mark this cluster network as assigned.
                    network_group_ids.remove(net['id'])

        # Check if there are unassigned networks for this node.
        if network_group_ids:
            raise errors.InvalidData(
                "Too few networks to assign to node '%d'" % node['id'],
                log_message=True)
    def test_check_ip_conflicts(self):
        """Conflict detection for CIDR and ip_ranges notations against
        the IPs already assigned from the management network."""
        mgmt = self.find_net_by_name(consts.NETWORKS.management)

        # firstly check default IPs from management net assigned to nodes
        ips = NeutronManager.get_assigned_ips_by_network_id(mgmt['id'])
        self.assertListEqual(['192.168.0.1', '192.168.0.2'], sorted(ips),
                             "Default IPs were changed for some reason.")

        def conflicts(notation):
            return NetworkConfigurationValidator._check_for_ip_conflicts(
                mgmt, self.cluster, notation, False)

        # CIDR that no longer covers the assigned IPs -> True.
        mgmt['cidr'] = '10.101.0.0/24'
        self.assertTrue(conflicts(consts.NETWORK_NOTATION.cidr))

        # Smaller CIDR that still covers them -> False.
        mgmt['cidr'] = '192.168.0.0/28'
        self.assertFalse(conflicts(consts.NETWORK_NOTATION.cidr))

        # Ranges covering the assigned IPs -> False.
        mgmt['ip_ranges'] = [['192.168.0.1', '192.168.0.15']]
        self.assertFalse(conflicts(consts.NETWORK_NOTATION.ip_ranges))

        # Ranges excluding the assigned IPs -> True.
        mgmt['ip_ranges'] = [['10.101.0.1', '10.101.0.255']]
        self.assertTrue(conflicts(consts.NETWORK_NOTATION.ip_ranges))
    def test_gre_get_default_nic_assignment(self):
        """Default NIC assignment for a neutron/GRE cluster must match
        the networks already recorded per NIC in the DB."""
        self.env.create(
            cluster_kwargs={
                'net_provider': 'neutron',
                'net_segment_type': 'gre'},
            nodes_kwargs=[
                {'api': True,
                 'pending_addition': True}
            ])
        node = self.env.nodes[0]

        admin_if_id = node.admin_interface.id
        admin_if = self.db.query(NodeNICInterface).get(admin_if_id)
        expected_admin = set(
            ng.name for ng in admin_if.assigned_networks_list)

        secondary = self.db.query(NodeNICInterface).filter_by(
            node_id=node.id
        ).filter(
            not_(NodeNICInterface.id == admin_if_id)
        ).first()
        expected_other = set(
            ng.name for ng in secondary.assigned_networks_list)

        defaults = NeutronManager.get_default_networks_assignment(node)

        admin_defaults = [d for d in defaults if d['id'] == admin_if_id]
        other_defaults = [d for d in defaults if d['id'] == secondary.id]

        self.assertEquals(len(admin_defaults), 1)
        self.assertEquals(len(other_defaults), 1)
        self.assertEquals(
            expected_admin,
            set(n['name'] for n in admin_defaults[0]['assigned_networks']))
        self.assertEquals(
            expected_other,
            set(n['name'] for n in other_defaults[0]['assigned_networks']))
Beispiel #8
0
    def test_gre_get_default_nic_assignment(self):
        """GRE cluster: NeutronManager's default assignment mirrors the
        per-NIC networks stored in the DB."""
        self.env.create(
            cluster_kwargs={'net_provider': 'neutron',
                            'net_segment_type': 'gre'},
            nodes_kwargs=[{'api': True, 'pending_addition': True}])
        node_db = self.env.nodes[0]

        admin_nic_id = node_db.admin_interface.id
        admin_nets = [n.name for n in self.db.query(NodeNICInterface)
                      .get(admin_nic_id).assigned_networks_list]

        other_nic = (self.db.query(NodeNICInterface)
                     .filter_by(node_id=node_db.id)
                     .filter(not_(NodeNICInterface.id == admin_nic_id))
                     .first())
        other_nets = [n.name for n in other_nic.assigned_networks_list]

        nics = NeutronManager.get_default_networks_assignment(node_db)

        def names_for(nic_id):
            # Exactly one default entry per NIC id is expected.
            matches = [n for n in nics if n['id'] == nic_id]
            self.assertEquals(len(matches), 1)
            return set(n['name'] for n in matches[0]['assigned_networks'])

        self.assertEquals(set(admin_nets), names_for(admin_nic_id))
        self.assertEquals(set(other_nets), names_for(other_nic.id))
    def test_check_ip_conflicts(self):
        """CIDR / ip_ranges conflict checks against assigned IPs."""
        mgmt = self.find_net_by_name('management')

        # Baseline: the management net currently has these two IPs.
        ips = NeutronManager.get_assigned_ips_by_network_id(mgmt['id'])
        self.assertListEqual(['192.168.0.1', '192.168.0.2'], sorted(ips),
                             "Default IPs were changed for some reason.")

        # (notation, new value, expected-check) in the original order.
        cases = [
            ('cidr', '10.101.0.0/24', self.assertTrue),
            ('cidr', '192.168.0.0/28', self.assertFalse),
            ('ip_ranges', [['192.168.0.1', '192.168.0.15']],
             self.assertFalse),
            ('ip_ranges', [['10.101.0.1', '10.101.0.255']],
             self.assertTrue),
        ]
        for notation, value, check in cases:
            mgmt[notation] = value
            check(NetworkConfigurationValidator._check_for_ip_conflicts(
                mgmt, self.cluster, notation, False))
    def test_get_default_nic_networkgroups(self):
        """After the admin IP moves to another interface (reported via an
        agent PUT), the default assignment should follow the new admin NIC.

        NOTE(review): the assertion near the end expects the new admin NIC
        to be the previously-non-admin interface — confirm against
        NodeCollectionHandler's agent-update semantics.
        """
        cluster = self.env.create_cluster(api=True,
                                          net_provider='neutron',
                                          net_segment_type='gre')
        node = self.env.create_node(api=True)
        node_db = self.env.nodes[0]

        admin_nic = node_db.admin_interface
        # Any interface other than the current admin one.
        other_iface = self.db.query(NodeNICInterface).filter_by(
            node_id=node['id']
        ).filter(
            not_(NodeNICInterface.id == admin_nic.id)
        ).first()

        interfaces = deepcopy(node_db.meta['interfaces'])

        # allocate ip from admin subnet
        admin_ip = str(IPNetwork(
            NetworkManager.get_admin_network_group().cidr)[0])
        for interface in interfaces:
            if interface['mac'] == admin_nic.mac:
                # reset admin ip for previous admin interface
                interface['ip'] = None
            elif interface['mac'] == other_iface.mac:
                # set new admin interface
                interface['ip'] = admin_ip

        node_db.meta['interfaces'] = interfaces

        # Simulate an agent update reporting the new interface layout.
        self.app.put(
            reverse('NodeCollectionHandler'),
            json.dumps([{
                        'mac': admin_nic.mac,
                        'meta': node_db.meta,
                        'is_agent': True,
                        'cluster_id': cluster["id"]
                        }]),
            headers=self.default_headers,
            expect_errors=True
        )

        new_main_nic_id = node_db.admin_interface.id
        admin_nets = [n.name for n in self.db.query(
            NodeNICInterface).get(new_main_nic_id).assigned_networks]
        other_nets = [n.name for n in other_iface.assigned_networks]

        nics = NeutronManager.get_default_networks_assignment(node_db)
        def_admin_nic = [n for n in nics if n['id'] == new_main_nic_id]
        def_other_nic = [n for n in nics if n['id'] == other_iface.id]

        self.assertEquals(len(def_admin_nic), 1)
        self.assertEquals(len(def_other_nic), 1)
        # The admin role is expected to have moved to the other iface.
        self.assertEquals(new_main_nic_id, other_iface.id)
        self.assertEquals(
            set(admin_nets),
            set([n['name'] for n in def_admin_nic[0]['assigned_networks']]))
        self.assertEquals(
            set(other_nets),
            set([n['name'] for n in def_other_nic[0]['assigned_networks']]))
    def network_cluster_attrs(cls, cluster):
        """Cluster attributes (quantum enabled plus its settings)."""
        attrs = {'quantum': True,
                 'quantum_settings': cls.neutron_attrs(cluster)}

        if cluster.mode == 'multinode':
            nm = NeutronManager()
            # Use the management IP of the first controller as the VIP.
            controllers = (n for n in cluster.nodes
                           if cls._node_has_role_by_name(n, 'controller'))
            for ctrl in controllers:
                mgmt_cidr = nm.get_node_network_by_netname(
                    ctrl.id, 'management')['ip']
                attrs['management_vip'] = mgmt_cidr.split('/')[0]
                break

        return attrs
Beispiel #12
0
    def network_cluster_attrs(cls, cluster):
        """Return base network attributes for *cluster*.

        In multinode mode the management VIP is the management-network
        IP (sans prefix length) of the first controller node found.
        """
        attrs = {
            'quantum': True,
            'quantum_settings': cls.neutron_attrs(cluster)
        }

        if cluster.mode == 'multinode':
            nm = NeutronManager()
            controller = next(
                (node for node in cluster.nodes
                 if cls._node_has_role_by_name(node, 'controller')), None)
            if controller is not None:
                cidr = nm.get_node_network_by_netname(
                    controller.id, 'management')['ip']
                attrs['management_vip'] = cidr.split('/')[0]

        return attrs
Beispiel #13
0
    def update(cls, cluster, network_configuration):
        """Apply *network_configuration* to *cluster*.

        Skips the admin network group (it is not cluster-managed),
        updates the remaining network groups, and stores any
        Neutron-specific parameters on the cluster's config.

        :param cluster: cluster DB object
        :param network_configuration: dict with optional 'networks' and
            'neutron_parameters' keys
        """
        from nailgun.network.neutron import NeutronManager
        network_manager = NeutronManager()
        if 'networks' in network_configuration:
            for ng in network_configuration['networks']:
                if ng['id'] == network_manager.get_admin_network_group_id():
                    continue

                ng_db = db().query(NetworkGroup).get(ng['id'])

                # .items() (not the Py2-only .iteritems()) matches the
                # neutron_parameters loop below and works on Python 3.
                for key, value in ng.items():
                    if key == "ip_ranges":
                        cls._set_ip_ranges(ng['id'], value)
                    else:
                        # 'private' keeps its ranges even on CIDR change.
                        if key == 'cidr' and ng['name'] not in ('private',):
                            network_manager.update_ranges_from_cidr(
                                ng_db, value)

                        setattr(ng_db, key, value)

                if ng['name'] != 'private':
                    network_manager.create_networks(ng_db)
                ng_db.cluster.add_pending_changes('networks')

        if 'neutron_parameters' in network_configuration:
            for key, value in network_configuration['neutron_parameters'] \
                    .items():
                setattr(cluster.neutron_config, key, value)
            db().add(cluster.neutron_config)
            db().commit()
Beispiel #14
0
    def POST(self):
        """:returns: JSONized Cluster object.
        :http: * 201 (cluster successfully created)
               * 400 (invalid cluster data specified)
               * 409 (cluster with such parameters already exists)
        """
        # It's used for cluster creating only.
        data = self.checked_data()

        cluster = Cluster()
        cluster.release = db().query(Release).get(data["release"])
        # TODO(NAME): use fields
        for field in ("name", "mode", "net_provider", "net_segment_type"):
            if data.get(field):
                setattr(cluster, field, data.get(field))
        db().add(cluster)
        db().commit()
        # Seed the cluster's attributes from the release metadata.
        attributes = Attributes(
            editable=cluster.release.attributes_metadata.get("editable"),
            generated=cluster.release.attributes_metadata.get("generated"),
            cluster=cluster)
        attributes.generate_fields()

        # NOTE(review): if net_provider is neither 'nova_network' nor
        # 'neutron', netmanager stays unbound and the call below raises
        # UnboundLocalError — presumably the validator restricts it to
        # these two values; confirm.
        if cluster.net_provider == 'nova_network':
            netmanager = NetworkManager()
        elif cluster.net_provider == 'neutron':
            netmanager = NeutronManager()

        try:
            netmanager.create_network_groups(cluster.id)
            if cluster.net_provider == 'neutron':
                netmanager.create_neutron_config(cluster)

            cluster.add_pending_changes("attributes")
            cluster.add_pending_changes("networks")

            # Attach any requested nodes and give each its default
            # network assignment.
            if 'nodes' in data and data['nodes']:
                nodes = db().query(Node).filter(Node.id.in_(
                    data['nodes'])).all()
                # NOTE(review): map() for side effects relies on Python 2
                # eagerness; on Python 3 this would be a no-op.
                map(cluster.nodes.append, nodes)
                db().commit()
                for node in nodes:
                    netmanager.allow_network_assignment_to_all_interfaces(node)
                    netmanager.assign_networks_by_default(node)

            # web.py signals success by raising the HTTP response.
            raise web.webapi.created(
                json.dumps(ClusterHandler.render(cluster), indent=4))
        except (errors.OutOfVLANs, errors.OutOfIPs, errors.NoSuitableCIDR,
                errors.InvalidNetworkPool) as e:
            # Cluster was created in this request,
            # so we no need to use ClusterDeletionManager.
            # All relations will be cascade-deleted automatically.
            # TODO(NAME): investigate transactions
            db().delete(cluster)

            raise web.badrequest(e.message)
    def test_vlan_get_default_nic_assignment(self):
        """Default NIC assignment for a neutron/VLAN cluster with three
        interfaces: admin, 'private'-only and the remaining NIC each
        keep the networks recorded in the DB."""
        meta = self.env.default_metadata()
        self.env.set_interfaces_in_meta(
            meta,
            [{'name': 'eth0', 'mac': '00:00:00:00:00:11'},
             {'name': 'eth1', 'mac': '00:00:00:00:00:22'},
             {'name': 'eth2', 'mac': '00:00:00:00:00:33'}])
        self.env.create(
            cluster_kwargs={
                'net_provider': 'neutron',
                'net_segment_type': 'vlan'},
            nodes_kwargs=[
                {'api': True,
                 'meta': meta,
                 'pending_addition': True}
            ])
        node_db = self.env.nodes[0]

        admin_nic_id = node_db.admin_interface.id
        admin_nets = [n.name for n in self.db.query(NodeNICInterface)
                      .get(admin_nic_id).assigned_networks_list]

        remaining = self.db.query(NodeNICInterface).filter_by(
            node_id=node_db.id
        ).filter(
            not_(NodeNICInterface.id == admin_nic_id)
        ).all()
        priv_nic = other_nic = None
        priv_nets = other_nets = None
        for nic in remaining:
            nic_nets = [n.name for n in nic.assigned_networks_list]
            if nic_nets == ['private']:
                priv_nic, priv_nets = nic, nic_nets
            elif nic_nets:
                other_nic, other_nets = nic, nic_nets

        self.assertTrue(priv_nic and other_nic)
        nics = NeutronManager.get_default_networks_assignment(node_db)
        def_admin_nic = [n for n in nics if n['id'] == admin_nic_id]
        def_priv_nic = [n for n in nics if n['id'] == priv_nic.id]
        def_other_nic = [n for n in nics if n['id'] == other_nic.id]

        self.assertEquals(len(def_admin_nic), 1)
        self.assertEquals(len(def_priv_nic), 1)
        self.assertEquals(len(def_other_nic), 1)
        self.assertEquals(
            set(admin_nets),
            set(n['name'] for n in def_admin_nic[0]['assigned_networks']))
        self.assertEquals(
            set(priv_nets),
            set(n['name'] for n in def_priv_nic[0]['assigned_networks']))
        self.assertEquals(
            set(other_nets),
            set(n['name'] for n in def_other_nic[0]['assigned_networks']))
Beispiel #16
0
    def test_vlan_get_default_nic_assignment(self):
        """VLAN cluster: admin and the public-bearing NIC keep their DB
        networks in the default assignment; one NIC stays empty."""
        meta = self.env.default_metadata()
        self.env.set_interfaces_in_meta(
            meta,
            [{'name': 'eth0', 'mac': '00:00:00:00:00:11'},
             {'name': 'eth1', 'mac': '00:00:00:00:00:22'},
             {'name': 'eth2', 'mac': '00:00:00:00:00:33'}])
        self.env.create(
            cluster_kwargs={'net_provider': 'neutron',
                            'net_segment_type': 'vlan'},
            nodes_kwargs=[{'api': True,
                           'meta': meta,
                           'pending_addition': True}])
        node_db = self.env.nodes[0]

        admin_nic_id = node_db.admin_interface.id
        admin_nets = [n.name for n in self.db.query(NodeNICInterface)
                      .get(admin_nic_id).assigned_networks_list]

        other_nics = (self.db.query(NodeNICInterface)
                      .filter_by(node_id=node_db.id)
                      .filter(not_(NodeNICInterface.id == admin_nic_id))
                      .all())
        other_nic, empty_nic = None, None
        for nic in other_nics:
            assigned = [n.name for n in nic.assigned_networks_list]
            if 'public' in assigned:
                other_nic, other_nets = nic, assigned
            elif assigned == []:
                empty_nic = nic

        self.assertTrue(other_nic and empty_nic)
        nics = NeutronManager.get_default_networks_assignment(node_db)
        def_admin_nic = [n for n in nics if n['id'] == admin_nic_id]
        def_other_nic = [n for n in nics if n['id'] == other_nic.id]

        self.assertEquals(len(def_admin_nic), 1)
        self.assertEquals(len(def_other_nic), 1)
        self.assertEquals(
            set(admin_nets),
            set(n['name'] for n in def_admin_nic[0]['assigned_networks']))
        self.assertEquals(
            set(other_nets),
            set(n['name'] for n in def_other_nic[0]['assigned_networks']))
Beispiel #17
0
    def serialize_for_cluster(cls, cluster):
        """Build a serializable dict of *cluster*'s Neutron network
        configuration: provider info, networks (including the admin
        network group) and the Neutron-specific parameters.
        """
        result = {}
        result['net_provider'] = cluster.net_provider
        result['net_l23_provider'] = cluster.net_l23_provider
        result['net_segment_type'] = cluster.net_segment_type
        # List comprehension instead of map(): guarantees a real list on
        # both Python 2 and 3, so the append() below always works.
        result['networks'] = [cls.serialize_network_group(ng)
                              for ng in cluster.network_groups]

        net_manager = NeutronManager()
        # The admin network group is not in cluster.network_groups, so
        # serialize and append it explicitly.
        result['networks'].append(
            cls.serialize_network_group(net_manager.get_admin_network_group()))

        result['neutron_parameters'] = {
            'predefined_networks': cluster.neutron_config.predefined_networks,
            'L2': cluster.neutron_config.L2,
            'segmentation_type': cluster.neutron_config.segmentation_type
        }
        return result
Beispiel #18
0
    def PUT(self, cluster_id):
        """Verify and apply a Neutron network configuration update.

        :returns: JSONized Task tracking the verification (202 response)
        """
        data = json.loads(web.data())
        # The fuelweb_admin network is not user-editable; drop it from
        # the payload before checking/validating.
        if data.get("networks"):
            data["networks"] = [
                n for n in data["networks"] if n.get("name") != "fuelweb_admin"
            ]
        cluster = self.get_object_or_404(Cluster, cluster_id)
        self.check_net_provider(cluster)

        self.check_if_network_configuration_locked(cluster)

        # Run the network-check task first; only apply on success.
        task_manager = CheckNetworksTaskManager(cluster_id=cluster.id)
        task = task_manager.execute(data)

        if task.status != 'error':

            try:
                if 'networks' in data:
                    self.validator.validate_networks_update(
                        json.dumps(data)
                    )

                if 'neutron_parameters' in data:
                    self.validator.validate_neutron_params(
                        json.dumps(data),
                        cluster_id=cluster_id
                    )

                # Apply the validated configuration; any failure marks
                # the task as errored instead of propagating.
                NeutronManager.update(cluster, data)
            except Exception as exc:
                TaskHelper.set_error(task.uuid, exc)
                logger.error(traceback.format_exc())

        data = build_json_response(TaskHandler.render(task))
        # Roll back DB changes if the check failed, commit otherwise.
        if task.status == 'error':
            db().rollback()
        else:
            db().commit()
        raise web.accepted(data=data)
    def serialize_for_cluster(cls, cluster):
        """Build a serializable dict of *cluster*'s Neutron network
        configuration.

        Includes provider info, all network groups plus the admin
        network group, per-network VIPs in HA mode, and the
        Neutron-specific parameters.
        """
        result = {}
        result['net_provider'] = cluster.net_provider
        result['net_l23_provider'] = cluster.net_l23_provider
        result['net_segment_type'] = cluster.net_segment_type
        # List comprehension instead of map(): guarantees a real list on
        # both Python 2 and 3, so the append() below always works.
        result['networks'] = [cls.serialize_network_group(ng)
                              for ng in cluster.network_groups]

        net_manager = NeutronManager()
        # The admin network group is not part of cluster.network_groups.
        result['networks'].append(
            cls.serialize_network_group(net_manager.get_admin_network_group()))

        if cluster.is_ha_mode:
            # Assign a VIP for every network that does not explicitly
            # opt out via "assign_vip": false in the release metadata.
            nw_metadata = cluster.release.networks_metadata["neutron"]
            for network in nw_metadata["networks"]:
                if network.get("assign_vip") is not False:
                    vip_key = '{0}_vip'.format(network["name"])
                    result[vip_key] = net_manager.assign_vip(
                        cluster.id, network["name"])

        result['neutron_parameters'] = {
            'predefined_networks': cluster.neutron_config.predefined_networks,
            'L2': cluster.neutron_config.L2,
            'segmentation_type': cluster.neutron_config.segmentation_type
        }
        return result
    def check_networks_assignment(self, node_db):
        """Assert each NIC's default interface configuration matches
        the networks assigned in the DB."""
        node_nics = self.db.query(NodeNICInterface).filter_by(
            node_id=node_db.id).all()

        def_nics = NeutronManager.get_default_interfaces_configuration(
            node_db)

        self.assertEqual(len(node_nics), len(def_nics))
        for nic in node_nics:
            expected = set(n['name'] for n in nic.assigned_networks)
            match = next(
                (d for d in def_nics if d['id'] == nic.id), None)
            if match is None:
                self.fail("NIC is not found")
            assigned = match.get('assigned_networks')
            actual = set(n['name'] for n in assigned) if assigned else set()
            self.assertEqual(expected, actual)
Beispiel #21
0
    def check_networks_assignment(self, node_db):
        """Compare DB-assigned networks per NIC against the default
        interface configuration computed by NeutronManager."""
        db_nics = self.db.query(NodeNICInterface).filter_by(
            node_id=node_db.id).all()

        default_nics = NeutronManager.get_default_interfaces_configuration(
            node_db)

        self.assertEqual(len(db_nics), len(default_nics))
        for db_nic in db_nics:
            db_names = set(net['name'] for net in db_nic.assigned_networks)
            for default in default_nics:
                if default['id'] != db_nic.id:
                    continue
                nets = default.get('assigned_networks')
                default_names = (set(net['name'] for net in nets)
                                 if nets else set())
                self.assertEqual(db_names, default_names)
                break
            else:
                self.fail("NIC is not found")
    def check_networks_assignment(self, node_db):
        """Each NIC's default network assignment must match the DB."""
        node_nics = self.db.query(NodeNICInterface).filter_by(
            node_id=node_db.id
        ).all()

        def_nics = NeutronManager.get_default_networks_assignment(node_db)

        self.assertEqual(len(node_nics), len(def_nics))
        # Index defaults by NIC id, keeping the first entry per id.
        defaults = {}
        for d_nic in def_nics:
            defaults.setdefault(d_nic['id'], d_nic)
        for n_nic in node_nics:
            d_nic = defaults.get(n_nic.id)
            if d_nic is None:
                self.fail("NIC is not found")
            n_assigned = set(n['name'] for n in n_nic.assigned_networks)
            nets = d_nic.get('assigned_networks')
            d_assigned = set(n['name'] for n in nets) if nets else set()
            self.assertEqual(n_assigned, d_assigned)
Beispiel #23
0
    def update(cls, cluster, network_configuration):
        """Apply *network_configuration* to *cluster*.

        Skips the admin network group, updates the remaining network
        groups (with special CIDR/gateway handling for 'public'), and
        stores Neutron-specific parameters on the cluster's config.

        :param cluster: cluster DB object
        :param network_configuration: dict with optional 'networks' and
            'neutron_parameters' keys
        """
        from nailgun.network.neutron import NeutronManager
        network_manager = NeutronManager()
        if 'networks' in network_configuration:
            for ng in network_configuration['networks']:
                if ng['id'] == network_manager.get_admin_network_group_id():
                    continue

                ng_db = db().query(NetworkGroup).get(ng['id'])

                # .items() (not the Py2-only .iteritems()) matches the
                # neutron_parameters loop below and works on Python 3.
                for key, value in ng.items():
                    if key == "ip_ranges":
                        cls._set_ip_ranges(ng['id'], value)
                    else:
                        # 'private' and 'public' get special range
                        # handling elsewhere.
                        if key == 'cidr' and \
                                ng['name'] not in ('private', 'public'):
                            network_manager.update_range_mask_from_cidr(
                                ng_db, value)

                        setattr(ng_db, key, value)

                if ng['name'] == 'public':
                    cls.update_cidr_from_gw_mask(ng_db, ng)
                    #TODO(NAME) get rid of unmanaged parameters in request
                    if 'neutron_parameters' in network_configuration:
                        pre_nets = network_configuration[
                            'neutron_parameters']['predefined_networks']
                        pre_nets['net04_ext']['L3']['gateway'] = ng['gateway']
                if ng['name'] != 'private':
                    network_manager.create_networks(ng_db)
                ng_db.cluster.add_pending_changes('networks')

        if 'neutron_parameters' in network_configuration:
            for key, value in network_configuration['neutron_parameters'] \
                    .items():
                setattr(cluster.neutron_config, key, value)
            db().add(cluster.neutron_config)
            db().commit()
Beispiel #24
0
    def verify_data_correctness(cls, node):
        """Validate a node's interface/network payload against the DB.

        :param node: dict with 'id' and 'interfaces' keys
        :raises errors.InvalidData: on unknown node, interface-count
            mismatch, unknown interface or network IDs, or cluster
            networks left unassigned
        """
        db_node = db().query(Node).filter_by(id=node['id']).first()
        if not db_node:
            raise errors.InvalidData(
                "There is no node with ID '%d' in DB" % node['id'],
                log_message=True
            )
        interfaces = node['interfaces']
        db_interfaces = db_node.interfaces
        if len(interfaces) != len(db_interfaces):
            raise errors.InvalidData(
                "Node '%d' has different amount of interfaces" % node['id'],
                log_message=True
            )
        # FIXIT: we should use not all networks but appropriate for this
        # node only.
        db_network_groups = db().query(NetworkGroup).filter_by(
            cluster_id=db_node.cluster_id
        ).all()
        if not db_network_groups:
            raise errors.InvalidData(
                "There are no networks related to"
                " node '%d' in DB" % node['id'],
                log_message=True
            )
        network_group_ids = set(ng.id for ng in db_network_groups)

        if db_node.cluster and db_node.cluster.net_provider == 'neutron':
            net_manager = NeutronManager()
        else:
            net_manager = NetworkManager()

        admin_ng_id = net_manager.get_admin_network_group_id()

        for iface in interfaces:
            # next(...) replaces the Py2-only filter(...)[0] pattern:
            # on Python 3 filter() returns a non-indexable iterator.
            db_iface = next(
                (i for i in db_interfaces if i.id == iface['id']),
                None
            )
            if db_iface is None:
                raise errors.InvalidData(
                    "There is no interface with ID '%d'"
                    " for node '%d' in DB" %
                    (iface['id'], node['id']),
                    log_message=True
                )

            for net in iface['assigned_networks']:
                if net['id'] not in network_group_ids and \
                        net['id'] != admin_ng_id:
                    raise errors.InvalidData(
                        "Node '%d' shouldn't be connected to"
                        " network with ID '%d'" %
                        (node['id'], net['id']),
                        log_message=True
                    )
                elif net['id'] != admin_ng_id:
                    # Mark this cluster network as assigned.
                    network_group_ids.remove(net['id'])

        # Check if there are unassigned networks for this node.
        if network_group_ids:
            raise errors.InvalidData(
                "Too few networks to assign to node '%d'" % node['id'],
                log_message=True
            )
Beispiel #25
0
    def PUT(self):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        data = self.checked_data(self.validator.validate_collection_update)

        q = db().query(Node)
        nodes_updated = []
        for nd in data:
            # "is_agent" is a transport flag, not a node attribute --
            # strip it before the generic attribute-update loop below.
            is_agent = nd.pop("is_agent", False)
            node = None
            if "mac" in nd:
                node = q.filter_by(mac=nd["mac"]).first() \
                    or self.validator.validate_existent_node_mac_update(nd)
            else:
                node = q.get(nd["id"])
            if is_agent:
                # Agent check-in: refresh heartbeat and mark the node
                # online again if it was considered offline.
                node.timestamp = datetime.now()
                if not node.online:
                    node.online = True
                    msg = u"Node '{0}' is back online".format(
                        node.human_readable_name)
                    logger.info(msg)
                    notifier.notify("discover", msg, node_id=node.id)
                db().commit()
            old_cluster_id = node.cluster_id

            # Choosing network manager
            if nd.get('cluster_id') is not None:
                cluster = db().query(Cluster).get(nd['cluster_id'])
            else:
                cluster = node.cluster

            # NeutronManager only for neutron clusters; NetworkManager
            # covers nova_network, no cluster, and any other provider.
            # (Previously an unrecognized provider left network_manager
            # unbound, raising NameError when it was used further down.)
            if cluster and cluster.net_provider == "neutron":
                network_manager = NeutronManager()
            else:
                # essential rollback - we can't avoid it now
                network_manager = NetworkManager()
            # /Choosing network manager

            if nd.get("pending_roles") == [] and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)

            if "cluster_id" in nd:
                if nd["cluster_id"] is None and node.cluster:
                    # Node is being detached from its cluster: drop
                    # pending changes and all role assignments.
                    node.cluster.clear_pending_changes(node_id=node.id)
                    node.roles = node.pending_roles = []
                node.cluster_id = nd["cluster_id"]

            # Volumes must be regenerated when roles (current or
            # pending) or cluster membership changed.
            regenerate_volumes = any((
                'roles' in nd
                and set(nd['roles']) != set(node.roles),
                'pending_roles' in nd
                and set(nd['pending_roles']) != set(node.pending_roles),
                node.cluster_id != old_cluster_id
            ))

            for key, value in nd.iteritems():
                if is_agent and (key, value) == ("status", "discover") \
                        and node.status == "provisioning":
                    # We don't update provisioning back to discover
                    logger.debug("Node is already provisioning - "
                                 "status not updated by agent")
                    continue
                if key == "meta":
                    node.update_meta(value)
                else:
                    setattr(node, key, value)
            db().commit()
            if not node.attributes:
                node.attributes = NodeAttributes()
                db().commit()
            if not node.attributes.volumes:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
                db().commit()
            if node.status not in ('provisioning', 'deploying'):
                # Regenerate volumes when the disk set reported by the
                # agent diverges from the stored volumes, or when roles
                # or cluster changed (see regenerate_volumes above).
                variants = ("disks" in node.meta
                            and len(node.meta["disks"]) != len(
                                filter(lambda d: d["type"] == "disk",
                                       node.attributes.volumes)),
                            regenerate_volumes)
                if any(variants):
                    try:
                        node.attributes.volumes = \
                            node.volume_manager.gen_volumes_info()
                        if node.cluster:
                            node.cluster.add_pending_changes("disks",
                                                             node_id=node.id)
                    except Exception as exc:
                        # Fix: 'data' is the whole request collection (a
                        # list); the per-node dict is 'nd'.  Calling
                        # data.get(...) raised AttributeError here.
                        msg = ("Failed to generate volumes "
                               "info for node '{0}': '{1}'").format(
                                   node.name or nd.get("mac")
                                   or nd.get("id"),
                                   str(exc) or "see logs for details")
                        logger.warning(traceback.format_exc())
                        notifier.notify("error", msg, node_id=node.id)

                db().commit()
            if is_agent:
                # Update node's NICs.
                network_manager.update_interfaces_info(node)

            nodes_updated.append(node)
            db().commit()
            if 'cluster_id' in nd and nd['cluster_id'] != old_cluster_id:
                # Cluster membership changed: release old network
                # assignments and/or apply the new cluster's defaults.
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node)
                    network_manager.clear_all_allowed_networks(node.id)
                if nd['cluster_id']:
                    network_manager.assign_networks_by_default(node)
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node)

        # we need eagerload everything that is used in render
        nodes = db().query(Node).options(
            joinedload('cluster'),
            joinedload('interfaces'),
            joinedload('interfaces.assigned_networks')).\
            filter(Node.id.in_([n.id for n in nodes_updated])).all()
        return self.render(nodes)
Beispiel #26
0
    def neutron_check_config(cls, task, data):
        """Validate a Neutron cluster's network configuration.

        Runs a sequence of check groups over the (possibly partial)
        configuration in ``data``, falling back to the cluster's stored
        values where a section is absent.  Failing checks accumulate
        entries in ``result`` and messages in ``err_msgs``; after each
        group ``expose_error_messages`` persists ``result`` on the task
        and raises ``errors.NetworkCheckError`` if anything failed.

        :param task: verification task (provides the cluster and stores
                     the check result)
        :param data: submitted network configuration dict
        :raises errors.NetworkCheckError: on the first failing check group
        """

        if 'networks' in data:
            networks = data['networks']
        else:
            # Fall back to the network groups stored on the cluster.
            networks = map(lambda x: x.__dict__, task.cluster.network_groups)

        result = []

        # check: networks VLAN IDs should not be in
        # Neutron L2 private VLAN ID range (VLAN segmentation only)
        tagged_nets = dict((n["name"], n["vlan_start"]) for n in filter(
            lambda n: (n["vlan_start"] is not None), networks))

        if tagged_nets:
            if task.cluster.net_segment_type == 'vlan':
                if 'neutron_parameters' in data:
                    l2cfg = data['neutron_parameters']['L2']
                else:
                    l2cfg = task.cluster.neutron_config.L2
                # NOTE(review): only the first phys_net with a non-empty
                # vlan_range is checked (the for/else breaks on it) --
                # confirm multiple phys_nets are not expected here.
                for net, net_conf in l2cfg['phys_nets'].iteritems():
                    vrange = net_conf['vlan_range']
                    if vrange:
                        break
                else:
                    # No phys_net provided a usable VLAN range at all.
                    err_msg = u"Wrong VLAN range.\n"
                    raise errors.NetworkCheckError(err_msg, add_client=False)

                # Network tags must not fall inside Neutron's private
                # VLAN ID range.
                net_intersect = [name for name, vlan in tagged_nets.iteritems()
                                 if vrange[0] <= vlan <= vrange[1]]
                if net_intersect:
                    nets_with_errors = ", ".\
                        join(net_intersect)
                    err_msg = u"Networks VLAN tags are in " \
                              "ID range defined for Neutron L2. " \
                              "You should assign VLAN tags that are " \
                              "not in Neutron L2 VLAN ID range:\n{0}". \
                        format(nets_with_errors)
                    raise errors.NetworkCheckError(err_msg, add_client=False)

            # check: networks VLAN IDs should not intersect
            net_intersect = [name for name, vlan in tagged_nets.iteritems()
                             if tagged_nets.values().count(vlan) >= 2]
            if net_intersect:
                nets_with_errors = ", ". \
                    join(net_intersect)
                err_msg = u"Some networks use the same VLAN tags. " \
                          "You should assign different VLAN tag " \
                          "to every network:\n{0}". \
                    format(nets_with_errors)
                raise errors.NetworkCheckError(err_msg, add_client=False)

        # Closure over result/err_msgs/task: persists the accumulated
        # result on the task and raises if any messages were collected.
        def expose_error_messages():
            if err_msgs:
                task.result = result
                db().add(task)
                db().commit()
                full_err_msg = "\n".join(err_msgs)
                raise errors.NetworkCheckError(full_err_msg, add_client=False)

        # check intersection of address ranges
        # between admin networks and all other networks
        net_man = NeutronManager()
        admin_ng = net_man.get_admin_network_group()
        admin_range = netaddr.IPNetwork(admin_ng.cidr)
        err_msgs = []
        for ng in networks:
            net_errors = []
            # Indexes of ip_ranges entries that intersect the admin range.
            sub_ranges = []
            ng_db = db().query(NetworkGroup).get(ng['id'])
            if not ng_db:
                net_errors.append("id")
                err_msgs.append("Invalid network ID: {0}".format(ng['id']))
            else:
                if ng.get('cidr'):
                    fnet = netaddr.IPNetwork(ng['cidr'])
                    if net_man.is_range_intersection(fnet, admin_range):
                        net_errors.append("cidr")
                        err_msgs.append(
                            u"Intersection with admin "
                            "network(s) '{0}' found".format(
                                admin_ng.cidr
                            )
                        )
                    # ng['amount'] is always equal 1 for Neutron
                    if fnet.size < ng['network_size']:  # * ng['amount']:
                        net_errors.append("cidr")
                        err_msgs.append(
                            u"CIDR size for network '{0}' "
                            "is less than required".format(
                                ng.get('name') or ng_db.name or ng_db.id
                            )
                        )
                # Check for intersection with Admin network
                if 'ip_ranges' in ng:
                    for k, v in enumerate(ng['ip_ranges']):
                        ip_range = netaddr.IPRange(v[0], v[1])
                        if net_man.is_range_intersection(admin_range,
                                                         ip_range):
                            # NOTE(review): flags "cidr" although the
                            # offending field is ip_ranges -- confirm the
                            # UI expects this key.
                            net_errors.append("cidr")
                            err_msgs.append(
                                u"IP range {0} - {1} in {2} network intersects"
                                " with admin range of {3}".format(
                                    v[0], v[1],
                                    ng.get('name') or ng_db.name or ng_db.id,
                                    admin_ng.cidr
                                )
                            )
                            sub_ranges.append(k)
            if net_errors:
                result.append({
                    "id": int(ng["id"]),
                    "range_errors": sub_ranges,
                    "errors": net_errors
                })
        expose_error_messages()

        # check intersection of address ranges
        # between networks except admin network
        ng_names = dict((ng['id'], (ng.get('name')) or
                         db().query(NetworkGroup).get(ng['id']).name)
                        for ng in networks)
        # Pairwise CIDR comparison: shrink ngs as we go so every pair is
        # checked exactly once.
        ngs = list(networks)
        for ng1 in networks:
            net_errors = []
            ngs.remove(ng1)
            for ng2 in ngs:
                if ng1.get('cidr') and ng2.get('cidr'):
                    cidr1 = netaddr.IPNetwork(ng1['cidr'])
                    cidr2 = netaddr.IPNetwork(ng2['cidr'])
                    if net_man.is_cidr_intersection(cidr1, cidr2):
                        net_errors.append("cidr")
                        err_msgs.append(
                            u"Intersection between network address "
                            "spaces found:\n{0}".format(
                                ", ".join([ng_names[ng1['id']],
                                           ng_names[ng2['id']]])
                            )
                        )
            if net_errors:
                result.append({
                    "id": int(ng1["id"]),
                    "errors": net_errors
                })
        expose_error_messages()

        # check Public gateway, Floating Start and Stop IPs
        # belong to Public CIDR
        if 'neutron_parameters' in data:
            pre_net = data['neutron_parameters']['predefined_networks']
        else:
            pre_net = task.cluster.neutron_config.predefined_networks
        # NOTE(review): raises IndexError if no 'public' network is
        # present -- presumably guaranteed by earlier validation; verify.
        public = [n for n in networks if n['name'] == 'public'][0]
        net_errors = []
        fl_range = pre_net['net04_ext']['L3']['floating']
        if public.get('cidr') and public.get('gateway'):
            cidr = netaddr.IPNetwork(public['cidr'])
            if netaddr.IPAddress(public['gateway']) not in cidr:
                net_errors.append("gateway")
                err_msgs.append(
                    u"Public gateway {0} is not in Public "
                    "address space {1}.".format(
                        public['gateway'], public['cidr']
                    )
                )
            # netaddr: an IPRange is "in" an IPNetwork only when fully
            # contained.
            if netaddr.IPRange(fl_range[0], fl_range[1]) not in cidr:
                net_errors.append("float_range")
                err_msgs.append(
                    u"Floating address range {0}:{1} is not in Public "
                    "address space {2}.".format(
                        netaddr.IPAddress(fl_range[0]),
                        netaddr.IPAddress(fl_range[1]),
                        public['cidr']
                    )
                )
        else:
            net_errors.append("format")
            err_msgs.append(
                u"Public gateway or CIDR specification is invalid."
            )
        # NOTE(review): rebinds 'result' from the accumulated list to a
        # single dict before expose_error_messages persists it -- confirm
        # consumers of task.result accept both shapes.
        result = {"id": int(public["id"]), "errors": net_errors}
        expose_error_messages()

        # check internal Gateway is in Internal CIDR
        internal = pre_net['net04']['L3']
        if internal.get('cidr') and internal.get('gateway'):
            cidr = netaddr.IPNetwork(internal['cidr'])
            if netaddr.IPAddress(internal['gateway']) not in cidr:
                net_errors.append("gateway")
                err_msgs.append(
                    u"Internal gateway {0} is not in Internal "
                    "address space {1}.".format(
                        internal['gateway'], internal['cidr']
                    )
                )
            if net_man.is_range_intersection(
                    netaddr.IPRange(fl_range[0], fl_range[1]),
                    cidr):
                net_errors.append("cidr")
                err_msgs.append(
                    u"Intersection between Internal CIDR and Floating range."
                )
        else:
            net_errors.append("format")
            err_msgs.append(
                u"Internal gateway or CIDR specification is invalid."
            )
        result = {"name": "internal", "errors": net_errors}
        expose_error_messages()