def serialize_for_cluster(cls, cluster):
    """Serialize a cluster's network settings into a plain dict.

    Includes the network manager name, every network group of the
    cluster plus the admin network group, optional DNS nameservers
    and, for HA clusters, a VIP address per VIP-carrying network.

    :param cluster: cluster DB object
    :returns: dict ready for JSON serialization
    """
    result = {}
    result['net_manager'] = cluster.net_manager
    # List comprehension instead of map(): map() returns an iterator
    # on Python 3, which would break the .append() call below.
    result['networks'] = [
        cls.serialize_network_group(ng)
        for ng in cluster.network_groups
    ]
    net_manager = NetworkManager()
    # The admin network group is stored separately from the cluster's
    # network groups, so it is appended explicitly.
    result['networks'].append(
        cls.serialize_network_group(
            net_manager.get_admin_network_group()
        )
    )
    if cluster.dns_nameservers:
        result['dns_nameservers'] = {
            "nameservers": cluster.dns_nameservers
        }
    if cluster.is_ha_mode:
        # Assign a VIP for every nova-network network unless the
        # release metadata explicitly opts out via assign_vip: false.
        nw_metadata = cluster.release.networks_metadata["nova_network"]
        for network in nw_metadata["networks"]:
            if network.get("assign_vip") is not False:
                result['{0}_vip'.format(network["name"])] = \
                    net_manager.assign_vip(cluster.id, network["name"])
    return result
def test_get_default_nic_networkgroups(self):
    """Moving the admin IP to another NIC must move the default
    network assignment along with it (Neutron/GRE cluster)."""
    cluster = self.env.create_cluster(api=True,
                                      net_provider='neutron',
                                      net_segment_type='gre')
    node = self.env.create_node(api=True)
    node_db = self.env.nodes[0]

    admin_nic = node_db.admin_interface
    other_iface = self.db.query(NodeNICInterface).filter_by(
        node_id=node['id']
    ).filter(
        not_(NodeNICInterface.id == admin_nic.id)
    ).first()

    meta_ifaces = deepcopy(node_db.meta['interfaces'])
    # Take the first address of the admin subnet for the new admin NIC.
    admin_ip = str(IPNetwork(
        NetworkManager.get_admin_network_group().cidr)[0])
    for iface in meta_ifaces:
        if iface['mac'] == admin_nic.mac:
            # Drop the admin address from the previous admin interface.
            iface['ip'] = None
        elif iface['mac'] == other_iface.mac:
            # Promote this interface to admin by giving it the admin IP.
            iface['ip'] = admin_ip
    node_db.meta['interfaces'] = meta_ifaces

    # Simulate an agent update carrying the modified interface metadata.
    self.app.put(
        reverse('NodeCollectionHandler'),
        json.dumps([{
            'mac': admin_nic.mac,
            'meta': node_db.meta,
            'is_agent': True,
            'cluster_id': cluster["id"]
        }]),
        headers=self.default_headers,
        expect_errors=True
    )

    new_admin_id = node_db.admin_interface.id
    admin_nets = [ng.name for ng in self.db.query(
        NodeNICInterface).get(new_admin_id).assigned_networks]
    other_nets = [ng.name for ng in other_iface.assigned_networks]

    nics = NeutronManager.get_default_networks_assignment(node_db)
    default_admin = [n for n in nics if n['id'] == new_admin_id]
    default_other = [n for n in nics if n['id'] == other_iface.id]
    self.assertEquals(len(default_admin), 1)
    self.assertEquals(len(default_other), 1)
    # The admin role must now live on the other interface.
    self.assertEquals(new_admin_id, other_iface.id)
    self.assertEquals(
        set(admin_nets),
        set([n['name'] for n in default_admin[0]['assigned_networks']]))
    self.assertEquals(
        set(other_nets),
        set([n['name'] for n in default_other[0]['assigned_networks']]))
def test_get_default_nic_networkgroups(self):
    """Moving the admin IP to another NIC must move the default
    network assignment along with it (nova-network cluster)."""
    cluster = self.env.create_cluster(api=True)
    node = self.env.create_node(api=True)
    node_db = self.env.nodes[0]

    admin_nic = node_db.admin_interface
    other_iface = self.db.query(NodeNICInterface).filter_by(
        node_id=node['id']
    ).filter(
        not_(NodeNICInterface.id == admin_nic.id)
    ).first()

    meta_ifaces = deepcopy(node_db.meta['interfaces'])
    # Take the first address of the admin subnet for the new admin NIC.
    admin_ip = str(IPNetwork(
        NetworkManager.get_admin_network_group().cidr)[0])
    for iface in meta_ifaces:
        if iface['mac'] == admin_nic.mac:
            # Drop the admin address from the previous admin interface.
            iface['ip'] = None
        elif iface['mac'] == other_iface.mac:
            # Promote this interface to admin by giving it the admin IP.
            iface['ip'] = admin_ip
    node_db.meta['interfaces'] = meta_ifaces

    # Simulate an agent update carrying the modified interface metadata.
    self.app.put(
        reverse('NodeCollectionHandler'),
        json.dumps([{
            'mac': admin_nic.mac,
            'meta': node_db.meta,
            'is_agent': True,
            'cluster_id': cluster["id"]
        }]),
        headers=self.default_headers,
        expect_errors=True
    )

    new_admin_id = node_db.admin_interface.id
    # The admin role must now live on the other interface.
    self.assertEquals(new_admin_id, other_iface.id)
    self.assertEquals(
        other_iface.assigned_networks,
        NovaNetworkManager.get_default_nic_networkgroups(
            node_db, other_iface))
    self.assertEquals(
        self.db.query(
            NodeNICInterface).get(admin_nic.id).assigned_networks,
        NovaNetworkManager.get_default_nic_networkgroups(
            node_db, admin_nic))
def admin_interface(self):
    """Return the node interface that carries the admin (PXE) network.

    Lookup order:
      1. an interface with the admin network group explicitly assigned;
      2. an interface whose IP address belongs to the admin subnet;
      3. as a last resort, the node's first interface (a warning is
         logged in that case).

    Note: despite older documentation, this method does not raise
    errors.CanNotFindInterface -- it always falls back to the first
    interface.
    """
    # Imported locally to avoid a circular import with the manager module.
    from nailgun.network.manager import NetworkManager
    admin_ng = NetworkManager.get_admin_network_group()
    for interface in self.interfaces:
        if admin_ng in interface.assigned_networks_list:
            return interface
    for interface in self.interfaces:
        if NetworkManager.is_ip_belongs_to_admin_subnet(interface.ip_addr):
            return interface
    logger.warning(u'Cannot find admin interface for node "%s", '
                   'returning first interface' % self.full_name)
    return self.interfaces[0]
def serialize_interfaces(cls, node):
    """Serialize node NIC data for the cobbler provisioning system.

    :param node: node DB object
    :returns: dict with 'interfaces' and 'interfaces_extra' sub-dicts
    """
    net_manager = NetworkManager()
    admin_ips = net_manager.get_admin_ips_for_interfaces(node)
    netmask = net_manager.get_admin_network_group().netmask
    interfaces = {}
    interfaces_extra = {}
    for nic in node.meta.get('interfaces', []):
        nic_name = nic['name']
        interfaces[nic_name] = {
            'mac_address': nic['mac'],
            'static': '0',
            'netmask': netmask,
            'ip_address': admin_ips[nic_name],
        }
        # interfaces_extra holds per-interface settings the cobbler
        # interface model does not support (e.g. 'peerdns'); a cobbler
        # snippet consumes this extra branch from ks_meta.
        interfaces_extra[nic_name] = {
            'peerdns': 'no',
            'onboot': 'no',
        }
        # Every discovered NIC is registered so the node can PXE boot
        # from any of them, but the fqdn must resolve to a single IP
        # (multi-interface configuration is not fully supported), so
        # only the NIC matching the node's primary MAC gets dns_name
        # and is brought up on boot.
        if nic['mac'] == node.mac:
            interfaces[nic_name]['dns_name'] = node.fqdn
            interfaces_extra[nic_name]['onboot'] = 'yes'
    return {
        'interfaces': interfaces,
        'interfaces_extra': interfaces_extra,
    }
def serialize_for_cluster(cls, cluster):
    """Build the JSON-serializable network configuration of a cluster.

    Collects the network manager, all network groups (cluster groups
    plus the admin group), optional DNS nameservers and, in HA mode,
    a VIP per VIP-carrying nova-network network.
    """
    serialized = {}
    serialized['net_manager'] = cluster.net_manager
    networks = [cls.serialize_network_group(group)
                for group in cluster.network_groups]
    manager = NetworkManager()
    # The admin network group lives outside cluster.network_groups.
    networks.append(
        cls.serialize_network_group(manager.get_admin_network_group()))
    serialized['networks'] = networks
    if cluster.dns_nameservers:
        serialized['dns_nameservers'] = {
            "nameservers": cluster.dns_nameservers}
    if cluster.is_ha_mode:
        # VIPs are assigned unless the release metadata opts a
        # network out with assign_vip: false.
        meta = cluster.release.networks_metadata["nova_network"]
        for net in meta["networks"]:
            if net.get("assign_vip") is not False:
                vip_key = '{0}_vip'.format(net["name"])
                serialized[vip_key] = manager.assign_vip(
                    cluster.id, net["name"])
    return serialized
def serialize_interfaces(cls, node):
    """Build cobbler 'interfaces' / 'interfaces_extra' dicts for a node."""
    manager = NetworkManager()
    admin_ips = manager.get_admin_ips_for_interfaces(node)
    admin_netmask = manager.get_admin_network_group().netmask
    ifaces = {}
    ifaces_extra = {}
    for meta_iface in node.meta.get('interfaces', []):
        name = meta_iface['name']
        ifaces[name] = {
            'mac_address': meta_iface['mac'],
            'static': '0',
            'netmask': admin_netmask,
            'ip_address': admin_ips[name],
        }
        # Extra per-interface data in cobbler ks_meta for fields the
        # cobbler interface model lacks (e.g. 'peerdns'); consumed by
        # a cobbler snippet.
        ifaces_extra[name] = {'peerdns': 'no', 'onboot': 'no'}
        # Register every NIC so PXE boot works from any of them, but
        # attach the fqdn only to the primary NIC so the name resolves
        # to a single address (full multi-interface support is absent).
        if meta_iface['mac'] == node.mac:
            ifaces[name]['dns_name'] = node.fqdn
            ifaces_extra[name]['onboot'] = 'yes'
    return {'interfaces': ifaces, 'interfaces_extra': ifaces_extra}
def nova_net_check(cls, task, data, check_admin_untagged):
    """Validate nova-network configuration for a cluster.

    Checks, in order:
      * (optionally) that no untagged network shares a physical
        interface with the admin (PXE) network on any node;
      * that every submitted network group exists, its CIDR and IP
        ranges do not intersect the admin network, its CIDR is large
        enough, and 'amount' > 1 is not used with FlatDHCPManager.

    :param task: verification task DB object; per-network errors are
        stored into task.result on failure
    :param data: submitted network settings (falls back to DB values
        for missing 'net_manager' / 'networks' keys)
    :param check_admin_untagged: enable the untagged-vs-admin check
    :raises: errors.NetworkCheckError when any check fails
    """
    # If not set in data then fetch from db
    if 'net_manager' in data:
        netmanager = data['net_manager']
    else:
        netmanager = task.cluster.net_manager
    if 'networks' in data:
        networks = data['networks']
    else:
        networks = map(lambda x: x.__dict__, task.cluster.network_groups)

    result = []
    err_msgs = []

    # checking if there are untagged networks on the same
    # interface (main) as admin network
    if check_admin_untagged:
        untagged_nets = set(
            n["id"] for n in filter(
                lambda n: (n["vlan_start"] is None), networks))
        if untagged_nets:
            logger.info(
                "Untagged networks found, "
                "checking admin network intersection...")
            admin_interfaces = map(lambda node: node.admin_interface,
                                   task.cluster.nodes)
            found_intersection = []
            for iface in admin_interfaces:
                nets = dict(
                    (n.id, n.name) for n in iface.assigned_networks)
                err_nets = set(nets.keys()) & untagged_nets
                if err_nets:
                    err_net_names = [
                        '"{0}"'.format(nets[i]) for i in err_nets]
                    found_intersection.append(
                        [iface.node.name, err_net_names])
            if found_intersection:
                nodes_with_errors = [
                    u'Node "{0}": {1}'.format(
                        name, ", ".join(_networks)
                    ) for name, _networks in found_intersection]
                # Wording fix: "whether turn on" -> "either turn on".
                err_msg = u"Some untagged networks are " \
                    "assigned to the same physical interface as " \
                    "admin (PXE) network. You can either turn " \
                    "on tagging for these OpenStack " \
                    "networks or move them to another physical " \
                    "interface:\n{0}".format("\n".join(nodes_with_errors))
                raise errors.NetworkCheckError(err_msg, add_client=False)

    net_man = NetworkManager()
    admin_ng = net_man.get_admin_network_group()
    admin_range = netaddr.IPNetwork(admin_ng.cidr)
    for ng in networks:
        net_errors = []
        sub_ranges = []
        ng_db = db().query(NetworkGroup).get(ng['id'])
        if not ng_db:
            net_errors.append("id")
            err_msgs.append(u"Invalid network ID: {0}".format(ng['id']))
        else:
            if ng.get('cidr'):
                fnet = netaddr.IPNetwork(ng['cidr'])
                # CIDR must not overlap the admin network.
                if net_man.is_range_intersection(fnet, admin_range):
                    net_errors.append("cidr")
                    err_msgs.append(
                        u"Intersection with admin "
                        "network(s) '{0}' found".format(
                            admin_ng.cidr
                        )
                    )
                # CIDR must be big enough for size * amount addresses.
                if fnet.size < ng['network_size'] * ng['amount']:
                    net_errors.append("cidr")
                    err_msgs.append(
                        u"CIDR size for network '{0}' "
                        "is less than required".format(
                            ng.get('name') or ng_db.name or ng_db.id
                        )
                    )
            # Check for intersection with Admin network
            if 'ip_ranges' in ng:
                for k, v in enumerate(ng['ip_ranges']):
                    ip_range = netaddr.IPRange(v[0], v[1])
                    if net_man.is_range_intersection(admin_range,
                                                     ip_range):
                        net_errors.append("cidr")
                        err_msgs.append(
                            u"IP range {0} - {1} in {2} network intersects"
                            " with admin range of {3}".format(
                                v[0], v[1],
                                ng.get('name') or ng_db.name or ng_db.id,
                                admin_ng.cidr
                            )
                        )
                        # Remember which range index failed so the UI
                        # can highlight it.
                        sub_ranges.append(k)
            if ng.get('amount') > 1 and netmanager == 'FlatDHCPManager':
                net_errors.append("amount")
                err_msgs.append(
                    u"Network amount for '{0}' is more than 1 "
                    "while using FlatDHCP manager.".format(
                        ng.get('name') or ng_db.name or ng_db.id
                    )
                )
        if net_errors:
            result.append({
                "id": int(ng["id"]),
                "range_errors": sub_ranges,
                "errors": net_errors
            })
    if err_msgs:
        # Persist per-network error details on the task before raising.
        task.result = result
        db().add(task)
        db().commit()
        full_err_msg = "\n".join(err_msgs)
        raise errors.NetworkCheckError(full_err_msg, add_client=False)