def test_nuage_underlay_on(self):
    """test_nuage_underlay_on

    Check that when providing --underlay True, subnets across
    networks go into the same domain.
    """
    kwargs = {'router:external': True}
    # Two distinct external networks.
    n1 = self.create_network(manager=self.admin_manager, **kwargs)
    n2 = self.create_network(manager=self.admin_manager, **kwargs)
    # Two underlay=True subnets on the first network ...
    s1 = self.create_subnet(n1, cidr=data_utils.gimme_a_cidr(),
                            mask_bits=24, underlay=True,
                            manager=self.admin_manager)
    s2 = self.create_subnet(n1, cidr=data_utils.gimme_a_cidr(),
                            mask_bits=24, underlay=True,
                            manager=self.admin_manager)
    # ... and a third underlay=True subnet on the second network.
    s3 = self.create_subnet(n2, cidr=data_utils.gimme_a_cidr(),
                            mask_bits=24, underlay=True,
                            manager=self.admin_manager)
    # All three must report the same nuage_uplink (shared underlay domain).
    self.assertEqual(s1['nuage_uplink'], s2['nuage_uplink'],
                     "Subnet not going into same underlay domain")
    self.assertEqual(
        s2['nuage_uplink'], s3['nuage_uplink'],
        "Not all underlay=True subnets are in the same "
        "underlay domain.")
def test_nuage_network_multiple_gw(self):
    """test_nuage_network_multiple_gw

    Check that when a router can have a gateway to multiple subnets
    in the same network.
    """
    # One external network hosting both gateway subnets.
    ext_net = self.create_network(manager=self.admin_manager,
                                  **{'router:external': True})
    subnets = [self.create_subnet(ext_net,
                                  cidr=data_utils.gimme_a_cidr(),
                                  mask_bits=24,
                                  manager=self.admin_manager)
               for _ in range(2)]
    self.assertEqual(subnets[0]['nuage_uplink'],
                     subnets[1]['nuage_uplink'],
                     "Subnet not created in same domain")
    # Gateway info carrying one external fixed ip per subnet.
    gw_info = {
        'network_id': ext_net['id'],
        'external_fixed_ips': [
            {'subnet_id': subnets[0]['id']},
            {'subnet_id': subnets[1]['id']},
        ]
    }
    self.create_router(manager=self.admin_manager,
                       external_gateway_info_on=False,
                       external_gateway_info=gw_info)
def test_get_linked_shared_resource_l2_domains(self):
    """test_get_linked_shared_resource_l2_domains

    Test that when linked a shared subnet to a private l2domain the
    attributes returned are those of the shared subnet.
    """
    # Create private enterprise
    enterprise_name = data_utils.rand_name('test-verify-linked-domains')
    enterprise = self.NuageNetworksClient.create_netpartition(
        enterprise_name)['net_partition']
    self.addCleanup(self.NuageNetworksClient.delete_netpartition,
                    enterprise['id'])
    # Create shared resources: a dualstack l2 domain in the
    # 'Shared Infrastructure' enterprise.
    cidr4 = nuage_utils.gimme_a_cidr()
    cidr6 = nuage_utils.gimme_a_cidr(ip_version=6)
    shared_l2dom_template = self.vsd.create_l2domain_template(
        enterprise='Shared Infrastructure',
        ip_type='DUALSTACK',
        cidr4=cidr4,
        cidr6=cidr6,
        enable_dhcpv4=True,
        enable_dhcpv6=True
    )
    self.addCleanup(shared_l2dom_template.delete)
    shared_l2dom = self.vsd.create_l2domain(
        enterprise='Shared Infrastructure',
        template=shared_l2dom_template)
    self.addCleanup(shared_l2dom.delete)
    # Link in created enterprise: an unmanaged l2 domain associated
    # with the shared resource.
    l2dom_template = self.vsd.create_l2domain_template(
        enterprise=enterprise_name, dhcp_managed=False)
    self.addCleanup(l2dom_template.delete)
    l2dom = self.vsd.create_l2domain(
        enterprise=enterprise_name, template=l2dom_template,
        associated_shared_network_resource_id=shared_l2dom.id)
    self.addCleanup(l2dom.delete)
    # Get l2domain
    domains = self.NuageNetworksClient.get_domains(
        vsd_organisation_id=enterprise['id'])['vsd_domains']
    # Check that l2domain is found
    self.assertNotEmpty(domains, 'Enterprise should contain domains')
    self.assertEqual(1, len(domains),
                     'Exactly one l2domain should be found, but '
                     'found: {}'.format(len(domains)))
    domain_json = domains[0]
    # Verify
    self._verify_l2domain(domain_json, l2dom, shared_l2dom)
    # SET DHCP option 3: gateway, on the *shared* domain; the linked
    # domain's view should reflect it.
    dhcp_option = self.vsd.vspk.NUDHCPOption(
        actual_type=3,
        actual_values=[str(IPAddress(cidr4.first) + 1)])
    shared_l2dom.create_child(dhcp_option)
    # Re-fetch and verify again after the gateway option was set.
    domains = self.NuageNetworksClient.get_domains(
        vsd_organisation_id=enterprise['id'])['vsd_domains']
    domain_json = domains[0]
    self._verify_l2domain(domain_json, l2dom, shared_l2dom)
def test_subport_connectivity(self):
    """Verify L2 connectivity between two servers over trunk subports."""
    self._setup_resources()
    vlan_tag = 10
    # create resources for access to vms
    access_router = self.create_public_router()
    access_network = self.create_network()
    access_subnet = self.create_subnet(access_network,
                                       cidr=data_utils.gimme_a_cidr())
    self.router_attach(access_router, access_subnet)
    # Gatewayless network/subnet backing the vlan subports.
    subport_network = self._create_network(vlan_tag)
    subport_subnet = self.create_subnet(subport_network,
                                        cidr=data_utils.gimme_a_cidr(),
                                        gateway=None)
    net = IPNetwork(subport_subnet['cidr'])
    servers = [
        self._create_server_with_port_and_subport(
            access_network, subport_network, vlan_tag)
        for _ in range(2)]
    # Compute/configure dot1q interfaces
    for server in servers:
        # Resolve the guest nic that carries the trunk parent port's MAC.
        dev = server['server'].console().get_nic_name_by_mac(
            server['trunkport']['mac_address']
        )
        self.assertIsNotNone(
            dev, message="Could not compute device name for interface")
        # Configure VLAN interfaces on server
        command = CONFIGURE_VLAN_INTERFACE_COMMANDS % {
            'itf': dev,
            'tag': vlan_tag,
            'mac': server['trunkport']['mac_address'],
            'ip': server['subport']['fixed_ips'][0]['ip_address'],
            'len': net.prefixlen,
        }
        server['server'].send(command)
        out = server['server'].send(
            'ip addr list')
        LOG.debug("Interfaces on server %s: %s", server, out)
    # Ping server2's subport address from server1 over the vlan.
    self.assert_ping(
        servers[0]['server'], servers[1]['server'],
        address=servers[1]['subport']['fixed_ips'][0]['ip_address'])
def _setup_resources(self, ip_version=(4, ), is_flat=False, is_l3=False,
                     is_vsd_mgd=False):
    """Create the provider network and subnet(s) for the test.

    :param ip_version: tuple of ip versions to create subnets for,
        e.g. (4,), (6,) or (4, 6).
    :param is_flat: use a 'flat' provider network instead of 'vlan'.
    :param is_l3: use the l3 VSD subnet (only relevant when vsd managed).
    :param is_vsd_mgd: link subnets to pre-created VSD resources
        (self.l3subnet / self.l2domain) instead of OS-managed subnets.
    """
    kwargs = {
        'provider:network_type': 'flat' if is_flat else 'vlan',
        'provider:physical_network': 'physnet1'
    }
    self.network = self.create_network(
        manager=self.admin_manager,
        tenant_id=self.manager.networks_client.tenant_id,
        **kwargs)
    if is_vsd_mgd:
        # Pick the VSD resource and helper matching l2/l3.
        if is_l3:
            vsd_subnet = self.l3subnet
            create_vsd_managed_subnet = self.create_l3_vsd_managed_subnet
        else:
            vsd_subnet = self.l2domain
            create_vsd_managed_subnet = self.create_l2_vsd_managed_subnet
        self.subnet = []
        for ip_type in ip_version:
            if ip_type == 6 and is_l3:
                # IP address information obtained from Openstack Networking
                # (dnsmasq) using DHCPv6 stateful - Only L3 IPv6
                ipv6_address_mode = 'dhcpv6-stateful'
                self.subnet.append(
                    create_vsd_managed_subnet(
                        self.network, vsd_subnet, ip_version=ip_type,
                        ipv6_address_mode=ipv6_address_mode,
                        manager=self.admin_manager, dhcp_managed=True))
            else:
                self.subnet.append(
                    create_vsd_managed_subnet(self.network, vsd_subnet,
                                              ip_version=ip_type,
                                              manager=self.admin_manager,
                                              dhcp_managed=False))
    else:
        self.subnet = []
        for ip_type in ip_version:
            netmask = (self.netmask_ipv4 if ip_type == 4
                       else self.netmask_ipv6)
            self.subnet.append(
                self.create_subnet(self.network,
                                   ip_version=ip_type,
                                   cidr=data_utils.gimme_a_cidr(
                                       ip_type, netmask),
                                   manager=self.admin_manager,
                                   enable_dhcp=False))
    if self.is_trunk:
        # Additional networks/subnets for the trunk subports.
        self._create_trunk_subports_resources(ip_version, is_vsd_mgd,
                                              is_l3)
def _setup_resources(self, is_l2=False):
    """Set up basic topology for servers we can log into.

    When is_l2 is True an extra isolated (gatewayless) network is
    created for the test traffic; otherwise the login network doubles
    as the test network.
    """
    self.network = self.create_network()
    self.subnet = self.create_subnet(self.network)
    public_router = self.create_public_router()
    self.router_attach(public_router, self.subnet)
    self.keypair = self.create_keypair()
    self.secgroup = self._create_empty_security_group()
    # Allow inbound ssh (tcp) and ping (icmp).
    for proto in ('tcp', 'icmp'):
        self.create_security_group_rule_with_manager(
            security_group=self.secgroup, direction='ingress',
            ethertype='IPv4', protocol=proto)
    if is_l2:
        # create additional network for L2
        self.test_network = self.create_network()
        self.test_subnet = self.create_subnet(
            self.test_network, cidr=data_utils.gimme_a_cidr(),
            gateway=None)
    else:
        self.test_network = self.network
        self.test_subnet = self.subnet
def _verify_update_external_subnet_with_underlay_neg(self):
    """Verify that the underlay flag of an external subnet is immutable.

    For both underlay values: create an external subnet, check a plain
    name update still works (OPENSTACK-721), then check that flipping
    the underlay flag raises BadRequest. The subnet is deleted and the
    cidr advanced after each pass to avoid overlap.
    """
    ext_network = self._create_network(external=True)
    underlay_states = [False, True]
    cidr = nuage_data_utils.gimme_a_cidr()
    for underlay in underlay_states:
        subnet_name = data_utils.rand_name(
            'underlay-subnet-update-not-allowed')
        create_body = self.admin_subnets_client.create_subnet(
            network_id=ext_network['id'],
            cidr=str(cidr.cidr),
            ip_version=self._ip_version,
            name=subnet_name,
            underlay=underlay)
        subnet = create_body['subnet']
        self.assertEqual(subnet['name'], subnet_name)
        # Response should include underlay status
        self.assertEqual(subnet['underlay'], underlay)
        subnet_id = subnet['id']
        new_name = subnet_name + '-updated-1'
        # Checking OPENSTACK-721: update name
        update_body = self.admin_subnets_client.update_subnet(
            subnet_id, name=new_name)
        self.assertEqual(update_body['subnet']['name'], new_name)
        # Flipping the underlay value must be rejected
        # (idiomatic `not underlay` instead of `False if underlay else True`).
        new_underlay = not underlay
        kwargs = {
            'name': new_name,
            'underlay': new_underlay
        }
        self.assertRaises(exceptions.BadRequest,
                          self.admin_subnets_client.update_subnet,
                          subnet_id,
                          **kwargs)
        self.admin_subnets_client.delete_subnet(subnet_id)
        cidr = cidr.next(1)
def _create_vsd_domain(self, is_l3=True, ip_version=(4,)):
    """Create a VSD l3 domain + subnet, or an l2 domain.

    :param is_l3: create an l3 domain with zone and subnet when True,
        otherwise an l2 domain. Gateways are only set for l3.
    :param ip_version: tuple of ip versions, e.g. (4,), (6,) or (4, 6);
        used as key into self.ip_types.
    """
    cidr4 = None
    cidr6 = None
    # DHCP is deliberately disabled on both address families here.
    enable_dhcpv4 = False
    enable_dhcpv6 = False
    gateway4 = None
    gateway6 = None
    for ip_type in ip_version:
        if ip_type == 4:
            cidr4 = data_utils.gimme_a_cidr(ip_type)
            # First usable address as gateway, l3 only.
            gateway4 = str(cidr4[1]) if is_l3 else None
        elif ip_type == 6:
            cidr6 = data_utils.gimme_a_cidr(ip_type)
            gateway6 = str(cidr6[1]) if is_l3 else None
    kwargs = {}
    if CONF.nuage_sut.gateway_type == 'cisco':
        # Cisco gateways need ingress replication on the domain.
        kwargs['ingress_replication_enabled'] = True
    if is_l3:
        l3template = self.vsd_create_l3domain_template()
        self.domain = self.vsd_create_l3domain(template_id=l3template.id)
        zone = self.vsd_create_zone(domain=self.domain)
        self.l3subnet = self.create_vsd_subnet(
            zone=zone,
            cidr4=cidr4,
            cidr6=cidr6,
            enable_dhcpv4=enable_dhcpv4,
            enable_dhcpv6=enable_dhcpv6,
            gateway4=gateway4,
            gateway6=gateway6,
            ip_type=self.ip_types[ip_version],
            **kwargs
        )
    else:
        l2template = self.vsd_create_l2domain_template(
            cidr4=cidr4,
            cidr6=cidr6,
            enable_dhcpv4=enable_dhcpv4,
            enable_dhcpv6=enable_dhcpv6,
            ip_type=self.ip_types[ip_version],
        )
        self.l2domain = self.vsd_create_l2domain(template=l2template,
                                                 **kwargs)
def _create_subports_vsd_resources(self, ip_version, is_l3):
    """Create one VSD subnet/l2domain per subport.

    Results are stored in self.subports['l3subnets'] (l3 case) or
    self.subports['l2domains'] (l2 case); DHCP stays disabled.
    """
    enable_dhcpv4 = False
    enable_dhcpv6 = False
    is_ipv4 = 4 in ip_version
    is_ipv6 = 6 in ip_version
    if is_l3:
        self.subports['l3subnets'] = []
        for _ in range(self.num_subports):
            cidr4 = (data_utils.gimme_a_cidr(4, self.netmask_ipv4)
                     if is_ipv4 else None)
            cidr6 = (data_utils.gimme_a_cidr(6, self.netmask_ipv6)
                     if is_ipv6 else None)
            self.subports['l3subnets'].append(
                self.create_vsd_subnet(
                    zone=self.zone,
                    cidr4=cidr4,
                    cidr6=cidr6,
                    enable_dhcpv4=enable_dhcpv4,
                    enable_dhcpv6=enable_dhcpv6,
                    # First usable address serves as gateway per family.
                    gateway4=str(cidr4[1]) if is_ipv4 else None,
                    gateway6=str(cidr6[1]) if is_ipv6 else None,
                    ip_type=self.ip_types[ip_version]))
    else:
        # l2: one template (with fresh cidrs) per subport, then the domains.
        l2templates = [
            self.vsd_create_l2domain_template(
                cidr4=(data_utils.gimme_a_cidr(4, self.netmask_ipv4)
                       if is_ipv4 else None),
                cidr6=(data_utils.gimme_a_cidr(6, self.netmask_ipv6)
                       if is_ipv6 else None),
                enable_dhcpv4=enable_dhcpv4,
                enable_dhcpv6=enable_dhcpv6,
                ip_type=self.ip_types[ip_version],
            )
            for _ in range(self.num_subports)
        ]
        self.subports['l2domains'] = [
            self.vsd_create_l2domain(template=l2template)
            for l2template in l2templates
        ]
def _create_vsd_domain(self, is_l3=False, ip_version=(4, )):
    """Create a VSD l3 domain + subnet, or an l2 domain.

    Uses the class netmasks for the generated cidrs. Note DHCPv6 is
    enabled only in the l3 case (enable_dhcpv6 = is_l3) while DHCPv4
    stays disabled — presumably intentional; confirm against callers.
    """
    cidr4 = None
    cidr6 = None
    enable_dhcpv4 = False
    enable_dhcpv6 = is_l3
    gateway4 = None
    gateway6 = None
    for ip_type in ip_version:
        if ip_type == 4:
            cidr4 = data_utils.gimme_a_cidr(ip_type, self.netmask_ipv4)
            # First usable address as gateway, l3 only.
            gateway4 = str(cidr4[1]) if is_l3 else None
        elif ip_type == 6:
            cidr6 = data_utils.gimme_a_cidr(ip_type, self.netmask_ipv6)
            gateway6 = str(cidr6[1]) if is_l3 else None
    if is_l3:
        l3template = self.vsd_create_l3domain_template()
        self.domain = self.vsd_create_l3domain(template_id=l3template.id)
        self.zone = self.vsd_create_zone(domain=self.domain)
        self.l3subnet = self.create_vsd_subnet(
            zone=self.zone,
            cidr4=cidr4,
            cidr6=cidr6,
            enable_dhcpv4=enable_dhcpv4,
            enable_dhcpv6=enable_dhcpv6,
            gateway4=gateway4,
            gateway6=gateway6,
            ip_type=self.ip_types[ip_version])
    else:
        l2template = self.vsd_create_l2domain_template(
            cidr4=cidr4,
            cidr6=cidr6,
            enable_dhcpv4=enable_dhcpv4,
            enable_dhcpv6=enable_dhcpv6,
            ip_type=self.ip_types[ip_version],
        )
        self.l2domain = self.vsd_create_l2domain(template=l2template)
def test_nuage_network_update_to_external(self):
    """test_nuage_network_update_to_external

    Check that when providing --nuage-uplink that is the same as the
    parent of an existing subnet on the network no error is thrown.
    """
    if self.is_dhcp_agent_present():
        raise self.skipException(
            'Multiple subnets in a network not supported when DHCP agent '
            'is enabled.')
    # Internal network with two subnets.
    n1 = self.create_network(manager=self.admin_manager)
    s1 = self.create_subnet(n1, cidr=data_utils.gimme_a_cidr(),
                            mask_bits=24, manager=self.admin_manager)
    s2 = self.create_subnet(n1, cidr=data_utils.gimme_a_cidr(),
                            mask_bits=24, manager=self.admin_manager)
    filters = {
        'device_owner': 'network:dhcp:nuage',
        'network_id': n1['id']
    }
    # Internal subnets each carry a nuage dhcp port.
    dhcp_ports = self.os_admin.ports_client.list_ports(**filters)['ports']
    self.assertEqual(2, len(dhcp_ports))
    # Flip the network to external: dhcp ports must disappear.
    kwargs = {'router:external': True}
    self.update_network(n1['id'], manager=self.admin_manager, **kwargs)
    dhcp_ports = self.ports_client.list_ports(**filters)['ports']
    self.assertEqual(0, len(dhcp_ports))
    # Both subnets must now share the same uplink domain.
    s1 = self.os_admin.subnets_client.show_subnet(s1['id'])['subnet']
    s2 = self.os_admin.subnets_client.show_subnet(s2['id'])['subnet']
    self.assertEqual(s1['nuage_uplink'], s2['nuage_uplink'],
                     "Subnet not created with provided nuage_uplink")
def _build_net_topology(self, router=None):
    """Create a network with subnet(s) and optionally attach to router.

    :param router: when given, attach each (supported) subnet to it and
        fetch the VSD gateway MAC of the resulting l3 subnet.
    :returns: tuple (network, gateway_mac_address); the MAC is None when
        no router attach happened.
    """
    network = self.create_network(manager=self.admin_manager)
    gateway_mac_address = None
    # Creates either single stack IPv4 / IPv6 or dualstack networks
    for ip_version in self.ip_versions:
        subnet = self.create_subnet(network,
                                    cidr=utils.gimme_a_cidr(ip_version),
                                    ip_version=ip_version,
                                    manager=self.admin_manager)
        # pre-6.0 i.e. no SS v6 support yet, do the below for v4 only:
        if router and (Topology.has_single_stack_v6_support() or
                       ip_version == 4):
            self.router_attach(router, subnet, manager=self.admin_manager)
            vspk_subnet = self.vsd.get_subnet(by_subnet=subnet)
            # gateway mac is the same for IPv4/6 in dualstack network
            gateway_mac_address = vspk_subnet.gateway_mac_address
    return network, gateway_mac_address
def _create_vsd_floatingip_pool(cls):
    """Create a shared VSD floating-ip pool with underlay enabled.

    The pool is registered in cls.vsd_shared_domains for class-level
    cleanup and returned to the caller.
    """
    # randomize fip cidr to avoid parallel runs issues
    pool_cidr = nuage_data_utils.gimme_a_cidr()
    base_address = IPAddress(pool_cidr.first)
    pool = cls.nuage_client.create_floatingip_pool(
        name=data_utils.rand_name('fip-pool'),
        address=str(base_address),
        gateway=str(base_address + 1),
        netmask=str(pool_cidr.netmask),
        extra_params={"underlay": True})
    cls.vsd_shared_domains.append(pool)
    return pool
def create_uplink_subnet(self, parent_id=None):
    """Create a VSD uplink subnet on the first gateway vlan.

    :param parent_id: optional shared-resource parent to attach to.
    :returns: the raw client response for the created uplink subnet.
    """
    cidr = nuage_data_utils.gimme_a_cidr()
    address, netmask, gateway = nuage_data_utils.get_cidr_attributes(cidr)
    uplink_subnet_dict = {
        'name': data_utils.rand_name('uplink-'),
        'address': address,
        'netmask': netmask,
        'gateway': gateway,
        'uplinkVportName': 'vlan1',
        # Interface IP two above the network address; fixed dummy MAC.
        'uplinkInterfaceIP': str(cidr.ip + 2),
        'uplinkInterfaceMAC': "00:11:22:33:44:55",
        'uplinkGWVlanAttachmentID': self.gateway_vlan[0]['ID'],
        'sharedResourceParentID': parent_id
    }
    uplink_subnet = self.nuage_client.create_uplink_subnet(
        **uplink_subnet_dict)
    # Schedule deletion of the created subnet at test teardown.
    self.addCleanup(self.delete_uplink_subnet,
                    str(uplink_subnet[0]['ID']))
    return uplink_subnet
def test_subport_connectivity(self):
    """Ping over vlan subports fails until icmp is allowed in the SG."""
    self._setup_resources()
    vlan_tag = 10
    # Gatewayless network backing the vlan subports.
    subport_network = self.create_network()
    self.create_subnet(subport_network, cidr=utils.gimme_a_cidr(),
                       gateway=None)
    servers = [
        self._create_server_with_port_and_subport(subport_network,
                                                  vlan_tag)
        for _ in range(2)
    ]
    for server in servers:
        # Configure VLAN interfaces on server
        command = CONFIGURE_VLAN_INTERFACE_COMMANDS % {
            'itf': 'eth0',
            'tag': vlan_tag
        }
        server['server'].send(command)
        out = server['server'].send('ip addr list')
        LOG.debug("Interfaces on server %s: %s", server, out)
    # Ping from server1 to server2 via VLAN interface should fail because
    # we haven't allowed ICMP
    self.assert_ping(
        servers[0]['server'], servers[1]['server'],
        address=servers[1]['subport']['fixed_ips'][0]['ip_address'],
        should_pass=False)
    # allow intra-securitygroup traffic
    self.create_security_group_rule(security_group=self.secgroup,
                                    direction='ingress',
                                    ethertype='IPv4',
                                    protocol='icmp',
                                    remote_group_id=self.secgroup['id'])
    # Same ping must now succeed.
    self.assert_ping(
        servers[0]['server'], servers[1]['server'],
        address=servers[1]['subport']['fixed_ips'][0]['ip_address'])
def test_update_external_subnet_with_wrong_gateway(self):
    """Updating an external subnet's gateway to an out-of-range IP fails.

    For both underlay=False and underlay=True external subnets, a
    gateway update outside the subnet cidr must raise BadRequest.
    """
    underlay_states = [False, True]
    for underlay in underlay_states:
        ext_network = self._create_network(external=True)
        cidr = nuage_data_utils.gimme_a_cidr()
        # Pass the cidr as a string — the subnets client serializes the
        # request to json; consistent with the sibling tests in this file.
        sub = self.admin_subnets_client.create_subnet(
            network_id=ext_network['id'],
            cidr=str(cidr.cidr),
            ip_version=self._ip_version,
            underlay=underlay)['subnet']
        new_gateway = '100.0.0.1'
        msg = "Network Gateway IP Address {} is out of range.".format(
            new_gateway)
        self.assertRaisesRegex(exceptions.BadRequest,
                               msg,
                               self.admin_subnets_client.update_subnet,
                               sub['id'],
                               gateway_ip=new_gateway)
        self.admin_subnets_client.delete_subnet(sub['id'])
def _create_vsd_floatingip_pool(self):
    """Create a VSD floating-ip pool (underlay) and schedule its cleanup.

    :returns: the first element of the client's create response.
    """
    # Random cidr to avoid clashes between parallel test runs.
    pool_cidr = nuage_data_utils.gimme_a_cidr()
    first_ip = IPAddress(pool_cidr.first)
    created = self.nuage_client.create_floatingip_pool(
        name=data_utils.rand_name('fip-pool'),
        address=str(first_ip),
        gateway=str(first_ip + 1),
        netmask=str(pool_cidr.netmask),
        extra_params={"underlay": True})
    self.addCleanup(self.nuage_client.delete_vsd_shared_resource,
                    created[0]['ID'])
    return created[0]
def test_nuage_external_network_update_to_internal(self):
    """test_nuage_external_network_update_to_internal

    Releases below Rocky: OPENSTACK-2340
    Releases from Rocky:
    Check that external network with subnets can not be updated
    to internal
    """
    # Only the from-Rocky behavior is asserted here.
    if Topology.from_openstack('ROCKY'):
        kwargs = {'router:external': True}
        n1 = self.create_network(manager=self.admin_manager, **kwargs)
        self.create_subnet(n1, cidr=data_utils.gimme_a_cidr(),
                           mask_bits=24, manager=self.admin_manager)
        # Attempt to flip the network back to internal must fail.
        kwargs = {'router:external': False}
        msg = ('External network with subnets can not be '
               'changed to non-external network')
        self.assertRaisesRegex(exceptions.BadRequest, msg,
                               self.update_network, n1['id'],
                               self.admin_manager, **kwargs)
def test_nuage_underlay_on_off(self):
    """test_nuage_underlay_on_off

    Check that when providing --underlay True subsequent subnets
    cannot be created using --underlay False
    """
    ext_net = self.create_network(manager=self.admin_manager,
                                  **{'router:external': True})
    underlay_subnet = self.create_subnet(ext_net,
                                         cidr=data_utils.gimme_a_cidr(),
                                         mask_bits=24,
                                         underlay=True,
                                         manager=self.admin_manager)
    # A second, underlay=False subnet in the same uplink domain must fail.
    self.assertRaises(exceptions.BadRequest,
                      self.create_subnet,
                      ext_net,
                      cidr=IPNetwork('20.0.0.0/24'),
                      mask_bits=24,
                      underlay=False,
                      nuage_uplink=underlay_subnet['nuage_uplink'],
                      manager=self.admin_manager)
def test_make_network_with_routed_subnet_external(self):
    """A routed internal network cannot be flipped to external."""
    int_network = self.create_network()
    cidr = nuage_data_utils.gimme_a_cidr()
    subnet = self.create_subnet(network=int_network, cidr=cidr,
                                mask_bits=24,
                                ip_version=self._ip_version)
    router = self.create_router(
        external_network_id=CONF.network.public_network_id)
    # Attach subnet
    self.create_router_interface(router_id=router['id'],
                                 subnet_id=subnet['id'])
    # The router interface port keeps the network busy; update must fail.
    kwargs = {'router:external': True}
    msg = ('Network {} cannot be updated. There are one or more ports '
           'still in use on the network.').format(int_network["id"])
    self.assertRaisesRegex(
        exceptions.BadRequest, msg,
        self.admin_networks_client.update_network,
        int_network['id'], **kwargs)
def _create_trunk_subports_resources(self, ip_version, is_vsd_mgd, is_l3):
    """Create one vlan network (and subnets) per trunk subport.

    Networks go into self.subports['networks']; subnets are either
    linked to pre-created VSD resources (vsd managed) or OS-managed
    with DHCP disabled.
    """
    kwargs = {
        'provider:network_type': 'vlan',
        'provider:physical_network': 'physnet1'
    }
    self.subports['networks'] = [
        self.create_network(manager=self.admin_manager, **kwargs)
        for _ in range(self.num_subports)
    ]
    if is_vsd_mgd:
        self._create_subports_vsd_resources(ip_version, is_l3)
        # Pick the VSD resources and helper matching l2/l3.
        if is_l3:
            vsd_subnets = self.subports['l3subnets']
            create_vsd_managed_subnet = self.create_l3_vsd_managed_subnet
        else:
            vsd_subnets = self.subports['l2domains']
            create_vsd_managed_subnet = self.create_l2_vsd_managed_subnet
        for ip_type in ip_version:
            # Pair each subport network with its VSD resource.
            for network, vsd_subnet in zip(self.subports['networks'],
                                           vsd_subnets):
                create_vsd_managed_subnet(network, vsd_subnet,
                                          ip_version=ip_type,
                                          manager=self.admin_manager,
                                          dhcp_managed=False)
    else:
        for ip_type in ip_version:
            netmask = (self.netmask_ipv4 if ip_type == 4
                       else self.netmask_ipv6)
            for network in self.subports['networks']:
                self.create_subnet(network, enable_dhcp=False,
                                   cidr=data_utils.gimme_a_cidr(
                                       ip_type, netmask),
                                   ip_version=ip_type,
                                   manager=self.admin_manager)
def test_update_external_subnet_with_gateway(self):
    """An external subnet's gateway can be updated within its cidr.

    For both underlay values: create an external subnet with an
    allocation pool leaving room below it, update the gateway to an
    in-range address and verify the update took effect.
    """
    underlay_states = [False, True]
    for underlay in underlay_states:
        ext_network = self._create_network(external=True)
        cidr = nuage_data_utils.gimme_a_cidr()
        # netaddr.IPAddress() does not accept an IPNetwork object —
        # derive addresses from the first address of the cidr instead.
        first_ip = netaddr.IPAddress(cidr.first)
        allocation_pools = [{
            'start': str(first_ip + 3),
            'end': str(first_ip + 6)
        }]
        sub = self.admin_subnets_client.create_subnet(
            network_id=ext_network['id'],
            cidr=str(cidr.cidr),
            ip_version=self._ip_version,
            underlay=underlay,
            allocation_pools=allocation_pools)['subnet']
        old_gateway = sub['gateway_ip']
        # In-range gateway below the allocation pool.
        new_gateway = str(first_ip + 2)
        updated_sub = self.admin_subnets_client.update_subnet(
            sub['id'], gateway_ip=new_gateway)['subnet']
        curr_gateway = updated_sub['gateway_ip']
        self.assertNotEqual(old_gateway, curr_gateway)
        self.assertEqual(new_gateway, curr_gateway)
        self.admin_subnets_client.delete_subnet(sub['id'])
def _cli_show_external_subnet_with_underlay(self):
    """_cli_show_external_subnet_with_underlay

    Show an external fip subnet created with underlay

    Response includes underlay values according default setting in the
    .ini file
    """
    # self.needs_ini_nuage_fip_underlay(default_underlay)
    # avoid overlapping cidr's: use different ones ;-)
    # cleanup is at class level ftb, so use a different cidr according
    # the value of default_underlay
    default_underlay = self.nuage_fip_underlay_ini
    if default_underlay is None:
        cidr_addition = 0
    elif default_underlay is False:
        cidr_addition = 20
    else:
        cidr_addition = 500
    cidr_net = nuage_data_utils.gimme_a_cidr().next(cidr_addition)
    underlay_states = [False, True]
    for underlay in underlay_states:
        rand_name_str = data_utils.rand_name()
        ext_network_name = "ext-fip-network-" + rand_name_str
        ext_network = self.create_network_with_args(ext_network_name,
                                                    " --router:external")
        ext_subnet_name = "ext-fip-underlay-subnet-" + rand_name_str
        underlay_str = "--underlay=" + str(underlay)
        # str(cidr_net) is the idiomatic spelling of cidr_net.__str__().
        subnet = self.create_subnet_with_args(ext_network['name'],
                                              str(cidr_net),
                                              "--name ",
                                              ext_subnet_name,
                                              underlay_str)
        show_subnet = self.show_subnet(subnet['id'])
        # underlay value should match the default one
        self.assertIn(str(underlay).lower(),
                      str(show_subnet['underlay']).lower())
        cidr_net = cidr_net.next(1)
def _verify_list_external_subnets_underlay(self):
    """_verify_list_external_subnets_underlay

    List external subnets with and without underlay

    The created fip subnets must be in the list and a show of them
    reveals the same underlay value used during creation
    """
    cidr = nuage_data_utils.gimme_a_cidr()
    # Names encode the underlay mode to exercise: true / false / default.
    my_subnet_list = ['subnet-underlay_false',
                      'subnet_underlay_true',
                      'subnet_underlay']
    for this_subnet in my_subnet_list:
        this_ext_network = self._create_network(external=True)
        if re.search('true', this_subnet):
            underlay = True
        elif re.search('false', this_subnet):
            underlay = False
        else:
            # Use this for checking default behavior when
            # nuage_fip_underlay is present in .ini file
            underlay = None
        subnet_name = data_utils.rand_name(
            'list-external-fip-' + this_subnet)
        if underlay is None:
            create_body = self.admin_subnets_client.create_subnet(
                network_id=this_ext_network['id'],
                cidr=str(cidr.cidr),
                ip_version=self._ip_version,
                name=subnet_name)
        else:
            create_body = self.admin_subnets_client.create_subnet(
                network_id=this_ext_network['id'],
                cidr=str(cidr.cidr),
                ip_version=self._ip_version,
                name=subnet_name,
                underlay=underlay)
        # Verify the subnet exists in the list of all subnets
        list_body = self.admin_subnets_client.list_subnets()
        subnet_found = False
        for subnet in list_body['subnets']:
            if create_body['subnet']['id'] == subnet['id']:
                # our created subnet is in the list: check the underlay
                # field of the show output, as it is not
                # in the list output for performance reasons
                # (requires an extra api call per subnet...)
                subnet_found = True
                show_body = self.admin_subnets_client.show_subnet(
                    subnet['id'])
                underlay_listed = show_body['subnet']['underlay']
                if underlay is not None:
                    # Verify underlay with the value used at creation time
                    # assign underlay_compare to underlay,
                    # the value used at creation
                    underlay_compare = create_body['subnet']['underlay']
                else:
                    # No underlay given: should match the value in the
                    # .ini file
                    # Note that 'None' has same effect as false:
                    # check on False
                    underlay_compare = self.nuage_fip_underlay_ini
                    if underlay_compare is None:
                        underlay_compare = False
                self.assertEqual(str(underlay_listed),
                                 str(underlay_compare),
                                 "FIP NOK: listed underlay values do not "
                                 "match")
        self.assertEqual(subnet_found, True,
                         "FIP NOK: created fip subnet "
                         "is not in the subnet list")
        # Next, non-overlapping cidr for the following iteration.
        cidr = cidr.next(1)
def test_get_linked_shared_resource_l3_domains(self):
    """test_get_linked_shared_resource_l3_domains

    Test that when linked a shared subnet to a private l3subnet the
    attributes returned are those of the shared subnet.

    :return:
    """
    # Create private enterprise
    enterprise_name = data_utils.rand_name('test-verify-linked-domains')
    enterprise = self.NuageNetworksClient.create_netpartition(
        enterprise_name)['net_partition']
    self.addCleanup(self.NuageNetworksClient.delete_netpartition,
                    enterprise['id'])
    # Create shared resources: a public dualstack l3 subnet in the
    # 'Shared Infrastructure' enterprise.
    cidr4 = nuage_utils.gimme_a_cidr()
    cidr6 = nuage_utils.gimme_a_cidr(ip_version=6)
    shared_l3dom_template = self.vsd.create_l3domain_template(
        enterprise='Shared Infrastructure')
    self.addCleanup(shared_l3dom_template.delete)
    shared_l3dom = self.vsd.create_l3domain(
        enterprise='Shared Infrastructure',
        template_id=shared_l3dom_template.id)
    self.addCleanup(shared_l3dom.delete)
    shared_zone = self.vsd.create_zone(domain=shared_l3dom)
    self.addCleanup(shared_zone.delete)
    shared_l3subnet = self.vsd.create_subnet(
        zone=shared_zone,
        ip_type='DUALSTACK',
        cidr4=cidr4,
        cidr6=cidr6,
        enable_dhcpv4=True,
        enable_dhcpv6=True,
        resource_type='PUBLIC'
    )
    self.addCleanup(shared_l3subnet.delete)
    # Link in created enterprise: a public-zone subnet associated with
    # the shared resource.
    l3dom_template = self.vsd.create_l3domain_template(
        enterprise=enterprise_name)
    self.addCleanup(l3dom_template.delete)
    l3dom = self.vsd.create_l3domain(
        enterprise=enterprise_name, template_id=l3dom_template.id)
    self.addCleanup(l3dom.delete)
    public_zone = self.vsd.create_zone(domain=l3dom, public_zone=True)
    self.addCleanup(public_zone.delete)
    l3subnet = self.vsd.create_subnet(
        zone=public_zone,
        associated_shared_network_resource_id=shared_l3subnet.id
    )
    self.addCleanup(l3subnet.delete)
    # Get l3 subnet
    subnets = self.NuageNetworksClient.get_vsd_subnets(
        public_zone.id)['vsd_subnets']
    # Check that l3subnet is found
    self.assertNotEmpty(subnets, 'Enterprise should contain l3 subnets')
    self.assertEqual(1, len(subnets),
                     'Exactly one l3 subnet should be found, but '
                     'found: {}'.format(len(subnets)))
    subnet_json = subnets[0]
    # Verify
    self._verify_l3_subnet(subnet_json, vspk_subnet=l3subnet,
                           vspk_backend_subnet=shared_l3subnet,
                           with_enterprise=False)
def test_nova_qos_multinic(self):
    """test_nova_qos_multinic

    Test NOVA QOS with fip rate limiting when changing the
    associated fip.
    """
    # Bandwidth limits in kbps; flavor quota is expressed differently,
    # hence the // 8 conversion.
    BW_LIMIT_NOVA = 2000
    BW_LIMIT_NOVA_FLAVOR = BW_LIMIT_NOVA // 8
    BW_LIMIT_FIP_EGRESS1 = 1500
    BW_LIMIT_FIP_INGRESS1 = 1250
    BW_LIMIT_FIP_EGRESS2 = 1200
    BW_LIMIT_FIP_INGRESS2 = 1100
    # Two routed networks, one per server nic.
    network1 = self.create_network()
    cidr1 = data_utils.gimme_a_cidr()
    subnet41 = self.create_subnet(network=network1, cidr=cidr1)
    network2 = self.create_network()
    cidr2 = data_utils.gimme_a_cidr()
    subnet42 = self.create_subnet(network=network2, cidr=cidr2)
    router = self.create_router(
        external_network_id=CONF.network.public_network_id)
    self.router_attach(router, subnet41)
    self.router_attach(router, subnet42)
    # Ensure TCP traffic is allowed
    security_group = self.create_open_ssh_security_group()
    self.create_traffic_sg_rule(security_group,
                                direction='ingress',
                                ip_version=4,
                                dest_port=self.DEST_PORT)
    # Create NOVA QOS flavor
    flavor = self._create_nova_qos_flavor(BW_LIMIT_NOVA_FLAVOR)
    port1 = self.create_port(network=network1,
                             security_groups=[security_group['id']])
    # router option = 0 to prevent default route being installed.
    port2 = self.create_port(network=network2,
                             security_groups=[security_group['id']],
                             extra_dhcp_opts=[{'opt_name': 'router',
                                               'opt_value': '0'}])
    server = self.create_tenant_server(ports=[port1, port2],
                                       flavor=flavor['id'],
                                       prepare_for_connectivity=True)
    # VRS-35132: Ethernet fragmentation causes QOS to drop packets.
    server.send('sudo ip link set dev eth0 mtu {}'.format(
        base_nuage_qos.QOS_MTU))
    # VRS-35132: Ethernet fragmentation causes QOS to drop packets.
    server.send('sudo ip link set dev eth1 mtu {}'.format(
        base_nuage_qos.QOS_MTU))
    # Remember the fip currently attached (on port1).
    floatingip1 = server.associated_fip
    # Flip floating ip of server
    # First delete wrong default route, set timeout shorter because the
    # request will time out as return traffic does not have a route.
    original_timeout = server.console().ssh_client.timeout
    server.console().ssh_client.timeout = 10
    try:
        server.console().exec_command(
            'sudo ip r del default; '
            'sudo ip r add default via {}'.format(subnet42['gateway_ip']))
    except exceptions.TimeoutException:
        # Expected: return traffic has no route while flipping.
        pass
    server.console().ssh_client.timeout = original_timeout
    server.associated_fip = None
    self.create_fip_to_server(server, port=port2)
    server.init_console()
    floatingip2 = server.associated_fip
    # Test both FIPS throughput with only NOVA QOS active
    self._test_bandwidth(server, egress_bw=BW_LIMIT_NOVA,
                         ingress_bw=BW_LIMIT_NOVA,
                         test_msg='FIP2 with Nova QOS only.')
    # Flip floating ip of server
    # First delete wrong default route, set timeout shorter because the
    # request will time out as return traffic does not have a route.
    original_timeout = server.console().ssh_client.timeout
    server.console().ssh_client.timeout = 10
    try:
        server.console().exec_command(
            'sudo ip r del default; '
            'sudo ip r add default via {}'.format(subnet41['gateway_ip']))
    except exceptions.TimeoutException:
        pass
    server.console().ssh_client.timeout = original_timeout
    server.associated_fip = floatingip1
    server.init_console()
    self._test_bandwidth(server, egress_bw=BW_LIMIT_NOVA,
                         ingress_bw=BW_LIMIT_NOVA,
                         test_msg='FIP1 with Nova Qos Only.')
    # update ingress & egress fip rate limiting
    self.update_floatingip(
        floatingip1,
        nuage_egress_fip_rate_kbps=BW_LIMIT_FIP_EGRESS1,
        nuage_ingress_fip_rate_kbps=BW_LIMIT_FIP_INGRESS1)
    self.update_floatingip(
        floatingip2,
        nuage_egress_fip_rate_kbps=BW_LIMIT_FIP_EGRESS2,
        nuage_ingress_fip_rate_kbps=BW_LIMIT_FIP_INGRESS2)
    # Check bw limited
    self._test_bandwidth(server, egress_bw=BW_LIMIT_FIP_EGRESS1,
                         ingress_bw=BW_LIMIT_FIP_INGRESS1,
                         test_msg='Fip1 with Fip RL active.')
    # Flip floating ip of server, change default route
    server.console().ssh_client.timeout = 10
    try:
        server.console().exec_command(
            'sudo ip r del default; '
            'sudo ip r add default via {}'.format(subnet42['gateway_ip']))
    except exceptions.TimeoutException:
        pass
    server.console().ssh_client.timeout = original_timeout
    server.associated_fip = floatingip2
    server.init_console()
    self._test_bandwidth(server, egress_bw=BW_LIMIT_FIP_EGRESS2,
                         ingress_bw=BW_LIMIT_FIP_INGRESS2,
                         test_msg='Fip2 with Fip RL active.')
def _create_os_resources(self):
    """Provision the OpenStack (and, for L3, VSD-managed) trunk topology.

    Creates one parent (flat) network and three subport (vlan) networks
    on ``self.physnet``. When ``self.is_l3`` is set, a VSD L3 domain is
    created first with one subnet per network, and each OpenStack subnet
    is linked to its VSD subnet via ``nuagenet``/``net_partition``.
    A trunk is then built from the parent port plus three vlan subports,
    and two tenant servers are booted.

    Side effects: sets ``self.parent_resource``, ``self.sub_resource1..3``,
    ``self.parent_network``/``parent_subnet``, ``self.sub_network1..3``/
    ``sub_subnet1..3``, ``self.parent_port``, ``self.sub_port1..3``,
    ``self.trunk``, ``self.vm_sub3`` (server on sub_network3) and
    ``self.vm`` (server on the trunk parent port). Cleanups are
    registered on ``self.parent``.
    """
    # Trunking requires admin user
    self.parent.manager = self.parent.admin_manager
    cidr_parent = utils.gimme_a_cidr()
    cidr_sub1 = utils.gimme_a_cidr()
    cidr_sub2 = utils.gimme_a_cidr()
    cidr_sub3 = utils.gimme_a_cidr()
    if self.is_l3:
        # Create vsd managed l3 domain and subnets
        vsd_l3dom_tmplt = self.parent.vsd.create_l3domain_template()
        self.parent.addCleanup(vsd_l3dom_tmplt.delete)
        vsd_l3dom = self.parent.vsd.create_domain(
            template_id=vsd_l3dom_tmplt.id)
        self.parent.addCleanup(vsd_l3dom.delete)
        vsd_zone = self.parent.vsd.create_zone(domain=vsd_l3dom)
        self.parent_resource = self.parent.vsd.create_subnet(
            zone=vsd_zone, ip_type='IPV4', enable_dhcpv4=False,
            cidr4=cidr_parent)
        self.parent.addCleanup(self.parent_resource.delete)
        self.sub_resource1 = self.parent.vsd.create_subnet(
            zone=vsd_zone, ip_type='IPV4', enable_dhcpv4=False,
            cidr4=cidr_sub1)
        self.parent.addCleanup(self.sub_resource1.delete)
        self.sub_resource2 = self.parent.vsd.create_subnet(
            zone=vsd_zone, ip_type='IPV4', enable_dhcpv4=False,
            cidr4=cidr_sub2)
        self.parent.addCleanup(self.sub_resource2.delete)
        self.sub_resource3 = self.parent.vsd.create_subnet(
            zone=vsd_zone, ip_type='IPV4', enable_dhcpv4=False,
            cidr4=cidr_sub3)
        self.parent.addCleanup(self.sub_resource3.delete)

    # Parent network: flat, so untagged traffic maps to the parent port
    kwargs = {
        'provider:network_type': 'flat',
        'provider:physical_network': self.physnet
    }
    self.parent_network = self.parent.create_network(**kwargs)
    subnet_kwargs = {}
    if self.is_l3:
        subnet_kwargs['nuagenet'] = self.parent_resource.id
        subnet_kwargs['net_partition'] = (
            self.parent.default_netpartition_name)
    self.parent_subnet = self.parent.create_subnet(
        self.parent_network, cidr=cidr_parent,
        mask_bits=cidr_parent.prefixlen, enable_dhcp=False,
        **subnet_kwargs)

    # first subport network (vlan segmentation for trunk subports)
    kwargs = {
        'provider:network_type': 'vlan',
        'provider:physical_network': self.physnet
    }
    self.sub_network1 = self.parent.create_network(**kwargs)
    subnet_kwargs = {}
    if self.is_l3:
        subnet_kwargs['nuagenet'] = self.sub_resource1.id
        subnet_kwargs['net_partition'] = (
            self.parent.default_netpartition_name)
    self.sub_subnet1 = self.parent.create_subnet(
        self.sub_network1, cidr=cidr_sub1,
        mask_bits=cidr_sub1.prefixlen, enable_dhcp=False,
        **subnet_kwargs)

    # second subport network
    self.sub_network2 = self.parent.create_network(**kwargs)
    subnet_kwargs = {}
    if self.is_l3:
        subnet_kwargs['nuagenet'] = self.sub_resource2.id
        subnet_kwargs['net_partition'] = (
            self.parent.default_netpartition_name)
    self.sub_subnet2 = self.parent.create_subnet(
        self.sub_network2, cidr=cidr_sub2,
        mask_bits=cidr_sub2.prefixlen, enable_dhcp=False,
        **subnet_kwargs)

    # Third subport network
    self.sub_network3 = self.parent.create_network(**kwargs)
    subnet_kwargs = {}
    if self.is_l3:
        subnet_kwargs['nuagenet'] = self.sub_resource3.id
        subnet_kwargs['net_partition'] = (
            self.parent.default_netpartition_name)
    self.sub_subnet3 = self.parent.create_subnet(
        self.sub_network3, cidr=cidr_sub3,
        mask_bits=cidr_sub3.prefixlen, enable_dhcp=False,
        **subnet_kwargs)

    # Get domains: in L2 the VSD l2domains only exist after subnet
    # creation, so fetch them now
    if not self.is_l3:
        self.parent_resource = self.parent.vsd.get_l2domain(
            by_subnet=self.parent_subnet)
        self.sub_resource1 = self.parent.vsd.get_l2domain(
            by_subnet=self.sub_subnet1)
        self.sub_resource2 = self.parent.vsd.get_l2domain(
            by_subnet=self.sub_subnet2)
        self.sub_resource3 = self.parent.vsd.get_l2domain(
            by_subnet=self.sub_subnet3)

    # Create trunk with three subports
    self.parent_port = self.parent.create_port(self.parent_network)
    self.sub_port1 = self.parent.create_port(self.sub_network1)
    self.sub_port2 = self.parent.create_port(self.sub_network2)
    self.sub_port3 = self.parent.create_port(self.sub_network3)
    subport_dicts = [
        {
            'port_id': self.sub_port1['id'],
            'segmentation_type': 'vlan',
            'segmentation_id': 100
        },
        {
            'port_id': self.sub_port2['id'],
            'segmentation_type': 'vlan',
            'segmentation_id': 101
        },
        {
            'port_id': self.sub_port3['id'],
            'segmentation_type': 'vlan',
            'segmentation_id': 102
        },
    ]
    self.trunk = self.parent.create_trunk(
        port=self.parent_port, subports=subport_dicts,
        client=self.parent.plugin_network_client_admin)

    # Create a VM on sub_network3: No need to manually correct then for
    # sub_network3.
    # BUGFIX: the original assigned this server to self.vm and then
    # immediately overwrote it below, losing the reference; keep it
    # under its own attribute instead.
    self.vm_sub3 = self.parent.create_tenant_server(
        networks=[self.sub_network3],
        prepare_for_connectivity=False)

    # Create a VM on the parentport
    self.vm = self.parent.create_tenant_server(
        ports=[self.parent_port],
        prepare_for_connectivity=False)
def test_icmp_connectivity_multiple_subnets_in_shared_network(self):
    """test_icmp_connectivity_multiple_subnets_in_shared_network

    Verify ping connectivity between VMs whose floating ips live in two
    different subnets of the same external network. Since both subnets
    are created with underlay=False, VSD places them in a fresh L3
    domain instead of the pre-existing shared FIP-to-underlay domain.
    """
    # External network carrying two non-underlay FIP subnets
    ext_network = self.create_network(manager=self.admin_manager,
                                      **{"router:external": True})
    ext_subnet_1 = self.create_subnet(ext_network,
                                      manager=self.admin_manager,
                                      cidr=data_utils.gimme_a_cidr(),
                                      underlay=False)
    ext_subnet_2 = self.create_subnet(ext_network,
                                      manager=self.admin_manager,
                                      cidr=data_utils.gimme_a_cidr(),
                                      underlay=False)

    # One router per tenant network, plus an access router on the
    # default external network (used to reach server 1 over ssh)
    router_1 = self.create_router(external_network_id=ext_network['id'])
    router_2 = self.create_router(external_network_id=ext_network['id'])
    access_router = self.create_router(
        external_network_id=self.ext_net_id)

    network_1 = self.create_network()
    subnet_1 = self.create_subnet(network_1,
                                  cidr=IPNetwork('52.0.0.0/24'))
    self.router_attach(router_1, subnet_1)

    network_2 = self.create_network()
    subnet_2 = self.create_subnet(network_2,
                                  cidr=IPNetwork('53.0.0.0/24'))
    self.router_attach(router_2, subnet_2)

    # Access network so server 1 can be reached for the ping test
    access_network = self.create_network()
    access_subnet = self.create_subnet(access_network,
                                       cidr=data_utils.gimme_a_cidr())
    self.router_attach(access_router, access_subnet)

    # create open-ssh security group
    secgroup = self.create_open_ssh_security_group()

    # Ports: suppress the default route on port 1 via DHCP option so the
    # server's routing can be steered by user_data below
    port_1 = self.create_port(
        network=network_1,
        security_groups=[secgroup['id']],
        extra_dhcp_opts=[{'opt_name': 'router', 'opt_value': '0'}]
    )
    port_2 = self.create_port(network=network_2,
                              security_groups=[secgroup['id']])
    access_port = self.create_port(network=access_network,
                                   security_groups=[secgroup['id']])

    self.create_floatingip(external_network_id=ext_network['id'],
                           subnet_id=ext_subnet_1['id'],
                           port_id=port_1['id'])
    fip_2 = self.create_floatingip(external_network_id=ext_network['id'],
                                   subnet_id=ext_subnet_2['id'],
                                   port_id=port_2['id'])

    server_2 = self.create_tenant_server(
        ports=[port_2],
        pre_prepared_fip=fip_2,
        prepare_for_connectivity=False)
    server_1 = self.create_tenant_server(
        ports=[access_port, port_1],
        prepare_for_connectivity=True,
        user_data='ip route add {} via {}'.format(
            ext_subnet_2['cidr'], subnet_1['gateway_ip']))

    # Test connectivity between peer servers
    self.assert_ping(server_1, server_2, ext_network,
                     address=fip_2['floating_ip_address'])
def _cli_list_external_subnets_underlay(self):
    """_cli_list_external_subnets_underlay

    List external subnets with and without underlay.
    The created fip subnets must be in the list, and a show of each one
    must reveal the same underlay value used during creation (or the
    .ini default when no --underlay option was passed).
    """
    # self.needs_ini_nuage_fip_underlay(default_underlay)
    # avoid overlapping cidr's: use different ones ;-)
    # cleanup is at class level ftb, so use a different cidr according
    # the value of default_underlay
    default_underlay = self.nuage_fip_underlay_ini
    if default_underlay is None:
        cidr_addition = 0
    elif default_underlay is False:
        cidr_addition = 20
    else:
        cidr_addition = 50
    # Subnet name encodes the underlay flag to use; the third entry
    # (no true/false in the name) exercises the .ini default behavior.
    my_subnet_list = ['list-subnet-underlay-false-',
                      'list-subnet_underlay-true-',
                      'list-subnet_underlay-']
    # Offset the base CIDR by cidr_addition networks so parallel runs
    # with different defaults do not overlap.
    cidr_net = nuage_data_utils.gimme_a_cidr().next(cidr_addition)
    for this_subnet in my_subnet_list:
        rand_name_str = data_utils.rand_name()
        ext_network_name = "ext-fip-network-" + rand_name_str
        ext_network = self.create_network_with_args(ext_network_name,
                                                    " --router:external")
        ext_subnet_name = this_subnet + rand_name_str
        # Derive the underlay flag from the subnet name template
        if re.search('true', this_subnet):
            underlay = True
        elif re.search('false', this_subnet):
            underlay = False
        else:
            # Use this for checking default behavior when
            # nuage_fip_underlay is present in .ini file
            underlay = None
        cidr = str(cidr_net.cidr)
        if underlay is None:
            # No --underlay option: server applies the .ini default
            created_subnet = self.create_subnet_with_args(
                ext_network['name'], cidr, "--name ",
                ext_subnet_name)
        else:
            underlay_str = "--underlay=" + str(underlay)
            created_subnet = self.create_subnet_with_args(
                ext_network['name'], cidr, "--name ",
                ext_subnet_name, underlay_str)
        subnet_list = self.parser.listing(self.list_subnets())
        # Verify the subnet exists in the list of all subnets
        subnet_found = False
        for subnet in subnet_list:
            created_id = created_subnet['id']
            listed_id = subnet['id']
            if created_id == listed_id:
                # our created subnet is in the list: check the
                # underlay field of the show output, as it is not
                # in the list output for performance reasons
                # (requires an extra api call per subnet...)
                subnet_found = True
                show_subnet = self.show_subnet(listed_id)
                underlay_listed = show_subnet['underlay']
                if underlay is not None:
                    # Verify underlay with the value used at creation
                    # time
                    underlay_compare = created_subnet['underlay']
                else:
                    # No underlay given: should match the value in
                    # the .ini file
                    # Note that 'None' has same effect as false:
                    # check on False
                    underlay_compare = default_underlay
                    if default_underlay is None:
                        underlay_compare = False
                # CLI output is textual, so compare string forms
                self.assertEqual(underlay_listed, str(underlay_compare),
                                 "FIP NOK: listed underlay values do not "
                                 "match")
        # Advance to the next CIDR for the next iteration
        cidr_net = cidr_net.next(1)
        self.assertEqual(subnet_found, True,
                         "FIP NOK: created fip subnet is not in the "
                         "subnet list")