def test_gateway(self):
    '''Validate that the first address of the subnet CIDR is chosen as the
       gateway by default.
       Check that the gateway cannot be from within the allocation pool
       Check that custom addresses can be given
    '''
    vn1_name = get_random_name('vn1')
    vn1_subnet_cidr = get_random_cidr()
    vn1_gateway = get_an_ip(vn1_subnet_cidr, 1)
    vn1_subnets = [{'cidr': vn1_subnet_cidr,
                    'allocation_pools': [
                        {'start': get_an_ip(vn1_subnet_cidr, 3),
                         'end': get_an_ip(vn1_subnet_cidr, 10)}],
                    }]
    vn1_vm1_name = get_random_name('vn1-vm1')
    vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
    vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name,
                                 image_name='cirros-0.3.0-x86_64-uec')
    assert vm1_fixture.wait_till_vm_is_up()
    output = vm1_fixture.run_cmd_on_vm(['route -n'])
    route_output = output.values()[0]
    assert vn1_gateway in route_output, 'First address of CIDR %s : %s '\
        'is NOT set as gateway on the VM' % (vn1_subnet_cidr, vn1_gateway)
    self.logger.info('First address of CIDR %s : %s '
                     'is set as gateway on the VM' % (
                         vn1_subnet_cidr, vn1_gateway))
def create_netns(self, bms_node, namespace, cidr):
    bms_data = self.inputs.bms_data[bms_node]
    server_ip = bms_data['mgmt_ip']
    username = bms_data['username']
    password = bms_data['password']
    intf = bms_data['interfaces'][0]
    mac = intf['host_mac']
    gw_ip = get_an_ip(cidr, offset=1)
    bms_ip = get_an_ip(cidr, offset=2)
    mask = cidr.split('/')[1]
    interface = get_intf_name_from_mac(server_ip, mac,
                                       username=username,
                                       password=password)
    create_netns(server_ip, username, password, namespace, interface,
                 address=bms_ip, gateway=gw_ip, mask=mask)
    self.addCleanup(delete_netns, server_ip, username, password, namespace)
    prouter = self.get_associated_prouters(bms_node, [intf])[0]
    prouter.configure_interface(intf['tor_port'], gw_ip, mask)
    self.addCleanup(prouter.delete_interface, intf['tor_port'])
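# Most of the tests below lean on a get_an_ip(cidr, offset) utility. As a
# rough sketch of what such a helper is assumed to do (the real
# implementation in this repo may differ), it returns the host address
# 'offset' positions into a CIDR -- offset 1 being the conventional gateway
# and offset 2 the default DNS/service address. The name below is
# hypothetical and purely illustrative.
def _get_an_ip_sketch(cidr, offset=0):
    from netaddr import IPNetwork  # local import keeps the sketch self-contained
    # IPNetwork supports indexing; index 0 is the network address itself
    return str(IPNetwork(cidr)[offset])

# Example: _get_an_ip_sketch('10.1.1.0/24', 1)  -> '10.1.1.1'
#          _get_an_ip_sketch('10.1.1.0/24', 10) -> '10.1.1.10'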
def test_bug_1630829(self):
    '''
    Create an IPAM ipam1 and create a VN with a subnet from ipam1
    create a VM and then create another subnet on default-network-ipam
    Use allocation pool while creating subnets
    '''
    ipam_fixture = self.create_ipam()
    cidr1 = get_random_cidr()
    alloc_pool1 = {'start': get_an_ip(cidr1, 100),
                   'end': get_an_ip(cidr1, 200)}
    subnet1 = {'cidr': cidr1, 'allocation_pools': [alloc_pool1]}
    vn_fixture = self.create_vn(vn_subnets=[subnet1],
                                ipam_fq_name=ipam_fixture.fq_name,
                                option='contrail')
    vm1_fixture = self.create_vm(vn_fixture, image_name='cirros')
    assert vm1_fixture.verify_on_setup()
    assert vn_fixture.verify_on_setup()
    cidr2 = get_random_cidr()
    alloc_pool2 = {'start': get_an_ip(cidr2, 100),
                   'end': get_an_ip(cidr2, 200)}
    subnet2 = {'cidr': cidr2, 'allocation_pools': [alloc_pool2]}
    vn_fixture.create_subnet(subnet2)
    assert vn_fixture.verify_on_setup()
    assert vm1_fixture.verify_on_setup()
def test_subnet_host_routes(self):
    '''Validate host_routes parameter in subnet
       Create a VN with subnet having a host-route
       Create a VM using that subnet
       Check the route table in the VM
    '''
    vn1_name = get_random_name('vn1')
    vn1_subnets = [get_random_cidr()]
    vn1_gateway = get_an_ip(vn1_subnets[0], 1)
    dest_ip = '8.8.8.8'
    destination = dest_ip + '/32'
    # Set nh to be some other IP in the subnet
    nh = get_an_ip(vn1_subnets[0], 10)
    vn1_subnets = [{'cidr': vn1_subnets[0],
                    'host_routes': [
                        {'destination': destination, 'nexthop': nh},
                        {'destination': '0.0.0.0/0', 'nexthop': vn1_gateway}],
                    }]
    vn1_vm1_name = get_random_name('vn1-vm1')
    vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
    vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
    assert vm1_fixture.wait_till_vm_is_up()
    output = vm1_fixture.run_cmd_on_vm(['route -n'])
    route_output = output.values()[0]
    assert dest_ip in route_output, 'Route pushed from DHCP is not '\
        'present in Route table of the VM'
    self.logger.info('Route pushed from DHCP is present in route-table '
                     'of the VM..OK')
    self.logger.info('Updating the subnet to remove the host routes')
    vn1_subnet_dict = {'host_routes': []}
    vn1_fixture.update_subnet(vn1_fixture.vn_subnet_objs[0]['id'],
                              vn1_subnet_dict)
    time.sleep(5)
    vm1_fixture.reboot()
    assert vm1_fixture.wait_till_vm_is_up()
    output = vm1_fixture.run_cmd_on_vm(['route -n'])
    route_output = output.values()[0]
    assert dest_ip not in route_output, 'Route pushed from DHCP is still '\
        'present in Route table of the VM'
    self.logger.info('Route table in VM does not have the host routes..OK')
    assert vn1_gateway in route_output, 'Default Gateway is missing in the '\
        'route table of the VM'
def test_dns_nameservers(self):
    '''Validate dns-nameservers parameter in subnet
       Create a VN with subnet having a dns-nameserver
       Create a VM using that subnet
       Check the resolv.conf in the VM
    '''
    vn1_name = get_random_name('vn1')
    vn1_subnets = [get_random_cidr()]
    vn1_gateway = get_an_ip(vn1_subnets[0], 1)
    vn1_default_dns = get_an_ip(vn1_subnets[0], 2)
    dns1_ip = '8.8.8.8'
    dns2_ip = '4.4.4.4'
    vn1_subnets = [{'cidr': vn1_subnets[0],
                    'dns_nameservers': [dns1_ip, dns2_ip]}]
    vn1_vm1_name = get_random_name('vn1-vm1')
    vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
    vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
    assert vm1_fixture.wait_till_vm_is_up()
    output = vm1_fixture.run_cmd_on_vm(['cat /etc/resolv.conf'])
    resolv_output = output.values()[0]
    assert dns1_ip in resolv_output, 'DNS Server IP %s not seen in '\
        'resolv.conf of the VM' % (dns1_ip)
    assert dns2_ip in resolv_output, 'DNS Server IP %s not seen in '\
        'resolv.conf of the VM' % (dns2_ip)
    self.logger.info('DNS Server IPs are seen in resolv.conf of the VM')
    self.logger.info('Updating the subnet to remove the dns servers')
    vn1_subnet_dict = {'dns_nameservers': []}
    vn1_fixture.update_subnet(vn1_fixture.vn_subnet_objs[0]['id'],
                              vn1_subnet_dict)
    vm1_fixture.reboot()
    time.sleep(5)
    assert vm1_fixture.wait_till_vm_is_up()
    output = vm1_fixture.run_cmd_on_vm(['cat /etc/resolv.conf'])
    dns_output = output.values()[0]
    assert dns1_ip not in dns_output, 'DNS Server IP %s still seen '\
        'in resolv.conf of the VM' % (dns1_ip)
    assert dns2_ip not in dns_output, 'DNS Server IP %s still seen '\
        'in resolv.conf of the VM' % (dns2_ip)
    assert vn1_default_dns in dns_output, 'Default DNS Server %s is missing '\
        'in the resolv.conf of the VM' % (vn1_default_dns)
    self.logger.info('resolv.conf in VM has the default DNS Server..OK')
def start_dhcp_server(self, vn_fixtures, dhcp_server=None,
                      bms_node=None, namespace=None):
    subnet_ranges = list()
    for vn in vn_fixtures:
        cidr = vn.get_cidrs()[0]
        subnet_ranges.append({'start': get_an_ip(cidr, 8),
                              'end': get_an_ip(cidr, 15),
                              'mask': str(IPNetwork(cidr).netmask)})
    if dhcp_server:
        dhcp_server.run_dhcp_server(subnet_ranges)
    else:
        bms_data = self.inputs.bms_data[bms_node]
        server_ip = bms_data['mgmt_ip']
        username = bms_data['username']
        password = bms_data['password']
        run_dhcp_server(subnet_ranges, server_ip, username, password,
                        namespace)
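# For reference, the subnet_ranges payload built by start_dhcp_server above
# has the following shape; the values are illustrative, assuming a VN whose
# first CIDR is 10.1.1.0/24:
_example_subnet_ranges = [
    {'start': '10.1.1.8',        # get_an_ip(cidr, 8)
     'end': '10.1.1.15',         # get_an_ip(cidr, 15)
     'mask': '255.255.255.0'},   # str(IPNetwork(cidr).netmask)
]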
def test_ecmp_active_active_aap(self):
    image_name = 'ubuntu'
    src_node = self.inputs.compute_names[0]
    dst_node1 = self.inputs.compute_names[1]
    dst_node2 = self.inputs.compute_names[2]
    encrypt_nodes = self.inputs.compute_names[:2]
    non_encrypt_nodes = self.inputs.compute_names[2:]
    vn = self.create_vn()
    src_vm = self.create_vm(vn_fixture=vn, node_name=src_node,
                            image_name=image_name)
    dst_vm1 = self.create_vm(vn_fixture=vn, node_name=dst_node1,
                             image_name=image_name)
    dst_vm2 = self.create_vm(vn_fixture=vn, node_name=dst_node2,
                             image_name=image_name)
    self.enable_encryption(encrypt_nodes)
    self.validate_tunnels(encrypt_nodes, encrypt_nodes)
    self.validate_tunnels(vrouters=non_encrypt_nodes)
    self.check_vms_booted([src_vm, dst_vm1, dst_vm2])
    vIP = get_an_ip(vn.get_cidrs()[0], offset=10)
    for vm in [dst_vm1, dst_vm2]:
        port = vm.get_vmi_obj_from_api_server()[1][0]
        self.config_aap(port.uuid, vIP, mac=port.mac_addr,
                        aap_mode='active-active', contrail_api=True)
        cmd = 'ip addr add %s/24 dev eth0' % vIP
        vm.run_cmd_on_vm([cmd], as_sudo=True)
        vm.start_webserver()
    b_src_dst1, b_dst1_src = self.get_crypt_stats(src_node, dst_node1)
    b_src_dst2, b_dst2_src = self.get_crypt_stats(src_node, dst_node2)
    exp_output = set([dst_vm1.vm_name, dst_vm2.vm_name])
    for retry in range(1, 15):
        cmd = "curl %s:8000" % vIP
        result = src_vm.run_cmd_on_vm(cmds=[cmd])[cmd].strip()
        assert result in [dst_vm1.vm_name, dst_vm2.vm_name]
        exp_output.discard(result)
        if not exp_output:
            break
    assert not exp_output, "active-active aap doesn't seem to work"
    a_src_dst1, a_dst1_src = self.get_crypt_stats(src_node, dst_node1)
    a_src_dst2, a_dst2_src = self.get_crypt_stats(src_node, dst_node2)
    assert (b_src_dst1 != a_src_dst1) and (b_dst1_src != a_dst1_src)
    assert (b_src_dst2 == a_src_dst2) and (b_dst2_src == a_dst2_src)
def test_enable_dhcp(self):
    '''Validate dhcp-enable parameter in subnet
       Check that dhcp-enable is set to true by default
       Create a VN with subnet where dhcp is disabled
       Create a VM using that subnet
       Validate that the VM does not get an IP
    '''
    vn1_name = get_random_name('vn1')
    vn1_subnets = [get_random_cidr()]
    vn1_gateway = get_an_ip(vn1_subnets[0], 1)
    vn1_subnets = [{'cidr': vn1_subnets[0]}]
    vn1_vm1_name = get_random_name('vn1-vm1')
    vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
    assert vn1_fixture.vn_subnet_objs[0]['enable_dhcp'],\
        'DHCP is not enabled by default in the Subnet!'
    vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name,
                                 image_name='cirros')
    assert vm1_fixture.wait_till_vm_up(), 'VM not able to boot'

    # Update subnet to disable dhcp
    vn1_subnet_dict = {'enable_dhcp': False}
    vn1_fixture.update_subnet(vn1_fixture.vn_subnet_objs[0]['id'],
                              vn1_subnet_dict)
    vm1_fixture.reboot()
    time.sleep(5)
    assert vm1_fixture.wait_till_vm_is_active(), 'VM is not up on reboot!'
    time.sleep(30)
    console_log = vm1_fixture.get_console_output()
    assert 'No lease, failing' in console_log,\
        'Failure while determining if VM got a DHCP IP. Log : %s' % (
            console_log)
    self.logger.info('VM did not get an IP when DHCP is disabled..OK')

    # Update Subnet to enable DHCP
    vn1_subnet_dict = {'enable_dhcp': True}
    vn1_fixture.update_subnet(vn1_fixture.vn_subnet_objs[0]['id'],
                              vn1_subnet_dict)
    vm1_fixture.reboot()
    time.sleep(5)
    assert vm1_fixture.wait_till_vm_is_up(), 'VM is not up on reboot!'
    result_output = vm1_fixture.run_cmd_on_vm(['ifconfig -a'])
    output = result_output.values()[0]
    assert vm1_fixture.vm_ip in output,\
        'VM did not get an IP %s after enabling DHCP' % (vm1_fixture.vm_ip)
    self.logger.info('VM got DHCP IP after subnet-dhcp is enabled..OK')
def test_dhcp_relay_default_inet(self):
    '''
    Create VNs vn1, vn2
    Create a Logical Router and add respective VNs
    Configure dhcp server on a BMS instance
    Configure the QFX server accordingly
    Create 2 BMS instances on BMS2 of vn1 and vn2
    Check if the instances can get ip via DHCP
    Add additional BMS instance, if available, and test multiple dhcp clients
    '''
    bms_nodes = self.get_bms_nodes(rb_role=self.rb_role)
    vn1 = self.create_vn()
    vn2 = self.create_vn()
    dhcp_server_vn = '42.44.46.48/30'
    self.create_netns(bms_nodes[0], 'dhcp_test', dhcp_server_vn)
    lr1 = self.create_logical_router(
        [vn1, vn2],
        dhcp_relay_servers=[get_an_ip(dhcp_server_vn, offset=2)],
        devices=self.server_leafs)
    self.start_dhcp_server([vn1, vn2], bms_node=bms_nodes[0],
                           namespace='dhcp_test')
    bms1 = self.create_bms(bms_nodes[1], vlan_id=5, vn_fixture=vn1,
                           external_dhcp_server=True)
    bms2 = self.create_bms(bms_nodes[1], vlan_id=6, vn_fixture=vn2,
                           external_dhcp_server=True,
                           bond_name=bms1.bond_name,
                           port_group_name=bms1.port_group_name)
    self.do_ping_test(bms1, bms2.bms_ip)
    if len(bms_nodes) > 2:
        bms3_1 = self.create_bms(bms_nodes[2], tor_port_vlan_tag=5,
                                 vn_fixture=vn1, external_dhcp_server=True)
        bms3_2 = self.create_bms(bms_nodes[2], vlan_id=6, vn_fixture=vn2,
                                 external_dhcp_server=True,
                                 bond_name=bms3_1.bond_name,
                                 port_group_name=bms3_1.port_group_name)
        self.do_ping_test(bms3_1, bms1.bms_ip)
        self.do_ping_test(bms3_2, bms2.bms_ip)
    self.perform_cleanup(lr1)
    self.sleep(60)
    assert bms1.run_dhclient(expectation=False)[0]
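# Note on the addressing in test_dhcp_relay_default_inet above: within the
# /30 42.44.46.48/30, offset 1 (42.44.46.49) is the gateway address that
# create_netns() configures on the fabric device port, while offset 2
# (42.44.46.50) is the address given to the DHCP-server namespace itself,
# which is why that same offset-2 address is passed to the logical router
# as its dhcp_relay_servers target.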
def test1(self):
    orch_ctrl = self.connections.get_orch_ctrl()
    zones = orch_ctrl.get_zones()
    hosts = orch_ctrl.get_hosts()
    # pass only zone for vm1
    env['parameters']['availability_zone'] = zones[0]
    # pass zone as well as host for vm2
    env['parameters']['availability_zone2'] = zones[0] + ':' + hosts[0]
    offset = 5
    fixed_ip = get_an_ip(env['parameters']['vn1_subnet2_prefix'] + '/' +
                         str(env['parameters']['vn1_subnet2_prefixlen']),
                         offset=offset)
    env['parameters']['fixed_ip'] = fixed_ip
    objs = resource_handler.create(self, tmpl, env)
    resource_handler.verify_on_setup(objs)
    # objs = resource_handler.update(self, objs, tmpl, env)
    # resource_handler.verify_on_setup(objs)
    return True
def test1(self):
    hosts = self.connections.orch.get_hosts()
    zones = self.connections.orch.get_zones()
    vn1 = self.create_vn(vn_name=get_random_name('vn1'),
                         vn_subnets=get_random_cidrs('dual'),
                         option='quantum')
    # Create with only host name
    vm1 = self.create_vm(vn_fixture=vn1, vm_name=get_random_name('vm1'),
                         node_name=hosts[0])
    # Create with only zone
    vm2 = self.create_vm(vn_fixture=vn1, vm_name=get_random_name('vm2'),
                         zone=zones[0])
    # Create with both zone and host name and fixed ip
    fixed_ip = get_an_ip(vn1.subnets[0], offset=10)
    vm3 = self.create_vm(vn_fixture=vn1, vm_name=get_random_name('vm3'),
                         zone=zones[0], node_name=hosts[0],
                         fixed_ips=[fixed_ip])
    vn1.verify_on_setup()
    vm1.verify_on_setup()
    vm2.verify_on_setup()
    vm3.verify_on_setup()
    return True
def test_allocation_pools(self):
    '''Validate allocation pool config
       Create a VN with subnet having allocation pool
       Verify VMs are only created when alloc pool is available
    '''
    vn1_name = get_random_name('vn1')
    vn1_subnet_cidr = get_random_cidr('29')
    vn1_gateway = get_an_ip(vn1_subnet_cidr, 1)
    # Leave out the second IP...start from 3
    vn1_subnets = [{'cidr': vn1_subnet_cidr,
                    'allocation_pools': [
                        {'start': get_an_ip(vn1_subnet_cidr, 3),
                         'end': get_an_ip(vn1_subnet_cidr, 4)},
                        {'start': get_an_ip(vn1_subnet_cidr, 6),
                         'end': get_an_ip(vn1_subnet_cidr, 6)}],
                    }]
    vn1_vm1_name = get_random_name('vn1-vm1')
    vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
    vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name,
                                 image_name='cirros-0.3.0-x86_64-uec')
    assert vm1_fixture.wait_till_vm_is_up(), 'VM is not up!'
    assert vm1_fixture.vm_ip == get_an_ip(vn1_subnet_cidr, 3),\
        'IP of VM %s should have been %s. It is %s' % (
            vm1_fixture.vm_name, get_an_ip(vn1_subnet_cidr, 3),
            vm1_fixture.vm_ip)
    vm2_fixture = self.create_vm(vn1_fixture, get_random_name('vn1-vm1'),
                                 image_name='cirros-0.3.0-x86_64-uec')
    assert vm2_fixture.wait_till_vm_is_up(), 'VM is not up!'
    assert vm2_fixture.vm_ip == get_an_ip(vn1_subnet_cidr, 4),\
        'IP of VM %s should have been %s. It is %s' % (
            vm2_fixture.vm_name, get_an_ip(vn1_subnet_cidr, 4),
            vm2_fixture.vm_ip)
    vm3_fixture = self.create_vm(vn1_fixture, get_random_name('vn1-vm1'),
                                 image_name='cirros-0.3.0-x86_64-uec')
    assert vm3_fixture.wait_till_vm_is_up(), 'VM is not up!'
    assert vm3_fixture.vm_ip == get_an_ip(vn1_subnet_cidr, 6),\
        'IP of VM %s should have been %s. It is %s' % (
            vm3_fixture.vm_name, get_an_ip(vn1_subnet_cidr, 6),
            vm3_fixture.vm_ip)
    vm4_fixture = self.create_vm(vn1_fixture, get_random_name('vn1-vm1'),
                                 image_name='cirros-0.3.0-x86_64-uec')
    assert vm4_fixture.wait_till_vm_status('ERROR'), 'VM %s should '\
        'have failed since allocation pool is full' % (vm4_fixture.vm_name)
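# A quick sanity check of the arithmetic behind test_allocation_pools above,
# assuming the /29 layout used there: .1 is the gateway, .2 the service
# address, and the pools .3-.4 plus .6-.6 leave exactly three assignable VM
# addresses, so the fourth VM create is expected to end in ERROR.
# The helper name below is hypothetical and only used for this illustration.
def _allocation_pool_size_sketch(pools):
    from netaddr import IPAddress
    # Each pool is inclusive of both its start and end address
    return sum(int(IPAddress(p['end'])) - int(IPAddress(p['start'])) + 1
               for p in pools)

# Example:
# _allocation_pool_size_sketch([{'start': '10.1.1.3', 'end': '10.1.1.4'},
#                               {'start': '10.1.1.6', 'end': '10.1.1.6'}]) == 3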
def test_interface_mirroring(self):
    image_name = 'ubuntu'
    src_node = dst_node = self.inputs.compute_names[0]
    if len(self.inputs.compute_names) > 3:
        dst_node = self.inputs.compute_names[-1]
    mirror_node1 = self.inputs.compute_names[1]
    mirror_node2 = self.inputs.compute_names[2]
    encrypt_nodes = self.inputs.compute_names[:2]
    non_encrypt_nodes = self.inputs.compute_names[2:]
    if src_node != dst_node:
        encrypt_nodes.append(dst_node)
        non_encrypt_nodes.remove(dst_node)
    vn = self.create_vn()
    mirror_vn = self.create_vn()
    src_vm = self.create_vm(vn_fixture=vn, node_name=src_node,
                            image_name=image_name)
    dst_vm = self.create_vm(vn_fixture=vn, node_name=dst_node,
                            image_name=image_name)
    mirror1_vm = self.create_vm(vn_fixture=mirror_vn, node_name=mirror_node1,
                                image_name=image_name)
    mirror2_vm = self.create_vm(vn_fixture=mirror_vn, node_name=mirror_node2,
                                image_name=image_name)
    self.enable_encryption(encrypt_nodes)
    self.validate_tunnels(encrypt_nodes, encrypt_nodes)
    self.validate_tunnels(vrouters=non_encrypt_nodes)
    # Wait till VMs are booted up
    self.check_vms_booted([src_vm, dst_vm, mirror1_vm, mirror2_vm])
    self.setup_policy_between_vns(vn, mirror_vn)
    vIP = get_an_ip(mirror_vn.get_cidrs()[0], offset=10)
    for vm in [mirror1_vm, mirror2_vm]:
        port = vm.get_vmi_obj_from_api_server()[1][0]
        self.config_aap(port.uuid, vIP, mac=port.mac_addr,
                        aap_mode='active-active', contrail_api=True)
        cmd = 'ip addr add %s/24 dev eth0' % vIP
        vm.run_cmd_on_vm([cmd], as_sudo=True)
    src_vmi = src_vm.get_vmi_obj_from_api_server()[1][0].uuid
    self.vnc_h.enable_intf_mirroring(src_vmi, vIP)
    self.addCleanup(self.vnc_h.disable_intf_mirroring, src_vmi)
    b_src_dst1, b_dst1_src = self.get_crypt_stats(src_node, mirror_node1)
    b_src_dst2, b_dst2_src = self.get_crypt_stats(src_node, mirror_node2)
    exp_vms = set([mirror1_vm, mirror2_vm])
    for retry in range(1, 15):
        pcap_ids = list()
        for mirror_vm in exp_vms:
            pcap_id = start_tcpdump_for_vm_intf(None, [mirror_vm], None,
                                                filters='udp port 8099',
                                                pcap_on_vm=True)
            pcap_ids.append(pcap_id)
        result = src_vm.ping_to_ip(dst_vm.vm_ip, size=1200)
        filters = '| grep "length [1-9][2-9][0-9][0-9][0-9]*"'
        for pcap_id in pcap_ids:
            ignore, count = stop_tcpdump_for_vm_intf(
                None, None, None, vm_fix_pcap_pid_files=pcap_id,
                filters=filters, verify_on_all=True)
            if count and count[0]:
                exp_vms.discard(pcap_id[0][0])
        if not exp_vms:
            break
    assert not exp_vms, '%s' % exp_vms
    a_src_dst1, a_dst1_src = self.get_crypt_stats(src_node, mirror_node1)
    a_src_dst2, a_dst2_src = self.get_crypt_stats(src_node, mirror_node2)
    assert (b_src_dst2 == a_src_dst2) and (b_dst2_src == a_dst2_src)
    assert (b_src_dst1 != a_src_dst1) and (b_dst1_src != a_dst1_src)
def test_evpn_type_5_vxlan_traffic_between_vn(self):
    '''
    Configure Encapsulation order as VxLAN, MPLSoverGRE, MPLSoverUDP
    Enable VxLAN Routing under that project settings
    Create Virtual Networks
    Create Logical Routers and attach above created VNs
    Create VMs on Virtual Networks
    Verify traffic across Virtual Networks
    '''
    bms_vn_fixture = self.create_vn(vn_name='vn100',
                                    vn_subnets=['100.0.0.0/24'])
    self.setup_fixtures = self.setup_evpn_type5(
        lrs=self.lrs, vn=self.vn, vmi=self.vmi, vm=self.vm)
    lr1_fix = self.setup_fixtures['lr_fixtures']['lr1']
    lr1_fix.add_interface([bms_vn_fixture.vn_id])
    vn1_fixture = self.setup_fixtures['vn_fixtures']['vn1']
    vn2_fixture = self.setup_fixtures['vn_fixtures']['vn2']
    for spine in self.spines:
        self.setup_fixtures['lr_fixtures']['lr1'].add_physical_router(
            spine.uuid)
    self.logger.debug(
        "Sleeping for 60 secs..after extending LR to Physical Router ...")
    time.sleep(60)

    # Find out which compute nodes are part of a given logical router
    self.lrs['lr1']['node_ip_list'] = set()
    self.lrs['lr2']['node_ip_list'] = set()
    for each_vm in self.setup_fixtures['vm_fixtures']:
        vm_fix = self.setup_fixtures['vm_fixtures'][each_vm]
        for each_lr in self.lrs:
            for each_vn in self.vm[each_vm]['vn']:
                if each_vn in self.lrs[each_lr]['vn_list']:
                    self.lrs[each_lr]['node_ip_list'].add(vm_fix.vm_node_ip)

    # Verify on setup
    for each_lr in self.setup_fixtures['lr_fixtures']:
        lr_fix = self.setup_fixtures['lr_fixtures'][each_lr]
        lr_fix.verify_on_setup(self.lrs[each_lr]['node_ip_list'])

    self.logger.info(
        "Verify Traffic between VN-1 and VN-2 on Logical Router: lr1")
    send_vm_fixture = self.setup_fixtures['vm_fixtures']['vm11']
    recv_vm_fixture = self.setup_fixtures['vm_fixtures']['vm21']
    traffic_result = self.verify_traffic(sender_vm=send_vm_fixture,
                                         receiver_vm=recv_vm_fixture,
                                         proto='udp',
                                         sport=10000, dport=20000)
    self.logger.info("Traffic Tx-Pkts: %d Rx-Pkts: %d" % (
        traffic_result[0], traffic_result[1]))
    assert traffic_result[0] == traffic_result[1],\
        "Traffic between VN-1 and VN-2 on Logical Router: lr1 Failed"

    self.logger.info(
        "Verify Traffic between VN-3 and VN-4 on Logical Router: lr2")
    send_vm_fixture = self.setup_fixtures['vm_fixtures']['vm31']
    recv_vm_fixture = self.setup_fixtures['vm_fixtures']['vm41']
    traffic_result = self.verify_traffic(sender_vm=send_vm_fixture,
                                         receiver_vm=recv_vm_fixture,
                                         proto='udp',
                                         sport=10000, dport=20000)
    self.logger.info("Traffic Tx-Pkts: %d Rx-Pkts: %d" % (
        traffic_result[0], traffic_result[1]))
    assert traffic_result[0] == traffic_result[1],\
        "Traffic between VN-3 and VN-4 on Logical Router: lr2 Failed"

    bms_fixtures = []
    for bms in self.inputs.bms_data.keys():
        offset = 10 + int(self.inputs.bms_data.keys().index(bms))
        bms_ip = get_an_ip(bms_vn_fixture.get_cidrs()[0], offset=offset)
        bms_fixtures.append(self.create_bms(
            bms_name=bms,
            vn_fixture=bms_vn_fixture,
            unit=100,
            bms_ip=bms_ip,
            bms_mac=get_random_mac(),
            bms_ip_netmask='24',
            bms_gw_ip='100.0.0.1',
            static_ip=True,
            security_groups=[self.default_sg.uuid]))
    self.logger.info("Modifying SG to allow traffic from BMS to VM...")
    self.allow_default_sg_to_allow_all_on_project(self.inputs.project_name)
    self.logger.info("Modified Default Security Group Rules")

    vm11_fixture = self.setup_fixtures['vm_fixtures']['vm11']
    vm21_fixture = self.setup_fixtures['vm_fixtures']['vm21']
    vm11_ip = vm11_fixture.get_vm_ips()[0]
    vm21_ip = vm21_fixture.get_vm_ips()[0]
    self.logger.info(
        "Verify Traffic between BMS and (vn1, vn2) Logical Router: lr1")
    for bms_fix in bms_fixtures:
        assert bms_fix.ping_with_certainty(vm11_ip),\
            "Traffic from BMS to VM-11 Failed"
        assert bms_fix.ping_with_certainty(vm21_ip),\
            "Traffic from BMS to VM-21 Failed"
def test_evpn_type_5_vm_to_bms_add_rt_to_lr(self):
    '''
    Configure Encapsulation order as VxLAN, MPLSoverGRE, MPLSoverUDP
    Enable VxLAN Routing under that project settings
    Create Virtual Networks
    Create a Logical Router and attach above created VNs
    Create a VM in VN1
    Assign an IP from VN2 to a BMS
    Verify traffic between VM to BMS
    Now add a new RT to the LR
    Traffic across the VNs should continue to work
    '''
    my_lrs = {'lr1': {'vn_list': ['vn1', 'vn2'], 'vni': 70001},
              }
    my_vn = {'count': 2,
             'vn1': {'subnet': get_random_cidr(af='v4')},
             'vn2': {'subnet': get_random_cidr(af='v4')},
             }
    my_vmi = {'count': 2,
              'vmi11': {'vn': 'vn1'},  # VMI details
              'vmi21': {'vn': 'vn2'},  # VMI details
              }
    my_vm = {'count': 2,
             'launch_mode': 'distribute',
             'vm11': {'vn': ['vn1'], 'vmi': ['vmi11']},  # VM Details
             'vm21': {'vn': ['vn2'], 'vmi': ['vmi21']},  # VM Details
             }
    self.setup_fixtures = self.setup_evpn_type5(
        lrs=my_lrs, vn=my_vn, vmi=my_vmi, vm=my_vm)
    vn1_fixture = self.setup_fixtures['vn_fixtures']['vn1']
    vn2_fixture = self.setup_fixtures['vn_fixtures']['vn2']
    lr1_fix = self.setup_fixtures['lr_fixtures']['lr1']
    lr1_fix.add_interface([vn1_fixture.vn_id, vn2_fixture.vn_id])
    for spine in self.spines:
        self.setup_fixtures['lr_fixtures']['lr1'].add_physical_router(
            spine.uuid)
    self.logger.debug(
        "Sleeping for 60 secs..after extending LR to Physical Router ...")
    time.sleep(60)

    bms_fixtures = []
    bms = self.inputs.bms_data.keys()[0]
    bms_ip = get_an_ip(vn2_fixture.get_cidrs()[0], offset=100)
    bms_fixtures.append(self.create_bms(
        bms_name=bms,
        vn_fixture=vn2_fixture,
        unit=100,
        bms_ip=bms_ip,
        bms_mac=get_random_mac(),
        bms_ip_netmask='24',
        bms_gw_ip=vn2_fixture.get_subnets()[0]['gateway_ip'],
        static_ip=True,
        security_groups=[self.default_sg.uuid]))
    self.logger.info("Modifying SG to allow traffic from BMS to VM...")
    self.allow_default_sg_to_allow_all_on_project(self.inputs.project_name)
    self.logger.info("Modified Default Security Group Rules")

    vm11_fixture = self.setup_fixtures['vm_fixtures']['vm11']
    vm11_ip = vm11_fixture.get_vm_ips()[0]
    self.logger.info("Verify Traffic between BMS and VM")
    for bms_fix in bms_fixtures:
        assert bms_fix.ping_with_certainty(vm11_ip),\
            "Traffic from BMS to VM-11 Failed"

    self.logger.info('Will add a new Route-Target to the LR. Traffic '
                     'between the BMS and VM should continue to pass')
    lr1_fix.add_rt('target:64512:12345')
    self.logger.debug("Sleeping for 30 secs to allow config change to be "
                      "pushed to the Spine")
    time.sleep(30)
    for bms_fix in bms_fixtures:
        assert bms_fix.ping_with_certainty(vm11_ip),\
            "Traffic from BMS to VM-11 failed"