class TestDdpGtp(TestCase):
    """
    DDP (Dynamic Device Personalization) GTP suite: load a GTP profile on a
    Fortville NIC, then verify that flow director / cloud filter rules steer
    GTP-C and GTP-U packets to the configured PF or VF queue.
    """

    def set_up_all(self):
        """
        One-time setup: check NIC support, copy the GTP profile package to
        the DUT and read the PF/VF queue numbers from the DPDK build config.
        """
        self.verify(self.nic in ['fortville_25g'],
                    'ddp gtp can not support %s nic' % self.nic)
        self.dut_ports = self.dut.get_ports(self.nic)
        self.verify(len(self.dut_ports) >= 1, "Insufficient ports")
        self.vm0 = None
        self.env_done = False
        profile_file = 'dep/gtp.pkgo'
        profile_dst = "/tmp/"
        self.dut.session.copy_file_to(profile_file, profile_dst)
        PF_Q_strip = 'CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF'
        VF_Q_strip = 'CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF'
        self.PF_QUEUE = self.search_queue_number(PF_Q_strip)
        self.VF_QUEUE = self.search_queue_number(VF_Q_strip)

    def set_up(self):
        """Per-case setup: build the VM environment and load the profile."""
        self.setup_vm_env()
        self.load_profile()

    def search_queue_number(self, Q_strip):
        """
        Search max queue number from configuration.
        Returns the configured value as int, or None when not found.
        """
        out = self.dut.send_expect("cat config/common_base", "]# ", 10)
        # raw string so \d is a regex digit class, not a string escape
        pattern = r"(%s=)(\d*)" % Q_strip
        res = re.compile(pattern).search(out)
        if res is None:
            # print(...) call form is valid on both Python 2 and Python 3
            print(utils.RED('Search no queue number.'))
            return None
        return int(res.group(2))

    def bind_nic_driver(self, ports, driver=""):
        """
        Bind ports to the requested driver; an empty driver string selects
        each port's default kernel driver.
        """
        if driver == "igb_uio":
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                if netdev.get_nic_driver() != 'igb_uio':
                    netdev.bind_driver(driver='igb_uio')
        else:
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                driver_now = netdev.get_nic_driver()
                if driver == "":
                    driver = netdev.default_driver
                if driver != driver_now:
                    netdev.bind_driver(driver=driver)

    def setup_vm_env(self, driver='igb_uio'):
        """
        Create testing environment with VF generated from 1PF, pass the VF
        into VM0 and attach a PmdOutput session to both PF and VF.
        """
        if not self.env_done:
            self.bind_nic_driver(self.dut_ports[:1], driver="igb_uio")
            self.used_dut_port = self.dut_ports[0]
            tester_port = self.tester.get_local_port(self.used_dut_port)
            self.tester_intf = self.tester.get_interface(tester_port)
            self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1,
                                                driver=driver)
            self.sriov_vfs_port = self.dut.ports_info[
                self.used_dut_port]['vfs_port']
            for port in self.sriov_vfs_port:
                port.bind_driver('pci-stub')
            time.sleep(1)
            self.dut_testpmd = PmdOutput(self.dut)
            time.sleep(1)
            vf0_prop = {'opt_host': self.sriov_vfs_port[0].pci}
            # set up VM0 ENV
            self.vm0 = QEMUKvm(self.dut, 'vm0', 'ddp_gtp')
            self.vm0.set_vm_device(driver='pci-assign', **vf0_prop)
            try:
                self.vm0_dut = self.vm0.start()
                if self.vm0_dut is None:
                    raise Exception("Set up VM0 ENV failed!")
            except Exception as e:
                # tear down anything partially created before re-raising
                self.destroy_vm_env()
                raise Exception(e)
            self.vm0_dut_ports = self.vm0_dut.get_ports('any')
            self.vm0_testpmd = PmdOutput(self.vm0_dut)
            self.env_done = True

    def destroy_vm_env(self):
        """Tear down VM0 and release the SR-IOV VFs."""
        if getattr(self, 'vm0', None):
            self.vm0_dut.kill_all()
            self.vm0_testpmd = None
            self.vm0_dut_ports = None
            # destroy vm0
            self.vm0.stop()
            self.vm0 = None
        if getattr(self, 'used_dut_port', None):
            self.dut.destroy_sriov_vfs_by_port(self.used_dut_port)
            self.used_dut_port = None
        self.env_done = False

    def load_profile(self):
        """
        Load profile to update FVL configuration tables, profile will be
        stored in binary file and need to be passed to AQ to program FVL
        during initialization stage.
        """
        self.dut_testpmd.start_testpmd(
            "Default",
            "--pkt-filter-mode=perfect --port-topology=chained "
            "--txq=%s --rxq=%s" % (self.PF_QUEUE, self.PF_QUEUE))
        self.vm0_testpmd.start_testpmd(
            VM_CORES_MASK, "--port-topology=chained --txq=%s --rxq=%s"
            % (self.VF_QUEUE, self.VF_QUEUE))
        self.dut_testpmd.execute_cmd('port stop all')
        time.sleep(1)
        # show list before adding, for the log; result is not checked
        self.dut_testpmd.execute_cmd('ddp get list 0')
        self.dut_testpmd.execute_cmd('ddp add 0 /tmp/gtp.pkgo')
        out = self.dut_testpmd.execute_cmd('ddp get list 0')
        self.verify("Profile number is: 1" in out,
                    "Failed to load ddp profile!!!")
        self.dut_testpmd.execute_cmd('port start all')
        time.sleep(1)
        self.dut_testpmd.execute_cmd('set fwd rxonly')
        self.dut_testpmd.execute_cmd('set verbose 1')
        self.dut_testpmd.execute_cmd('start')
        self.vm0_testpmd.execute_cmd('set fwd rxonly')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')

    def gtp_packets(self, type='fdir', tunnel_pkt='gtpu', inner_L3='ipv4',
                    match_opt='matched', chk='', teid=0xF):
        """
        Generate different GTP types according to different parameters.
        Input:
            type: filter type, flow director ('fdir') or cloud filter
                ('clfter')
            tunnel_pkt: 'gtpc' or 'gtpu'
            inner_L3: GTPC has no inner L3. GTPU has no, IPV4 and IPV6
                inner L3.
            match_opt: 'matched' returns packets hitting the rule,
                'not matched' returns every other GTP packet family
            chk: checksum string inserted into the outer UDP layer
            teid: GTP teid
        Returns a dict mapping packet-type name -> scapy expression string.
        """
        pkts = []
        pkts_gtpc_pay = {
            'IPV4/GTPC':
            'Ether()/IP()/UDP(%sdport=2123)/GTP_U_Header(teid=%s)/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPC':
            'Ether()/IPv6()/UDP(%sdport=2123)/GTP_U_Header(teid=%s)/Raw("X"*20)' % (chk, teid)
        }
        pkts_gtpu_pay = {
            'IPV4/GTPU':
            'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPU':
            'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/Raw("X"*20)' % (chk, teid)
        }
        pkts_gtpu_ipv4 = {
            'IPV4/GTPU/IPV4':
            'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/Raw("X"*20)' % (chk, teid),
            'IPV4/GTPU/IPV4/FRAG':
            'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP(frag=5)/Raw("X"*20)' % (chk, teid),
            'IPV4/GTPU/IPV4/UDP':
            'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/UDP()/Raw("X"*20)' % (chk, teid),
            'IPV4/GTPU/IPV4/TCP':
            'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/TCP()/Raw("X"*20)' % (chk, teid),
            'IPV4/GTPU/IPV4/SCTP':
            'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/SCTP()/Raw("X"*20)' % (chk, teid),
            'IPV4/GTPU/IPV4/ICMP':
            'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/ICMP()/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPU/IPV4':
            'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPU/IPV4/FRAG':
            'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP(frag=5)/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPU/IPV4/UDP':
            'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/UDP()/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPU/IPV4/TCP':
            'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/TCP()/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPU/IPV4/SCTP':
            'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/SCTP()/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPU/IPV4/ICMP':
            'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/ICMP()/Raw("X"*20)' % (chk, teid)
        }
        pkts_gtpu_ipv6 = {
            'IPV4/GTPU/IPV6/FRAG':
            'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/IPv6ExtHdrFragment()/Raw("X"*20)' % (chk, teid),
            'IPV4/GTPU/IPV6':
            'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/Raw("X"*20)' % (chk, teid),
            'IPV4/GTPU/IPV6/UDP':
            'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/UDP()/Raw("X"*20)' % (chk, teid),
            'IPV4/GTPU/IPV6/TCP':
            'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/TCP()/Raw("X"*20)' % (chk, teid),
            'IPV4/GTPU/IPV6/SCTP':
            'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/SCTP()/Raw("X"*20)' % (chk, teid),
            'IPV4/GTPU/IPV6/ICMP':
            'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6(nh=58)/ICMP()/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPU/IPV6/FRAG':
            'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/IPv6ExtHdrFragment()/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPU/IPV6':
            'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPU/IPV6/UDP':
            'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/UDP()/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPU/IPV6/TCP':
            'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/TCP()/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPU/IPV6/SCTP':
            'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/SCTP()/Raw("X"*20)' % (chk, teid),
            'IPV6/GTPU/IPV6/ICMP':
            'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6(nh=58)/ICMP()/Raw("X"*20)' % (chk, teid)
        }
        # FIX: compare strings with ==, not identity ('is' only worked by
        # CPython string-interning accident).
        if match_opt == 'matched':
            if tunnel_pkt == 'gtpc' and inner_L3 is None:
                pkts = pkts_gtpc_pay
            if tunnel_pkt == 'gtpu' and inner_L3 is None:
                pkts = pkts_gtpu_pay
            if tunnel_pkt == 'gtpu' and inner_L3 == 'ipv4':
                pkts = pkts_gtpu_ipv4
            if tunnel_pkt == 'gtpu' and inner_L3 == 'ipv6':
                pkts = pkts_gtpu_ipv6
        if match_opt == 'not matched':
            if type == 'fdir':
                # every GTP family except the one the rule targets
                if tunnel_pkt == 'gtpc' and inner_L3 is None:
                    pkts = dict(pkts_gtpu_pay.items() +
                                pkts_gtpu_ipv4.items() +
                                pkts_gtpu_ipv6.items())
                if tunnel_pkt == 'gtpu' and inner_L3 is None:
                    pkts = dict(pkts_gtpc_pay.items() +
                                pkts_gtpu_ipv4.items() +
                                pkts_gtpu_ipv6.items())
                if tunnel_pkt == 'gtpu' and inner_L3 == 'ipv4':
                    pkts = dict(pkts_gtpc_pay.items() +
                                pkts_gtpu_pay.items() +
                                pkts_gtpu_ipv6.items())
                if tunnel_pkt == 'gtpu' and inner_L3 == 'ipv6':
                    pkts = dict(pkts_gtpc_pay.items() +
                                pkts_gtpu_pay.items() +
                                pkts_gtpu_ipv4.items())
            if type == 'clfter':
                # cloud filter only distinguishes GTPC vs GTPU
                if tunnel_pkt == 'gtpc':
                    pkts = dict(pkts_gtpu_pay.items() +
                                pkts_gtpu_ipv4.items() +
                                pkts_gtpu_ipv6.items())
                if tunnel_pkt == 'gtpu':
                    pkts = pkts_gtpc_pay
        return pkts

    def gtp_test(self, type='fdir', port='pf', tunnel_pkt='gtpu',
                 inner_L3='ipv4'):
        """
        Send GTP packet to dut, receive packet from configured queue.
        Input: filter type ('fdir'/'clfter'), port type ('pf' or
        'vf id N'), packet type, inner L3 type.
        """
        queue = random.randint(1, self.PF_QUEUE - 1)
        if port != 'pf':
            queue = random.randint(1, self.VF_QUEUE - 1)
        random_teid = random.randint(0x0, 0xFFFFFFFF)
        correct_teid = hex(random_teid)
        wrong_teid = hex((random_teid + 2) % int(0xFFFFFFFF))
        if type == 'fdir':
            if inner_L3 is None:
                self.dut_testpmd.execute_cmd(
                    'flow create 0 ingress pattern eth / ipv4 / udp / '
                    '%s teid is %s / end actions queue index %d / end'
                    % (tunnel_pkt, correct_teid, queue))
            else:
                self.dut_testpmd.execute_cmd(
                    'flow create 0 ingress pattern eth / ipv4 / udp / '
                    '%s teid is %s / %s / end actions queue index %d / end'
                    % (tunnel_pkt, correct_teid, inner_L3, queue))
        if type == 'clfter':
            self.dut_testpmd.execute_cmd(
                'flow create 0 ingress pattern eth / ipv4 / udp / '
                '%s teid is %s / end actions %s / queue index %d / end'
                % (tunnel_pkt, correct_teid, port, queue))
        for match_opt in ['matched', 'not matched']:
            teid = correct_teid
            pkts = []
            for teid_opt in ['correct teid', 'wrong teid']:
                chk = ''
                for chksum_opt in ['good chksum', 'bad chksum']:
                    pkts = self.gtp_packets(type, tunnel_pkt, inner_L3,
                                            match_opt, chk, teid)
                    for packet_type in pkts.keys():
                        self.tester.scapy_append(
                            'sendp([%s], iface="%s")'
                            % (pkts[packet_type], self.tester_intf))
                        self.tester.scapy_execute()
                        if port == 'pf':
                            out = self.dut.get_session_output(timeout=2)
                        else:
                            out = self.vm0_dut.get_session_output(timeout=2)
                        self.verify(
                            "port 0/queue %d" % queue in out,
                            "Failed to receive packet in this queue!!!")
                        if port == 'pf':
                            # PF prints full ptype decode; check each layer
                            layerparams = ['L3_', 'TUNNEL_',
                                           'INNER_L3_', 'INNER_L4_']
                            ptypes = packet_type.split('/')
                            endparams = ['_EXT_UNKNOWN', '',
                                         '_EXT_UNKNOWN', '']
                            for layerparam, ptype, endparam in zip(
                                    layerparams, ptypes, endparams):
                                layer_type = layerparam + ptype + endparam
                                self.verify(
                                    layer_type in out,
                                    "Failed to output ptype information!!!")
                        if queue != 0 and type == 'fdir':
                            self.verify("PKT_RX_FDIR" in out,
                                        "Failed to test flow director!!!")
                    # wrong-teid and not-matched packets are only sent with
                    # a good checksum
                    if teid == wrong_teid or match_opt == 'not matched':
                        break
                    chk = 'chksum=0x1234,'
                if match_opt == 'not matched':
                    break
                # unmatched teid should land on the default queue 0
                queue = 0
                teid = wrong_teid

    def test_fdir_gtpc_pf(self):
        """
        GTP is supported by NVM with profile updated. Select flow director
        to do classfication, send gtpc packet to PF, check PF could
        receive packet using configured queue, checksum is good.
        """
        self.gtp_test(type='fdir', port='pf', tunnel_pkt='gtpc',
                      inner_L3=None)

    def test_fdir_gtpu_pf(self):
        """
        GTP is supported by NVM with profile updated. Select flow director
        to do classfication, send gtpu packet to PF, check PF could
        receive packet using configured queue, checksum is good.
        """
        self.gtp_test(type='fdir', port='pf', tunnel_pkt='gtpu',
                      inner_L3=None)
        self.gtp_test(type='fdir', port='pf', tunnel_pkt='gtpu',
                      inner_L3='ipv4')
        self.gtp_test(type='fdir', port='pf', tunnel_pkt='gtpu',
                      inner_L3='ipv6')

    def test_clfter_gtpc_pf(self):
        """
        GTP is supported by NVM with profile updated. Select cloud filter,
        send gtpc packet to PF, check PF could receive packet using
        configured queue, checksum is good.
        """
        self.gtp_test(type='clfter', port='pf', tunnel_pkt='gtpc',
                      inner_L3=None)

    def test_clfter_gtpu_pf(self):
        """
        GTP is supported by NVM with profile updated. Select cloud filter,
        send gtpu packet to PF, check PF could receive packet using
        configured queue, checksum is good.
        """
        self.gtp_test(type='clfter', port='pf', tunnel_pkt='gtpu',
                      inner_L3=None)
        self.gtp_test(type='clfter', port='pf', tunnel_pkt='gtpu',
                      inner_L3='ipv4')
        self.gtp_test(type='clfter', port='pf', tunnel_pkt='gtpu',
                      inner_L3='ipv6')

    def test_clfter_gtpc_vf(self):
        """
        GTP is supported by NVM with profile updated. Select cloud filter,
        send gtpc packet to VF, check VF could receive packet using
        configured queue, checksum is good.
        """
        self.gtp_test(type='clfter', port='vf id 0', tunnel_pkt='gtpc',
                      inner_L3=None)

    def test_clfter_gtpu_vf(self):
        """
        GTP is supported by NVM with profile updated. Select cloud filter,
        send gtpu packet to VF, check VF could receive packet using
        configured queue, checksum is good.
        """
        self.gtp_test(type='clfter', port='vf id 0', tunnel_pkt='gtpu',
                      inner_L3=None)
        self.gtp_test(type='clfter', port='vf id 0', tunnel_pkt='gtpu',
                      inner_L3='ipv4')
        self.gtp_test(type='clfter', port='vf id 0', tunnel_pkt='gtpu',
                      inner_L3='ipv6')

    def tear_down(self):
        """Reset the DDP state via register writes, then quit testpmd."""
        if self.vm0_testpmd:
            self.dut_testpmd.execute_cmd('write reg 0 0xb8190 1')
            self.dut_testpmd.execute_cmd('write reg 0 0xb8190 2')
            self.vm0_testpmd.quit()
            self.dut_testpmd.quit()

    def tear_down_all(self):
        self.destroy_vm_env()
class Testddp_mpls(TestCase):
    """
    DDP MPLS suite: load an MPLS profile on a Fortville NIC and verify that
    flow rules steer MPLS-in-UDP / MPLS-in-GRE packets to the configured
    PF or VF queue.
    """

    def set_up_all(self):
        """One-time setup: check NIC and copy the MPLS profile to the DUT."""
        self.verify('fortville' in self.nic,
                    'ddp mpls can not support %s nic' % self.nic)
        self.dut_ports = self.dut.get_ports(self.nic)
        self.verify(len(self.dut_ports) >= 1, "Insufficient ports")
        self.vm0 = None
        self.env_done = False
        profile_file = r'dep/mpls.pkgo'
        profile_dst = "/tmp/"
        self.dut.session.copy_file_to(profile_file, profile_dst)

    def set_up(self):
        self.setup_vm_env()

    def bind_nic_driver(self, ports, driver=""):
        """
        Bind ports to the requested driver; an empty driver string selects
        each port's default kernel driver.
        """
        if driver == "igb_uio":
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                if netdev.get_nic_driver() != 'igb_uio':
                    netdev.bind_driver(driver='igb_uio')
        else:
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                driver_now = netdev.get_nic_driver()
                if driver == "":
                    driver = netdev.default_driver
                if driver != driver_now:
                    netdev.bind_driver(driver=driver)

    def setup_vm_env(self, driver='igb_uio'):
        """
        Create testing environment with VF generated from 1PF; start
        testpmd on both PF and VF (testpmd restarts on every case).
        """
        if not self.env_done:
            self.bind_nic_driver(self.dut_ports[:1], driver="igb_uio")
            self.used_dut_port = self.dut_ports[0]
            tester_port = self.tester.get_local_port(self.used_dut_port)
            self.tester_intf = self.tester.get_interface(tester_port)
            self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1,
                                                driver=driver)
            self.sriov_vfs_port = self.dut.ports_info[
                self.used_dut_port]['vfs_port']
            for port in self.sriov_vfs_port:
                port.bind_driver('pci-stub')
            time.sleep(1)
            self.dut_testpmd = PmdOutput(self.dut)
            time.sleep(1)
            vf0_prop = {'opt_host': self.sriov_vfs_port[0].pci}
            # set up VM0 ENV
            self.vm0 = QEMUKvm(self.dut, 'vm0', 'ddp_mpls')
            self.vm0.set_vm_device(driver='pci-assign', **vf0_prop)
            try:
                self.vm0_dut = self.vm0.start()
                if self.vm0_dut is None:
                    raise Exception("Set up VM0 ENV failed!")
            except Exception as e:
                self.destroy_vm_env()
                raise Exception(e)
            self.vm0_dut_ports = self.vm0_dut.get_ports('any')
            self.vm0_testpmd = PmdOutput(self.vm0_dut)
            self.env_done = True
        # testpmd is quit in tear_down, so (re)start it for every case
        self.dut_testpmd.start_testpmd(
            "Default", "--port-topology=chained --txq=%s --rxq=%s"
            % (PF_MAX_QUEUE, PF_MAX_QUEUE))
        self.vm0_testpmd.start_testpmd(
            VM_CORES_MASK, "--port-topology=chained --txq=%s --rxq=%s"
            % (VF_MAX_QUEUE, VF_MAX_QUEUE))

    def destroy_vm_env(self):
        """Tear down VM0 and release the SR-IOV VFs."""
        if getattr(self, 'vm0', None):
            self.vm0_dut.kill_all()
            self.vm0_testpmd = None
            self.vm0_dut_ports = None
            # destroy vm0
            self.vm0.stop()
            self.vm0 = None
        if getattr(self, 'used_dut_port', None):
            self.dut.destroy_sriov_vfs_by_port(self.used_dut_port)
            self.used_dut_port = None
        self.env_done = False

    def load_profile(self):
        """
        Load profile to update FVL configuration tables, profile will be
        stored in binary file and need to be passed to AQ to program FVL
        during initialization stage.
        """
        self.dut_testpmd.execute_cmd('port stop all')
        time.sleep(1)
        out = self.dut_testpmd.execute_cmd('ddp get list 0')
        self.verify("Profile number is: 0" in out,
                    "Failed to get ddp profile info list!!!")
        # keep the original image as /tmp/mpls.bak so it can be deleted in
        # tear_down
        self.dut_testpmd.execute_cmd('ddp add 0 /tmp/mpls.pkgo,/tmp/mpls.bak')
        out = self.dut_testpmd.execute_cmd('ddp get list 0')
        self.verify("Profile number is: 1" in out,
                    "Failed to load ddp profile!!!")
        self.dut_testpmd.execute_cmd('port start all')
        time.sleep(1)

    def mpls_test(self, port='pf', pkt='udp'):
        """
        Send mpls packet to dut, receive packet from configured queue.
        Input: port type ('pf' or 'vf id N'), packet type ('udp' or 'gre').
        """
        if port == 'pf':
            queue = random.randint(1, PF_MAX_QUEUE - 1)
            self.dut_testpmd.execute_cmd('set fwd rxonly')
            self.dut_testpmd.execute_cmd('set verbose 1')
            self.dut_testpmd.execute_cmd('start')
        else:
            queue = random.randint(1, VF_MAX_QUEUE - 1)
            self.vm0_testpmd.execute_cmd('set fwd rxonly')
            self.vm0_testpmd.execute_cmd('set verbose 1')
            self.vm0_testpmd.execute_cmd('start')
        random_label = random.randint(0x0, 0xFFFFF)
        label = hex(random_label)
        wrong_label = hex((random_label + 2) % int(0xFFFFF))
        self.dut_testpmd.execute_cmd(
            'flow create 0 ingress pattern eth / ipv4 / %s / '
            'mpls label is %s / end actions %s / queue index %d / end'
            % (pkt, label, port, queue))
        # first pass: matching label -> configured queue;
        # second pass: wrong label -> default queue 0
        for times in range(2):
            if pkt == 'udp':
                pkts = {
                    'mpls/good chksum udp':
                    'Ether()/IP()/UDP(dport=6635)/MPLS(label=%s)/Ether()/IP()/TCP()' % label,
                    'mpls/bad chksum udp':
                    'Ether()/IP()/UDP(chksum=0x1234,dport=6635)/MPLS(label=%s)/Ether()/IP()/TCP()' % label
                }
            else:
                pkts = {
                    'mpls/good chksum gre':
                    'Ether()/IP(proto=47)/GRE(proto=0x8847)/MPLS(label=%s)/Ether()/IP()/UDP()' % label,
                    'mpls/bad chksum gre':
                    'Ether()/IP(proto=47)/GRE(chksum=0x1234,proto=0x8847)/MPLS(label=%s)/Ether()/IP()/UDP()' % label
                }
            for packet_type in pkts.keys():
                self.tester.scapy_append(
                    'sendp([%s], iface="%s")'
                    % (pkts[packet_type], self.tester_intf))
                self.tester.scapy_execute()
                if port == 'pf':
                    out = self.dut.get_session_output(timeout=2)
                else:
                    out = self.vm0_dut.get_session_output(timeout=2)
                self.verify("port 0/queue %d" % queue in out,
                            "Failed to receive packet in this queue!!!")
                # hardware recomputes the checksum, so even the bad-chksum
                # packet must report a good L4 checksum flag
                self.verify("PKT_RX_L4_CKSUM_GOOD" in out,
                            "Failed to check CKSUM!!!")
            label = wrong_label
            queue = 0

    def test_load_ddp(self):
        """
        Load profile to update FVL configuration tables.
        """
        self.load_profile()

    def test_mpls_udp_pf(self):
        """
        MPLS is supported by NVM with profile updated. Send mpls udp packet
        to PF, check PF could receive packet using configured queue,
        checksum is good.
        """
        self.load_profile()
        self.mpls_test(port='pf', pkt='udp')

    def test_mpls_gre_pf(self):
        """
        MPLS is supported by NVM with profile updated. Send mpls gre packet
        to PF, check PF could receive packet using configured queue,
        checksum is good.
        """
        self.load_profile()
        self.mpls_test(port='pf', pkt='gre')

    def test_mpls_udp_vf(self):
        """
        MPLS is supported by NVM with profile updated. Send mpls udp packet
        to VF, check VF could receive packet using configured queue,
        checksum is good.
        """
        self.load_profile()
        self.mpls_test(port='vf id 0', pkt='udp')

    def test_mpls_gre_vf(self):
        """
        MPLS is supported by NVM with profile updated. Send mpls gre packet
        to VF, check VF could receive packet using configured queue,
        checksum is good.
        """
        self.load_profile()
        self.mpls_test(port='vf id 0', pkt='gre')

    def tear_down(self):
        """Remove the loaded profile (if any) and quit both testpmds."""
        self.vm0_testpmd.execute_cmd('stop')
        self.dut_testpmd.execute_cmd('stop')
        out = self.dut_testpmd.execute_cmd('ddp get list 0')
        if "Profile number is: 0" not in out:
            self.dut_testpmd.execute_cmd('port stop all')
            time.sleep(1)
            self.dut_testpmd.execute_cmd('ddp del 0 /tmp/mpls.bak')
            out = self.dut_testpmd.execute_cmd('ddp get list 0')
            self.verify("Profile number is: 0" in out,
                        "Failed to delete mpls profile!!!")
            self.dut_testpmd.execute_cmd('port start all')
        self.vm0_testpmd.quit()
        self.dut_testpmd.quit()

    def tear_down_all(self):
        self.destroy_vm_env()
class TestDynamicQueue(TestCase):
    """
    Dynamic queue suite: stop, reconfigure (ring size) and restart
    individual Rx/Tx queues at runtime and verify traffic behaviour.
    """

    def set_up_all(self):
        """Read the PF queue count from the build config and cache ports."""
        self.dut_ports = self.dut.get_ports(self.nic)
        self.verify(len(self.dut_ports) >= 1, "Insufficient ports")
        out = self.dut.send_expect("cat config/common_base", "]# ", 10)
        self.PF_Q_strip = 'CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF'
        # raw string so \d is a regex digit class, not a string escape
        pattern = r"%s=(\d*)" % self.PF_Q_strip
        self.PF_QUEUE = self.element_strip(out, pattern)
        self.used_dut_port = self.dut_ports[0]
        tester_port = self.tester.get_local_port(self.used_dut_port)
        self.tester_intf = self.tester.get_interface(tester_port)
        self.dut_testpmd = PmdOutput(self.dut)

    def set_up(self):
        # Because of fortville spirit limitation, can't use 2 ports for
        # testing: whitelist only the first port.
        if self.nic in ["fortville_spirit"]:
            self.dut_testpmd.start_testpmd(
                "Default", "--port-topology=chained --txq=%s --rxq=%s"
                % (self.PF_QUEUE, self.PF_QUEUE),
                eal_param="-w %s"
                % (self.dut.get_port_pci(self.dut_ports[0])))
        elif self.nic in ["cavium_a063"]:
            eal_opts = ""
            for port in self.dut_ports:
                eal_opts += "-w %s,max_pools=256 " % (
                    self.dut.get_port_pci(self.dut_ports[port]))
            self.dut_testpmd.start_testpmd(
                "Default", "--port-topology=chained --txq=%s --rxq=%s"
                % (self.PF_QUEUE, self.PF_QUEUE), eal_param=eal_opts)
        else:
            self.dut_testpmd.start_testpmd(
                "Default", "--port-topology=chained --txq=%s --rxq=%s"
                % (self.PF_QUEUE, self.PF_QUEUE))

    def element_strip(self, out, pattern):
        """
        Strip and get queue number (or ring size) from command output.
        Returns the matched group as int, or None when nothing matched.
        """
        res = re.compile(pattern, re.DOTALL).search(out)
        if res is None:
            # print(...) call form is valid on both Python 2 and Python 3
            print(utils.RED('Fail to search number.'))
            return None
        return int(res.group(1))

    def send_packet(self):
        """
        Generate packets and send them to dut: twice as many flows as PF
        queues so every queue should see traffic under RSS.
        """
        mac = self.dut.get_mac_address(0)
        for i in range(self.PF_QUEUE * 2):
            pkt = Packet(pkt_type='IP_RAW')
            pkt.config_layer('ether', {'dst': mac})
            pkt.config_layer('ipv4', {
                'dst': '192.168.0.%d' % i,
                'src': '191.168.0.1'
            })
            pkt.send_pkt(tx_port=self.tester_intf)

    def rxq_setup_test(self, chgflag=0):
        """
        Dynamic to setup rxq and reconfigure ring size at runtime.
        chgflag: reconfigure ring size flag
            1: reconfigure Rx ring size
            0: no change on Rx ring size
        """
        queue = list()
        for i in range(test_loop):
            queue.append(random.randint(1, self.PF_QUEUE - 1))
            self.dut_testpmd.execute_cmd('port 0 rxq %d stop' % queue[i])
        self.dut_testpmd.execute_cmd('set fwd rxonly')
        self.dut_testpmd.execute_cmd('start')
        self.send_packet()
        self.dut.get_session_output(timeout=10)
        out = self.dut_testpmd.execute_cmd('stop')
        # Check Rx stopped queues can't receive packets
        for i in range(test_loop):
            self.verify(
                "Forward Stats for RX Port= 0/Queue=%2d" % queue[i]
                not in out, "Fail to verify rxq stop!")
        if chgflag == 1:
            for i in range(test_loop):
                out = self.dut_testpmd.execute_cmd('show rxq info 0 %d'
                                                   % queue[i])
                qring_strip = 'Number of RXDs: '
                pattern = "%s([0-9]+)" % qring_strip
                qringsize = self.element_strip(out, pattern)
                # pick a different, valid ring size
                chg_qringsize = qringsize % 1024 + 256
                self.dut_testpmd.execute_cmd(
                    'port config 0 rxq %d ring_size %d'
                    % (queue[i], chg_qringsize))
                self.dut_testpmd.execute_cmd('port 0 rxq %d setup'
                                             % queue[i])
                out = self.dut_testpmd.execute_cmd('show rxq info 0 %d'
                                                   % queue[i])
                chk_qringsize = self.element_strip(out, pattern)
                self.verify(chk_qringsize == chg_qringsize,
                            "Fail to change ring size at runtime!")
        for i in range(test_loop):
            # when the ring size changed, 'setup' already ran above
            if chgflag == 0:
                self.dut_testpmd.execute_cmd('port 0 rxq %d setup'
                                             % queue[i])
            self.dut_testpmd.execute_cmd('port 0 rxq %d start' % queue[i])
        self.dut_testpmd.execute_cmd('start')
        self.send_packet()
        self.dut.get_session_output(timeout=10)
        out = self.dut_testpmd.execute_cmd('stop')
        # Check Rx setup queues could receive packets
        for i in range(test_loop):
            self.verify(
                "Forward Stats for RX Port= 0/Queue=%2d" % queue[i] in out,
                "Fail to setup rxq %d at runtime" % queue[i])

    def txq_setup_test(self, chgflag=0):
        """
        Dynamic to setup txq and reconfigure ring size at runtime.
        chgflag: reconfigure ring size flag
            1: reconfigure Tx ring size
            0: no change on Tx ring size
        """
        for i in range(test_loop):
            queue = random.randint(1, self.PF_QUEUE - 1)
            out = self.dut_testpmd.execute_cmd('show txq info 0 %d' % queue)
            qring_strip = 'Number of TXDs: '
            pattern = "%s([0-9]+)" % qring_strip
            qringsize = self.element_strip(out, pattern)
            self.dut_testpmd.execute_cmd('port 0 txq %d stop' % queue)
            self.dut_testpmd.execute_cmd('set fwd txonly')
            self.dut_testpmd.execute_cmd('start')
            time.sleep(10)
            out = self.dut_testpmd.execute_cmd('stop')
            tx_num = qringsize - 1
            # Check Tx stopped queue only transmits qringsize-1 packets
            self.verify("TX-packets: %d" % tx_num in out,
                        "Fail to stop txq at runtime")
            if chgflag == 1:
                chg_qringsize = qringsize % 1024 + 256
                self.dut_testpmd.execute_cmd(
                    'port config 0 txq %d ring_size %d'
                    % (queue, chg_qringsize))
                self.dut_testpmd.execute_cmd('port 0 txq %d setup' % queue)
                out = self.dut_testpmd.execute_cmd('show txq info 0 %d'
                                                   % queue)
                chk_qringsize = self.element_strip(out, pattern)
                self.verify(chk_qringsize == chg_qringsize,
                            "Fail to change ring size at runtime!")
            if chgflag == 0:
                self.dut_testpmd.execute_cmd('port 0 txq %d setup' % queue)
            self.dut_testpmd.execute_cmd('port 0 txq %d start' % queue)
            self.dut_testpmd.execute_cmd('start')
            time.sleep(10)
            out = self.dut_testpmd.execute_cmd('stop')
            # Check Tx setup queue could transmit packets normally, not
            # only qringsize-1 packets
            self.verify("TX-packets: %d" % tx_num not in out,
                        "Fail to setup txq at runtime")
            if chgflag == 1:
                chgtx_num = chg_qringsize - 1
                self.verify("TX-packets: %d" % chgtx_num not in out,
                            "Fail to change txq ring size at runtime")

    def test_rxq_setup(self):
        """
        Dynamic to setup rxq test
        """
        self.rxq_setup_test()

    def test_rxq_chgring_setup(self):
        """
        Dynamic to setup rxq and change ring size test
        """
        self.rxq_setup_test(chgflag=1)

    def test_txq_setup(self):
        """
        Dynamic to setup txq test
        """
        self.txq_setup_test()

    def test_txq_chgring_setup(self):
        """
        Dynamic to setup txq and change ring size test
        """
        self.txq_setup_test(chgflag=1)

    def tear_down(self):
        self.dut_testpmd.quit()

    def tear_down_all(self):
        pass
class TestVfJumboFrame(TestCase):
    """
    VF jumbo-frame suite: pass one VF into a VM and verify forwarding of
    normal / jumbo sized packets with and without jumbo support enabled.
    """

    # VF assignment drivers this suite knows how to use
    supported_vf_driver = ['pci-stub', 'vfio-pci']

    def set_up_all(self):
        """One-time setup: pick ports, VF driver and enable tester MTU."""
        self.dut_ports = self.dut.get_ports(self.nic)
        self.verify(len(self.dut_ports) >= 1, "Insufficient ports")
        self.vm0 = None
        self.env_done = False
        self.port = self.dut_ports[0]
        self.vm_port = 0
        # keep the core reservation side effect of the original call
        self.dut.get_core_list("1S/1C/1T")
        self.port_mask = utils.create_mask([self.port])

        # set vf assign method and vf driver
        self.dut.send_expect('modprobe vfio-pci', '#')
        self.vf_driver = self.get_suite_cfg()['vf_driver']
        if self.vf_driver is None:
            self.vf_driver = 'pci-stub'
        self.verify(self.vf_driver in self.supported_vf_driver,
                    "Unspported vf driver")
        if self.vf_driver == 'pci-stub':
            self.vf_assign_method = 'pci-assign'
        else:
            self.vf_assign_method = 'vfio-pci'
            self.dut.send_expect('modprobe vfio-pci', '#')

        # enable tester mtu
        tester_port = self.tester.get_local_port(self.port)
        self.netobj = self.tester.ports_info[tester_port]['port']
        self.netobj.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU + 100)

    def set_up(self):
        self.setup_vm_env()

    def bind_nic_driver(self, ports, driver=""):
        """
        Bind ports to the requested driver; an empty driver string selects
        each port's default kernel driver.
        """
        # modprobe vfio driver
        if driver == "vfio-pci":
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                if netdev.get_nic_driver() != 'vfio-pci':
                    netdev.bind_driver(driver='vfio-pci')
        elif driver == "igb_uio":
            # igb_uio should insmod as default, no need to check
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                if netdev.get_nic_driver() != 'igb_uio':
                    netdev.bind_driver(driver='igb_uio')
        else:
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                driver_now = netdev.get_nic_driver()
                # FIX: the original compared against None, but callers pass
                # "" (and the default is "") to request the default driver,
                # so the default_driver fallback never triggered.
                if not driver:
                    driver = netdev.default_driver
                if driver != driver_now:
                    netdev.bind_driver(driver=driver)

    def setup_vm_env(self, driver='default'):
        """
        Create testing environment with 1VF generated from 1PF
        """
        if self.env_done:
            return

        # bind to default driver
        self.bind_nic_driver(self.dut_ports[:1], driver="")
        self.used_dut_port = self.dut_ports[0]
        self.host_intf = self.dut.ports_info[self.used_dut_port]['intf']
        tester_port = self.tester.get_local_port(self.used_dut_port)
        self.tester_intf = self.tester.get_interface(tester_port)

        self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 1,
                                            driver=driver)
        self.sriov_vfs_port = self.dut.ports_info[
            self.used_dut_port]['vfs_port']
        self.vf_mac = "00:10:00:00:00:00"
        self.dut.send_expect(
            "ip link set %s vf 0 mac %s" % (self.host_intf, self.vf_mac),
            "# ")

        try:
            for port in self.sriov_vfs_port:
                port.bind_driver(self.vf_driver)
            time.sleep(1)
            vf_popt = {'opt_host': self.sriov_vfs_port[0].pci}

            # set up VM ENV
            self.vm = QEMUKvm(self.dut, 'vm0', 'vf_jumboframe')
            self.vm.set_vm_device(driver=self.vf_assign_method, **vf_popt)
            self.vm_dut = self.vm.start()
            if self.vm_dut is None:
                raise Exception("Set up VM ENV failed!")

            TESTPMD_MAIN = "app/test-pmd/testpmd.c"
            if self.kdriver == "ixgbe":
                # ixgbe VF needs jumbo_frame forced on in testpmd source
                self.vm_dut.send_expect(
                    "sed -i -e 's/.jumbo_frame = .*$/.jumbo_frame = 1,/g' %s"
                    % TESTPMD_MAIN, "# ")
                self.vm_dut.build_install_dpdk(self.target)

            self.vm_testpmd = PmdOutput(self.vm_dut)
        except Exception as e:
            self.destroy_vm_env()
            raise Exception(e)

        self.env_done = True

    def destroy_vm_env(self):
        """Tear down the VM and restore the PF to its default driver."""
        if getattr(self, 'vm', None):
            if getattr(self, 'vm_dut', None):
                self.vm_dut.kill_all()
            self.vm_testpmd = None
            self.vm_dut_ports = None
            # destroy vm0
            self.vm.stop()
            self.dut.virt_exit()
            time.sleep(3)
            self.vm = None
        if getattr(self, 'used_dut_port', None) is not None:
            self.dut.destroy_sriov_vfs_by_port(self.used_dut_port)
            self.used_dut_port = None
        self.bind_nic_driver(self.dut_ports[:1], driver='default')
        self.env_done = False

    def jumboframes_get_stat(self, portid, rx_tx):
        """
        Get packets number from port statistic
        Returns [packets, errors, bytes] for the requested direction, or
        None for an unknown direction.
        """
        stats = self.vm_testpmd.get_pmd_stats(portid)
        if rx_tx == "rx":
            return [stats['RX-packets'], stats['RX-errors'],
                    stats['RX-bytes']]
        elif rx_tx == "tx":
            return [stats['TX-packets'], stats['TX-errors'],
                    stats['TX-bytes']]
        else:
            return None

    def jumboframes_send_packet(self, pktsize, received=True):
        """
        Send 1 packet to portid and check, via testpmd statistics deltas,
        that it was forwarded (received=True) or dropped (received=False).
        """
        tx_pkts_ori, _, tx_bytes_ori = [
            int(_) for _ in self.jumboframes_get_stat(self.vm_port, "tx")]
        rx_pkts_ori, rx_err_ori, rx_bytes_ori = [
            int(_) for _ in self.jumboframes_get_stat(self.vm_port, "rx")]

        mac = self.vm_dut.get_mac_address(self.vm_port)
        pkt = Packet(pkt_type='UDP', pkt_len=pktsize)
        pkt.config_layer('ether', {'dst': mac})
        pkt.send_pkt(tx_port=self.tester_intf)

        time.sleep(1)

        # FIX: read the "after" TX stats from the same VM port used for the
        # baseline; the original read self.port, which is a DUT port index
        # and only coincides with the VM port when it happens to be 0.
        tx_pkts, _, tx_bytes = [
            int(_) for _ in self.jumboframes_get_stat(self.vm_port, "tx")]
        rx_pkts, rx_err, rx_bytes = [
            int(_) for _ in self.jumboframes_get_stat(self.vm_port, "rx")]

        tx_pkts -= tx_pkts_ori
        tx_bytes -= tx_bytes_ori
        rx_pkts -= rx_pkts_ori
        rx_bytes -= rx_bytes_ori
        rx_err -= rx_err_ori

        if received:
            self.verify((rx_pkts == 1) and (tx_pkts == 1),
                        "Packet forward assert error")
            if self.kdriver == "ixgbe":
                # ixgbe byte counters exclude the 4-byte CRC
                self.verify((rx_bytes + 4) == pktsize,
                            "Rx packet size should be packet size - 4")
            else:
                self.verify(rx_bytes == pktsize,
                            "Tx packet size should be equal to packet size")
            if self.kdriver == "igb":
                self.verify(tx_bytes == pktsize,
                            "Tx packet size should be packet size")
            else:
                self.verify((tx_bytes + 4) == pktsize,
                            "Tx packet size should be packet size - 4")
        else:
            self.verify(rx_err == 1 or tx_pkts == 0,
                        "Packet drop assert error")

    def test_vf_normal_nojumbo(self):
        """
        This case aims to test transmitting normal size packet without
        jumbo enable
        """
        # should enable jumbo on host
        self.dutobj = self.dut.ports_info[self.port]['port']
        self.dutobj.enable_jumbo(framesize=ETHER_STANDARD_MTU)

        self.vm_testpmd.start_testpmd(
            "Default", "--max-pkt-len=%d --port-topology=loop "
            "--txqflags=0x0" % (ETHER_STANDARD_MTU))
        self.vm_testpmd.execute_cmd("set fwd mac")
        self.vm_testpmd.execute_cmd("start")

        self.jumboframes_send_packet(ETHER_STANDARD_MTU - 1)
        self.jumboframes_send_packet(ETHER_STANDARD_MTU)

        self.vm_testpmd.execute_cmd("stop")
        self.vm_testpmd.quit()

    def test_vf_normal_withjumbo(self):
        """
        When jumbo frame supported, this case is to verify that the normal
        size packet forwrding should be support correct.
        """
        # should enable jumbo on host
        self.dutobj = self.dut.ports_info[self.port]['port']
        self.dutobj.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU)

        self.vm_testpmd.start_testpmd(
            "Default", "--max-pkt-len=%d --port-topology=loop "
            "--txqflags=0x0" % (ETHER_JUMBO_FRAME_MTU))
        self.vm_testpmd.execute_cmd("set fwd mac")
        self.vm_testpmd.execute_cmd("start")

        self.jumboframes_send_packet(ETHER_STANDARD_MTU - 1)
        self.jumboframes_send_packet(ETHER_STANDARD_MTU)

        self.vm_testpmd.execute_cmd("stop")
        self.vm_testpmd.quit()

    def test_vf_jumbo_nojumbo(self):
        """
        This case aims to test transmitting jumbo frame packet on testpmd
        without jumbo frame support.
        """
        # should enable jumbo on host
        self.dutobj = self.dut.ports_info[self.port]['port']
        self.dutobj.enable_jumbo(framesize=ETHER_STANDARD_MTU)

        self.vm_testpmd.start_testpmd("Default",
                                      "--port-topology=loop --txqflags=0x0")
        self.vm_testpmd.execute_cmd("set fwd mac")
        self.vm_testpmd.execute_cmd("start")

        # On igb, for example i350, refer to :DPDK-1117
        # For PF, the max-pkt-len = mtu + 18 + 4(VLAN header len).
        # For VF, the real max-pkt-len = the given max-pkt-len + 4(VLAN
        # header len). This behavior is levelraged from kernel driver.
        # And it means max-pkt-len is always 4 bytes longer than assumed.
        if self.kdriver == "igb":
            self.jumboframes_send_packet(ETHER_STANDARD_MTU + 1 + 4, False)
        else:
            self.jumboframes_send_packet(ETHER_STANDARD_MTU + 1, False)

        self.vm_testpmd.execute_cmd("stop")
        self.vm_testpmd.quit()

    def test_vf_jumbo_withjumbo(self):
        """
        When jumbo frame supported, this case is to verify that jumbo
        frame packet can be forwarded correct.
        """
        # should enable jumbo on host
        self.dutobj = self.dut.ports_info[self.port]['port']
        self.dutobj.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU)

        self.vm_testpmd.start_testpmd(
            "Default", "--max-pkt-len=%d --port-topology=loop "
            "--txqflags=0x0" % (ETHER_JUMBO_FRAME_MTU))
        self.vm_testpmd.execute_cmd("set fwd mac")
        self.vm_testpmd.execute_cmd("start")

        self.jumboframes_send_packet(ETHER_STANDARD_MTU + 1)
        self.jumboframes_send_packet(ETHER_JUMBO_FRAME_MTU - 1)
        self.jumboframes_send_packet(ETHER_JUMBO_FRAME_MTU)

        self.vm_testpmd.execute_cmd("stop")
        self.vm_testpmd.quit()

    def test_vf_jumbo_overjumbo(self):
        """
        When the jubmo frame MTU set as 9000, this case is to verify that
        the packet which the length bigger than MTU can not be forwarded.
        """
        # should enable jumbo on host
        self.dutobj = self.dut.ports_info[self.port]['port']
        self.dutobj.enable_jumbo(framesize=ETHER_JUMBO_FRAME_MTU)

        self.vm_testpmd.start_testpmd(
            "Default", "--max-pkt-len=%d --port-topology=loop "
            "--txqflags=0x0" % (ETHER_JUMBO_FRAME_MTU))
        self.vm_testpmd.execute_cmd("set fwd mac")
        self.vm_testpmd.execute_cmd("start")

        # On 1G NICs, when the jubmo frame MTU set as 9000, the software
        # adjust it to 9004.
        if self.kdriver == "igb":
            self.jumboframes_send_packet(ETHER_JUMBO_FRAME_MTU + 4 + 1,
                                         False)
        else:
            self.jumboframes_send_packet(ETHER_JUMBO_FRAME_MTU + 1, False)

        self.vm_testpmd.execute_cmd("stop")
        self.vm_testpmd.quit()

    def tear_down(self):
        """
        Run after each test case.
        """
        self.destroy_vm_env()

    def tear_down_all(self):
        """
        When the case of this test suite finished, the enviroment should
        clear up.
        """
        self.destroy_vm_env()
        self.netobj.enable_jumbo(framesize=ETHER_STANDARD_MTU)
class Testvf_daemon(TestCase):
    """VF daemon test suite: configure and monitor VF features (vlan
    insert/strip/filter/antispoof, promisc/multicast/broadcast modes,
    MTU, tx loopback, queue drop, MAC set) from the PF via testpmd.
    Uses two VMs: vm0 holds VF0 under test, vm1 holds VF1 as a peer."""

    def set_up_all(self):
        # Run once per suite: check ports and reset the VM environment flags.
        self.dut_ports = self.dut.get_ports(self.nic)
        self.verify(len(self.dut_ports) >= 1, "Insufficient ports")
        self.vm0 = None
        self.vm1 = None
        self.env_done = False

    def set_up(self):
        # Environment setup is idempotent (guarded by self.env_done).
        self.setup_vm_env()

    def bind_nic_driver(self, ports, driver=""):
        # Bind each given port to the requested driver (or its default).
        # NOTE(review): in the igb_uio branch the `driver` parameter is
        # reassigned to the currently-bound driver, shadowing the argument;
        # harmless for a single pass but worth confirming intent.
        if driver == "igb_uio":
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                driver = netdev.get_nic_driver()
                if driver != 'igb_uio':
                    netdev.bind_driver(driver='igb_uio')
        else:
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                driver_now = netdev.get_nic_driver()
                if driver == "":
                    driver = netdev.default_driver
                if driver != driver_now:
                    netdev.bind_driver(driver=driver)

    def setup_vm_env(self, driver='igb_uio'):
        """
        Create testing environment with 2VFs generated from 1PF.
        VF0 is passed through to VM0 and VF1 to VM1.
        """
        if self.env_done:
            return
        self.bind_nic_driver(self.dut_ports[:1], driver="igb_uio")
        self.used_dut_port = self.dut_ports[0]
        tester_port = self.tester.get_local_port(self.used_dut_port)
        self.tester_intf = self.tester.get_interface(tester_port)
        self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 2,
                                            driver=driver)
        self.sriov_vfs_port = self.dut.ports_info[
            self.used_dut_port]['vfs_port']
        # pci-stub binding makes the VFs assignable to the VMs.
        for port in self.sriov_vfs_port:
            port.bind_driver('pci-stub')
        time.sleep(1)
        self.dut_testpmd = PmdOutput(self.dut)
        time.sleep(1)
        vf0_prop = {'opt_host': self.sriov_vfs_port[0].pci}
        # set up VM0 ENV
        self.vm0 = QEMUKvm(self.dut, 'vm0', 'vf_daemon')
        self.vm0.set_vm_device(driver='pci-assign', **vf0_prop)
        try:
            self.vm0_dut = self.vm0.start()
            if self.vm0_dut is None:
                raise Exception("Set up VM0 ENV failed!")
        except Exception as e:
            self.destroy_vm_env()
            raise Exception(e)
        self.vm0_dut_ports = self.vm0_dut.get_ports('any')
        self.vm0_testpmd = PmdOutput(self.vm0_dut)
        vf1_prop = {'opt_host': self.sriov_vfs_port[1].pci}
        # set up VM1 ENV with the second VF
        self.vm1 = QEMUKvm(self.dut, 'vm1', 'vf_daemon')
        self.vm1.set_vm_device(driver='pci-assign', **vf1_prop)
        try:
            self.vm1_dut = self.vm1.start()
            if self.vm1_dut is None:
                raise Exception("Set up VM1 ENV failed!")
        except Exception as e:
            self.destroy_vm_env()
            raise Exception(e)
        self.vm1_dut_ports = self.vm1_dut.get_ports('any')
        self.vm1_testpmd = PmdOutput(self.vm1_dut)
        self.env_done = True

    def destroy_vm_env(self):
        # Tear down in reverse order: VMs first, then the VFs on the PF.
        if getattr(self, 'vm0', None):
            self.vm0_dut.kill_all()
            self.vm0_testpmd = None
            self.vm0_dut_ports = None
            # destroy vm0
            self.vm0.stop()
            self.vm0 = None
        if getattr(self, 'vm1', None):
            self.vm1_dut.kill_all()
            self.vm1_testpmd = None
            self.vm1_dut_ports = None
            # destroy vm1
            self.vm1.stop()
            self.vm1 = None
        if getattr(self, 'used_dut_port', None):
            self.dut.destroy_sriov_vfs_by_port(self.used_dut_port)
            port = self.dut.ports_info[self.used_dut_port]['port']
            self.used_dut_port = None
        self.env_done = False

    def send_packet(self, dst_mac, vlan_id, pktsize, num):
        """
        Generate packets and send them to dut.
        Starts a sniffer on the tester interface first and returns its
        handle so callers can later strip fields from the capture.
        """
        if vlan_id == 0:
            # vlan_id 0 means "untagged" here, not priority-tagged.
            pkt = Packet(pkt_type='UDP', pkt_len=pktsize)
        else:
            pkt = Packet(pkt_type='VLAN_UDP', pkt_len=pktsize)
            pkt.config_layer('vlan', {'vlan': vlan_id})
        pkt.config_layer('ether', {'dst': dst_mac})
        inst = sniff_packets(self.tester_intf, timeout=5)
        pkt.send_pkt(tx_port=self.tester_intf, count=num)
        return inst

    def strip_mac(self, inst, element="src"):
        """
        Load sniff packets, strip and return mac address from dump message
        """
        pkts = load_sniff_packets(inst)
        macs = []
        for pkt in pkts:
            mac = pkt.strip_element_layer2(element)
            macs.append(mac)
        return macs

    def strip_vlan(self, inst):
        """
        Load sniff packets, strip and return vlan id from dump message
        """
        pkts = load_sniff_packets(inst)
        vlans = []
        for pkt in pkts:
            vlan = pkt.strip_element_vlan("vlan")
            vlans.append(vlan)
        return vlans

    def send_and_pmdout(self, dst_mac, vlan_id=0, pktsize=64, num=1):
        """
        Send packets to dut and return testpmd output message
        Input: dst_mac, vlan_id, packet size, packet number
        Output: testpmd output message
        """
        inst = self.send_packet(dst_mac, vlan_id, pktsize, num)
        out = self.vm0_dut.get_session_output(timeout=2)
        return out

    def send_and_vlanstrip(self, dst_mac, vlan_id=0, pktsize=64, num=1):
        """
        Send packets to dut, strip and return vlan id from dump message
        Input: dst_mac, vlan_id, packet size, packet number
        Output: vlan id stripped from dump message
        """
        inst = self.send_packet(dst_mac, vlan_id, pktsize, num)
        vlans = self.strip_vlan(inst)
        return vlans

    def send_and_macstrip(self, dst_mac, vlan_id=0, pktsize=64, num=1):
        """
        Send packets to dut, strip and return src/dst mac from dump message
        Input: dst_mac, vlan_id, packet size, packet number
        Output: src/dst mac stripped from dump message
        """
        inst = self.send_packet(dst_mac, vlan_id, pktsize, num)
        macs = self.strip_mac(inst)
        return macs

    def test_vlan_insert(self):
        """
        Insert a vlan id for a VF from PF
        If insert vlan id as 0, means disabling vlan id insertion
        If insert vlan id as 1~4095, means enabling vlan id insertion and
        vlan id as configured value
        """
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                       '--port-topology=chained')
        self.vf0_mac = self.vm0_testpmd.get_port_mac(0)
        self.vm0_testpmd.execute_cmd('set fwd mac')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')
        # Disable vlan insert which means insert vlan id as 0
        rx_vlan = 0
        self.dut_testpmd.execute_cmd('set vf vlan insert 0 0 %s' % rx_vlan)
        time.sleep(1)
        vlans = self.send_and_vlanstrip(self.vf0_mac)
        self.verify(rx_vlan not in vlans, "Failed to disable vlan insert!!!")
        # Enable vlan insert which means insert vlan id as 1~4095;
        # check boundary values plus one random id.
        random_vlan = random.randint(1, MAX_VLAN - 1)
        rx_vlans = [1, random_vlan, MAX_VLAN]
        for rx_vlan in rx_vlans:
            self.dut_testpmd.execute_cmd('set vf vlan insert 0 0 %s'
                                         % rx_vlan)
            time.sleep(1)
            vlans = self.send_and_vlanstrip(self.vf0_mac)
            self.verify(rx_vlan in vlans,
                        "Failed to enable vlan insert packet!!!")
        self.vm0_testpmd.quit()
        self.dut_testpmd.quit()

    def test_multicast_mode(self):
        """
        Enable/disable multicast promiscuous mode for a VF from PF
        """
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                       '--port-topology=chained')
        self.vf0_mac = self.vm0_testpmd.get_port_mac(0)
        self.vm0_testpmd.execute_cmd('set fwd rxonly')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')
        self.dut_testpmd.execute_cmd('set vf promisc 0 0 off')
        self.dut_testpmd.execute_cmd('set vf allmulti 0 0 off')
        # F3:... has the multicast bit set in the first octet.
        multi_mac = 'F3:00:33:22:11:00'
        out = self.send_and_pmdout(multi_mac)
        self.verify("received" not in out,
                    "Failed to disable vf multicast mode!!!")
        # Unicast to the VF's own MAC must still be received.
        out = self.send_and_pmdout(self.vf0_mac)
        self.verify("received" in out,
                    "Failed to disable vf multicast mode!!!")
        self.verify("dst=%s" % self.vf0_mac in out,
                    "Failed to disable vf multicast mode!!!")
        self.dut_testpmd.execute_cmd('set vf allmulti 0 0 on')
        out = self.send_and_pmdout(multi_mac)
        self.verify("received" in out,
                    "Failed to enable vf multicast mode!!!")
        self.verify("dst=%s" % multi_mac in out,
                    "Failed to enable vf multicast mode!!!")
        out = self.send_and_pmdout(self.vf0_mac)
        self.verify("received" in out,
                    "Failed to enable vf multicast mode!!!")
        self.verify("dst=%s" % self.vf0_mac in out,
                    "Failed to enable vf multicast mode!!!")
        self.vm0_testpmd.quit()
        self.dut_testpmd.quit()

    def test_promisc_mode(self):
        """
        Enable/disable promiscuous mode for a VF from PF
        """
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                       '--port-topology=chained')
        self.vf0_mac = self.vm0_testpmd.get_port_mac(0)
        self.vm0_testpmd.execute_cmd('set fwd rxonly')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')
        self.dut_testpmd.execute_cmd('set vf promisc 0 0 off')
        # A unicast MAC that does not match the VF's own address.
        wrong_mac = '9E:AC:72:49:43:11'
        out = self.send_and_pmdout(wrong_mac)
        self.verify("received" not in out,
                    "Failed to disable vf promisc mode!!!")
        out = self.send_and_pmdout(self.vf0_mac)
        self.verify("received" in out,
                    "Failed to disable vf promisc mode!!!")
        self.verify("dst=%s" % self.vf0_mac in out,
                    "Failed to disable vf promisc mode!!!")
        self.dut_testpmd.execute_cmd('set vf promisc 0 0 on')
        out = self.send_and_pmdout(wrong_mac)
        self.verify("received" in out,
                    "Failed to enable vf promisc mode!!!")
        self.verify("dst=%s" % wrong_mac in out,
                    "Failed to enable vf promisc mode!!!")
        out = self.send_and_pmdout(self.vf0_mac)
        self.verify("received" in out,
                    "Failed to enable vf promisc mode!!!")
        self.verify("dst=%s" % self.vf0_mac in out,
                    "Failed to enable vf promisc mode!!!")
        self.vm0_testpmd.quit()
        self.dut_testpmd.quit()

    def test_broadcast_mode(self):
        """
        Enable/disable broadcast mode for a VF from PF
        """
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                       '--port-topology=chained')
        self.vf0_mac = self.vm0_testpmd.get_port_mac(0)
        self.vm0_testpmd.execute_cmd('set fwd rxonly')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')
        self.dut_testpmd.execute_cmd('set vf broadcast 0 0 off')
        dst_mac = 'FF:FF:FF:FF:FF:FF'
        out = self.send_and_pmdout(dst_mac)
        self.verify("received" not in out,
                    "Failed to disable vf broadcast mode!!!")
        self.dut_testpmd.execute_cmd('set vf broadcast 0 0 on')
        out = self.send_and_pmdout(dst_mac)
        self.verify("received" in out,
                    "Failed to enable vf broadcast mode!!!")
        self.verify("dst=%s" % dst_mac in out,
                    "Failed to enable vf broadcast mode!!!")
        self.vm0_testpmd.quit()
        self.dut_testpmd.quit()

    def test_vf_mtu(self):
        """
        Enable VF MTU change
        """
        # Raise MTU so jumbo-sized test packets can be emitted.
        # NOTE(review): this runs on self.dut but configures the tester's
        # interface name — confirm it should not be self.tester.send_expect.
        self.dut.send_expect("ifconfig %s mtu 9000" % self.tester_intf, "#")
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                       '--port-topology=chained')
        self.vf0_mac = self.vm0_testpmd.get_port_mac(0)
        self.vm0_testpmd.execute_cmd('set fwd mac')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')
        # With the default VF MTU, an oversize packet must NOT be forwarded.
        pktsize = random.randint(1500, 9000)
        out = self.send_and_macstrip(self.vf0_mac, 0, pktsize)
        self.vm0_testpmd.execute_cmd('stop')
        self.verify(self.vf0_mac.lower() not in out,
                    "Failed to receive and forward this length packet!!!")
        # Enlarge the VF MTU beyond the packet size; now it must forward.
        self.vm0_testpmd.execute_cmd('port stop all')
        self.vm0_testpmd.execute_cmd('port config mtu 0 %s' % (pktsize + 100))
        self.vm0_testpmd.execute_cmd('port start all')
        self.vm0_testpmd.execute_cmd('start')
        out = self.send_and_macstrip(self.vf0_mac, 0, pktsize)
        self.vm0_testpmd.execute_cmd('stop')
        self.verify(self.vf0_mac.lower() in out,
                    "Failed to receive and forward this length packet!!!")
        self.vm0_testpmd.quit()
        self.dut_testpmd.quit()
        # Restore the standard MTU.
        self.dut.send_expect("ifconfig %s mtu 1500" % self.tester_intf, "#")

    def test_vlan_tag(self):
        """
        Enable/disable vlan tag for a VF from PF
        """
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                       '--port-topology=chained')
        self.vf0_mac = self.vm0_testpmd.get_port_mac(0)
        # Boundary vlan ids plus one random id.
        random_vlan = random.randint(1, MAX_VLAN - 1)
        rx_vlans = [1, random_vlan, MAX_VLAN]
        self.vm0_testpmd.execute_cmd('set fwd mac')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')
        for rx_vlan in rx_vlans:
            self.vm0_testpmd.execute_cmd('rx_vlan add %s 0' % rx_vlan)
            self.dut_testpmd.execute_cmd('set vf vlan tag 0 0 off')
            time.sleep(1)
            out = self.send_and_macstrip(self.vf0_mac, rx_vlan)
            self.verify(self.vf0_mac.lower() not in out,
                        "Failed to disable vlan tag!!!")
            self.dut_testpmd.execute_cmd('set vf vlan tag 0 0 on')
            time.sleep(1)
            out = self.send_and_macstrip(self.vf0_mac, rx_vlan)
            self.verify(self.vf0_mac.lower() in out,
                        "Failed to enable vlan tag!!!")
        self.vm0_testpmd.quit()
        self.dut_testpmd.quit()

    def test_tx_loopback(self):
        """
        Enable/disable TX loopback from PF.
        VM1 transmits toward VF0's MAC; with loopback off the traffic
        egresses to the wire (seen by the tester sniffer), with loopback
        on it is switched internally to VF0 instead.
        """
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                       '--port-topology=chained')
        self.vf0_mac = self.vm0_testpmd.get_port_mac(0)
        self.vm1_testpmd.start_testpmd(
            VM_CORES_MASK,
            '--port-topology=chained --eth-peer=0,%s' % self.vf0_mac)
        self.vm0_testpmd.execute_cmd('set fwd rxonly')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')
        self.dut_testpmd.execute_cmd('set tx loopback 0 off')
        inst = sniff_packets(self.tester_intf, timeout=5)
        self.vm1_testpmd.execute_cmd('set burst 5')
        self.vm1_testpmd.execute_cmd('start tx_first')
        dumpout = self.strip_mac(inst, "dst")
        out = self.vm0_testpmd.execute_cmd('stop')
        self.verify(self.vf0_mac.lower() in dumpout,
                    "Failed to disable tx loopback!!!")
        self.verify("RX-packets: 0" in out,
                    "Failed to disable tx loopback!!!")
        self.vm0_testpmd.execute_cmd('start')
        self.dut_testpmd.execute_cmd('set tx loopback 0 on')
        inst = sniff_packets(self.tester_intf, timeout=5)
        self.vm1_testpmd.execute_cmd('stop')
        self.vm1_testpmd.execute_cmd('start tx_first')
        dumpout = self.strip_mac(inst, "dst")
        out = self.vm0_testpmd.execute_cmd('stop')
        self.verify(self.vf0_mac.lower() not in dumpout,
                    "Failed to enable tx loopback!!!")
        # burst was set to 5, so VF0 must have received exactly 5 packets.
        self.verify("RX-packets: 5" in out,
                    "Failed to enable tx loopback!!!")
        self.vm0_testpmd.quit()
        self.vm1_testpmd.quit()
        self.dut_testpmd.quit()

    def test_all_queues_drop(self):
        """
        Enable/disable drop enable bit for all queues from PF.
        VF1's rx queue is deliberately saturated (stopped forwarding);
        with drop disabled that back-pressures VF0, with drop enabled
        VF0 receives normally.
        """
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                       '--port-topology=chained')
        self.vm1_testpmd.start_testpmd(VM_CORES_MASK,
                                       '--port-topology=chained')
        self.vm0_testpmd.execute_cmd('set fwd rxonly')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')
        self.dut_testpmd.execute_cmd('set all queues drop 0 off')
        self.vf0_mac = self.vm0_testpmd.get_port_mac(0)
        self.vf1_mac = self.vm1_testpmd.get_port_mac(0)
        # Flood VF1 (not forwarding) until its queue holds 127 packets.
        out = self.send_and_pmdout(self.vf1_mac, 0, 64, 200)
        out = self.vm1_testpmd.execute_cmd('show port stats 0')
        self.verify("RX-packets: 127" in out,
                    "Failed to let vf1 full of queues!!!")
        out = self.send_and_pmdout(self.vf0_mac, 0, 64, 20)
        out = self.vm0_testpmd.execute_cmd('show port stats 0')
        self.verify("RX-packets: 0" in out,
                    "Failed to disable all queues drop!!!")
        self.dut_testpmd.execute_cmd('set all queues drop 0 on')
        # Enabling drop releases the pending 20 packets to VF0...
        out = self.vm0_testpmd.execute_cmd('show port stats 0')
        self.verify("RX-packets: 20" in out,
                    "Failed to enable all queues drop!!!")
        # ...and new traffic is received immediately (20 + 20 = 40).
        out = self.send_and_pmdout(self.vf0_mac, 0, 64, 20)
        out = self.vm0_testpmd.execute_cmd('show port stats 0')
        self.verify("RX-packets: 40" in out,
                    "Failed to enable all queues drop!!!")
        self.vm0_testpmd.quit()
        self.vm1_testpmd.quit()
        self.dut_testpmd.quit()

    def test_mac_antispoof(self):
        """
        Enable/disable mac anti-spoof for a VF from PF.
        Patches testpmd's macswap engine inside VM0 so forwarded packets
        carry a forged source MAC, then checks the PF's antispoof setting
        drops/passes them. The patch is reverted and DPDK rebuilt at the
        end of the case.
        """
        fake_mac = '00:11:22:33:44:55'
        # Patch macswap.c to overwrite the source MAC with fake_mac.
        self.vm0_dut.send_expect("sed -i -e '/uint64_t ol_flags = 0;/a " +\
            "\struct ether_addr fake_mac = {.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},};'" +\
            " app/test-pmd/macswap.c", "# ", 30)
        self.vm0_dut.send_expect("sed -i -e '/ether_addr_copy(&addr, ð_hdr->s_addr);/d' " +\
            " app/test-pmd/macswap.c", "# ", 30)
        self.vm0_dut.send_expect("sed -i -e '/ether_addr_copy(ð_hdr->s_addr, ð_hdr->d_addr);/a " +\
            "\ether_addr_copy(&fake_mac, ð_hdr->s_addr);' app/test-pmd/macswap.c", "# ", 30)
        self.vm0_dut.build_install_dpdk(self.target)
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                       '--port-topology=chained')
        self.vf0_mac = self.vm0_testpmd.get_port_mac(0)
        self.vm0_testpmd.execute_cmd('set fwd macswap')
        self.dut_testpmd.execute_cmd('set vf mac antispoof 0 0 off')
        self.vm0_testpmd.execute_cmd('start')
        dumpout = self.send_and_macstrip(self.vf0_mac)
        out = self.vm0_testpmd.execute_cmd('stop')
        # Antispoof off: the forged-source packet reaches the wire.
        self.verify(fake_mac in dumpout,
                    "Failed to disable vf mac anspoof!!!")
        self.verify("RX-packets: 1" in out,
                    "Failed to receive packet!!!")
        self.verify("TX-packets: 1" in out,
                    "Failed to disable mac antispoof!!!")
        self.dut_testpmd.execute_cmd('set vf mac antispoof 0 0 on')
        out = self.vm0_testpmd.execute_cmd('start')
        dumpout = self.send_and_macstrip(self.vf0_mac)
        out = self.vm0_testpmd.execute_cmd('stop')
        # Antispoof on: the forged packet is received but its TX is blocked.
        self.verify(fake_mac not in dumpout,
                    "Failed to enable vf mac anspoof!!!")
        self.verify("RX-packets: 1" in out,
                    "Failed to receive packet!!!")
        self.verify("TX-packets: 0" in out,
                    "Failed to enable mac antispoof!!!")
        self.vm0_testpmd.quit()
        self.dut_testpmd.quit()
        # Revert the macswap.c patch and rebuild DPDK in VM0.
        self.vm0_dut.send_expect("sed -i '/struct ether_addr fake_mac = {.addr_bytes = " +\
            "{0x00, 0x11, 0x22, 0x33, 0x44, 0x55},};/d' app/test-pmd/macswap.c", "# ", 30)
        self.vm0_dut.send_expect("sed -i '/ether_addr_copy(&fake_mac, ð_hdr->s_addr);/d' " +\
            "app/test-pmd/macswap.c", "# ", 30)
        self.vm0_dut.send_expect("sed -i '/ether_addr_copy(ð_hdr->s_addr, ð_hdr->d_addr);/a " +\
            "\ether_addr_copy(&addr, ð_hdr->s_addr);' app/test-pmd/macswap.c", "# ", 30)
        self.vm0_dut.build_install_dpdk(self.target)

    def test_vf_mac_set(self):
        """
        Set MAC address for a VF from PF
        """
        expect_mac = 'A2:22:33:44:55:66'
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        # Set the VF MAC before the VM's testpmd probes the device.
        self.dut_testpmd.execute_cmd('set vf mac addr 0 0 %s' % expect_mac)
        out = self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                             '--port-topology=chained')
        self.verify("%s" % expect_mac in out, "Failed to set vf mac!!!")
        self.vf0_mac = self.vm0_testpmd.get_port_mac(0)
        self.vm0_testpmd.execute_cmd('set fwd mac')
        self.vm0_testpmd.execute_cmd('start')
        out = self.send_and_macstrip(self.vf0_mac)
        self.verify(expect_mac.lower() in out,
                    "Failed to receive packet on setted vf mac!!!")
        self.vm0_testpmd.quit()
        self.dut_testpmd.quit()

    def test_vlan_antispoof(self):
        """
        Enable/disable vlan antispoof for a VF from PF
        """
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                       '--port-topology=chained')
        self.vf0_mac = self.vm0_testpmd.get_port_mac(0)
        vf0_mac_lower = self.vf0_mac.lower()
        random_vlan = random.randint(1, MAX_VLAN)
        match_vlan = random_vlan
        # A vlan id guaranteed different from match_vlan (wraps at 4096).
        unmatch_vlan = (random_vlan + 2) % 4096
        self.vm0_testpmd.execute_cmd('set fwd mac')
        self.vm0_testpmd.execute_cmd('start')
        self.dut_testpmd.execute_cmd('rx_vlan add %d port 0 vf 1'
                                     % match_vlan)
        # Driver-specific preparation before toggling antispoof.
        if self.kdriver == "i40e":
            self.dut_testpmd.execute_cmd('set vf vlan stripq 0 0 off')
        else:
            self.dut_testpmd.execute_cmd('vlan set filter off 0')
            self.dut_testpmd.execute_cmd('vlan set strip off 0')
            self.vm0_testpmd.execute_cmd('vlan set strip off 0')
        self.dut_testpmd.execute_cmd('set vf vlan antispoof 0 0 off')
        time.sleep(1)
        # Antispoof off: everything is forwarded regardless of vlan.
        out = self.send_and_macstrip(self.vf0_mac, match_vlan)
        self.verify(vf0_mac_lower in out,
                    "Failed to disable vlan antispoof with match vlan!!!")
        out = self.send_and_macstrip(self.vf0_mac, unmatch_vlan)
        self.verify(vf0_mac_lower in out,
                    "Failed to disable vlan antispoof with unmatch vlan!!!")
        out = self.send_and_macstrip(self.vf0_mac)
        self.verify(vf0_mac_lower in out,
                    "Failed to disable vlan antispoof with no vlan!!!")
        # ixgbe requires mac antispoof enabled together with vlan antispoof.
        if self.kdriver == "ixgbe":
            self.dut_testpmd.execute_cmd('set vf mac antispoof 0 0 on')
        self.dut_testpmd.execute_cmd('set vf vlan antispoof 0 0 on')
        time.sleep(1)
        # Antispoof on: only the registered vlan id is forwarded.
        out = self.send_and_macstrip(self.vf0_mac, match_vlan)
        self.verify(vf0_mac_lower in out,
                    "Failed to enable vlan antispoof with match vlan!!!")
        out = self.send_and_macstrip(self.vf0_mac, unmatch_vlan)
        self.verify(vf0_mac_lower not in out,
                    "Failed to enable vlan antispoof with unmatch vlan!!!")
        out = self.send_and_macstrip(self.vf0_mac)
        self.verify(vf0_mac_lower not in out,
                    "Failed to enable vlan antispoof with no vlan!!!")
        self.vm0_testpmd.quit()
        self.dut_testpmd.quit()

    def test_vlan_strip(self):
        """
        Enable/disable the VLAN strip for all queues in a pool for a VF
        from PF
        """
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                       '--port-topology=chained')
        self.vf0_mac = self.vm0_testpmd.get_port_mac(0)
        random_vlan = random.randint(1, MAX_VLAN - 1)
        rx_vlans = [1, random_vlan, MAX_VLAN]
        self.vm0_testpmd.execute_cmd('set fwd mac')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')
        for rx_vlan in rx_vlans:
            self.vm0_testpmd.execute_cmd('rx_vlan add %s 0' % rx_vlan)
            self.dut_testpmd.execute_cmd('set vf vlan stripq 0 0 off')
            time.sleep(1)
            # Strip off: the vlan tag survives into the forwarded packet.
            out = self.send_and_vlanstrip(self.vf0_mac, rx_vlan)
            self.verify(rx_vlan in out, "Failed to disable strip vlan!!!")
            self.dut_testpmd.execute_cmd('set vf vlan stripq 0 0 on')
            time.sleep(1)
            # Strip on: the tag must be removed.
            # NOTE(review): the message below says "disable" but this is
            # the enable check — consider correcting the text.
            out = self.send_and_vlanstrip(self.vf0_mac, rx_vlan)
            self.verify(rx_vlan not in out,
                        "Failed to disable strip vlan!!!")
        self.vm0_testpmd.quit()
        self.dut_testpmd.quit()

    def test_vlan_filter(self):
        """
        Add/Remove vlan filter for a VF from PF
        """
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                       '--port-topology=chained')
        self.vm0_testpmd.execute_cmd('set fwd rxonly')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')
        wrong_mac = '9E:AC:72:49:43:11'
        # Without any filter, both untagged and tagged packets arrive.
        out = self.send_and_pmdout(wrong_mac)
        self.verify("dst=%s" % wrong_mac in out,
                    "Failed to receive untagged packet!!!")
        random_vlan = random.randint(1, MAX_VLAN)
        out = self.send_and_pmdout(wrong_mac, random_vlan)
        self.verify("dst=%s" % wrong_mac in out,
                    "Failed to receive packet with vlan id!!!")
        self.verify("VLAN tci=%s" % hex(random_vlan) in out,
                    "Failed to receive packet with vlan id!!!")
        random_vlan = random.randint(2, MAX_VLAN - 1)
        rx_vlans = [1, random_vlan, MAX_VLAN]
        for rx_vlan in rx_vlans:
            self.dut_testpmd.execute_cmd('rx_vlan add %s port 0 vf 1'
                                         % rx_vlan)
            time.sleep(1)
            # With a filter, only the registered vlan id passes.
            out = self.send_and_pmdout(wrong_mac, rx_vlan)
            self.verify("dst=%s" % wrong_mac in out,
                        "Failed to enable vlan filter!!!")
            self.verify("VLAN tci=%s" % hex(rx_vlan) in out,
                        "Failed to receive packet with vlan id!!!")
            wrong_rx_vlan = (rx_vlan + 1) % 4096
            # Packet for vlan id 0 is equal to untagged packet for this case
            if wrong_rx_vlan == 0:
                wrong_rx_vlan = random.randint(1, MAX_VLAN - 1)
            out = self.send_and_pmdout(wrong_mac, wrong_rx_vlan)
            self.verify("dst=%s" % wrong_mac not in out,
                        "Failed to enable vlan filter!!!")
            # Removing the filter restores reception of any vlan id.
            self.dut_testpmd.execute_cmd('rx_vlan rm %s port 0 vf 1'
                                         % rx_vlan)
            out = self.send_and_pmdout(wrong_mac, rx_vlan)
            self.verify("dst=%s" % wrong_mac in out,
                        "Failed to disable vlan filter!!!")
            self.verify("VLAN tci=%s" % hex(rx_vlan) in out,
                        "Failed to receive packet with vlan id!!!")
            out = self.send_and_pmdout(wrong_mac, wrong_rx_vlan)
            self.verify("dst=%s" % wrong_mac in out,
                        "Failed to disable vlan filter!!!")
            self.verify("VLAN tci=%s" % hex(wrong_rx_vlan) in out,
                        "Failed to receive packet with vlan id!!!")
            out = self.send_and_pmdout(wrong_mac)
            self.verify("dst=%s" % wrong_mac in out,
                        "Failed to receive untagged packet!!!")
        self.vm0_testpmd.quit()
        self.dut_testpmd.quit()

    def tear_down(self):
        # Kill any leftover processes in both VMs after each case.
        self.vm0_dut.kill_all()
        self.vm1_dut.kill_all()
        pass

    def tear_down_all(self):
        self.destroy_vm_env()
        pass
class TestPtype_Mapping(TestCase):
    """Ptype mapping suite for Fortville NICs: exercises the testpmd
    'ptype mapping get/update/replace/reset' commands by checking the
    software ptype testpmd reports for known hardware ptypes."""

    def set_up_all(self):
        """
        Run at the start of each test suite.
        """
        self.verify(
            self.nic in [
                'fortville_eagle', 'fortville_spirit',
                'fortville_spirit_single', 'fortville_25g'
            ], 'ptype mapping test can not support %s nic' % self.nic)
        ports = self.dut.get_ports()
        self.verify(len(ports) >= 1, "Insufficient ports for testing")
        valports = [_ for _ in ports if self.tester.get_local_port(_) != -1]
        self.dut_port = valports[0]
        tester_port = self.tester.get_local_port(self.dut_port)
        self.tester_iface = self.tester.get_interface(tester_port)
        # Patch testpmd's verbose output to also print mb->packet_type so
        # the detected ptype value can be asserted on; DPDK is rebuilt.
        self.dut.send_expect("sed -i -e '" +\
            "/printf(\" - VLAN tci=0x%x\", mb->vlan_tci);" +\
            "/a\\\\t\\tprintf(\" - pktype: 0x%x\", mb->packet_type);'" +\
            " app/test-pmd/util.c", "# ", 30, verify = True)
        self.dut.build_install_dpdk(self.dut.target)

    def set_up(self):
        """
        Run before each test case.
        """
        self.dut_testpmd = PmdOutput(self.dut)
        self.dut_testpmd.start_testpmd("Default", "--port-topology=chained")
        self.dut_testpmd.execute_cmd('set fwd rxonly')
        self.dut_testpmd.execute_cmd('set verbose 1')
        self.dut_testpmd.execute_cmd('start')

    def run_test(self, sw_ptype, pkt_types, chk_types):
        """
        Generate and send packet according to packet type, detect each
        packet layer.
        """
        for pkt_type in pkt_types.keys():
            # chk_types overrides the expected layer names when given.
            if chk_types != None:
                pkt_names = chk_types[pkt_type]
            else:
                pkt_names = pkt_types[pkt_type]
            pkt = Packet(pkt_type=pkt_type)
            pkt.send_pkt(tx_port=self.tester_iface)
            out = self.dut.get_session_output(timeout=2)
            self.verify(sw_ptype in out,
                        "Failed to detect correct ptype value")
            for pkt_layer_name in pkt_names:
                if pkt_layer_name not in out:
                    print utils.RED("Fail to detect %s" % pkt_layer_name)
                    raise VerifyFailure("Failed to detect %s"
                                        % pkt_layer_name)
            print utils.GREEN("Detected %s successfully" % pkt_type)

    def strip_ptype(self, table, hw_ptype):
        """
        Strip software packet type from packet mapping table.
        Input: packet mapping table, hardware ptype
        Out: 32 bits software ptype or none
        """
        # Match "<hw_ptype> 0x...", dropping leading zeros; group(3)
        # is the hex sw ptype without the 0x prefix or leading zeros.
        pattern = r"\s(%s)\s0x(0*)([1-9a-f][0-9a-f]*)" % hw_ptype
        s = re.compile(pattern)
        res = s.search(table)
        if res is None:
            print utils.RED("search none ptype")
            return None
        else:
            ptype = res.group(3)
            return ptype

    def run_ptype_test(self, hw_ptype, check_ptype):
        """
        Get ptype mapping table and run ptype test.
        """
        # Mode 0 dumps all 255 entries, mode 1 only the 166 valid ones.
        out = self.dut_testpmd.execute_cmd('ptype mapping get 0 0')
        time.sleep(3)
        self.verify("255" in out,
                    "Failed to get 255 items ptype mapping table!!!")
        out = self.dut_testpmd.execute_cmd('ptype mapping get 0 1')
        time.sleep(3)
        self.verify("166" in out,
                    "Failed to get 166 items ptype mapping table!!!")
        sw_ptype = self.strip_ptype(out, hw_ptype)
        # hw ptype 38: IPv4 tunnel carrying IPv6/UDP;
        # hw ptype 75: NVGRE tunnel with inner vlan'd IPv4.
        if hw_ptype == 38:
            pktType = {
                "MAC_IP_IPv6_UDP_PKT": [
                    "L2_ETHER", "L3_IPV4_EXT_UNKNOWN", "TUNNEL_IP",
                    "INNER_L3_IPV6_EXT_UNKNOWN", "INNER_L4_UDP"
                ]
            }
        elif hw_ptype == 75:
            pktType = {
                "MAC_IP_NVGRE_MAC_VLAN_IP_PKT": [
                    "L2_ETHER", "L3_IPV4_EXT_UNKNOWN", "TUNNEL_GRENAT",
                    "INNER_L2_ETHER_VLAN", "INNER_L3_IPV4_EXT_UNKNOWN",
                    "INNER_L4_NONFRAG"
                ]
            }
        self.run_test(sw_ptype, pktType, check_ptype)

    def ptype_mapping_test(self, check_ptype=None):
        # Exercise both reference hardware ptypes.
        self.run_ptype_test(hw_ptype=38, check_ptype=check_ptype)
        self.run_ptype_test(hw_ptype=75, check_ptype=check_ptype)

    def test_ptype_mapping_get(self):
        """
        Get hardware defined ptype to software defined ptype mapping
        items.
        """
        self.ptype_mapping_test()

    def test_ptype_mapping_reset(self):
        """
        Reset packet mapping table after changing table.
        """
        self.ptype_mapping_test()
        # Remap hw ptype 38 to a new sw ptype, verify, then reset.
        self.dut_testpmd.execute_cmd('ptype mapping update 0 38 0x026010e1')
        chk_types = {
            "MAC_IP_IPv6_UDP_PKT": [
                "L2_ETHER", "L3_IPV6_EXT_UNKNOWN", "TUNNEL_IP",
                "INNER_L3_IPV6_EXT_UNKNOWN", "INNER_L4_UDP"
            ],
            "MAC_IP_NVGRE_MAC_VLAN_IP_PKT": [
                "L2_ETHER", "L3_IPV4_EXT_UNKNOWN", "TUNNEL_GRENAT",
                "INNER_L2_ETHER_VLAN", "INNER_L3_IPV4_EXT_UNKNOWN",
                "INNER_L4_NONFRAG"
            ]
        }
        self.ptype_mapping_test(check_ptype=chk_types)
        self.dut_testpmd.execute_cmd('ptype mapping reset 0')
        # After reset the default mapping must be back.
        self.ptype_mapping_test()

    def test_ptype_mapping_update(self):
        """
        Update a specific hardware ptype's software ptype as a new one.
        """
        self.ptype_mapping_test()
        # Point both hw ptypes at the same sw ptype and verify both
        # packet kinds now decode identically.
        self.dut_testpmd.execute_cmd('ptype mapping update 0 38 0x026010e1')
        self.dut_testpmd.execute_cmd('ptype mapping update 0 75 0x026010e1')
        check_types = [
            "L2_ETHER", "L3_IPV6_EXT_UNKNOWN", "TUNNEL_IP",
            "INNER_L3_IPV6_EXT_UNKNOWN", "INNER_L4_UDP"
        ]
        chk_types = {
            "MAC_IP_IPv6_UDP_PKT": check_types,
            "MAC_IP_NVGRE_MAC_VLAN_IP_PKT": check_types
        }
        self.ptype_mapping_test(check_ptype=chk_types)
        self.dut_testpmd.execute_cmd('ptype mapping reset 0')
        self.ptype_mapping_test()

    def test_ptype_mapping_replace(self):
        """
        Replace a specific or a group of software defined ptypes with a
        new one.
        """
        self.ptype_mapping_test()
        # Replace one sw ptype (exact match, mode 0).
        self.dut_testpmd.execute_cmd(
            'ptype mapping replace 0 0x06426091 0 0x06421091')
        self.dut_testpmd.execute_cmd('ptype mapping update 0 38 0x06421091')
        check_types = [
            "L2_ETHER", "L3_IPV4_EXT_UNKNOWN", "TUNNEL_IP",
            "INNER_L2_ETHER_VLAN", "INNER_L3_IPV4_EXT_UNKNOWN",
            "INNER_L4_NONFRAG"
        ]
        chk_types = {
            "MAC_IP_IPv6_UDP_PKT": check_types,
            "MAC_IP_NVGRE_MAC_VLAN_IP_PKT": check_types
        }
        self.ptype_mapping_test(check_ptype=chk_types)
        # Replace a group of sw ptypes (mask match, mode 1).
        self.dut_testpmd.execute_cmd(
            'ptype mapping replace 0 0x06421091 1 0x02601091')
        check_types = [
            "L2_ETHER", "L3_IPV4_EXT_UNKNOWN", "TUNNEL_IP",
            "INNER_L3_IPV6_EXT_UNKNOWN", "INNER_L4_UDP"
        ]
        chk_types = {
            "MAC_IP_IPv6_UDP_PKT": check_types,
            "MAC_IP_NVGRE_MAC_VLAN_IP_PKT": check_types
        }
        self.ptype_mapping_test(check_ptype=chk_types)
        self.dut_testpmd.execute_cmd('ptype mapping reset 0')
        self.ptype_mapping_test()

    def tear_down(self):
        """
        Run after each test case.
        """
        self.dut_testpmd.quit()

    def tear_down_all(self):
        """
        Run after each test suite.
        """
        # Revert the util.c patch added in set_up_all and rebuild DPDK.
        self.dut.send_expect("sed -i '/printf(\" - pktype: 0x%x\", " +\
            "mb->packet_type);/d' app/test-pmd/util.c", "# ", 30,
            verify = True)
        self.dut.build_install_dpdk(self.dut.target)
        self.dut.kill_all()
class TestEtag(TestCase):
    # VF drivers this suite knows how to hand to a VM
    supported_vf_driver = ['pci-stub', 'vfio-pci']

    def set_up_all(self):
        """
        Suite prerequisites: NIC must be sagepond/sageville (802.1BR),
        cache tester/DUT MACs and interface names.
        """
        self.dut_ports = self.dut.get_ports(self.nic)
        self.verify(self.nic in ['sagepond', 'sageville'],
                    '802.1BR only support by sagepond and sageville')
        self.verify(len(self.dut_ports) >= 1, 'Insufficient ports')
        local_port = self.tester.get_local_port(0)
        self.src_intf = self.tester.get_interface(local_port)
        self.src_mac = self.tester.get_mac(local_port)
        self.dst_mac = self.dut.get_mac_address(0)
        self.vm0 = None
        self.printFlag = self._enable_debug
        self.dut.send_expect('ls', '#')
        self.setup_vm_env_flag = 0
        self.preset_host_cmds = list()

    def set_up(self):
        pass

    def setup_vm_env(self, driver='default'):
        '''
        Set up the qemu virtual environment: 2 VFs from PF0, host testpmd
        with both VFs blacklisted, and VM0 with both VFs assigned.
        '''
        if self.setup_vm_env_flag == 1:
            return
        self.used_dut_port_0 = self.dut_ports[0]
        self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 2,
                                            driver=driver)
        self.sriov_vfs_port_0 = \
            self.dut.ports_info[self.used_dut_port_0]['vfs_port']

        # pick the VF assignment method matching the configured VF driver
        self.vf_driver = self.get_suite_cfg()['vf_driver']
        if self.vf_driver is None:
            self.vf_driver = 'pci-stub'
        self.verify(self.vf_driver in self.supported_vf_driver,
                    "Unspported vf driver")
        if self.vf_driver == 'pci-stub':
            self.vf_assign_method = 'pci-assign'
        else:
            self.vf_assign_method = 'vfio-pci'
            self.dut.send_expect('modprobe vfio-pci', '#')

        try:
            for vf_port in self.sriov_vfs_port_0:
                vf_port.bind_driver(self.vf_driver)
            time.sleep(1)
            vf0_prop = {'opt_host': self.sriov_vfs_port_0[0].pci}
            vf1_prop = {'opt_host': self.sriov_vfs_port_0[1].pci}

            # start testpmd without the two VFs on the host
            self.host_testpmd = PmdOutput(self.dut)
            eal_param = '-b %(vf0)s -b %(vf1)s' % {
                'vf0': self.sriov_vfs_port_0[0].pci,
                'vf1': self.sriov_vfs_port_0[1].pci}
            self.preset_host_testpmd(VM_CORES_MASK, eal_param)

            # set up VM0 ENV
            self.vm0 = QEMUKvm(self.dut, 'vm0', 'vf_etag')
            self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop)
            self.vm0.set_vm_device(driver=self.vf_assign_method, **vf1_prop)
            self.vm_dut_0 = self.vm0.start()
            if self.vm_dut_0 is None:
                raise Exception('Set up VM0 ENV failed!')
        except Exception as e:
            print(e)
            self.destroy_vm_env()
            raise Exception(e)

    def destroy_vm_env(self):
        """
        Tear down in reverse order: guest testpmd, VM0, host testpmd,
        SRIOV VFs, then rebind every port to its default driver.
        """
        # destroy testpmd in vm0
        if getattr(self, 'vm0_testpmd', None) and self.vm0_testpmd:
            self.vm0_testpmd.execute_cmd('stop')
            self.vm0_testpmd.execute_cmd('quit', '# ')
            self.vm0_testpmd = None
        # destroy vm0
        if getattr(self, 'vm0', None) and self.vm0:
            self.vm0_dut_ports = None
            self.vm0.stop()
            self.vm0 = None
        # destroy host testpmd
        if getattr(self, 'host_testpmd', None):
            self.host_testpmd.execute_cmd('quit', '# ')
            self.host_testpmd = None
        # reset used port's sriov
        if getattr(self, 'used_dut_port_0', None):
            self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_0)
            port = self.dut.ports_info[self.used_dut_port_0]['port']
            port.bind_driver()
            self.used_dut_port_0 = None
        # bind used ports with default driver
        for port_id in self.dut_ports:
            port = self.dut.ports_info[port_id]['port']
            port.bind_driver()
        self.setup_vm_env_flag = 0

    def check_packet_transmission(self, pkt_types):
        """
        Send each packet described in pkt_types from the tester, then scan
        VM ('vm' key) and/or host ('dut' key) testpmd verbose output for
        the expected attribute strings.
        """
        time.sleep(1)
        for pkt_type in pkt_types.keys():
            expect = pkt_types[pkt_type]
            pkt = Packet(pkt_type=pkt_type)
            # set packet every layer's input parameters
            pkt_configs = expect.get('layer_configs')
            if pkt_configs:
                for layer in pkt_configs.keys():
                    pkt.config_layer(layer, pkt_configs[layer])
            pkt.send_pkt(tx_port=self.src_intf)

            # check vm testpmd packet received information
            if 'vm' in expect:
                out = self.vm0_testpmd.get_output(timeout=2)
                if self.printFlag:  # debug output
                    print(out)
                for pkt_attribute in expect['vm']:
                    if self.printFlag:  # debug output
                        print(pkt_attribute)
                    if pkt_attribute not in out:
                        print(utils.RED('Fail to detect %s' % pkt_attribute))
                        # print out all info in debug mode
                        if not self.printFlag:
                            raise VerifyFailure(
                                'Failed to detect %s' % pkt_attribute)
                print(utils.GREEN('VM detected %s successfully' % pkt_type))

            # check dut testpmd packet received information
            if 'dut' in expect:
                out = self.host_testpmd.get_output(timeout=2)
                if self.printFlag:  # debug output
                    print(out)
                for pkt_attribute in expect['dut']:
                    if self.printFlag:  # debug output
                        print(pkt_attribute)
                    if pkt_attribute not in out:
                        print(utils.RED('Fail to detect %s' % pkt_attribute))
                        # print out all info in debug mode
                        if not self.printFlag:
                            raise VerifyFailure(
                                'Failed to detect %s' % pkt_attribute)
                print(utils.GREEN('DUT detected %s successfully' % pkt_type))
        time.sleep(1)

    def preset_host_testpmd(self, core_mask, eal_param):
        """
        Start host testpmd in loop topology (only once per environment)
        and replay any queued preset commands.
        """
        if self.setup_vm_env_flag == 0:
            self.host_testpmd.start_testpmd(core_mask,
                                            param='--port-topology=loop',
                                            eal_param=eal_param)
            self.execute_host_testpmd_cmd(self.preset_host_cmds)
            self.preset_host_cmds = list()
            time.sleep(2)

    def execute_host_testpmd_cmd(self, cmds):
        """
        Run a list of [command] or [command, timeout] pairs on host testpmd.
        """
        if len(cmds) == 0:
            return
        for item in cmds:
            if len(item) == 2:
                self.host_testpmd.execute_cmd(item[0], int(item[1]))
            else:
                self.host_testpmd.execute_cmd(item[0])
        time.sleep(2)

    def preset_guest_testpmd(self):
        """
        Start (or restart) testpmd inside VM0 in loop topology.
        """
        if self.setup_vm_env_flag == 0:
            self.vm0_testpmd = PmdOutput(self.vm_dut_0)
            self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                           param='--port-topology=loop')
            time.sleep(1)
        elif self.vm0_testpmd:
            self.vm0_testpmd.quit()
            self.vm0_testpmd.start_testpmd(VM_CORES_MASK,
                                           param='--port-topology=loop')
            time.sleep(1)

    def execute_guest_testpmd_cmd(self, cmds):
        """
        Run a list of [command] or [command, timeout] pairs on guest testpmd.
        """
        if len(cmds) == 0:
            return
        for item in cmds:
            if len(item) == 2:
                self.vm0_testpmd.execute_cmd(item[0], int(item[1]))
            else:
                self.vm0_testpmd.execute_cmd(item[0])

    def preset_test_enviroment(self):
        """
        Bring up VM environment plus guest testpmd for the test cases.
        """
        self.setup_vm_env(driver='igb_uio')
        self.preset_guest_testpmd()
        self.setup_vm_env_flag = 1
        time.sleep(2)

    def test_l2_tunnel_filter(self):
        '''
        Enable E-tag l2 tunnel support means enabling ability of parsing
        E-tag packet. This ability should be enabled before we enable
        filtering, forwarding, offloading for this specific type of tunnel.
        '''
        host_cmds = [['port config 0 l2-tunnel E-tag enable'],
                     ['set fwd rxonly'], ['set verbose 1'], ['start']]
        guest_cmds = [['set fwd rxonly'], ['set verbose 1'], ['start']]
        config_layers = {'ether': {'src': self.src_mac},
                         'etag': {'ECIDbase': 1000}}
        # both PF and VF should report ethertype 0x893f for the E-tag frame
        pkt_types = {'ETAG_UDP': {'dut': ['type=0x893f'],
                                  'vm': ['type=0x893f'],
                                  'layer_configs': config_layers}}
        self.preset_test_enviroment()
        self.execute_host_testpmd_cmd(host_cmds)
        self.execute_guest_testpmd_cmd(guest_cmds)
        self.check_packet_transmission(pkt_types)

    def test_etag_filter(self):
        '''
        when E-tag packet forwarding and add E-tag on VF0
        '''
        test_types = ['etag_pf', 'etag_remove', 'etag_vf_0', 'etag_vf_1']
        self.preset_test_enviroment()
        self.execute_host_testpmd_cmd(
            [['port config 0 l2-tunnel E-tag enable'],
             ['E-tag set forwarding on port 0']])
        for test_type in test_types:
            guest_cmds = [['set fwd rxonly'], ['set verbose 1'], ['start']]
            if test_type == 'etag_pf':
                # Same E-tag forwarding to PF0, Send 802.1BR packet with
                # broadcast mac and check packet only received on PF
                host_cmds = [
                    ['E-tag set filter add e-tag-id 1000 dst-pool 2 port 0'],
                    ['set fwd mac'], ['set verbose 1'], ['start']]
                # set packet type and its expecting result
                config_layers = {'ether': {'src': self.src_mac,
                                           'dst': self.dst_mac},
                                 'etag': {'ECIDbase': 1000}}
                pkt_types = {'ETAG_UDP': {'dut': ['type=0x893f'],
                                          'layer_configs': config_layers}}
            elif test_type == 'etag_remove':
                # Remove E-tag, Send 802.1BR packet with broadcast mac and
                # check packet not received
                host_cmds = [['E-tag set filter del e-tag-id 1000 port 0'],
                             ['set fwd rxonly'], ['set verbose 1'],
                             ['start']]
                config_layers = {'ether': {'src': self.src_mac},
                                 'etag': {'ECIDbase': 1000}}
                pkt_types = {'ETAG_UDP': {'vm': [''], 'dut': [''],
                                          'layer_configs': config_layers}}
            else:
                # Same E-tag forwarding to VF0, Send 802.1BR packet with
                # broadcast mac and check packet only received on VF0 or VF1
                host_cmds = [
                    ['E-tag set filter add e-tag-id 1000 dst-pool %d port 0'
                     % int(test_type[-1:])],
                    ['set fwd rxonly'], ['set verbose 1'], ['start']]
                config_layers = {'ether': {'src': self.src_mac},
                                 'etag': {'ECIDbase': 1000}}
                pkt_types = {'ETAG_UDP': {'vm': ['type=0x893f'],
                                          'layer_configs': config_layers}}
            self.execute_host_testpmd_cmd(host_cmds)
            self.execute_guest_testpmd_cmd(guest_cmds)
            self.check_packet_transmission(pkt_types)
        self.host_testpmd.execute_cmd('E-tag set forwarding off port 0')

    def test_etag_insertion(self):
        '''
        When E-tag insertion enable in VF0
        '''
        host_cmds = [['port config 0 l2-tunnel E-tag enable'],
                     ['E-tag set insertion on port-tag-id 1000 port 0 vf 0'],
                     ['set fwd mac'], ['set verbose 1'], ['start']]
        guest_cmds = [['set fwd mac'], ['set verbose 1'], ['start']]
        self.preset_test_enviroment()
        self.execute_host_testpmd_cmd(host_cmds)
        self.execute_guest_testpmd_cmd(guest_cmds)
        self.vm0_dut_ports = self.vm_dut_0.get_ports('any')

        config_layers = {'ether': {'src': self.src_mac}}
        pkt_types = {'IP_RAW': {'layer_configs': config_layers}}
        intf = self.src_intf
        # sniff the forwarded traffic on the tester side
        inst = self.tester.tcpdump_sniff_packets(intf)
        self.check_packet_transmission(pkt_types)
        time.sleep(1)
        pkts = self.tester.load_tcpdump_sniff_packets(inst)
        self.host_testpmd.execute_cmd(
            'E-tag set insertion off port-tag-id 1000 port 0 vf 0')

        # load sniff pcap file, check received packet's content
        packetContentFile = "/tmp/packetContent.log"
        pcap_file = "/tmp/sniff_%s.pcap" % intf
        fp = open(packetContentFile, 'w')
        backup_out = sys.stdout
        sys.stdout = fp
        pkts = rdpcap(pcap_file)
        pkts.show()
        fp.close()
        sys.stdout = backup_out
        fp = open(packetContentFile, 'r')
        out = fp.read()
        fp.close()
        if self.printFlag:  # debug output
            print(out)
        # inserted E-tag shows up as a Dot1BR layer in the capture
        self.verify("Dot1BR" in out,
                    "tester %s hasn't receiver etag packet" % intf)

    def test_etag_strip(self):
        '''
        When E-tag strip enable on PF
        '''
        host_cmds = [['port config 0 l2-tunnel E-tag enable'],
                     ['set fwd rxonly'], ['set verbose 1'], ['start']]
        guest_cmds = [['set fwd rxonly'], ['set verbose 1'], ['start']]
        config_layers = {'ether': {'src': self.src_mac},
                         'etag': {'ECIDbase': 1000}}
        pkt_types_on = {'ETAG_UDP': {'vm': ['type=0x0800', 'type=0x893f'],
                                     'layer_configs': config_layers}}
        pkt_types_off = {'ETAG_UDP': {'vm': ['type=0x893f', 'type=0x893f'],
                                      'layer_configs': config_layers}}
        self.preset_test_enviroment()
        self.execute_host_testpmd_cmd(host_cmds)
        self.execute_guest_testpmd_cmd(guest_cmds)
        # Enable E-tag strip on PF, Send 802.1BR packet to VF and check
        # forwarded packet without E-tag
        self.host_testpmd.execute_cmd('E-tag set stripping on port 0')
        self.check_packet_transmission(pkt_types_on)
        # Disable E-tag strip on PF, Send 802.1BR packet and check
        # forwarded packet with E-tag
        self.host_testpmd.execute_cmd('E-tag set stripping off port 0')
        self.check_packet_transmission(pkt_types_off)

    def tear_down(self):
        pass

    def tear_down_all(self):
        """
        Run after the suite: destroy the VM environment, the SRIOV VFs and
        any leftover qemu processes on the tester.
        """
        if self.setup_vm_env_flag == 1:
            self.destroy_vm_env()
        if getattr(self, 'vm0', None):
            self.vm0.stop()
        for port_id in self.dut_ports:
            self.dut.destroy_sriov_vfs_by_port(port_id)
        self.tester.send_expect(
            "kill -9 $(ps aux | grep -i qemu | grep -v grep | awk {'print $2'})",
            '# ', 5)
class TestTX_preparation(TestCase):
    #
    # Test cases.
    #

    def set_up_all(self):
        """
        Run at the start of each test suite: disable every offload on the
        tester peer interface (so it does not interfere with the DUT's TX
        preparation) and raise its MTU for jumbo/TSO frames.
        """
        self.ports = self.dut.get_ports(self.nic)
        self.verify(len(self.ports) >= 1, "Insufficient number of ports.")
        self.used_dut_port = self.ports[0]
        tester_port = self.tester.get_local_port(self.used_dut_port)
        self.tester_intf = self.tester.get_interface(tester_port)

        out = self.tester.send_expect(
            "ethtool -K %s rx off tx off tso off gso off gro off lro off"
            % self.tester_intf, "#")
        if "Cannot change large-receive-offload" in out:
            # NIC without toggleable LRO: retry without the lro flag
            self.tester.send_expect(
                "ethtool -K %s rx off tx off tso off gso off gro off"
                % self.tester_intf, "#")
        self.tester.send_expect(
            "ifconfig %s mtu %s" % (self.tester_intf, Max_mtu), "#")

    def set_up(self):
        """
        Run before each test case: start testpmd with the TX-prepare
        offload and turn on IP/TCP/UDP hardware checksum.
        """
        self.dut_testpmd = PmdOutput(self.dut)
        # use one port test the case
        self.dut_testpmd.start_testpmd(
            "Default",
            " --portmask=1 --port-topology=chained --max-pkt-len=%s"
            " --tx-offloads=0x8000" % Max_mtu)
        self.dmac = self.dut_testpmd.get_port_mac(0)
        self.dut_testpmd.execute_cmd('set fwd csum')
        self.dut_testpmd.execute_cmd('set verbose 1')
        # enable ip/udp/tcp hardware checksum
        self.dut_testpmd.execute_cmd('port stop all')
        self.dut_testpmd.execute_cmd('csum set ip hw 0')
        self.dut_testpmd.execute_cmd('csum set tcp hw 0')
        self.dut_testpmd.execute_cmd('csum set udp hw 0')

    def start_tcpdump(self, rxItf):
        """
        Capture only frames sent by the DUT, filtering out LLDP.
        """
        # only sniff form dut packet and filter lldp packet
        param = "ether[12:2]!=0x88cc and ether src %s" % self.dmac
        self.tester.send_expect("rm -rf ./getPackageByTcpdump.cap", "#")
        self.tester.send_expect(
            "tcpdump %s -i %s -n -e -vv -w ./getPackageByTcpdump.cap"
            " 2> /dev/null& " % (param, rxItf), "#")

    def get_tcpdump_package(self):
        """
        Stop the sniffer and return the decoded capture text.
        """
        self.tester.send_expect("killall tcpdump", "#")
        return self.tester.send_expect(
            "tcpdump -nn -e -v -r ./getPackageByTcpdump.cap", "#")

    def send_packet_verify(self, tsoflag=0):
        """
        Send one packet of every type to the DUT and verify from the
        tcpdump capture that checksums were fixed up by TX preparation;
        with tsoflag set, additionally verify TSO segment counts/sizes.
        """
        LrgLength = random.randint(Normal_mtu, Max_mtu - 100)
        pkts = {
            'IPv4/cksum TCP':
                'Ether(dst="%s")/IP()/TCP(flags=0x10) /Raw(RandString(50))'
                % self.dmac,
            'IPv4/bad IP cksum':
                'Ether(dst="%s")/IP(chksum=0x1234) /TCP(flags=0x10)/Raw(RandString(50))'
                % self.dmac,
            'IPv4/bad TCP cksum':
                'Ether(dst="%s")/IP()/TCP(flags=0x10, chksum=0x1234)/Raw(RandString(50))'
                % self.dmac,
            'IPv4/large pkt':
                'Ether(dst="%s")/IP()/TCP(flags=0x10) /Raw(RandString(%s))'
                % (self.dmac, LrgLength),
            'IPv4/bad cksum/large pkt':
                'Ether(dst="%s")/IP(chksum=0x1234) /TCP(flags=0x10,chksum=0x1234)/Raw(RandString(%s))'
                % (self.dmac, LrgLength),
            'IPv6/cksum TCP':
                'Ether(dst="%s")/IPv6()/TCP(flags=0x10) /Raw(RandString(50))'
                % self.dmac,
            'IPv6/cksum UDP':
                'Ether(dst="%s")/IPv6()/UDP() /Raw(RandString(50))'
                % self.dmac,
            'IPv6/bad TCP cksum':
                'Ether(dst="%s")/IPv6()/TCP(flags=0x10, chksum=0x1234)/Raw(RandString(50))'
                % self.dmac,
            'IPv6/large pkt':
                'Ether(dst="%s")/IPv6()/TCP(flags=0x10) /Raw(RandString(%s))'
                % (self.dmac, LrgLength)}

        for packet_type in pkts.keys():
            self.start_tcpdump(self.tester_intf)
            self.tester.scapy_append(
                'sendp([%s], iface="%s")' % (pkts[packet_type],
                                             self.tester_intf))
            self.tester.scapy_execute()
            out = self.get_tcpdump_package()
            if packet_type == 'IPv6/cksum UDP':
                self.verify("udp sum ok" in out,
                            "Failed to check UDP checksum correctness!!!")
            else:
                self.verify("cksum" in out,
                            "Failed to check IP/TCP checksum!!!")
                self.verify(
                    "correct" in out and "incorrect" not in out,
                    "Failed to check IP/TCP/UDP checksum correctness!!!")

            if tsoflag == 1 and packet_type in [
                    'IPv4/large pkt', 'IPv6/large pkt',
                    'IPv4/bad cksum/large pkt']:
                # payload should be cut into segnum full segments plus one
                # trailing segment of LastLength bytes
                segnum = LrgLength / TSO_value
                LastLength = LrgLength % TSO_value
                num = out.count('length %s' % TSO_value)
                self.verify(
                    "length %s" % TSO_value in out and num == segnum,
                    "Failed to verify TSO correctness for large packets!!!")
                if LastLength != 0:
                    num = out.count('length %s' % LastLength)
                    self.verify(
                        "length %s" % LastLength in out and num == 1,
                        "Failed to verify TSO correctness for large packets!!!")

    def test_tx_preparation_NonTSO(self):
        """
        Verify checksum TX preparation with TSO disabled.
        """
        self.dut_testpmd.execute_cmd('tso set 0 0')
        self.dut_testpmd.execute_cmd('port start all')
        self.dut_testpmd.execute_cmd('start')
        self.send_packet_verify()
        self.dut_testpmd.execute_cmd('stop')
        self.dut_testpmd.quit()

    def test_tx_preparation_TSO(self):
        """
        Verify checksum TX preparation with TSO enabled.
        """
        self.dut_testpmd.execute_cmd('tso set %s 0' % TSO_value)
        self.dut_testpmd.execute_cmd('port start all')
        self.dut_testpmd.execute_cmd('start')
        self.send_packet_verify(1)
        self.dut_testpmd.execute_cmd('stop')
        self.dut_testpmd.quit()

    def tear_down(self):
        """
        Run after each test case.
        """
        pass

    def tear_down_all(self):
        """
        Run after each test suite: restore the tester MTU and kill
        leftover DUT processes.
        """
        self.tester.send_expect(
            "ifconfig %s mtu %s" % (self.tester_intf, Normal_mtu), "#")
        self.dut.kill_all()
class TestVfVlan(TestCase):
    # VF drivers this suite knows how to hand to a VM
    supported_vf_driver = ['pci-stub', 'vfio-pci']

    def set_up_all(self):
        """
        Suite prerequisites: two DUT ports; resolve the VF driver and the
        matching device-assignment method from the suite configuration.
        """
        self.dut_ports = self.dut.get_ports(self.nic)
        self.verify(len(self.dut_ports) > 1, "Insufficient ports")
        self.vm0 = None
        self.env_done = False

        # set vf assign method and vf driver
        self.vf_driver = self.get_suite_cfg()['vf_driver']
        if self.vf_driver is None:
            self.vf_driver = 'pci-stub'
        self.verify(self.vf_driver in self.supported_vf_driver,
                    "Unspported vf driver")
        if self.vf_driver == 'pci-stub':
            self.vf_assign_method = 'pci-assign'
        else:
            self.vf_assign_method = 'vfio-pci'
            self.dut.send_expect('modprobe vfio-pci', '#')

    def set_up(self):
        self.setup_vm_env()

    def bind_nic_driver(self, ports, driver=""):
        """
        Bind the given DUT ports to the requested driver; an empty driver
        string means each port's own default kernel driver.
        """
        # FIX: previously the loop reassigned the 'driver' parameter with
        # the port's current driver, shadowing the requested one; use a
        # separate local for the current binding instead.
        if driver == "vfio-pci":
            # modprobe vfio driver
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                if netdev.get_nic_driver() != 'vfio-pci':
                    netdev.bind_driver(driver='vfio-pci')
        elif driver == "igb_uio":
            # igb_uio should insmod as default, no need to check
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                if netdev.get_nic_driver() != 'igb_uio':
                    netdev.bind_driver(driver='igb_uio')
        else:
            for port in ports:
                netdev = self.dut.ports_info[port]['port']
                driver_now = netdev.get_nic_driver()
                target = driver if driver != "" else netdev.default_driver
                if target != driver_now:
                    netdev.bind_driver(driver=target)

    def setup_vm_env(self, driver='default'):
        """
        Create testing environment with 2VFs generated from 2PFs, set a
        fixed MAC on each VF, and boot VM0 with both VFs assigned.
        """
        if self.env_done:
            return
        # bind to default driver
        self.bind_nic_driver(self.dut_ports[:2], driver="")

        self.used_dut_port_0 = self.dut_ports[0]
        self.host_intf0 = self.dut.ports_info[self.used_dut_port_0]['intf']
        tester_port = self.tester.get_local_port(self.used_dut_port_0)
        self.tester_intf0 = self.tester.get_interface(tester_port)
        self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 1,
                                            driver=driver)
        self.sriov_vfs_port_0 = \
            self.dut.ports_info[self.used_dut_port_0]['vfs_port']
        self.vf0_mac = "00:10:00:00:00:00"
        self.dut.send_expect(
            "ip link set %s vf 0 mac %s" % (self.host_intf0, self.vf0_mac),
            "# ")

        self.used_dut_port_1 = self.dut_ports[1]
        self.host_intf1 = self.dut.ports_info[self.used_dut_port_1]['intf']
        self.dut.generate_sriov_vfs_by_port(self.used_dut_port_1, 1,
                                            driver=driver)
        self.sriov_vfs_port_1 = \
            self.dut.ports_info[self.used_dut_port_1]['vfs_port']
        tester_port = self.tester.get_local_port(self.used_dut_port_1)
        self.tester_intf1 = self.tester.get_interface(tester_port)
        self.vf1_mac = "00:20:00:00:00:00"
        self.dut.send_expect(
            "ip link set %s vf 0 mac %s" % (self.host_intf1, self.vf1_mac),
            "# ")

        try:
            for port in self.sriov_vfs_port_0:
                port.bind_driver(self.vf_driver)
            for port in self.sriov_vfs_port_1:
                port.bind_driver(self.vf_driver)
            time.sleep(1)
            vf0_prop = {'opt_host': self.sriov_vfs_port_0[0].pci}
            vf1_prop = {'opt_host': self.sriov_vfs_port_1[0].pci}

            # set up VM0 ENV
            self.vm0 = VM(self.dut, 'vm0', 'vf_vlan')
            self.vm0.set_vm_device(driver=self.vf_assign_method, **vf0_prop)
            self.vm0.set_vm_device(driver=self.vf_assign_method, **vf1_prop)
            self.vm_dut_0 = self.vm0.start()
            if self.vm_dut_0 is None:
                raise Exception("Set up VM0 ENV failed!")
        except Exception as e:
            self.destroy_vm_env()
            raise Exception(e)
        self.env_done = True

    def destroy_vm_env(self):
        """
        Stop VM0, destroy the SRIOV VFs and rebind PFs to their default
        drivers.
        """
        if getattr(self, 'vm0', None):
            if getattr(self, 'vm_dut_0', None):
                self.vm_dut_0.kill_all()
            self.vm0_testpmd = None
            self.vm0_dut_ports = None
            # destroy vm0
            self.vm0.stop()
            self.dut.virt_exit()
            self.vm0 = None
        if getattr(self, 'used_dut_port_0', None) != None:
            self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_0)
            port = self.dut.ports_info[self.used_dut_port_0]['port']
            self.used_dut_port_0 = None
        if getattr(self, 'used_dut_port_1', None) != None:
            self.dut.destroy_sriov_vfs_by_port(self.used_dut_port_1)
            port = self.dut.ports_info[self.used_dut_port_1]['port']
            self.used_dut_port_1 = None
        self.bind_nic_driver(self.dut_ports[:2], driver='default')
        self.env_done = False

    def test_pvid_vf_tx(self):
        """
        Add port based vlan on vf device and check vlan tx work
        """
        random_vlan = random.randint(1, MAX_VLAN)
        self.dut.send_expect(
            "ip link set %s vf 0 vlan %d" % (self.host_intf0, random_vlan),
            "# ")
        out = self.dut.send_expect("ip link show %s" % self.host_intf0, "# ")
        self.verify("vlan %d" % random_vlan in out,
                    "Failed to add pvid on VF0")

        self.vm0_dut_ports = self.vm_dut_0.get_ports('any')
        self.vm0_testpmd = PmdOutput(self.vm_dut_0)
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK)
        self.vm0_testpmd.execute_cmd('set fwd mac')
        self.vm0_testpmd.execute_cmd('start')

        pkt = Packet(pkt_type='UDP')
        pkt.config_layer('ether', {'dst': self.vf1_mac})
        inst = self.tester.tcpdump_sniff_packets(self.tester_intf0,
                                                 timeout=5)
        pkt.send_pkt(tx_port=self.tester_intf1)
        pkts = self.tester.load_tcpdump_sniff_packets(inst)
        self.verify(len(pkts), "Not receive expected packet")
        self.vm0_testpmd.quit()
        # disable pvid
        self.dut.send_expect(
            "ip link set %s vf 0 vlan 0" % (self.host_intf0), "# ")

    def send_and_getout(self, vlan=0, pkt_type="UDP"):
        """
        Send one UDP or VLAN_UDP packet to VF0 and return the guest
        session output.
        """
        if pkt_type == "UDP":
            pkt = Packet(pkt_type='UDP')
            pkt.config_layer('ether', {'dst': self.vf0_mac})
        elif pkt_type == "VLAN_UDP":
            pkt = Packet(pkt_type='VLAN_UDP')
            pkt.config_layer('vlan', {'vlan': vlan})
            pkt.config_layer('ether', {'dst': self.vf0_mac})
        pkt.send_pkt(tx_port=self.tester_intf0)
        out = self.vm_dut_0.get_session_output(timeout=2)
        return out

    def test_add_pvid_vf(self):
        """
        With a pvid set on VF0, only matching-vlan packets must reach the
        VF; after removal normal and vlan-0 traffic must pass again.
        """
        random_vlan = random.randint(1, MAX_VLAN)
        self.dut.send_expect(
            "ip link set %s vf 0 vlan %d" % (self.host_intf0, random_vlan),
            "# ")
        out = self.dut.send_expect("ip link show %s" % self.host_intf0, "# ")
        self.verify("vlan %d" % random_vlan in out,
                    "Failed to add pvid on VF0")

        # start testpmd in VM
        self.vm0_dut_ports = self.vm_dut_0.get_ports('any')
        self.vm0_testpmd = PmdOutput(self.vm_dut_0)
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK)
        self.vm0_testpmd.execute_cmd('set fwd rxonly')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')

        out = self.send_and_getout(vlan=random_vlan, pkt_type="VLAN_UDP")
        self.verify("received" in out, "Failed to received vlan packet!!!")

        # send packet without vlan
        out = self.send_and_getout(pkt_type="UDP")
        self.verify("received" not in out, "Received packet without vlan!!!")

        # send packet with vlan not matched
        # FIX: (random_vlan + 1) % 4096 yields vlan 0 when random_vlan is
        # MAX_VLAN, and vlan-0 frames are accepted by the pvid filter, so
        # the negative check failed spuriously. This form is never 0 and
        # never equals random_vlan.
        wrong_vlan = random_vlan % MAX_VLAN + 1
        out = self.send_and_getout(vlan=wrong_vlan, pkt_type="VLAN_UDP")
        self.verify("received" not in out,
                    "Received pacekt with wrong vlan!!!")

        # remove vlan
        self.dut.send_expect(
            "ip link set %s vf 0 vlan 0" % self.host_intf0, "# ")
        # send packet with vlan
        out = self.send_and_getout(vlan=random_vlan, pkt_type="VLAN_UDP")
        if self.kdriver == "i40e":
            self.verify("received" in out,
                        "Failed to received vlan packet!!!")
        else:
            self.verify("received" not in out,
                        "Received vlan packet without pvid!!!")

        # send packet with vlan 0
        out = self.send_and_getout(vlan=0, pkt_type="VLAN_UDP")
        self.verify("received" in out, "Not recevied packet with vlan 0!!!")
        # send packet without vlan
        out = self.send_and_getout(vlan=0, pkt_type="UDP")
        self.verify("received" in out, "Not received packet without vlan!!!")

        self.vm0_testpmd.quit()
        # disable pvid
        self.dut.send_expect(
            "ip link set %s vf 0 vlan 0" % (self.host_intf0), "# ")

    def tx_and_check(self, tx_vlan=1):
        """
        Fire one burst from guest testpmd and verify the sniffed packet
        carries tx_vlan.
        """
        inst = self.tester.tcpdump_sniff_packets(self.tester_intf0,
                                                 timeout=5)
        self.vm0_testpmd.execute_cmd('set burst 1')
        self.vm0_testpmd.execute_cmd('start tx_first')
        self.vm0_testpmd.execute_cmd('stop')

        # strip sniffered vlans
        pkts = self.tester.load_tcpdump_sniff_packets(inst)
        vlans = []
        for pkt in pkts:
            vlan = pkt.strip_element_vlan("vlan")
            vlans.append(vlan)
        self.verify(tx_vlan in vlans,
                    "Tx packet with vlan not received!!!")

    def test_vf_vlan_tx(self):
        """
        Verify vlan insertion on VF tx side for several vlan ids.
        """
        self.verify(self.kdriver not in ["ixgbe"],
                    "NIC Unsupported: " + str(self.nic))
        random_vlan = random.randint(1, MAX_VLAN)
        tx_vlans = [1, random_vlan, MAX_VLAN]
        # start testpmd in VM
        self.vm0_dut_ports = self.vm_dut_0.get_ports('any')
        self.vm0_testpmd = PmdOutput(self.vm_dut_0)
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK)
        self.vm0_testpmd.execute_cmd('set verbose 1')

        for tx_vlan in tx_vlans:
            # for fortville ,
            # if you want insert tx_vlan,
            # please enable rx_vlan at the same time
            if self.kdriver == "i40e":
                self.vm0_testpmd.execute_cmd('rx_vlan add %d 0' % tx_vlan)
            self.vm0_testpmd.execute_cmd('stop')
            self.vm0_testpmd.execute_cmd('port stop all')
            self.vm0_testpmd.execute_cmd('tx_vlan set 0 %d' % tx_vlan)
            self.vm0_testpmd.execute_cmd('port start all')
            self.tx_and_check(tx_vlan=tx_vlan)

        self.vm0_testpmd.quit()

    def test_vf_vlan_rx(self):
        """
        Verify VF vlan rx filtering: with the filter on, only vlans added
        via rx_vlan must be received.
        """
        random_vlan = random.randint(1, MAX_VLAN - 1)
        rx_vlans = [1, random_vlan, MAX_VLAN]
        # start testpmd in VM
        self.vm0_dut_ports = self.vm_dut_0.get_ports('any')
        self.vm0_testpmd = PmdOutput(self.vm_dut_0)
        self.vm0_testpmd.start_testpmd(VM_CORES_MASK)
        self.vm0_testpmd.execute_cmd('set fwd rxonly')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('vlan set strip on 0')
        self.vm0_testpmd.execute_cmd('vlan set filter on 0')
        self.vm0_testpmd.execute_cmd("set promisc all off")
        self.vm0_testpmd.execute_cmd('start')

        # send packet without vlan
        out = self.send_and_getout(vlan=0, pkt_type="UDP")
        self.verify("received 1 packets" in out,
                    "Not received normal packet as default!!!")
        # send packet with vlan 0
        out = self.send_and_getout(vlan=0, pkt_type="VLAN_UDP")
        self.verify("VLAN tci=0x0" in out,
                    "Not received vlan 0 packet as default!!!")

        for rx_vlan in rx_vlans:
            self.vm0_testpmd.execute_cmd('rx_vlan add %d 0' % rx_vlan)
            time.sleep(1)
            # send packet with same vlan
            out = self.send_and_getout(vlan=rx_vlan, pkt_type="VLAN_UDP")
            vlan_hex = hex(rx_vlan)
            self.verify("VLAN tci=%s" % vlan_hex in out,
                        "Not received expected vlan packet!!!")
            # MAX_VLAN + 1 would wrap to 0; skip the wrong-vlan check there
            if rx_vlan == MAX_VLAN:
                continue
            wrong_vlan = (rx_vlan + 1) % 4096
            # send packet with wrong vlan
            out = self.send_and_getout(vlan=wrong_vlan, pkt_type="VLAN_UDP")
            self.verify("received 1 packets" not in out,
                        "Received filtered vlan packet!!!")

        for rx_vlan in rx_vlans:
            self.vm0_testpmd.execute_cmd('rx_vlan rm %d 0' % rx_vlan)
        # send packet with vlan 0
        out = self.send_and_getout(vlan=0, pkt_type="VLAN_UDP")
        self.verify("VLAN tci=0x0" in out,
                    "Not received vlan 0 packet as default!!!")
        # send packet without vlan
        out = self.send_and_getout(pkt_type="UDP")
        self.verify("received 1 packets" in out,
                    "Not received normal packet after remove vlan filter!!!")
        # send packet with vlan
        out = self.send_and_getout(vlan=random_vlan, pkt_type="VLAN_UDP")
        if self.kdriver == "i40e":
            self.verify(
                "received 1 packets" in out,
                "Received mismatched vlan packet while vlan filter on")
        else:
            self.verify(
                "received 1 packets" not in out,
                "Received mismatched vlan packet while vlan filter on")

        self.vm0_testpmd.quit()

    def test_vf_vlan_strip(self):
        """
        Verify VF vlan strip toggling: stripped packets expose the vlan in
        the mbuf (PKT_RX_VLAN_STRIPPED); without strip the flag is absent.
        """
        random_vlan = random.randint(1, MAX_VLAN - 1)
        rx_vlans = [1, random_vlan, MAX_VLAN]
        # start testpmd in VM
        self.vm0_dut_ports = self.vm_dut_0.get_ports('any')
        self.vm0_testpmd = PmdOutput(self.vm_dut_0)
        if self.kdriver == "i40e":
            self.vm0_testpmd.start_testpmd(VM_CORES_MASK, '')
        else:
            self.vm0_testpmd.start_testpmd(VM_CORES_MASK)
        self.vm0_testpmd.execute_cmd('set fwd rxonly')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')

        for rx_vlan in rx_vlans:
            self.vm0_testpmd.execute_cmd('vlan set strip on 0')
            self.vm0_testpmd.execute_cmd('vlan set filter on 0')
            self.vm0_testpmd.execute_cmd('rx_vlan add %d 0' % rx_vlan)
            time.sleep(1)
            out = self.send_and_getout(vlan=rx_vlan, pkt_type="VLAN_UDP")
            # enable strip, vlan will be in mbuf
            vlan_hex = hex(rx_vlan)
            self.verify("VLAN tci=%s" % vlan_hex in out,
                        "Failed to strip vlan packet!!!")
            self.verify("PKT_RX_VLAN_STRIPPED" in out,
                        "Failed to strip vlan packet!")

            self.vm0_testpmd.execute_cmd('vlan set strip off 0')
            out = self.send_and_getout(vlan=rx_vlan, pkt_type="VLAN_UDP")
            self.verify("received 1 packets" in out,
                        "Not received vlan packet as expected!!!")
            self.verify("PKT_RX_VLAN_STRIPPED" not in out,
                        "Failed to disable strip vlan!!!")

        self.vm0_testpmd.quit()

    def tear_down(self):
        self.destroy_vm_env()

    def tear_down_all(self):
        self.destroy_vm_env()
        pass
class TestExternalMempool(TestCase):

    def set_up_all(self):
        """
        Run at the start of each test suite.
        """
        self.dut_ports = self.dut.get_ports()
        self.verify(len(self.dut_ports) >= 2, "Not enough ports")
        self.pmdout = PmdOutput(self.dut)

    def set_up(self):
        """
        Run before each test case.
        """
        pass

    def change_mempool_ops(self, ops=''):
        """
        Select the default mbuf mempool ops in the DPDK build config and
        rebuild/install DPDK so the new handler takes effect.
        """
        self.dut.send_expect(
            "sed -i 's/CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS=.*$/"
            "CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS=\"%s\"/'"
            " ./config/common_base" % ops, "# ")
        self.dut.build_install_dpdk(self.target)

    def verify_unit_func(self):
        """
        Run the mempool functional unit test and require it to pass.
        """
        self.dut.send_expect("./%s/app/test -n 4 -c f" % self.target,
                             "R.*T.*E.*>.*>", 60)
        out = self.dut.send_expect("mempool_autotest", "RTE>>", 120)
        self.dut.send_expect("quit", "# ")
        self.verify("Test OK" in out, "Mempool autotest failed")

    def verify_unit_perf(self):
        """
        Run the mempool performance unit test and require it to pass.
        """
        self.dut.send_expect("./%s/app/test -n 4 -c f" % self.target,
                             "R.*T.*E.*>.*>", 60)
        out = self.dut.send_expect("mempool_perf_autotest", "RTE>>", 1200)
        self.dut.send_expect("quit", "# ")
        # may need to compare performance
        self.verify("Test OK" in out, "Mempool performance autotest failed")

    def verify_app_func(self):
        """
        Forward random traffic both ways through testpmd to prove mbuf
        allocation works with the configured mempool handler.
        """
        # start testpmd
        self.pmdout.start_testpmd("1S/2C/1T", "--portmask=0x3")
        self.pmdout.execute_cmd("set fwd mac")
        self.pmdout.execute_cmd("start")

        tgen_input = [
            (self.tester.get_local_port(self.dut_ports[0]),
             self.tester.get_local_port(self.dut_ports[1])),
            (self.tester.get_local_port(self.dut_ports[1]),
             self.tester.get_local_port(self.dut_ports[0]))]
        result = self.tester.check_random_pkts(tgen_input, allow_miss=False)
        self.pmdout.quit()
        self.verify(result is True,
                    "Mempool function check failed with testpmd")

    def test_mempool_handler_default(self):
        """
        Verify default mempool ops
        """
        self.verify_unit_func()
        self.verify_app_func()

    def test_mempool_handler_sp_sc(self):
        """
        Verify mempool single produce single customer ops
        """
        self.change_mempool_ops(ops='ring_sp_sc')
        self.verify_unit_func()
        self.verify_app_func()

    def test_mempool_handler_sp_mc(self):
        """
        Verify mempool single produce multiple customer ops
        """
        self.change_mempool_ops(ops='ring_sp_mc')
        self.verify_unit_func()
        self.verify_app_func()

    def test_mempool_handler_mp_sc(self):
        """
        Verify mempool multiple produce single customer ops
        """
        self.change_mempool_ops(ops='ring_mp_sc')
        self.verify_unit_func()
        self.verify_app_func()

    def test_mempool_handler_stack(self):
        """
        Verify external mempool handler stack ops
        """
        self.change_mempool_ops(ops='stack')
        self.verify_unit_func()
        self.verify_app_func()

    def tear_down(self):
        """
        Run after each test case.
        """
        self.dut.kill_all()
        pass

    def tear_down_all(self):
        """
        Run after each test suite: restore the default mp/mc ring ops.
        """
        self.change_mempool_ops(ops='ring_mp_mc')
class TestVfKernel(TestCase):
    """
    Kernel VF tests: a DPDK PF on the host creates SR-IOV VFs which are passed
    through to two QEMU VMs; the VFs there are driven by the kernel driver
    (with some cases rebinding them to igb_uio for DPDK-in-VM checks).
    """

    def set_up_all(self):
        """
        Run at the start of each test suite.
        """
        # NetworkManager would otherwise reconfigure the test interfaces.
        self.dut.send_expect("service network-manager stop", "#", 60)
        self.dut_ports = self.dut.get_ports(self.nic)
        self.verify(len(self.dut_ports) >= 1, "Insufficient ports")
        self.cores = self.dut.get_core_list("1S/4C/1T")
        self.coremask = utils.create_mask(self.cores)
        self.dmac = self.dut.get_mac_address(self.dut_ports[0])
        txport = self.tester.get_local_port(self.dut_ports[0])
        self.tester_intf = self.tester.get_interface(txport)
        self.tester_mac = self.tester.get_mac(txport)
        self.intf = self.dut.ports_info[self.dut_ports[0]]['intf']
        self.pci = self.dut.ports_info[self.dut_ports[0]]['pci'].split(':')
        # Fixed source MAC used to tag all generated pcap traffic.
        self.src_logo = '12:34:56:78:90:10'
        self.setup_vm_env()

    def set_up(self):
        """
        Run before each test case.
        """
        self.verify(self.check_pf_vf_link_status(self.vm0_dut, self.vm0_intf0),
                    "vf link down")
        pass

    def generate_pcap_pkt(self, macs, pktname='flow.pcap'):
        """
        Generate a pcap file on the tester containing one packet per MAC in
        *macs*, each with src_logo as the source MAC.

        NOTE(review): the pktname parameter is unused; the file name is
        hard-coded to flow.pcap below.
        """
        pkts = ''
        for mac in macs:
            pkt = "Ether(dst='%s',src='%s')/IP()/Raw(load='X'*18)," % (
                mac, self.src_logo)
            pkts += pkt
        self.tester.send_expect("rm -rf flow.pcap", "#", 10)
        self.tester.scapy_append('wrpcap("flow.pcap", [%s])' % pkts)
        self.tester.scapy_execute()

    def setup_vm_env(self):
        """
        1pf -> 6vfs , 4vf->vm0, 2vf->vm1
        """
        self.used_dut_port = self.dut_ports[0]
        self.dut.generate_sriov_vfs_by_port(self.used_dut_port, 6,
                                            driver='igb_uio')
        self.sriov_vfs_port = self.dut.ports_info[
            self.used_dut_port]['vfs_port']
        # pci-stub so the VFs can be handed to QEMU via pci-assign.
        for port in self.sriov_vfs_port:
            port.bind_driver('pci-stub')
        time.sleep(1)
        self.dut_testpmd = PmdOutput(self.dut)
        time.sleep(1)
        self.dut_testpmd.start_testpmd(
            "Default", "--rxq=4 --txq=4 --port-topology=chained")
        # dpdk-2208
        # since there is no forward engine on DPDK PF to forward or drop packet
        # in packet pool, so finally the pool will be full, then no more packet
        # will be received by VF
        self.dut_testpmd.execute_cmd("start")
        time.sleep(5)
        vf0_prop_1 = {'opt_host': self.sriov_vfs_port[0].pci}
        vf0_prop_2 = {'opt_host': self.sriov_vfs_port[1].pci}
        vf0_prop_3 = {'opt_host': self.sriov_vfs_port[2].pci}
        vf0_prop_4 = {'opt_host': self.sriov_vfs_port[3].pci}
        self.vm0 = QEMUKvm(self.dut, 'vm0', 'vf_kernel')
        self.vm0.set_vm_device(driver='pci-assign', **vf0_prop_1)
        self.vm0.set_vm_device(driver='pci-assign', **vf0_prop_2)
        self.vm0.set_vm_device(driver='pci-assign', **vf0_prop_3)
        self.vm0.set_vm_device(driver='pci-assign', **vf0_prop_4)
        try:
            self.vm0_dut = self.vm0.start()
            if self.vm0_dut is None:
                raise Exception("Set up VM ENV failed")
            else:
                self.verify(self.vm0_dut.ports_info[0]['intf'] != 'N/A',
                            "Not interface")
        except Exception as e:
            # NOTE(review): unlike the VM1 path below, this only logs and does
            # not re-raise, so setup continues with vm0_dut unset — confirm
            # whether this swallow is intentional.
            self.destroy_vm_env()
            self.logger.error("Failure for %s" % str(e))
        vf1_prop_5 = {'opt_host': self.sriov_vfs_port[4].pci}
        vf1_prop_6 = {'opt_host': self.sriov_vfs_port[5].pci}
        self.vm1 = QEMUKvm(self.dut, 'vm1', 'vf_kernel')
        self.vm1.set_vm_device(driver='pci-assign', **vf1_prop_5)
        self.vm1.set_vm_device(driver='pci-assign', **vf1_prop_6)
        try:
            self.vm1_dut = self.vm1.start()
            if self.vm1_dut is None:
                raise Exception("Set up VM1 ENV failed!")
            else:
                # fortville: PF not up ,vf will not get interface
                self.verify(self.vm1_dut.ports_info[0]['intf'] != 'N/A',
                            "Not interface")
        except Exception as e:
            self.destroy_vm_env()
            raise Exception(e)
        self.vm0_testpmd = PmdOutput(self.vm0_dut)
        self.vm1_testpmd = PmdOutput(self.vm1_dut)
        self.vm0_vf0_mac = self.vm0_dut.get_mac_address(0)
        self.vm0_vf1_mac = self.vm0_dut.get_mac_address(1)
        self.vm0_vf2_mac = self.vm0_dut.get_mac_address(2)
        self.vm0_vf3_mac = self.vm0_dut.get_mac_address(3)
        self.vm1_vf0_mac = self.vm1_dut.get_mac_address(0)
        self.vm1_vf1_mac = self.vm1_dut.get_mac_address(1)
        self.vm0_intf0 = self.vm0_dut.ports_info[0]['intf']
        self.vm0_intf1 = self.vm0_dut.ports_info[1]['intf']
        self.vm1_intf0 = self.vm1_dut.ports_info[0]['intf']
        # Rebind VM ports back to the kernel driver for the kernel-VF cases.
        self.vm0_dut.restore_interfaces_linux()
        self.vm1_dut.restore_interfaces_linux()
        # stop NetworkManager, this is for centos7
        # you may change it when the os does not support it
        self.vm0_dut.send_expect("systemctl stop NetworkManager", "# ", 60)
        self.vm1_dut.send_expect("systemctl stop NetworkManager", "# ", 60)

    def destroy_vm_env(self):
        """
        destroy vm environment
        """
        if getattr(self, 'vm0', None):
            self.vm0_dut.kill_all()
            self.vm0_dut_ports = None
            # destroy vm0
            self.vm0.stop()
            self.vm0 = None
        if getattr(self, 'vm1', None):
            self.vm1_dut.kill_all()
            self.vm1_dut_ports = None
            # destroy vm1
            self.vm1.stop()
            self.vm1 = None
        self.dut.virt_exit()
        if getattr(self, 'used_dut_port', None) != None:
            self.dut.destroy_sriov_vfs_by_port(self.used_dut_port)
            port = self.dut.ports_info[self.used_dut_port]['port']
            self.used_dut_port = None

    def test_link(self):
        """
        verify the link state
        """
        for i in range(5):
            # pf up + vf up -> vf up
            self.vm0_dut.send_expect("ifconfig %s up" % self.vm0_intf0, "#")
            out = self.vm0_dut.send_expect("ethtool %s" % self.vm0_intf0, "#")
            self.verify("Link detected: yes" in out, "Wrong link status")
            time.sleep(3)
            # pf up + vf down -> vf down
            self.vm0_dut.send_expect("ifconfig %s down" % self.vm0_intf0, "#")
            out = self.vm0_dut.send_expect("ethtool %s" % self.vm0_intf0, "#")
            self.verify("Link detected: no" in out, "Wrong link status")
            time.sleep(3)

    def ping4(self, session, intf, ipv4):
        """
        Using the given session, run "ping -I <intf> <ipv4>".
        Sometimes it fails transiently, so retry up to 5 times; return True on
        the first successful reply, False if all attempts fail.
        """
        for i in range(5):
            out = session.send_expect(
                "ping -w 5 -c 5 -A -I %s %s" % (intf, ipv4), "# ")
            if '64 bytes from' not in out:
                print GREEN("%s ping %s failed, retry" % (intf, ipv4))
            else:
                return True
        return False

    def test_ping(self):
        """
        verify the ping state
        """
        for i in range(5):
            random_ip = random.randint(2, 249)
            vm0_ip0 = "5.5.5.%d" % random_ip
            vm0_ip1 = "5.5.5.%d" % (random_ip + 1)
            pf_ip = "5.5.5.%d" % (random_ip + 2)
            # down-up link
            for port_info in self.vm0_dut.ports_info:
                vm0_intf = port_info['intf']
                self.verify(
                    self.check_pf_vf_link_status(self.vm0_dut, vm0_intf),
                    "VM0_vf: %s link down" % vm0_intf)
            self.vm0_dut.send_expect(
                "ifconfig %s %s netmask 255.255.255.0" %
                (self.vm0_intf0, vm0_ip0), "#")
            self.vm0_dut.send_expect(
                "ifconfig %s %s netmask 255.255.255.0" %
                (self.vm0_intf1, vm0_ip1), "#")
            self.tester.send_expect(
                "ifconfig %s %s netmask 255.255.255.0" %
                (self.tester_intf, pf_ip), "#")
            # pf ping vm0_vf0
            self.verify(self.ping4(self.tester, self.tester_intf, vm0_ip0),
                        "%s ping %s failed" % (self.tester_intf, vm0_ip0))
            # vm0_vf0 ping pf
            self.verify(self.ping4(self.vm0_dut, self.vm0_intf0, pf_ip),
                        "%s ping %s failed" % (self.vm0_intf0, pf_ip))
            # pf ping vm0_vf1
            self.verify(self.ping4(self.tester, self.tester_intf, vm0_ip1),
                        "%s ping %s failed" % (self.tester_intf, vm0_ip1))
            # vm0_vf1 ping pf
            self.verify(self.ping4(self.vm0_dut, self.vm0_intf1, pf_ip),
                        "%s ping %s failed" % (self.vm0_intf1, pf_ip))
            # clear ip
            self.vm0_dut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf0,
                                     "#")
            self.vm0_dut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf1,
                                     "#")
            self.tester.send_expect("ifconfig %s 0.0.0.0" % self.tester_intf,
                                    "#")

    def test_reset(self):
        """
        verify reset the vf1 impact on VF0
        """
        self.verify(self.check_pf_vf_link_status(self.vm0_dut, self.vm0_intf0),
                    "VM0_VF0 link up failed")
        self.verify(self.check_pf_vf_link_status(self.vm1_dut, self.vm1_intf0),
                    "VM1_VF0 link up failed")
        # Link down VF1 in VM1 and check no impact on VF0 status
        self.vm1_dut.send_expect("ifconfig %s down" % self.vm1_intf0, "#")
        out = self.vm0_dut.send_expect("ethtool %s" % self.vm0_intf0, "#")
        self.verify("Link detected: yes" in out, "Wrong link status")
        # Unload VF1 kernel driver and expect no problem for VF0
        self.vm1_dut.send_expect("rmmod %svf" % self.kdriver, "#")
        out = self.vm0_dut.send_expect("ethtool %s" % self.vm0_intf0, "#")
        self.verify("Link detected: yes" in out, "Wrong link status")
        vm0_vf0_mac = self.vm0_dut.ports_info[0]['port'].get_mac_addr()
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac),
            "Unload VF1 kernel driver impact VF0")
        self.verify(self.check_pf_vf_link_status(self.vm0_dut, self.vm0_intf0),
                    "vm0_vf0 link down")
        time.sleep(10)
        # Re-read the MAC: the PF restart can hand the VF a new random MAC.
        vm0_vf0_mac = self.vm0_dut.ports_info[0]['port'].get_mac_addr()
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac),
            "reset PF testpmd impact VF RX failure")
        self.vm1_dut.send_expect("modprobe %svf" % self.kdriver, "#")
        out = self.vm0_dut.send_expect("ethtool %s" % self.vm0_intf0, "#")
        self.verify("Link detected: yes" in out, "Wrong link status")
        vm0_vf0_mac = self.vm0_dut.ports_info[0]['port'].get_mac_addr()
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac),
            "load VF1 kernel driver impact VF0")
        self.vm1_dut.send_expect("rmmod %svf" % self.kdriver, "#")
        out = self.vm0_dut.send_expect("ethtool %s" % self.vm0_intf0, "#")
        self.verify("Link detected: yes" in out, "Wrong link status")
        vm0_vf0_mac = self.vm0_dut.ports_info[0]['port'].get_mac_addr()
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac),
            "Reset VF1 kernel driver impact VF0")
        self.vm1_dut.send_expect("modprobe %svf" % self.kdriver, "#")

    def test_address(self):
        """
        verify add/delete IP/MAC address
        """
        # ipv4 test:
        random_ip = random.randint(2, 249)
        vm0_ip0 = "5.5.5.%d" % random_ip
        pf_ip = "5.5.5.%d" % (random_ip + 2)
        self.vm0_dut.send_expect(
            "ifconfig %s %s netmask 255.255.255.0" %
            (self.vm0_intf0, vm0_ip0), "#")
        self.tester.send_expect(
            "ifconfig %s %s netmask 255.255.255.0" %
            (self.tester_intf, pf_ip), "#")
        # pf ping vm0_vf0
        self.verify(self.ping4(self.tester, self.tester_intf, vm0_ip0),
                    "%s ping %s failed" % (self.tester_intf, vm0_ip0))
        # vm0_vf0 ping pf
        self.verify(self.ping4(self.vm0_dut, self.vm0_intf0, pf_ip),
                    "%s ping %s failed" % (self.vm0_intf0, pf_ip))
        # clear ip
        self.vm0_dut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf0, "#")
        self.tester.send_expect("ifconfig %s 0.0.0.0" % self.tester_intf, "#")
        # ipv6 test:
        add_ipv6 = 'efdd::9fc8:6a6d:c232:f1c0'
        self.vm0_dut.send_expect(
            "ifconfig %s add %s" % (self.vm0_intf0, add_ipv6), "#")
        out = self.vm0_dut.send_expect("ifconfig %s " % self.vm0_intf0, "#",
                                       10)
        self.verify(add_ipv6 in out, "Failed to add ipv6 address")
        self.vm0_dut.send_expect(
            "ifconfig %s del %s" % (self.vm0_intf0, add_ipv6), "#")
        out = self.vm0_dut.send_expect("ifconfig %s " % self.vm0_intf0, "#",
                                       10)
        self.verify(add_ipv6 not in out, "Failed to del ipv6 address")
        # mac test:
        modify_mac = 'aa:bb:cc:dd:ee:ff'
        self.vm0_dut.send_expect(
            "ifconfig %s hw ether %s" % (self.vm0_intf0, modify_mac), "#")
        out = self.vm0_dut.send_expect("ifconfig %s " % self.vm0_intf0, "#",
                                       10)
        self.verify(modify_mac in out, "Failed to add mac address")
        time.sleep(5)
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, modify_mac),
            "modify mac address can't received packet")

    def verify_vm_tcpdump(self, vm_dut, intf, mac, pkt_lens=64, num=1,
                          vlan_id='', param=''):
        """
        Start tcpdump on *intf* inside the VM (filtering on packets whose
        ether src is the tester MAC), send *num* packets with dest MAC *mac*
        from the tester, and return True if tcpdump captured them.
        """
        vm_dut.send_expect(
            "tcpdump -i %s %s -e ether src %s" %
            (intf, param, self.tester_mac), "tcpdump", 10)
        self.send_packet(mac, pkt_lens, num, vlan_id)
        out = vm_dut.get_session_output(timeout=10)
        vm_dut.send_expect("^C", "#", 10)
        if self.tester_mac in out:
            return True
        else:
            return False

    def send_packet(self, mac, pkt_lens=64, num=1, vlan_id=''):
        """
        Send *num* packets from the tester to dest MAC *mac*: plain TCP when
        vlan_id is empty, otherwise VLAN-tagged UDP.
        """
        if vlan_id == '':
            pkt = Packet(pkt_type='TCP', pkt_len=pkt_lens)
            pkt.config_layer('ether', {'dst': mac, 'src': self.tester_mac})
            pkt.send_pkt(tx_port=self.tester_intf, count=num)
        else:
            pkt = Packet(pkt_type='VLAN_UDP', pkt_len=pkt_lens)
            pkt.config_layer('ether', {'dst': mac, 'src': self.tester_mac})
            pkt.config_layer('vlan', {'vlan': vlan_id})
            pkt.send_pkt(tx_port=self.tester_intf, count=num)

    def test_vlan(self):
        """
        verify add/delete vlan
        """
        vlan_ids = random.randint(1, 4095)
        self.vm0_dut.send_expect("ifconfig %s up" % self.vm0_intf0, "#")
        vm0_vf0_mac = self.vm0_dut.ports_info[0]['port'].get_mac_addr()
        self.vm0_dut.send_expect("modprobe 8021q", "#")
        out = self.vm0_dut.send_expect("lsmod |grep 8021q", "#")
        self.verify("8021q" in out, "modprobe 8021q failure")
        # Add random vlan id(0~4095) on kernel VF0
        self.vm0_dut.send_expect(
            "vconfig add %s %s" % (self.vm0_intf0, vlan_ids), "#")
        out = self.vm0_dut.send_expect("ls /proc/net/vlan/ ", "#")
        self.verify("%s.%s" % (self.vm0_intf0, vlan_ids) in out,
                    "take vlan id failure")
        # Send packet from tester to VF MAC with not-matching vlan id, check
        # the packet can't be received at the vlan device
        wrong_vlan = vlan_ids % 4095 + 1
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac,
                                   vlan_id='%d' % wrong_vlan) == False,
            "received wrong vlan packet")
        # Send packet from tester to VF MAC with matching vlan id, check the
        # packet can be received at the vlan device.
        # check_result = self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, self.vm0_vf0_mac, vlan_id='%d' %vlan_ids)
        check_result = self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0,
                                              vm0_vf0_mac,
                                              vlan_id='%d' % vlan_ids)
        self.verify(check_result,
                    "can't received vlan_id=%d packet" % vlan_ids)
        # Delete configured vlan device
        self.vm0_dut.send_expect(
            "vconfig rem %s.%s" % (self.vm0_intf0, vlan_ids), "#")
        out = self.vm0_dut.send_expect("ls /proc/net/vlan/ ", "#")
        self.verify("%s.%s" % (self.vm0_intf0, vlan_ids) not in out,
                    "vlan error")
        # behavior is different between niantic and fortville, because of the
        # kernel driver
        if self.nic.startswith('fortville'):
            self.verify(
                self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0,
                                       vm0_vf0_mac,
                                       vlan_id='%d' % vlan_ids) == True,
                "delete vlan error")
        else:
            self.verify(
                self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0,
                                       vm0_vf0_mac,
                                       vlan_id='%d' % vlan_ids) == False,
                "delete vlan error")

    def test_packet_statistic(self):
        """
        verify packet statistic
        """
        time.sleep(10)
        out = self.vm0_dut.send_expect("ethtool -S %s" % self.vm0_intf0, "#")
        rx_packets_before = re.findall("\s*rx.*packets:\s*(\d*)", out)
        nb_rx_pkts_before = 0
        for i in range(len(rx_packets_before)):
            nb_rx_pkts_before += int(rx_packets_before[i])
        vm0_vf0_mac = self.vm0_dut.ports_info[0]['port'].get_mac_addr()
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac,
                                   num=10), "VM reveive packet failed")
        out = self.vm0_dut.send_expect("ethtool -S %s" % self.vm0_intf0, "#")
        rx_packets_after = re.findall("\s*rx.*packets:\s*(\d*)", out)
        nb_rx_pkts_after = 0
        for i in range(len(rx_packets_after)):
            nb_rx_pkts_after += int(rx_packets_after[i])
        # Exactly the 10 sent packets should account for the counter delta.
        self.verify(nb_rx_pkts_after == 10 + nb_rx_pkts_before,
                    "rx_packets calculate error")

    def check_pf_vf_link_status(self, session, intf):
        """
        sometimes pf/vf will up abnormal, retry 5 times
        """
        for i in range(5):
            # down-up get new mac from pf,
            # because dpdk pf will give a random mac when dpdk pf restarts.
            session.send_expect("ifconfig %s down" % intf, "#")
            out = session.send_expect("ifconfig %s up" % intf, "#")
            # SIOCSIFFLAGS: Network is down
            # means the pf link is abnormal
            if "Network is down" in out:
                print GREEN(out)
                print GREEN("Try again")
                self.vm0_dut.restore_interfaces_linux()
            else:
                out = session.send_expect("ethtool %s" % intf, "#")
                if "Link detected: yes" in out:
                    return True
            time.sleep(1)
        return False

    def test_mtu(self):
        """
        verify mtu change
        HW limitation on 82599, need add '--max-pkt-len=<length>' on testpmd to
        set mtu value, all the VFs and PF share same MTU, the largest one take
        effect.
        """
        # NOTE(review): these two locals are never used; the self.vm0_intf*
        # attributes are used below instead.
        vm0_intf0 = self.vm0_dut.ports_info[0]['intf']
        vm0_intf1 = self.vm0_dut.ports_info[1]['intf']
        self.vm0_dut.send_expect("ifconfig %s up" % self.vm0_intf0, "#")
        out = self.vm0_dut.send_expect("ifconfig %s" % self.vm0_intf0, "#")
        self.verify('mtu 1500' in out, "modify MTU failed")
        self.tester.send_expect("ifconfig %s mtu 3000" % self.tester_intf, "#")
        self.dut_testpmd.execute_cmd('stop')
        self.dut_testpmd.execute_cmd('set promisc all off')
        self.dut_testpmd.execute_cmd('set fwd rxonly')
        self.dut_testpmd.execute_cmd('set verbose 1')
        self.dut_testpmd.execute_cmd('start')
        # Send one packet with length as 2000 with DPDK PF MAC as DEST MAC,
        # check that DPDK PF can't receive packet
        self.send_packet(self.dmac, pkt_lens=2000)
        out = self.dut.get_session_output(timeout=10)
        self.verify(self.dmac.upper() not in out, "PF receive error packet")
        # send one packet with length as 2000 with kernel VF MAC as DEST MAC,
        # check that Kernel VF can't receive packet
        vm0_vf0_mac = self.vm0_dut.ports_info[0]['port'].get_mac_addr()
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac,
                                   pkt_lens=2000) == False,
            "kernel VF receive error packet")
        # Change DPDK PF mtu as 3000,check no confusion/crash on kernel VF
        self.dut_testpmd.execute_cmd('stop')
        self.dut_testpmd.execute_cmd('port stop all')
        self.dut_testpmd.execute_cmd('port config mtu 0 3000')
        self.dut_testpmd.execute_cmd('port start all')
        self.dut_testpmd.execute_cmd('set promisc all off')
        self.dut_testpmd.execute_cmd('set fwd rxonly')
        self.dut_testpmd.execute_cmd('set verbose 1')
        self.dut_testpmd.execute_cmd('start')
        # wait for the vf to come up, because of the pf down-up
        self.verify(self.check_pf_vf_link_status(self.vm0_dut, self.vm0_intf0),
                    "VM0_VF0 link down")
        # clear output
        self.dut.get_session_output(timeout=10)
        # send one packet with length as 2000 with DPDK PF MAC as DEST MAC ,
        # check that DPDK PF can receive packet
        self.send_packet(self.dmac, pkt_lens=2000)
        out = self.dut.get_session_output(timeout=10)
        self.verify(self.dmac.upper() in out, "PF can't receive packet")
        # Change kernel VF mtu as 3000,check no confusion/crash on DPDK PF
        if self.nic.startswith('fortville'):
            self.vm0_dut.send_expect("ifconfig %s mtu 3000" % self.vm0_intf0,
                                     "#")
        # send one packet with length as 2000 with kernel VF MAC as DEST MAC,
        # check Kernel VF can receive packet
        vm0_vf0_mac = self.vm0_dut.ports_info[0]['port'].get_mac_addr()
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac,
                                   pkt_lens=2000), "VF can't receive packet")
        # Restore the default 1500 MTU on PF and VF.
        self.dut_testpmd.execute_cmd('stop')
        self.dut_testpmd.execute_cmd('port stop all')
        self.dut_testpmd.execute_cmd('port config mtu 0 1500')
        self.dut_testpmd.execute_cmd('port start all')
        self.dut_testpmd.execute_cmd('start')
        self.vm0_dut.send_expect("ifconfig %s mtu 1500" % self.vm0_intf0, "#",
                                 10)

    def test_promisc_mode(self):
        """
        verify Enable/disable promisc mode
        """
        self.verify(self.nic not in ["niantic"],
                    "%s NIC not support" % self.nic)
        wrong_mac = '01:02:03:04:05:06'
        # Set up kernel VF tcpdump with -p parameter, which means disable promisc
        # Start DPDK PF, enable promisc mode, set rxonly forwarding
        self.dut_testpmd.execute_cmd('stop')
        self.dut_testpmd.execute_cmd('set promisc all on')
        self.dut_testpmd.execute_cmd('start')
        self.verify(self.check_pf_vf_link_status(self.vm0_dut, self.vm0_intf0),
                    "VM0_VF0 link down")
        self.dut.get_session_output()
        # Send packet from tester to VF with correct DST MAC, check the packet
        # can be received by kernel VF
        vm0_vf0_mac = self.vm0_dut.ports_info[0]['port'].get_mac_addr()
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac),
            "VM reveive packet failed")
        # Send packet from tester to PF with correct DST MAC, check the packet
        # can be received by DPDK PF
        self.send_packet(self.dmac)
        out = self.dut.get_session_output()
        self.verify(self.tester_mac.upper() in out, "PF reveive packet failed")
        # Send packet from tester with random DST MAC, check the packet can be
        # received by DPDK PF and kernel VF
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, wrong_mac),
            "VM reveive misc packet failed")
        self.send_packet(wrong_mac)
        out = self.dut.get_session_output()
        self.verify(self.tester_mac.upper() in out,
                    "PF reveive misc packet failed")
        # Send packet from tester to VF with correct DST MAC, check the packet
        # can be received by kernel VF
        vm0_vf0_mac = self.vm0_dut.ports_info[0]['port'].get_mac_addr()
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac),
            "VM reveive packet failed")
        # Send packet from tester to PF with correct DST MAC, check the packet
        # can be received by DPDK PF
        self.send_packet(self.dmac)
        out = self.dut.get_session_output()
        self.verify(self.tester_mac.upper() in out, "PF reveive packet failed")
        # Disable DPDK PF promisc mode
        self.dut_testpmd.execute_cmd('stop')
        self.dut_testpmd.execute_cmd('set promisc all off')
        self.dut_testpmd.execute_cmd('set fwd rxonly')
        self.dut_testpmd.execute_cmd('set verbose 1')
        self.dut_testpmd.execute_cmd('start')
        self.dut.get_session_output()
        # Set up kernel VF tcpdump with -p parameter, which means disable promisc mode
        # Send packet from tester with random DST MAC, check the packet can't
        # be received by DPDK PF and kernel VF
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, wrong_mac,
                                   param='-p') == False,
            "VM should not reveive misc packet")
        self.send_packet(wrong_mac)
        out = self.dut.get_session_output()
        self.verify(wrong_mac not in out, "PF should not receive misc packet")
        # Send packet from tester to VF with correct DST MAC, check the packet
        # can be received by kernel VF
        vm0_vf0_mac = self.vm0_dut.ports_info[0]['port'].get_mac_addr()
        self.verify(
            self.verify_vm_tcpdump(self.vm0_dut, self.vm0_intf0, vm0_vf0_mac,
                                   param='-p'), "VM reveive packet failed")
        # Send packet from tester to PF with correct DST MAC, check the packet
        # can be received by DPDK PF
        self.send_packet(self.dmac)
        out = self.dut.get_session_output()
        self.verify(self.tester_mac.upper() in out, "PF reveive packet failed")

    def test_rss(self):
        """
        verify kernel VF each queue can receive packets
        """
        self.verify(self.nic not in ["niantic"],
                    "%s NIC not support tcpid " % self.nic)
        # Verify kernel VF RSS using ethtool -"l" (lower case L) <devx> that the
        # default RSS setting is equal to the number of CPUs in the system and
        # that the maximum number of RSS queues displayed is correct for the
        # DUT
        self.verify(self.check_pf_vf_link_status(self.vm0_dut, self.vm0_intf0),
                    "VM0_VF0 link down")
        cpus = self.vm0_dut.send_expect(
            "cat /proc/cpuinfo| grep 'processor'| wc -l", "#")
        out = self.vm0_dut.send_expect("ethtool -l %s" % self.vm0_intf0, "#",
                                       10)
        combined = re.findall("Combined:\s*(\d*)", out)
        self.verify(cpus == combined[0], "the queues count error")
        # Run "ethtool -S <devx> | grep rx_bytes | column" to see the current
        # queue count and verify that it is correct to step 1
        out = self.vm0_dut.send_expect(
            "ethtool -S %s |grep rx-.*bytes" % self.vm0_intf0, "#")
        rx_bytes_before = re.findall("rx-.*bytes:\s*(\d*)", out)
        self.verify(
            len(rx_bytes_before) == int(combined[0]),
            "the queues count error")
        # Send multi-threaded traffics to the DUT with a number of threads
        # Check kernel VF each queue can receive packets
        vm0_vf0_mac = self.vm0_dut.ports_info[0]['port'].get_mac_addr()
        for i in xrange(5):
            # NOTE(review): target=self.send_packet(vm0_vf0_mac) CALLS
            # send_packet immediately and passes its return value (None) as
            # the thread target, so the started threads do nothing; the sends
            # happen serially on this thread. Likely should be
            # target=self.send_packet, args=(vm0_vf0_mac,) — confirm.
            mythread = threading.Thread(target=self.send_packet(vm0_vf0_mac))
            mythread.start()
        out = self.vm0_dut.send_expect(
            "ethtool -S %s |grep rx-*bytes" % self.vm0_intf0, "#")
        rx_bytes_after = re.findall("rx-*.bytes:\s*(\d*)", out)
        # NOTE(review): this compares per-queue counters as strings, not ints
        # — confirm the lexicographic comparison is acceptable here.
        for i in range(len(rx_bytes_after)):
            self.verify(rx_bytes_after[i] > rx_bytes_before[i],
                        "NOT each queue receive packets")

    def test_dpf_kvf_dvf(self):
        """
        Check DPDK VF0 and kernel VF1 don't impact each other and no
        performance drop
        """
        self.vm0_dut.send_expect("ifconfig %s up " % self.vm0_intf0, "#")
        self.vm0_dut.send_expect("ifconfig %s up " % self.vm0_intf1, "#")
        self.vm0_dut.ports_info[1]['port'].bind_driver('igb_uio')
        self.vm0_testpmd.start_testpmd("Default")
        self.vm0_testpmd.execute_cmd('set promisc all on')
        self.vm0_testpmd.execute_cmd('set fwd rxonly')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')
        vm0_vf0_mac = self.vm0_dut.ports_info[0]['port'].get_mac_addr()
        vm0_vf1_mac = self.vm0_testpmd.get_port_mac(0)
        macs = [vm0_vf0_mac, vm0_vf1_mac]
        self.generate_pcap_pkt(macs)
        vm0_newvmsession = self.vm0_dut.new_session()
        # Repeat the send/capture loop for roughly 30 seconds.
        date_old = datetime.datetime.now()
        date_new = date_old + datetime.timedelta(minutes=.5)
        while (1):
            date_now = datetime.datetime.now()
            vm0_newvmsession.send_expect(
                "tcpdump -i %s -e ether src %s " %
                (self.vm0_intf0, self.src_logo), "tcpdump")
            self.send_packets()
            out = self.vm0_dut.get_session_output(timeout=20)
            self.verify(self.src_logo in out,
                        "VM PF Confiscated to the specified package")
            put = vm0_newvmsession.send_expect("^C", "#", 10)
            rx_packet = re.findall("(\d*) packe.* captured", put)
            # NOTE(review): this verify only fires when the capture count is
            # already '1', so it can never fail — confirm intent.
            if rx_packet[0] == '1':
                self.verify(
                    rx_packet[0] == '1',
                    "VM KF Confiscated to the specified package\n'%s'" % put)
            if date_now >= date_new:
                break
            time.sleep(3)

    def send_packets(self):
        """
        Replay the previously generated flow.pcap from the tester interface.
        """
        self.tester.scapy_foreground()
        self.tester.scapy_append("pkts=rdpcap('flow.pcap')")
        self.tester.scapy_append("sendp(pkts, iface='%s')" % self.tester_intf)
        self.tester.scapy_execute()

    def reboot_vm1(self):
        """
        reboot vm1.
        """
        self.vm1.stop()
        vf1_prop_5 = {'opt_host': self.sriov_vfs_port[4].pci}
        vf1_prop_6 = {'opt_host': self.sriov_vfs_port[5].pci}
        self.vm1 = QEMUKvm(self.dut, 'vm1', 'vf_kernel')
        self.vm1.set_vm_device(driver='pci-assign', **vf1_prop_5)
        self.vm1.set_vm_device(driver='pci-assign', **vf1_prop_6)
        try:
            self.vm1_dut = self.vm1.start()
            if self.vm1_dut is None:
                raise Exception("Set up VM1 ENV failed!")
            else:
                self.verify(self.vm1_dut.ports_info[0]['intf'] != 'N/A',
                            "Not interface")
        except Exception as e:
            self.destroy_vm_env()
            raise Exception(e)

    def test_zdpf_2kvf_2dvf_2vm(self):
        """
        Check DPDK PF 2kernel VFs 2DPDK VFs 2VMs link change impact on other
        VFs
        DPDK PF + 2kernel VFs + 2DPDK VFs + 2VMs
        Host one DPDK PF and create 6 VFs, pass through VF0, VF1, VF2 and VF3
        to VM0, pass through VF4, VF5 to VM1, power on VM0 and VM1.
        Load host DPDK driver, VM DPDK driver and kernel driver.
        """
        for port_info in self.vm0_dut.ports_info:
            vm0_intf = port_info['intf']
            self.verify(self.check_pf_vf_link_status(self.vm0_dut, vm0_intf),
                        "VM0_vf: %s link down" % vm0_intf)
        for port_info in self.vm1_dut.ports_info:
            vm1_intf = port_info['intf']
            self.verify(self.check_pf_vf_link_status(self.vm1_dut, vm1_intf),
                        "VM1_vf: %s link down" % vm1_intf)
        # Bind kernel VF0, VF1 to igb_uio in VM0, bind kernel VF4 to igb_uio in
        # VM1
        self.vm0_dut.ports_info[0]['port'].bind_driver('igb_uio')
        self.vm0_dut.ports_info[1]['port'].bind_driver('igb_uio')
        self.vm1_dut.ports_info[0]['port'].bind_driver('igb_uio')
        # Start DPDK VF0, VF1 in VM0 and VF4 in VM1, enable promisc mode
        self.vm0_testpmd.start_testpmd("Default")
        self.vm0_testpmd.execute_cmd('set promisc all on')
        self.vm0_testpmd.execute_cmd('set fwd rxonly')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')
        self.vm1_testpmd.start_testpmd("Default")
        self.vm1_testpmd.execute_cmd('set promisc all on')
        self.vm1_testpmd.execute_cmd('set fwd rxonly')
        self.vm1_testpmd.execute_cmd('set verbose 1')
        self.vm1_testpmd.execute_cmd('start')
        vm0_vf0_mac = self.vm0_testpmd.get_port_mac(0)
        vm0_vf1_mac = self.vm0_testpmd.get_port_mac(1)
        vm0_vf2_mac = self.vm0_dut.ports_info[2]['port'].get_mac_addr()
        vm0_vf3_mac = self.vm0_dut.ports_info[3]['port'].get_mac_addr()
        vm1_vf0_mac = self.vm1_testpmd.get_port_mac(0)
        vm1_vf1_mac = self.vm1_dut.ports_info[1]['port'].get_mac_addr()
        pf0_mac = self.dut_testpmd.get_port_mac(0)
        pf1_mac = self.dut_testpmd.get_port_mac(1)
        macs = [
            vm0_vf0_mac, vm0_vf1_mac, vm0_vf2_mac, vm0_vf3_mac, vm1_vf0_mac,
            vm1_vf1_mac, pf0_mac, pf1_mac
        ]
        self.generate_pcap_pkt(macs)
        self.send_packets()
        vm0_vf2_newvmsession = self.vm0_dut.new_session()
        vm0_vf3_newvmsession = self.vm0_dut.new_session()
        vm1_newvmsession = self.vm1_dut.new_session()
        # Set up kernel VF2, VF3 in VM0 and VF5 in VM1 tcpdump without -p
        # parameter on promisc mode
        vm0_vf2_newvmsession.send_expect(
            "tcpdump -i %s -p -e ether src %s" %
            (self.vm0_dut.ports_info[2]['intf'], self.src_logo), "tcpdump", 10)
        vm0_vf3_newvmsession.send_expect(
            "tcpdump -i %s -p -e ether src %s" %
            (self.vm0_dut.ports_info[3]['intf'], self.src_logo), "tcpdump", 10)
        # NOTE(review): vm1_newvmsession captures on a vm0_dut interface name
        # here and in every later block — presumably self.vm1_dut.ports_info
        # was intended; confirm.
        vm1_newvmsession.send_expect(
            "tcpdump -i %s -e -p ether src %s" %
            (self.vm0_dut.ports_info[1]['intf'], self.src_logo), "tcpdump", 10)
        self.send_packets()
        out = self.vm0_dut.get_session_output()
        self.verify(self.src_logo in out,
                    "VM0 PF Confiscated to the specified package")
        vm0_vf2_out = vm0_vf2_newvmsession.send_expect("^C", "#")
        vm0_vf2_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf2_out)
        self.verify(vm0_vf2_out_rx_packet[0] == '1',
                    "vm0 vf2 Confiscated to the specified package")
        vm0_vf3_out = vm0_vf3_newvmsession.send_expect("^C", "#")
        vm0_vf3_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf3_out)
        self.verify(vm0_vf3_out_rx_packet[0] == '1',
                    "vm0 vf3 Confiscated to the specified package")
        out = self.vm1_dut.get_session_output()
        self.verify(self.src_logo in out,
                    "VM1 PF Confiscated to the specified package")
        vm1_vf1_out = vm1_newvmsession.send_expect("^C", "#")
        # NOTE(review): parses vm0_vf2_out rather than vm1_vf1_out — likely a
        # copy/paste bug (same pattern recurs below); confirm.
        vm1_vf1_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf2_out)
        self.verify(vm1_vf1_out_rx_packet[0] == '1',
                    "vm1 vf1 Confiscated to the specified package")
        # Link down DPDK VF0 and expect no impact on other VFs
        self.vm0_testpmd.quit()
        # Blacklist VF0's PCI address so testpmd restarts without it.
        eal_param = '-b %(vf0)s' % ({'vf0': self.vm0_dut.ports_info[0]['pci']})
        self.vm0_testpmd.start_testpmd("Default", eal_param=eal_param)
        self.vm0_testpmd.execute_cmd('set promisc all on')
        self.vm0_testpmd.execute_cmd('set fwd rxonly')
        self.vm0_testpmd.execute_cmd('set verbose 1')
        self.vm0_testpmd.execute_cmd('start')
        vm0_vf2_newvmsession.send_expect(
            "tcpdump -i %s -p -e ether src %s" %
            (self.vm0_dut.ports_info[2]['intf'], self.src_logo), "tcpdump", 10)
        vm0_vf3_newvmsession.send_expect(
            "tcpdump -i %s -p -e ether src %s" %
            (self.vm0_dut.ports_info[3]['intf'], self.src_logo), "tcpdump", 10)
        vm1_newvmsession.send_expect(
            "tcpdump -i %s -p -e ether src %s" %
            (self.vm0_dut.ports_info[1]['intf'], self.src_logo), "tcpdump", 10)
        self.send_packets()
        out = self.vm0_dut.get_session_output()
        self.verify(self.src_logo in out,
                    "link down impact VM0 PF receive package")
        vm0_vf2_out = vm0_vf2_newvmsession.send_expect("^C", "#")
        vm0_vf2_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf2_out)
        self.verify(vm0_vf2_out_rx_packet[0] == '1',
                    "link down impact vm0 vf2 receive package")
        vm0_vf3_out = vm0_vf3_newvmsession.send_expect("^C", "#")
        vm0_vf3_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf3_out)
        self.verify(vm0_vf3_out_rx_packet[0] == '1',
                    "link down impact vm0 vf3 receive package")
        out = self.vm1_dut.get_session_output()
        self.verify(self.src_logo in out,
                    "link down impact VM1 PF receive package")
        vm1_vf1_out = vm1_newvmsession.send_expect("^C", "#")
        # NOTE(review): again parses vm0_vf2_out instead of vm1_vf1_out.
        vm1_vf1_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf2_out)
        self.verify(vm1_vf1_out_rx_packet[0] == '1',
                    "link down impact vm1 vf1 receive package")
        # Link down kernel VF2 and expect no impact on other VFs
        vm0_vf2_newvmsession.send_expect(
            "ifconfig %s down" % self.vm0_dut.ports_info[2]['intf'], "#", 10)
        vm0_vf3_newvmsession.send_expect(
            "tcpdump -i %s -p -e ether src %s" %
            (self.vm0_dut.ports_info[3]['intf'], self.src_logo), "tcpdump", 10)
        vm1_newvmsession.send_expect(
            "tcpdump -i -p %s -e ether src %s" %
            (self.vm0_dut.ports_info[1]['intf'], self.src_logo), "tcpdump", 10)
        self.send_packets()
        out = self.vm0_dut.get_session_output()
        self.verify(self.src_logo in out,
                    "link down kernel vf2 impact VM0 PF receive package")
        vm0_vf3_out = vm0_vf3_newvmsession.send_expect("^C", "#")
        vm0_vf3_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf3_out)
        self.verify(vm0_vf3_out_rx_packet[0] == '1',
                    "link down kernel vf2 impact vm0 vf3 receive package")
        out = self.vm1_dut.get_session_output()
        self.verify(self.src_logo in out,
                    "link down kernel vf2 impact VM1 PF receive package")
        vm1_vf1_out = vm1_newvmsession.send_expect("^C", "#")
        vm1_vf1_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf2_out)
        self.verify(vm1_vf1_out_rx_packet[0] == '1',
                    "link down kernel vf2 impact vm1 vf1 receive package")
        vm0_vf2_newvmsession.send_expect(
            "ifconfig %s up" % self.vm0_dut.ports_info[2]['intf'], "#")
        # Quit VF4 DPDK testpmd and expect no impact on other VFs
        self.vm1_testpmd.quit()
        vm0_vf2_newvmsession.send_expect(
            "tcpdump -i %s -p -e ether src %s" %
            (self.vm0_dut.ports_info[2]['intf'], self.src_logo), "tcpdump", 10)
        vm0_vf3_newvmsession.send_expect(
            "tcpdump -i %s -p -e ether src %s" %
            (self.vm0_dut.ports_info[3]['intf'], self.src_logo), "tcpdump", 10)
        vm1_newvmsession.send_expect(
            "tcpdump -i %s -p -e ether src %s" %
            (self.vm0_dut.ports_info[1]['intf'], self.src_logo), "tcpdump", 10)
        self.send_packets()
        out = self.vm0_dut.get_session_output()
        self.verify(self.src_logo in out,
                    "quit vf4 DPDK testpmd impact VM0 PF receive package")
        vm0_vf2_out = vm0_vf2_newvmsession.send_expect("^C", "#")
        vm0_vf2_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf2_out)
        self.verify(vm0_vf2_out_rx_packet[0] == '1',
                    "quit vf4 DPDK testpmd impact vm0 vf2 receive package")
        vm0_vf3_out = vm0_vf3_newvmsession.send_expect("^C", "#")
        vm0_vf3_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf3_out)
        self.verify(vm0_vf3_out_rx_packet[0] == '1',
                    "quit vf4 DPDK testpmd impact vm0 vf3 receive package")
        vm1_vf1_out = vm1_newvmsession.send_expect("^C", "#")
        vm1_vf1_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf2_out)
        self.verify(vm1_vf1_out_rx_packet[0] == '1',
                    "quit vf4 DPDK testpmd impact vm1 vf1 receive package")
        self.vm1_testpmd.start_testpmd("Default")
        self.vm1_testpmd.execute_cmd('set promisc all on')
        self.vm1_testpmd.execute_cmd('set fwd rxonly')
        self.vm1_testpmd.execute_cmd('set verbose 1')
        self.vm1_testpmd.execute_cmd('start')
        # Unload VF5 kernel driver and expect no impact on other VFs
        vm1_newvmsession.send_expect(
            "./usertools/dpdk-devbind.py -b pci-stub %s" %
            (self.vm1_dut.ports_info[1]['pci']), "#")
        vm0_vf2_newvmsession.send_expect(
            "tcpdump -i %s -p -e ether src %s" %
            (self.vm0_dut.ports_info[2]['intf'], self.src_logo), "tcpdump")
        vm0_vf3_newvmsession.send_expect(
            "tcpdump -i %s -p -e ether src %s" %
            (self.vm0_dut.ports_info[3]['intf'], self.src_logo), "tcpdump")
        self.send_packets()
        out = self.vm0_dut.get_session_output()
        self.verify(self.src_logo in out,
                    "unload vf5 kernel driver impact VM0 PF receive package")
        vm0_vf2_out = vm0_vf2_newvmsession.send_expect("^C", "#")
        vm0_vf2_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf2_out)
        self.verify(vm0_vf2_out_rx_packet[0] == '1',
                    "unload vf5 kernel driver impact vm0 vf2 receive package")
        vm0_vf3_out = vm0_vf3_newvmsession.send_expect("^C", "#", 10)
        vm0_vf3_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf3_out)
        self.verify(vm0_vf3_out_rx_packet[0] == '1',
                    "unload vf5 kernel driver impact vm0 vf3 receive package")
        out = self.vm1_dut.get_session_output(timeout=20)
        self.verify(self.src_logo in out,
                    "unload vf5 kernel driver impact VM1 PF receive package")
        # Reboot VM1 and expect no impact on VFs of VM0
        self.vm1_dut.send_expect("quit", "#")
        self.reboot_vm1()
        vm0_vf2_newvmsession.send_expect(
            "tcpdump -i %s -p -e ether src %s" %
            (self.vm0_dut.ports_info[2]['intf'], self.src_logo), "tcpdump")
        vm0_vf3_newvmsession.send_expect(
            "tcpdump -i %s -p -e ether src %s" %
            (self.vm0_dut.ports_info[3]['intf'], self.src_logo), "tcpdump")
        self.send_packets()
        out = self.vm0_dut.get_session_output()
        self.verify(self.src_logo in out,
                    "reboot vm1 impact VM0 PF receive package")
        vm0_vf2_out = vm0_vf2_newvmsession.send_expect("^C", "#")
        vm0_vf2_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf2_out)
        self.verify(vm0_vf2_out_rx_packet[0] == '1',
                    "reboot vm1 impact vm0 vf2 receive package")
        vm0_vf3_out = vm0_vf3_newvmsession.send_expect("^C", "#")
        vm0_vf3_out_rx_packet = re.findall("(\d*) packe.* captured",
                                           vm0_vf3_out)
        self.verify(vm0_vf3_out_rx_packet[0] == '1',
                    "reboot vm1 impact vm0 vf3 receive package")

    def test_stress(self):
        """
        Load kernel driver stress
        """
        for i in xrange(100):
            out = self.vm0_dut.send_expect("rmmod %svf" % self.kdriver, "#")
            self.verify('error' not in out,
                        "stress error for rmmod %svf:%s" % (self.kdriver, out))
            out = self.vm0_dut.send_expect("modprobe %svf" % self.kdriver, "#")
            self.verify(
                'error' not in out,
                "stress error for modprobe %svf:%s" % (self.kdriver, out))

    def tear_down(self):
        """
        Run after each test case.
        """
        self.vm0_testpmd.quit()
        self.vm0_dut.restore_interfaces_linux()
        # NOTE(review): the close_session arguments below are bare names but
        # the sessions are created as locals inside the test cases — these
        # would raise NameError if the getattr guard ever passed; presumably
        # self.-qualified attributes were intended. Confirm.
        if getattr(self, 'vm0_newvmsession', None):
            self.vm0_dut.close_session(vm0_newvmsession)
        if getattr(self, 'vm0_vf2_newvmsession', None):
            self.vm0_dut.close_session(vm0_vf2_newvmsession)
        if getattr(self, 'vm0_vf3_newvmsession', None):
            self.vm0_dut.close_session(vm0_vf3_newvmsession)
        # Sometime test failed ,we still need clear ip.
self.vm0_dut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf0, "#") self.vm0_dut.send_expect("ifconfig %s 0.0.0.0" % self.vm0_intf1, "#") self.tester.send_expect("ifconfig %s 0.0.0.0" % self.tester_intf, "#") time.sleep(5) def tear_down_all(self): """ Run after each test suite. """ self.dut_testpmd.quit() self.destroy_vm_env() self.dut.kill_all() time.sleep(2)
class TestDynamicFlowtype(TestCase): def set_up_all(self): self.verify( 'fortville' in self.nic, 'dynamic flow type mapping can not support %s nic' % self.nic) ports = self.dut.get_ports() self.verify(len(ports) >= 1, "Insufficient ports for testing") valports = [_ for _ in ports if self.tester.get_local_port(_) != -1] self.dut_port = valports[0] tester_port = self.tester.get_local_port(self.dut_port) self.tester_intf = self.tester.get_interface(tester_port) profile_file = 'dep/gtp.pkgo' profile_dst = "/tmp/" self.dut.session.copy_file_to(profile_file, profile_dst) PF_Q_strip = 'CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF' self.PF_QUEUE = self.search_queue_number(PF_Q_strip) def set_up(self): """ Run before each test case. """ self.dut_testpmd = PmdOutput(self.dut) self.dut_testpmd.start_testpmd( "Default", "--port-topology=chained --txq=%s --rxq=%s" % (self.PF_QUEUE, self.PF_QUEUE)) self.load_profile() def search_queue_number(self, Q_strip): """ Search max queue number from configuration. """ out = self.dut.send_expect("cat config/common_base", "]# ", 10) pattern = "(%s=)(\d*)" % Q_strip s = re.compile(pattern) res = s.search(out) if res is None: print utils.RED('Search no queue number.') return None else: queue = res.group(2) return int(queue) def load_profile(self): """ Load profile to update FVL configuration tables, profile will be stored in binary file and need to be passed to AQ to program FVL during initialization stage. 
""" self.dut_testpmd.execute_cmd('port stop all') time.sleep(1) out = self.dut_testpmd.execute_cmd('ddp get list 0') self.dut_testpmd.execute_cmd('ddp add 0 /tmp/gtp.pkgo,/tmp/gtp.bak') out = self.dut_testpmd.execute_cmd('ddp get list 0') self.verify("Profile number is: 1" in out, "Failed to load ddp profile!!!") self.dut_testpmd.execute_cmd('port start all') time.sleep(1) self.dut_testpmd.execute_cmd('set fwd rxonly') self.dut_testpmd.execute_cmd('set verbose 1') self.dut_testpmd.execute_cmd('start') def gtp_packets(self, flowtype=26, match_opt='matched'): """ Generate different GTP types according to different parameters. I40e PCTYPEs are statically mapped to RTE_ETH_FLOW_* types in DPDK, defined in rte_eth_ctrl.h, and flow types used to define ETH_RSS_* offload types in rte_ethdev.h. RTE_ETH_FLOW_MAX is defined now as 22, leaves 42 flow type unassigned. Input: flowtype: define flow type 26, 23, 24, 25 for GTP types as below table, check each layer type, tunnel packet includes GTPC and GTPU, GTPC has none inner L3, GTPU has none, IPV4 and IPV6 inner L3. match_opt: PF or VSIs receive match packets to rss queue, but receive not match packets to queue 0. 
+------------+------------+------------+ |Packet Type | PCTYPEs | Flow Types | +------------+------------+------------+ |GTP-U IPv4 | 22 | 26 | +------------+------ -----+------------+ |GTP-U IPv6 | 23 | 23 | +------------+------------+------------+ |GTP-U PAY4 | 24 | 24 | +------------+------------+------------+ |GTP-C PAY4 | 25 | 25 | +------------+------------+------------+ """ pkts = [] pkts_ipv4 = {'IPV4': 'Ether()/IP()/Raw("X"*20)'} pkts_gtpc_pay = { 'IPV4/GTPC': 'Ether()/IP()/UDP(dport=2123)/GTP_U_Header()/Raw("X"*20)', 'IPV6/GTPC': 'Ether()/IPv6()/UDP(dport=2123)/GTP_U_Header()/Raw("X"*20)' } pkts_gtpu_pay = { 'IPV4/GTPU': 'Ether()/IP()/UDP(dport=2152)/GTP_U_Header()/Raw("X"*20)', 'IPV6/GTPU': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header()/Raw("X"*20)' } pkts_gtpu_ipv4 = { 'IPV4/GTPU/IPV4': 'Ether()/IP()/UDP(dport=2152)/GTP_U_Header()/IP()/Raw("X"*20)', 'IPV4/GTPU/IPV4/FRAG': 'Ether()/IP()/UDP(dport=2152)/GTP_U_Header()/IP(frag=5)/Raw("X"*20)', 'IPV4/GTPU/IPV4/UDP': 'Ether()/IP()/UDP(dport=2152)/GTP_U_Header()/IP()/UDP()/Raw("X"*20)', 'IPV4/GTPU/IPV4/TCP': 'Ether()/IP()/UDP(dport=2152)/GTP_U_Header()/IP()/TCP()/Raw("X"*20)', 'IPV4/GTPU/IPV4/SCTP': 'Ether()/IP()/UDP(dport=2152)/GTP_U_Header()/IP()/SCTP()/Raw("X"*20)', 'IPV4/GTPU/IPV4/ICMP': 'Ether()/IP()/UDP(dport=2152)/GTP_U_Header()/IP()/ICMP()/Raw("X"*20)', 'IPV6/GTPU/IPV4': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header()/IP()/Raw("X"*20)', 'IPV6/GTPU/IPV4/FRAG': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header()/IP(frag=5)/Raw("X"*20)', 'IPV6/GTPU/IPV4/UDP': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header()/IP()/UDP()/Raw("X"*20)', 'IPV6/GTPU/IPV4/TCP': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header()/IP()/TCP()/Raw("X"*20)', 'IPV6/GTPU/IPV4/SCTP': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header()/IP()/SCTP()/Raw("X"*20)', 'IPV6/GTPU/IPV4/ICMP': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header()/IP()/ICMP()/Raw("X"*20)' } pkts_gtpu_ipv6 = { 'IPV4/GTPU/IPV6/FRAG': 
'Ether()/IP()/UDP(dport=2152)/GTP_U_Header()/IPv6()/IPv6ExtHdrFragment()/Raw("X"*20)', 'IPV4/GTPU/IPV6': 'Ether()/IP()/UDP(dport=2152)/GTP_U_Header()/IPv6()/Raw("X"*20)', 'IPV4/GTPU/IPV6/UDP': 'Ether()/IP()/UDP(dport=2152)/GTP_U_Header()/IPv6()/UDP()/Raw("X"*20)', 'IPV4/GTPU/IPV6/TCP': 'Ether()/IP()/UDP(dport=2152)/GTP_U_Header()/IPv6()/TCP()/Raw("X"*20)', 'IPV4/GTPU/IPV6/SCTP': 'Ether()/IP()/UDP(dport=2152)/GTP_U_Header()/IPv6()/SCTP()/Raw("X"*20)', 'IPV4/GTPU/IPV6/ICMP': 'Ether()/IP()/UDP(dport=2152)/GTP_U_Header()/IPv6(nh=58)/ICMP()/Raw("X"*20)', 'IPV6/GTPU/IPV6/FRAG': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header()/IPv6()/IPv6ExtHdrFragment()/Raw("X"*20)', 'IPV6/GTPU/IPV6': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header()/IPv6()/Raw("X"*20)', 'IPV6/GTPU/IPV6/UDP': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header()/IPv6()/UDP()/Raw("X"*20)', 'IPV6/GTPU/IPV6/TCP': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header()/IPv6()/TCP()/Raw("X"*20)', 'IPV6/GTPU/IPV6/SCTP': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header()/IPv6()/SCTP()/Raw("X"*20)', 'IPV6/GTPU/IPV6/ICMP': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header()/IPv6(nh=58)/ICMP()/Raw("X"*20)' } if match_opt == 'matched': # Define flow type for IPV4 as 2 in rte_eth_ctrl.h if flowtype == 2: pkts = pkts_ipv4 if flowtype == 23: pkts = pkts_gtpu_ipv6 if flowtype == 24: pkts = pkts_gtpu_pay if flowtype == 25: pkts = pkts_gtpc_pay if flowtype == 26: pkts = pkts_gtpu_ipv4 if match_opt == 'not matched': if flowtype == 23: pkts = dict(pkts_gtpc_pay.items() + pkts_gtpu_pay.items() + pkts_gtpu_ipv4.items()) if flowtype == 24: pkts = dict(pkts_gtpc_pay.items() + pkts_gtpu_ipv4.items() + pkts_gtpu_ipv6.items()) if flowtype == 25: pkts = dict(pkts_gtpu_pay.items() + pkts_gtpu_ipv4.items() + pkts_gtpu_ipv6.items()) if flowtype == 26: pkts = dict(pkts_gtpc_pay.items() + pkts_gtpu_pay.items() + pkts_gtpu_ipv6.items()) return pkts def packet_send_verify(self, flowtype=26, match_opt='matched'): """ Send packet and verify rss function. 
""" pkts = self.gtp_packets(flowtype, match_opt) for packet_type in pkts.keys(): self.tester.scapy_append('sendp([%s], iface="%s")' % (pkts[packet_type], self.tester_intf)) self.tester.scapy_execute() out = self.dut.get_session_output(timeout=2) if match_opt == 'matched': self.verify("PKT_RX_RSS_HASH" in out, "Failed to receive packet in rss queue!!!") elif match_opt == 'not matched': self.verify("port 0/queue 0" in out, "Failed to receive packet in queue 0!!!") self.verify("PKT_RX_RSS_HASH" not in out, "Failed to receive packet in queue 0!!!") def dynamic_flowtype_test(self, pctype=22, flowtype=26, reset=False): """ Dynamic modify, return or reset the contents of flow type to pctype dynamic mapping, enable rss hash for new protocol. reset: If reset is true, reset the contents of flow type to pctype mapping. If reset is false, enable rss hash for new protocal. """ out = self.dut_testpmd.execute_cmd('show port 0 pctype mapping') self.verify("pctype: 63 -> flowtype: 14" in out, "Failed show flow type to pctype mapping!!!") self.verify( "pctype: %s -> flowtype: %s" % (pctype, flowtype) not in out, "Failed show flow type to pctype mapping!!!") self.dut_testpmd.execute_cmd( 'port config 0 pctype mapping update %s %s' % (pctype, flowtype)) out = self.dut_testpmd.execute_cmd('show port 0 pctype mapping') self.verify("pctype: %s -> flowtype: %s" % (pctype, flowtype) in out, "Failed update flow type to pctype mapping!!!") if reset is False: self.dut_testpmd.execute_cmd( 'set_hash_input_set 0 %s udp-key add' % flowtype) self.dut_testpmd.execute_cmd('port config all rss %s' % flowtype) else: self.dut_testpmd.execute_cmd('port config 0 pctype mapping reset') out = self.dut_testpmd.execute_cmd('show port 0 pctype mapping') self.verify( "pctype: %s -> flowtype: %s" % (pctype, flowtype) not in out, "Failed reset flow type to pctype mapping!!!") """ Send normal ipv4 packet to test rss, rte_eth_ctrl.h defines flow type for IPV4 is 2. 
""" flowtype = 2 for match_opt in ['matched', 'not matched']: if match_opt == 'not matched' and reset is True: break self.packet_send_verify(flowtype, match_opt) def test_profile_correctness(self): """ GTP is supported by NVM with profile updated. Check profile information correctness, includes used protocols, packet classification types, defined packet types and so on. """ out = self.dut_testpmd.execute_cmd('ddp get info /tmp/gtp.pkgo') self.verify("i40e Profile Version" in out, "Failed to verify profile version!!!") self.verify("List of used protocols" in out, "Failed to verify profie used protocols!!!") self.verify("List of defined packet classification types" in out, "Failed to verify profile packet classification types!!!") self.verify("List of defined packet types" in out, "Failed to verify profile packet types!!!") def test_dynamic_flowtype_reset(self): """ Dynamic modify, reset and return the contents of flow type to pctype dynamic mapping. """ self.dynamic_flowtype_test(pctype=22, flowtype=26, reset=True) def test_dynamic_flowtype_gtpu_ipv4(self): """ Dynamic modify, return the contents of flow type to pctype dynamic mapping, enable and verify rss for GTP-U IPV4 packets. """ self.dynamic_flowtype_test(pctype=22, flowtype=26, reset=False) def test_dynamic_flowtype_gtpu_ipv6(self): """ Dynamic modify, return the contents of flow type to pctype dynamic mapping, enable and verify rss for GTP-U IPV6 packets. """ self.dynamic_flowtype_test(pctype=23, flowtype=23, reset=False) def test_dynamic_flowtype_gtpu_pay(self): """ Dynamic modify, return the contents of flow type to pctype dynamic mapping, enable and verify rss for GTP-U PAY packets. """ self.dynamic_flowtype_test(pctype=24, flowtype=24, reset=False) def test_dynamic_flowtype_gtpc_pay(self): """ Dynamic modify, return the contents of flow type to pctype dynamic mapping, enable and verify rss for GTP-C PAY packets. 
""" self.dynamic_flowtype_test(pctype=25, flowtype=25, reset=False) def tear_down(self): self.dut_testpmd.execute_cmd('stop') out = self.dut_testpmd.execute_cmd('ddp get list 0') if "Profile number is: 0" not in out: self.dut_testpmd.execute_cmd('port stop all') time.sleep(1) self.dut_testpmd.execute_cmd('ddp del 0 /tmp/gtp.bak') out = self.dut_testpmd.execute_cmd('ddp get list 0') self.verify("Profile number is: 0" in out, "Failed to delete ddp profile!!!") self.dut_testpmd.execute_cmd('port start all') self.dut_testpmd.quit() def tear_down_all(self): self.dut.kill_all()
class PmdBonding(object): ''' common methods for testpmd bonding ''' def __init__(self, **kwargs): # set parent instance self.parent = kwargs.get('parent') # set target source code directory self.target_source = self.parent.dut.base_dir # set logger self.logger = self.parent.logger self.verify = self.parent.verify # select packet generator self.pktgen_name = 'ixia' if self.is_perf else 'scapy' # traffic default config self.default_pkt_size = kwargs.get('pkt_size') or FRAME_SIZE_64 self.default_src_mac = kwargs.get('src_mac') self.default_src_ip = kwargs.get('src_ip') self.default_src_port = kwargs.get('src_port') self.default_dst_ip = kwargs.get('dst_ip') self.default_dst_port = kwargs.get('dst_port') self.default_pkt_name = kwargs.get('pkt_name') # testpmd self.testpmd = PmdOutput(self.parent.dut) self.testpmd_status = 'close' # # On tester platform, packet transmission # def mac_str_to_int(self, mac_str): """ convert the MAC type from the string into the int. """ mac_hex = '0x' for mac_part in mac_str.split(':'): mac_hex += mac_part return int(mac_hex, 16) def mac_int_to_str(self, mac_int): """ Translate the MAC type from the string into the int. """ temp = hex(mac_int)[2:] b = [] [b.append(temp[n:n+2]) for n in range(len(temp)) if n % 2 == 0] new_mac = ":".join(b) return new_mac def ip_str_to_int(self, ip_str): """ convert the IP type from the string into the int. """ ip_int = socket.ntohl(struct.unpack( "I", socket.inet_aton(str(ip_str)))[0]) return ip_int def ip_int_to_str(self, ip_int): """ convert the IP type from the int into the string. 
""" ip_str = socket.inet_ntoa(struct.pack('I', socket.htonl(ip_int))) return ip_str def increase_ip(self, ip, step=1): ''' ip: string format ''' _ip_int = self.ip_str_to_int(ip) new_ip = self.ip_int_to_str(_ip_int + step) return new_ip def increase_mac(self, mac, step=1): ''' mac: string format ''' _mac_int = self.mac_str_to_int(mac) new_mac = self.mac_int_to_str(_mac_int+step) return new_mac def increase_port(self, port, step=1): ''' port: int format ''' new_port = port + step return new_port def increase_mac_ip_port(self, step=1): # get source port setting mac, ip, port = (self.default_src_mac, self.default_src_ip, self.default_src_port) return (self.increase_mac(mac, step), self.increase_ip(ip, step), self.increase_port(port, step)) def get_pkt_len(self, pkt_type): ''' get packet payload size ''' frame_size = self.default_pkt_size headers_size = sum(map(lambda x: HEADER_SIZE[x], ['eth', 'ip', pkt_type])) pktlen = frame_size - headers_size return pktlen def set_stream_to_slave_port(self, dut_port_id): ''' use framework/packet.py module to create one stream, send stream to slave port ''' # get dst port mac address pkt_name = self.default_pkt_name destport = self.default_dst_port destip = self.default_dst_ip dst_mac = self.get_port_info(dut_port_id, 'mac') # packet size pktlen = self.get_pkt_len(pkt_name) # set stream configuration srcmac, srcip, srcport = self.increase_mac_ip_port(0) pkt_config = { 'type': pkt_name.upper(), 'pkt_layers': { # Ether(dst=nutmac, src=srcmac) 'ether': {'src': srcmac, 'dst': dst_mac}, # IP(dst=destip, src=srcip, len=%s) 'ipv4': {'src': srcip, 'dst': destip}, # pkt_name(sport=srcport, dport=destport) pkt_name: {'src': srcport, 'dst': destport}, # Raw(load='\x50'*%s) 'raw': {'payload': ['58'] * self.get_pkt_len(pkt_name)}}} # create packet streams = [] # keep a copy of pcap for debug savePath = os.sep.join([self.target_source, "pkt_{0}.pcap".format(pkt_name)]) pkt_type = pkt_config.get('type') pkt_layers = pkt_config.get('pkt_layers') pkt 
= Packet(pkt_type=pkt_type.upper()) for layer in pkt_layers.keys(): pkt.config_layer(layer, pkt_layers[layer]) pkt.pktgen.write_pcap(savePath) streams.append(pkt.pktgen.pkt) return streams def set_stream_to_bond_port(self, bond_port, slaves): ''' : use framework/packet.py module to create multiple streams send streams from bond port to slaves :param bond_port: bonded device port id :param slaves: slaves port id ''' pkt_configs = [] # get dst port mac address pkt_name = self.default_pkt_name destport = self.default_dst_port destip = self.default_dst_ip dst_mac = self.get_port_info(bond_port, 'mac') # packet size pktlen = self.get_pkt_len(pkt_type) # set stream configuration for packet_id in range(len(slaves['active'])): srcmac, srcip, srcport = self.increase_mac_ip_port(packet_id) pkt_configs.append({ 'type': pkt_name.upper(), 'pkt_layers': { # Ether(dst=nutmac, src=srcmac) 'ether': {'src': srcmac, 'dst': dst_mac}, # IP(dst=destip, src=srcip, len=%s) 'ipv4': {'src': srcip, 'dst': destip}, # pkt_name(sport=srcport, dport=destport) pkt_name: {'src': srcport, 'dst': destport}, # Raw(load='\x50'*%s) 'raw': {'payload': ['58'] * self.get_pkt_len(pkt_name)}}}) # create packet streams = [] for values in pkt_configs: # keep a copy of pcap for debug savePath = os.sep.join([self.target_source, "pkt_{0}.pcap".format(stm_name)]) pkt_type = values.get('type') pkt_layers = values.get('pkt_layers') pkt = Packet(pkt_type=pkt_type.upper()) for layer in pkt_layers.keys(): pkt.config_layer(layer, pkt_layers[layer]) pkt.pktgen.write_pcap(savePath) streams.append(pkt.pktgen.pkt) return streams def send_packets_by_scapy(self, **kwargs): tx_iface = kwargs.get('port topo')[0] # set interface ready to send packet cmd = "ifconfig {0} up".format(tx_iface) self.parent.tester.send_expect(cmd, '# ', 30) send_pkts = kwargs.get('stream') # stream config stream_configs = kwargs.get('traffic configs') count = stream_configs.get('count') interval = stream_configs.get('interval', 0.01) # run traffic 
sendp(send_pkts, iface=tx_iface, inter=interval, verbose=False, count=count) def send_packets_by_ixia(self, **kwargs): tester_port = kwargs.get('tx_intf') count = kwargs.get('count', 1) traffic_type = kwargs.get('traffic_type', 'normal') traffic_time = kwargs.get('traffic_time', 0) rate_percent = kwargs.get('rate_percent', float(100)) #--------------------------------------------------------------- send_pkts = [] self.tgen_input = [] tgen_input = self.tgen_input # generate packet contain multi stream for pkt in self.packet_types.values(): send_pkts.append(pkt.pktgen.pkt) ixia_pkt = os.sep.join([self.target_source, 'bonding_ixia.pcap']) wrpcap(ixia_pkt, send_pkts) #---------------------------------------------------------------- # set packet for send # pause frame basic configuration pause_time = 65535 pause_rate = 0.50 # run ixia testing frame_size = self.default_pkt_size # calculate number of packets expect_pps = self.parent.wirespeed(self.parent.nic, frame_size, 1) * \ 1000000.0 # get line rate linerate = expect_pps * (frame_size + 20) * 8 # calculate default sleep time for one pause frame sleep = (1 / linerate) * pause_time * 512 # calculate packets dropped in sleep time self.n_pkts = int((sleep / (1 / expect_pps)) * (1 / pause_rate)) #---------------------------------------------------------------- tester_port = self.parent.tester.get_local_port(self.parent.dut_ports[0]) tgen_input.append((tester_port, tester_port,ixia_pkt)) # run latency stat statistics self.parent.tester.loop_traffic_generator_throughput(tgen_input, self.rate_percent) def stop_ixia(self, data_types='packets'): tester_inst = self.parent.tester # get ixia statistics line_rate = tester_inst.get_port_line_rate() rx_bps, rx_pps = \ tester_inst.stop_traffic_generator_throughput_loop(self.tgen_input) output = tester_inst.traffic_get_port_stats(self.tgen_input) self.cur_data['ixia statistics'] = [] append = self.cur_data['ixia statistics'].append append('send packets: {0}'.format(output[0])) 
append('line_rate: {0}'.format(line_rate[0])) append('rate_percent: {0}%'.format(self.rate_percent)) def get_pktgen(self, name): pkt_gens = { 'ixia': self.send_packets_by_ixia, 'scapy': self.send_packets_by_scapy,} pkt_generator = pkt_gens.get(name) return pkt_generator def send_packet(self, traffic_config): """ stream transmission on specified link topology """ time.sleep(2) # start traffic self.logger.info("begin transmission ...") pktgen = self.get_pktgen(self.pktgen_name) result = pktgen(**traffic_config) # end traffic self.logger.info("complete transmission") return result # # On dut, dpdk testpmd common methods # def check_process_status(self, process_name='testpmd'): cmd = "ps aux | grep -i %s | grep -v grep | awk {'print $2'}"%( process_name) out = self.parent.dut.alt_session.send_expect(cmd, "# ", 10) status = True if out != "" else False return status def check_process_exist(self, process_name='testpmd'): status = self.check_process_status(process_name) if not status: msg = "{0} process exceptional quit".format(process_name) out = self.parent.dut.session.session.get_output_all() self.logger.info(out) raise VerifyFailure(msg) def d_console(self, cmds): ''' wrap up testpmd command interactive console ''' if len(cmds) == 0: return # check if cmds is string if isinstance(cmds, str): timeout = 10 cmds = [[cmds, '', timeout]] # check if cmds is only one command if not isinstance(cmds[0], list): cmds = [cmds] outputs = [] if len(cmds) > 1 else '' for item in cmds: expected_items = item[1] if expected_items and isinstance(expected_items, (list, tuple)): check_output = True expected_str = expected_items[0] or 'testpmd> ' else: check_output = False expected_str = expected_items or 'testpmd> ' timeout = int(item[2]) if len(item) == 3 else 5 #---------------------------------------------------------------- # run command on session try: console = self.testpmd.execute_cmd msg_pipe = self.testpmd.get_output output = console(item[0], expected_str, timeout) output = 
msg_pipe(timeout) if not output else output except TimeoutException: try: # check if testpmd quit self.check_process_exist() except Exception as e: self.testpmd_status = 'close' msg = "execute '{0}' timeout".format(item[0]) output = out = self.parent.dut.session.session.get_output_all() self.logger.error(output) raise Exception(msg) if len(cmds) > 1: outputs.append(output) else: outputs = output if check_output and len(expected_items) >= 2: self.logger.info(output) expected_output = expected_items[1] check_type = True if len(expected_items) == 2 \ else expected_items[2] if check_type and expected_output in output: msg = "expected '{0}' is in output".format(expected_output) self.logger.info(msg) elif not check_type and expected_output not in output: fmt = "unexpected '{0}' is not in output" msg = fmt.format(expected_output) self.logger.info(msg) else: status = "isn't in" if check_type else "is in" msg = "[{0}] {1} output".format(expected_output, status) self.logger.error(msg) raise VerifyFailure(msg) time.sleep(2) return outputs def preset_testpmd(self, core_mask, options='', eal_param=''): try: self.testpmd.start_testpmd( core_mask, param=' '.join(options), eal_param=eal_param) except TimeoutException: # check if testpmd quit try: self.check_process_exist() except Exception as e: self.testpmd_status = 'close' msg = "execute '{0}' timeout".format(item[0]) self.logger.error(msg_pipe(timeout)) raise TimeoutException(msg) # wait lsc event udpate done time.sleep(10) # check if testpmd has bootep up if self.check_process_status(): self.logger.info("testpmd boot up successful") else: raise VerifyFailure("testpmd boot up failed") self.d_console(self.preset_testpmd_cmds) self.preset_testpmd_cmds = [] time.sleep(1) def start_testpmd(self, eal_option=''): if self.testpmd_status == 'running': return # boot up testpmd hw_mask = 'all' options = '' self.preset_testpmd_cmds = ['port stop all', '', 15] self.preset_testpmd(hw_mask, options, eal_param=eal_option) self.testpmd_status 
= 'running' def stop_testpmd(self): time.sleep(1) testpmd_cmds =[['port stop all', '', 15], ['show port stats all', ''], ['stop', ''],] output = self.d_console(testpmd_cmds) time.sleep(1) return output def close_testpmd(self): if self.testpmd_status == 'close': return None output = self.stop_testpmd() time.sleep(1) self.testpmd.quit() time.sleep(10) if self.check_process_status(): raise VerifyFailure("testpmd close failed") else: self.logger.info("close testpmd successful") self.testpmd_status = 'close' return output def start_ports(self, port='all'): """ Start a port which the testpmd can see. """ timeout = 12 if port=='all' else 5 cmds =[ ["port start %s" % str(port), " ", timeout], # to avoid lsc event message interfere normal status [" ", '', timeout]] self.d_console(cmds) def get_stats(self, portid, flow=['rx', 'tx']): """ get one port statistics of testpmd """ _portid = int(portid) if isinstance(portid, (str, unicode)) else portid info = self.testpmd.get_pmd_stats(_portid) _kwd = ["-packets", "-errors", "-bytes"] stats = {} if isinstance(flow, list): for item in flow: for item2 in _kwd: name = item.upper() + item2 stats[name] = int(info[name]) elif isinstance(flow, (str, unicode)): for item in _kwd: name = flow.upper() + item stats[name] = int(info[name]) else: msg = 'unknown data type' raise Exception(msg) return stats def get_all_stats(self, ports): """ Get a group of ports statistics, which testpmd can display. """ stats = {} attrs = ['tx', 'rx'] for port_id in ports: stats[port_id] = self.get_stats(port_id, attrs) return stats def set_tester_port_status(self, port_name, status): """ Do some operations to the network interface port, such as "up" or "down". 
""" eth = self.parent.tester.get_interface(port_name) self.parent.tester.admin_ports_linux(eth, status) time.sleep(5) def set_dut_peer_port_by_id(self, port_id, status): # stop peer port on tester intf = self.parent.tester.get_local_port(self.parent.dut_ports[port_id]) self.set_tester_port_status(intf, status) time.sleep(5) cur_status = self.get_port_info(port_id, 'link_status') self.logger.info("port {0} is [{1}]".format(port_id, cur_status)) if cur_status != status: self.logger.warning("expected status is [{0}]".format(status)) def set_dut_port_status(self, port_id, status): opt = 'link-up' if status == 'up' else 'link-down' # stop slave link by force cmd = "set {0} port {1}".format(opt, port_id) self.d_console(cmd) time.sleep(5) cur_status = self.get_port_info(port_id, 'link_status') self.logger.info("port {0} is [{1}]".format(port_id, cur_status)) if cur_status != status: self.logger.warning("expected status is [{0}]".format(status)) # # testpmd bonding commands # def get_value_from_str(self, key_str, regx_str, string): """ Get some values from the given string by the regular expression. """ if isinstance(key_str, (unicode, str)): pattern = r"(?<=%s)%s" % (key_str, regx_str) s = re.compile(pattern) res = s.search(string) if type(res).__name__ == 'NoneType': msg = "{0} hasn't match anything".format(key_str) self.logger.warning(msg) return ' ' else: return res.group(0) elif isinstance(key_str, (list, tuple)): for key in key_str: pattern = r"(?<=%s)%s" % (key, regx_str) s = re.compile(pattern) res = s.search(string) if type(res).__name__ == 'NoneType': continue else: return res.group(0) else: self.logger.warning("all key_str hasn't match anything") return ' ' def _get_detail_from_port_info(self, port_id, args): """ Get the detail info from the output of pmd cmd 'show port info <port num>'. 
""" key_str, regx_str = args out = self.d_console("show port info %d" % port_id) find_value = self.get_value_from_str(key_str, regx_str, out) return find_value def get_detail_from_port_info(self, port_id, args): if isinstance(args[0], (list, tuple)): return [self._get_detail_from_port_info(port_id, sub_args) for sub_args in args] else: return self._get_detail_from_port_info(port_id, args) def get_port_info(self, port_id, info_type): ''' Get the specified port information by its output message format ''' info_set = { 'mac': ["MAC address: ", "([0-9A-F]{2}:){5}[0-9A-F]{2}"], 'connect_socket': ["Connect to socket: ", "\d+"], 'memory_socket': ["memory allocation on the socket: ", "\d+"], 'link_status': ["Link status: ", "\S+"], 'link_speed': ["Link speed: ", "\d+"], 'link_duplex': ["Link duplex: ", "\S+"], 'promiscuous_mode': ["Promiscuous mode: ", "\S+"], 'allmulticast_mode':["Allmulticast mode: ", "\S+"], 'vlan_offload': [ ["strip ", "\S+"], ['filter', "\S+"], ['qinq\(extend\) ', "\S+"]], 'queue_config': [ ["Max possible RX queues: ", "\d+"], ['Max possible number of RXDs per queue: ', "\d+"], ['Min possible number of RXDs per queue: ', "\d+"], ["Max possible TX queues: ", "\d+"], ['Max possible number of TXDs per queue: ', "\d+"], ['Min possible number of TXDs per queue: ', "\d+"],] } if info_type in info_set.keys(): return self.get_detail_from_port_info(port_id, info_set[info_type]) else: msg = os.linesep.join([ "support query items including::", os.linesep.join(info_set.keys())]) self.logger.warning(msg) return None # # On dut, dpdk testpmd common bonding methods # def get_bonding_config(self, config_content, args): """ Get bonding info by command "show bonding config". """ key_str, regx_str = args find_value = self.get_value_from_str(key_str, regx_str, config_content) return find_value def get_info_from_bond_config(self, config_content, args): """ Get active slaves of the bonding device which you choose. 
""" search_args = args if isinstance(args[0], (list, tuple)) else [args] for search_args in search_args: try: info = self.get_bonding_config(config_content, search_args) break except Exception as e: self.logger.info(e) else: info = None return info def get_bonding_info(self, bond_port, info_types): ''' Get the specified port information by its output message format ''' info_set = { 'mode': ["Bonding mode: ", "\d*"], 'agg_mode': ["IEEE802.3AD Aggregator Mode: ", "\S*"], 'balance_policy':["Balance Xmit Policy: ", "\S+"], 'slaves': [["Slaves \(\d\): \[", "\d*( \d*)*"], ["Slaves: \[", "\d*( \d*)*"]], 'active_slaves': [["Active Slaves \(\d\): \[", "\d*( \d*)*"], ["Acitve Slaves: \[", "\d*( \d*)*"]], 'primary': ["Primary: \[", "\d*"]} # get all config information config_content = self.d_console("show bonding config %d" % bond_port) if isinstance(info_types, (list or tuple)): query_values = [] for info_type in info_types: if info_type in info_set.keys(): find_value = self.get_info_from_bond_config( config_content, info_set[info_type]) if info_type in ['active_slaves', 'slaves']: find_value = [value for value in find_value.split(' ') if value] else: find_value = None query_values.append(find_value) return query_values else: info_type = info_types if info_type in info_set.keys(): find_value = self.get_info_from_bond_config( config_content, info_set[info_type]) if info_type in ['active_slaves', 'slaves']: find_value = [value for value in find_value.split(' ') if value] return find_value else: return None def get_active_slaves(self, bond_port): primary_port = int(self.get_bonding_info(bond_port, 'primary')) active_slaves = self.get_bonding_info(bond_port, 'active_slaves') return int(primary_port), [int(slave) for slave in active_slaves] def create_bonded_device(self, mode=0, socket=0, verify_detail=False): """ Create a bonding device with the parameters you specified. 
""" cmd = "create bonded device %d %d" % (mode, socket) out = self.d_console(cmd) err_fmt = "Create bonded device on mode [%d] socket [%d] failed" self.verify("Created new bonded device" in out, err_fmt% (mode, socket)) fmts = [ "Created new bonded device net_bond_testpmd_[\d] on \(port ", "Created new bonded device net_bonding_testpmd_[\d] on \(port ", "Created new bonded device eth_bond_testpmd_[\d] on \(port "] bond_port = self.get_value_from_str(fmts, "\d+", out) bond_port = int(bond_port) if verify_detail: out = self.d_console("show bonding config %d" % bond_port) self.verify("Bonding mode: %d" % mode in out, "Bonding mode display error when create bonded device") self.verify("Slaves: []" in out, "Slaves display error when create bonded device") self.verify("Active Slaves: []" in out, "Active Slaves display error when create bonded device") self.verify("Primary: []" not in out, "Primary display error when create bonded device") out = self.d_console("show port info %d" % bond_port) self.verify("Connect to socket: %d" % socket in out, "Bonding port connect socket error") self.verify("Link status: down" in out, "Bonding port default link status error") self.verify("Link speed: 0 Mbps" in out, "Bonding port default link speed error") return bond_port def add_slave(self, bond_port, invert_verify=False, expected_str='', *slave_ports): """ Add ports into the bonding device as slaves. 
""" if len(slave_ports) <= 0: utils.RED("No port exist when add slave to bonded device") for slave_id in slave_ports: cmd = "add bonding slave %d %d" % (slave_id, bond_port) out = self.d_console(cmd) if expected_str: self.verify(expected_str in out, "message <{0}> is missing".format(expected_str)) slaves = self.get_bonding_info(bond_port, 'slaves') if not invert_verify: self.verify(str(slave_id) in slaves, "Add port as bonding slave failed") else: err = "Add port as bonding slave successfully,should fail" self.verify(str(slave_id) not in slaves, err) def remove_slaves(self, bond_port, invert_verify=False, *slave_port): """ Remove the specified slave port from the bonding device. """ if len(slave_port) <= 0: msg = "No port exist when remove slave from bonded device" self.logger.error(msg) for slave_id in slave_port: cmd = "remove bonding slave %d %d" % (int(slave_id), bond_port) self.d_console(cmd) slaves = self.get_bonding_info(bond_port, 'slaves') if not invert_verify: self.verify(str(slave_id) not in slaves, "Remove slave to fail from bonding device") else: err = ( "Remove slave successfully from bonding device, " "should be failed") self.verify(str(slave_id) in slaves, err) def remove_all_slaves(self, bond_port): """ Remove all slaves of specified bound device. """ all_slaves = self.get_bonding_info(bond_port, 'slaves') if not all_slaves: return all_slaves = all_slaves.split() if len(all_slaves) == 0: return self.remove_slaves(bond_port, False, *all_slaves) def set_primary_slave(self, bond_port, slave_port, invert_verify=False): """ Set the primary slave for the bonding device. 
""" cmd = "set bonding primary %d %d" % (slave_port, bond_port) self.d_console(cmd) out = self.get_bonding_info(bond_port, 'primary') if not invert_verify: self.verify(str(slave_port) in out, "Set bonding primary port failed") else: err = "Set bonding primary port successfully, should not success" self.verify(str(slave_port) not in out, err) def set_bonding_mode(self, bond_port, mode): """ Set the bonding mode for port_id. """ cmd = "set bonding mode %d %d" % (mode, bond_port) self.d_console(cmd) mode_value = self.get_bonding_info(bond_port, 'mode') self.verify(str(mode) in mode_value, "Set bonding mode failed") def set_bonding_mac(self, bond_port, mac): """ Set the MAC for the bonding device. """ cmd = "set bonding mac_addr %s %s" % (bond_port, mac) self.d_console(cmd) new_mac = self.get_port_mac(bond_port) self.verify(new_mac == mac, "Set bonding mac failed") def get_port_mac(self, bond_port, query_type): bond_port_mac = self.get_port_info(bond_port, query_type) return bond_port_mac def set_bonding_balance_policy(self, bond_port, policy): """ Set the balance transmit policy for the bonding device. """ cmd = "set bonding balance_xmit_policy %d %s" % (bond_port, policy) self.d_console(cmd) new_policy = self.get_bonding_info(bond_port, 'balance_policy') policy = "BALANCE_XMIT_POLICY_LAYER" + policy.lstrip('l') self.verify(new_policy == policy, "Set bonding balance policy failed") @property def is_perf(self): return self.parent._enable_perf
class TestDdpPppL2tp(TestCase): def set_up_all(self): self.dut_ports = self.dut.get_ports(self.nic) self.verify(len(self.dut_ports) >= 1, "Insufficient ports") profile_file = 'dep/ppp-oe-ol2tpv2.pkgo' profile_dst = "/tmp/" self.dut.session.copy_file_to(profile_file, profile_dst) out = self.dut.send_expect("cat config/common_base", "]# ", 10) self.PF_Q_strip = 'CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF' pattern = "(%s=)(\d*)" % self.PF_Q_strip self.PF_QUEUE = self.element_strip(out, pattern) self.used_dut_port = self.dut_ports[0] tester_port = self.tester.get_local_port(self.used_dut_port) self.tester_intf = self.tester.get_interface(tester_port) self.dut_testpmd = PmdOutput(self.dut) def set_up(self): self.load_profile() def element_strip(self, out, pattern): """ Strip and get queue number. """ s = re.compile(pattern) res = s.search(out) if res is None: print utils.RED('Search no queue number.') return None else: result = res.group(2) return int(result) def load_profile(self): """ Load profile to update FVL configuration tables, profile will be stored in binary file and need to be passed to AQ to program FVL during initialization stage. """ self.dut_testpmd.start_testpmd( "Default", "--pkt-filter-mode=perfect --port-topology=chained \ --txq=%s --rxq=%s" % (self.PF_QUEUE, self.PF_QUEUE)) self.dut_testpmd.execute_cmd('port stop all') time.sleep(1) self.dut_testpmd.execute_cmd( 'ddp add 0 /tmp/ppp-oe-ol2tpv2.pkgo,/tmp/ppp-oe-ol2tpv2.bak') out = self.dut_testpmd.execute_cmd('ddp get list 0') self.verify("Profile number is: 1" in out, "Failed to load ddp profile!!!") self.dut_testpmd.execute_cmd('port start all') def ppp_l2tp_pkts(self, flowtype, keyword): """ Generate PPPoE, L2TPv2 and PPPoL2TPv2 packets. 
""" src_mac = "3C:FD:FE:A3:A0:01" dst_mac = "4C:FD:FE:A3:A0:01" src_ip = "1.1.1.1" dst_ip = "2.2.2.2" src_ipv6 = "1001:0db8:85a3:0000:0000:8a2e:0370:0001" dst_ipv6 = "2001:0db8:85a3:0000:0000:8a2e:0370:0001" sessionid = hex(0x7) sport = 4000 dport = 8000 if keyword is not 'def': if keyword is 'src_mac': src_mac = "3C:FD:FE:A3:A0:02" if keyword is 'dst_mac': dst_mac = "4C:FD:FE:A3:A0:02" if keyword is 'src_ip': src_ip = "1.1.1.2" if keyword is 'dst_ip': dst_ip = "2.2.2.3" if keyword is 'src_ipv6': src_ipv6 = "1001:0db8:85a3:0000:0000:8a2e:0370:0002" if keyword is 'dst_ipv6': dst_ipv6 = "2001:0db8:85a3:0000:0000:8a2e:0370:0002" if keyword is 'sessionid': sessionid = hex(0x8) if keyword is 'sport': sport = 4001 if keyword is 'dport': dport = 8001 if flowtype == 23: pkts = {'IPV4/L2TP/IPV4/UDP': 'Ether()/IP()/UDP(sport=1701,dport=1701)/PPP_L2TP(proto=0x0021,sessionid=%s)/IP(src="%s",dst="%s")/UDP(sport=%d, dport=%d)/Raw("X"* 20)' % (sessionid, src_ip, dst_ip, sport, dport)} if flowtype == 24: pkts = {'IPV4/L2TP/IPV6/UDP': 'Ether()/IP()/UDP(sport=1701, dport=1701)/PPP_L2TP(proto=0x0057,sessionid=%s)/IPv6(src="%s", dst="%s")/UDP(sport=%d, dport=%d)/Raw("X"* 20)' % (sessionid, src_ipv6, dst_ipv6, sport, dport)} if flowtype == 26: pkts = {'IPV4/L2TP': 'Ether(src="%s", dst="%s")/IP()/UDP(dport=1701, sport=1701)/L2TP(sessionid=%s)/Raw("X"*20)' % (src_mac, dst_mac, sessionid)} if flowtype == 28: pkts = {'PPPOE/IPV4/UDP': 'Ether()/PPPoE(sessionid=%s)/PPP(proto=0x21)/IP(src="%s",dst="%s")/UDP(sport=%d,dport=%d)/Raw("X"*20)' % (sessionid, src_ip, dst_ip, sport, dport)} if flowtype == 29: pkts = {'PPPOE/IPV6/UDP': 'Ether()/PPPoE(sessionid=%s)/PPP(proto=0x57)/IPv6(src="%s",dst="%s")/UDP(sport=%d,dport=%d)/Raw("X"*20)' % (sessionid, src_ipv6, dst_ipv6, sport, dport)} if flowtype == 30: pkts = {'PPPOE': 'Ether(src="%s", dst="%s")/PPPoE(sessionid=%s)' % (src_mac, dst_mac, sessionid)} return pkts def raw_packet_generate(self, flowtype): """ setup raw flow type filter for flow 
director, source/destination fields (both IP addresses and UDP ports) should be swapped in template file and packets sent to NIC. """ if flowtype == 23: a = Ether()/IP()/UDP(dport=1701, sport=1701)/PPP_L2TP(proto=0x0021, sessionid=0x7)/IP(dst="1.1.1.1", src="2.2.2.2")/UDP(dport=4000, sport=8000) if flowtype == 24: a = Ether()/IP()/UDP(dport=1701, sport=1701)/PPP_L2TP(proto=0x0057, sessionid=0x7)/IPv6(dst="1001:0db8:85a3:0000:0000:8a2e:0370:0001", src="2001:0db8:85a3:0000:0000:8a2e:0370:0001")/UDP(dport=4000, sport=8000)/Raw("X"*20) if flowtype == 26: a = Ether(dst="3C:FD:FE:A3:A0:01", src="4C:FD:FE:A3:A0:01")/IP()/UDP(dport=1701, sport=1701)/L2TP(sessionid=0x7)/Raw("X"*20) if flowtype == 28: a = Ether()/PPPoE(sessionid=0x7)/PPP(proto=0x21)/IP(dst="1.1.1.1", src="2.2.2.2")/UDP(dport=4000, sport=8000)/Raw("X"*20) if flowtype == 29: a = Ether()/PPPoE(sessionid=0x7)/PPP(proto=0x57)/IPv6(dst="1001:0db8:85a3:0000:0000:8a2e:0370:0001", src="2001:0db8:85a3:0000:0000:8a2e:0370:0001")/UDP(dport=4000, sport=8000)/Raw("X"*20) if flowtype == 30: a = Ether(dst="3C:FD:FE:A3:A0:01", src="4C:FD:FE:A3:A0:01")/PPPoE(sessionid=0x7) ba = bytearray(str(a)) rawfile_src = '/tmp/test.raw' File = open("%s" % rawfile_src, "wb") File.write(ba) File.close() rawfile_dst = "/tmp/" self.dut.session.copy_file_to(rawfile_src, rawfile_dst) def send_and_verify(self, flowtype, keyword='def', type='rss'): """ Send packets and verify result. 
""" pkts = self.ppp_l2tp_pkts(flowtype, keyword) for packet_type in pkts.keys(): self.tester.scapy_append( 'sendp([%s], iface="%s")' % (pkts[packet_type], self.tester_intf)) self.tester.scapy_execute() out = self.dut.get_session_output(timeout=2) print out if type is 'rss': self.verify("PKT_RX_RSS_HASH" in out, "Failed to test RSS!!!") pattern = "port (\d)/queue (\d{1,2}): received (\d) packets" qnum = self.element_strip(out, pattern) ptypes = packet_type.split('/') if flowtype in [23, 24, 26]: layerparams = ['L3_', 'TUNNEL_', 'INNER_L3_', 'INNER_L4_'] endparams = ['_EXT_UNKNOWN', '', '_EXT_UNKNOWN', ''] if flowtype in [28, 29, 30]: layerparams = ['L2_ETHER_', 'L3_', 'L4_'] endparams = ['', '_EXT_UNKNOWN', ''] for layerparam, ptype, endparam in zip( layerparams, ptypes, endparams): layer_type = layerparam + ptype + endparam self.verify( layer_type in out, "Failed to output ptype information!!!") return qnum def pctype_flowtype_mapping(self, flowtype, pctype): """ dynamic flowtype/pctype mapping for new protocol. 
""" self.dut_testpmd.execute_cmd('port config 0 pctype mapping reset') out = self.dut_testpmd.execute_cmd('show port 0 pctype mapping') self.verify("pctype: 63 -> flowtype: 14" in out, "Failed show flow type to pctype mapping!!!") self.verify("pctype: %s -> flowtype: %s" % (pctype, flowtype) not in out, "Failed show flow type to pctype mapping!!!") self.dut_testpmd.execute_cmd( 'port config 0 pctype mapping update %s %s' % (pctype, flowtype)) out = self.dut_testpmd.execute_cmd('show port 0 pctype mapping') self.verify("pctype: %s -> flowtype: %s" % (pctype, flowtype) in out, "Failed update flow type to pctype mapping!!!") self.dut_testpmd.execute_cmd('set fwd rxonly') self.dut_testpmd.execute_cmd('set verbose 1') def run_rss_test(self, crlwords, flowtype, pctype, keywords, qchecks): """ Use dynamic flowtype/pctype mapping, use default or dynamic change control words to set hash input configuration for new protocol RSS enabling, check RSS could work and keywords could control queue number. crlwords: control words of keyword. flowtype: define flow type 23~63 values for PPPoE and PPPoL2TPv2 packet types as test plan table. pctype: profile defines 14~21 pctypes for PPPoE and PPPoL2TPv2 packet types. keywords: keywords have session ID, S-Port, D-Port, IP SA, IP DA and etc. qchecks: define sameq and difq. If change keywords, direct packets to different queue, otherwise direct packets to same queue. 
""" self.pctype_flowtype_mapping(flowtype, pctype) if crlwords is not None: self.dut_testpmd.execute_cmd('port stop all') time.sleep(1) self.dut_testpmd.execute_cmd( 'port config 0 pctype %s hash_inset clear all' % pctype) for word in crlwords: self.dut_testpmd.execute_cmd( 'port config 0 pctype %s hash_inset set field %s' % (pctype, word)) self.dut_testpmd.execute_cmd('port start all') self.dut_testpmd.execute_cmd('port config all rss %s' % flowtype) self.dut_testpmd.execute_cmd('start') qnum = self.send_and_verify(flowtype, 'def', 'rss') qdef = qnum for word, chk in zip(keywords, qchecks): qnum = self.send_and_verify(flowtype, word, 'rss') if qnum == qdef: result = 'sameq' elif qnum != qdef: result = 'difq' self.verify(result == chk, "Faild to verify RSS when key word change!!!") def run_fd_test(self, crlwords, flowtype, pctype, keywords, qchecks): """ Use dynamic flowtype/pctype mapping, use default or dynamic change control words to set flow director input configuration for new protocol, setup raw flow type filter for flow director, check flow director could work. crlwords: control words of keyword flowtype: define flow type 23~63 values for PPPoE and PPPoL2TPv2 packet types as test plan table. pctype: profile defines below 14~21 pctypes for PPPoE and PPPoL2TPv2 packet types. keywords: keywords have Session ID, S-Port, D-Port, IP SA, IP DA and etc. qchecks: define sameq and difq. If change keywords, direct packets to queue 0, otherwise direct packets to same queue. 
""" self.pctype_flowtype_mapping(flowtype, pctype) if crlwords is not None: self.dut_testpmd.execute_cmd('port stop all') time.sleep(1) self.dut_testpmd.execute_cmd( 'port config 0 pctype %s fdir_inset clear all' % pctype) for word in crlwords: self.dut_testpmd.execute_cmd( 'port config 0 pctype %s fdir_inset set field %s' % (pctype, word)) self.dut_testpmd.execute_cmd('port start all') self.dut_testpmd.execute_cmd('start') qnum = self.send_and_verify(flowtype, 'def', 'fd') self.verify(qnum == 0, "Receive packet from wrong queue!!!") self.raw_packet_generate(flowtype) queue = random.randint(1, self.PF_QUEUE - 1) self.dut_testpmd.execute_cmd( 'flow_director_filter 0 mode raw add flow %d fwd queue %d \ fd_id 1 packet /tmp/test.raw' % (flowtype, queue)) qnum = self.send_and_verify(flowtype, 'def', 'fd') qdef = qnum self.verify(qnum == queue, "Receive packet from wrong queue!!!") for word, chk in zip(keywords, qchecks): qnum = self.send_and_verify(flowtype, word, 'fd') if qnum == qdef: result = 'sameq' elif qnum == 0: result = 'difq' self.verify(result == chk, "Faild to verify flow director when \ key word change!!!") def test_rss_pppoe(self): """ PPPoE is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, default hash input set are MAC SA and session ID, check RSS could work and queue could change when changing them. """ crlwords = None keywords = ['sessionid', 'src_mac', 'dst_mac'] qchecks = ['difq', 'difq', 'sameq'] self.run_rss_test(crlwords, 30, 17, keywords, qchecks) def test_rss_pppoe_ipv4(self): """ PPPoE IPv4 is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, default hash input set are IPv4 SA, IPv4 DA, S-Port, D-Port, check RSS could work and queue could change when changing them. 
""" crlwords = None keywords = ['src_ip', 'dst_ip', 'sport', 'dport', 'sessionid'] qchecks = ['difq', 'difq', 'difq', 'difq', 'sameq'] self.run_rss_test(crlwords, 28, 15, keywords, qchecks) def test_rss_pppoe_ipv6(self): """ PPPoE IPv6 is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, default hash input set are IPv6 SA, IPv6 DA, S-Port, D-Port, check RSS could work and queue could change when changing them. """ crlwords = None keywords = ['src_ipv6', 'dst_ipv6', 'sport', 'dport', 'sessionid'] qchecks = ['difq', 'difq', 'difq', 'difq', 'sameq'] self.run_rss_test(crlwords, 29, 16, keywords, qchecks) def test_rss_l2tp(self): """ L2TPv2 PAY is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, default hash input set are MAC SA and session ID, check RSS could work and queue could change when changing them. """ crlwords = None keywords = ['sessionid', 'src_mac', 'dst_mac'] qchecks = ['difq', 'difq', 'sameq'] self.run_rss_test(crlwords, 26, 21, keywords, qchecks) def test_rss_pppoe_sessid(self): """ PPPoE is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, dynamic to change hash input set configuration for session ID word 47, enable RSS, check RSS could work and queue could change when changing session ID. """ crlwords = range(47, 48) keywords = ['sessionid'] qchecks = ['difq'] self.run_rss_test(crlwords, 30, 17, keywords, qchecks) def test_rss_pppoe_srcmac(self): """ PPPoE is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, dynamic to change hash input set configuration for source mac words 3~5, enable RSS, check RSS could work and queue could change when changing SA. """ crlwords = range(3, 6) keywords = ['src_mac', 'dst_mac'] qchecks = ['difq', 'sameq'] self.run_rss_test(crlwords, 30, 17, keywords, qchecks) def test_rss_pppol2tp_ipv4(self): """ PPPoL2TPv2 IPv4 is supported by NVM with profile updated. 
Download profile then set flowtype/pctype mapping, default hash input set are IPv4 SA, IPv4 DA, S-Port, D-Port, check RSS could work and queue could change when changing them. """ crlwords = None keywords = ['src_ip', 'dst_ip', 'sport', 'dport', 'sessionid'] qchecks = ['difq', 'difq', 'difq', 'difq', 'sameq'] self.run_rss_test(crlwords, 23, 18, keywords, qchecks) def test_rss_pppol2tp_inner_srcip(self): """ PPPoL2TPv2 IPv4 is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, dynamic to change hash input set configuration for IPv4 SA words 15~16, enable RSS, check RSS could work and queue could change when changing IPv4 SA. """ crlwords = range(15, 17) keywords = ['src_ip', 'dst_ip'] qchecks = ['difq', 'sameq'] self.run_rss_test(crlwords, 23, 18, keywords, qchecks) def test_rss_pppol2tp_inner_dstip(self): """ PPPoL2TPv2 IPv4 is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, dynamic to change hash input set configuration for IPv4 DA words 27~28, enable RSS, check RSS could work and queue could change when changing IPv4 DA. """ crlwords = range(27, 29) keywords = ['dst_ip', 'src_ip'] qchecks = ['difq', 'sameq'] self.run_rss_test(crlwords, 23, 18, keywords, qchecks) def test_rss_pppol2tp_sport(self): """ PPPoL2TPv2 IPv4 is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, dynamic to change hash input set configuration for S-Port word 29, enable RSS, check RSS could work and queue could change when changing S-Port. """ crlwords = range(29, 30) keywords = ['sport', 'dport'] qchecks = ['difq', 'sameq'] self.run_rss_test(crlwords, 23, 18, keywords, qchecks) def test_rss_pppol2tp_dport(self): """ PPPoL2TPv2 IPv4 is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, dynamic to change hash input set configuration for D-Port word 30, enable RSS, check RSS could work and queue could change when changing D-Port. 
""" crlwords = range(30, 31) keywords = ['dport', 'sport'] qchecks = ['difq', 'sameq'] self.run_rss_test(crlwords, 23, 18, keywords, qchecks) def test_fd_pppoe(self): """ PPPoE is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, default flow director input set are MAC SA, session ID, setup raw flow type filter for flow director, check flow director could work when sending matched packets to configured queue, otherwise direct packets to queue 0. """ crlwords = None keywords = ['src_mac', 'sessionid', 'dst_mac'] qchecks = ['difq', 'difq', 'sameq'] self.run_fd_test(crlwords, 30, 17, keywords, qchecks) def test_fd_l2tp(self): """ L2TPv2 PAY is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, default flow director input set are MAC SA, session ID, setup raw flow type filter for flow director, check flow director could work when sending matched packets to configured queue, otherwise direct packets to queue 0. """ crlwords = None keywords = ['src_mac', 'sessionid', 'dst_mac'] qchecks = ['difq', 'difq', 'sameq'] self.run_fd_test(crlwords, 26, 21, keywords, qchecks) def test_fd_pppoe_ipv4(self): """ PPPoE IPv4 is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, default flow director input set are IPv4 SA, IPv4 DA, S-Port, D-Port, setup raw flow type filter for flow director, check flow director could work when sending matched packets to configured queue, otherwise direct packets to queue 0. """ crlwords = None keywords = ['src_ip', 'dst_ip', 'sport', 'dport', 'sessionid'] qchecks = ['difq', 'difq', 'difq', 'difq', 'sameq'] self.run_fd_test(crlwords, 28, 15, keywords, qchecks) def test_fd_pppoe_ipv6(self): """ PPPoE IPv6 is supported by NVM with profile updated. 
Download profile then set flowtype/pctype mapping, default flow director input set are IPv6 SA, IPv6 DA, S-Port, D-Port, setup raw flow type filter for flow director, check flow director could work when sending matched packets to configured queue, otherwise direct packets to queue 0. """ crlwords = None keywords = ['src_ipv6', 'dst_ipv6', 'sport', 'dport', 'sessionid'] qchecks = ['difq', 'difq', 'difq', 'difq', 'sameq'] self.run_fd_test(crlwords, 29, 16, keywords, qchecks) def test_fd_pppol2tp_ipv4(self): """ PPPoL2TPv2 IPv4 is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, default flow director input set are IPv4 SA, IPv4 DA, S-Port, D-Port, setup raw flow type filter for flow director, check flow director could work when sending matched packets to configured queue, otherwise direct packets to queue 0. """ crlwords = None keywords = ['src_ip', 'dst_ip', 'sport', 'dport'] qchecks = ['difq', 'difq', 'difq', 'difq'] self.run_fd_test(crlwords, 23, 18, keywords, qchecks) def test_fd_pppol2tp_ipv6(self): """ PPPoL2TPv2 IPv6 is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, default flow director input set are IPv6 SA, IPv6 DA, S-Port, D-Port, setup raw flow type filter for flow director, check flow director could work when sending matched packets to configured queue, otherwise direct packets to queue 0. """ crlwords = None keywords = ['src_ipv6', 'dst_ipv6', 'sport', 'dport'] qchecks = ['difq', 'difq', 'difq', 'difq'] self.run_fd_test(crlwords, 24, 19, keywords, qchecks) def test_fd_pppol2tp_ipv4_dstip(self): """ PPPoL2TPv2 IPv4 is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, dynamic to change flow director input set configuration for IPv4 DA words 27~28, setup raw flow type filter for flow director, check flow director could work when sending matched IPv4 DA packets to configured queue, otherwise direct packets to queue 0. 
""" crlwords = range(27, 29) keywords = ['src_ip', 'sport', 'dport', 'dst_ip'] qchecks = ['sameq', 'sameq', 'sameq', 'difq'] self.run_fd_test(crlwords, 23, 18, keywords, qchecks) def test_fd_pppol2tp_ipv6_dstipv6(self): """ PPPoL2TPv2 IPv6 is supported by NVM with profile updated. Download profile then set flowtype/pctype mapping, dynamic to change flow director input set configuration for IPv6 DA words 21~28, setup raw flow type filter for flow director, check flow director could work when sending matched IPv6 DA packets to configured queue, otherwise direct packets to queue 0. """ crlwords = range(21, 29) keywords = ['src_ipv6', 'sport', 'dport', 'dst_ipv6'] qchecks = ['sameq', 'sameq', 'sameq', 'difq'] self.run_fd_test(crlwords, 24, 19, keywords, qchecks) def tear_down(self): self.dut_testpmd.execute_cmd('stop') out = self.dut_testpmd.execute_cmd('ddp get list 0') if "Profile number is: 0" not in out: self.dut_testpmd.execute_cmd('port stop all') time.sleep(1) self.dut_testpmd.execute_cmd('ddp del 0 /tmp/ppp-oe-ol2tpv2.bak') out = self.dut_testpmd.execute_cmd('ddp get list 0') self.verify("Profile number is: 0" in out, "Failed to delete ddp profile!!!") self.dut_testpmd.execute_cmd('port start all') self.dut_testpmd.quit() def tear_down_all(self): pass
class TestDdpGtpQregion(TestCase):
    """
    DDP GTP queue-region tests for FVL (i40e): load the gtp profile,
    map flow types to queue regions, then check RSS hash-input control
    words and flow director for GTP packet types.
    """

    def set_up_all(self):
        self.dut_ports = self.dut.get_ports(self.nic)
        self.verify(len(self.dut_ports) >= 1, "Insufficient ports")
        # copy the DDP profile package to the DUT
        profile_file = 'dep/gtp.pkgo'
        profile_dst = "/tmp/"
        self.dut.session.copy_file_to(profile_file, profile_dst)
        # read the configured PF queue count out of the DPDK build config
        out = self.dut.send_expect("cat config/common_base", "]# ", 10)
        self.PF_Q_strip = 'CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF'
        pattern = "(%s=)(\d*)" % self.PF_Q_strip
        self.PF_QUEUE = self.element_strip(out, pattern)
        self.used_dut_port = self.dut_ports[0]
        tester_port = self.tester.get_local_port(self.used_dut_port)
        self.tester_intf = self.tester.get_interface(tester_port)
        self.dut_testpmd = PmdOutput(self.dut)

    def set_up(self):
        self.load_profile()
        self.flowtype_qregion_mapping()

    def element_strip(self, out, pattern):
        """
        Strip and get queue number.
        Returns int(group 2) of the match, or None when nothing matches.
        """
        s = re.compile(pattern)
        res = s.search(out)
        if res is None:
            print utils.RED('Search no queue number.')
            return None
        else:
            result = res.group(2)
            return int(result)

    def load_profile(self):
        """
        Load profile to update FVL configuration tables, profile will be
        stored in binary file and need to be passed to AQ to program FVL
        during initialization stage.
        """
        self.dut_testpmd.start_testpmd(
            "Default", "--pkt-filter-mode=perfect --port-topology=chained \
            --txq=%s --rxq=%s" % (self.PF_QUEUE, self.PF_QUEUE))
        self.dut_testpmd.execute_cmd('port stop all')
        time.sleep(1)
        self.dut_testpmd.execute_cmd('ddp add 0 /tmp/gtp.pkgo,/tmp/gtp.bak')
        out = self.dut_testpmd.execute_cmd('ddp get list 0')
        self.verify("Profile number is: 1" in out,
                    "Failed to load ddp profile!!!")
        self.dut_testpmd.execute_cmd('port start all')

    def flowtype_qregion_mapping(self):
        """
        Queue region, queue range and flow type mapping set according to
        mapping table.
        """
        # region N starts at idx_ids[N] and spans q_nums[N] queues,
        # then each region is bound to one GTP flow type
        rg_ids = [0, 1, 2, 3]
        idx_ids = [1, 10, 30, 40]
        q_nums = [8, 16, 8, 16]
        flowtypes = [26, 23, 24, 25]
        for rg_id, idx_id, q_num in zip(rg_ids, idx_ids, q_nums):
            self.dut_testpmd.execute_cmd('set port 0 queue-region region_id \
                %d queue_start_index %d queue_num %d'
                                         % (rg_id, idx_id, q_num))
        for rg_id, flowtype in zip(rg_ids, flowtypes):
            self.dut_testpmd.execute_cmd('set port 0 queue-region region_id \
                %d flowtype %d' % (rg_id, flowtype))
        self.dut_testpmd.execute_cmd('set port 0 queue-region flush on')

    def gtp_pkts(self, flowtype, keyword, opt):
        """
        Generate GTP packets.
        keyword selects which single header field differs from its default;
        opt selects whether the prefix or the suffix part of an IPv6
        address (or a plain field) is the one that changes.
        """
        src_ip = "1.1.1.1"
        dst_ip = "2.2.2.2"
        src_ipv6 = "1001:0db8:85a3:0000:0000:8a2e:0370:0001"
        dst_ipv6 = "2001:0db8:85a3:0000:0000:8a2e:0370:0001"
        teid = hex(0xfe)
        sport = 100
        dport = 200
        # NOTE(review): 'is' on str literals relies on CPython interning;
        # these should be '==' comparisons
        if opt is 'chg_preword_opt':
            # change only the address *prefix* (64/48/32-bit)
            if keyword is 'dst_ipv6_64pre':
                dst_ipv6 = "2001:0db8:85a3:0001:0000:8a2e:0370:0001"
            if keyword is 'src_ipv6_48pre':
                src_ipv6 = "1001:0db8:85a4:0000:0000:8a2e:0370:0001"
            if keyword is 'dst_ipv6_32pre':
                dst_ipv6 = "2001:0db9:85a3:0000:0000:8a2e:0370:0001"
        elif opt in ['chg_sufword_opt', 'notword_opt']:
            # change the field value (or the address *suffix* for IPv6)
            if keyword is 'src_ip':
                src_ip = "1.1.1.2"
            if keyword is 'dst_ip':
                dst_ip = "2.2.2.3"
            if keyword in ['src_ipv6', 'src_ipv6_48pre']:
                src_ipv6 = "1001:0db8:85a3:0000:0000:8a2e:0370:0002"
            if keyword in ['dst_ipv6', 'dst_ipv6_32pre', 'dst_ipv6_64pre']:
                dst_ipv6 = "2001:0db8:85a3:0000:0000:8a2e:0370:0002"
            if keyword is 'teid':
                teid = hex(0xff)
            if keyword is 'sport':
                sport = 101
            if keyword is 'dport':
                dport = 201
        if flowtype == 23:
            pkts = {'IPV6/GTPU/IPV6/UDP': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header(teid=%s)/IPv6(src="%s",dst="%s")/UDP(sport=%d,dport=%d)/Raw("X"*20)' % (teid, src_ipv6, dst_ipv6, sport, dport)}
        if flowtype == 25:
            pkts = {'IPV6/GTPC': 'Ether()/IPv6(src="%s",dst="%s")/UDP(dport=2123)/GTP_U_Header(teid=%s)/Raw("X"*20)' % (src_ipv6, dst_ipv6, teid)}
        if flowtype == 26:
            pkts = {'IPV6/GTPU/IPV4/UDP': 'Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header(teid=%s)/IP(src="%s",dst="%s")/UDP(sport=%d,dport=%d)/Raw("X"*20)' % (teid, src_ip, dst_ip, sport, dport)}
        return pkts

    def raw_packet_generate(self, flowtype):
        """
        setup raw flow type filter for flow director, source/destinations
        fields (both ip addresses and udp ports) should be swapped in
        template file and packets sent to NIC.
        """
        if flowtype == 23:
            a = Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header(teid=0xfe)/IPv6(dst="1001:0db8:85a3:0000:0000:8a2e:0370:0001", src="2001:0db8:85a3:0000:0000:8a2e:0370:0001")/UDP(dport=100, sport=200)/Raw("X"*20)
        if flowtype == 26:
            a = Ether()/IPv6()/UDP(dport=2152)/GTP_U_Header(teid=0xfe)/IP(dst="1.1.1.1", src="2.2.2.2")/UDP(dport=100, sport=200)/Raw("X"*20)
        ba = bytearray(str(a))
        rawfile_src = '/tmp/test_gtp.raw'
        # NOTE(review): file handle not closed via context manager
        File = open("%s" % rawfile_src, "wb")
        File.write(ba)
        File.close()
        rawfile_dst = "/tmp/"
        self.dut.session.copy_file_to(rawfile_src, rawfile_dst)

    def send_verify_fd(self, flowtype, keyword, opt):
        """
        Send packets and verify result.
        Returns the queue id that testpmd reports as receiver; also checks
        the printed ptype chain.
        """
        pkts = self.gtp_pkts(flowtype, keyword, opt)
        for packet_type in pkts.keys():
            self.tester.scapy_append(
                'sendp([%s], iface="%s")'
                % (pkts[packet_type], self.tester_intf))
            self.tester.scapy_execute()
            out = self.dut.get_session_output(timeout=2)
            pattern = "port (\d)/queue (\d{1,2}): received (\d) packets"
            qnum = self.element_strip(out, pattern)
            ptypes = packet_type.split('/')
            layerparams = ['L3_', 'TUNNEL_', 'INNER_L3_', 'INNER_L4_']
            endparams = ['_EXT_UNKNOWN', '', '_EXT_UNKNOWN', '']
            for layerparam, ptype, endparam in zip(
                    layerparams, ptypes, endparams):
                layer_type = layerparam + ptype + endparam
                self.verify(
                    layer_type in out,
                    "Failed to output ptype information!!!")
        return qnum

    def send_and_verify(self, flowtype, qmin, qmax, keyword):
        """
        Send packets and verify result. opt has below three scenarios:
        word_opt: check RSS could work when enable words for keyword.
        chg_preword_opt: change keyword value for 64,48 or 32-bit prefixes
                         instead of full address, e.g. full IPv6 words are
                         50~57, 64 bit prefix words should be 50~53, only
                         changing 64 bit prefix dst controls pmd to receive
                         packet from different queue.
        chg_subword_opt: change keyword value, e.g. if set full address
                         words, changing dst controls pmd to receive packet
                         from different queue, if set prefix address,
                         changing suffix dst controls pmd to receive packet
                         from same queue.
        notword_opt: change not keyword, e.g. check dst controls queue
                     number, change src then check pmd receives packet from
                     same queue.
        """
        # NOTE(review): 'is' on str literals below relies on interning;
        # should be '==' comparisons
        for opt in ['word_opt', 'chg_preword_opt', 'chg_sufword_opt',
                    'notword_opt']:
            if opt is 'chg_preword_opt':
                # prefix variation only applies to the prefix keywords
                if keyword not in ['dst_ipv6_64pre', 'src_ipv6_48pre',
                                   'dst_ipv6_32pre']:
                    continue
            if opt is 'notword_opt':
                # swap the keyword to the field NOT covered by the control
                # words; teid has no counterpart, so stop there
                if keyword == 'teid':
                    break
                elif keyword == 'sport':
                    keyword = 'dport'
                elif keyword == 'dport':
                    keyword = 'sport'
                elif keyword == 'src_ip':
                    keyword = 'dst_ip'
                elif keyword == 'dst_ip':
                    keyword = 'src_ip'
                elif keyword in ['src_ipv6', 'src_ipv6_48pre']:
                    keyword = 'dst_ipv6'
                elif keyword in ['dst_ipv6', 'dst_ipv6_64pre',
                                 'dst_ipv6_32pre']:
                    keyword = 'src_ipv6'
            pkts = self.gtp_pkts(flowtype, keyword, opt)
            for packet_type in pkts.keys():
                self.tester.scapy_append(
                    'sendp([%s], iface="%s")'
                    % (pkts[packet_type], self.tester_intf))
                self.tester.scapy_execute()
                out = self.dut.get_session_output(timeout=2)
                self.verify("PKT_RX_RSS_HASH" in out,
                            "Failed to test RSS!!!")
                pattern = \
                    "port (\d)/queue (\d{1,2}): received (\d) packets"
                qnum = self.element_strip(out, pattern)
                if opt is 'word_opt':
                    # reference queue: 'word_opt' always runs first, so
                    # crol_qnum is set before the later comparisons
                    crol_qnum = qnum
                layerparams = ['L3_', 'TUNNEL_', 'INNER_L3_', 'INNER_L4_']
                ptypes = packet_type.split('/')
                endparams = ['_EXT_UNKNOWN', '', '_EXT_UNKNOWN', '']
                for layerparam, ptype, endparam in zip(
                        layerparams, ptypes, endparams):
                    layer_type = layerparam + ptype + endparam
                    self.verify(
                        layer_type in out,
                        "Failed to output ptype information!!!")
                # received queue must lie inside the flowtype's region
                self.verify(qnum <= qmax and qnum >= qmin,
                            "Queue is not between this queue range!!!")
                if opt is 'chg_preword_opt':
                    self.verify(qnum != crol_qnum,
                                "Failed to test rss if changing prefix key \
                    words!!!")
                if opt is 'chg_sufword_opt':
                    if keyword in ['dst_ipv6_64pre', 'src_ipv6_48pre',
                                   'dst_ipv6_32pre']:
                        self.verify(qnum == crol_qnum,
                                    "Failed to test rss if changing suffixal \
                        key words!!!")
                    else:
                        self.verify(qnum != crol_qnum,
                                    "Failed to test rss if changing key \
                        words!!!")
                if opt is 'notword_opt':
                    self.verify(qnum == crol_qnum,
                                "Failed to test rss if changing to other key \
                    words!!!")

    def flowtype_pctype_mapping(self, flowtype, pctype):
        # reset, then install and verify the requested flowtype->pctype map
        self.dut_testpmd.execute_cmd('port config 0 pctype mapping reset')
        out = self.dut_testpmd.execute_cmd('show port 0 pctype mapping')
        self.verify("pctype: 63 -> flowtype: 14" in out,
                    "Failed show flow type to pctype mapping!!!")
        self.verify("pctype: %s -> flowtype: %s" % (pctype, flowtype)
                    not in out,
                    "Failed show flow type to pctype mapping!!!")
        self.dut_testpmd.execute_cmd(
            'port config 0 pctype mapping update %s %s' % (pctype, flowtype))
        out = self.dut_testpmd.execute_cmd('show port 0 pctype mapping')
        self.verify("pctype: %s -> flowtype: %s" % (pctype, flowtype) in out,
                    "Failed update flow type to pctype mapping!!!")

    def run_fd_test(self, crlwords, flowtype, pctype, keywords, qchecks):
        """
        Use dynamic flowtype/pctype mapping, use default or dynamic change
        control words to set flow director input configuration for new
        protocol, setup raw flow type filter for flow director, check flow
        director could work.
        crlwords: control words of keyword
        flowtype: define flow type 23~63 values for GTP packet types as
                  test plan table.
        pctype: profile defines below 10~25 pctypes for GTP packet types.
        keywords: keywords have Session ID, S-Port, D-Port, IP SA, IP DA
                  and etc.
        qchecks: define sameq and difq. If change keywords, direct packets
                 to queue 0, otherwise direct packets to same queue.
        """
        self.flowtype_pctype_mapping(flowtype, pctype)
        if crlwords is not None:
            # reprogram the flow-director input set with the given words
            self.dut_testpmd.execute_cmd('port stop all')
            time.sleep(1)
            self.dut_testpmd.execute_cmd(
                'port config 0 pctype %s fdir_inset clear all' % pctype)
            for word in crlwords:
                self.dut_testpmd.execute_cmd(
                    'port config 0 pctype %s fdir_inset set field %s'
                    % (pctype, word))
            self.dut_testpmd.execute_cmd('port start all')
        self.dut_testpmd.execute_cmd('set fwd rxonly')
        self.dut_testpmd.execute_cmd('set verbose 1')
        self.dut_testpmd.execute_cmd('start')
        # without a filter the packet must land on queue 0
        qnum = self.send_verify_fd(flowtype, keywords, 'word_opt')
        self.verify(qnum == 0, "Receive packet from wrong queue!!!")
        self.raw_packet_generate(flowtype)
        queue = random.randint(1, self.PF_QUEUE - 1)
        self.dut_testpmd.execute_cmd(
            'flow_director_filter 0 mode raw add flow %d fwd queue %d \
            fd_id 1 packet /tmp/test_gtp.raw' % (flowtype, queue))
        # matching packet must now hit the configured queue
        qnum = self.send_verify_fd(flowtype, keywords, 'word_opt')
        qdef = qnum
        self.verify(qnum == queue, "Receive packet from wrong queue!!!")
        for word, chk in zip(keywords, qchecks):
            qnum = self.send_verify_fd(flowtype, word, 'chg_sufword_opt')
            # NOTE(review): 'result' stays unbound if qnum is neither qdef
            # nor 0 -- would raise NameError instead of a verify failure
            if qnum == qdef:
                result = 'sameq'
            elif qnum == 0:
                result = 'difq'
            self.verify(result == chk, "Faild to verify flow director when \
                key word change!!!")

    def run_gtp_test(self, crlwords, flowtype, pctype, qmin, qmax, keyword):
        """
        Use dynamic flowtype/pctype mapping, queue region, dynamic change
        control words to set hash input configuration for new protocol GTP
        RSS enabling, check keyword could control queue number in
        configured queue region.
        crlwords: control words of keyword
        flowtype: define flow type 26, 23, 24, 25 for GTP types as below
                  table, check each layer type, tunnel packet includes GTPC
                  and GTPU, GTPC has none inner L3, GTPU has none, IPV4 and
                  IPV6 inner L3.
        pctype: profile defines below 22, 23, 24, 25 pctypes for GTP packet
                types.
        qmin: design queue minimum value for the flowtype in queue region.
        qmax: design queue maximum value for the flowtype in queue region.
keyword: keyword has teid, sport, dport, src, dst, etc. +-------------+------------+------------+--------------+-------------+ | Packet Type | PCTypes | Flow Types | Queue region | Queue range | +-------------+------------+------------+--------------+-------------+ | GTP-U IPv4 | 22 | 26 | 0 | 1~8 | +-------------+------------+------------+--------------+-------------+ | GTP-U IPv6 | 23 | 23 | 1 | 10~25 | +-------------+------------+------------+--------------+-------------+ | GTP-U PAY4 | 24 | 24 | 2 | 30~37 | +-------------+------------+------------+--------------+-------------+ | GTP-C PAY4 | 25 | 25 | 3 | 40~55 | +-------------+------------+------------+--------------+-------------+ """ self.flowtype_qregion_mapping() self.flowtype_pctype_mapping(flowtype, pctype) self.dut_testpmd.execute_cmd('port stop all') time.sleep(1) self.dut_testpmd.execute_cmd( 'port config 0 pctype %s hash_inset clear all' % pctype) for word in crlwords: self.dut_testpmd.execute_cmd( 'port config 0 pctype %s hash_inset set field %s' % (pctype, word)) self.dut_testpmd.execute_cmd('port start all') self.dut_testpmd.execute_cmd('port config all rss %s' % flowtype) self.dut_testpmd.execute_cmd('set fwd rxonly') self.dut_testpmd.execute_cmd('set verbose 1') self.dut_testpmd.execute_cmd('start') self.send_and_verify(flowtype, qmin, qmax, keyword) def test_outer_dst_contrl_gtpcq(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for outer dst mac words 50~57, enable rss, check outer dst could control queue, also queue number is between the queue range(40,55). """ crlwords = range(50, 58) self.run_gtp_test(crlwords, 25, 25, 40, 55, "dst_ipv6") def test_teid_contrl_gtpcq(self): """ GTP is supported by NVM with profile updated. 
Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for teid words 44~45, enable rss, check teid could control queue, also queue number is between the queue range(40,55). """ crlwords = range(44, 46) self.run_gtp_test(crlwords, 25, 25, 40, 55, "teid") def test_teid_contrl_gtpu_ipv4q(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for teid words 44~45, enable rss, check teid could control queue, also queue number is between the queue range(1,8). """ crlwords = range(44, 46) self.run_gtp_test(crlwords, 26, 22, 1, 8, "teid") def test_sport_contrl_gtpu_ipv4q(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for sport word 29, enable rss, check sport could control queue, also queue number is between the queue range(1,8). """ crlwords = range(29, 30) self.run_gtp_test(crlwords, 26, 22, 1, 8, "sport") def test_dport_contrl_gtpu_ipv4q(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for dport word 30, enable rss, check dport could control queue, also queue number is between the queue range(1,8). """ crlwords = range(30, 31) self.run_gtp_test(crlwords, 26, 22, 1, 8, "dport") def test_inner_src_contrl_gtpu_ipv4q(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for inner src words 15~16, enable rss, check inner src could control queue, also queue number is between the queue range(1,8). 
""" crlwords = range(15, 17) self.run_gtp_test(crlwords, 26, 22, 1, 8, "src_ip") def test_inner_dst_contrl_gtpu_ipv4q(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for inner dst words 27~28, enable rss, check inner dst could control queue, also queue number is between the queue range(1,8). """ crlwords = range(27, 29) self.run_gtp_test(crlwords, 26, 22, 1, 8, "dst_ip") def test_teid_contrl_gtpu_ipv6q(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for teid words 44~45, enable rss, check teid could control queue, also queue number is between the queue range(10,25). """ crlwords = range(44, 46) self.run_gtp_test(crlwords, 23, 23, 10, 25, "teid") def test_sport_contrl_gtpu_ipv6q(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for sport word 29, enable rss, check sport could control queue, also queue number is between the queue range(10,25). """ crlwords = range(29, 30) self.run_gtp_test(crlwords, 23, 23, 10, 25, "sport") def test_dport_contrl_gtpu_ipv6q(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for dport word 30, enable rss, check dport could control queue, also queue number is between the queue range(10,25). """ crlwords = range(30, 31) self.run_gtp_test(crlwords, 23, 23, 10, 25, "dport") def test_inner_src_contrl_gtpu_ipv6q(self): """ GTP is supported by NVM with profile updated. 
Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for inner src words 13~20, enable rss, check inner src could control queue, also queue number is between the queue range(10,25). """ crlwords = range(13, 21) self.run_gtp_test(crlwords, 23, 23, 10, 25, "src_ipv6") def test_inner_dst_contrl_gtpu_ipv6q(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for inner dst words 21~28, enable rss, check inner dst could control queue, also queue number is between the queue range(10,25). """ crlwords = range(21, 29) self.run_gtp_test(crlwords, 23, 23, 10, 25, "dst_ipv6") def test_fd_gtpu_ipv4(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, default flow director input set configuration is teid, setup raw flow type filter for flow director check flow director could work when sending matched teid packets to configured queue, otherwise direct packets to queue 0. """ crlwords = None keywords = ['src_ip', 'dst_ip', 'sport', 'dport', 'teid'] qchecks = ['sameq', 'sameq', 'sameq', 'sameq', 'difq'] self.run_fd_test(crlwords, 26, 22, keywords, qchecks) def test_fd_gtpu_ipv4_dstip(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change flow director input set configuration for dst IPv4 words 27~28, setup raw flow type filter for flow director, check flow director could work when sending matched dst IPv4 packets to configured queue, otherwise direct packets to queue 0. """ crlwords = range(27, 29) keywords = ['src_ip', 'sport', 'dport', 'dst_ip'] qchecks = ['sameq', 'sameq', 'sameq', 'difq'] self.run_fd_test(crlwords, 26, 22, keywords, qchecks) def test_fd_gtpu_ipv4_srcip(self): """ GTP is supported by NVM with profile updated. 
Download profile then set queue region/flowtype/pctype mapping, dynamic to change flow director input set configuration for src IPv4 words 15~16, setup raw flow type filter for flow director, check flow director could work when sending matched src IPv4 packets to configured queue, otherwise direct packets to queue 0. """ crlwords = range(15, 17) keywords = ['dst_ip', 'sport', 'dport', 'src_ip'] qchecks = ['sameq', 'sameq', 'sameq', 'difq'] self.run_fd_test(crlwords, 26, 22, keywords, qchecks) def test_fd_gtpu_ipv6(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, default flow director input set configuration is teid, setup raw flow type filter for flow director check flow director could work when sending matched teid packets to configured queue, otherwise direct packets to queue 0. """ crlwords = None keywords = ['src_ipv6', 'dst_ipv6', 'sport', 'dport', 'teid'] qchecks = ['sameq', 'sameq', 'sameq', 'sameq', 'difq'] self.run_fd_test(crlwords, 23, 23, keywords, qchecks) def test_fd_gtpu_ipv6_dstipv6(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change flow director input set configuration for dst IPv6 words 21~28, setup raw flow type filter for flow director, check flow director could work when sending matched dst IPv6 packets to configured queue, otherwise direct packets to queue 0. """ crlwords = range(21, 29) keywords = ['src_ipv6', 'sport', 'dport', 'teid', 'dst_ipv6'] qchecks = ['sameq', 'sameq', 'sameq', 'sameq', 'difq'] self.run_fd_test(crlwords, 23, 23, keywords, qchecks) def test_fd_gtpu_ipv6_srcipv6(self): """ GTP is supported by NVM with profile updated. 
Download profile then set queue region/flowtype/pctype mapping, dynamic to change flow director input set configuration for src IPv6 words 13~20, setup raw flow type filter for flow director, check flow director could work when sending matched src IPv6 packets to configured queue, otherwise direct packets to queue 0. """ crlwords = range(13, 21) keywords = ['dst_ipv6', 'sport', 'dport', 'teid', 'src_ipv6'] qchecks = ['sameq', 'sameq', 'sameq', 'sameq', 'difq'] self.run_fd_test(crlwords, 23, 23, keywords, qchecks) def test_outer_64pre_dst_contrl_gtpcq(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for outer dst 64 bit prefix words are 50~53, enable rss, check dst 64 bit prefixes could control queue, also queue number is between the queue range(40,55). """ crlwords = range(50, 54) self.run_gtp_test(crlwords, 25, 25, 40, 55, "dst_ipv6_64pre") def test_inner_48pre_src_contrl_gtpuq(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for inner src 48 bit prefix words are 13~15, enable rss, check src 48 bit prefixes could control queue, also queue number is between the queue range(10,25). """ crlwords = range(13, 16) self.run_gtp_test(crlwords, 23, 23, 10, 25, "src_ipv6_48pre") def test_inner_32pre_dst_contrl_gtpuq(self): """ GTP is supported by NVM with profile updated. Download profile then set queue region/flowtype/pctype mapping, dynamic to change hash input set configuration for inner dst 32 bit prefix words are 21~22, enable rss, check dst 32 bit prefixes could control queue, also queue number is between the queue range(10,25). 
""" crlwords = range(21, 23) self.run_gtp_test(crlwords, 23, 23, 10, 25, "dst_ipv6_32pre") def tear_down(self): self.dut_testpmd.execute_cmd('stop') self.dut_testpmd.execute_cmd('set port 0 queue-region flush off') out = self.dut_testpmd.execute_cmd('ddp get list 0') if "Profile number is: 0" not in out: self.dut_testpmd.execute_cmd('port stop all') time.sleep(1) self.dut_testpmd.execute_cmd('ddp del 0 /tmp/gtp.bak') out = self.dut_testpmd.execute_cmd('ddp get list 0') self.verify("Profile number is: 0" in out, "Failed to delete ddp profile!!!") self.dut_testpmd.execute_cmd('port start all') self.dut_testpmd.quit() def tear_down_all(self): pass