def initialize_dpdk_environment(dut_node, dut_if1, dut_if2):
    """Initialize the DPDK test environment on the dut_node.

    Load the module uio and igb_uio, then bind the test NIC to the igb_uio.

    :param dut_node: Will init the DPDK on this node.
    :param dut_if1: DUT interface name.
    :param dut_if2: DUT interface name.
    :type dut_node: dict
    :type dut_if1: str
    :type dut_if2: str
    :returns: none
    :raises RuntimeError: If it fails to bind the interfaces to igb_uio.
    """
    # Resolve both test interfaces to their PCI addresses up front.
    pci_addrs = [
        Topology.get_interface_pci_addr(dut_node, iface)
        for iface in (dut_if1, dut_if2)
    ]

    bind_cmd = 'cd {0}/tests/dpdk/dpdk_scripts/ && sudo ./init_dpdk.sh {1} {2}' \
        .format(con.REMOTE_FW_DIR, pci_addrs[0], pci_addrs[1])

    ssh = SSH()
    ssh.connect(dut_node)
    rc, _, _ = ssh.exec_command(bind_cmd, timeout=600)
    if rc != 0:
        raise RuntimeError('Failed to bind the interfaces to igb_uio at '
                           'node {0}'.format(dut_node['host']))
def cleanup_dpdk_environment(dut_node, dut_if1, dut_if2):
    """Cleanup the DPDK test environment on the DUT node.

    Unbind the NIC from the igb_uio and bind them to the kernel driver.

    :param dut_node: Will cleanup the DPDK on this node.
    :param dut_if1: DUT interface name.
    :param dut_if2: DUT interface name.
    :type dut_node: dict
    :type dut_if1: str
    :type dut_if2: str
    :returns: none
    :raises RuntimeError: If it fails to cleanup the dpdk.
    """
    # Original kernel driver and PCI address for each test interface,
    # needed by the cleanup script to rebind the NICs.
    drv1 = Topology.get_interface_driver(dut_node, dut_if1)
    pci1 = Topology.get_interface_pci_addr(dut_node, dut_if1)
    drv2 = Topology.get_interface_driver(dut_node, dut_if2)
    pci2 = Topology.get_interface_pci_addr(dut_node, dut_if2)

    cleanup_cmd = 'cd {0}/tests/dpdk/dpdk_scripts/ && sudo ./cleanup_dpdk.sh ' \
        '{1} {2} {3} {4}'.format(con.REMOTE_FW_DIR, drv1, pci1, drv2, pci2)

    ssh = SSH()
    ssh.connect(dut_node)
    rc, _, _ = ssh.exec_command(cleanup_cmd, timeout=600)
    if rc != 0:
        raise RuntimeError('Failed to cleanup the dpdk at node {0}'.format(
            dut_node['host']))
def initialize_dpdk_environment(dut_node, dut_if1, dut_if2):
    """Initialize the DPDK test environment on the dut_node.

    Load the module uio and igb_uio, then bind the test NIC to the igb_uio.

    :param dut_node: Will init the DPDK on this node.
    :param dut_if1: DUT interface name.
    :param dut_if2: DUT interface name.
    :type dut_node: dict
    :type dut_if1: str
    :type dut_if2: str
    :raises RuntimeError: If it fails to bind the interfaces to igb_uio.
    """
    # Only DUT nodes run DPDK; silently skip any other node type.
    if dut_node['type'] != NodeType.DUT:
        return

    arch = Topology.get_node_arch(dut_node)
    pci1 = Topology.get_interface_pci_addr(dut_node, dut_if1)
    pci2 = Topology.get_interface_pci_addr(dut_node, dut_if2)

    init_cmd = '{fwdir}/tests/dpdk/dpdk_scripts/init_dpdk.sh ' \
        '{pci1} {pci2} {arch}'.format(
            fwdir=Constants.REMOTE_FW_DIR, pci1=pci1, pci2=pci2, arch=arch)

    ssh = SSH()
    ssh.connect(dut_node)
    rc, _, _ = ssh.exec_command_sudo(init_cmd, timeout=600)
    if rc != 0:
        raise RuntimeError('Failed to bind the interfaces to igb_uio '
                           'at node {name}'.format(name=dut_node['host']))
def cleanup_dpdk_environment(dut_node, dut_if1, dut_if2):
    """Cleanup the DPDK test environment on the DUT node.

    Unbind the NIC from the igb_uio and bind them to the kernel driver.

    :param dut_node: Will cleanup the DPDK on this node.
    :param dut_if1: DUT interface name.
    :param dut_if2: DUT interface name.
    :type dut_node: dict
    :type dut_if1: str
    :type dut_if2: str
    :raises RuntimeError: If it fails to cleanup the dpdk.
    """
    # Only DUT nodes run DPDK; silently skip any other node type.
    if dut_node['type'] != NodeType.DUT:
        return

    # (driver, pci) pair for each interface, needed to rebind the NICs
    # back to their original kernel drivers.
    pairs = [
        (Topology.get_interface_driver(dut_node, iface),
         Topology.get_interface_pci_addr(dut_node, iface))
        for iface in (dut_if1, dut_if2)
    ]

    cleanup_cmd = '{fwdir}/tests/dpdk/dpdk_scripts/cleanup_dpdk.sh ' \
        '{drv1} {pci1} {drv2} {pci2}'.format(
            fwdir=Constants.REMOTE_FW_DIR,
            drv1=pairs[0][0], pci1=pairs[0][1],
            drv2=pairs[1][0], pci2=pairs[1][1])

    ssh = SSH()
    ssh.connect(dut_node)
    rc, _, _ = ssh.exec_command_sudo(cleanup_cmd, timeout=600)
    if rc != 0:
        raise RuntimeError(
            'Failed to cleanup the dpdk at node {name}'.format(
                name=dut_node['host']))
def get_adj_mac(nodes_info, dut_node, dut_if1, dut_if2):
    """Get adjacency MAC addresses of the DUT node.

    :param nodes_info: All the nodes info in the topology file.
    :param dut_node: Will execute the l3fwd on this node.
    :param dut_if1: The test link interface 1.
    :param dut_if2: The test link interface 2.
    :type nodes_info: dict
    :type dut_node: dict
    :type dut_if1: str
    :type dut_if2: str
    :returns: Returns MAC addresses of adjacency DUT nodes.
    :rtype: tuple(str, str)
    """
    if_key0 = dut_if1
    if_key1 = dut_if2
    if_pci0 = Topology.get_interface_pci_addr(dut_node, if_key0)
    if_pci1 = Topology.get_interface_pci_addr(dut_node, if_key1)

    # Detect which interface is DPDK port 0: the one with the lower
    # PCI address. Swap the keys so key0 always maps to port 0.
    if min(if_pci0, if_pci1) != if_pci0:
        if_key0, if_key1 = if_key1, if_key0

    adj_node0, adj_if_key0 = Topology.get_adjacent_node_and_interface(
        nodes_info, dut_node, if_key0)
    adj_node1, adj_if_key1 = Topology.get_adjacent_node_and_interface(
        nodes_info, dut_node, if_key1)

    adj_mac0 = Topology.get_interface_mac(adj_node0, adj_if_key0)
    adj_mac1 = Topology.get_interface_mac(adj_node1, adj_if_key1)

    return adj_mac0, adj_mac1
def _c_vpp_2vfpt_ip4scale200k_plen30(self, **kwargs):
    """Instantiate one VM with vpp_2vfpt_ip4scale200k_plen30 configuration.

    Creates a kernel-image QEMU VM running VPP as the VNF, configures the
    in-VM VPP with addresses/routes/ARP entries (different for DUT1 vs
    DUT2 chains), then attaches the two test VFs via vfio-pci passthrough.

    :param kwargs: Named parameters.
    :type kwargs: dict
    """
    qemu_id = kwargs[u"qemu_id"]
    name = kwargs[u"name"]
    # One vCPU per affined host core; 4 GiB RAM; kernel-image boot.
    self.machines[name] = QemuUtils(
        node=self.nodes[kwargs[u"node"]], qemu_id=qemu_id,
        smp=len(self.machines_affinity[name]), mem=4096,
        vnf=kwargs[u"vnf"], img=Constants.QEMU_VM_KERNEL
    )
    self.machines[name].add_default_params()
    self.machines[name].add_kernelvm_params()
    # DUT1-side and DUT2-side VMs get mirrored addressing: the in-VM
    # VPP interfaces, routes and static ARP entries differ per side.
    if u"DUT1" in name:
        self.machines[name].configure_kernelvm_vnf(
            ip1=u"2.2.2.1/30", ip2=u"1.1.1.2/30",
            route1=u"20.0.0.0/30", routeif1=u"avf-0/0/6/0",
            nexthop1=u"2.2.2.2",
            route2=u"10.0.0.0/30", routeif2=u"avf-0/0/7/0",
            nexthop2=u"1.1.1.1",
            arpmac1=u"3c:fd:fe:d1:5c:d8", arpip1=u"1.1.1.1",
            arpif1=u"avf-0/0/7/0",
            queues=kwargs[u"queues"],
            jumbo_frames=kwargs[u"jumbo"]
        )
    else:
        self.machines[name].configure_kernelvm_vnf(
            ip1=u"3.3.3.2/30", ip2=u"2.2.2.2/30",
            route1=u"10.0.0.0/30", routeif1=u"avf-0/0/7/0",
            nexthop1=u"2.2.2.1",
            route2=u"20.0.0.0/30", routeif2=u"avf-0/0/6/0",
            nexthop2=u"3.3.3.1",
            arpmac1=u"3c:fd:fe:d1:5c:d9", arpip1=u"3.3.3.1",
            arpif1=u"avf-0/0/6/0",
            queues=kwargs[u"queues"],
            jumbo_frames=kwargs[u"jumbo"]
        )
    # NOTE(review): if2 is attached before if1 — presumably deliberate so
    # the VM enumerates the PCI devices in the required order; confirm.
    self.machines[name].add_vfio_pci_if(
        pci=Topology.get_interface_pci_addr(
            self.nodes[kwargs[u"node"]], kwargs[u"if2"])
    )
    self.machines[name].add_vfio_pci_if(
        pci=Topology.get_interface_pci_addr(
            self.nodes[kwargs[u"node"]], kwargs[u"if1"])
    )
def start_testpmd(
        node, if1, if2, lcores_list, nb_cores, queue_nums,
        jumbo_frames, rxq_size=1024, txq_size=1024):
    """Execute the testpmd on the DUT node.

    :param node: DUT node.
    :param if1: The test link interface 1.
    :param if2: The test link interface 2.
    :param lcores_list: The DPDK run cores.
    :param nb_cores: The cores number for the forwarding.
    :param queue_nums: The queues number for the NIC.
    :param jumbo_frames: Indication if the jumbo frames are used (True) or
        not (False).
    :param rxq_size: RXQ size. Default=1024.
    :param txq_size: TXQ size. Default=1024.
    :type node: dict
    :type if1: str
    :type if2: str
    :type lcores_list: str
    :type nb_cores: str
    :type queue_nums: str
    :type jumbo_frames: bool
    :type rxq_size: int
    :type txq_size: int
    :raises RuntimeError: If the script "run_testpmd.sh" fails.
    """
    # testpmd runs on DUT nodes only.
    if node[u"type"] != NodeType.DUT:
        return

    pci_first = Topology.get_interface_pci_addr(node, if1)
    pci_second = Topology.get_interface_pci_addr(node, if2)

    # Assemble the EAL/PMD options; max packet length depends on whether
    # jumbo frames are in use.
    pmd_options = dict(
        eal_corelist=f"1,{lcores_list}",
        eal_driver=False,
        eal_pci_whitelist0=pci_first,
        eal_pci_whitelist1=pci_second,
        eal_in_memory=True,
        pmd_num_mbufs=16384,
        pmd_fwd_mode=u"io",
        pmd_nb_ports=u"2",
        pmd_portmask=u"0x3",
        pmd_max_pkt_len=u"9200" if jumbo_frames else u"1518",
        pmd_mbuf_size=u"16384",
        pmd_rxd=rxq_size,
        pmd_txd=txq_size,
        pmd_rxq=queue_nums,
        pmd_txq=queue_nums,
        pmd_nb_cores=nb_cores,
        pmd_disable_link_check=True,
        pmd_auto_start=True,
        pmd_numa=True
    )
    testpmd_args = DpdkUtil.get_testpmd_args(**pmd_options)

    command = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}" \
        f"/entry/run_testpmd.sh \"{testpmd_args}\""
    exec_cmd_no_error(
        node, command, timeout=1800,
        message=f"Failed to execute testpmd at node {node['host']}"
    )
def iface_update_numa_node(node):
    """For all interfaces from topology file update numa node based on
       information from the node.

    :param node: Node from topology.
    :type node: dict
    :returns: Nothing.
    :raises ValueError: If numa node is less than 0.
    :raises RuntimeError: If update of numa node fails.
    """
    ssh = SSH()
    # Connect once and reuse the session for every interface instead of
    # re-connecting on each loop iteration.
    ssh.connect(node)
    for if_key in Topology.get_node_interfaces(node):
        if_pci = Topology.get_interface_pci_addr(node, if_key)
        cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(if_pci)
        # Retry up to three times; sysfs may report -1 (unknown NUMA)
        # transiently, which is treated as a failed read.
        for _ in range(3):
            (ret, out, _) = ssh.exec_command(cmd)
            if ret == 0:
                try:
                    numa_node = int(out)
                    if numa_node < 0:
                        raise ValueError
                except ValueError:
                    logger.trace('Reading numa location failed for: {0}'
                                 .format(if_pci))
                else:
                    Topology.set_interface_numa_node(
                        node, if_key, numa_node)
                    break
        else:
            # All retries exhausted without a valid reading.
            raise RuntimeError('Update numa node failed for: {0}'
                               .format(if_pci))
def add_pci_all_devices(self, node):
    """Add all PCI devices from topology file to startup config.

    :param node: DUT node.
    :type node: dict
    :returns: nothing
    """
    # Iterate the interfaces dict directly; calling .keys() is redundant.
    for port in node['interfaces']:
        pci_addr = Topology.get_interface_pci_addr(node, port)
        # Skip interfaces that have no PCI address in the topology file.
        if pci_addr:
            self.add_pci_device(node, pci_addr)
def _configure_vpp_cross_horiz(self, **kwargs):
    """Configure VPP in cross horizontal topology (single memif).

    :param kwargs: Named parameters.
    :type kwargs: dict
    """
    # Pick the physical interface belonging to this container's DUT.
    # NOTE(review): if the container name contains neither 'DUT1' nor
    # 'DUT2', if_pci/if_name are unbound and a NameError follows; the
    # caller presumably guarantees one of them — confirm.
    if 'DUT1' in self.engine.container.name:
        if_pci = Topology.get_interface_pci_addr(
            self.engine.container.node, kwargs['dut1_if'])
        if_name = Topology.get_interface_name(
            self.engine.container.node, kwargs['dut1_if'])
    if 'DUT2' in self.engine.container.name:
        if_pci = Topology.get_interface_pci_addr(
            self.engine.container.node, kwargs['dut2_if'])
        if_name = Topology.get_interface_name(
            self.engine.container.node, kwargs['dut2_if'])
    self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
    self.engine.create_vpp_exec_config(
        'memif_create_cross_horizon.exec',
        mid1=kwargs['mid1'], sid1=kwargs['sid1'], if_name=if_name,
        socket1='{guest_dir}/memif-{c.name}-{sid1}'.
        format(c=self.engine.container, **kwargs))
def get_adj_mac(nodes, node, if1, if2):
    """Get adjacency MAC addresses of the DUT node.

    :param nodes: All the nodes info in the topology file.
    :param node: DUT node.
    :param if1: The test link interface 1.
    :param if2: The test link interface 2.
    :type nodes: dict
    :type node: dict
    :type if1: str
    :type if2: str
    :returns: Returns MAC addresses of adjacency DUT nodes and PCI
        addresses.
    :rtype: tuple(str, str, str, str)
    """
    if_key0 = if1
    if_key1 = if2
    if_pci0 = Topology.get_interface_pci_addr(node, if_key0)
    if_pci1 = Topology.get_interface_pci_addr(node, if_key1)

    # Detect which is the port 0 (lower PCI address). If the interfaces
    # are flipped, patch l3fwd so its hard-coded routes match.
    if min(if_pci0, if_pci1) != if_pci0:
        if_key0, if_key1 = if_key1, if_key0
        L3fwdTest.patch_l3fwd(node, u"patch_l3fwd_flip_routes")

    adj_node0, adj_if_key0 = Topology.get_adjacent_node_and_interface(
        nodes, node, if_key0
    )
    adj_node1, adj_if_key1 = Topology.get_adjacent_node_and_interface(
        nodes, node, if_key1
    )
    # Re-read PCI addresses after the possible key swap above.
    if_pci0 = Topology.get_interface_pci_addr(node, if_key0)
    if_pci1 = Topology.get_interface_pci_addr(node, if_key1)
    adj_mac0 = Topology.get_interface_mac(adj_node0, adj_if_key0)
    adj_mac1 = Topology.get_interface_mac(adj_node1, adj_if_key1)

    return adj_mac0, adj_mac1, if_pci0, if_pci1
def _configure_vpp_cross_horiz(self, **kwargs):
    """Configure VPP in cross horizontal topology (single memif).

    :param kwargs: Named parameters.
    :type kwargs: dict
    """
    container_node = self.engine.container.node
    container_name = self.engine.container.name
    # Select the physical interface that belongs to this container's DUT.
    if u"DUT1" in container_name:
        if_pci = Topology.get_interface_pci_addr(
            container_node, kwargs[u"dut1_if"])
        if_name = Topology.get_interface_name(
            container_node, kwargs[u"dut1_if"])
    if u"DUT2" in container_name:
        if_pci = Topology.get_interface_pci_addr(
            container_node, kwargs[u"dut2_if"])
        if_name = Topology.get_interface_name(
            container_node, kwargs[u"dut2_if"])
    self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
    socket_path = (
        f"{kwargs[u'guest_dir']}/memif-"
        f"{container_name}-{kwargs[u'sid1']}"
    )
    self.engine.create_vpp_exec_config(
        u"memif_create_cross_horizon.exec",
        mid1=kwargs[u"mid1"],
        sid1=kwargs[u"sid1"],
        if_name=if_name,
        socket1=socket_path
    )
def initialize_dpdk_framework(node, if1, if2, nic_driver):
    """Initialize the DPDK framework on the DUT node. Bind interfaces to
       driver.

    :param node: DUT node.
    :param if1: DUT first interface name.
    :param if2: DUT second interface name.
    :param nic_driver: Interface driver.
    :type node: dict
    :type if1: str
    :type if2: str
    :type nic_driver: str
    :raises RuntimeError: If it fails to bind the interfaces to driver.
    """
    # DPDK is initialized on DUT nodes only.
    if node[u"type"] != NodeType.DUT:
        return

    pci_addrs = [
        Topology.get_interface_pci_addr(node, iface)
        for iface in (if1, if2)
    ]
    script = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}" \
        f"/entry/init_dpdk.sh"
    command = f"{script} {nic_driver} {pci_addrs[0]} {pci_addrs[1]}"
    exec_cmd_no_error(
        node, command, timeout=600,
        message=u"Initialize the DPDK failed!"
    )
def cleanup_dpdk_framework(node, if1, if2):
    """Cleanup the DPDK framework on the DUT node. Bind interfaces to
       default driver specified in topology.

    :param node: Will cleanup the DPDK on this node.
    :param if1: DUT first interface name.
    :param if2: DUT second interface name.
    :type node: dict
    :type if1: str
    :type if2: str
    :raises RuntimeError: If it fails to cleanup the dpdk.
    """
    # DPDK is cleaned up on DUT nodes only.
    if node[u"type"] != NodeType.DUT:
        return

    pci_addrs = [
        Topology.get_interface_pci_addr(node, iface)
        for iface in (if1, if2)
    ]
    # We are not supporting more than one driver yet.
    nic_driver = Topology.get_interface_driver(node, if1)

    script = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}" \
        f"/entry/cleanup_dpdk.sh"
    command = f"{script} {nic_driver} {pci_addrs[0]} {pci_addrs[1]}"
    exec_cmd_no_error(
        node, command, timeout=1200,
        message=u"Cleanup the DPDK failed!"
    )
def exec_the_udpfwd_test(dut_node, dut_if, file_prefix,
                         dest_ip, is_ipv4=True):
    """Execute the udpfwd on the dut_node.

    :param dut_node: Will execute the udpfwd on this node.
    :param dut_if: DUT interface name.
    :param file_prefix: The test case config file prefix.
    :param dest_ip: The UDP packet dest IP.
    :param is_ipv4: Execute the IPv4 or IPv6 test.
    :type dut_node: dict
    :type dut_if: str
    :type file_prefix: str
    :type dest_ip: str
    :type is_ipv4: bool
    :returns: none.
    :raises RuntimeError: If failed to execute udpfwd test on the dut node.
    """
    pci_address = Topology.get_interface_pci_addr(dut_node, dut_if)

    # run_tldk.sh takes two trailing address slots; dest_ip fills the
    # first one for IPv4 and the second one for IPv6, the unused slot
    # is passed as NONE.
    tail = '{0} NONE'.format(dest_ip) if is_ipv4 \
        else 'NONE {0}'.format(dest_ip)
    cmd = 'cd {0}/{4} && ./run_tldk.sh {0}/{5}/{2}_rx.pcap ' \
          '{0}/{5}/{2}_tx.pcap {1} {0}/{5}/{2}_fe.cfg ' \
          '{0}/{5}/{2}_be.cfg {3}'.format(
              con.REMOTE_FW_DIR, pci_address, file_prefix, tail,
              con.TLDK_SCRIPTS, con.TLDK_TESTCONFIG)

    ssh = SSH()
    ssh.connect(dut_node)
    rc, _, _ = ssh.exec_command(cmd, timeout=600)
    if rc != 0:
        raise RuntimeError(
            'Failed to execute udpfwd test at node {0}'.format(
                dut_node['host']))
def compute_circular_topology(self, nodes, filter_list=None, nic_pfs=1):
    """Return computed circular path.

    :param nodes: Nodes to append to the path.
    :param filter_list: Filter criteria list.
    :param nic_pfs: Number of PF of NIC.
    :type nodes: dict
    :type filter_list: list of strings
    :type nic_pfs: int
    :returns: Topology information dictionary.
    :rtype: dict
    """
    t_dict = dict()
    duts = [key for key in nodes if u"DUT" in key]
    t_dict[u"duts"] = duts
    t_dict[u"duts_count"] = len(duts)
    t_dict[u"int"] = u"pf"

    # Build the circular path TG -> DUT1 [-> DUT2 ...] -> TG once per
    # PF pair.
    for _ in range(0, nic_pfs // 2):
        self.append_node(nodes[u"TG"])
        for dut in duts:
            self.append_node(nodes[dut], filter_list=filter_list)
        self.append_node(nodes[u"TG"])
    self.compute_path(always_same_link=False)

    n_idx = 0  # node index
    t_idx = 1  # TG interface index
    d_idx = 0  # DUT interface index
    while True:
        interface, node = self.next_interface()
        if not interface:
            break
        if node[u"type"] == u"TG":
            n_pfx = u"TG"
            p_pfx = f"pf{t_idx}"
            i_pfx = f"if{t_idx}"
            n_idx = 0
            t_idx = t_idx + 1
        else:
            # Each DUT contributes two interfaces per pass, hence the
            # // 2 and % 2 arithmetic below.
            n_pfx = f"DUT{n_idx // 2 + 1}"
            p_pfx = f"pf{d_idx % 2 + t_idx - 1}"
            i_pfx = f"if{d_idx % 2 + t_idx - 1}"
            n_idx = n_idx + 1
            d_idx = d_idx + 1
        t_dict[f"{n_pfx}"] = node
        t_dict[f"{n_pfx}_{p_pfx}"] = [interface]
        t_dict[f"{n_pfx}_{p_pfx}_mac"] = \
            [Topology.get_interface_mac(node, interface)]
        t_dict[f"{n_pfx}_{p_pfx}_vlan"] = \
            [Topology.get_interface_vlan(node, interface)]
        t_dict[f"{n_pfx}_{p_pfx}_pci"] = \
            [Topology.get_interface_pci_addr(node, interface)]
        t_dict[f"{n_pfx}_{p_pfx}_ip4_addr"] = \
            [Topology.get_interface_ip4(node, interface)]
        t_dict[f"{n_pfx}_{p_pfx}_ip4_prefix"] = \
            [Topology.get_interface_ip4_prefix_length(node, interface)]
        if f"{n_pfx}_pf_pci" not in t_dict:
            t_dict[f"{n_pfx}_pf_pci"] = []
        t_dict[f"{n_pfx}_pf_pci"].append(
            Topology.get_interface_pci_addr(node, interface))
        if f"{n_pfx}_pf_keys" not in t_dict:
            t_dict[f"{n_pfx}_pf_keys"] = []
        t_dict[f"{n_pfx}_pf_keys"].append(interface)
        # Backward compatibility below
        t_dict[f"{n_pfx.lower()}_{i_pfx}"] = interface
        t_dict[f"{n_pfx.lower()}_{i_pfx}_mac"] = \
            Topology.get_interface_mac(node, interface)
        t_dict[f"{n_pfx.lower()}_{i_pfx}_pci"] = \
            Topology.get_interface_pci_addr(node, interface)
        t_dict[f"{n_pfx.lower()}_{i_pfx}_ip4_addr"] = \
            Topology.get_interface_ip4(node, interface)
        t_dict[f"{n_pfx.lower()}_{i_pfx}_ip4_prefix"] = \
            Topology.get_interface_ip4_prefix_length(node, interface)
    self.clear_path()
    return t_dict
def configure_vpp_in_all_containers(self, chain_topology,
                                    dut1_if=None, dut2_if=None):
    """Configure VPP in all containers.

    :param chain_topology: Topology used for chaining containers can be
        chain or cross_horiz. Chain topology is using 1 memif pair per
        container. Cross_horiz topology is using 1 memif and 1 physical
        interface in container (only single container can be configured).
    :param dut1_if: Interface on DUT1 directly connected to DUT2.
    :param dut2_if: Interface on DUT2 directly connected to DUT1.
    :type chain_topology: str
    :type dut1_if: str
    :type dut2_if: str
    """
    # Count number of DUTs based on node's host information
    dut_cnt = len(
        Counter([
            self.containers[container].node['host']
            for container in self.containers
        ]))
    # Containers per DUT. Integer division is required: under Python 3
    # '/' yields a float, which would leak into mids/sids and produce
    # socket names like 'memif-x-1.0'.
    mod = len(self.containers) // dut_cnt

    container_vat_template = 'memif_create_{topology}.vat'.format(
        topology=chain_topology)

    if chain_topology == 'chain':
        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            self.engine.create_vpp_startup_config()
            self.engine.create_vpp_exec_config(
                container_vat_template,
                mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                socket1='memif-{c.name}-{sid}'.format(
                    c=self.engine.container, sid=sid1),
                socket2='memif-{c.name}-{sid}'.format(
                    c=self.engine.container, sid=sid2))
    elif chain_topology == 'cross_horiz':
        if mod > 1:
            raise RuntimeError('Container chain topology {topology} '
                               'supports only single container.'.format(
                                   topology=chain_topology))
        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            sid1 = i % mod * 2 + 1
            self.engine.container = self.containers[container]
            # Pick the physical interface matching this container's DUT.
            if 'DUT1' in self.engine.container.name:
                if_pci = Topology.get_interface_pci_addr(
                    self.engine.container.node, dut1_if)
                if_name = Topology.get_interface_name(
                    self.engine.container.node, dut1_if)
            if 'DUT2' in self.engine.container.name:
                if_pci = Topology.get_interface_pci_addr(
                    self.engine.container.node, dut2_if)
                if_name = Topology.get_interface_name(
                    self.engine.container.node, dut2_if)
            self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
            self.engine.create_vpp_exec_config(
                container_vat_template,
                mid1=mid1, sid1=sid1, if_name=if_name,
                socket1='memif-{c.name}-{sid}'.format(
                    c=self.engine.container, sid=sid1))
    else:
        raise RuntimeError(
            'Container topology {topology} not implemented'.format(
                topology=chain_topology))
def _configure_vpp_chain_vswitch(self, **kwargs):
    """Configure VPP as vswitch in container.

    Resolves the red/black physical interfaces for this container's DUT,
    generates the vswitch startup config and an exec script that bridges
    ``n_instances`` memif pairs to the physical side.

    :param kwargs: Named parameters.
    :type kwargs: dict
    """
    # DUT name is the container-name prefix before the first underscore.
    dut = self.engine.container.name.split(u"_")[0]
    # DUT1 and DUT2 use mirrored interface roles: which physical port is
    # "red" vs "black" and which TG PF provides the neighbor entry differ.
    if dut == u"DUT1":
        if1_pci = Topology.get_interface_pci_addr(
            self.engine.container.node, kwargs[u"dut1_if2"])
        if2_pci = Topology.get_interface_pci_addr(
            self.engine.container.node, kwargs[u"dut1_if1"])
        if_red_name = Topology.get_interface_name(
            self.engine.container.node, kwargs[u"dut1_if2"])
        if_black_name = Topology.get_interface_name(
            self.engine.container.node, kwargs[u"dut1_if1"])
        tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
        tg_pf_mac = kwargs[u"tg_pf2_mac"]
    else:
        tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
        tg_pf_mac = kwargs[u"tg_pf1_mac"]
        if1_pci = Topology.get_interface_pci_addr(
            self.engine.container.node, kwargs[u"dut2_if1"])
        if2_pci = Topology.get_interface_pci_addr(
            self.engine.container.node, kwargs[u"dut2_if2"])
        if_red_name = Topology.get_interface_name(
            self.engine.container.node, kwargs[u"dut2_if1"])
        if_black_name = Topology.get_interface_name(
            self.engine.container.node, kwargs[u"dut2_if2"])
    n_instances = int(kwargs[u"n_instances"])
    # Default to one RX queue unless the caller overrides it.
    rxq = 1
    if u"rxq" in kwargs:
        rxq = int(kwargs[u"rxq"])
    nodes = kwargs[u"nodes"]
    cpuset_cpus = CpuUtils.get_affinity_nf(
        nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
        nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
    )
    self.engine.create_vpp_startup_config_vswitch(
        cpuset_cpus, rxq, if1_pci, if2_pci
    )

    # One memif master pair per NF instance; memif1/x joins bridge 1,
    # memif2/x joins bridge 2 and gets a static neighbor for the TG PF.
    instances = []
    for i in range(1, n_instances + 1):
        instances.append(
            f"create interface memif id {i} socket-id 1 master\n"
            f"set interface state memif1/{i} up\n"
            f"set interface l2 bridge memif1/{i} 1\n"
            f"create interface memif id {i} socket-id 2 master\n"
            f"set interface state memif2/{i} up\n"
            f"set interface l2 bridge memif2/{i} 2\n"
            f"set ip neighbor memif2/{i} {tg_pf_ip4} {tg_pf_mac} "
            f"static\n\n"
        )

    self.engine.create_vpp_exec_config(
        u"memif_create_chain_vswitch_ipsec.exec",
        socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
        socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
        if_red_name=if_red_name,
        if_black_name=if_black_name,
        instances=u"\n\n".join(instances))
def initialize_traffic_generator(self, tg_node, tg_if1, tg_if2,
                                 tg_if1_adj_node, tg_if1_adj_if,
                                 tg_if2_adj_node, tg_if2_adj_if,
                                 test_type,
                                 tg_if1_dst_mac=None, tg_if2_dst_mac=None):
    """TG initialization.

    Installs T-Rex on the TG node, writes /etc/trex_cfg.yaml with the two
    test ports (ordered by PCI address) and their src/dest MACs, then
    (re)starts the T-Rex server and waits until it answers the info query.

    :param tg_node: Traffic generator node.
    :param tg_if1: TG - name of first interface.
    :param tg_if2: TG - name of second interface.
    :param tg_if1_adj_node: TG if1 adjacent node.
    :param tg_if1_adj_if: TG if1 adjacent interface.
    :param tg_if2_adj_node: TG if2 adjacent node.
    :param tg_if2_adj_if: TG if2 adjacent interface.
    :param test_type: 'L2' or 'L3' - src/dst MAC address.
    :param tg_if1_dst_mac: Interface 1 destination MAC address.
    :param tg_if2_dst_mac: Interface 2 destination MAC address.
    :type tg_node: dict
    :type tg_if1: str
    :type tg_if2: str
    :type tg_if1_adj_node: dict
    :type tg_if1_adj_if: str
    :type tg_if2_adj_node: dict
    :type tg_if2_adj_if: str
    :type test_type: str
    :type tg_if1_dst_mac: str
    :type tg_if2_dst_mac: str
    :returns: nothing
    :raises: RuntimeError in case of issue during initialization.
    """
    topo = Topology()

    if tg_node['type'] != NodeType.TG:
        raise RuntimeError('Node type is not a TG')
    self._node = tg_node

    if tg_node['subtype'] == NodeSubTypeTG.TREX:
        trex_path = "/opt/trex-core-2.25"

        ssh = SSH()
        ssh.connect(tg_node)

        # Install (or re-install) T-Rex from the framework directory.
        (ret, stdout, stderr) = ssh.exec_command(
            "sudo -E sh -c '{}/resources/tools/t-rex/"
            "t-rex-installer.sh'".format(Constants.REMOTE_FW_DIR),
            timeout=1800)
        if int(ret) != 0:
            logger.error('trex installation failed: {0}'.format(
                stdout + stderr))
            raise RuntimeError('Installation of TG failed')

        if1_pci = topo.get_interface_pci_addr(tg_node, tg_if1)
        if2_pci = topo.get_interface_pci_addr(tg_node, tg_if2)
        if1_mac = topo.get_interface_mac(tg_node, tg_if1)
        if2_mac = topo.get_interface_mac(tg_node, tg_if2)

        # L2 tests loop traffic between the two TG ports; L3 tests
        # target the MACs of the adjacent (DUT) interfaces.
        if test_type == 'L2':
            if1_adj_mac = if2_mac
            if2_adj_mac = if1_mac
        elif test_type == 'L3':
            if1_adj_mac = topo.get_interface_mac(tg_if1_adj_node,
                                                 tg_if1_adj_if)
            if2_adj_mac = topo.get_interface_mac(tg_if2_adj_node,
                                                 tg_if2_adj_if)
        else:
            raise ValueError("test_type unknown")

        # Explicit destination MACs override whatever was derived above.
        if tg_if1_dst_mac is not None and tg_if2_dst_mac is not None:
            if1_adj_mac = tg_if1_dst_mac
            if2_adj_mac = tg_if2_dst_mac

        # T-Rex orders ports by PCI address; if if1 is not the lower
        # one, swap everything and remember the reordering.
        if min(if1_pci, if2_pci) != if1_pci:
            if1_mac, if2_mac = if2_mac, if1_mac
            if1_pci, if2_pci = if2_pci, if1_pci
            if1_adj_mac, if2_adj_mac = if2_adj_mac, if1_adj_mac
            self._ifaces_reordered = True

        # trex_cfg.yaml wants MACs as comma-separated hex byte lists,
        # e.g. "0xaa,0xbb,...".
        if1_mac_hex = "0x" + if1_mac.replace(":", ",0x")
        if2_mac_hex = "0x" + if2_mac.replace(":", ",0x")
        if1_adj_mac_hex = "0x" + if1_adj_mac.replace(":", ",0x")
        if2_adj_mac_hex = "0x" + if2_adj_mac.replace(":", ",0x")

        (ret, stdout, stderr) = ssh.exec_command(
            "sudo sh -c 'cat << EOF > /etc/trex_cfg.yaml\n"
            "- port_limit : 2\n"
            " version : 2\n"
            " interfaces : [\"{}\",\"{}\"]\n"
            " port_info :\n"
            " - dest_mac : [{}]\n"
            " src_mac : [{}]\n"
            " - dest_mac : [{}]\n"
            " src_mac : [{}]\n"
            "EOF'"
            .format(if1_pci, if2_pci,
                    if1_adj_mac_hex, if1_mac_hex,
                    if2_adj_mac_hex, if2_mac_hex))
        if int(ret) != 0:
            logger.error("failed to create t-rex config: {}"
                         .format(stdout + stderr))
            raise RuntimeError('trex config generation error')

        max_startup_retries = 3
        while max_startup_retries > 0:
            # kill T-rex only if it is already running
            (ret, _, _) = ssh.exec_command(
                "sh -c 'pgrep t-rex && sudo pkill t-rex && sleep 3'")

            # configure T-rex
            (ret, stdout, stderr) = ssh.exec_command(
                "sh -c 'cd {0}/scripts/ && sudo ./trex-cfg'"
                .format(trex_path))
            if int(ret) != 0:
                logger.error('trex-cfg failed: {0}'.format(
                    stdout + stderr))
                raise RuntimeError('trex-cfg failed')

            # start T-rex
            (ret, _, _) = ssh.exec_command(
                "sh -c 'cd {0}/scripts/ && "
                "sudo nohup ./t-rex-64 -i -c 7 --iom 0 > /dev/null 2>&1 &'"
                "> /dev/null"
                .format(trex_path))
            if int(ret) != 0:
                raise RuntimeError('t-rex-64 startup failed')

            # get T-rex server info
            (ret, _, _) = ssh.exec_command(
                "sh -c 'sleep 3; "
                "{0}/resources/tools/t-rex/t-rex-server-info.py'"
                .format(Constants.REMOTE_FW_DIR),
                timeout=120)
            if int(ret) == 0:
                # If we get info T-rex is running
                return
            # try again
            max_startup_retries -= 1
        # after max retries T-rex is still not responding to API
        # critical error occurred
        raise RuntimeError('t-rex-64 startup failed')
def start_the_l3fwd_test(nodes_info, dut_node, dut_if1, dut_if2, nb_cores,
                         lcores_list, queue_nums, jumbo_frames):
    """Execute the l3fwd on the dut_node.

    :param nodes_info: All the nodes info in the topology file.
    :param dut_node: Will execute the l3fwd on this node.
    :param dut_if1: The test link interface 1.
    :param dut_if2: The test link interface 2.
    :param nb_cores: The cores number for the forwarding.
    :param lcores_list: The lcore list string for the l3fwd routing.
    :param queue_nums: The queues number for the NIC.
    :param jumbo_frames: Is jumbo frames or not. Accepted: yes / no
    :type nodes_info: dict
    :type dut_node: dict
    :type dut_if1: str
    :type dut_if2: str
    :type nb_cores: str
    :type lcores_list: str
    :type queue_nums: str
    :type jumbo_frames: str
    :returns: none
    :raises Exception: If it fails to execute l3fwd on the DUT node.
    """
    if_key0 = dut_if1
    if_key1 = dut_if2
    if_pci0 = Topology.get_interface_pci_addr(dut_node, if_key0)
    if_pci1 = Topology.get_interface_pci_addr(dut_node, if_key1)

    # Detect which interface is DPDK port 0 (the lower PCI address) and
    # swap both keys and PCI addresses if needed.
    if min(if_pci0, if_pci1) != if_pci0:
        if_key0, if_key1 = if_key1, if_key0
        if_pci0, if_pci1 = if_pci1, if_pci0

    adj_node0, adj_if_key0 = Topology.get_adjacent_node_and_interface(
        nodes_info, dut_node, if_key0)
    adj_node1, adj_if_key1 = Topology.get_adjacent_node_and_interface(
        nodes_info, dut_node, if_key1)

    adj_mac0 = Topology.get_interface_mac(adj_node0, adj_if_key0)
    adj_mac1 = Topology.get_interface_mac(adj_node1, adj_if_key1)

    list_cores = lcores_list.split(',')

    # Prepare the (port, queue, lcore) config parameter. With a single
    # forwarding core every queue maps to the first lcore; otherwise
    # queues are assigned to consecutive lcores from the list.
    # (The original duplicated identical if/else branches; collapsed.)
    index = 0
    port_config = ''
    for port in range(0, 2):
        for queue in range(0, int(queue_nums)):
            if int(nb_cores) == 1:
                index = 0
            port_config += '({0}, {1}, {2}),'.format(
                port, queue, int(list_cores[index]))
            index = index + 1
    port_config_param = port_config.rstrip(',')

    ssh = SSH()
    ssh.connect(dut_node)

    cmd = 'cd {0}/tests/dpdk/dpdk_scripts/ && ./run_l3fwd.sh ' \
        '"{1}" "{2}" {3} {4} {5}'.format(
            con.REMOTE_FW_DIR, lcores_list, port_config_param,
            adj_mac0, adj_mac1, jumbo_frames)

    (ret_code, _, stderr) = ssh.exec_command(cmd, timeout=600)
    if ret_code != 0:
        logger.error('Execute the l3fwd error: {0}'.format(stderr))
        raise Exception('Failed to execute l3fwd test at node {0}'.format(
            dut_node['host']))
def compute_circular_topology(self, nodes, filter_list=None, nic_pfs=1,
                              always_same_link=False, topo_has_tg=True,
                              topo_has_dut=True):
    """Return computed circular path.

    :param nodes: Nodes to append to the path.
    :param filter_list: Filter criteria list.
    :param nic_pfs: Number of PF of NIC.
    :param always_same_link: If True use always same link between two nodes
        in path. If False use different link (if available) between two
        nodes if one link was used before.
    :param topo_has_tg: If True, the topology has a TG node. If False,
        the topology consists entirely of DUT nodes.
    :param topo_has_dut: If True, the topology has a DUT node(s). If False,
        the topology consists entirely of TG nodes.
    :type nodes: dict
    :type filter_list: list of strings
    :type nic_pfs: int
    :type always_same_link: bool
    :type topo_has_tg: bool
    :type topo_has_dut: bool
    :returns: Topology information dictionary.
    :rtype: dict
    :raises RuntimeError: If unsupported combination of parameters.
    """
    t_dict = dict()
    if topo_has_dut:
        duts = [key for key in nodes if u"DUT" in key]
        t_dict[u"duts"] = duts
        t_dict[u"duts_count"] = len(duts)
        t_dict[u"int"] = u"pf"

    # Build the circular path once per PF pair. Filters apply to DUT
    # interfaces; they apply to the TG only in TG-only topologies.
    for _ in range(0, nic_pfs // 2):
        if topo_has_tg:
            if topo_has_dut:
                self.append_node(nodes[u"TG"])
            else:
                self.append_node(nodes[u"TG"], filter_list=filter_list)
        if topo_has_dut:
            for dut in duts:
                self.append_node(nodes[dut], filter_list=filter_list)
    if topo_has_tg:
        if topo_has_dut:
            self.append_node(nodes[u"TG"])
        else:
            self.append_node(nodes[u"TG"], filter_list=filter_list)
    self.compute_path(always_same_link, topo_has_dut)

    n_idx = 0  # node index
    t_idx = 1  # TG interface index
    d_idx = 0  # DUT interface index
    prev_host = None
    while True:
        interface, node = self.next_interface()
        if not interface:
            break
        if topo_has_tg and node.get(u"type") == u"TG":
            n_pfx = u"TG"  # node prefix
            p_pfx = f"pf{t_idx}"  # physical interface prefix
            i_pfx = f"if{t_idx}"  # [backwards compatible] interface prefix
            n_idx = 0
            t_idx = t_idx + 1
        elif topo_has_tg and topo_has_dut:
            # Each node has 2 interfaces, starting with 1
            # Calculate prefixes appropriately for current
            # path topology nomenclature:
            # tg1_if1 -> dut1_if1 -> dut1_if2 ->
            #     [dut2_if1 -> dut2_if2 ...] -> tg1_if2
            n_pfx = f"DUT{n_idx // 2 + 1}"
            p_pfx = f"pf{d_idx % 2 + t_idx - 1}"
            i_pfx = f"if{d_idx % 2 + t_idx - 1}"
            n_idx = n_idx + 1
            d_idx = d_idx + 1
        elif not topo_has_tg and always_same_link:
            this_host = node.get(u"host")
            # NOTE(review): prev_host is initialized to None and never
            # reassigned here, so this branch increments n_idx and resets
            # d_idx on every interface — confirm a `prev_host = this_host`
            # update was not intended.
            if prev_host != this_host:
                # When moving to a new host in the path,
                # increment the node index (n_idx) and
                # reset DUT interface index (d_idx) to 1.
                n_idx = n_idx + 1
                d_idx = 1
            n_pfx = f"DUT{n_idx}"
            p_pfx = f"pf{d_idx}"
            i_pfx = f"if{d_idx}"
            d_idx = d_idx + 1
        else:
            # Typo "paramters" fixed in the message below.
            raise RuntimeError(u"Unsupported combination of parameters")
        t_dict[f"{n_pfx}"] = node
        t_dict[f"{n_pfx}_{p_pfx}"] = [interface]
        t_dict[f"{n_pfx}_{p_pfx}_mac"] = \
            [Topology.get_interface_mac(node, interface)]
        t_dict[f"{n_pfx}_{p_pfx}_vlan"] = \
            [Topology.get_interface_vlan(node, interface)]
        t_dict[f"{n_pfx}_{p_pfx}_pci"] = \
            [Topology.get_interface_pci_addr(node, interface)]
        t_dict[f"{n_pfx}_{p_pfx}_ip4_addr"] = \
            [Topology.get_interface_ip4(node, interface)]
        t_dict[f"{n_pfx}_{p_pfx}_ip4_prefix"] = \
            [Topology.get_interface_ip4_prefix_length(node, interface)]
        if f"{n_pfx}_pf_pci" not in t_dict:
            t_dict[f"{n_pfx}_pf_pci"] = []
        t_dict[f"{n_pfx}_pf_pci"].append(
            Topology.get_interface_pci_addr(node, interface))
        if f"{n_pfx}_pf_keys" not in t_dict:
            t_dict[f"{n_pfx}_pf_keys"] = []
        t_dict[f"{n_pfx}_pf_keys"].append(interface)
        # Backward compatibility below
        t_dict[f"{n_pfx.lower()}_{i_pfx}"] = interface
        t_dict[f"{n_pfx.lower()}_{i_pfx}_mac"] = \
            Topology.get_interface_mac(node, interface)
        t_dict[f"{n_pfx.lower()}_{i_pfx}_pci"] = \
            Topology.get_interface_pci_addr(node, interface)
        t_dict[f"{n_pfx.lower()}_{i_pfx}_ip4_addr"] = \
            Topology.get_interface_ip4(node, interface)
        t_dict[f"{n_pfx.lower()}_{i_pfx}_ip4_prefix"] = \
            Topology.get_interface_ip4_prefix_length(node, interface)
    self.clear_path()
    return t_dict