def get_affinity_nf(
        nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
        vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
    """Get affinity of NF (network function).

    Result will be used to compute the amount of CPUs and also affinity.

    :param nodes: Physical topology nodes.
    :param node: SUT node.
    :param nf_chains: Number of NF chains.
    :param nf_nodes: Number of NF nodes in chain.
    :param nf_chain: Chain number indexed from 1.
    :param nf_node: Node number indexed from 1.
    :param vs_dtc: Amount of physical cores for vswitch dataplane.
    :param nf_dtc: Amount of physical cores for NF dataplane.
    :param nf_mtcr: NF main thread per core ratio.
    :param nf_dtcr: NF dataplane thread per core ratio.
    :type nodes: dict
    :type node: dict
    :type nf_chains: int
    :type nf_nodes: int
    :type nf_chain: int
    :type nf_node: int
    :type vs_dtc: int
    :type nf_dtc: int or float
    :type nf_mtcr: int
    :type nf_dtcr: int
    :returns: List of CPUs allocated to NF.
    :rtype: list
    """
    # Cores reserved for system, main thread and vswitch dataplane are
    # skipped before any NF placement.
    skip_cnt = Constants.CPU_CNT_SYSTEM + Constants.CPU_CNT_MAIN + vs_dtc
    # Resolve both SUT interfaces from Robot Framework variables to find
    # the NUMA node the NF should be pinned to.
    interfaces = [
        BuiltIn().get_variable_value(f"${{{node}_if1}}"),
        BuiltIn().get_variable_value(f"${{{node}_if2}}"),
    ]
    cpu_node = Topology.get_interfaces_numa_node(nodes[node], *interfaces)
    return CpuUtils.cpu_slice_of_list_for_nf(
        node=nodes[node], cpu_node=cpu_node, nf_chains=nf_chains,
        nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
        nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc,
        skip_cnt=skip_cnt)
def get_affinity_trex(node, if1_pci, if2_pci, tg_mtc=1, tg_dtc=1, tg_ltc=1):
    """Get affinity for T-Rex. Result will be used to pin T-Rex threads.

    :param node: TG node.
    :param if1_pci: TG first interface.
    :param if2_pci: TG second interface.
    :param tg_mtc: TG main thread count.
    :param tg_dtc: TG dataplane thread count.
    :param tg_ltc: TG latency thread count.
    :type node: dict
    :type if1_pci: str
    :type if2_pci: str
    :type tg_mtc: int
    :type tg_dtc: int
    :type tg_ltc: int
    :returns: List of CPUs allocated to T-Rex including numa node.
    :rtype: int, int, int, list
    """
    # Pin all T-Rex threads to the NUMA node the TG interfaces live on.
    numa = Topology.get_interfaces_numa_node(node, if1_pci, if2_pci)
    # Master, dataplane and latency threads are carved out of the node's
    # CPU list consecutively, without SMT siblings.
    master = CpuUtils.cpu_slice_of_list_per_node(
        node, numa, skip_cnt=0, cpu_cnt=tg_mtc, smt_used=False)
    dataplane = CpuUtils.cpu_slice_of_list_per_node(
        node, numa, skip_cnt=tg_mtc, cpu_cnt=tg_dtc, smt_used=False)
    latency = CpuUtils.cpu_slice_of_list_per_node(
        node, numa, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
        smt_used=False)
    return master[0], latency[0], numa, dataplane
def get_affinity_vswitch(
        nodes, node, phy_cores, rx_queues=None, rxd=None, txd=None):
    """Get affinity for vswitch.

    :param nodes: Topology nodes.
    :param node: Topology node string.
    :param phy_cores: Number of physical cores to allocate.
    :param rx_queues: Number of RX queues. (Optional, Default: None)
    :param rxd: Number of RX descriptors. (Optional, Default: None)
    :param txd: Number of TX descriptors. (Optional, Default: None)
    :type nodes: dict
    :type node: str
    :type phy_cores: int
    :type rx_queues: int
    :type rxd: int
    :type txd: int
    :returns: Compute resource information dictionary.
    :rtype: dict
    """
    # NOTE: placeholder-free literals below were needlessly f-prefixed;
    # plain strings produce the identical "${var}" lookup keys.
    # Number of Data Plane physical cores.
    dp_cores_count = BuiltIn().get_variable_value(
        u"${dp_cores_count}", phy_cores)
    # Number of Feature Plane physical cores.
    fp_cores_count = BuiltIn().get_variable_value(
        u"${fp_cores_count}", phy_cores - dp_cores_count)
    # Ratio between RX queues and data plane threads.
    rxq_ratio = BuiltIn().get_variable_value(u"${rxq_ratio}", 1)

    dut_pf_keys = BuiltIn().get_variable_value(f"${{{node}_pf_keys}}")
    # SMT override in case of non standard test cases.
    smt_used = BuiltIn().get_variable_value(
        u"${smt_used}", CpuUtils.is_smt_enabled(nodes[node][u"cpuinfo"]))

    cpu_node = Topology.get_interfaces_numa_node(nodes[node], *dut_pf_keys)
    # Allocate main, dataplane and feature-plane cores consecutively,
    # after the system-reserved cores.
    skip_cnt = Constants.CPU_CNT_SYSTEM
    cpu_main = CpuUtils.cpu_list_per_node_str(
        nodes[node], cpu_node, skip_cnt=skip_cnt,
        cpu_cnt=Constants.CPU_CNT_MAIN, smt_used=False)
    skip_cnt += Constants.CPU_CNT_MAIN
    cpu_dp = CpuUtils.cpu_list_per_node_str(
        nodes[node], cpu_node, skip_cnt=skip_cnt,
        cpu_cnt=int(dp_cores_count), smt_used=smt_used) \
        if int(dp_cores_count) else u""
    skip_cnt += int(dp_cores_count)
    cpu_fp = CpuUtils.cpu_list_per_node_str(
        nodes[node], cpu_node, skip_cnt=skip_cnt,
        cpu_cnt=int(fp_cores_count), smt_used=smt_used) \
        if int(fp_cores_count) else u""

    # With SMT each physical core contributes NR_OF_THREADS logical threads.
    fp_count_int = int(fp_cores_count) * CpuUtils.NR_OF_THREADS \
        if smt_used else int(fp_cores_count)
    dp_count_int = int(dp_cores_count) * CpuUtils.NR_OF_THREADS \
        if smt_used else int(dp_cores_count)

    # Explicit rx_queues wins; otherwise derive from dataplane threads,
    # always configuring at least one RX queue.
    rxq_count_int = rx_queues if rx_queues else int(dp_count_int / rxq_ratio)
    rxq_count_int = 1 if not rxq_count_int else rxq_count_int

    compute_resource_info = {
        # Buffer count doubles when SMT doubles the worker threads.
        u"buffers_numa": 215040 if smt_used else 107520,
        u"smt_used": smt_used,
        u"cpu_main": cpu_main,
        u"cpu_dp": cpu_dp,
        u"cpu_fp": cpu_fp,
        # filter(None, ...) drops the empty strings of unused planes.
        u"cpu_wt": u",".join(filter(None, [cpu_dp, cpu_fp])),
        u"cpu_alloc_str": u",".join(
            filter(None, [cpu_main, cpu_dp, cpu_fp])),
        u"cpu_count_int": int(dp_cores_count) + int(fp_cores_count),
        u"rxd_count_int": rxd,
        u"txd_count_int": txd,
        u"rxq_count_int": rxq_count_int,
        u"fp_count_int": fp_count_int,
        u"dp_count_int": dp_count_int,
    }
    return compute_resource_info