Example #1
    def create_kubernetes_vnf_startup_config(**kwargs):
        """Create Kubernetes VNF startup configuration.

        :param kwargs: Key-value pairs used to create configuration.
        :type kwargs: dict
        """
        smt_used = CpuUtils.is_smt_enabled(kwargs[u"node"][u"cpuinfo"])
        skip_cnt = kwargs[u"cpu_skip"] + (kwargs[u"i"] - 1) * \
            (kwargs[u"phy_cores"] - 1)
        cpuset_cpus = CpuUtils.cpu_slice_of_list_per_node(
            node=kwargs[u"node"],
            cpu_node=kwargs[u"cpu_node"],
            skip_cnt=skip_cnt,
            cpu_cnt=kwargs[u"phy_cores"] - 1,
            smt_used=smt_used)
        cpuset_main = CpuUtils.cpu_slice_of_list_per_node(
            node=kwargs[u"node"],
            cpu_node=kwargs[u"cpu_node"],
            skip_cnt=1,
            cpu_cnt=1,
            smt_used=smt_used)
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(kwargs[u"node"])
        vpp_config.add_unix_cli_listen(value=u"0.0.0.0:5002")
        vpp_config.add_unix_nodaemon()
        vpp_config.add_socksvr()
        # Pop the first core from the list to be the main core.
        vpp_config.add_cpu_main_core(str(cpuset_main.pop(0)))
        # If this is not the only core in the list, the rest are used as workers.
        if cpuset_cpus:
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.add_plugin(u"disable", [u"dpdk_plugin.so"])
        vpp_config.write_config(filename=kwargs[u"filename"])
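The function above pulls all of its inputs from kwargs. A minimal usage sketch, with hypothetical values (the topology node dict, counts and file path are placeholders, not from the source):

    # Hypothetical invocation; key names mirror the kwargs read above.
    create_kubernetes_vnf_startup_config(
        node=topology_node,   # topology node dict with a "cpuinfo" entry
        cpu_node=0,           # NUMA node the CPU slices are taken from
        cpu_skip=9,           # CPUs reserved ahead of the worker slices
        i=1,                  # 1-based VNF instance index
        phy_cores=2,          # yields phy_cores - 1 worker CPUs per instance
        filename="/etc/vpp/startup.conf",
    )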
Example #2
    def create_kubernetes_vnf_startup_config(**kwargs):
        """Create Kubernetes VNF startup configuration.

        :param kwargs: Key-value pairs used to create configuration.
        :type kwargs: dict
        """
        skip_cnt = kwargs['cpu_skip'] + (kwargs['i'] - 1) * \
            (kwargs['cpu_cnt'] - 1)
        cpuset_cpus = \
            CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
                                                cpu_node=kwargs['cpu_node'],
                                                skip_cnt=skip_cnt,
                                                cpu_cnt=kwargs['cpu_cnt']-1,
                                                smt_used=kwargs['smt_used'])
        cpuset_main = \
            CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
                                                cpu_node=kwargs['cpu_node'],
                                                skip_cnt=1,
                                                cpu_cnt=1,
                                                smt_used=kwargs['smt_used'])
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(kwargs['node'])
        vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
        vpp_config.add_unix_nodaemon()
        # Pop the first core from the list to be the main core.
        vpp_config.add_cpu_main_core(str(cpuset_main.pop(0)))
        # If this is not the only core in the list, the rest are used as workers.
        if cpuset_cpus:
            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.add_plugin_disable('dpdk_plugin.so')
        vpp_config.apply_config(filename=kwargs['filename'], restart_vpp=False)
Example #3
    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(self.engine.container, 'env',
                'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))

        # Set cpuset.cpus cgroup
        skip_cnt = kwargs['cpu_skip']
        if not kwargs['cpu_shared']:
            skip_cnt += kwargs['i'] * kwargs['cpu_count']
        self.engine.container.cpuset_cpus = \
            CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
                                                cpu_node=kwargs['cpuset_mems'],
                                                skip_cnt=skip_cnt,
                                                cpu_cnt=kwargs['cpu_count'],
                                                smt_used=kwargs['smt_used'])

        # Store container instance
        self.containers[kwargs['name']] = self.engine.container
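As above, every parameter arrives through kwargs and is copied onto the container object. A hypothetical call (the manager object, node dict and values are placeholders):

    # Hypothetical invocation; key names mirror the kwargs read above.
    container_manager.construct_container(
        name="nf1",            # also used for the MICROSERVICE_LABEL env var
        node=topology_node,    # topology node dict (placeholder)
        cpuset_mems=0,         # NUMA node used when slicing cpuset.cpus
        cpu_skip=9,            # CPUs reserved ahead of the container slices
        cpu_shared=False,      # False: each instance gets its own CPU slice
        i=0,                   # instance index used to offset the slice
        cpu_count=2,
        smt_used=False,
    )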
Example #4
    def create_kubernetes_vswitch_startup_config(**kwargs):
        """Create Kubernetes VSWITCH startup configuration.

        :param kwargs: Key-value pairs used to create configuration.
        :type kwargs: dict
        """
        cpuset_cpus = \
            CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
                                                cpu_node=kwargs['cpu_node'],
                                                skip_cnt=kwargs['cpu_skip'],
                                                cpu_cnt=kwargs['cpu_cnt'],
                                                smt_used=kwargs['smt_used'])

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(kwargs['node'])
        vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
        vpp_config.add_unix_nodaemon()
        vpp_config.add_dpdk_socketmem('1024,1024')
        vpp_config.add_heapsize('3G')
        vpp_config.add_ip6_hash_buckets('2000000')
        vpp_config.add_ip6_heap_size('3G')
        if kwargs['framesize'] < 1522:
            vpp_config.add_dpdk_no_multi_seg()
        vpp_config.add_dpdk_dev_default_rxq(kwargs['rxq'])
        vpp_config.add_dpdk_dev(kwargs['if1'], kwargs['if2'])
        # Pop the first core from the list to be the main core.
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If this is not the only core in the list, the rest are used as workers.
        if cpuset_cpus:
            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.apply_config(filename=kwargs['filename'], restart_vpp=False)
Example #5
    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]

        if dut == u"DUT1":
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_pf2_ip4=tg_pf_ip4,
            tg_pf2_mac=tg_pf_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        self.engine.execute(
            f"cat {kwargs['guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )
Example #6
    def construct_vms_on_node(self, **kwargs):
        """Construct 1..Mx1..N VMs(s) on node with specified name.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        node = kwargs[u"node"]
        nf_chains = int(kwargs[u"nf_chains"])
        nf_nodes = int(kwargs[u"nf_nodes"])
        queues = kwargs[u"rxq_count_int"] if kwargs[u"auto_scale"] else 1
        vs_dtc = kwargs[u"vs_dtc"]
        nf_dtc = kwargs[u"nf_dtc"]
        if kwargs[u"auto_scale"] and not kwargs[u"fixed_auto_scale"]:
            nf_dtc = kwargs[u"vs_dtc"]
        nf_dtcr = kwargs[u"nf_dtcr"] \
            if isinstance(kwargs[u"nf_dtcr"], int) else 2

        for nf_chain in range(1, nf_chains + 1):
            for nf_node in range(1, nf_nodes + 1):
                qemu_id = (nf_chain - 1) * nf_nodes + nf_node
                name = f"{node}_{qemu_id}"
                idx1 = (nf_chain - 1) * nf_nodes * 2 + nf_node * 2 - 1

                vif1_mac = Topology.get_interface_mac(
                    self.nodes[node], f"vhost{idx1}"
                ) if kwargs[u"vnf"] == u"testpmd_mac" \
                    else kwargs[u"tg_pf1_mac"] if nf_node == 1 \
                    else f"52:54:00:00:{(qemu_id - 1):02x}:02"
                idx2 = (nf_chain - 1) * nf_nodes * 2 + nf_node * 2
                vif2_mac = Topology.get_interface_mac(
                    self.nodes[node], f"vhost{idx2}"
                ) if kwargs[u"vnf"] == u"testpmd_mac" \
                    else kwargs[u"tg_pf2_mac"] if nf_node == nf_nodes \
                    else f"52:54:00:00:{(qemu_id + 1):02x}:01"

                self.machines_affinity[name] = CpuUtils.get_affinity_nf(
                    nodes=self.nodes,
                    node=node,
                    nf_chains=nf_chains,
                    nf_nodes=nf_nodes,
                    nf_chain=nf_chain,
                    nf_node=nf_node,
                    vs_dtc=vs_dtc,
                    nf_dtc=nf_dtc,
                    nf_dtcr=nf_dtcr)

                try:
                    getattr(self, f'_c_{kwargs["vnf"]}')(qemu_id=qemu_id,
                                                         name=name,
                                                         queues=queues,
                                                         **kwargs)
                except AttributeError:
                    self._c_default(qemu_id=qemu_id,
                                    name=name,
                                    queues=queues,
                                    vif1_mac=vif1_mac,
                                    vif2_mac=vif2_mac,
                                    **kwargs)
Example #7
    def create_kubernetes_vswitch_startup_config(**kwargs):
        """Create Kubernetes VSWITCH startup configuration.

        :param kwargs: Key-value pairs used to create configuration.
        :type kwargs: dict
        """
        smt_used = CpuUtils.is_smt_enabled(kwargs[u"node"][u"cpuinfo"])

        cpuset_cpus = CpuUtils.cpu_slice_of_list_per_node(
            node=kwargs[u"node"],
            cpu_node=kwargs[u"cpu_node"],
            skip_cnt=2,
            cpu_cnt=kwargs[u"phy_cores"],
            smt_used=smt_used)
        cpuset_main = CpuUtils.cpu_slice_of_list_per_node(
            node=kwargs[u"node"],
            cpu_node=kwargs[u"cpu_node"],
            skip_cnt=1,
            cpu_cnt=1,
            smt_used=smt_used)

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(kwargs[u"node"])
        vpp_config.add_unix_cli_listen(value=u"0.0.0.0:5002")
        vpp_config.add_unix_nodaemon()
        vpp_config.add_socksvr()
        vpp_config.add_heapsize(u"4G")
        vpp_config.add_ip_heap_size(u"4G")
        vpp_config.add_ip6_heap_size(u"4G")
        vpp_config.add_ip6_hash_buckets(u"2000000")
        if not kwargs[u"jumbo"]:
            vpp_config.add_dpdk_no_multi_seg()
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_dev_default_rxq(kwargs[u"rxq_count_int"])
        vpp_config.add_dpdk_dev(kwargs[u"if1"], kwargs[u"if2"])
        vpp_config.add_buffers_per_numa(kwargs[u"buffers_per_numa"])
        # Pop the first core from the list to be the main core.
        vpp_config.add_cpu_main_core(str(cpuset_main.pop(0)))
        # If this is not the only core in the list, the rest are used as workers.
        if cpuset_cpus:
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.write_config(filename=kwargs[u"filename"])
Example #8
    def vhost_user_affinity(node, pf_key, skip_cnt=0):
        """Set vhost-user affinity for the given node.

        :param node: Topology node.
        :param pf_key: Interface key to compute numa location.
        :param skip_cnt: Skip first "skip_cnt" CPUs.
        :type node: dict
        :type pf_key: str
        :type skip_cnt: int
        """
        pids, _ = exec_cmd_no_error(
            node, f"grep -h vhost /proc/*/comm | uniq | xargs pidof")

        affinity = CpuUtils.get_affinity_vhost(node,
                                               pf_key,
                                               skip_cnt=skip_cnt,
                                               cpu_cnt=len(pids.split(" ")))

        for cpu, pid in zip(affinity, pids.split(" ")):
            exec_cmd_no_error(node, f"taskset -pc {cpu} {pid}", sudo=True)
Example #9
    def set_interface_irqs_affinity(node, interface, cpu_skip_cnt=0, cpu_cnt=1):
        """Set IRQs affinity for interface in linux.

        :param node: Topology node.
        :param interface: Topology interface.
        :param cpu_skip_cnt: Amount of CPU cores to skip. (Optional, Default: 0)
        :param cpu_cnt: CPU threads count. (Optional, Default: 1)
        :type node: dict
        :type interface: str
        :type cpu_skip_cnt: int
        :type cpu_cnt: int
        """
        cpu_list = CpuUtils.get_affinity_af_xdp(
            node, interface, cpu_skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt
        )
        interface = Topology.get_interface_name(node, interface)
        irq_list = IrqUtil.get_interface_irqs(node, interface)

        for irq, cpu in zip(irq_list, cpu_list):
            if cpu < 32:
                mask = 1 << cpu
                mask = f"{mask:x}"
            else:
                groups = cpu // 32
                mask_fill = u""
                for _ in range(groups):
                    mask_fill = f"{mask_fill},00000000"
                mask = 1 << (cpu - (32 * groups))
                mask = f"{mask:x}{mask_fill}"

            command = f"sh -c 'echo {mask} > /proc/irq/{irq}/smp_affinity'"
            message = f"Failed to set IRQ affinity for {irq} on {node['host']}!"
            exec_cmd_no_error(
                node, command, timeout=30, sudo=True, message=message
            )
Example #10
def run_wrk(tg_node, profile_name, tg_numa, test_type, warm_up=False):
    """Send the traffic as defined in the profile.

    :param tg_node: Traffic generator node.
    :param profile_name: The name of wrk traffic profile.
    :param tg_numa: Numa node on which wrk will run.
    :param test_type: The type of the tests: cps, rps, bw
    :param warm_up: If True, warm-up traffic is generated before test traffic.
    :type profile_name: str
    :type tg_node: dict
    :type tg_numa: int
    :type test_type: str
    :type warm_up: bool
    :returns: Message with measured data.
    :rtype: str
    :raises RuntimeError: If node type is not a TG.
    """

    if tg_node['type'] != NodeType.TG:
        raise RuntimeError('Node type is not a TG.')

    # Parse and validate the profile
    profile_path = (
        "resources/traffic_profiles/wrk/{0}.yaml".format(profile_name))
    profile = WrkTrafficProfile(profile_path).traffic_profile

    cores = CpuUtils.cpu_list_per_node(tg_node, tg_numa)
    first_cpu = cores[profile["first-cpu"]]

    if len(profile["urls"]) == 1 and profile["cpus"] == 1:
        params = [
            "traffic_1_url_1_core",
            str(first_cpu),
            str(profile["nr-of-threads"]),
            str(profile["nr-of-connections"]),
            "{0}s".format(profile["duration"]),
            "'{0}'".format(profile["header"]),
            str(profile["timeout"]),
            str(profile["script"]),
            str(profile["latency"]), "'{0}'".format(" ".join(profile["urls"]))
        ]
        if warm_up:
            warm_up_params = deepcopy(params)
            # The duration sits at index 4; shorten it for the warm-up run.
            warm_up_params[4] = "10s"
    elif len(profile["urls"]) == profile["cpus"]:
        params = [
            "traffic_n_urls_n_cores",
            str(first_cpu),
            str(profile["nr-of-threads"]),
            str(profile["nr-of-connections"]),
            "{0}s".format(profile["duration"]),
            "'{0}'".format(profile["header"]),
            str(profile["timeout"]),
            str(profile["script"]),
            str(profile["latency"]), "'{0}'".format(" ".join(profile["urls"]))
        ]
        if warm_up:
            warm_up_params = deepcopy(params)
            warm_up_params[4] = "10s"
    else:
        params = [
            "traffic_n_urls_m_cores",
            str(first_cpu),
            str(profile["cpus"] / len(profile["urls"])),
            str(profile["nr-of-threads"]),
            str(profile["nr-of-connections"]),
            "{0}s".format(profile["duration"]),
            "'{0}'".format(profile["header"]),
            str(profile["timeout"]),
            str(profile["script"]),
            str(profile["latency"]), "'{0}'".format(" ".join(profile["urls"]))
        ]
        if warm_up:
            warm_up_params = deepcopy(params)
            # The extra cores-per-URL element shifts the duration to index 5.
            warm_up_params[5] = "10s"

    args = " ".join(params)

    ssh = SSH()
    ssh.connect(tg_node)

    if warm_up:
        warm_up_args = " ".join(warm_up_params)
        ret, _, _ = ssh.exec_command(
            "{0}/resources/tools/wrk/wrk_utils.sh {1}".format(
                Constants.REMOTE_FW_DIR, warm_up_args),
            timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('wrk runtime error.')
        sleep(60)

    ret, stdout, _ = ssh.exec_command(
        "{0}/resources/tools/wrk/wrk_utils.sh {1}".format(
            Constants.REMOTE_FW_DIR, args),
        timeout=1800)
    if int(ret) != 0:
        raise RuntimeError('wrk runtime error.')

    stats = _parse_wrk_output(stdout)

    log_msg = "\nMeasured values:\n"
    if test_type == "cps":
        log_msg += "Connections/sec: Avg / Stdev / Max  / +/- Stdev\n"
        for item in stats["rps-stats-lst"]:
            log_msg += "{0} / {1} / {2} / {3}\n".format(*item)
        log_msg += "Total cps: {0}cps\n".format(stats["rps-sum"])
    elif test_type == "rps":
        log_msg += "Requests/sec: Avg / Stdev / Max  / +/- Stdev\n"
        for item in stats["rps-stats-lst"]:
            log_msg += "{0} / {1} / {2} / {3}\n".format(*item)
        log_msg += "Total rps: {0}rps\n".format(stats["rps-sum"])
    elif test_type == "bw":
        log_msg += "Transfer/sec: {0}Bps".format(stats["bw-sum"])

    logger.info(log_msg)

    return log_msg
Example #11
    def construct_vms_on_node(self, **kwargs):
        """Construct 1..Mx1..N VMs(s) on node with specified name.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        node = kwargs['node']
        nf_chains = int(kwargs['nf_chains'])
        nf_nodes = int(kwargs['nf_nodes'])
        queues = kwargs['rxq_count_int'] if kwargs['auto_scale'] else 1
        vs_dtc = kwargs['vs_dtc']
        nf_dtc = kwargs['vs_dtc'] if kwargs['auto_scale'] else kwargs['nf_dtc']
        nf_dtcr = kwargs['nf_dtcr'] if isinstance(kwargs['nf_dtcr'],
                                                  int) else 2

        img = Constants.QEMU_VM_KERNEL

        for nf_chain in range(1, nf_chains + 1):
            for nf_node in range(1, nf_nodes + 1):
                qemu_id = (nf_chain - 1) * nf_nodes + nf_node
                name = '{node}_{qemu_id}'.format(node=node, qemu_id=qemu_id)
                sock1 = '/var/run/vpp/sock-{qemu_id}-1'.format(qemu_id=qemu_id)
                sock2 = '/var/run/vpp/sock-{qemu_id}-2'.format(qemu_id=qemu_id)
                vif1_mac = kwargs['tg_if1_mac'] if nf_node == 1 \
                        else '52:54:00:00:{id:02x}:02'.format(id=qemu_id - 1)
                vif2_mac = kwargs['tg_if2_mac'] if nf_node == nf_nodes \
                        else '52:54:00:00:{id:02x}:01'.format(id=qemu_id + 1)

                self.machines_affinity[name] = CpuUtils.get_affinity_nf(
                    nodes=self.nodes,
                    node=node,
                    nf_chains=nf_chains,
                    nf_nodes=nf_nodes,
                    nf_chain=nf_chain,
                    nf_node=nf_node,
                    vs_dtc=vs_dtc,
                    nf_dtc=nf_dtc,
                    nf_dtcr=nf_dtcr)

                self.machines[name] = QemuUtils(
                    node=self.nodes[node],
                    qemu_id=qemu_id,
                    smp=len(self.machines_affinity[name]),
                    mem=4096,
                    vnf=kwargs['vnf'],
                    img=img)
                self.machines[name].configure_kernelvm_vnf(
                    mac1='52:54:00:00:{id:02x}:01'.format(id=qemu_id),
                    mac2='52:54:00:00:{id:02x}:02'.format(id=qemu_id),
                    vif1_mac=vif1_mac,
                    vif2_mac=vif2_mac,
                    queues=queues,
                    jumbo_frames=kwargs['jumbo'])
                self.machines[name].qemu_add_vhost_user_if(
                    sock1,
                    jumbo_frames=kwargs['jumbo'],
                    queues=queues,
                    queue_size=kwargs['perf_qemu_qsz'])
                self.machines[name].qemu_add_vhost_user_if(
                    sock2,
                    jumbo_frames=kwargs['jumbo'],
                    queues=queues,
                    queue_size=kwargs['perf_qemu_qsz'])
Example #12
    def construct_vms_on_node(self, **kwargs):
        """Construct 1..Mx1..N VMs(s) on node with specified name.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        node = kwargs[u"node"]
        nf_chains = int(kwargs[u"nf_chains"])
        nf_nodes = int(kwargs[u"nf_nodes"])
        queues = kwargs[u"rxq_count_int"] if kwargs[u"auto_scale"] else 1
        vs_dtc = kwargs[u"vs_dtc"]
        nf_dtc = kwargs[u"vs_dtc"] if kwargs[u"auto_scale"] \
            else kwargs[u"nf_dtc"]
        nf_dtcr = kwargs[u"nf_dtcr"] \
            if isinstance(kwargs[u"nf_dtcr"], int) else 2

        img = Constants.QEMU_VM_KERNEL

        for nf_chain in range(1, nf_chains + 1):
            for nf_node in range(1, nf_nodes + 1):
                qemu_id = (nf_chain - 1) * nf_nodes + nf_node
                name = f"{node}_{qemu_id}"
                sock1 = f"/run/vpp/sock-{qemu_id}-1"
                sock2 = f"/run/vpp/sock-{qemu_id}-2"
                idx1 = (nf_chain - 1) * nf_nodes * 2 + nf_node * 2 - 1
                vif1_mac = Topology.get_interface_mac(
                    self.nodes[node], f"vhost{idx1}"
                ) if kwargs[u"vnf"] == u"testpmd_mac" \
                    else kwargs[u"tg_pf1_mac"] if nf_node == 1 \
                    else f"52:54:00:00:{(qemu_id - 1):02x}:02"
                idx2 = (nf_chain - 1) * nf_nodes * 2 + nf_node * 2
                vif2_mac = Topology.get_interface_mac(
                    self.nodes[node], f"vhost{idx2}"
                ) if kwargs[u"vnf"] == u"testpmd_mac" \
                    else kwargs[u"tg_pf2_mac"] if nf_node == nf_nodes \
                    else f"52:54:00:00:{(qemu_id + 1):02x}:01"

                self.machines_affinity[name] = CpuUtils.get_affinity_nf(
                    nodes=self.nodes, node=node, nf_chains=nf_chains,
                    nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
                    vs_dtc=vs_dtc, nf_dtc=nf_dtc, nf_dtcr=nf_dtcr
                )

                self.machines[name] = QemuUtils(
                    node=self.nodes[node], qemu_id=qemu_id,
                    smp=len(self.machines_affinity[name]), mem=4096,
                    vnf=kwargs[u"vnf"], img=img
                )
                self.machines[name].configure_kernelvm_vnf(
                    mac1=f"52:54:00:00:{qemu_id:02x}:01",
                    mac2=f"52:54:00:00:{qemu_id:02x}:02",
                    vif1_mac=vif1_mac, vif2_mac=vif2_mac, queues=queues,
                    jumbo_frames=kwargs[u"jumbo"]
                )
                self.machines[name].qemu_add_vhost_user_if(
                    sock1, jumbo_frames=kwargs[u"jumbo"], queues=queues,
                    queue_size=kwargs[u"perf_qemu_qsz"],
                    csum=kwargs[u"enable_csum"], gso=kwargs[u"enable_gso"]
                )
                self.machines[name].qemu_add_vhost_user_if(
                    sock2, jumbo_frames=kwargs[u"jumbo"], queues=queues,
                    queue_size=kwargs[u"perf_qemu_qsz"],
                    csum=kwargs[u"enable_csum"], gso=kwargs[u"enable_gso"]
                )
Example #13
    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
        else:
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])

        n_instances = int(kwargs[u"n_instances"])
        rxq = 1
        if u"rxq" in kwargs:
            rxq = int(kwargs[u"rxq"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, if1_pci, if2_pci
        )

        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip neighbor memif2/{i} {tg_pf_ip4} {tg_pf_mac} "
                f"static\n\n"
            )

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))
Example #14
def run_wrk(tg_node, profile_name, tg_numa, test_type, warm_up=False):
    """Send the traffic as defined in the profile.

    :param tg_node: Traffic generator node.
    :param profile_name: The name of wrk traffic profile.
    :param tg_numa: Numa node on which wrk will run.
    :param test_type: The type of the tests: cps, rps, bw
    :param warm_up: If True, warm-up traffic is generated before test traffic.
    :type profile_name: str
    :type tg_node: dict
    :type tg_numa: int
    :type test_type: str
    :type warm_up: bool
    :returns: Message with measured data.
    :rtype: str
    :raises RuntimeError: If node type is not a TG.
    """

    if tg_node[u"type"] != NodeType.TG:
        raise RuntimeError(u"Node type is not a TG.")

    # Parse and validate the profile
    profile_path = f"resources/traffic_profiles/wrk/{profile_name}.yaml"
    profile = WrkTrafficProfile(profile_path).traffic_profile

    cores = CpuUtils.cpu_list_per_node(tg_node, tg_numa)
    first_cpu = cores[profile[u"first-cpu"]]

    if len(profile[u"urls"]) == 1 and profile[u"cpus"] == 1:
        params = [
            u"traffic_1_url_1_core",
            str(first_cpu),
            str(profile[u"nr-of-threads"]),
            str(profile[u"nr-of-connections"]), f"{profile[u'duration']}s",
            f"'{profile[u'header']}'",
            str(profile[u"timeout"]),
            str(profile[u"script"]),
            str(profile[u"latency"]), f"'{u' '.join(profile[u'urls'])}'"
        ]
        if warm_up:
            warm_up_params = deepcopy(params)
            warm_up_params[4] = u"10s"
    elif len(profile[u"urls"]) == profile[u"cpus"]:
        params = [
            u"traffic_n_urls_n_cores",
            str(first_cpu),
            str(profile[u"nr-of-threads"]),
            str(profile[u"nr-of-connections"]), f"{profile[u'duration']}s",
            f"'{profile[u'header']}'",
            str(profile[u"timeout"]),
            str(profile[u"script"]),
            str(profile[u"latency"]), f"'{u' '.join(profile[u'urls'])}'"
        ]
        if warm_up:
            warm_up_params = deepcopy(params)
            warm_up_params[4] = u"10s"
    else:
        params = [
            u"traffic_n_urls_m_cores",
            str(first_cpu),
            str(profile[u"cpus"] // len(profile[u"urls"])),
            str(profile[u"nr-of-threads"]),
            str(profile[u"nr-of-connections"]), f"{profile[u'duration']}s",
            f"'{profile[u'header']}'",
            str(profile[u"timeout"]),
            str(profile[u"script"]),
            str(profile[u"latency"]), f"'{u' '.join(profile[u'urls'])}'"
        ]
        if warm_up:
            warm_up_params = deepcopy(params)
            # The extra cores-per-URL element shifts the duration to index 5.
            warm_up_params[5] = u"10s"

    args = u" ".join(params)

    ssh = SSH()
    ssh.connect(tg_node)

    if warm_up:
        warm_up_args = u" ".join(warm_up_params)
        ret, _, _ = ssh.exec_command(
            f"{Constants.REMOTE_FW_DIR}/resources/tools/wrk/wrk_utils.sh "
            f"{warm_up_args}",
            timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(u"wrk runtime error.")
        sleep(60)

    ret, stdout, _ = ssh.exec_command(
        f"{Constants.REMOTE_FW_DIR}/resources/tools/wrk/wrk_utils.sh {args}",
        timeout=1800)
    if int(ret) != 0:
        raise RuntimeError(u"wrk runtime error.")

    stats = _parse_wrk_output(stdout)

    log_msg = u"\nMeasured values:\n"
    if test_type == u"cps":
        log_msg += u"Connections/sec: Avg / Stdev / Max  / +/- Stdev\n"
        for item in stats[u"rps-stats-lst"]:
            log_msg += u" / ".join(map(str, item)) + u"\n"
        log_msg += f"Total cps: {stats[u'rps-sum']}cps\n"
    elif test_type == u"rps":
        log_msg += u"Requests/sec: Avg / Stdev / Max  / +/- Stdev\n"
        for item in stats[u"rps-stats-lst"]:
            log_msg += u" / ".join(map(str, item)) + u"\n"
        log_msg += f"Total rps: {stats[u'rps-sum']}rps\n"
    elif test_type == u"bw":
        log_msg += f"Transfer/sec: {stats[u'bw-sum']}Bps"

    logger.info(log_msg)

    return log_msg
Example #15
File: Iperf3.py Project: gvnn3/csit
    def initialize_iperf_server(self,
                                node,
                                pf_key,
                                interface,
                                bind,
                                bind_gw,
                                bind_mask,
                                namespace=None,
                                cpu_skip_cnt=0,
                                cpu_cnt=1,
                                instances=1):
        """iPerf3 initialization.

        :param node: Topology node running iPerf3 server.
        :param pf_key: First TG's interface (To compute numa location).
        :param interface: Name of TG bind interface.
        :param bind: Bind to host, one of node's addresses.
        :param bind_gw: Bind gateway (required for default route).
        :param bind_mask: Bind address mask.
        :param namespace: Name of TG namespace to execute.
        :param cpu_skip_cnt: Amount of CPU cores to skip.
        :param cpu_cnt: iPerf3 main thread count.
        :param instances: Number of simultaneous iPerf3 instances.
        :type node: dict
        :type pf_key: str
        :type interface: str
        :type bind: str
        :type bind_gw: str
        :type bind_mask: str
        :type namespace: str
        :type cpu_skip_cnt: int
        :type cpu_cnt: int
        :type instances: int
        """
        if Iperf3.is_iperf_running(node):
            Iperf3.teardown_iperf(node)

        if namespace:
            IPUtil.set_linux_interface_ip(node,
                                          interface=interface,
                                          ip_addr=bind,
                                          prefix=bind_mask,
                                          namespace=namespace)
            IPUtil.set_linux_interface_up(node,
                                          interface=interface,
                                          namespace=namespace)
            Namespaces.add_default_route_to_namespace(node,
                                                      namespace=namespace,
                                                      default_route=bind_gw)

        # Compute affinity for iPerf server.
        self._s_affinity = CpuUtils.get_affinity_iperf(
            node,
            pf_key,
            cpu_skip_cnt=cpu_skip_cnt,
            cpu_cnt=cpu_cnt * instances)
        # Compute affinity for iPerf client.
        self._c_affinity = CpuUtils.get_affinity_iperf(
            node,
            pf_key,
            cpu_skip_cnt=cpu_skip_cnt + cpu_cnt * instances,
            cpu_cnt=cpu_cnt * instances)

        for i in range(instances):
            Iperf3.start_iperf_server(node,
                                      namespace=namespace,
                                      port=5201 + i,
                                      affinity=self._s_affinity)
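A hypothetical initialization (the node dict, interface names and addresses are placeholders, not from the source):

    iperf = Iperf3()
    iperf.initialize_iperf_server(
        node=tg_node,              # topology node dict (placeholder)
        pf_key="port1",            # interface key used for NUMA placement
        interface="ens4",          # Linux interface the server binds to
        bind="192.168.10.1",
        bind_gw="192.168.10.254",
        bind_mask="24",
        namespace="tg-ns",         # optional; IPs are only set when given
        cpu_skip_cnt=2,
        cpu_cnt=1,
        instances=2,               # servers listen on ports 5201 and 5202
    )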