Beispiel #1
0
    def generate_perf_config(self, config):
        """Build a single PVP loop-rate perf test configuration.

        Pairs up generator and DUT NICs positionally, creates one
        "pvp_loop_rate" flow per NIC pair and bundles the flows
        (measured via TRex) with a background CPU utilization
        measurement on the generator, DUT and guest hosts.

        :return: Perf test configuration
        :rtype: :any:`PerfRecipeConf`
        """
        perf_flows = []
        for gen_nic, dut_nic in zip(config.generator.nics, config.dut.nics):
            generator_bind = {
                "mac_addr": gen_nic.hwaddr,
                "pci_addr": gen_nic.bus_info,
                "ip_addr": gen_nic.ips[0],
            }
            receiver_bind = {
                "mac_addr": dut_nic.hwaddr,
                "pci_addr": dut_nic.bus_info,
                "ip_addr": dut_nic.ips[0],
            }
            perf_flows.append(PerfFlow(
                type="pvp_loop_rate",
                generator=config.generator.host,
                generator_bind=generator_bind,
                receiver=config.dut.host,
                receiver_bind=receiver_bind,
                msg_size=self.params.perf_msg_size,
                duration=self.params.perf_duration,
                parallel_streams=self.params.perf_streams,
                cpupin=None,
            ))

        cpu_hosts = [config.generator.host, config.dut.host, config.guest.host]
        measurements = [
            self.params.cpu_perf_tool(cpu_hosts),
            TRexFlowMeasurement(perf_flows, self.params.trex_dir),
        ]
        return PerfRecipeConf(
            measurements=measurements,
            iterations=self.params.perf_iterations,
        )
Beispiel #2
0
    def generate_perf_configurations(self, config):
        """Yield perf test configurations for the MACsec-encrypted setup.

        Yields no configurations unless ``config.encrypt`` is set. For
        every flow combination a network flow measurement and a CPU
        utilization measurement (on the client and server msec0 network
        namespaces) are bundled into a :any:`PerfRecipeConf`, evaluators
        are registered and the configuration is yielded.
        """
        # Guard clause: unencrypted configurations produce no perf tests,
        # matching the original behavior of an empty generator.
        if not config.encrypt:
            return

        client_netns = config.host1.msec0.netns
        server_netns = config.host2.msec0.netns

        for flows in self.generate_flow_combinations(config):
            recipe_conf = {
                "recipe_config": config,
                "flows": flows,
            }

            net_measurement = self.params.net_perf_tool(flows, recipe_conf)
            cpu_measurement = self.params.cpu_perf_tool(
                [client_netns, server_netns],
                recipe_conf,
            )

            perf_conf = PerfRecipeConf(
                measurements=[cpu_measurement, net_measurement],
                iterations=self.params.perf_iterations,
            )
            perf_conf.register_evaluators(
                cpu_measurement, self.cpu_perf_evaluators
            )
            perf_conf.register_evaluators(
                net_measurement, self.net_perf_evaluators
            )

            yield perf_conf
Beispiel #3
0
    def generate_perf_configurations(self, config):
        """Base perf test configuration generator.

        Iterates over all measurement combinations produced by
        :any:`generate_perf_measurements_combinations` and wraps each
        combination in a :any:`PerfRecipeConf` that carries a deep copy
        of the recipe configuration. Measurement evaluators are
        registered on every configuration via
        :any:`register_perf_evaluators` before it is yielded.

        :return: generator of Perf test configurations
        :rtype: Iterator[:any:`PerfRecipeConf`]
        """
        combinations = self.generate_perf_measurements_combinations(config)
        for measurements in combinations:
            perf_conf = PerfRecipeConf(
                measurements=measurements,
                iterations=self.params.perf_iterations,
                # deep copy so later mutations of `config` don't leak
                # into already-generated perf configurations
                parent_recipe_config=copy.deepcopy(config),
            )
            self.register_perf_evaluators(perf_conf)
            yield perf_conf
Beispiel #4
0
    def generate_perf_configurations(self, config):
        """Generate perf test configurations for every flow combination.

        For each flow combination (from :any:`generate_flow_combinations`)
        a network flow measurement and a background CPU utilization
        measurement — covering every host that generates or receives
        traffic in the flows — are bundled into a :any:`PerfRecipeConf`.
        Measurement evaluators are registered based on the
        :any:`cpu_perf_evaluators` and :any:`net_perf_evaluators`
        properties before the configuration is yielded.

        :return: generator of Perf test configurations
        :rtype: Iterator[:any:`PerfRecipeConf`]
        """
        for flows in self.generate_flow_combinations(config):
            perf_recipe_conf = dict(
                recipe_config=config,
                flows=flows,
            )

            flows_measurement = self.params.net_perf_tool(
                flows, perf_recipe_conf)

            # set comprehension deduplicates hosts that appear in
            # multiple flows or on both ends of a flow
            cpu_measurement_hosts = {
                host
                for flow in flows
                for host in (flow.generator, flow.receiver)
            }

            cpu_measurement = self.params.cpu_perf_tool(
                cpu_measurement_hosts,
                perf_recipe_conf,
            )

            perf_conf = PerfRecipeConf(
                measurements=[cpu_measurement, flows_measurement],
                iterations=self.params.perf_iterations,
            )

            perf_conf.register_evaluators(cpu_measurement,
                                          self.cpu_perf_evaluators)
            perf_conf.register_evaluators(flows_measurement,
                                          self.net_perf_evaluators)

            yield perf_conf
Beispiel #5
0
    def generate_perf_config(self, config):
        """Build the PVP loop-rate perf test configuration.

        Pairs generator and DUT NICs positionally and creates one
        "pvp_loop_rate" flow per pair, including address-family
        information in the NIC binds. The TRex flow measurement is
        combined with a CPU utilization measurement on the generator,
        DUT and guest hosts, and the configured evaluators are
        registered on the result.

        :return: Perf test configuration
        :rtype: :any:`PerfRecipeConf`
        """
        flows = []
        for gen_nic, dut_nic in zip(config.generator.nics, config.dut.nics):
            gen_bind = dict(
                mac_addr=gen_nic.hwaddr,
                pci_addr=gen_nic.bus_info,
                ip_addr=gen_nic.ips[0],
                family=gen_nic.ips[0].family,
            )
            dut_bind = dict(
                mac_addr=dut_nic.hwaddr,
                pci_addr=dut_nic.bus_info,
                ip_addr=dut_nic.ips[0],
                family=dut_nic.ips[0].family,
            )
            flows.append(PerfFlow(
                type="pvp_loop_rate",
                generator=config.generator.host,
                generator_nic=gen_nic,
                generator_bind=gen_bind,
                receiver=config.dut.host,
                receiver_nic=dut_nic,
                receiver_bind=dut_bind,
                msg_size=self.params.perf_msg_size,
                duration=self.params.perf_duration,
                parallel_streams=self.params.perf_streams,
                cpupin=None,
            ))

        recipe_conf = dict(recipe_config=config, flows=flows)

        cpu_measurement = self.params.cpu_perf_tool(
            [config.generator.host, config.dut.host, config.guest.host],
            recipe_conf,
        )
        flows_measurement = TRexFlowMeasurement(
            flows,
            self.params.trex_dir,
            self.params.host1_dpdk_cores.split(","),
            recipe_conf,
        )

        perf_conf = PerfRecipeConf(
            measurements=[cpu_measurement, flows_measurement],
            iterations=self.params.perf_iterations,
        )
        perf_conf.register_evaluators(cpu_measurement, self.cpu_perf_evaluators)
        perf_conf.register_evaluators(flows_measurement, self.net_perf_evaluators)
        return perf_conf
Beispiel #6
0
    def generate_perf_configurations(self, config):
        """Base perf test configuration generator

        The generator loops over all flow combinations to measure performance
        for (generated by the :any:`generate_flow_combinations` method). In
        addition to that during each flow combination measurement we add CPU
        utilization measurement to run on the background, covering every
        host that acts as a flow generator or receiver.

        Finally for each generated perf test configuration we register
        measurement evaluators based on the :any:`cpu_perf_evaluators` and
        :any:`net_perf_evaluators` properties.

        :return: generator of Perf test configurations
        :rtype: Iterator[:any:`PerfRecipeConf`]
        """
        for flows in self.generate_flow_combinations(config):
            perf_recipe_conf = dict(
                recipe_config=config,
                flows=flows,
            )

            flows_measurement = self.params.net_perf_tool(
                flows, perf_recipe_conf)

            # set comprehension deduplicates hosts appearing in several
            # flows or on both ends of the same flow
            cpu_measurement_hosts = {
                host
                for flow in flows
                for host in (flow.generator, flow.receiver)
            }

            cpu_measurement = self.params.cpu_perf_tool(
                cpu_measurement_hosts,
                perf_recipe_conf,
            )

            perf_conf = PerfRecipeConf(
                measurements=[cpu_measurement, flows_measurement],
                iterations=self.params.perf_iterations,
            )

            perf_conf.register_evaluators(cpu_measurement,
                                          self.cpu_perf_evaluators)
            perf_conf.register_evaluators(flows_measurement,
                                          self.net_perf_evaluators)

            yield perf_conf
Beispiel #7
0
    def generate_perf_config(self, config):
        """Build a PVP loop-rate perf configuration for generator/guest NIC pairs.

        Pairs generator NICs with guest NICs by index (up to the shorter
        of the two lists) and creates one "pvp_loop_rate" flow per pair.

        :return: Perf test configuration
        :rtype: :any:`PerfRecipeConf`
        """
        flows = []
        for i in range(0,
                       min(len(config.generator.nics),
                           len(config.guest.nics))):
            src_nic = config.generator.nics[i]
            src_ip = src_nic.ips[0]
            dst_nic = config.guest.nics[i]
            # NOTE(review): the destination IP is taken from the *next*
            # generator NIC (wrapping around), not from the guest NIC that
            # provides the MAC/PCI bind below -- presumably because the
            # guest loops traffic back towards the generator; confirm this
            # cross-NIC addressing is intentional.
            dst_ip = config.generator.nics[((i + 1) %
                                            len(config.generator.nics))].ips[0]

            src_bind = dict(mac_addr=src_nic.hwaddr,
                            pci_addr=src_nic.bus_info,
                            ip_addr=src_ip)
            dst_bind = dict(mac_addr=dst_nic.hwaddr,
                            pci_addr=dst_nic.bus_info,
                            ip_addr=dst_ip)
            flows.append(
                PerfFlow(type="pvp_loop_rate",
                         generator=config.generator.host,
                         generator_bind=src_bind,
                         receiver=config.guest.host,
                         receiver_bind=dst_bind,
                         msg_size=self.params.perf_msg_size,
                         duration=self.params.perf_duration,
                         parallel_streams=self.params.perf_streams,
                         cpupin=None))

        return PerfRecipeConf(
            measurements=[
                # CPU utilization is sampled on all three hosts involved in
                # the PVP scenario (generator, DUT and guest), even though
                # the flows themselves only reference generator and guest.
                self.params.cpu_perf_tool([
                    config.generator.host, config.dut.host, config.guest.host
                ]),
                TRexFlowMeasurement(
                    flows,
                    self.params.trex_dir,
                    self.params.host1_dpdk_cores.split(","),
                ),
            ],
            iterations=self.params.perf_iterations,
        )
Beispiel #8
0
    def generate_perf_configurations(self, config):
        """
        OvSDPDKBondRecipe perf test configuration generator.

        OvSDPDKBondRecipe needs a TRexFlowMeasurement built with extra
        DPDK-specific parameters, so this overrides the generic
        implementation inherited from :any:`BaseEnrtRecipe`.

        Each TRex NIC on host2 is paired positionally with the
        corresponding testpmd NIC on host1 and a "UDPMultiflow" flow is
        created for every pair. A background CPU utilization measurement
        on both hosts is bundled with the flow measurement, and the
        :any:`cpu_perf_evaluators` and :any:`net_perf_evaluators`
        properties are registered on the resulting configuration.

        :return: Perf test configuration
        :rtype: :any:`PerfRecipeConf`
        """
        host1, host2 = self.matched.host1, self.matched.host2
        nic_pairs = zip(
            [host2.dummy_cfg.eth0, host2.dummy_cfg.eth1],  # TRex (generator)
            [host1.dummy_cfg.eth0, host1.dummy_cfg.eth1],  # testpmd (receiver)
        )

        flows = []
        for generator_nic, receiver_nic in nic_pairs:
            generator_bind = dict(
                mac_addr=generator_nic.hwaddr,
                pci_addr=generator_nic.bus_info,
                ip_addr=generator_nic.ips[0],
                family=generator_nic.ips[0].family,
            )
            receiver_bind = dict(
                mac_addr=receiver_nic.hwaddr,
                pci_addr=receiver_nic.bus_info,
                ip_addr=receiver_nic.ips[0],
                family=receiver_nic.ips[0].family,
            )
            flows.append(PerfFlow(
                type="UDPMultiflow",
                generator=host2,
                generator_nic=generator_nic,
                generator_bind=generator_bind,
                receiver=host1,
                receiver_nic=receiver_nic,
                receiver_bind=receiver_bind,
                msg_size=self.params.perf_msg_size,
                duration=self.params.perf_duration,
                parallel_streams=1,
                cpupin=None,
            ))

        recipe_conf = dict(recipe_config=config, flows=flows)

        cpu_measurement = self.params.cpu_perf_tool([host1, host2], recipe_conf)
        flows_measurement = TRexFlowMeasurement(
            flows,
            self.params.trex_dir,
            self.params.trex_dpdk_cores.split(","),
            recipe_conf,
        )

        perf_conf = PerfRecipeConf(
            measurements=[cpu_measurement, flows_measurement],
            iterations=self.params.perf_iterations,
        )
        perf_conf.register_evaluators(cpu_measurement, self.cpu_perf_evaluators)
        perf_conf.register_evaluators(flows_measurement, self.net_perf_evaluators)
        return perf_conf